diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 4b4c37f399b7..d591c3bc46c7 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -38,5 +38,7 @@ updates: directory: "/" schedule: interval: "weekly" + day: "saturday" + time: "07:00" # UTC pull-request-branch-name: separator: "-" diff --git a/.github/labeler.yml b/.github/labeler.yml new file mode 100644 index 000000000000..fc68079617a6 --- /dev/null +++ b/.github/labeler.yml @@ -0,0 +1,21 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Configuration for .github/workflows/label-pr.yml + +# This rule can be deleted once the container reconciliation feature branch is merged. +container-reconciliation: +- base-branch: HDDS-10239-container-reconciliation + diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 74c4756cfd04..e4c9431caa9a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -92,10 +92,10 @@ jobs: restore-keys: | ${{ runner.os }}-pnpm- - name: Cache for maven dependencies - uses: actions/cache@v4 + uses: actions/cache/restore@v4 with: path: | - ~/.m2/repository + ~/.m2/repository/*/*/* !~/.m2/repository/org/apache/ozone key: maven-repo-${{ hashFiles('**/pom.xml') }} restore-keys: | @@ -164,7 +164,7 @@ jobs: uses: actions/cache/restore@v4 with: path: | - ~/.m2/repository + ~/.m2/repository/*/*/* !~/.m2/repository/org/apache/ozone key: maven-repo-${{ hashFiles('**/pom.xml') }} restore-keys: | @@ -202,7 +202,7 @@ jobs: uses: actions/cache/restore@v4 with: path: | - ~/.m2/repository + ~/.m2/repository/*/*/* !~/.m2/repository/org/apache/ozone key: maven-repo-${{ hashFiles('**/pom.xml') }} restore-keys: | @@ -246,7 +246,7 @@ jobs: uses: actions/cache/restore@v4 with: path: | - ~/.m2/repository + ~/.m2/repository/*/*/* !~/.m2/repository/org/apache/ozone key: maven-repo-${{ hashFiles('**/pom.xml') }} restore-keys: | @@ -314,7 +314,7 @@ jobs: uses: actions/cache/restore@v4 with: path: | - ~/.m2/repository + ~/.m2/repository/*/*/* !~/.m2/repository/org/apache/ozone key: maven-repo-${{ hashFiles('**/pom.xml') }} restore-keys: | @@ -434,12 +434,14 @@ jobs: matrix: profile: - client - - contract + - container - filesystem - hdds - om - ozone + - recon - shell + - snapshot - flaky fail-fast: false steps: @@ -449,7 +451,7 @@ jobs: uses: actions/cache/restore@v4 with: path: | - ~/.m2/repository + ~/.m2/repository/*/*/* !~/.m2/repository/org/apache/ozone key: maven-repo-${{ hashFiles('**/pom.xml') }} restore-keys: | @@ -509,7 +511,7 @@ jobs: uses: actions/cache/restore@v4 with: path: | - ~/.m2/repository + ~/.m2/repository/*/*/* !~/.m2/repository/org/apache/ozone key: maven-repo-${{ hashFiles('**/pom.xml') }} restore-keys: | diff --git a/.github/workflows/intermittent-test-check.yml b/.github/workflows/intermittent-test-check.yml index 
3239215aa907..dda123305b51 100644 --- a/.github/workflows/intermittent-test-check.yml +++ b/.github/workflows/intermittent-test-check.yml @@ -75,10 +75,10 @@ jobs: - name: Checkout project uses: actions/checkout@v4 - name: Cache for maven dependencies - uses: actions/cache@v4 + uses: actions/cache/restore@v4 with: path: | - ~/.m2/repository + ~/.m2/repository/*/*/* !~/.m2/repository/org/apache/ozone key: maven-repo-${{ hashFiles('**/pom.xml') }} restore-keys: | @@ -115,7 +115,7 @@ jobs: uses: actions/cache/restore@v4 with: path: | - ~/.m2/repository + ~/.m2/repository/*/*/* !~/.m2/repository/org/apache/ozone key: maven-repo-${{ hashFiles('**/pom.xml') }} restore-keys: | diff --git a/.github/workflows/label-pr.yml b/.github/workflows/label-pr.yml new file mode 100644 index 000000000000..abc620b7ef09 --- /dev/null +++ b/.github/workflows/label-pr.yml @@ -0,0 +1,29 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This workflow reads its configuration from the .github/labeler.yml file. +name: pull-request-labeler +on: +- pull_request_target + +jobs: + labeler: + permissions: + contents: read + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: actions/labeler@v5 + diff --git a/.github/workflows/populate-cache.yml b/.github/workflows/populate-cache.yml new file mode 100644 index 000000000000..d4c9cd8120ab --- /dev/null +++ b/.github/workflows/populate-cache.yml @@ -0,0 +1,74 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This workflow creates cache with Maven dependencies for Ozone build. 
+ +name: populate-cache + +on: + push: + branches: + - master + - ozone-1.4 + paths: + - 'pom.xml' + - '**/pom.xml' + - '.github/workflows/populate-cache.yml' + schedule: + - cron: '20 3 * * *' + +jobs: + build: + runs-on: ubuntu-20.04 + steps: + - name: Checkout project + uses: actions/checkout@v4 + + - name: Restore cache for Maven dependencies + id: restore-cache + uses: actions/cache/restore@v4 + with: + path: | + ~/.m2/repository/*/*/* + !~/.m2/repository/org/apache/ozone + key: maven-repo-${{ hashFiles('**/pom.xml') }} + + - name: Setup Java + if: steps.restore-cache.outputs.cache-hit != 'true' + uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: 8 + + - name: Fetch dependencies + if: steps.restore-cache.outputs.cache-hit != 'true' + run: mvn --batch-mode --fail-never --no-transfer-progress --show-version -Pgo-offline -Pdist clean verify + + - name: Delete Ozone jars from repo + if: steps.restore-cache.outputs.cache-hit != 'true' + run: rm -fr ~/.m2/repository/org/apache/ozone + + - name: List repo contents + if: steps.restore-cache.outputs.cache-hit != 'true' + run: find ~/.m2/repository -type f | sort | xargs ls -lh + + - name: Save cache for Maven dependencies + if: steps.restore-cache.outputs.cache-hit != 'true' + uses: actions/cache/save@v4 + with: + path: | + ~/.m2/repository/*/*/* + !~/.m2/repository/org/apache/ozone + key: maven-repo-${{ hashFiles('**/pom.xml') }} diff --git a/.github/workflows/repeat-acceptance.yml b/.github/workflows/repeat-acceptance.yml index 7269a9c417a6..6eb9c26f07df 100644 --- a/.github/workflows/repeat-acceptance.yml +++ b/.github/workflows/repeat-acceptance.yml @@ -91,9 +91,11 @@ jobs: restore-keys: | ${{ runner.os }}-pnpm- - name: Cache for maven dependencies - uses: actions/cache@v4 + uses: actions/cache/restore@v4 with: - path: ~/.m2/repository + path: | + ~/.m2/repository/*/*/* + !~/.m2/repository/org/apache/ozone key: maven-repo-${{ hashFiles('**/pom.xml') }}-${{ env.JAVA_VERSION }} restore-keys: | maven-repo-${{ hashFiles('**/pom.xml') }} @@ -115,12 +117,6 @@ jobs: hadoop-ozone/dist/target/ozone-*.tar.gz !hadoop-ozone/dist/target/ozone-*-src.tar.gz retention-days: 1 - - name: Delete temporary build artifacts before caching - run: | - #Never cache local artifacts - rm -rf ~/.m2/repository/org/apache/ozone/hdds* - rm -rf ~/.m2/repository/org/apache/ozone/ozone* - if: always() acceptance: needs: - prepare-job diff --git a/.gitignore b/.gitignore index 61a3d80e87a0..4cf4dd86288f 100644 --- a/.gitignore +++ b/.gitignore @@ -29,46 +29,13 @@ azure-bfs-auth-keys.xml */.externalToolBuilders */maven-eclipse.xml -hadoop-common-project/hadoop-kms/downloads/ -hadoop-hdfs-project/hadoop-hdfs/downloads -hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.toolbox -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/dist -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tmp -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/node -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/node_modules -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/bower_components -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.sass-cache -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/connect.lock -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/coverage/* -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/libpeerconnection.log 
-hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/npm-debug.log -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/testem.log -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/dist -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tmp -yarnregistry.pdf -patchprocess/ -.history/ -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package-lock.json -hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn-error.log - -# Ignore files generated by HDDS acceptance tests. -hadoop-ozone/acceptance-test/docker-compose.log -hadoop-ozone/acceptance-test/junit-results.xml - #robotframework outputs log.html output.xml report.html -hadoop-hdds/docs/public -hadoop-hdds/docs/.hugo_build.lock -hadoop-ozone/recon/node_modules - .dev-tools dev-support/ci/bats-assert dev-support/ci/bats-support -hadoop-ozone/dist/src/main/license/current.txt - -.mvn/.gradle-enterprise/ \ No newline at end of file +.mvn/.gradle-enterprise/ diff --git a/LICENSE.txt b/LICENSE.txt index 021266844b82..8a367a318628 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -213,8 +213,6 @@ Apache License 2.0 hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js.map hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css.map hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js -hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java -hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java BSD 3-Clause diff --git a/SECURITY.md b/SECURITY.md index 2f92dd685c12..3a89968026a2 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -5,13 +5,16 @@ The first stable release of Apache Ozone is 1.0, the previous alpha and beta releases are not supported by the community. 
| Version | Supported | -| ------------- | ------------------ | +|---------------| ------------------ | | 0.3.0 (alpha) | :x: | | 0.4.0 (alpha) | :x: | | 0.4.1 (alpha) | :x: | | 0.5.0 (beta) | :x: | -| 1.0 | :white_check_mark: | -| 1.1 | :white_check_mark: | +| 1.0.0 | :x: | +| 1.1.0 | :x: | +| 1.2.1 | :x: | +| 1.3.0 | :x: | +| 1.4.0 | :white_check_mark: | ## Reporting a Vulnerability diff --git a/dev-support/ci/selective_ci_checks.bats b/dev-support/ci/selective_ci_checks.bats index 9fe1708c9137..69ca1d9f99f6 100644 --- a/dev-support/ci/selective_ci_checks.bats +++ b/dev-support/ci/selective_ci_checks.bats @@ -57,6 +57,18 @@ load bats-assert/load.bash assert_output -p needs-kubernetes-tests=false } +@test "dashboard only" { + run dev-support/ci/selective_ci_checks.sh 039dea9 + + assert_output -p 'basic-checks=["rat"]' + assert_output -p needs-build=false + assert_output -p needs-compile=false + assert_output -p needs-compose-tests=false + assert_output -p needs-dependency-check=false + assert_output -p needs-integration-tests=false + assert_output -p needs-kubernetes-tests=false +} + @test "compose and robot" { run dev-support/ci/selective_ci_checks.sh b83039eef @@ -96,7 +108,7 @@ load bats-assert/load.bash @test "integration and unit: java change" { run dev-support/ci/selective_ci_checks.sh 9aebf6e25 - assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false @@ -108,7 +120,7 @@ load bats-assert/load.bash @test "integration and unit: script change" { run dev-support/ci/selective_ci_checks.sh c6850484f - assert_output -p 'basic-checks=["rat","bats","unit"]' + assert_output -p 'basic-checks=["rat","bats"]' assert_output -p needs-build=false assert_output -p needs-compile=false assert_output -p needs-compose-tests=false @@ -120,7 +132,7 @@ load bats-assert/load.bash @test "script change including junit.sh" { run dev-support/ci/selective_ci_checks.sh 66093e52c6 - assert_output -p 'basic-checks=["rat","bats","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","bats","checkstyle","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false @@ -132,19 +144,19 @@ load bats-assert/load.bash @test "unit only" { run dev-support/ci/selective_ci_checks.sh 1dd1d0ba3 - assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false assert_output -p needs-dependency-check=false - assert_output -p needs-integration-tests=false + assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=false } @test "unit helper" { run dev-support/ci/selective_ci_checks.sh 88383d1d5 - assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false @@ -177,20 +189,17 @@ load bats-assert/load.bash assert_output -p needs-kubernetes-tests=false } -# disabled, because this test fails if -# hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestManagedSstFileReader.java -# is 
not present in the current tree (i.e. if file is renamed, moved or deleted) -#@test "native test in other module" { -# run dev-support/ci/selective_ci_checks.sh 7d01cc14a6 -# -# assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","native","unit"]' -# assert_output -p needs-build=true -# assert_output -p needs-compile=true -# assert_output -p needs-compose-tests=false -# assert_output -p needs-dependency-check=false -# assert_output -p needs-integration-tests=false -# assert_output -p needs-kubernetes-tests=false -#} +@test "native test in other module" { + run dev-support/ci/selective_ci_checks.sh 822c0dee1a + + assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","native"]' + assert_output -p needs-build=true + assert_output -p needs-compile=true + assert_output -p needs-compose-tests=false + assert_output -p needs-dependency-check=false + assert_output -p needs-integration-tests=false + assert_output -p needs-kubernetes-tests=false +} @test "kubernetes only" { run dev-support/ci/selective_ci_checks.sh 5336bb9bd @@ -219,7 +228,7 @@ load bats-assert/load.bash @test "main/java change" { run dev-support/ci/selective_ci_checks.sh 86a771dfe - assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true @@ -231,7 +240,7 @@ load bats-assert/load.bash @test "..../java change" { run dev-support/ci/selective_ci_checks.sh 01c616536 - assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true @@ -243,7 +252,7 @@ load bats-assert/load.bash @test "java and compose change" { run dev-support/ci/selective_ci_checks.sh d0f0f806e - assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true @@ -255,7 +264,7 @@ load bats-assert/load.bash @test "java and docs change" { run dev-support/ci/selective_ci_checks.sh 2c0adac26 - assert_output -p 'basic-checks=["rat","author","checkstyle","docs","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","author","checkstyle","docs","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true @@ -267,7 +276,7 @@ load bats-assert/load.bash @test "pom change" { run dev-support/ci/selective_ci_checks.sh 9129424a9 - assert_output -p 'basic-checks=["rat","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","checkstyle","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true @@ -279,7 +288,7 @@ load bats-assert/load.bash @test "CI lib change" { run dev-support/ci/selective_ci_checks.sh ceb79acaa - assert_output -p 'basic-checks=["author","bats","checkstyle","docs","findbugs","native","rat","unit"]' + assert_output -p 'basic-checks=["author","bats","checkstyle","docs","findbugs","native","rat"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true @@ -291,7 +300,7 @@ load bats-assert/load.bash @test "CI workflow change" 
{ run dev-support/ci/selective_ci_checks.sh 90a8d7c01 - assert_output -p 'basic-checks=["author","bats","checkstyle","docs","findbugs","native","rat","unit"]' + assert_output -p 'basic-checks=["author","bats","checkstyle","docs","findbugs","native","rat"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true @@ -316,7 +325,7 @@ load bats-assert/load.bash @test "CI workflow change (ci.yaml)" { run dev-support/ci/selective_ci_checks.sh 90fd5f2adc - assert_output -p 'basic-checks=["author","bats","checkstyle","docs","findbugs","native","rat","unit"]' + assert_output -p 'basic-checks=["author","bats","checkstyle","docs","findbugs","native","rat"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true diff --git a/dev-support/ci/selective_ci_checks.sh b/dev-support/ci/selective_ci_checks.sh index 996bd382be36..bb0faa962e46 100755 --- a/dev-support/ci/selective_ci_checks.sh +++ b/dev-support/ci/selective_ci_checks.sh @@ -233,6 +233,7 @@ function get_count_compose_files() { local ignore_array=( "^hadoop-ozone/dist/src/main/k8s" "^hadoop-ozone/dist/src/main/license" + "^hadoop-ozone/dist/src/main/compose/common/grafana/dashboards" "\.md$" ) filter_changed_files true @@ -262,18 +263,10 @@ function get_count_integration_files() { "^hadoop-ozone/integration-test" "^hadoop-ozone/fault-injection-test/mini-chaos-tests" "src/test/java" + "src/test/resources" ) - # Ozone's unit test naming convention: Test*.java - # The following makes this filter ignore all tests except those in - # integration-test and fault-injection-test. - # Directories starting with `i` under hadoop-ozone need to be listed - # explicitly, other subdirectories are captured by the second item. local ignore_array=( - "^hadoop-hdds/.*/src/test/java/.*/Test.*.java" - "^hadoop-ozone/[a-eghj-z].*/src/test/java/.*/Test.*.java" - "^hadoop-ozone/insight/src/test/java/.*/Test.*.java" - "^hadoop-ozone/interface-client/src/test/java/.*/Test.*.java" - "^hadoop-ozone/interface-storage/src/test/java/.*/Test.*.java" + $(grep -Flr 'org.apache.ozone.test.tag.Native' hadoop-ozone/integration-test) ) filter_changed_files true COUNT_INTEGRATION_CHANGED_FILES=${match_count} @@ -455,29 +448,6 @@ function check_needs_native() { start_end::group_end } -function check_needs_unit_test() { - start_end::group_start "Check if unit test is needed" - local pattern_array=( - "^hadoop-ozone/dev-support/checks/_mvn_unit_report.sh" - "^hadoop-ozone/dev-support/checks/unit.sh" - "^hadoop-ozone/dev-support/checks/junit.sh" - "src/test/java" - "src/test/resources" - ) - local ignore_array=( - "^hadoop-ozone/dist" - "^hadoop-ozone/fault-injection-test/mini-chaos-tests" - "^hadoop-ozone/integration-test" - ) - filter_changed_files true - - if [[ ${match_count} != "0" ]]; then - add_basic_check unit - fi - - start_end::group_end -} - # Counts other files which do not need to trigger any functional test # (i.e. 
no compose/integration/kubernetes) function get_count_misc_files() { @@ -494,12 +464,14 @@ function get_count_misc_files() { "\.md$" "findbugsExcludeFile.xml" "/NOTICE$" + "^hadoop-ozone/dist/src/main/compose/common/grafana/dashboards" ) local ignore_array=( "^.github/workflows/post-commit.yml" "^hadoop-ozone/dev-support/checks/_mvn_unit_report.sh" "^hadoop-ozone/dev-support/checks/acceptance.sh" "^hadoop-ozone/dev-support/checks/integration.sh" + "^hadoop-ozone/dev-support/checks/junit.sh" "^hadoop-ozone/dev-support/checks/kubernetes.sh" ) filter_changed_files true @@ -532,7 +504,6 @@ function calculate_test_types_to_run() { compose_tests_needed=true integration_tests_needed=true kubernetes_tests_needed=true - add_basic_check unit else echo "All ${COUNT_ALL_CHANGED_FILES} changed files are known to be handled by specific checks." echo @@ -612,6 +583,5 @@ check_needs_dependency check_needs_docs check_needs_findbugs check_needs_native -check_needs_unit_test calculate_test_types_to_run set_outputs diff --git a/dev-support/rat/rat-exclusions.txt b/dev-support/rat/rat-exclusions.txt new file mode 100644 index 000000000000..4531b1b601c0 --- /dev/null +++ b/dev-support/rat/rat-exclusions.txt @@ -0,0 +1,73 @@ +###### Licensed to the Apache Software Foundation (ASF) under one +###### or more contributor license agreements. See the NOTICE file +###### distributed with this work for additional information +###### regarding copyright ownership. The ASF licenses this file +###### to you under the Apache License, Version 2.0 (the +###### "License"); you may not use this file except in compliance +###### with the License. You may obtain a copy of the License at +###### +###### http://www.apache.org/licenses/LICENSE-2.0 +###### +###### Unless required by applicable law or agreed to in writing, +###### software distributed under the License is distributed on an +###### "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +###### KIND, either express or implied. See the License for the +###### specific language governing permissions and limitations +###### under the License. 
+ +**/*.json +.gitattributes +.github/* +CONTRIBUTING.md +README.md +SECURITY.md + +# hadoop-hdds/interface-client +src/main/resources/proto.lock + +# tools/fault-injection-service +tools/fault-injection-service/README.md + +# hadoop-hdds/framework +**/webapps/static/angular-1.8.0.min.js +**/webapps/static/angular-nvd3-1.0.9.min.js +**/webapps/static/angular-route-1.8.0.min.js +**/webapps/static/bootstrap-3.4.1/** +**/webapps/static/d3-3.5.17.min.js +**/webapps/static/jquery-3.5.1.min.js +**/webapps/static/nvd3-1.8.5.min.css +**/webapps/static/nvd3-1.8.5.min.css.map +**/webapps/static/nvd3-1.8.5.min.js +**/webapps/static/nvd3-1.8.5.min.js.map + +# hadoop-hdds/container-service +src/test/resources/123-dn-container.db/** +src/test/resources/123.container +src/test/resources/additionalfields.container +src/test/resources/incorrect.checksum.container +src/test/resources/incorrect.container +src/test/resources/test.db.ini + +# hadoop-hdds/docs +**/themes/ozonedoc/** +static/slides/* + +# hadoop-ozone/dist +**/.ssh/id_rsa* +**/log.html +**/output.xml +**/report.html +src/main/license/** + +# hadoop-ozone/integration-test +src/test/resources/ssl/* + +# hadoop-ozone/recon +**/pnpm-lock.yaml +src/test/resources/prometheus-test-response.txt + +# hadoop-ozone/shaded +**/dependency-reduced-pom.xml + +# hadoop-ozone/tools +src/test/resources/*.log \ No newline at end of file diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java index 1045f7a6a172..422943fff042 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java @@ -27,6 +27,7 @@ import org.apache.hadoop.metrics2.lib.Interns; import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.metrics2.lib.MutableCounterLong; +import org.apache.hadoop.metrics2.lib.MutableQuantiles; import org.apache.hadoop.ozone.OzoneConsts; import java.util.Map; @@ -51,6 +52,11 @@ public final class ContainerClientMetrics { private MutableCounterLong totalWriteChunkCalls; @Metric private MutableCounterLong totalWriteChunkBytes; + private MutableQuantiles[] listBlockLatency; + private MutableQuantiles[] getBlockLatency; + private MutableQuantiles[] getCommittedBlockLengthLatency; + private MutableQuantiles[] readChunkLatency; + private MutableQuantiles[] getSmallFileLatency; private final Map writeChunkCallsByPipeline; private final Map writeChunkBytesByPipeline; private final Map writeChunksCallsByLeaders; @@ -84,6 +90,36 @@ private ContainerClientMetrics() { writeChunkCallsByPipeline = new ConcurrentHashMap<>(); writeChunkBytesByPipeline = new ConcurrentHashMap<>(); writeChunksCallsByLeaders = new ConcurrentHashMap<>(); + + listBlockLatency = new MutableQuantiles[3]; + getBlockLatency = new MutableQuantiles[3]; + getCommittedBlockLengthLatency = new MutableQuantiles[3]; + readChunkLatency = new MutableQuantiles[3]; + getSmallFileLatency = new MutableQuantiles[3]; + int[] intervals = {60, 300, 900}; + for (int i = 0; i < intervals.length; i++) { + int interval = intervals[i]; + listBlockLatency[i] = registry + .newQuantiles("listBlockLatency" + interval + + "s", "ListBlock latency in microseconds", "ops", + "latency", interval); + getBlockLatency[i] = registry + .newQuantiles("getBlockLatency" + interval + + "s", "GetBlock latency in microseconds", "ops", + "latency", interval); + 
getCommittedBlockLengthLatency[i] = registry + .newQuantiles("getCommittedBlockLengthLatency" + interval + + "s", "GetCommittedBlockLength latency in microseconds", + "ops", "latency", interval); + readChunkLatency[i] = registry + .newQuantiles("readChunkLatency" + interval + + "s", "ReadChunk latency in microseconds", "ops", + "latency", interval); + getSmallFileLatency[i] = registry + .newQuantiles("getSmallFileLatency" + interval + + "s", "GetSmallFile latency in microseconds", "ops", + "latency", interval); + } } public void recordWriteChunk(Pipeline pipeline, long chunkSizeBytes) { @@ -111,28 +147,64 @@ public void recordWriteChunk(Pipeline pipeline, long chunkSizeBytes) { totalWriteChunkBytes.incr(chunkSizeBytes); } + public void addListBlockLatency(long latency) { + for (MutableQuantiles q : listBlockLatency) { + if (q != null) { + q.add(latency); + } + } + } + + public void addGetBlockLatency(long latency) { + for (MutableQuantiles q : getBlockLatency) { + if (q != null) { + q.add(latency); + } + } + } + + public void addGetCommittedBlockLengthLatency(long latency) { + for (MutableQuantiles q : getCommittedBlockLengthLatency) { + if (q != null) { + q.add(latency); + } + } + } + + public void addReadChunkLatency(long latency) { + for (MutableQuantiles q : readChunkLatency) { + if (q != null) { + q.add(latency); + } + } + } + + public void addGetSmallFileLatency(long latency) { + for (MutableQuantiles q : getSmallFileLatency) { + if (q != null) { + q.add(latency); + } + } + } + @VisibleForTesting public MutableCounterLong getTotalWriteChunkBytes() { return totalWriteChunkBytes; } - @VisibleForTesting - public MutableCounterLong getTotalWriteChunkCalls() { + MutableCounterLong getTotalWriteChunkCalls() { return totalWriteChunkCalls; } - @VisibleForTesting - public Map getWriteChunkBytesByPipeline() { + Map getWriteChunkBytesByPipeline() { return writeChunkBytesByPipeline; } - @VisibleForTesting - public Map getWriteChunkCallsByPipeline() { + Map getWriteChunkCallsByPipeline() { return writeChunkCallsByPipeline; } - @VisibleForTesting - public Map getWriteChunksCallsByLeaders() { + Map getWriteChunksCallsByLeaders() { return writeChunksCallsByLeaders; } } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java index d1dcc654b100..d1992ac931e5 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java @@ -144,6 +144,23 @@ public enum ChecksumCombineMode { tags = ConfigTag.CLIENT) private int retryInterval = 0; + @Config(key = "read.max.retries", + defaultValue = "3", + description = "Maximum number of retries by Ozone Client on " + + "encountering connectivity exception when reading a key.", + tags = ConfigTag.CLIENT) + private int maxReadRetryCount = 3; + + @Config(key = "read.retry.interval", + defaultValue = "1", + description = + "Indicates the time duration in seconds a client will wait " + + "before retrying a read key request on encountering " + + "a connectivity exception from Datanodes. " + + "By default the interval is 1 second.", + tags = ConfigTag.CLIENT) + private int readRetryInterval = 1; + @Config(key = "checksum.type", defaultValue = "CRC32", description = "The checksum type [NONE/ CRC32/ CRC32C/ SHA256/ MD5] " @@ -201,6 +218,13 @@ public enum ChecksumCombineMode { // 3 concurrent stripe read should be enough.
private int ecReconstructStripeReadPoolLimit = 10 * 3; + @Config(key = "ec.reconstruct.stripe.write.pool.limit", + defaultValue = "30", + description = "Thread pool max size for writing available ec" + + " chunks in parallel to reconstruct the whole stripe.", + tags = ConfigTag.CLIENT) + private int ecReconstructStripeWritePoolLimit = 10 * 3; + @Config(key = "checksum.combine.mode", defaultValue = "COMPOSITE_CRC", description = "The combined checksum type [MD5MD5CRC / COMPOSITE_CRC] " @@ -230,10 +254,18 @@ public enum ChecksumCombineMode { "list rather than full chunk list to optimize performance. " + "Critical to HBase.", tags = ConfigTag.CLIENT) - private boolean incrementalChunkList = false; + private boolean incrementalChunkList = true; + + @Config(key = "stream.putblock.piggybacking", + defaultValue = "false", + type = ConfigType.BOOLEAN, + description = "Allow PutBlock to be piggybacked in WriteChunk " + + "requests if the chunk is small.", + tags = ConfigTag.CLIENT) + private boolean enablePutblockPiggybacking = false; + @PostConstruct - private void validate() { + public void validate() { Preconditions.checkState(streamBufferSize > 0); Preconditions.checkState(streamBufferFlushSize > 0); Preconditions.checkState(streamBufferMaxSize > 0); @@ -328,6 +360,22 @@ public void setRetryInterval(int retryInterval) { this.retryInterval = retryInterval; } + public int getMaxReadRetryCount() { + return maxReadRetryCount; + } + + public void setMaxReadRetryCount(int maxReadRetryCount) { + this.maxReadRetryCount = maxReadRetryCount; + } + + public int getReadRetryInterval() { + return readRetryInterval; + } + + public void setReadRetryInterval(int readRetryInterval) { + this.readRetryInterval = readRetryInterval; + } + public ChecksumType getChecksumType() { return ChecksumType.valueOf(checksumType); } @@ -396,6 +444,14 @@ public int getEcReconstructStripeReadPoolLimit() { return ecReconstructStripeReadPoolLimit; } + public void setEcReconstructStripeWritePoolLimit(int poolLimit) { + this.ecReconstructStripeWritePoolLimit = poolLimit; + } + + public int getEcReconstructStripeWritePoolLimit() { + return ecReconstructStripeWritePoolLimit; + } + public void setFsDefaultBucketLayout(String bucketLayout) { if (!bucketLayout.isEmpty()) { this.fsDefaultBucketLayout = bucketLayout; @@ -406,6 +462,14 @@ public String getFsDefaultBucketLayout() { return fsDefaultBucketLayout; } + public void setEnablePutblockPiggybacking(boolean enablePutblockPiggybacking) { + this.enablePutblockPiggybacking = enablePutblockPiggybacking; + } + + public boolean getEnablePutblockPiggybacking() { + return enablePutblockPiggybacking; + } + public boolean isDatastreamPipelineMode() { return datastreamPipelineMode; } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java index 0a38e6604897..cb2b85ef1e29 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java @@ -20,7 +20,9 @@ import java.io.IOException; import java.io.InterruptedIOException; +import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -42,6 +44,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc; import
org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc.XceiverClientProtocolServiceStub; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; import org.apache.hadoop.hdds.scm.client.ClientTrustManager; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -166,8 +169,8 @@ private synchronized void connectToDatanode(DatanodeDetails dn) // port. int port = dn.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue(); if (port == 0) { - port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + port = config.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); } // Add credential context to the client call @@ -384,6 +387,12 @@ private XceiverClientReply sendCommandWithRetry( } } + boolean allInService = datanodeList.stream() + .allMatch(dn -> dn.getPersistedOpState() == NodeOperationalState.IN_SERVICE); + if (!allInService) { + datanodeList = sortDatanodeByOperationalState(datanodeList); + } + for (DatanodeDetails dn : datanodeList) { try { if (LOG.isDebugEnabled()) { @@ -440,13 +449,37 @@ private XceiverClientReply sendCommandWithRetry( LOG.debug(message + " on the pipeline {}.", processForDebug(request), pipeline); } else { - LOG.error(message + " on the pipeline {}.", + LOG.warn(message + " on the pipeline {}.", request.getCmdType(), pipeline); } throw ioException; } } + private static List sortDatanodeByOperationalState( + List datanodeList) { + List sortedDatanodeList = new ArrayList<>(datanodeList); + // Make IN_SERVICE's Datanode precede all other State's Datanodes. + // This is a stable sort that does not change the order of the + // IN_SERVICE's Datanode. 
+ Comparator byOpStateStable = (first, second) -> { + boolean firstInService = first.getPersistedOpState() == + NodeOperationalState.IN_SERVICE; + boolean secondInService = second.getPersistedOpState() == + NodeOperationalState.IN_SERVICE; + + if (firstInService == secondInService) { + return 0; + } else if (firstInService) { + return -1; + } else { + return 1; + } + }; + sortedDatanodeList.sort(byOpStateStable); + return sortedDatanodeList; + } + @Override public XceiverClientReply sendCommandAsync( ContainerCommandRequestProto request) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java index 96db6d13fea5..ade4cbcab3c4 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientMetrics.java @@ -19,21 +19,27 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; +import org.apache.hadoop.metrics2.MetricsSource; import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import org.apache.hadoop.metrics2.lib.MutableRate; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.util.PerformanceMetrics; +import org.apache.hadoop.util.PerformanceMetricsInitializer; /** * The client metrics for the Storage Container protocol. 
*/ @InterfaceAudience.Private @Metrics(about = "Storage Container Client Metrics", context = "dfs") -public class XceiverClientMetrics { +public class XceiverClientMetrics implements MetricsSource { public static final String SOURCE_NAME = XceiverClientMetrics.class .getSimpleName(); @@ -43,8 +49,11 @@ public class XceiverClientMetrics { private @Metric MutableCounterLong ecReconstructionFailsTotal; private MutableCounterLong[] pendingOpsArray; private MutableCounterLong[] opsArray; - private MutableRate[] containerOpsLatency; + private PerformanceMetrics[] containerOpsLatency; private MetricsRegistry registry; + private OzoneConfiguration conf = new OzoneConfiguration(); + private int[] intervals = conf.getInts(OzoneConfigKeys + .OZONE_XCEIVER_CLIENT_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY); public XceiverClientMetrics() { init(); @@ -56,7 +65,7 @@ public void init() { this.pendingOpsArray = new MutableCounterLong[numEnumEntries]; this.opsArray = new MutableCounterLong[numEnumEntries]; - this.containerOpsLatency = new MutableRate[numEnumEntries]; + this.containerOpsLatency = new PerformanceMetrics[numEnumEntries]; for (int i = 0; i < numEnumEntries; i++) { pendingOpsArray[i] = registry.newCounter( "numPending" + ContainerProtos.Type.forNumber(i + 1), @@ -66,11 +75,11 @@ public void init() { .newCounter("opCount" + ContainerProtos.Type.forNumber(i + 1), "number of" + ContainerProtos.Type.forNumber(i + 1) + " ops", (long) 0); - - containerOpsLatency[i] = registry.newRate( - ContainerProtos.Type.forNumber(i + 1) + "Latency", - "latency of " + ContainerProtos.Type.forNumber(i + 1) - + " ops"); + containerOpsLatency[i] = + PerformanceMetricsInitializer.getMetrics(registry, + ContainerProtos.Type.forNumber(i + 1) + "Latency", + "latency of " + ContainerProtos.Type.forNumber(i + 1), + "Ops", "Time", intervals); } } @@ -129,4 +138,21 @@ public void unRegister() { MetricsSystem ms = DefaultMetricsSystem.instance(); ms.unregisterSource(SOURCE_NAME); } + + @Override + public void getMetrics(MetricsCollector collector, boolean b) { + MetricsRecordBuilder recordBuilder = collector.addRecord(SOURCE_NAME); + + pendingOps.snapshot(recordBuilder, true); + totalOps.snapshot(recordBuilder, true); + ecReconstructionTotal.snapshot(recordBuilder, true); + ecReconstructionFailsTotal.snapshot(recordBuilder, true); + + int numEnumEntries = ContainerProtos.Type.values().length; + for (int i = 0; i < numEnumEntries; i++) { + pendingOpsArray[i].snapshot(recordBuilder, true); + opsArray[i].snapshot(recordBuilder, true); + containerOpsLatency[i].snapshot(recordBuilder, true); + } + } } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java index aff0aa966a79..58a2153352a4 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java @@ -83,8 +83,8 @@ public static XceiverClientRatis newXceiverClientRatis( org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline, ConfigurationSource ozoneConf, ClientTrustManager trustManager) { final String rpcType = ozoneConf - .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); + .get(ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, + ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf); final GrpcTlsConfig 
tlsConfig = RatisHelper.createTlsClientConfig(new SecurityConfig(ozoneConf), trustManager); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java index 0c5501c7922c..957f761ccbc2 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java @@ -73,7 +73,7 @@ SortedMap> getCommitIndexMap() { return commitIndexMap; } - void updateCommitInfoMap(long index, List buffers) { + synchronized void updateCommitInfoMap(long index, List buffers) { commitIndexMap.computeIfAbsent(index, k -> new LinkedList<>()) .addAll(buffers); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java index d06b1816dc56..374e90a24c76 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java @@ -21,6 +21,7 @@ import java.io.EOFException; import java.io.IOException; import java.io.InputStream; +import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -37,6 +38,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.GetBlockResponseProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.XceiverClientSpi.Validator; @@ -78,8 +80,8 @@ public class BlockInputStream extends BlockExtendedInputStream { private XceiverClientSpi xceiverClient; private boolean initialized = false; // TODO: do we need to change retrypolicy based on exception. 
- private final RetryPolicy retryPolicy = - HddsClientUtils.createRetryPolicy(3, TimeUnit.SECONDS.toMillis(1)); + private final RetryPolicy retryPolicy; + private int retries; // List of ChunkInputStreams, one for each chunk in the block @@ -113,28 +115,34 @@ public class BlockInputStream extends BlockExtendedInputStream { private final Function refreshFunction; - public BlockInputStream(BlockLocationInfo blockInfo, Pipeline pipeline, - Token token, boolean verifyChecksum, + public BlockInputStream( + BlockLocationInfo blockInfo, + Pipeline pipeline, + Token token, XceiverClientFactory xceiverClientFactory, - Function refreshFunction) { + Function refreshFunction, + OzoneClientConfig config) { this.blockInfo = blockInfo; this.blockID = blockInfo.getBlockID(); this.length = blockInfo.getLength(); setPipeline(pipeline); tokenRef.set(token); - this.verifyChecksum = verifyChecksum; + this.verifyChecksum = config.isChecksumVerify(); this.xceiverClientFactory = xceiverClientFactory; this.refreshFunction = refreshFunction; + this.retryPolicy = + HddsClientUtils.createRetryPolicy(config.getMaxReadRetryCount(), + TimeUnit.SECONDS.toMillis(config.getReadRetryInterval())); } // only for unit tests public BlockInputStream(BlockID blockId, long blockLen, Pipeline pipeline, Token token, - boolean verifyChecksum, - XceiverClientFactory xceiverClientFactory) { + XceiverClientFactory xceiverClientFactory, + OzoneClientConfig config + ) { this(new BlockLocationInfo(new BlockLocationInfo.Builder().setBlockID(blockId).setLength(blockLen)), - pipeline, token, verifyChecksum, - xceiverClientFactory, null); + pipeline, token, xceiverClientFactory, null, config); } /** @@ -217,18 +225,25 @@ private boolean isConnectivityIssue(IOException ex) { } private void refreshBlockInfo(IOException cause) throws IOException { - LOG.info("Unable to read information for block {} from pipeline {}: {}", + LOG.info("Attempting to update pipeline and block token for block {} from pipeline {}: {}", blockID, pipelineRef.get().getId(), cause.getMessage()); if (refreshFunction != null) { LOG.debug("Re-fetching pipeline and block token for block {}", blockID); BlockLocationInfo blockLocationInfo = refreshFunction.apply(blockID); if (blockLocationInfo == null) { - LOG.debug("No new block location info for block {}", blockID); + LOG.warn("No new block location info for block {}", blockID); } else { - LOG.debug("New pipeline for block {}: {}", blockID, - blockLocationInfo.getPipeline()); setPipeline(blockLocationInfo.getPipeline()); + LOG.info("New pipeline for block {}: {}", blockID, + blockLocationInfo.getPipeline()); + tokenRef.set(blockLocationInfo.getToken()); + if (blockLocationInfo.getToken() != null) { + OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier(); + tokenId.readFromByteArray(tokenRef.get().getIdentifier()); + LOG.info("A new token is added for block {}. 
Expiry: {}", + blockID, Instant.ofEpochMilli(tokenId.getExpiryDate())); + } } } else { throw cause; @@ -574,7 +589,20 @@ private boolean shouldRetryRead(IOException cause) throws IOException { } catch (Exception e) { throw new IOException(e); } - return retryAction.action == RetryPolicy.RetryAction.RetryDecision.RETRY; + if (retryAction.action == RetryPolicy.RetryAction.RetryDecision.RETRY) { + if (retryAction.delayMillis > 0) { + try { + LOG.debug("Retry read after {}ms", retryAction.delayMillis); + Thread.sleep(retryAction.delayMillis); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + String msg = "Interrupted: action=" + retryAction.action + ", retry policy=" + retryPolicy; + throw new IOException(msg, e); + } + } + return true; + } + return false; } private void handleReadError(IOException cause) throws IOException { diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java index a6cd98e48ad9..f29bf490382f 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java @@ -27,9 +27,9 @@ import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -55,6 +55,8 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; + +import static org.apache.hadoop.hdds.DatanodeVersion.COMBINED_PUTBLOCK_WRITECHUNK_RPC; import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putBlockAsync; import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.writeChunkAsync; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; @@ -140,6 +142,7 @@ public class BlockOutputStream extends OutputStream { private int replicationIndex; private Pipeline pipeline; private final ContainerClientMetrics clientMetrics; + private boolean allowPutBlockPiggybacking; /** * Creates a new BlockOutputStream. 
@@ -157,7 +160,8 @@ public BlockOutputStream( BufferPool bufferPool, OzoneClientConfig config, Token token, - ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs + ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs, + Supplier blockOutputStreamResourceProvider ) throws IOException { this.xceiverClientFactory = xceiverClientManager; this.config = config; @@ -199,8 +203,7 @@ public BlockOutputStream( (long) flushPeriod * streamBufferArgs.getStreamBufferSize() == streamBufferArgs .getStreamBufferFlushSize()); - // A single thread executor handle the responses of async requests - responseExecutor = Executors.newSingleThreadExecutor(); + this.responseExecutor = blockOutputStreamResourceProvider.get(); bufferList = null; totalDataFlushedLength = 0; writtenDataLength = 0; @@ -211,6 +214,20 @@ public BlockOutputStream( this.clientMetrics = clientMetrics; this.pipeline = pipeline; this.streamBufferArgs = streamBufferArgs; + this.allowPutBlockPiggybacking = config.getEnablePutblockPiggybacking() && + allDataNodesSupportPiggybacking(); + } + + private boolean allDataNodesSupportPiggybacking() { + // return true only if all DataNodes in the pipeline are on a version + // that supports PutBlock piggybacking. + for (DatanodeDetails dn : pipeline.getNodes()) { + if (dn.getCurrentVersion() < + COMBINED_PUTBLOCK_WRITECHUNK_RPC.toProtoValue()) { + return false; + } + } + return true; } void refreshCurrentBuffer() { @@ -499,22 +516,8 @@ ContainerCommandResponseProto> executePutBlock(boolean close, } // if the ioException is not set, putBlock is successful if (getIoException() == null && !force) { - BlockID responseBlockID = BlockID.getFromProtobuf( - e.getPutBlock().getCommittedBlockLength().getBlockID()); - Preconditions.checkState(blockID.get().getContainerBlockID() - .equals(responseBlockID.getContainerBlockID())); - // updates the bcsId of the block - blockID.set(responseBlockID); - if (LOG.isDebugEnabled()) { - LOG.debug( - "Adding index " + asyncReply.getLogIndex() + " flushLength " - + flushPos + " numBuffers " + byteBufferList.size() - + " blockID " + blockID + " bufferPool size" + bufferPool - .getSize() + " currentBufferIndex " + bufferPool - .getCurrentBufferIndex()); - } - // for standalone protocol, logIndex will always be 0. - updateCommitInfo(asyncReply, byteBufferList); + handleSuccessfulPutBlock(e.getPutBlock().getCommittedBlockLength(), + asyncReply, flushPos, byteBufferList); } return e; }, responseExecutor).exceptionally(e -> { @@ -551,7 +554,7 @@ public void flush() throws IOException { } } - private void writeChunk(ChunkBuffer buffer) + private void writeChunkCommon(ChunkBuffer buffer) throws IOException { // This data in the buffer will be pushed to datanode and a reference will // be added to the bufferList. 
Once putBlock gets executed, this list will @@ -562,7 +565,18 @@ private void writeChunk(ChunkBuffer buffer) bufferList = new ArrayList<>(); } bufferList.add(buffer); - writeChunkToContainer(buffer.duplicate(0, buffer.position())); + } + + private void writeChunk(ChunkBuffer buffer) + throws IOException { + writeChunkCommon(buffer); + writeChunkToContainer(buffer.duplicate(0, buffer.position()), false); + } + + private void writeChunkAndPutBlock(ChunkBuffer buffer) + throws IOException { + writeChunkCommon(buffer); + writeChunkToContainer(buffer.duplicate(0, buffer.position()), true); } /** @@ -594,14 +608,23 @@ private void handleFlushInternal(boolean close) if (totalDataFlushedLength < writtenDataLength) { refreshCurrentBuffer(); Preconditions.checkArgument(currentBuffer.position() > 0); - if (currentBuffer.hasRemaining()) { - writeChunk(currentBuffer); - } + // This can be a partially filled chunk. Since we are flushing the buffer // here, we just limit this buffer to the current position. So that next // write will happen in new buffer - updateFlushLength(); - executePutBlock(close, false); + if (currentBuffer.hasRemaining()) { + if (allowPutBlockPiggybacking) { + updateFlushLength(); + writeChunkAndPutBlock(currentBuffer); + } else { + writeChunk(currentBuffer); + updateFlushLength(); + executePutBlock(close, false); + } + } else { + updateFlushLength(); + executePutBlock(close, false); + } } else if (close) { // forcing an "empty" putBlock if stream is being closed without new // data since latest flush - we need to send the "EOF" flag @@ -682,7 +705,6 @@ public void cleanup(boolean invalidateClient) { bufferList.clear(); } bufferList = null; - responseExecutor.shutdown(); } /** @@ -714,7 +736,7 @@ public boolean isClosed() { * @return */ CompletableFuture writeChunkToContainer( - ChunkBuffer chunk) throws IOException { + ChunkBuffer chunk, boolean putBlockPiggybacking) throws IOException { int effectiveChunkSize = chunk.remaining(); final long offset = chunkOffset.getAndAdd(effectiveChunkSize); final ByteString data = chunk.toByteString( @@ -727,6 +749,8 @@ CompletableFuture writeChunkToContainer( .setChecksumData(checksumData.getProtoBufMessage()) .build(); + long flushPos = totalDataFlushedLength; + if (LOG.isDebugEnabled()) { LOG.debug("Writing chunk {} length {} at offset {}", chunkInfo.getChunkName(), effectiveChunkSize, offset); @@ -744,42 +768,93 @@ CompletableFuture writeChunkToContainer( + ", previous = " + previous); } + final List byteBufferList; + CompletableFuture + validateFuture = null; try { - XceiverClientReply asyncReply = writeChunkAsync(xceiverClient, chunkInfo, - blockID.get(), data, tokenString, replicationIndex); - CompletableFuture - respFuture = asyncReply.getResponse(); - CompletableFuture - validateFuture = respFuture.thenApplyAsync(e -> { - try { - validateResponse(e); - } catch (IOException sce) { - respFuture.completeExceptionally(sce); - } - return e; - }, responseExecutor).exceptionally(e -> { - String msg = "Failed to write chunk " + chunkInfo.getChunkName() + - " into block " + blockID; - LOG.debug("{}, exception: {}", msg, e.getLocalizedMessage()); - CompletionException ce = new CompletionException(msg, e); - setIoException(ce); - throw ce; - }); + BlockData blockData = null; + if (config.getIncrementalChunkList()) { updateBlockDataForWriteChunk(chunk); } else { containerBlockData.addChunks(chunkInfo); } + if (putBlockPiggybacking) { + Preconditions.checkNotNull(bufferList); + byteBufferList = bufferList; + bufferList = null; + 
Preconditions.checkNotNull(byteBufferList); + + blockData = containerBlockData.build(); + LOG.debug("piggyback chunk list {}", blockData); + + if (config.getIncrementalChunkList()) { + // remove any chunks in the containerBlockData list. + // since they are sent. + containerBlockData.clearChunks(); + } + } else { + byteBufferList = null; + } + XceiverClientReply asyncReply = writeChunkAsync(xceiverClient, chunkInfo, + blockID.get(), data, tokenString, replicationIndex, blockData); + CompletableFuture + respFuture = asyncReply.getResponse(); + validateFuture = respFuture.thenApplyAsync(e -> { + try { + validateResponse(e); + } catch (IOException sce) { + respFuture.completeExceptionally(sce); + } + // if the ioException is not set, putBlock is successful + if (getIoException() == null && putBlockPiggybacking) { + handleSuccessfulPutBlock(e.getWriteChunk().getCommittedBlockLength(), + asyncReply, flushPos, byteBufferList); + } + return e; + }, responseExecutor).exceptionally(e -> { + String msg = "Failed to write chunk " + chunkInfo.getChunkName() + + " into block " + blockID; + LOG.debug("{}, exception: {}", msg, e.getLocalizedMessage()); + CompletionException ce = new CompletionException(msg, e); + setIoException(ce); + throw ce; + }); clientMetrics.recordWriteChunk(pipeline, chunkInfo.getLen()); - return validateFuture; + } catch (IOException | ExecutionException e) { throw new IOException(EXCEPTION_MSG + e.toString(), e); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); handleInterruptedException(ex, false); } - return null; + if (putBlockPiggybacking) { + putFlushFuture(flushPos, validateFuture); + } + return validateFuture; + } + + private void handleSuccessfulPutBlock( + ContainerProtos.GetCommittedBlockLengthResponseProto e, + XceiverClientReply asyncReply, long flushPos, + List byteBufferList) { + BlockID responseBlockID = BlockID.getFromProtobuf( + e.getBlockID()); + Preconditions.checkState(blockID.get().getContainerBlockID() + .equals(responseBlockID.getContainerBlockID())); + // updates the bcsId of the block + blockID.set(responseBlockID); + if (LOG.isDebugEnabled()) { + LOG.debug( + "Adding index " + asyncReply.getLogIndex() + " flushLength " + + flushPos + " numBuffers " + byteBufferList.size() + + " blockID " + blockID + " bufferPool size" + bufferPool + .getSize() + " currentBufferIndex " + bufferPool + .getCurrentBufferIndex()); + } + // for standalone protocol, logIndex will always be 0. + updateCommitInfo(asyncReply, byteBufferList); } /** @@ -855,7 +930,11 @@ private void appendLastChunkBuffer(ChunkBuffer chunkBuffer, int offset, try { LOG.debug("put into last chunk buffer start = {} len = {}", copyStart, copyLen); - lastChunkBuffer.put(bb.array(), copyStart, copyLen); + int origPos = bb.position(); + int origLimit = bb.limit(); + bb.position(copyStart).limit(copyStart + copyLen); + lastChunkBuffer.put(bb); + bb.position(origPos).limit(origLimit); } catch (BufferOverflowException e) { LOG.error("appending from " + copyStart + " for len=" + copyLen + ". 
lastChunkBuffer remaining=" + lastChunkBuffer.remaining() + diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java index 274b977ef623..b68b56f67c72 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java @@ -111,6 +111,7 @@ void releaseBuffer(ChunkBuffer chunkBuffer) { } public void clearBufferPool() { + bufferList.forEach(ChunkBuffer::close); bufferList.clear(); currentBufferIndex = -1; } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java index 3c7f8a2360c8..aa339409eceb 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java @@ -24,6 +24,7 @@ */ package org.apache.hadoop.hdds.scm.storage; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.ozone.common.ChunkBuffer; @@ -32,6 +33,7 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutionException; /** * This class executes watchForCommit on ratis pipeline and releases @@ -42,8 +44,8 @@ class CommitWatcher extends AbstractCommitWatcher { private final BufferPool bufferPool; // future Map to hold up all putBlock futures - private final ConcurrentMap> futureMap = new ConcurrentHashMap<>(); + private final ConcurrentMap> + futureMap = new ConcurrentHashMap<>(); CommitWatcher(BufferPool bufferPool, XceiverClientSpi xceiverClient) { super(xceiverClient); @@ -67,11 +69,24 @@ void releaseBuffers(long index) { + totalLength + ": existing = " + futureMap.keySet()); } - ConcurrentMap> getFutureMap() { + @VisibleForTesting + ConcurrentMap> getFutureMap() { return futureMap; } + public void putFlushFuture(long flushPos, CompletableFuture flushFuture) { + futureMap.compute(flushPos, + (key, previous) -> previous == null ? 
flushFuture : + previous.thenCombine(flushFuture, (prev, curr) -> curr)); + } + + + public void waitOnFlushFutures() throws InterruptedException, ExecutionException { + // wait for all the transactions to complete + CompletableFuture.allOf(futureMap.values().toArray( + new CompletableFuture[0])).get(); + } + @Override public void cleanup() { super.cleanup(); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java index 0abc2274bf08..c8bfaf3e1bce 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java @@ -44,6 +44,8 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.function.Supplier; import java.util.stream.Collectors; import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putBlockAsync; @@ -75,10 +77,11 @@ public ECBlockOutputStream( BufferPool bufferPool, OzoneClientConfig config, Token token, - ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs + ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs, + Supplier executorServiceSupplier ) throws IOException { super(blockID, xceiverClientManager, - pipeline, bufferPool, config, token, clientMetrics, streamBufferArgs); + pipeline, bufferPool, config, token, clientMetrics, streamBufferArgs, executorServiceSupplier); // In EC stream, there will be only one node in pipeline. this.datanodeDetails = pipeline.getClosestNode(); } @@ -86,13 +89,14 @@ public ECBlockOutputStream( @Override public void write(byte[] b, int off, int len) throws IOException { this.currentChunkRspFuture = - writeChunkToContainer(ChunkBuffer.wrap(ByteBuffer.wrap(b, off, len))); + writeChunkToContainer( + ChunkBuffer.wrap(ByteBuffer.wrap(b, off, len)), false); updateWrittenDataLength(len); } public CompletableFuture write( ByteBuffer buff) throws IOException { - return writeChunkToContainer(ChunkBuffer.wrap(buff)); + return writeChunkToContainer(ChunkBuffer.wrap(buff), false); } public CompletableFuture token, - ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs + ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs, + Supplier blockOutputStreamResourceProvider ) throws IOException { super(blockID, xceiverClientManager, pipeline, - bufferPool, config, token, clientMetrics, streamBufferArgs); + bufferPool, config, token, clientMetrics, streamBufferArgs, blockOutputStreamResourceProvider); this.commitWatcher = new CommitWatcher(bufferPool, getXceiverClient()); } @@ -110,16 +113,13 @@ void updateCommitInfo(XceiverClientReply reply, List buffers) { } @Override - void putFlushFuture(long flushPos, - CompletableFuture flushFuture) { - commitWatcher.getFutureMap().put(flushPos, flushFuture); + void putFlushFuture(long flushPos, CompletableFuture flushFuture) { + commitWatcher.putFlushFuture(flushPos, flushFuture); } @Override void waitOnFlushFutures() throws InterruptedException, ExecutionException { - // wait for all the transactions to complete - CompletableFuture.allOf(commitWatcher.getFutureMap().values().toArray( - new CompletableFuture[0])).get(); + commitWatcher.waitOnFlushFutures(); } @Override diff --git 
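For illustration only (not part of this patch): the CommitWatcher change above merges multiple futures registered at the same flush position (compute + thenCombine) and waits for all of them with CompletableFuture.allOf. A JDK-only sketch of that bookkeeping, independent of the Ozone classes:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;

// Several futures may be registered for the same flush offset; they are chained
// so the map keeps one future per offset, and waitOnFlushFutures() blocks until
// every registered future has completed.
public final class FlushFutureTracker<T> {
  private final ConcurrentMap<Long, CompletableFuture<T>> futureMap =
      new ConcurrentHashMap<>();

  public void putFlushFuture(long flushPos, CompletableFuture<T> flushFuture) {
    futureMap.compute(flushPos,
        (key, previous) -> previous == null
            ? flushFuture
            // keep the later result, but complete only after both are done
            : previous.thenCombine(flushFuture, (prev, curr) -> curr));
  }

  public void waitOnFlushFutures() throws InterruptedException, ExecutionException {
    CompletableFuture.allOf(
        futureMap.values().toArray(new CompletableFuture[0])).get();
  }

  public static void main(String[] args) throws Exception {
    FlushFutureTracker<String> tracker = new FlushFutureTracker<>();
    tracker.putFlushFuture(4096, CompletableFuture.supplyAsync(() -> "chunk"));
    tracker.putFlushFuture(4096, CompletableFuture.supplyAsync(() -> "putBlock"));
    tracker.waitOnFlushFutures();  // returns once both futures at 4096 are done
    System.out.println("all flushes acknowledged");
  }
}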
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java index bd100214ae48..6f8a744f762d 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactory.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; @@ -48,8 +49,9 @@ public interface BlockInputStreamFactory { */ BlockExtendedInputStream create(ReplicationConfig repConfig, BlockLocationInfo blockInfo, Pipeline pipeline, - Token token, boolean verifyChecksum, + Token token, XceiverClientFactory xceiverFactory, - Function refreshFunction); + Function refreshFunction, + OzoneClientConfig config); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java index b9233f42d555..7edc498cf676 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockInputStreamFactoryImpl.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; @@ -76,16 +77,18 @@ public BlockInputStreamFactoryImpl(ByteBufferPool byteBufferPool, */ public BlockExtendedInputStream create(ReplicationConfig repConfig, BlockLocationInfo blockInfo, Pipeline pipeline, - Token token, boolean verifyChecksum, + Token token, XceiverClientFactory xceiverFactory, - Function refreshFunction) { + Function refreshFunction, + OzoneClientConfig config) { if (repConfig.getReplicationType().equals(HddsProtos.ReplicationType.EC)) { return new ECBlockInputStreamProxy((ECReplicationConfig)repConfig, - blockInfo, verifyChecksum, xceiverFactory, refreshFunction, - ecBlockStreamFactory); + blockInfo, xceiverFactory, refreshFunction, + ecBlockStreamFactory, config); } else { - return new BlockInputStream(blockInfo, pipeline, token, verifyChecksum, xceiverFactory, - refreshFunction); + return new BlockInputStream(blockInfo, + pipeline, token, xceiverFactory, refreshFunction, + config); } } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ByteBufferOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ByteBufferOutputStream.java index 174fd8c75f6d..19ce31c52932 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ByteBufferOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ByteBufferOutputStream.java @@ -39,6 +39,11 @@ public void write(@Nonnull byte[] byteArray) throws IOException { write(ByteBuffer.wrap(byteArray)); } + @Override + public void 
write(@Nonnull byte[] byteArray, int off, int len) throws IOException { + write(ByteBuffer.wrap(byteArray), off, len); + } + @Override public void write(int b) throws IOException { write(new byte[]{(byte) b}); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java index e85bf27d530f..8dc07f129b9c 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStream.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; @@ -60,7 +61,6 @@ public class ECBlockInputStream extends BlockExtendedInputStream { private final int ecChunkSize; private final long stripeSize; private final BlockInputStreamFactory streamFactory; - private final boolean verifyChecksum; private final XceiverClientFactory xceiverClientFactory; private final Function refreshFunction; private final BlockLocationInfo blockInfo; @@ -75,7 +75,7 @@ public class ECBlockInputStream extends BlockExtendedInputStream { private long position = 0; private boolean closed = false; private boolean seeked = false; - + private OzoneClientConfig config; protected ECReplicationConfig getRepConfig() { return repConfig; } @@ -108,13 +108,13 @@ protected int availableDataLocations(int expectedLocations) { } public ECBlockInputStream(ECReplicationConfig repConfig, - BlockLocationInfo blockInfo, boolean verifyChecksum, + BlockLocationInfo blockInfo, XceiverClientFactory xceiverClientFactory, Function refreshFunction, - BlockInputStreamFactory streamFactory) { + BlockInputStreamFactory streamFactory, + OzoneClientConfig config) { this.repConfig = repConfig; this.ecChunkSize = repConfig.getEcChunkSize(); - this.verifyChecksum = verifyChecksum; this.blockInfo = blockInfo; this.streamFactory = streamFactory; this.xceiverClientFactory = xceiverClientFactory; @@ -123,6 +123,7 @@ public ECBlockInputStream(ECReplicationConfig repConfig, this.dataLocations = new DatanodeDetails[repConfig.getRequiredNodes()]; this.blockStreams = new BlockExtendedInputStream[repConfig.getRequiredNodes()]; + this.config = config; this.stripeSize = (long)ecChunkSize * repConfig.getData(); setBlockLocations(this.blockInfo.getPipeline()); @@ -191,8 +192,9 @@ protected BlockExtendedInputStream getOrOpenStream(int locationIndex) { StandaloneReplicationConfig.getInstance( HddsProtos.ReplicationFactor.ONE), blkInfo, pipeline, - blockInfo.getToken(), verifyChecksum, xceiverClientFactory, - ecPipelineRefreshFunction(locationIndex + 1, refreshFunction)); + blockInfo.getToken(), xceiverClientFactory, + ecPipelineRefreshFunction(locationIndex + 1, refreshFunction), + config); blockStreams[locationIndex] = stream; LOG.debug("{}: created stream [{}]: {}", this, locationIndex, stream); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java index 0e2ef22c1e94..66e7a31337a6 100644 --- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactory.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; @@ -51,7 +52,8 @@ public interface ECBlockInputStreamFactory { */ BlockExtendedInputStream create(boolean missingLocations, List failedLocations, ReplicationConfig repConfig, - BlockLocationInfo blockInfo, boolean verifyChecksum, + BlockLocationInfo blockInfo, XceiverClientFactory xceiverFactory, - Function refreshFunction); + Function refreshFunction, + OzoneClientConfig config); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java index 36b6539ea817..01d0b0a7b7e8 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamFactoryImpl.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; @@ -74,16 +75,17 @@ private ECBlockInputStreamFactoryImpl(BlockInputStreamFactory streamFactory, */ public BlockExtendedInputStream create(boolean missingLocations, List failedLocations, ReplicationConfig repConfig, - BlockLocationInfo blockInfo, boolean verifyChecksum, + BlockLocationInfo blockInfo, XceiverClientFactory xceiverFactory, - Function refreshFunction) { + Function refreshFunction, + OzoneClientConfig config) { if (missingLocations) { // We create the reconstruction reader ECBlockReconstructedStripeInputStream sis = new ECBlockReconstructedStripeInputStream( - (ECReplicationConfig)repConfig, blockInfo, verifyChecksum, + (ECReplicationConfig)repConfig, blockInfo, xceiverFactory, refreshFunction, inputStreamFactory, - byteBufferPool, ecReconstructExecutorSupplier.get()); + byteBufferPool, ecReconstructExecutorSupplier.get(), config); if (failedLocations != null) { sis.addFailedDatanodes(failedLocations); } @@ -92,7 +94,8 @@ public BlockExtendedInputStream create(boolean missingLocations, } else { // Otherwise create the more efficient non-reconstruction reader return new ECBlockInputStream((ECReplicationConfig)repConfig, blockInfo, - verifyChecksum, xceiverFactory, refreshFunction, inputStreamFactory); + xceiverFactory, refreshFunction, inputStreamFactory, + config); } } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamProxy.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamProxy.java index 973561616f7b..68a0337cef1d 100644 --- 
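For illustration only (not part of this patch): the factory-signature changes in this area all follow one refactoring: the single verifyChecksum boolean is dropped and a client-config object is threaded through instead, so later read options need no further signature changes. A heavily simplified, hypothetical sketch of that pattern (stand-in names, not the real Ozone interfaces):

// Stand-in config object; the real code threads OzoneClientConfig instead.
final class ReadOptions {
  private boolean verifyChecksum = true;
  boolean isChecksumVerify() { return verifyChecksum; }
  void setChecksumVerify(boolean verify) { this.verifyChecksum = verify; }
}

interface BlockReader { int read(byte[] buf); }

interface BlockReaderFactory {
  // before: create(String blockId, boolean verifyChecksum)
  // after:  create(String blockId, ReadOptions options) -- one parameter that can grow
  BlockReader create(String blockId, ReadOptions options);
}

final class SimpleBlockReaderFactory implements BlockReaderFactory {
  @Override
  public BlockReader create(String blockId, ReadOptions options) {
    return buf -> {
      if (options.isChecksumVerify()) {
        // a real reader would recompute and compare chunk checksums here
      }
      return buf.length;  // pretend the whole buffer was filled
    };
  }
}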
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamProxy.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockInputStreamProxy.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -49,7 +50,6 @@ public class ECBlockInputStreamProxy extends BlockExtendedInputStream { LoggerFactory.getLogger(ECBlockInputStreamProxy.class); private final ECReplicationConfig repConfig; - private final boolean verifyChecksum; private final XceiverClientFactory xceiverClientFactory; private final Function refreshFunction; private final BlockLocationInfo blockInfo; @@ -59,6 +59,7 @@ public class ECBlockInputStreamProxy extends BlockExtendedInputStream { private boolean reconstructionReader = false; private List failedLocations = new ArrayList<>(); private boolean closed = false; + private OzoneClientConfig config; /** * Given the ECReplicationConfig and the block length, calculate how many @@ -97,16 +98,17 @@ public static int availableDataLocations(Pipeline pipeline, } public ECBlockInputStreamProxy(ECReplicationConfig repConfig, - BlockLocationInfo blockInfo, boolean verifyChecksum, + BlockLocationInfo blockInfo, XceiverClientFactory xceiverClientFactory, Function refreshFunction, - ECBlockInputStreamFactory streamFactory) { + ECBlockInputStreamFactory streamFactory, + OzoneClientConfig config) { this.repConfig = repConfig; - this.verifyChecksum = verifyChecksum; this.blockInfo = blockInfo; this.ecBlockInputStreamFactory = streamFactory; this.xceiverClientFactory = xceiverClientFactory; this.refreshFunction = refreshFunction; + this.config = config; setReaderType(); createBlockReader(); @@ -124,8 +126,8 @@ private void createBlockReader() { .incECReconstructionTotal(); } blockReader = ecBlockInputStreamFactory.create(reconstructionReader, - failedLocations, repConfig, blockInfo, verifyChecksum, - xceiverClientFactory, refreshFunction); + failedLocations, repConfig, blockInfo, + xceiverClientFactory, refreshFunction, config); } @Override diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java index 142825cb1206..31f94e0acad6 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockReconstructedStripeInputStream.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; @@ -152,14 +153,15 @@ public class ECBlockReconstructedStripeInputStream extends ECBlockInputStream { @SuppressWarnings("checkstyle:ParameterNumber") public ECBlockReconstructedStripeInputStream(ECReplicationConfig repConfig, - BlockLocationInfo blockInfo, 
boolean verifyChecksum, + BlockLocationInfo blockInfo, XceiverClientFactory xceiverClientFactory, Function refreshFunction, BlockInputStreamFactory streamFactory, ByteBufferPool byteBufferPool, - ExecutorService ecReconstructExecutor) { - super(repConfig, blockInfo, verifyChecksum, xceiverClientFactory, - refreshFunction, streamFactory); + ExecutorService ecReconstructExecutor, + OzoneClientConfig config) { + super(repConfig, blockInfo, xceiverClientFactory, + refreshFunction, streamFactory, config); this.byteBufferPool = byteBufferPool; this.executor = ecReconstructExecutor; diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java new file mode 100644 index 000000000000..0dd29cb50a45 --- /dev/null +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.junit.jupiter.api.Test; + +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE; +import static org.junit.jupiter.api.Assertions.assertEquals; + +class TestOzoneClientConfig { + + @Test + void missingSizeSuffix() { + final int bytes = 1024; + + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt("ozone.client.bytes.per.checksum", bytes); + + OzoneClientConfig subject = conf.getObject(OzoneClientConfig.class); + + assertEquals(OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE, subject.getBytesPerChecksum()); + } +} diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStream.java index ca3199d8acfb..2987a9b6136f 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStream.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; @@ -45,14 +46,14 @@ class DummyBlockInputStream extends BlockInputStream { long blockLen, Pipeline pipeline, Token token, - boolean verifyChecksum, XceiverClientFactory xceiverClientManager, Function refreshFunction, List chunkList, - Map chunks) { + Map chunks, + OzoneClientConfig config) { super(new BlockLocationInfo(new BlockLocationInfo.Builder().setBlockID(blockId).setLength(blockLen)), - pipeline, token, verifyChecksum, - xceiverClientManager, refreshFunction); + pipeline, token, + xceiverClientManager, refreshFunction, config); this.chunkDataMap = chunks; this.chunks = chunkList; diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java index d66c76dcddcb..172e62887bdd 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; @@ -52,12 +53,12 @@ final class DummyBlockInputStreamWithRetry long blockLen, Pipeline pipeline, Token 
token, - boolean verifyChecksum, XceiverClientFactory xceiverClientManager, List chunkList, Map chunkMap, - AtomicBoolean isRerfreshed, IOException ioException) { - super(blockId, blockLen, pipeline, token, verifyChecksum, + AtomicBoolean isRerfreshed, IOException ioException, + OzoneClientConfig config) { + super(blockId, blockLen, pipeline, token, xceiverClientManager, blockID -> { isRerfreshed.set(true); try { @@ -69,7 +70,7 @@ final class DummyBlockInputStreamWithRetry throw new RuntimeException(e); } - }, chunkList, chunkMap); + }, chunkList, chunkMap, config); this.ioException = ioException; } diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java index 4db569b7c07a..0012d691f92d 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java @@ -22,9 +22,11 @@ import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ContainerBlockID; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; @@ -34,6 +36,7 @@ import org.apache.hadoop.ozone.common.Checksum; import org.apache.hadoop.ozone.common.OzoneChecksumException; +import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.thirdparty.io.grpc.Status; import org.apache.ratis.thirdparty.io.grpc.StatusException; import org.junit.jupiter.api.BeforeEach; @@ -42,6 +45,7 @@ import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import org.mockito.stubbing.OngoingStubbing; +import org.slf4j.event.Level; import java.io.EOFException; import java.io.IOException; @@ -58,6 +62,7 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_FOUND; import static org.apache.hadoop.hdds.scm.storage.TestChunkInputStream.generateRandomData; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -86,6 +91,8 @@ public class TestBlockInputStream { private Function refreshFunction; + private OzoneConfiguration conf = new OzoneConfiguration(); + @BeforeEach @SuppressWarnings("unchecked") public void setup() throws Exception { @@ -93,10 +100,12 @@ public void setup() throws Exception { BlockID blockID = new BlockID(new ContainerBlockID(1, 1)); checksum = new Checksum(ChecksumType.NONE, CHUNK_SIZE); createChunkList(5); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(false); Pipeline pipeline = MockPipeline.createSingleNodePipeline(); blockStream = new DummyBlockInputStream(blockID, blockSize, pipeline, null, - false, null, refreshFunction, chunks, chunkDataMap); + null, 
refreshFunction, chunks, chunkDataMap, clientConfig); } /** @@ -257,18 +266,25 @@ public void testSeekAndRead() throws Exception { @Test public void testRefreshPipelineFunction() throws Exception { + GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer + .captureLogs(BlockInputStream.LOG); + GenericTestUtils.setLogLevel(BlockInputStream.LOG, Level.DEBUG); BlockID blockID = new BlockID(new ContainerBlockID(1, 1)); AtomicBoolean isRefreshed = new AtomicBoolean(); createChunkList(5); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(false); try (BlockInputStream blockInputStreamWithRetry = new DummyBlockInputStreamWithRetry(blockID, blockSize, MockPipeline.createSingleNodePipeline(), null, - false, null, chunks, chunkDataMap, isRefreshed, null)) { + null, chunks, chunkDataMap, isRefreshed, null, + clientConfig)) { assertFalse(isRefreshed.get()); seekAndVerify(50); byte[] b = new byte[200]; blockInputStreamWithRetry.read(b, 0, 200); + assertThat(logCapturer.getOutput()).contains("Retry read after"); assertTrue(isRefreshed.get()); } } @@ -348,8 +364,10 @@ private static ChunkInputStream throwingChunkInputStream(IOException ex, private BlockInputStream createSubject(BlockID blockID, Pipeline pipeline, ChunkInputStream stream) { - return new DummyBlockInputStream(blockID, blockSize, pipeline, null, false, - null, refreshFunction, chunks, null) { + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(false); + return new DummyBlockInputStream(blockID, blockSize, pipeline, null, + null, refreshFunction, chunks, null, clientConfig) { @Override protected ChunkInputStream createChunkInputStream(ChunkInfo chunkInfo) { return stream; @@ -401,9 +419,12 @@ public void testRefreshOnReadFailureAfterUnbuffer(IOException ex) .thenReturn(blockLocationInfo); when(blockLocationInfo.getPipeline()).thenReturn(newPipeline); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(false); BlockInputStream subject = new BlockInputStream( new BlockLocationInfo(new BlockLocationInfo.Builder().setBlockID(blockID).setLength(blockSize)), - pipeline, null, false, clientFactory, refreshFunction) { + pipeline, null, clientFactory, refreshFunction, + clientConfig) { @Override protected ChunkInputStream createChunkInputStream(ChunkInfo chunkInfo) { return stream; diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java index 9b061f5392d3..d06c9cf684f4 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java @@ -47,6 +47,7 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; +import static java.util.concurrent.Executors.newFixedThreadPool; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; @@ -108,7 +109,9 @@ private BlockOutputStream createBlockOutputStream(BufferPool bufferPool) bufferPool, config, null, - ContainerClientMetrics.acquire(), streamBufferArgs); + ContainerClientMetrics.acquire(), + streamBufferArgs, + () -> newFixedThreadPool(10)); } /** diff --git 
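For illustration only (not part of this patch): the constructors changed here take a Supplier of ExecutorService (the test above passes () -> newFixedThreadPool(10)), and earlier in the patch cleanup() no longer shuts the response executor down, which suggests the executor's lifecycle belongs to whoever supplies it. A small JDK-only sketch of that ownership split, with hypothetical names:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Supplier;

// Hypothetical sketch: the stream obtains an executor from the supplier to run
// response callbacks on, but never shuts it down itself -- the caller that
// created the supplier owns the pool and closes it once all streams are done.
final class ResponseCallbackRunner implements AutoCloseable {
  private final ExecutorService responseExecutor;

  ResponseCallbackRunner(Supplier<ExecutorService> executorSupplier) {
    this.responseExecutor = executorSupplier.get();
  }

  void onResponse(Runnable validation) {
    responseExecutor.execute(validation);
  }

  @Override
  public void close() {
    // intentionally no responseExecutor.shutdown(): the pool may be shared by
    // many streams and is closed by its owner, not by each stream.
  }

  public static void main(String[] args) throws Exception {
    ExecutorService shared = Executors.newFixedThreadPool(2);
    try (ResponseCallbackRunner a = new ResponseCallbackRunner(() -> shared);
         ResponseCallbackRunner b = new ResponseCallbackRunner(() -> shared)) {
      a.onResponse(() -> System.out.println("validated response for stream a"));
      b.onResponse(() -> System.out.println("validated response for stream b"));
    } finally {
      shared.shutdown();  // the single owner shuts the shared pool down
    }
  }
}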
a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java index 41bf46a8ea20..049037bc4dce 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/ECStreamTestUtil.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; @@ -258,9 +259,10 @@ public synchronized void setFailIndexes(Integer... fail) { public synchronized BlockExtendedInputStream create( ReplicationConfig repConfig, BlockLocationInfo blockInfo, Pipeline pipeline, - Token token, boolean verifyChecksum, + Token token, XceiverClientFactory xceiverFactory, - Function refreshFunction) { + Function refreshFunction, + OzoneClientConfig config) { int repInd = currentPipeline.getReplicaIndex(pipeline.getNodes().get(0)); TestBlockInputStream stream = new TestBlockInputStream( diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestBlockInputStreamFactoryImpl.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestBlockInputStreamFactoryImpl.java index cf3f4f13ef94..623f7a4f86f1 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestBlockInputStreamFactoryImpl.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestBlockInputStreamFactoryImpl.java @@ -21,9 +21,11 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; @@ -43,6 +45,8 @@ */ public class TestBlockInputStreamFactoryImpl { + private OzoneConfiguration conf = new OzoneConfiguration(); + @Test public void testNonECGivesBlockInputStream() { BlockInputStreamFactory factory = new BlockInputStreamFactoryImpl(); @@ -52,9 +56,12 @@ public void testNonECGivesBlockInputStream() { BlockLocationInfo blockInfo = createKeyLocationInfo(repConfig, 3, 1024 * 1024 * 10); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); BlockExtendedInputStream stream = factory.create(repConfig, blockInfo, blockInfo.getPipeline(), - blockInfo.getToken(), true, null, null); + blockInfo.getToken(), null, null, + clientConfig); assertInstanceOf(BlockInputStream.class, stream); assertEquals(stream.getBlockID(), blockInfo.getBlockID()); assertEquals(stream.getLength(), blockInfo.getLength()); @@ -69,9 +76,12 @@ public void testECGivesECBlockInputStream() { BlockLocationInfo blockInfo = createKeyLocationInfo(repConfig, 5, 1024 * 1024 * 10); + OzoneClientConfig clientConfig = 
conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); BlockExtendedInputStream stream = factory.create(repConfig, blockInfo, blockInfo.getPipeline(), - blockInfo.getToken(), true, null, null); + blockInfo.getToken(), null, null, + clientConfig); assertInstanceOf(ECBlockInputStreamProxy.class, stream); assertEquals(stream.getBlockID(), blockInfo.getBlockID()); assertEquals(stream.getLength(), blockInfo.getLength()); diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java index bd34e7546c12..60974b35a95c 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStream.java @@ -20,9 +20,11 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; @@ -59,6 +61,7 @@ public class TestECBlockInputStream { private ECReplicationConfig repConfig; private TestBlockInputStreamFactory streamFactory; + private OzoneConfiguration conf = new OzoneConfiguration(); @BeforeEach public void setup() { @@ -72,15 +75,19 @@ public void testSufficientLocations() { // EC-3-2, 5MB block, so all 3 data locations are needed BlockLocationInfo keyInfo = ECStreamTestUtil .createKeyInfo(repConfig, 5, 5 * ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, new TestBlockInputStreamFactory())) { + keyInfo, null, null, new TestBlockInputStreamFactory(), + clientConfig)) { assertTrue(ecb.hasSufficientLocations()); } // EC-3-2, very large block, so all 3 data locations are needed keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 5000 * ONEMB); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, new TestBlockInputStreamFactory())) { + keyInfo, null, null, new TestBlockInputStreamFactory(), + clientConfig)) { assertTrue(ecb.hasSufficientLocations()); } @@ -90,7 +97,8 @@ keyInfo, true, null, null, new TestBlockInputStreamFactory())) { dnMap.put(MockDatanodeDetails.randomDatanodeDetails(), 1); keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, ONEMB - 1, dnMap); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, new TestBlockInputStreamFactory())) { + keyInfo, null, null, new TestBlockInputStreamFactory(), + clientConfig)) { assertTrue(ecb.hasSufficientLocations()); } @@ -100,7 +108,8 @@ keyInfo, true, null, null, new TestBlockInputStreamFactory())) { dnMap.put(MockDatanodeDetails.randomDatanodeDetails(), 1); keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5 * ONEMB, dnMap); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, new TestBlockInputStreamFactory())) { + keyInfo, null, null, 
new TestBlockInputStreamFactory(), + clientConfig)) { assertFalse(ecb.hasSufficientLocations()); } @@ -112,7 +121,8 @@ keyInfo, true, null, null, new TestBlockInputStreamFactory())) { dnMap.put(MockDatanodeDetails.randomDatanodeDetails(), 5); keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5 * ONEMB, dnMap); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, new TestBlockInputStreamFactory())) { + keyInfo, null, null, new TestBlockInputStreamFactory(), + clientConfig)) { assertFalse(ecb.hasSufficientLocations()); } } @@ -124,8 +134,11 @@ public void testCorrectBlockSizePassedToBlockStreamLessThanCell() BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, ONEMB - 100); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.read(buf); // We expect only 1 block stream and it should have a length passed of // ONEMB - 100. @@ -141,8 +154,11 @@ public void testCorrectBlockSizePassedToBlockStreamTwoCells() BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, ONEMB + 100); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); assertEquals(ONEMB, streams.get(0).getLength()); @@ -157,8 +173,11 @@ public void testCorrectBlockSizePassedToBlockStreamThreeCells() BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 2 * ONEMB + 100); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); assertEquals(ONEMB, streams.get(0).getLength()); @@ -174,8 +193,11 @@ public void testCorrectBlockSizePassedToBlockStreamThreeFullAndPartialStripe() BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 10 * ONEMB + 100); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); assertEquals(4 * ONEMB, streams.get(0).getLength()); @@ -191,8 +213,11 @@ public void testCorrectBlockSizePassedToBlockStreamSingleFullCell() BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); assertEquals(ONEMB, streams.get(0).getLength()); @@ -206,8 +231,11 @@ public void testCorrectBlockSizePassedToBlockStreamSeveralFullCells() BlockLocationInfo keyInfo = 
ECStreamTestUtil.createKeyInfo(repConfig, 5, 9 * ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.read(buf); List streams = streamFactory.getBlockStreams(); assertEquals(3 * ONEMB, streams.get(0).getLength()); @@ -220,8 +248,11 @@ public void testCorrectBlockSizePassedToBlockStreamSeveralFullCells() public void testSimpleRead() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 5 * ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { ByteBuffer buf = ByteBuffer.allocate(100); @@ -243,8 +274,11 @@ public void testSimpleRead() throws IOException { public void testSimpleReadUnderOneChunk() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 1, ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { ByteBuffer buf = ByteBuffer.allocate(100); @@ -262,8 +296,11 @@ public void testSimpleReadUnderOneChunk() throws IOException { public void testReadPastEOF() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 50); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { ByteBuffer buf = ByteBuffer.allocate(100); @@ -281,8 +318,11 @@ public void testReadCrossingMultipleECChunkBounds() throws IOException { 100); BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 5 * ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { // EC Chunk size is 100 and 3-2. 
Create a byte buffer to read 3.5 chunks, // so 350 @@ -316,8 +356,11 @@ public void testSeekPastBlockLength() throws IOException { ONEMB); BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 100); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { assertThrows(EOFException.class, () -> ecb.seek(1000)); } } @@ -328,8 +371,11 @@ public void testSeekToLength() throws IOException { ONEMB); BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 100); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { // When seek more than the length, should throw EOFException. assertThrows(EOFException.class, () -> ecb.seek(101)); } @@ -341,8 +387,11 @@ public void testSeekToLengthZeroLengthBlock() throws IOException { ONEMB); BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 0); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.seek(0); assertEquals(0, ecb.getPos()); assertEquals(0, ecb.getRemaining()); @@ -355,8 +404,11 @@ public void testSeekToValidPosition() throws IOException { ONEMB); BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 5 * ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { ecb.seek(ONEMB - 1); assertEquals(ONEMB - 1, ecb.getPos()); assertEquals(ONEMB * 4 + 1, ecb.getRemaining()); @@ -384,8 +436,11 @@ public void testErrorReadingBlockReportsBadLocation() throws IOException { ONEMB); BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 5, 5 * ONEMB); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { // Read a full stripe to ensure all streams are created in the stream // factory ByteBuffer buf = ByteBuffer.allocate(3 * ONEMB); @@ -415,8 +470,11 @@ public void testNoErrorIfSpareLocationToRead() throws IOException { BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, 8 * ONEMB, datanodes); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { // Read a full stripe to ensure all streams are created in the stream // factory ByteBuffer buf = ByteBuffer.allocate(3 * ONEMB); @@ -479,8 +537,11 @@ public void testEcPipelineRefreshFunction() { return blockLocation; }; + OzoneClientConfig clientConfig = 
conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (ECBlockInputStream ecb = new ECBlockInputStream(repConfig, - keyInfo, true, null, null, streamFactory)) { + keyInfo, null, null, streamFactory, + clientConfig)) { Pipeline pipeline = ecb.ecPipelineRefreshFunction(3, refreshFunction) .apply(blockID) @@ -513,8 +574,9 @@ public synchronized List getBlockStreams() { public synchronized BlockExtendedInputStream create( ReplicationConfig repConfig, BlockLocationInfo blockInfo, Pipeline pipeline, Token token, - boolean verifyChecksum, XceiverClientFactory xceiverFactory, - Function refreshFunction) { + XceiverClientFactory xceiverFactory, + Function refreshFunction, + OzoneClientConfig config) { TestBlockInputStream stream = new TestBlockInputStream( blockInfo.getBlockID(), blockInfo.getLength(), (byte)blockStreams.size()); diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java index 97bf71c204ad..ca0b9710a960 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockInputStreamProxy.java @@ -20,7 +20,9 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; @@ -52,6 +54,7 @@ public class TestECBlockInputStreamProxy { private long randomSeed; private ThreadLocalRandom random = ThreadLocalRandom.current(); private SplittableRandom dataGenerator; + private OzoneConfiguration conf = new OzoneConfiguration(); @BeforeEach public void setup() { @@ -342,8 +345,11 @@ private void resetAndAdvanceDataGenerator(long position) { private ECBlockInputStreamProxy createBISProxy(ECReplicationConfig rConfig, BlockLocationInfo blockInfo) { + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); return new ECBlockInputStreamProxy( - rConfig, blockInfo, true, null, null, streamFactory); + rConfig, blockInfo, null, null, streamFactory, + clientConfig); } private static class TestECBlockInputStreamFactory @@ -372,8 +378,9 @@ public List getFailedLocations() { public BlockExtendedInputStream create(boolean missingLocations, List failedDatanodes, ReplicationConfig repConfig, BlockLocationInfo blockInfo, - boolean verifyChecksum, XceiverClientFactory xceiverFactory, - Function refreshFunction) { + XceiverClientFactory xceiverFactory, + Function refreshFunction, + OzoneClientConfig config) { this.failedLocations = failedDatanodes; ByteBuffer wrappedBuffer = ByteBuffer.wrap(data.array(), 0, data.capacity()); diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedInputStream.java index 0425f6943a48..6b60bef66af4 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedInputStream.java 
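For illustration only (not part of this patch): several of these tests exercise the refreshFunction hook, which the input streams appear to invoke when a read against the cached pipeline fails so the block location can be re-resolved before the read is retried (hence the "Retry read after" assertion earlier). A JDK-only sketch of that retry-with-refresh shape, with hypothetical names:

import java.io.IOException;
import java.util.function.Function;

// Hypothetical sketch of a reader that re-resolves its source via a refresh
// function and retries once when the first attempt fails.
final class RefreshingReader {
  interface Source { int read(byte[] buf) throws IOException; }

  private Source source;
  private final Function<String, Source> refreshFunction;
  private final String blockId;

  RefreshingReader(String blockId, Source initial, Function<String, Source> refreshFunction) {
    this.blockId = blockId;
    this.source = initial;
    this.refreshFunction = refreshFunction;
  }

  int read(byte[] buf) throws IOException {
    try {
      return source.read(buf);
    } catch (IOException e) {
      // the cached location may be stale: ask for a fresh one and retry once
      source = refreshFunction.apply(blockId);
      return source.read(buf);
    }
  }

  public static void main(String[] args) throws IOException {
    Source failing = buf -> { throw new IOException("stale pipeline"); };
    Source healthy = buf -> buf.length;
    RefreshingReader reader = new RefreshingReader("block-1", failing, id -> healthy);
    System.out.println(reader.read(new byte[16]));  // 16, after one refresh
  }
}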
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedInputStream.java @@ -19,7 +19,9 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; import org.apache.hadoop.io.ByteBufferPool; import org.apache.hadoop.io.ElasticByteBufferPool; @@ -54,6 +56,7 @@ public class TestECBlockReconstructedInputStream { private ByteBufferPool bufferPool = new ElasticByteBufferPool(); private ExecutorService ecReconstructExecutor = Executors.newFixedThreadPool(3); + private OzoneConfiguration conf = new OzoneConfiguration(); @BeforeEach public void setup() throws IOException { @@ -74,8 +77,11 @@ private ECBlockReconstructedStripeInputStream createStripeInputStream( BlockLocationInfo keyInfo = ECStreamTestUtil.createKeyInfo(repConfig, blockLength, dnMap); streamFactory.setCurrentPipeline(keyInfo.getPipeline()); - return new ECBlockReconstructedStripeInputStream(repConfig, keyInfo, true, - null, null, streamFactory, bufferPool, ecReconstructExecutor); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); + return new ECBlockReconstructedStripeInputStream(repConfig, keyInfo, + null, null, streamFactory, bufferPool, ecReconstructExecutor, + clientConfig); } @Test diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java index f7a4bb0643ec..e526b12a5142 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java @@ -20,8 +20,10 @@ import com.google.common.collect.ImmutableSet; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; import org.apache.hadoop.io.ByteBufferPool; import org.apache.hadoop.io.ElasticByteBufferPool; @@ -73,7 +75,8 @@ public class TestECBlockReconstructedStripeInputStream { private ByteBufferPool bufferPool = new ElasticByteBufferPool(); private ExecutorService ecReconstructExecutor = Executors.newFixedThreadPool(3); - + private OzoneConfiguration conf = new OzoneConfiguration(); + static List> recoveryCases() { // TODO better name List> params = new ArrayList<>(); params.add(emptySet()); // non-recovery @@ -808,8 +811,11 @@ public void testFailedLocationsAreNotRead() throws IOException { private ECBlockReconstructedStripeInputStream createInputStream( BlockLocationInfo keyInfo) { - return new ECBlockReconstructedStripeInputStream(repConfig, keyInfo, true, - null, null, streamFactory, bufferPool, ecReconstructExecutor); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); + return new ECBlockReconstructedStripeInputStream(repConfig, keyInfo, + null, null, 
streamFactory, bufferPool, ecReconstructExecutor, + clientConfig); } private void addDataStreamsToFactory(ByteBuffer[] data, ByteBuffer[] parity) { diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index 20dce15d4d1b..2e0c96ac1d64 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -135,10 +135,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - org.apache.commons - commons-pool2 - org.bouncycastle bcpkix-jdk18on @@ -181,6 +177,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> grpc-api ${io.grpc.version} compile + + + com.google.code.findbugs + jsr305 + + @@ -200,11 +202,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-test-utils test - - org.junit.jupiter - junit-jupiter-engine - test - org.junit.platform junit-platform-launcher diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/DatanodeVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/DatanodeVersion.java index 30f9df597b51..e35d20d53e15 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/DatanodeVersion.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/DatanodeVersion.java @@ -31,6 +31,8 @@ public enum DatanodeVersion implements ComponentVersion { DEFAULT_VERSION(0, "Initial version"), SEPARATE_RATIS_PORTS_AVAILABLE(1, "Version with separated Ratis port."), + COMBINED_PUTBLOCK_WRITECHUNK_RPC(2, "WriteChunk can optionally support " + + "a PutBlock request"), FUTURE_VERSION(-1, "Used internally in the client when the server side is " + " newer and an unknown server version has arrived to the client."); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java index 787f023df2ea..d0c31bf2884f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java @@ -338,6 +338,9 @@ private HddsConfigKeys() { HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_DATANODE_PROTOCOL_ACL = "hdds.security.client.scm.secretkey.datanode.protocol.acl"; + public static final String OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL = + "ozone.security.reconfigure.protocol.acl"; + // Determines if the Container Chunk Manager will write user data to disk // Set to false only for specific performance tests public static final String HDDS_CONTAINER_PERSISTDATA = diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java index ee1c9669a1b1..e1188f1cd1e6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java @@ -424,6 +424,7 @@ public static boolean isReadOnly( case ListContainer: case ListChunk: case GetCommittedBlockLength: + case Echo: return true; case CloseContainer: case WriteChunk: diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java index 25ea315af284..9469fee7e284 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java @@ -154,6 +154,14 @@ public String getReplication() { + chunkKB(); } + /** Similar to {@link #getReplication()}, but applies to proto structure, without 
any validation. */ + public static String toString(HddsProtos.ECReplicationConfig proto) { + return proto.getCodec() + EC_REPLICATION_PARAMS_DELIMITER + + proto.getData() + EC_REPLICATION_PARAMS_DELIMITER + + proto.getParity() + EC_REPLICATION_PARAMS_DELIMITER + + proto.getEcChunkSize(); + } + public HddsProtos.ECReplicationConfig toProto() { return HddsProtos.ECReplicationConfig.newBuilder() .setData(data) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java index 69cce8db6d6b..e324a63d3ba0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import com.google.common.base.Preconditions; +import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.ratis.server.RaftServerConfigKeys; import static java.util.Collections.unmodifiableSortedSet; @@ -323,7 +324,67 @@ private static void addDeprecatedKeys() { new DeprecationDelta("ozone.scm.chunk.layout", ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY), new DeprecationDelta("hdds.datanode.replication.work.dir", - OZONE_CONTAINER_COPY_WORKDIR) + OZONE_CONTAINER_COPY_WORKDIR), + new DeprecationDelta("dfs.container.chunk.write.sync", + OzoneConfigKeys.HDDS_CONTAINER_CHUNK_WRITE_SYNC_KEY), + new DeprecationDelta("dfs.container.ipc", + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT), + new DeprecationDelta("dfs.container.ipc.random.port", + OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT), + new DeprecationDelta("dfs.container.ratis.admin.port", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT), + new DeprecationDelta("dfs.container.ratis.datanode.storage.dir", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR), + new DeprecationDelta("dfs.container.ratis.datastream.enabled", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED), + new DeprecationDelta("dfs.container.ratis.datastream.port", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT), + new DeprecationDelta("dfs.container.ratis.datastream.random.port", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT), + new DeprecationDelta("dfs.container.ratis.enabled", + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY), + new DeprecationDelta("dfs.container.ratis.ipc", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT), + new DeprecationDelta("dfs.container.ratis.ipc.random.port", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT), + new DeprecationDelta("dfs.container.ratis.leader.pending.bytes.limit", + ScmConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT), + new DeprecationDelta("dfs.container.ratis.log.appender.queue.byte-limit", + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT), + new DeprecationDelta("dfs.container.ratis.log.appender.queue.num-elements", + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS), + new DeprecationDelta("dfs.container.ratis.log.purge.gap", + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP), + new DeprecationDelta("dfs.container.ratis.log.queue.byte-limit", + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT), + new DeprecationDelta("dfs.container.ratis.log.queue.num-elements", + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS), + new DeprecationDelta("dfs.container.ratis.num.container.op.executors", + 
ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY), + new DeprecationDelta("dfs.container.ratis.num.write.chunk.threads.per.volume", + ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME), + new DeprecationDelta("dfs.container.ratis.replication.level", + ScmConfigKeys.HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY), + new DeprecationDelta("dfs.container.ratis.rpc.type", + ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY), + new DeprecationDelta("dfs.container.ratis.segment.preallocated.size", + ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY), + new DeprecationDelta("dfs.container.ratis.segment.size", + ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY), + new DeprecationDelta("dfs.container.ratis.server.port", + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT), + new DeprecationDelta("dfs.container.ratis.statemachinedata.sync.retries", + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES), + new DeprecationDelta("dfs.container.ratis.statemachinedata.sync.timeout", + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT), + new DeprecationDelta("dfs.container.ratis.statemachine.max.pending.apply-transactions", + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS), + new DeprecationDelta("dfs.ratis.leader.election.minimum.timeout.duration", + ScmConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY), + new DeprecationDelta("dfs.ratis.server.retry-cache.timeout.duration", + ScmConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY), + new DeprecationDelta("dfs.ratis.snapshot.threshold", + ScmConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY) }); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageSource.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageSource.java index c25c0a40c53e..a367cfbdc061 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageSource.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageSource.java @@ -57,9 +57,9 @@ final class Fixed implements SpaceUsageSource { private final long available; private final long used; - Fixed(long capacity, long available, long used) { + public Fixed(long capacity, long available, long used) { this.capacity = capacity; - this.available = available; + this.available = Math.max(Math.min(available, capacity - used), 0); this.used = used; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index 5b6fb6fe9b81..49f690f7b8c1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -86,57 +86,40 @@ public static Codec getCodec() { private String ipAddress; private String hostName; - private List ports; + private final List ports; private String certSerialId; private String version; private long setupTime; private String revision; private String buildDate; private volatile HddsProtos.NodeOperationalState persistedOpState; - private volatile long persistedOpStateExpiryEpochSec = 0; + private volatile long persistedOpStateExpiryEpochSec; private int initialVersion; private int currentVersion; - /** - * Constructs DatanodeDetails instance. DatanodeDetails.Builder is used - * for instantiating DatanodeDetails. 
- * @param uuid DataNode's UUID - * @param ipAddress IP Address of this DataNode - * @param hostName DataNode's hostname - * @param networkLocation DataNode's network location path - * @param ports Ports used by the DataNode - * @param certSerialId serial id from SCM issued certificate. - * @param version DataNode's version - * @param setupTime the setup time of DataNode - * @param revision DataNodes's revision - * @param buildDate DataNodes's build timestamp - * @param persistedOpState Operational State stored on DN. - * @param persistedOpStateExpiryEpochSec Seconds after the epoch the stored - * state should expire. - */ - @SuppressWarnings("parameternumber") - private DatanodeDetails(UUID uuid, String ipAddress, String hostName, - String networkLocation, List ports, String certSerialId, - String version, long setupTime, String revision, String buildDate, - HddsProtos.NodeOperationalState persistedOpState, - long persistedOpStateExpiryEpochSec, - int initialVersion, int currentVersion) { - super(hostName, networkLocation, NetConstants.NODE_COST_DEFAULT); - this.uuid = uuid; - this.uuidString = uuid.toString(); + private DatanodeDetails(Builder b) { + super(b.hostName, b.networkLocation, NetConstants.NODE_COST_DEFAULT); + uuid = b.id; + uuidString = uuid.toString(); threadNamePrefix = HddsUtils.threadNamePrefix(uuidString); - this.ipAddress = ipAddress; - this.hostName = hostName; - this.ports = ports; - this.certSerialId = certSerialId; - this.version = version; - this.setupTime = setupTime; - this.revision = revision; - this.buildDate = buildDate; - this.persistedOpState = persistedOpState; - this.persistedOpStateExpiryEpochSec = persistedOpStateExpiryEpochSec; - this.initialVersion = initialVersion; - this.currentVersion = currentVersion; + ipAddress = b.ipAddress; + hostName = b.hostName; + ports = b.ports; + certSerialId = b.certSerialId; + version = b.version; + setupTime = b.setupTime; + revision = b.revision; + buildDate = b.buildDate; + persistedOpState = b.persistedOpState; + persistedOpStateExpiryEpochSec = b.persistedOpStateExpiryEpochSec; + initialVersion = b.initialVersion; + currentVersion = b.currentVersion; + if (b.networkName != null) { + setNetworkName(b.networkName); + } + if (b.level > 0) { + setLevel(b.level); + } } public DatanodeDetails(DatanodeDetails datanodeDetails) { @@ -149,6 +132,7 @@ public DatanodeDetails(DatanodeDetails datanodeDetails) { this.ipAddress = datanodeDetails.ipAddress; this.hostName = datanodeDetails.hostName; this.ports = datanodeDetails.ports; + this.certSerialId = datanodeDetails.certSerialId; this.setNetworkName(datanodeDetails.getNetworkName()); this.setParent(datanodeDetails.getParent()); this.version = datanodeDetails.version; @@ -364,6 +348,9 @@ public static DatanodeDetails.Builder newBuilder( if (datanodeDetailsProto.hasNetworkLocation()) { builder.setNetworkLocation(datanodeDetailsProto.getNetworkLocation()); } + if (datanodeDetailsProto.hasLevel()) { + builder.setLevel(datanodeDetailsProto.getLevel()); + } if (datanodeDetailsProto.hasPersistedOpState()) { builder.setPersistedOpState(datanodeDetailsProto.getPersistedOpState()); } @@ -371,6 +358,9 @@ public static DatanodeDetails.Builder newBuilder( builder.setPersistedOpStateExpiry( datanodeDetailsProto.getPersistedOpStateExpiry()); } + if (datanodeDetailsProto.hasCurrentVersion()) { + builder.setCurrentVersion(datanodeDetailsProto.getCurrentVersion()); + } return builder; } @@ -456,6 +446,9 @@ public HddsProtos.DatanodeDetailsProto.Builder toProtoBuilder( if 
(!Strings.isNullOrEmpty(getNetworkLocation())) { builder.setNetworkLocation(getNetworkLocation()); } + if (getLevel() > 0) { + builder.setLevel(getLevel()); + } if (persistedOpState != null) { builder.setPersistedOpState(persistedOpState); } @@ -475,6 +468,8 @@ public HddsProtos.DatanodeDetailsProto.Builder toProtoBuilder( } } + builder.setCurrentVersion(currentVersion); + return builder; } @@ -505,6 +500,7 @@ public ExtendedDatanodeDetailsProto getExtendedProtoBufMessage() { } /** + * Note: Datanode initial version is not passed to the client due to no use case. See HDDS-9884 * @return the version this datanode was initially created with */ public int getInitialVersion() { @@ -585,6 +581,7 @@ public static final class Builder { private String hostName; private String networkName; private String networkLocation; + private int level; private List ports; private String certSerialId; private String version; @@ -616,6 +613,7 @@ public Builder setDatanodeDetails(DatanodeDetails details) { this.hostName = details.getHostName(); this.networkName = details.getNetworkName(); this.networkLocation = details.getNetworkLocation(); + this.level = details.getLevel(); this.ports = details.getPorts(); this.certSerialId = details.getCertSerialId(); this.version = details.getVersion(); @@ -683,6 +681,11 @@ public Builder setNetworkLocation(String loc) { return this; } + public Builder setLevel(int level) { + this.level = level; + return this; + } + /** * Adds a DataNode Port. * @@ -800,14 +803,7 @@ public DatanodeDetails build() { if (networkLocation == null) { networkLocation = NetConstants.DEFAULT_RACK; } - DatanodeDetails dn = new DatanodeDetails(id, ipAddress, hostName, - networkLocation, ports, certSerialId, version, setupTime, revision, - buildDate, persistedOpState, persistedOpStateExpiryEpochSec, - initialVersion, currentVersion); - if (networkName != null) { - dn.setNetworkName(networkName); - } - return dn; + return new DatanodeDetails(this); } } @@ -854,9 +850,6 @@ public enum Name { /** * Private constructor for constructing Port object. Use * DatanodeDetails#newPort to create a new Port object. 
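[Editor's note] The DatanodeDetails changes above replace the long telescoping constructor with a Builder-driven one and add the network "level" and "currentVersion" fields to the protobuf round trip. A minimal sketch of the resulting construction path, assuming the existing no-argument newBuilder() factory and that setUuid/setHostName/setIpAddress keep their current Builder names (they are not shown in this hunk):

    // Hedged sketch; only setNetworkLocation, setLevel, setCurrentVersion and
    // build() are visible in this diff, the other setter names are assumptions.
    DatanodeDetails dn = DatanodeDetails.newBuilder()
        .setUuid(UUID.randomUUID())        // hypothetical id
        .setHostName("dn1.example.com")    // hypothetical host
        .setIpAddress("10.0.0.5")          // hypothetical address
        .setNetworkLocation("/rack1")
        .setLevel(3)                       // new: serialized only when > 0
        .setCurrentVersion(2)              // new: always written by toProtoBuilder()
        .build();                          // delegates to the private DatanodeDetails(Builder b)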
- * - * @param name - * @param value */ private Port(Name name, Integer value) { this.name = name; @@ -1011,4 +1004,12 @@ public String getBuildDate() { public void setBuildDate(String date) { this.buildDate = date; } + + @Override + public HddsProtos.NetworkNode toProtobuf( + int clientVersion) { + return HddsProtos.NetworkNode.newBuilder() + .setDatanodeDetails(toProtoBuilder(clientVersion).build()) + .build(); + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java index cb7f6f8a3b31..bcea4d0193bd 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java @@ -234,8 +234,8 @@ public static RaftClient newRaftClient(RpcType rpcType, Pipeline pipeline, private static RpcType getRpcType(ConfigurationSource conf) { return SupportedRpcType.valueOfIgnoreCase(conf.get( - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT)); + ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, + ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT)); } public static BiFunction newRaftClient( diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index 42a74dd12c2e..b7f1d1e61500 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -41,95 +41,95 @@ public final class ScmConfigKeys { public static final String OZONE_SCM_DB_DIRS_PERMISSIONS = "ozone.scm.db.dirs.permissions"; - public static final String DFS_CONTAINER_RATIS_ENABLED_KEY - = "dfs.container.ratis.enabled"; - public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT + public static final String HDDS_CONTAINER_RATIS_ENABLED_KEY + = "hdds.container.ratis.enabled"; + public static final boolean HDDS_CONTAINER_RATIS_ENABLED_DEFAULT = false; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY - = "dfs.container.ratis.rpc.type"; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT + public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_KEY + = "hdds.container.ratis.rpc.type"; + public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT = "GRPC"; public static final String - DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME - = "dfs.container.ratis.num.write.chunk.threads.per.volume"; + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME + = "hdds.container.ratis.num.write.chunk.threads.per.volume"; public static final int - DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT = 10; - public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY - = "dfs.container.ratis.replication.level"; + public static final String HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY + = "hdds.container.ratis.replication.level"; public static final ReplicationLevel - DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT = ReplicationLevel.MAJORITY; - public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY - = "dfs.container.ratis.num.container.op.executors"; - public static final int DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT + HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT = ReplicationLevel.MAJORITY; + 
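[Editor's note] The dfs.container.* and dfs.ratis.* keys renamed to hdds.* here are paired with the DeprecationDelta entries added to OzoneConfiguration earlier in this diff, so values set under the legacy names still resolve. A minimal sketch, assuming Hadoop's standard key-deprecation behaviour:

    // A value written under the legacy dfs.* name remains visible through the
    // renamed hdds.* constant, via the DeprecationDelta added above.
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set("dfs.container.ratis.rpc.type", "GRPC");         // legacy key
    String rpcType = conf.get(
        ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY,       // "hdds.container.ratis.rpc.type"
        ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);  // resolves to "GRPC"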
public static final String HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY + = "hdds.container.ratis.num.container.op.executors"; + public static final int HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT = 10; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY = - "dfs.container.ratis.segment.size"; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT = + public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY = + "hdds.container.ratis.segment.size"; + public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT = "64MB"; - public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY = - "dfs.container.ratis.segment.preallocated.size"; + public static final String HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY = + "hdds.container.ratis.segment.preallocated.size"; public static final String - DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "4MB"; + HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "4MB"; public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = - "dfs.container.ratis.statemachinedata.sync.timeout"; + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = + "hdds.container.ratis.statemachinedata.sync.timeout"; public static final TimeDuration - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = TimeDuration.valueOf(10, TimeUnit.SECONDS); public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = - "dfs.container.ratis.statemachinedata.sync.retries"; + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = + "hdds.container.ratis.statemachinedata.sync.retries"; public static final String - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS = - "dfs.container.ratis.statemachine.max.pending.apply-transactions"; + HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS = + "hdds.container.ratis.statemachine.max.pending.apply-transactions"; // The default value of maximum number of pending state machine apply // transactions is kept same as default snapshot threshold. 
public static final int - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT = + HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT = 100000; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = - "dfs.container.ratis.log.queue.num-elements"; - public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = + "hdds.container.ratis.log.queue.num-elements"; + public static final int HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = 1024; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = - "dfs.container.ratis.log.queue.byte-limit"; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = + "hdds.container.ratis.log.queue.byte-limit"; + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = "4GB"; public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = - "dfs.container.ratis.log.appender.queue.num-elements"; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = + "hdds.container.ratis.log.appender.queue.num-elements"; public static final int - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1; - public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = - "dfs.container.ratis.log.appender.queue.byte-limit"; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1; + public static final String HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = + "hdds.container.ratis.log.appender.queue.byte-limit"; public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB"; - public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP = - "dfs.container.ratis.log.purge.gap"; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB"; + public static final String HDDS_CONTAINER_RATIS_LOG_PURGE_GAP = + "hdds.container.ratis.log.purge.gap"; // TODO: Set to 1024 once RATIS issue around purge is fixed. 
- public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = + public static final int HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = 1000000; - public static final String DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT = - "dfs.container.ratis.leader.pending.bytes.limit"; + public static final String HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT = + "hdds.container.ratis.leader.pending.bytes.limit"; public static final String - DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = "1GB"; + HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = "1GB"; - public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = - "dfs.ratis.server.retry-cache.timeout.duration"; + public static final String HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = + "hdds.ratis.server.retry-cache.timeout.duration"; public static final TimeDuration - DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = + HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = TimeDuration.valueOf(600000, TimeUnit.MILLISECONDS); public static final String - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = - "dfs.ratis.leader.election.minimum.timeout.duration"; + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = + "hdds.ratis.leader.election.minimum.timeout.duration"; public static final TimeDuration - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = TimeDuration.valueOf(5, TimeUnit.SECONDS); - public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY = - "dfs.ratis.snapshot.threshold"; - public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000; + public static final String HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY = + "hdds.ratis.snapshot.threshold"; + public static final long HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000; // TODO : this is copied from OzoneConsts, may need to move to a better place public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size"; @@ -146,8 +146,8 @@ public final class ScmConfigKeys { "32KB"; public static final String OZONE_CHUNK_LIST_INCREMENTAL = - "ozone.chunk.list.incremental"; - public static final boolean OZONE_CHUNK_LIST_INCREMENTAL_DEFAULT = false; + "ozone.incremental.chunk.list"; + public static final boolean OZONE_CHUNK_LIST_INCREMENTAL_DEFAULT = true; public static final String OZONE_SCM_CONTAINER_LAYOUT_KEY = "ozone.scm.container.layout"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index 402398e36c3f..6a46741a06ec 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -217,11 +217,12 @@ List queryNode(HddsProtos.NodeOperationalState opState, * Allows a list of hosts to be decommissioned. The hosts are identified * by their hostname and optionally port in the format foo.com:port. * @param hosts A list of hostnames, optionally with port + * @param force true to forcefully decommission Datanodes * @throws IOException * @return A list of DatanodeAdminError for any hosts which failed to * decommission */ - List decommissionNodes(List hosts) + List decommissionNodes(List hosts, boolean force) throws IOException; /** @@ -356,13 +357,20 @@ Map> getSafeModeRuleStatuses() /** * Start ContainerBalancer. 
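[Editor's note] The decommissionNodes change shown above adds a force flag to the ScmClient interface. A hedged sketch of a caller, where scmClient is a hypothetical ScmClient instance; force=false keeps the previous behaviour, force=true requests forceful decommission per the new @param documentation:

    List<DatanodeAdminError> errors = scmClient.decommissionNodes(
        Arrays.asList("dn1.example.com", "dn2.example.com:9858"), false);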
*/ + @SuppressWarnings("checkstyle:parameternumber") StartContainerBalancerResponseProto startContainerBalancer( Optional threshold, Optional iterations, Optional maxDatanodesPercentageToInvolvePerIteration, Optional maxSizeToMovePerIterationInGB, Optional maxSizeEnteringTargetInGB, - Optional maxSizeLeavingSourceInGB) throws IOException; + Optional maxSizeLeavingSourceInGB, + Optional balancingInterval, + Optional moveTimeout, + Optional moveReplicationTimeout, + Optional networkTopologyEnable, + Optional includeNodes, + Optional excludeNodes) throws IOException; /** * Stop ContainerBalancer. @@ -452,4 +460,6 @@ StatusAndMessages queryUpgradeFinalizationProgress( DecommissionScmResponseProto decommissionScm( String scmId) throws IOException; + + String getMetrics(String query) throws IOException; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java index b11428581e7b..6bf2d5500c88 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java @@ -90,31 +90,19 @@ public static Codec getCodec() { // container replica should have the same sequenceId. private long sequenceId; - @SuppressWarnings("parameternumber") - private ContainerInfo( - long containerID, - HddsProtos.LifeCycleState state, - PipelineID pipelineID, - long usedBytes, - long numberOfKeys, - long stateEnterTime, - String owner, - long deleteTransactionId, - long sequenceId, - ReplicationConfig repConfig, - Clock clock) { - this.containerID = ContainerID.valueOf(containerID); - this.pipelineID = pipelineID; - this.usedBytes = usedBytes; - this.numberOfKeys = numberOfKeys; - this.lastUsed = clock.instant(); - this.state = state; - this.stateEnterTime = Instant.ofEpochMilli(stateEnterTime); - this.owner = owner; - this.deleteTransactionId = deleteTransactionId; - this.sequenceId = sequenceId; - this.replicationConfig = repConfig; - this.clock = clock; + private ContainerInfo(Builder b) { + containerID = ContainerID.valueOf(b.containerID); + pipelineID = b.pipelineID; + usedBytes = b.used; + numberOfKeys = b.keys; + lastUsed = b.clock.instant(); + state = b.state; + stateEnterTime = Instant.ofEpochMilli(b.stateEnterTime); + owner = b.owner; + deleteTransactionId = b.deleteTransactionId; + sequenceId = b.sequenceId; + replicationConfig = b.replicationConfig; + clock = b.clock; } public static ContainerInfo fromProtobuf(HddsProtos.ContainerInfoProto info) { @@ -445,9 +433,7 @@ public Builder setClock(Clock clock) { } public ContainerInfo build() { - return new ContainerInfo(containerID, state, pipelineID, - used, keys, stateEnterTime, owner, deleteTransactionId, - sequenceId, replicationConfig, clock); + return new ContainerInfo(this); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java index 7ac0401af117..5a1d8f90ea84 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java @@ -26,8 +26,8 @@ * contains a Pipeline and the key. 
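[Editor's note] startContainerBalancer above grows from six to twelve Optional parameters; callers that only care about a few knobs pass Optional.empty() for the rest. A hedged sketch against the ScmClient interface shown above (scmClient is hypothetical, and the value types inside the Optionals are not spelled out in this hunk):

    StartContainerBalancerResponseProto resp = scmClient.startContainerBalancer(
        Optional.empty(),   // threshold
        Optional.empty(),   // iterations
        Optional.empty(),   // maxDatanodesPercentageToInvolvePerIteration
        Optional.empty(),   // maxSizeToMovePerIterationInGB
        Optional.empty(),   // maxSizeEnteringTargetInGB
        Optional.empty(),   // maxSizeLeavingSourceInGB
        Optional.empty(),   // balancingInterval        (new)
        Optional.empty(),   // moveTimeout              (new)
        Optional.empty(),   // moveReplicationTimeout   (new)
        Optional.empty(),   // networkTopologyEnable    (new)
        Optional.empty(),   // includeNodes             (new)
        Optional.empty());  // excludeNodes             (new)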
*/ public final class AllocatedBlock { - private Pipeline pipeline; - private ContainerBlockID containerBlockID; + private final Pipeline pipeline; + private final ContainerBlockID containerBlockID; /** * Builder for AllocatedBlock. @@ -63,4 +63,14 @@ public Pipeline getPipeline() { public ContainerBlockID getBlockID() { return containerBlockID; } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder() + .setContainerBlockID(containerBlockID) + .setPipeline(pipeline); + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java index c87d826d2529..6074e7da0afc 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java @@ -20,6 +20,8 @@ import java.util.Collection; import java.util.List; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + /** * The interface defines an inner node in a network topology. * An inner node represents network topology entities, such as data center, @@ -89,4 +91,16 @@ N newInnerNode(String name, String location, InnerNode parent, int level, */ Node getLeaf(int leafIndex, List excludedScopes, Collection excludedNodes, int ancestorGen); + + @Override + HddsProtos.NetworkNode toProtobuf(int clientVersion); + + boolean equals(Object o); + + int hashCode(); + + static InnerNode fromProtobuf( + HddsProtos.InnerNode innerNode) { + return InnerNodeImpl.fromProtobuf(innerNode); + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java index f2648f3d294c..332dddac25c9 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java @@ -27,6 +27,7 @@ import java.util.Map; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,10 +48,10 @@ public InnerNodeImpl newInnerNode(String name, String location, } } - static final Factory FACTORY = new Factory(); + public static final Factory FACTORY = new Factory(); // a map of node's network name to Node for quick search and keep // the insert order - private final HashMap childrenMap = + private HashMap childrenMap = new LinkedHashMap(); // number of descendant leaves under this node private int numOfLeaves; @@ -66,6 +67,76 @@ protected InnerNodeImpl(String name, String location, InnerNode parent, super(name, location, parent, level, cost); } + /** + * Construct an InnerNode from its name, network location, level, cost, + * childrenMap and number of leaves. This constructor is used as part of + * protobuf deserialization. + */ + protected InnerNodeImpl(String name, String location, int level, int cost, + HashMap childrenMap, int numOfLeaves) { + super(name, location, null, level, cost); + this.childrenMap = childrenMap; + this.numOfLeaves = numOfLeaves; + } + + /** + * InnerNodeImpl Builder to help construct an InnerNodeImpl object from + * protobuf objects. 
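[Editor's note] With pipeline and containerBlockID now final, AllocatedBlock instances cannot be mutated; the newBuilder()/toBuilder() accessors added above cover the copy-and-modify case instead. A minimal sketch, where allocatedBlock and refreshedPipeline are hypothetical and Builder.build() is assumed from the existing Builder class:

    AllocatedBlock updated = allocatedBlock.toBuilder()
        .setPipeline(refreshedPipeline)   // e.g. after a stale pipeline is refreshed
        .build();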
+ */ + public static class Builder { + private String name; + private String location; + private int cost; + private int level; + private HashMap childrenMap = new LinkedHashMap<>(); + private int numOfLeaves; + + public Builder setName(String name) { + this.name = name; + return this; + } + + public Builder setLocation(String location) { + this.location = location; + return this; + } + + public Builder setCost(int cost) { + this.cost = cost; + return this; + } + + public Builder setLevel(int level) { + this.level = level; + return this; + } + + public Builder setChildrenMap( + List childrenMapList) { + HashMap newChildrenMap = new LinkedHashMap<>(); + for (HddsProtos.ChildrenMap childrenMapProto : + childrenMapList) { + String networkName = childrenMapProto.hasNetworkName() ? + childrenMapProto.getNetworkName() : null; + Node node = childrenMapProto.hasNetworkNode() ? + Node.fromProtobuf(childrenMapProto.getNetworkNode()) : null; + newChildrenMap.put(networkName, node); + } + this.childrenMap = newChildrenMap; + return this; + } + + public Builder setNumOfLeaves(int numOfLeaves) { + this.numOfLeaves = numOfLeaves; + return this; + } + + public InnerNodeImpl build() { + return new InnerNodeImpl(name, location, level, cost, childrenMap, + numOfLeaves); + } + } + /** @return the number of children this node has */ private int getNumOfChildren() { return childrenMap.size(); @@ -77,6 +148,11 @@ public int getNumOfLeaves() { return numOfLeaves; } + /** @return a map of node's network name to Node. */ + public HashMap getChildrenMap() { + return childrenMap; + } + /** * @return number of its all nodes at level level. Here level is a * relative level. If level is 1, means node itself. If level is 2, means its @@ -390,14 +466,83 @@ public Node getLeaf(int leafIndex, List excludedScopes, } @Override - public boolean equals(Object to) { - if (to == null) { - return false; + public HddsProtos.NetworkNode toProtobuf( + int clientVersion) { + + HddsProtos.InnerNode.Builder innerNode = + HddsProtos.InnerNode.newBuilder() + .setNumOfLeaves(numOfLeaves) + .setNodeTopology( + NodeImpl.toProtobuf(getNetworkName(), getNetworkLocation(), + getLevel(), getCost())); + + if (childrenMap != null && !childrenMap.isEmpty()) { + for (Map.Entry entry : childrenMap.entrySet()) { + if (entry.getValue() != null) { + HddsProtos.ChildrenMap childrenMapProto = + HddsProtos.ChildrenMap.newBuilder() + .setNetworkName(entry.getKey()) + .setNetworkNode(entry.getValue().toProtobuf(clientVersion)) + .build(); + innerNode.addChildrenMap(childrenMapProto); + } + } + } + innerNode.build(); + + HddsProtos.NetworkNode networkNode = + HddsProtos.NetworkNode.newBuilder() + .setInnerNode(innerNode).build(); + + return networkNode; + } + + public static InnerNode fromProtobuf(HddsProtos.InnerNode innerNode) { + InnerNodeImpl.Builder builder = new InnerNodeImpl.Builder(); + + if (innerNode.hasNodeTopology()) { + HddsProtos.NodeTopology nodeTopology = innerNode.getNodeTopology(); + + if (nodeTopology.hasName()) { + builder.setName(nodeTopology.getName()); + } + if (nodeTopology.hasLocation()) { + builder.setLocation(nodeTopology.getLocation()); + } + if (nodeTopology.hasLevel()) { + builder.setLevel(nodeTopology.getLevel()); + } + if (nodeTopology.hasCost()) { + builder.setCost(nodeTopology.getCost()); + } + } + + if (!innerNode.getChildrenMapList().isEmpty()) { + builder.setChildrenMap(innerNode.getChildrenMapList()); + } + if (innerNode.hasNumOfLeaves()) { + builder.setNumOfLeaves(innerNode.getNumOfLeaves()); } - if (this == to) { + + 
return builder.build(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { return true; } - return this.toString().equals(to.toString()); + if (o == null || getClass() != o.getClass()) { + return false; + } + InnerNodeImpl innerNode = (InnerNodeImpl) o; + return this.getNetworkName().equals(innerNode.getNetworkName()) && + this.getNetworkLocation().equals(innerNode.getNetworkLocation()) && + this.getLevel() == innerNode.getLevel() && + this.getCost() == innerNode.getCost() && + this.numOfLeaves == innerNode.numOfLeaves && + this.childrenMap.size() == innerNode.childrenMap.size() && + this.childrenMap.equals(innerNode.childrenMap); } @Override diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java index 2dc86c1b6856..1f3d0f02e6de 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java @@ -30,6 +30,7 @@ import java.util.LinkedHashSet; import java.util.List; import java.util.NavigableMap; +import java.util.Objects; import java.util.TreeMap; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.locks.ReadWriteLock; @@ -75,6 +76,15 @@ public NetworkTopologyImpl(ConfigurationSource conf) { schemaManager.getCost(NetConstants.ROOT_LEVEL)); } + public NetworkTopologyImpl(String schemaFile, InnerNode clusterTree) { + schemaManager = NodeSchemaManager.getInstance(); + schemaManager.init(schemaFile); + maxLevel = schemaManager.getMaxLevel(); + shuffleOperation = Collections::shuffle; + factory = InnerNodeImpl.FACTORY; + this.clusterTree = clusterTree; + } + @VisibleForTesting public NetworkTopologyImpl(NodeSchemaManager manager, Consumer> shuffleOperation) { @@ -223,10 +233,10 @@ public boolean contains(Node node) { private boolean containsNode(Node node) { Node parent = node.getParent(); - while (parent != null && parent != clusterTree) { + while (parent != null && !Objects.equals(parent, clusterTree)) { parent = parent.getParent(); } - return parent == clusterTree; + return Objects.equals(parent, clusterTree); } /** @@ -240,7 +250,9 @@ public boolean isSameAncestor(Node node1, Node node2, int ancestorGen) { } netlock.readLock().lock(); try { - return node1.getAncestor(ancestorGen) == node2.getAncestor(ancestorGen); + Node ancestor1 = node1.getAncestor(ancestorGen); + Node ancestor2 = node2.getAncestor(ancestorGen); + return Objects.equals(ancestor1, ancestor2); } finally { netlock.readLock().unlock(); } @@ -259,7 +271,7 @@ public boolean isSameParent(Node node1, Node node2) { try { node1 = node1.getParent(); node2 = node2.getParent(); - return node1 == node2; + return Objects.equals(node1, node2); } finally { netlock.readLock().unlock(); } @@ -704,8 +716,7 @@ private Node chooseNodeInternal(String scope, int leafIndex, */ @Override public int getDistanceCost(Node node1, Node node2) { - if ((node1 != null && node1.equals(node2)) || - (node1 == null && node2 == null)) { + if (Objects.equals(node1, node2)) { return 0; } if (node1 == null || node2 == null) { @@ -726,8 +737,10 @@ public int getDistanceCost(Node node1, Node node2) { int cost = 0; netlock.readLock().lock(); try { - if ((node1.getAncestor(level1 - 1) != clusterTree) || - (node2.getAncestor(level2 - 1) != clusterTree)) { + Node ancestor1 = node1.getAncestor(level1 - 1); + Node ancestor2 = node2.getAncestor(level2 - 1); + if 
(!Objects.equals(ancestor1, clusterTree) || + !Objects.equals(ancestor2, clusterTree)) { LOG.debug("One of the nodes is outside of network topology"); return Integer.MAX_VALUE; } @@ -741,7 +754,7 @@ public int getDistanceCost(Node node1, Node node2) { level2--; cost += node2 == null ? 0 : node2.getCost(); } - while (node1 != null && node2 != null && node1 != node2) { + while (node1 != null && node2 != null && !Objects.equals(node1, node2)) { node1 = node1.getParent(); node2 = node2.getParent(); cost += node1 == null ? 0 : node1.getCost(); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java index 9884888a1dd4..50f702cce08e 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hdds.scm.net; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + /** * The interface defines a node in a network topology. * A node may be a leave representing a data node or an inner @@ -126,4 +129,21 @@ public interface Node { * @return true if this node is under a specific scope */ boolean isDescendant(String nodePath); + + default HddsProtos.NetworkNode toProtobuf( + int clientVersion) { + return null; + } + + static Node fromProtobuf( + HddsProtos.NetworkNode networkNode) { + if (networkNode.hasDatanodeDetails()) { + return DatanodeDetails.getFromProtoBuf( + networkNode.getDatanodeDetails()); + } else if (networkNode.hasInnerNode()) { + return InnerNode.fromProtobuf(networkNode.getInnerNode()); + } else { + return null; + } + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java index e7a45f649b6e..e4d76cd3dbc7 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.net; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR; @@ -229,6 +230,20 @@ public boolean isDescendant(String nodePath) { NetUtils.addSuffix(nodePath)); } + public static HddsProtos.NodeTopology toProtobuf(String name, String location, + int level, int cost) { + + HddsProtos.NodeTopology.Builder nodeTopologyBuilder = + HddsProtos.NodeTopology.newBuilder() + .setName(name) + .setLocation(location) + .setLevel(level) + .setCost(cost); + + HddsProtos.NodeTopology nodeTopology = nodeTopologyBuilder.build(); + return nodeTopology; + } + @Override public boolean equals(Object to) { if (to == null) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java index eecd79876720..fb37b214cad1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java @@ -62,6 +62,14 @@ public void init(ConfigurationSource conf) { String schemaFile = conf.get( 
ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT); + loadSchemaFile(schemaFile); + } + + public void init(String schemaFile) { + loadSchemaFile(schemaFile); + } + + private void loadSchemaFile(String schemaFile) { NodeSchemaLoadResult result; try { result = NodeSchemaLoader.getInstance().loadSchemaFromFile(schemaFile); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java index 9d95cee48366..05d83a8b8b56 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java @@ -34,6 +34,8 @@ import java.util.UUID; import com.fasterxml.jackson.annotation.JsonIgnore; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; @@ -76,10 +78,10 @@ public static Codec getCodec() { private final ReplicationConfig replicationConfig; private final PipelineState state; - private Map nodeStatus; - private Map replicaIndexes; + private final Map nodeStatus; + private final Map replicaIndexes; // nodes with ordered distance to client - private List nodesInOrder = new ArrayList<>(); + private final ImmutableList nodesInOrder; // Current reported Leader for the pipeline private UUID leaderId; // Timestamp for pipeline upon creation @@ -103,17 +105,17 @@ public static Codec getCodec() { * set to Instant.now when you crate the Pipeline object as part of * state change. */ - private Pipeline(PipelineID id, - ReplicationConfig replicationConfig, PipelineState state, - Map nodeStatus, UUID suggestedLeaderId) { - this.id = id; - this.replicationConfig = replicationConfig; - this.state = state; - this.nodeStatus = nodeStatus; - this.creationTimestamp = Instant.now(); - this.suggestedLeaderId = suggestedLeaderId; - this.replicaIndexes = new HashMap<>(); - this.stateEnterTime = Instant.now(); + private Pipeline(Builder b) { + id = b.id; + replicationConfig = b.replicationConfig; + state = b.state; + leaderId = b.leaderId; + suggestedLeaderId = b.suggestedLeaderId; + nodeStatus = b.nodeStatus; + nodesInOrder = b.nodesInOrder != null ? ImmutableList.copyOf(b.nodesInOrder) : ImmutableList.of(); + replicaIndexes = b.replicaIndexes != null ? ImmutableMap.copyOf(b.replicaIndexes) : ImmutableMap.of(); + creationTimestamp = b.creationTimestamp != null ? 
b.creationTimestamp : Instant.now(); + stateEnterTime = Instant.now(); } /** @@ -310,19 +312,6 @@ public boolean isOpen() { return state == PipelineState.OPEN; } - public boolean isAllocationTimeout() { - //TODO: define a system property to control the timeout value - return false; - } - - public void setNodesInOrder(List nodes) { - nodesInOrder.clear(); - if (null == nodes) { - return; - } - nodesInOrder.addAll(nodes); - } - public List getNodesInOrder() { if (nodesInOrder.isEmpty()) { LOG.debug("Nodes in order is empty, delegate to getNodes"); @@ -406,33 +395,39 @@ public HddsProtos.Pipeline getProtobufMessage(int clientVersion) // To save the message size on wire, only transfer the node order based on // network topology - List nodes = nodesInOrder; - if (!nodes.isEmpty()) { - for (int i = 0; i < nodes.size(); i++) { + if (!nodesInOrder.isEmpty()) { + for (int i = 0; i < nodesInOrder.size(); i++) { Iterator it = nodeStatus.keySet().iterator(); for (int j = 0; j < nodeStatus.keySet().size(); j++) { - if (it.next().equals(nodes.get(i))) { + if (it.next().equals(nodesInOrder.get(i))) { builder.addMemberOrders(j); break; } } } if (LOG.isDebugEnabled()) { - LOG.debug("Serialize pipeline {} with nodesInOrder {}", id, nodes); + LOG.debug("Serialize pipeline {} with nodesInOrder {}", id, nodesInOrder); } } return builder.build(); } - static Pipeline getFromProtobufSetCreationTimestamp( + private static Pipeline getFromProtobufSetCreationTimestamp( HddsProtos.Pipeline proto) throws UnknownPipelineStateException { - final Pipeline pipeline = getFromProtobuf(proto); - // When SCM is restarted, set Creation time with current time. - pipeline.setCreationTimestamp(Instant.now()); - return pipeline; + return toBuilder(proto) + .setCreateTimestamp(Instant.now()) + .build(); } - public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline) + public Pipeline copyWithNodesInOrder(List nodes) { + return toBuilder().setNodesInOrder(nodes).build(); + } + + public Builder toBuilder() { + return newBuilder(this); + } + + public static Builder toBuilder(HddsProtos.Pipeline pipeline) throws UnknownPipelineStateException { Preconditions.checkNotNull(pipeline, "Pipeline is null"); @@ -473,9 +468,13 @@ public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline) .setReplicaIndexes(nodes) .setLeaderId(leaderId) .setSuggestedLeaderId(suggestedLeaderId) - .setNodesInOrder(pipeline.getMemberOrdersList()) - .setCreateTimestamp(pipeline.getCreationTimeStamp()) - .build(); + .setNodeOrder(pipeline.getMemberOrdersList()) + .setCreateTimestamp(pipeline.getCreationTimeStamp()); + } + + public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline) + throws UnknownPipelineStateException { + return toBuilder(pipeline).build(); } @Override @@ -529,10 +528,6 @@ public static Builder newBuilder(Pipeline pipeline) { return new Builder(pipeline); } - private void setReplicaIndexes(Map replicaIndexes) { - this.replicaIndexes = replicaIndexes; - } - /** * Builder class for Pipeline. 
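[Editor's note] Because nodesInOrder is now an immutable list fixed at construction time, the old setNodesInOrder mutator is gone; client-side reordering goes through copyWithNodesInOrder, which clones the pipeline via toBuilder(). A minimal sketch (the sorted node list is hypothetical):

    // nodesSortedByDistance: nodes ordered by network distance to the client
    Pipeline sorted = pipeline.copyWithNodesInOrder(nodesSortedByDistance);
    // the original 'pipeline' object is left untouched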
*/ @@ -546,7 +541,7 @@ public static class Builder { private UUID leaderId = null; private Instant creationTimestamp = null; private UUID suggestedLeaderId = null; - private Map replicaIndexes = new HashMap<>(); + private Map replicaIndexes; public Builder() { } @@ -559,8 +554,8 @@ public Builder(Pipeline pipeline) { this.leaderId = pipeline.getLeaderId(); this.creationTimestamp = pipeline.getCreationTimestamp(); this.suggestedLeaderId = pipeline.getSuggestedLeaderId(); - this.replicaIndexes = new HashMap<>(); if (nodeStatus != null) { + replicaIndexes = new HashMap<>(); for (DatanodeDetails dn : nodeStatus.keySet()) { int index = pipeline.getReplicaIndex(dn); if (index > 0) { @@ -601,11 +596,22 @@ public Builder setNodes(List nodes) { return this; } - public Builder setNodesInOrder(List orders) { + public Builder setNodeOrder(List orders) { + // for build from ProtoBuf this.nodeOrder = orders; return this; } + public Builder setNodesInOrder(List nodes) { + this.nodesInOrder = new LinkedList<>(nodes); + return this; + } + + public Builder setCreateTimestamp(Instant instant) { + this.creationTimestamp = instant; + return this; + } + public Builder setCreateTimestamp(long createTimestamp) { this.creationTimestamp = Instant.ofEpochMilli(createTimestamp); return this; @@ -627,19 +633,8 @@ public Pipeline build() { Preconditions.checkNotNull(replicationConfig); Preconditions.checkNotNull(state); Preconditions.checkNotNull(nodeStatus); - Pipeline pipeline = - new Pipeline(id, replicationConfig, state, nodeStatus, - suggestedLeaderId); - pipeline.setLeaderId(leaderId); - // overwrite with original creationTimestamp - if (creationTimestamp != null) { - pipeline.setCreationTimestamp(creationTimestamp); - } - - pipeline.setReplicaIndexes(replicaIndexes); if (nodeOrder != null && !nodeOrder.isEmpty()) { - // This branch is for build from ProtoBuf List nodesWithOrder = new ArrayList<>(); for (int i = 0; i < nodeOrder.size(); i++) { int nodeIndex = nodeOrder.get(i); @@ -657,13 +652,10 @@ public Pipeline build() { LOG.debug("Deserialize nodesInOrder {} in pipeline {}", nodesWithOrder, id); } - pipeline.setNodesInOrder(nodesWithOrder); - } else if (nodesInOrder != null) { - // This branch is for pipeline clone - pipeline.setNodesInOrder(nodesInOrder); + nodesInOrder = nodesWithOrder; } - return pipeline; + return new Pipeline(this); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java index e8bddb42cfbd..90838366317f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java @@ -245,7 +245,7 @@ List queryNode(HddsProtos.NodeOperationalState opState, HddsProtos.Node queryNode(UUID uuid) throws IOException; - List decommissionNodes(List nodes) + List decommissionNodes(List nodes, boolean force) throws IOException; List recommissionNodes(List nodes) @@ -402,13 +402,20 @@ Map> getSafeModeRuleStatuses() * @return {@link StartContainerBalancerResponseProto} that contains the * start status and an optional message. 
*/ + @SuppressWarnings("checkstyle:parameternumber") StartContainerBalancerResponseProto startContainerBalancer( Optional threshold, Optional iterations, Optional maxDatanodesPercentageToInvolvePerIteration, Optional maxSizeToMovePerIterationInGB, Optional maxSizeEnteringTargetInGB, - Optional maxSizeLeavingSourceInGB) throws IOException; + Optional maxSizeLeavingSourceInGB, + Optional balancingInterval, + Optional moveTimeout, + Optional moveReplicationTimeout, + Optional networkTopologyEnable, + Optional includeNodes, + Optional excludeNodes) throws IOException; /** * Stop ContainerBalancer. @@ -474,4 +481,6 @@ List getListOfContainers( DecommissionScmResponseProto decommissionScm( String scmId) throws IOException; + + String getMetrics(String query) throws IOException; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java index 9acb0e5c33a7..d3f39c023b73 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java @@ -21,6 +21,8 @@ import java.nio.ByteBuffer; import java.util.List; import java.util.function.Function; + +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; @@ -38,10 +40,12 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadChunkResponseProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadContainerResponseProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ListBlockResponseProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkResponseProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; import org.apache.hadoop.ozone.common.ChunkBuffer; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations; import static org.apache.hadoop.hdds.scm.utils.ClientCommandsUtils.getReadChunkVersion; @@ -210,6 +214,28 @@ public static ContainerCommandResponseProto getPutFileResponseSuccess( .build(); } + /** + * Gets a response for the WriteChunk RPC. + * @param msg - ContainerCommandRequestProto + * @return - ContainerCommandResponseProto + */ + public static ContainerCommandResponseProto getWriteChunkResponseSuccess( + ContainerCommandRequestProto msg, BlockData blockData) { + + WriteChunkResponseProto.Builder writeChunk = + WriteChunkResponseProto.newBuilder(); + if (blockData != null) { + writeChunk.setCommittedBlockLength( + getCommittedBlockLengthResponseBuilder( + blockData.getSize(), blockData.getBlockID())); + + } + return getSuccessResponseBuilder(msg) + .setCmdType(Type.WriteChunk) + .setWriteChunk(writeChunk) + .build(); + } + /** * Gets a response to the read small file call. 
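[Editor's note] On the datanode side, the new getWriteChunkResponseSuccess above lets a WriteChunk handler report the committed block length when the request carried a piggybacked PutBlock (see the COMBINED_PUTBLOCK_WRITECHUNK_RPC version added earlier in this diff). A hedged sketch of a handler using it:

    // blockData is non-null only when the WriteChunk request included a PutBlock;
    // in that case the response also carries the committed block length.
    ContainerCommandResponseProto response =
        ContainerCommandResponseBuilders.getWriteChunkResponseSuccess(msg, blockData);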
* @param request - Msg @@ -319,6 +345,31 @@ public static ContainerCommandResponseProto getFinalizeBlockResponse( .build(); } + public static ContainerCommandResponseProto getEchoResponse( + ContainerCommandRequestProto msg) { + + ContainerProtos.EchoRequestProto echoRequest = msg.getEcho(); + int responsePayload = echoRequest.getPayloadSizeResp(); + + int sleepTimeMs = echoRequest.getSleepTimeMs(); + try { + if (sleepTimeMs > 0) { + Thread.sleep(sleepTimeMs); + } + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + + ContainerProtos.EchoResponseProto.Builder echo = + ContainerProtos.EchoResponseProto + .newBuilder() + .setPayload(UnsafeByteOperations.unsafeWrap(RandomUtils.nextBytes(responsePayload))); + + return getSuccessResponseBuilder(msg) + .setEcho(echo) + .build(); + } + private ContainerCommandResponseBuilders() { throw new UnsupportedOperationException("no instances"); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java index c85405566ca5..5f94f6d08474 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java @@ -29,6 +29,9 @@ import java.util.concurrent.ExecutionException; import java.util.function.Function; +import io.opentracing.Scope; +import io.opentracing.Span; +import io.opentracing.util.GlobalTracer; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -57,6 +60,8 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.FinalizeBlockRequestProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.EchoRequestProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.EchoResponseProto; import org.apache.hadoop.hdds.scm.XceiverClientReply; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.XceiverClientSpi.Validator; @@ -65,6 +70,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; +import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.ozone.common.Checksum; import org.apache.hadoop.ozone.common.ChecksumData; import org.apache.hadoop.security.token.Token; @@ -76,6 +82,7 @@ import org.slf4j.LoggerFactory; import static java.util.Collections.singletonList; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.BLOCK_TOKEN_VERIFICATION_FAILED; /** * Implementation of all container protocol calls performed by Container @@ -128,6 +135,10 @@ public static ListBlockResponseProto listBlock(XceiverClientSpi xceiverClient, if (token != null) { builder.setEncodedToken(token.encodeToUrlString()); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + builder.setTraceID(traceId); + } ContainerCommandRequestProto request = builder.build(); ContainerCommandResponseProto response = @@ -146,6 +157,17 @@ static T tryEachDatanode(Pipeline pipeline, try { return 
op.apply(d); } catch (IOException e) { + Span span = GlobalTracer.get().activeSpan(); + if (e instanceof StorageContainerException) { + StorageContainerException sce = (StorageContainerException)e; + // Block token expired. There's no point retrying other DN. + // Throw the exception to request a new block token right away. + if (sce.getResult() == BLOCK_TOKEN_VERIFICATION_FAILED) { + span.log("block token verification failed at DN " + d); + throw e; + } + } + span.log("failed to connect to DN " + d); excluded.add(d); if (excluded.size() < pipeline.size()) { LOG.warn(toErrorMessage.apply(d) @@ -203,6 +225,10 @@ private static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient, List validators, ContainerCommandRequestProto.Builder builder, DatanodeDetails datanode) throws IOException { + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + builder.setTraceID(traceId); + } final ContainerCommandRequestProto request = builder .setDatanodeUuid(datanode.getUuidString()).build(); ContainerCommandResponseProto response = @@ -238,6 +264,10 @@ private static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient, if (token != null) { builder.setEncodedToken(token.encodeToUrlString()); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + builder.setTraceID(traceId); + } ContainerCommandRequestProto request = builder.build(); ContainerCommandResponseProto response = xceiverClient.sendCommand(request, getValidatorList()); @@ -341,10 +371,19 @@ public static ContainerProtos.ReadChunkResponseProto readChunk( builder.setEncodedToken(token.encodeToUrlString()); } - return tryEachDatanode(xceiverClient.getPipeline(), - d -> readChunk(xceiverClient, chunk, blockID, - validators, builder, d), - d -> toErrorMessage(chunk, blockID, d)); + Span span = GlobalTracer.get() + .buildSpan("readChunk").start(); + try (Scope ignored = GlobalTracer.get().activateSpan(span)) { + span.setTag("offset", chunk.getOffset()) + .setTag("length", chunk.getLen()) + .setTag("block", blockID.toString()); + return tryEachDatanode(xceiverClient.getPipeline(), + d -> readChunk(xceiverClient, chunk, blockID, + validators, builder, d), + d -> toErrorMessage(chunk, blockID, d)); + } finally { + span.finish(); + } } private static ContainerProtos.ReadChunkResponseProto readChunk( @@ -352,10 +391,15 @@ private static ContainerProtos.ReadChunkResponseProto readChunk( List validators, ContainerCommandRequestProto.Builder builder, DatanodeDetails d) throws IOException { - final ContainerCommandRequestProto request = builder - .setDatanodeUuid(d.getUuidString()).build(); + ContainerCommandRequestProto.Builder requestBuilder = builder + .setDatanodeUuid(d.getUuidString()); + Span span = GlobalTracer.get().activeSpan(); + String traceId = TracingUtil.exportSpan(span); + if (traceId != null) { + requestBuilder = requestBuilder.setTraceID(traceId); + } ContainerCommandResponseProto reply = - xceiverClient.sendCommand(request, validators); + xceiverClient.sendCommand(requestBuilder.build(), validators); final ReadChunkResponseProto response = reply.getReadChunk(); final long readLen = getLen(response); if (readLen != chunk.getLen()) { @@ -394,8 +438,10 @@ static long getLen(ReadChunkResponseProto response) { */ public static XceiverClientReply writeChunkAsync( XceiverClientSpi xceiverClient, ChunkInfo chunk, BlockID blockID, - ByteString data, String tokenString, int replicationIndex) + ByteString data, String tokenString, + int replicationIndex, BlockData blockData) throws 
IOException, ExecutionException, InterruptedException { + WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto.newBuilder() .setBlockID(DatanodeBlockID.newBuilder() @@ -406,6 +452,12 @@ public static XceiverClientReply writeChunkAsync( .build()) .setChunkData(chunk) .setData(data); + if (blockData != null) { + PutBlockRequestProto.Builder createBlockRequest = + PutBlockRequestProto.newBuilder() + .setBlockData(blockData); + writeChunkRequest.setBlock(createBlockRequest); + } String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto.newBuilder() @@ -537,6 +589,11 @@ public static void createContainer(XceiverClientSpi client, if (encodedToken != null) { request.setEncodedToken(encodedToken); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + request.setTraceID(traceId); + } + request.setCmdType(ContainerProtos.Type.CreateContainer); request.setContainerID(containerID); request.setCreateContainer(createRequest.build()); @@ -566,6 +623,10 @@ public static void deleteContainer(XceiverClientSpi client, long containerID, if (encodedToken != null) { request.setEncodedToken(encodedToken); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + request.setTraceID(traceId); + } client.sendCommand(request.build(), getValidatorList()); } @@ -588,6 +649,10 @@ public static void closeContainer(XceiverClientSpi client, if (encodedToken != null) { request.setEncodedToken(encodedToken); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + request.setTraceID(traceId); + } client.sendCommand(request.build(), getValidatorList()); } @@ -611,6 +676,10 @@ public static ReadContainerResponseProto readContainer( if (encodedToken != null) { request.setEncodedToken(encodedToken); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + request.setTraceID(traceId); + } ContainerCommandResponseProto response = client.sendCommand(request.build(), getValidatorList()); @@ -646,12 +715,51 @@ public static GetSmallFileResponseProto readSmallFile(XceiverClientSpi client, if (token != null) { builder.setEncodedToken(token.encodeToUrlString()); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + builder.setTraceID(traceId); + } ContainerCommandRequestProto request = builder.build(); ContainerCommandResponseProto response = client.sendCommand(request, getValidatorList()); return response.getGetSmallFile(); } + /** + * Send an echo to DataNode. 
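
A condensed sketch of the optional PutBlock piggyback that writeChunkAsync gains above: when block metadata is supplied, it is embedded in the WriteChunk request so the datanode can commit the block in the same round trip and report the committed length via getWriteChunkResponseSuccess. The helper below is illustrative only.

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutBlockRequestProto;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto;

final class WriteChunkPiggybackSketch {
  /** Attaches block metadata to a chunk write when chunk + block should go in one RPC. */
  static void attachBlockIfPresent(WriteChunkRequestProto.Builder writeChunk, BlockData blockData) {
    if (blockData != null) {
      writeChunk.setBlock(PutBlockRequestProto.newBuilder().setBlockData(blockData));
    }
  }
}
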
+ * + * @return EchoResponseProto + */ + public static EchoResponseProto echo(XceiverClientSpi client, String encodedContainerID, + long containerID, ByteString payloadReqBytes, int payloadRespSizeKB, int sleepTimeMs) throws IOException { + ContainerProtos.EchoRequestProto getEcho = + EchoRequestProto + .newBuilder() + .setPayload(payloadReqBytes) + .setPayloadSizeResp(payloadRespSizeKB) + .setSleepTimeMs(sleepTimeMs) + .build(); + String id = client.getPipeline().getClosestNode().getUuidString(); + + ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto + .newBuilder() + .setCmdType(Type.Echo) + .setContainerID(containerID) + .setDatanodeUuid(id) + .setEcho(getEcho); + if (!encodedContainerID.isEmpty()) { + builder.setEncodedToken(encodedContainerID); + } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + builder.setTraceID(traceId); + } + ContainerCommandRequestProto request = builder.build(); + ContainerCommandResponseProto response = + client.sendCommand(request, getValidatorList()); + return response.getEcho(); + } + /** * Validates a response from a container protocol call. Any non-successful * return code is mapped to a corresponding exception and thrown. @@ -716,6 +824,10 @@ public static List toValidatorList(Validator validator) { if (token != null) { builder.setEncodedToken(token.encodeToUrlString()); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + builder.setTraceID(traceId); + } ContainerCommandRequestProto request = builder.build(); Map responses = xceiverClient.sendCommandOnAllNodes(request); @@ -741,6 +853,10 @@ public static List toValidatorList(Validator validator) { if (encodedToken != null) { request.setEncodedToken(encodedToken); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + request.setTraceID(traceId); + } Map responses = client.sendCommandOnAllNodes(request.build()); for (Map.Entry entry : diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java index d14129972c61..489cf3c41ce0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java @@ -20,13 +20,16 @@ import java.io.File; import java.io.IOException; +import java.util.HashMap; import java.util.List; import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.MappingIterator; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; import com.fasterxml.jackson.databind.ObjectWriter; +import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.SequenceWriter; import com.fasterxml.jackson.databind.SerializationFeature; import com.fasterxml.jackson.databind.node.ArrayNode; @@ -70,9 +73,24 @@ public static ArrayNode createArrayNode() { } public static ObjectNode createObjectNode(Object next) { + if (next == null) { + return MAPPER.createObjectNode(); + } return MAPPER.valueToTree(next); } + public static JsonNode readTree(String content) throws IOException { + return MAPPER.readTree(content); + } + + public static List> readTreeAsListOfMaps(String json) + throws IOException { + return MAPPER.readValue(json, + new TypeReference>>() { + }); + } + + /** * Utility to sequentially write a large collection of items to a file. 
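
A hedged usage sketch for the two JsonUtils readers added above; the JSON literals are examples only, and the element type of readTreeAsListOfMaps is left as a wildcard since only its size is used here.

import java.io.IOException;
import java.util.List;
import com.fasterxml.jackson.databind.JsonNode;
import org.apache.hadoop.hdds.server.JsonUtils;

final class JsonUtilsUsageSketch {
  static void demo() throws IOException {
    JsonNode root = JsonUtils.readTree("{\"status\":\"HEALTHY\"}");
    List<?> rows = JsonUtils.readTreeAsListOfMaps("[{\"id\":1},{\"id\":2}]");
    System.out.println(root.get("status").asText() + ", rows=" + rows.size());
  }
}
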
*/ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java index b968d407232c..29bd847319ea 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java @@ -139,6 +139,16 @@ public static boolean isTracingEnabled( ScmConfigKeys.HDDS_TRACING_ENABLED_DEFAULT); } + /** + * Execute {@code runnable} inside an activated new span. + */ + public static void executeInNewSpan(String spanName, + CheckedRunnable runnable) throws E { + Span span = GlobalTracer.get() + .buildSpan(spanName).start(); + executeInSpan(span, runnable); + } + /** * Execute {@code supplier} inside an activated new span. */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java index 4620a483385e..6a234ab5064a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java @@ -67,7 +67,7 @@ public static void close(Logger logger, AutoCloseable... closeables) { * Close each argument, catching exceptions and logging them as error. */ public static void close(Logger logger, - Collection closeables) { + Collection closeables) { if (closeables == null) { return; } @@ -94,7 +94,7 @@ public static void closeQuietly(AutoCloseable... closeables) { /** * Close each argument, swallowing exceptions. */ - public static void closeQuietly(Collection closeables) { + public static void closeQuietly(Collection closeables) { close(null, closeables); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java index 64e494a5af10..1ac293b301bb 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java @@ -28,6 +28,7 @@ import org.apache.ratis.thirdparty.io.netty.buffer.Unpooled; import org.apache.ratis.util.MemoizedSupplier; import org.apache.ratis.util.Preconditions; +import org.apache.ratis.util.UncheckedAutoCloseable; import org.apache.ratis.util.function.CheckedFunction; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -50,7 +51,7 @@ * A buffer used by {@link Codec} * for supporting RocksDB direct {@link ByteBuffer} APIs. */ -public class CodecBuffer implements AutoCloseable { +public class CodecBuffer implements UncheckedAutoCloseable { public static final Logger LOG = LoggerFactory.getLogger(CodecBuffer.class); /** To create {@link CodecBuffer} instances. */ @@ -340,6 +341,12 @@ public int readableBytes() { return buf.readableBytes(); } + /** @return a writable {@link ByteBuffer}. */ + public ByteBuffer asWritableByteBuffer() { + assertRefCnt(1); + return buf.nioBuffer(0, buf.maxCapacity()); + } + /** @return a readonly {@link ByteBuffer} view of this buffer. 
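
A sketch of the new executeInNewSpan overload above that takes a CheckedRunnable; it assumes the overload is generic in the thrown exception type (as the throws E in the hunk suggests), so the checked exception raised by the lambda propagates unchanged to the caller.

import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.hdds.tracing.TracingUtil;

final class NewSpanSketch {
  /** Runs the read inside a span named "example-read"; IOException propagates to the caller. */
  static int tracedRead(InputStream in) throws IOException {
    final int[] result = new int[1];
    TracingUtil.executeInNewSpan("example-read", () -> {
      result[0] = in.read();   // checked work executed inside the activated span
    });
    return result[0];
  }
}
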
*/ public ByteBuffer asReadOnlyByteBuffer() { assertRefCnt(1); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index a0d4b59db168..070edac15f52 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -36,9 +36,9 @@ @InterfaceAudience.Public @InterfaceStability.Unstable public final class OzoneConfigKeys { - public static final String DFS_CONTAINER_IPC_PORT = - "dfs.container.ipc"; - public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 9859; + public static final String HDDS_CONTAINER_IPC_PORT = + "hdds.container.ipc.port"; + public static final int HDDS_CONTAINER_IPC_PORT_DEFAULT = 9859; public static final String OZONE_METADATA_DIRS = "ozone.metadata.dirs"; @@ -56,52 +56,52 @@ public final class OzoneConfigKeys { * so that a mini cluster is able to launch multiple containers on a node. * * When set to false (default), the container port will be specified as - * {@link #DFS_CONTAINER_IPC_PORT} and the default value will be specified - * as {@link #DFS_CONTAINER_IPC_PORT_DEFAULT}. + * {@link #HDDS_CONTAINER_IPC_PORT} and the default value will be specified + * as {@link #HDDS_CONTAINER_IPC_PORT_DEFAULT}. */ - public static final String DFS_CONTAINER_IPC_RANDOM_PORT = - "dfs.container.ipc.random.port"; - public static final boolean DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT = + public static final String HDDS_CONTAINER_IPC_RANDOM_PORT = + "hdds.container.ipc.random.port"; + public static final boolean HDDS_CONTAINER_IPC_RANDOM_PORT_DEFAULT = false; - public static final String DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT = - "dfs.container.ratis.datastream.random.port"; + public static final String HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT = + "hdds.container.ratis.datastream.random.port"; public static final boolean - DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT = + HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT = false; - public static final String DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY = - "dfs.container.chunk.write.sync"; - public static final boolean DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = false; + public static final String HDDS_CONTAINER_CHUNK_WRITE_SYNC_KEY = + "hdds.container.chunk.write.sync"; + public static final boolean HDDS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = false; /** * Ratis Port where containers listen to. */ - public static final String DFS_CONTAINER_RATIS_IPC_PORT = - "dfs.container.ratis.ipc"; - public static final int DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858; + public static final String HDDS_CONTAINER_RATIS_IPC_PORT = + "hdds.container.ratis.ipc.port"; + public static final int HDDS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858; /** * Ratis Port where containers listen to admin requests. */ - public static final String DFS_CONTAINER_RATIS_ADMIN_PORT = - "dfs.container.ratis.admin.port"; - public static final int DFS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT = 9857; + public static final String HDDS_CONTAINER_RATIS_ADMIN_PORT = + "hdds.container.ratis.admin.port"; + public static final int HDDS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT = 9857; /** * Ratis Port where containers listen to server-to-server requests. 
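
The dfs.* container keys are renamed to hdds.* in this hunk with their defaults unchanged; a minimal sketch of resolving one of the renamed keys through the new constants:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConfigKeys;

final class RenamedKeySketch {
  /** Resolves hdds.container.ipc.port, falling back to the unchanged default of 9859. */
  static int containerIpcPort(OzoneConfiguration conf) {
    return conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT,
        OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT);
  }
}
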
*/ - public static final String DFS_CONTAINER_RATIS_SERVER_PORT = - "dfs.container.ratis.server.port"; - public static final int DFS_CONTAINER_RATIS_SERVER_PORT_DEFAULT = 9856; + public static final String HDDS_CONTAINER_RATIS_SERVER_PORT = + "hdds.container.ratis.server.port"; + public static final int HDDS_CONTAINER_RATIS_SERVER_PORT_DEFAULT = 9856; /** * Ratis Port where containers listen to datastream requests. */ - public static final String DFS_CONTAINER_RATIS_DATASTREAM_ENABLED - = "dfs.container.ratis.datastream.enabled"; - public static final boolean DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT + public static final String HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED + = "hdds.container.ratis.datastream.enabled"; + public static final boolean HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT = false; - public static final String DFS_CONTAINER_RATIS_DATASTREAM_PORT - = "dfs.container.ratis.datastream.port"; - public static final int DFS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT + public static final String HDDS_CONTAINER_RATIS_DATASTREAM_PORT + = "hdds.container.ratis.datastream.port"; + public static final int HDDS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT = 9855; /** @@ -126,7 +126,7 @@ public final class OzoneConfigKeys { public static final String OZONE_FS_HSYNC_ENABLED = "ozone.fs.hsync.enabled"; public static final boolean OZONE_FS_HSYNC_ENABLED_DEFAULT - = false; + = true; /** * hsync lease soft limit. @@ -141,9 +141,9 @@ public final class OzoneConfigKeys { * When set to true, allocate a random free port for ozone container, so that * a mini cluster is able to launch multiple containers on a node. */ - public static final String DFS_CONTAINER_RATIS_IPC_RANDOM_PORT = - "dfs.container.ratis.ipc.random.port"; - public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT = + public static final String HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT = + "hdds.container.ratis.ipc.random.port"; + public static final boolean HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT = false; public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS = @@ -333,97 +333,97 @@ public final class OzoneConfigKeys { public static final int OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10; - public static final String DFS_CONTAINER_RATIS_ENABLED_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY; - public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_ENABLED_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY; + public static final boolean HDDS_CONTAINER_RATIS_ENABLED_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY; + public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT; public static final String - DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME; + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME; public static final int - 
DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT = ScmConfigKeys. - DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT; - public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY; + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY; public static final ReplicationLevel - DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT; - public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY; - public static final int DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; - public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY; + HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY; + public static final int HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; + public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY; public static final String - DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT; + HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT; // config settings to enable stateMachineData write timeout public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT; + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT; public static final TimeDuration - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT; + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT; - public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR = - "dfs.container.ratis.datanode.storage.dir"; + public static final String HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR = + "hdds.container.ratis.datanode.storage.dir"; - public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = - ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY; + public 
static final String HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = + ScmConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY; public static final TimeDuration - DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = - ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT; + HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = + ScmConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT; public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS; - public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT; + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES; + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS; + public static final int HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT; + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT; public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; public static final int - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT; public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP; - public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_LOG_PURGE_GAP = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP; + public static final int 
HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT; public static final String - DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT; + HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT; public static final String - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = - ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = + ScmConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; public static final TimeDuration - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = - ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT; - public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY = - ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY; - public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = - ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT; + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = + ScmConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT; + public static final String HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY = + ScmConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY; + public static final long HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = + ScmConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT; public static final String HDDS_DATANODE_PLUGINS_KEY = "hdds.datanode.plugins"; @@ -681,6 +681,16 @@ public final class OzoneConfigKeys { "hdds.scmclient.failover.max.retry"; + public static final String OZONE_XCEIVER_CLIENT_METRICS_PERCENTILES_INTERVALS_SECONDS_KEY = + "ozone.xceiver.client.metrics.percentiles.intervals.seconds"; + + public static final String + OZONE_OM_NETWORK_TOPOLOGY_REFRESH_DURATION = + "ozone.om.network.topology.refresh.duration"; + public static final String + OZONE_OM_NETWORK_TOPOLOGY_REFRESH_DURATION_DEFAULT = "1h"; + + /** * There is no need to instantiate this class. */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 9069c425e7d7..5d8eb4327413 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -589,4 +589,9 @@ private OzoneConsts() { */ public static final String COMPACTION_LOG_TABLE = "compactionLogTable"; + + /** + * S3G multipart upload request's ETag header key. 
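
For the new OM network-topology refresh setting added above (default "1h"), a usage sketch assuming the string-default getTimeDuration overload of Hadoop's Configuration:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConfigKeys;

final class TopologyRefreshSketch {
  /** Returns the refresh interval in milliseconds (3_600_000 for the default "1h"). */
  static long refreshIntervalMs(OzoneConfiguration conf) {
    return conf.getTimeDuration(
        OzoneConfigKeys.OZONE_OM_NETWORK_TOPOLOGY_REFRESH_DURATION,
        OzoneConfigKeys.OZONE_OM_NETWORK_TOPOLOGY_REFRESH_DURATION_DEFAULT,
        TimeUnit.MILLISECONDS);
  }
}
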
+ */ + public static final String ETAG = "ETag"; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java index 5fab7eacdf6d..1d596bf70077 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java @@ -17,9 +17,13 @@ */ package org.apache.hadoop.ozone.common; +import org.apache.hadoop.hdds.JavaUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; import java.lang.reflect.Field; import java.nio.ByteBuffer; import java.util.zip.Checksum; @@ -35,6 +39,8 @@ public class ChecksumByteBufferImpl implements ChecksumByteBuffer { private final Checksum checksum; private static final Field IS_READY_ONLY_FIELD; + // To access Checksum.update(ByteBuffer) API from Java 9+. + private static final MethodHandle BYTE_BUFFER_UPDATE; static { Field f = null; @@ -46,6 +52,18 @@ public class ChecksumByteBufferImpl implements ChecksumByteBuffer { LOG.error("No isReadOnly field in ByteBuffer", e); } IS_READY_ONLY_FIELD = f; + + MethodHandle byteBufferUpdate = null; + if (JavaUtils.isJavaVersionAtLeast(9)) { + try { + byteBufferUpdate = MethodHandles.publicLookup().findVirtual(Checksum.class, "update", + MethodType.methodType(void.class, ByteBuffer.class)); + } catch (Throwable t) { + throw new IllegalStateException("Failed to lookup Checksum.update(ByteBuffer)."); + } + } + BYTE_BUFFER_UPDATE = byteBufferUpdate; + } public ChecksumByteBufferImpl(Checksum impl) { @@ -57,6 +75,17 @@ public ChecksumByteBufferImpl(Checksum impl) { // should be refactored to simply call checksum.update(buffer), as the // Checksum interface has been enhanced to allow this since Java 9. public void update(ByteBuffer buffer) { + // Prefer JDK9+ implementation that allows ByteBuffer. This allows DirectByteBuffer to be checksum directly in + // native memory. + if (BYTE_BUFFER_UPDATE != null) { + try { + BYTE_BUFFER_UPDATE.invokeExact(checksum, buffer); + return; + } catch (Throwable e) { + throw new IllegalStateException("Error invoking " + BYTE_BUFFER_UPDATE, e); + } + } + // this is a hack to not do memory copy. if (IS_READY_ONLY_FIELD != null) { try { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java index 3948b5f04fc0..058934c2f27d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java @@ -27,10 +27,12 @@ import org.apache.hadoop.hdds.scm.ByteStringConversion; +import org.apache.hadoop.hdds.utils.db.CodecBuffer; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.util.UncheckedAutoCloseable; /** Buffer for a block chunk. */ -public interface ChunkBuffer { +public interface ChunkBuffer extends UncheckedAutoCloseable { /** Similar to {@link ByteBuffer#allocate(int)}. 
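
Background for the ChecksumByteBufferImpl change above: java.util.zip checksums can consume a ByteBuffer directly (CRC32 since Java 8, the Checksum interface itself since Java 9), which lets a direct buffer be checksummed in native memory without copying into a heap array. A self-contained, JDK-only illustration:

import java.nio.ByteBuffer;
import java.util.zip.CRC32;

final class DirectChecksumSketch {
  static long crcOfDirect(byte[] data) {
    ByteBuffer direct = ByteBuffer.allocateDirect(data.length);
    direct.put(data);
    direct.flip();
    CRC32 crc = new CRC32();
    crc.update(direct);   // consumes the buffer in place, no heap copy
    return crc.getValue();
  }
}
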
*/ static ChunkBuffer allocate(int capacity) { @@ -49,7 +51,8 @@ static ChunkBuffer allocate(int capacity, int increment) { if (increment > 0 && increment < capacity) { return new IncrementalChunkBuffer(capacity, increment, false); } - return new ChunkBufferImplWithByteBuffer(ByteBuffer.allocate(capacity)); + CodecBuffer codecBuffer = CodecBuffer.allocateDirect(capacity); + return new ChunkBufferImplWithByteBuffer(codecBuffer.asWritableByteBuffer(), codecBuffer); } /** Wrap the given {@link ByteBuffer} as a {@link ChunkBuffer}. */ @@ -86,6 +89,9 @@ default boolean hasRemaining() { /** Similar to {@link ByteBuffer#clear()}. */ ChunkBuffer clear(); + default void close() { + } + /** Similar to {@link ByteBuffer#put(ByteBuffer)}. */ ChunkBuffer put(ByteBuffer b); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java index 0cf49681cb16..fe2ee5fa8acb 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java @@ -28,13 +28,27 @@ import java.util.function.Function; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.util.UncheckedAutoCloseable; /** {@link ChunkBuffer} implementation using a single {@link ByteBuffer}. */ final class ChunkBufferImplWithByteBuffer implements ChunkBuffer { private final ByteBuffer buffer; + private final UncheckedAutoCloseable underlying; ChunkBufferImplWithByteBuffer(ByteBuffer buffer) { + this(buffer, null); + } + + ChunkBufferImplWithByteBuffer(ByteBuffer buffer, UncheckedAutoCloseable underlying) { this.buffer = Objects.requireNonNull(buffer, "buffer == null"); + this.underlying = underlying; + } + + @Override + public void close() { + if (underlying != null) { + underlying.close(); + } } @Override diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java index 5a63c09f1234..dda4fae0d2b5 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.common; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.utils.db.CodecBuffer; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import java.io.IOException; @@ -47,6 +48,8 @@ final class IncrementalChunkBuffer implements ChunkBuffer { private final int limitIndex; /** Buffer list to be allocated incrementally. */ private final List buffers; + /** The underlying buffers. */ + private final List underlying; /** Is this a duplicated buffer? (for debug only) */ private final boolean isDuplicated; /** The index of the first non-full buffer. */ @@ -58,11 +61,18 @@ final class IncrementalChunkBuffer implements ChunkBuffer { this.limit = limit; this.increment = increment; this.limitIndex = limit / increment; - this.buffers = new ArrayList<>( - limitIndex + (limit % increment == 0 ? 0 : 1)); + int size = limitIndex + (limit % increment == 0 ? 0 : 1); + this.buffers = new ArrayList<>(size); + this.underlying = isDuplicated ? 
Collections.emptyList() : new ArrayList<>(size); this.isDuplicated = isDuplicated; } + @Override + public void close() { + underlying.forEach(CodecBuffer::release); + underlying.clear(); + } + /** @return the capacity for the buffer at the given index. */ private int getBufferCapacityAtIndex(int i) { Preconditions.checkArgument(i >= 0); @@ -99,6 +109,7 @@ private ByteBuffer getAtIndex(int i) { /** @return the i-th buffer. It may allocate buffers. */ private ByteBuffer getAndAllocateAtIndex(int index) { + Preconditions.checkState(!isDuplicated, "Duplicated buffer is readonly."); Preconditions.checkArgument(index >= 0); // never allocate over limit if (limit % increment == 0) { @@ -115,7 +126,9 @@ private ByteBuffer getAndAllocateAtIndex(int index) { // allocate upto the given index ByteBuffer b = null; for (; i <= index; i++) { - b = ByteBuffer.allocate(getBufferCapacityAtIndex(i)); + final CodecBuffer c = CodecBuffer.allocateDirect(getBufferCapacityAtIndex(i)); + underlying.add(c); + b = c.asWritableByteBuffer(); buffers.add(b); } return b; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java index 23c3dbaf1520..6bd83b44a93f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java @@ -200,11 +200,11 @@ public long getUnknownMessagesReceived() { return unknownMessagesReceived.value(); } - public MutableRate getGrpcQueueTime() { + MutableRate getGrpcQueueTime() { return grpcQueueTime; } - public MutableRate getGrpcProcessingTime() { + MutableRate getGrpcProcessingTime() { return grpcProcessingTime; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/PerformanceMetricsInitializer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/PerformanceMetricsInitializer.java index 83d61cab6857..0a50eab19509 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/PerformanceMetricsInitializer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/PerformanceMetricsInitializer.java @@ -70,7 +70,7 @@ public static void initialize(T source, MetricsRegistry registry, * @param intervals intervals for quantiles * @return an instance of PerformanceMetrics */ - private static PerformanceMetrics getMetrics( + public static PerformanceMetrics getMetrics( MetricsRegistry registry, String name, String description, String sampleName, String valueName, int[] intervals) { return new PerformanceMetrics( diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 47067de5fede..8687dbf1a52e 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -46,26 +46,26 @@ - dfs.container.ipc + hdds.container.ipc.port 9859 OZONE, CONTAINER, MANAGEMENT The ipc port number of container. - dfs.container.ratis.datastream.enabled + hdds.container.ratis.datastream.enabled false OZONE, CONTAINER, RATIS, DATASTREAM It specifies whether to enable data stream of container. - dfs.container.ratis.datastream.port + hdds.container.ratis.datastream.port 9855 OZONE, CONTAINER, RATIS, DATASTREAM The datastream port number of container. 
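
Because ChunkBuffer and IncrementalChunkBuffer are now backed by direct CodecBuffers (see the hunks above), callers are expected to release them; a minimal usage sketch, with arbitrary sizes:

import java.nio.ByteBuffer;
import org.apache.hadoop.ozone.common.ChunkBuffer;

final class ChunkBufferReleaseSketch {
  /** Allocates an incrementally-growing buffer and releases the direct memory on close(). */
  static void copy(byte[] data) {
    try (ChunkBuffer buffer = ChunkBuffer.allocate(data.length, 4096)) {
      buffer.put(ByteBuffer.wrap(data));
    }
  }
}
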
- dfs.container.ratis.datastream.random.port + hdds.container.ratis.datastream.random.port false OZONE, CONTAINER, RATIS, DATASTREAM Allocates a random free port for ozone container datastream. @@ -73,7 +73,7 @@ - dfs.container.ipc.random.port + hdds.container.ipc.random.port false OZONE, DEBUG, CONTAINER Allocates a random free port for ozone container. This is used @@ -82,7 +82,7 @@ - dfs.container.chunk.write.sync + hdds.container.chunk.write.sync false OZONE, CONTAINER, MANAGEMENT Determines whether the chunk writes in the container happen as @@ -90,19 +90,19 @@ - dfs.container.ratis.statemachinedata.sync.timeout + hdds.container.ratis.statemachinedata.sync.timeout 10s OZONE, DEBUG, CONTAINER, RATIS Timeout for StateMachine data writes by Ratis. - dfs.container.ratis.statemachinedata.sync.retries + hdds.container.ratis.statemachinedata.sync.retries OZONE, DEBUG, CONTAINER, RATIS Number of times the WriteStateMachineData op will be tried before failing. If the value is not configured, it will default - to (hdds.ratis.rpc.slowness.timeout / dfs.container.ratis.statemachinedata.sync.timeout), + to (hdds.ratis.rpc.slowness.timeout / hdds.container.ratis.statemachinedata.sync.timeout), which means that the WriteStatMachineData will be retried for every sync timeout until the configured slowness timeout is hit, after which the StateMachine will close down the pipeline. @@ -112,21 +112,22 @@ - dfs.container.ratis.log.queue.num-elements + hdds.container.ratis.log.queue.num-elements 1024 OZONE, DEBUG, CONTAINER, RATIS Limit for the number of operations in Ratis Log Worker. - dfs.container.ratis.log.queue.byte-limit + hdds.container.ratis.log.queue.byte-limit 4GB OZONE, DEBUG, CONTAINER, RATIS Byte limit for Ratis Log Worker queue. - dfs.container.ratis.log.appender.queue.num-elements + hdds.container.ratis.log.appender.queue.num-elements + 1 OZONE, DEBUG, CONTAINER, RATIS Limit for number of append entries in ratis leader's @@ -134,14 +135,16 @@ - dfs.container.ratis.log.appender.queue.byte-limit + hdds.container.ratis.log.appender.queue.byte-limit + 32MB OZONE, DEBUG, CONTAINER, RATIS Byte limit for ratis leader's log appender queue. - dfs.container.ratis.log.purge.gap + hdds.container.ratis.log.purge.gap + 1000000 OZONE, DEBUG, CONTAINER, RATIS Purge gap between the last purged commit index @@ -149,7 +152,7 @@ - dfs.container.ratis.datanode.storage.dir + hdds.container.ratis.datanode.storage.dir OZONE, CONTAINER, STORAGE, MANAGEMENT, RATIS This directory is used for storing Ratis metadata like logs. If @@ -223,7 +226,7 @@ - dfs.container.ratis.enabled + hdds.container.ratis.enabled false OZONE, MANAGEMENT, PIPELINE, RATIS Ozone supports different kinds of replication pipelines. Ratis @@ -232,25 +235,26 @@ - dfs.container.ratis.ipc + hdds.container.ratis.ipc.port 9858 OZONE, CONTAINER, PIPELINE, RATIS The ipc port number of container for clients. - dfs.container.ratis.admin.port + hdds.container.ratis.admin.port 9857 OZONE, CONTAINER, PIPELINE, RATIS, MANAGEMENT The ipc port number of container for admin requests. - dfs.container.ratis.server.port + hdds.container.ratis.server.port 9856 OZONE, CONTAINER, PIPELINE, RATIS, MANAGEMENT The ipc port number of container for server-server communication. 
- dfs.container.ratis.ipc.random.port + hdds.container.ratis.ipc.random.port + false OZONE,DEBUG Allocates a random free port for ozone ratis port for the @@ -259,7 +263,7 @@ - dfs.container.ratis.rpc.type + hdds.container.ratis.rpc.type GRPC OZONE, RATIS, MANAGEMENT Ratis supports different kinds of transports like netty, GRPC, @@ -268,7 +272,7 @@ - dfs.ratis.snapshot.threshold + hdds.ratis.snapshot.threshold 10000 OZONE, RATIS Number of transactions after which a ratis snapshot should be @@ -276,16 +280,16 @@ - dfs.container.ratis.statemachine.max.pending.apply-transactions + hdds.container.ratis.statemachine.max.pending.apply-transactions 10000 OZONE, RATIS Maximum number of pending apply transactions in a data pipeline. The default value is kept same as default snapshot threshold - dfs.ratis.snapshot.threshold. + hdds.ratis.snapshot.threshold. - dfs.container.ratis.num.write.chunk.threads.per.volume + hdds.container.ratis.num.write.chunk.threads.per.volume 10 OZONE, RATIS, PERFORMANCE Maximum number of threads in the thread pool that Datanode @@ -295,7 +299,8 @@ - dfs.container.ratis.leader.pending.bytes.limit + hdds.container.ratis.leader.pending.bytes.limit + 1GB OZONE, RATIS, PERFORMANCE Limit on the total bytes of pending requests after which @@ -303,7 +308,7 @@ - dfs.container.ratis.replication.level + hdds.container.ratis.replication.level MAJORITY OZONE, RATIS Replication level to be used by datanode for submitting a @@ -312,7 +317,7 @@ - dfs.container.ratis.num.container.op.executors + hdds.container.ratis.num.container.op.executors 10 OZONE, RATIS, PERFORMANCE Number of executors that will be used by Ratis to execute @@ -320,7 +325,7 @@ - dfs.container.ratis.segment.size + hdds.container.ratis.segment.size 64MB OZONE, RATIS, PERFORMANCE The size of the raft segment file used @@ -328,7 +333,7 @@ - dfs.container.ratis.segment.preallocated.size + hdds.container.ratis.segment.preallocated.size 4MB OZONE, RATIS, PERFORMANCE The pre-allocated file size for raft segment used @@ -336,13 +341,13 @@ - dfs.ratis.server.retry-cache.timeout.duration + hdds.ratis.server.retry-cache.timeout.duration 600000ms OZONE, RATIS, MANAGEMENT Retry Cache entry timeout for ratis server. - dfs.ratis.leader.election.minimum.timeout.duration + hdds.ratis.leader.election.minimum.timeout.duration 5s OZONE, RATIS, MANAGEMENT The minimum timeout duration for ratis leader election. @@ -707,7 +712,7 @@ For production clusters or any time you care about performance, it is recommended that ozone.om.db.dirs, ozone.scm.db.dirs and - dfs.container.ratis.datanode.storage.dir be configured separately. + hdds.container.ratis.datanode.storage.dir be configured separately. @@ -856,8 +861,8 @@ - ozone.chunk.list.incremental - false + ozone.incremental.chunk.list + true OZONE, CLIENT, DATANODE, PERFORMANCE By default, a writer client sends full chunk list of a block when it @@ -1931,7 +1936,14 @@ Setting this interval equal to the metrics sampling time ensures more detailed metrics. - + + ozone.xceiver.client.metrics.percentiles.intervals.seconds + 60 + XCEIVER, PERFORMANCE + Specifies the interval in seconds for the rollover of XceiverClient MutableQuantiles metrics. + Setting this interval equal to the metrics sampling time ensures more detailed metrics. + + ozone.om.save.metrics.interval 5m @@ -2248,6 +2260,14 @@ OZONE, SECURITY, KERBEROS The OzoneManager service principal. 
Ex om/_HOST@REALM.COM + + ozone.om.kerberos.principal.pattern + * + + A client-side RegEx that can be configured to control + allowed realms to authenticate with (useful in cross-realm env.) + + ozone.om.http.auth.kerberos.principal HTTP/_HOST@REALM @@ -2796,6 +2816,14 @@ manager admin protocol. + + ozone.security.reconfigure.protocol.acl + * + SECURITY + + Comma separated list of users and groups allowed to access reconfigure protocol. + + hdds.datanode.http.auth.kerberos.principal @@ -3792,6 +3820,14 @@ Wait duration before which close container is send to DN. + + ozone.om.network.topology.refresh.duration + 1h + SCM, OZONE, OM + The duration at which we periodically fetch the updated network + topology cluster tree from SCM. + + ozone.scm.ha.ratis.server.snapshot.creation.gap 1024 @@ -4116,7 +4152,7 @@ ozone.fs.hsync.enabled - false + true OZONE, CLIENT Enable hsync/hflush. By default they are disabled. @@ -4295,15 +4331,6 @@ - - ozone.om.snapshot.sst_dumptool.pool.size - 1 - OZONE, OM - - Threadpool size for SST Dumptool which would be used for computing snapdiff when native library is enabled. - - - ozone.om.snapshot.load.native.lib true @@ -4313,15 +4340,6 @@ - - ozone.om.snapshot.sst_dumptool.buffer.size - 8KB - OZONE, OM - - Buffer size for SST Dumptool Pipe which would be used for computing snapdiff when native library is enabled. - - - ozone.om.snapshot.diff.max.allowed.keys.changed.per.job 10000000 diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageSource.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageSource.java index 76b6a0db89b3..b20ce53597eb 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageSource.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageSource.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdds.fs; +import java.util.concurrent.atomic.AtomicLong; + /** * {@link SpaceUsageSource} implementations for testing. 
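
The test-only factory added in the MockSpaceUsageSource hunk that continues below wires a fixed capacity to a mutable usage counter; a hedged usage sketch (the sizes are arbitrary):

import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hdds.fs.MockSpaceUsageSource;
import org.apache.hadoop.hdds.fs.SpaceUsageSource;

final class MockSpaceUsageSketch {
  static SpaceUsageSource demo() {
    AtomicLong used = new AtomicLong();
    SpaceUsageSource source = MockSpaceUsageSource.of(100L << 20, used);  // 100 MB capacity
    used.addAndGet(4L << 20);                          // simulate 4 MB written
    assert source.getAvailable() == (96L << 20);       // available tracks the counter
    return source;
  }
}
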
*/ @@ -35,6 +37,26 @@ public static SpaceUsageSource fixed(long capacity, long available, return new SpaceUsageSource.Fixed(capacity, available, used); } + /** @return {@code SpaceUsageSource} with fixed capacity and dynamic usage */ + public static SpaceUsageSource of(long capacity, AtomicLong used) { + return new SpaceUsageSource() { + @Override + public long getUsedSpace() { + return used.get(); + } + + @Override + public long getCapacity() { + return capacity; + } + + @Override + public long getAvailable() { + return getCapacity() - getUsedSpace(); + } + }; + } + private MockSpaceUsageSource() { throw new UnsupportedOperationException("no instances"); } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java index 9567fa2c281e..0d30d43dc01f 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java @@ -19,8 +19,10 @@ import org.apache.hadoop.util.PureJavaCrc32; import org.apache.hadoop.util.PureJavaCrc32C; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import org.apache.commons.lang3.RandomUtils; import java.util.zip.Checksum; @@ -45,6 +47,23 @@ public void testPureJavaCrc32CByteBuffer() { new VerifyChecksumByteBuffer(expected, testee).testCorrectness(); } + @Test + public void testWithDirectBuffer() { + final ChecksumByteBuffer checksum = ChecksumByteBufferFactory.crc32CImpl(); + byte[] value = "test".getBytes(StandardCharsets.UTF_8); + checksum.reset(); + checksum.update(value, 0, value.length); + long checksum1 = checksum.getValue(); + + ByteBuffer byteBuffer = ByteBuffer.allocateDirect(value.length); + byteBuffer.put(value).rewind(); + checksum.reset(); + checksum.update(byteBuffer); + long checksum2 = checksum.getValue(); + + Assertions.assertEquals(checksum1, checksum2); + } + static class VerifyChecksumByteBuffer { private final Checksum expected; private final ChecksumByteBuffer testee; diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java index 3d6d38f3d3bd..b5212825e58b 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java @@ -29,7 +29,11 @@ import org.apache.hadoop.hdds.utils.MockGatheringChannel; +import org.apache.hadoop.hdds.utils.db.CodecBuffer; +import org.apache.hadoop.hdds.utils.db.CodecTestUtil; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -46,6 +50,16 @@ private static int nextInt(int n) { return ThreadLocalRandom.current().nextInt(n); } + @BeforeAll + public static void beforeAll() { + CodecBuffer.enableLeakDetection(); + } + + @AfterEach + public void after() throws Exception { + CodecTestUtil.gc(); + } + @Test @Timeout(1) void testImplWithByteBuffer() throws IOException { @@ -59,7 +73,9 @@ void testImplWithByteBuffer() throws IOException { private static void runTestImplWithByteBuffer(int n) throws IOException { final byte[] expected = new 
byte[n]; ThreadLocalRandom.current().nextBytes(expected); - runTestImpl(expected, 0, ChunkBuffer.allocate(n)); + try (ChunkBuffer c = ChunkBuffer.allocate(n)) { + runTestImpl(expected, 0, c); + } } @Test @@ -78,8 +94,9 @@ void testIncrementalChunkBuffer() throws IOException { private static void runTestIncrementalChunkBuffer(int increment, int n) throws IOException { final byte[] expected = new byte[n]; ThreadLocalRandom.current().nextBytes(expected); - runTestImpl(expected, increment, - new IncrementalChunkBuffer(n, increment, false)); + try (IncrementalChunkBuffer c = new IncrementalChunkBuffer(n, increment, false)) { + runTestImpl(expected, increment, c); + } } @Test diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java index 4ed59669a9df..e121e4333a0d 100644 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java +++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java @@ -118,7 +118,7 @@ void set(ConfigurationTarget target, String key, Object value, SIZE { @Override Object parse(String value, Config config, Class type, String key) { - StorageSize measure = StorageSize.parse(value); + StorageSize measure = StorageSize.parse(value, config.sizeUnit()); long val = Math.round(measure.getUnit().toBytes(measure.getValue())); if (type == int.class) { return (int) val; @@ -130,9 +130,9 @@ Object parse(String value, Config config, Class type, String key) { void set(ConfigurationTarget target, String key, Object value, Config config) { if (value instanceof Long) { - target.setStorageSize(key, (long) value, StorageUnit.BYTES); + target.setStorageSize(key, (long) value, config.sizeUnit()); } else if (value instanceof Integer) { - target.setStorageSize(key, (int) value, StorageUnit.BYTES); + target.setStorageSize(key, (int) value, config.sizeUnit()); } else { throw new ConfigurationException("Unsupported type " + value.getClass() + " for " + key); diff --git a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml index 0791ffb9eab0..f68fa91db864 100644 --- a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml @@ -15,18 +15,6 @@ limitations under the License. 
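
The TestChunkBuffer changes above adopt CodecBuffer leak detection; the pattern generalizes to any test that allocates ChunkBuffers, as in this illustrative JUnit 5 sketch (class and test names are made up):

import java.nio.ByteBuffer;
import org.apache.hadoop.hdds.utils.db.CodecBuffer;
import org.apache.hadoop.hdds.utils.db.CodecTestUtil;
import org.apache.hadoop.ozone.common.ChunkBuffer;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;

class LeakCheckedBufferTest {
  @BeforeAll
  static void beforeAll() {
    CodecBuffer.enableLeakDetection();   // track direct-buffer allocations
  }

  @AfterEach
  void after() throws Exception {
    CodecTestUtil.gc();                  // fails the test if a buffer was never released
  }

  @Test
  void allocateAndClose() {
    try (ChunkBuffer buffer = ChunkBuffer.allocate(16)) {
      buffer.put(ByteBuffer.wrap(new byte[16]));
    }
  }
}
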
--> - - - - - - - - - - - - diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeClientProtocolServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeClientProtocolServer.java index e26610b357e7..8b0b3a7ca239 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeClientProtocolServer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeClientProtocolServer.java @@ -20,12 +20,13 @@ import com.google.protobuf.BlockingService; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.ReconfigurationHandler; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos; -import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolPB; +import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolDatanodePB; import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolServerSideTranslatorPB; import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl; @@ -66,6 +67,10 @@ protected HddsDatanodeClientProtocolServer( HDDS_DATANODE_CLIENT_ADDRESS_KEY, HddsUtils.getDatanodeRpcAddress(conf), rpcServer); datanodeDetails.setPort(CLIENT_RPC, clientRpcAddress.getPort()); + if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, + false)) { + rpcServer.refreshServiceAcl(conf, HddsPolicyProvider.getInstance()); + } } public void start() { @@ -97,7 +102,7 @@ private RPC.Server getRpcServer(OzoneConfiguration configuration, InetSocketAddress rpcAddress = HddsUtils.getDatanodeRpcAddress(conf); // Add reconfigureProtocolService. RPC.setProtocolEngine( - configuration, ReconfigureProtocolPB.class, ProtobufRpcEngine.class); + configuration, ReconfigureProtocolDatanodePB.class, ProtobufRpcEngine.class); final int handlerCount = conf.getInt(HDDS_DATANODE_HANDLER_COUNT_KEY, HDDS_DATANODE_HANDLER_COUNT_DEFAULT); @@ -108,7 +113,7 @@ private RPC.Server getRpcServer(OzoneConfiguration configuration, reconfigureServerProtocol); return preserveThreadName(() -> startRpcServer(configuration, rpcAddress, - ReconfigureProtocolPB.class, reconfigureService, handlerCount)); + ReconfigureProtocolDatanodePB.class, reconfigureService, handlerCount)); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index f59622cb0faf..bbaf58d36b4f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -233,7 +233,6 @@ public void start() { datanodeDetails.setRevision( HddsVersionInfo.HDDS_VERSION_INFO.getRevision()); datanodeDetails.setBuildDate(HddsVersionInfo.HDDS_VERSION_INFO.getDate()); - datanodeDetails.setCurrentVersion(DatanodeVersion.CURRENT_VERSION); TracingUtil.initTracing( "HddsDatanodeService." 
+ datanodeDetails.getUuidString() .substring(0, 8), conf); @@ -424,17 +423,19 @@ private DatanodeDetails initializeDatanodeDetails() String idFilePath = HddsServerUtil.getDatanodeIdFilePath(conf); Preconditions.checkNotNull(idFilePath); File idFile = new File(idFilePath); + DatanodeDetails details; if (idFile.exists()) { - return ContainerUtils.readDatanodeDetailsFrom(idFile); + details = ContainerUtils.readDatanodeDetailsFrom(idFile); + // Current version is always overridden to the latest + details.setCurrentVersion(getDefaultCurrentVersion()); } else { // There is no datanode.id file, this might be the first time datanode // is started. - DatanodeDetails details = DatanodeDetails.newBuilder() - .setUuid(UUID.randomUUID()).build(); - details.setInitialVersion(DatanodeVersion.CURRENT_VERSION); - details.setCurrentVersion(DatanodeVersion.CURRENT_VERSION); - return details; + details = DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()).build(); + details.setInitialVersion(getDefaultInitialVersion()); + details.setCurrentVersion(getDefaultCurrentVersion()); } + return details; } /** @@ -678,4 +679,20 @@ private String reconfigReplicationStreamsLimit(String value) { .setPoolSize(Integer.parseInt(value)); return value; } + + /** + * Returns the initial version of the datanode. + */ + @VisibleForTesting + public static int getDefaultInitialVersion() { + return DatanodeVersion.CURRENT_VERSION; + } + + /** + * Returns the current version of the datanode. + */ + @VisibleForTesting + public static int getDefaultCurrentVersion() { + return DatanodeVersion.CURRENT_VERSION; + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java new file mode 100644 index 000000000000..eeed4fab5f72 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone; + + +import org.apache.hadoop.hdds.annotation.InterfaceAudience.Private; +import org.apache.hadoop.hdds.annotation.InterfaceStability.Unstable; +import org.apache.hadoop.hdds.protocol.ReconfigureProtocol; +import org.apache.hadoop.security.authorize.PolicyProvider; +import org.apache.hadoop.security.authorize.Service; +import org.apache.ratis.util.MemoizedSupplier; + +import java.util.Arrays; +import java.util.List; +import java.util.function.Supplier; + +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL; + +/** + * {@link PolicyProvider} for Datanode protocols. 
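As a side note on the initializeDatanodeDetails() rework shown above, here is a minimal sketch of the intended behavior. It is not part of the patch; it assumes the Ozone datanode classes touched by this diff are on the classpath and that idFile points at an existing datanode.id file.

// Sketch only: mirrors the reworked initializeDatanodeDetails() above.
// ContainerUtils, DatanodeDetails and HddsDatanodeService are classes touched
// by this patch; idFile is assumed to be a previously written datanode.id.
DatanodeDetails details = ContainerUtils.readDatanodeDetailsFrom(idFile);
int persistedInitial = details.getInitialVersion();   // kept as persisted on first startup
details.setCurrentVersion(HddsDatanodeService.getDefaultCurrentVersion());
// getDefaultCurrentVersion() returns DatanodeVersion.CURRENT_VERSION, so the
// reported current version always tracks the running software after a restart.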
+ */ +@Private +@Unstable +public final class HddsPolicyProvider extends PolicyProvider { + + private static final Supplier SUPPLIER = + MemoizedSupplier.valueOf(HddsPolicyProvider::new); + + private HddsPolicyProvider() { + } + + @Private + @Unstable + public static HddsPolicyProvider getInstance() { + return SUPPLIER.get(); + } + + private static final List DN_SERVICES = + Arrays.asList( + new Service( + OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL, + ReconfigureProtocol.class) + ); + + @Override + public Service[] getServices() { + return DN_SERVICES.toArray(new Service[0]); + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java index d271e7d5d48f..f7a38e3dec8b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java @@ -40,7 +40,8 @@ public enum DNAction implements AuditAction { CLOSE_CONTAINER, GET_COMMITTED_BLOCK_LENGTH, STREAM_INIT, - FINALIZE_BLOCK; + FINALIZE_BLOCK, + ECHO; @Override public String getAction() { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java index fc193751893f..337e4e3e29ce 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java @@ -52,6 +52,9 @@ public class ContainerMetrics { @Metric private MutableCounterLong containerDeleteFailedNonEmpty; @Metric private MutableCounterLong containerDeleteFailedBlockCountNotZero; @Metric private MutableCounterLong containerForceDelete; + @Metric private MutableCounterLong numReadStateMachine; + @Metric private MutableCounterLong bytesReadStateMachine; + private MutableCounterLong[] numOpsArray; private MutableCounterLong[] opsBytesArray; @@ -152,4 +155,20 @@ public long getContainerDeleteFailedBlockCountNotZero() { public long getContainerForceDelete() { return containerForceDelete.value(); } + + public void incNumReadStateMachine() { + numReadStateMachine.incr(); + } + + public long getNumReadStateMachine() { + return numReadStateMachine.value(); + } + + public void incBytesReadStateMachine(long bytes) { + bytesReadStateMachine.incr(bytes); + } + + public long getBytesReadStateMachine() { + return bytesReadStateMachine.value(); + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java index f8acbc7e2d69..58b1c1d1d5e0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeIdYaml.java @@ -56,7 +56,7 @@ private DatanodeIdYaml() { } /** - * Creates a yaml file using DatnodeDetails. This method expects the path + * Creates a yaml file using DatanodeDetails. This method expects the path * validation to be performed by the caller. 
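To make the new authorization hook concrete, a hedged configuration sketch follows. The key names are the constants used by this patch (OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL is the static import in the new HddsPolicyProvider); the ACL value shown is only an illustrative example, not a recommended setting.

// Sketch only: with service-level authorization enabled, the datanode client
// RPC server added above refreshes its ACLs from HddsPolicyProvider, which
// currently exposes only the reconfigure protocol.
OzoneConfiguration conf = new OzoneConfiguration();
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
conf.set(OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL, "*");  // "*" = all users/groups; a real deployment would list specific principals
// At startup HddsDatanodeClientProtocolServer then calls:
// rpcServer.refreshServiceAcl(conf, HddsPolicyProvider.getInstance());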
* * @param datanodeDetails {@link DatanodeDetails} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java index 8444b3bda1e8..210c538f274a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java @@ -25,12 +25,9 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import com.google.common.collect.ImmutableList; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * Defines layout versions for the Chunks. @@ -39,22 +36,17 @@ public enum ContainerLayoutVersion { FILE_PER_CHUNK(1, "One file per chunk") { @Override - public File getChunkFile(File chunkDir, BlockID blockID, - ChunkInfo info) { - return new File(chunkDir, info.getChunkName()); + public File getChunkFile(File chunkDir, BlockID blockID, String chunkName) { + return new File(chunkDir, chunkName); } }, FILE_PER_BLOCK(2, "One file per block") { @Override - public File getChunkFile(File chunkDir, BlockID blockID, - ChunkInfo info) { + public File getChunkFile(File chunkDir, BlockID blockID, String chunkName) { return new File(chunkDir, blockID.getLocalID() + ".block"); } }; - private static final Logger LOG = - LoggerFactory.getLogger(ContainerLayoutVersion.class); - private static final ContainerLayoutVersion DEFAULT_LAYOUT = ContainerLayoutVersion.FILE_PER_BLOCK; @@ -118,12 +110,12 @@ public String getDescription() { } public abstract File getChunkFile(File chunkDir, - BlockID blockID, ChunkInfo info); + BlockID blockID, String chunkName); public File getChunkFile(ContainerData containerData, BlockID blockID, - ChunkInfo info) throws StorageContainerException { + String chunkName) throws StorageContainerException { File chunkDir = ContainerUtils.getChunkDir(containerData); - return getChunkFile(chunkDir, blockID, info); + return getChunkFile(chunkDir, blockID, chunkName); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index f20615d23f8c..904ec21b5f62 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -91,6 +91,16 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor { static final Logger LOG = LoggerFactory.getLogger(HddsDispatcher.class); private static final AuditLogger AUDIT = new AuditLogger(AuditLoggerType.DNLOGGER); + private static final String AUDIT_PARAM_CONTAINER_ID = "containerID"; + private static final String AUDIT_PARAM_CONTAINER_TYPE = "containerType"; + private static final String AUDIT_PARAM_FORCE_UPDATE = "forceUpdate"; + private static final String AUDIT_PARAM_FORCE_DELETE = "forceDelete"; + private static final String 
AUDIT_PARAM_START_CONTAINER_ID = "startContainerID"; + private static final String AUDIT_PARAM_BLOCK_DATA = "blockData"; + private static final String AUDIT_PARAM_BLOCK_DATA_SIZE = "blockDataSize"; + private static final String AUDIT_PARAM_COUNT = "count"; + private static final String AUDIT_PARAM_START_LOCAL_ID = "startLocalID"; + private static final String AUDIT_PARAM_PREV_CHUNKNAME = "prevChunkName"; private final Map handlers; private final ConfigurationSource conf; private final ContainerSet containerSet; @@ -257,7 +267,7 @@ private ContainerCommandResponseProto dispatchRequest( if (getMissingContainerSet().contains(containerID)) { StorageContainerException sce = new StorageContainerException( "ContainerID " + containerID - + " has been lost and and cannot be recreated on this DataNode", + + " has been lost and cannot be recreated on this DataNode", ContainerProtos.Result.CONTAINER_MISSING); audit(action, eventType, params, AuditEventStatus.FAILURE, sce); return ContainerUtils.logAndReturnError(LOG, sce, msg); @@ -526,13 +536,14 @@ public void validateContainerCommand( Handler handler = getHandler(containerType); if (handler == null) { StorageContainerException ex = new StorageContainerException( - "Invalid " + "ContainerType " + containerType, + "Invalid ContainerType " + containerType, ContainerProtos.Result.CONTAINER_INTERNAL_ERROR); audit(action, eventType, params, AuditEventStatus.FAILURE, ex); throw ex; } State containerState = container.getContainerState(); + String log = "Container " + containerID + " in " + containerState + " state"; if (!HddsUtils.isReadOnly(msg) && !HddsUtils.isOpenToWriteState(containerState)) { switch (cmdType) { @@ -546,14 +557,12 @@ public void validateContainerCommand( default: // if the container is not open/recovering, no updates can happen. 
Just // throw an exception - ContainerNotOpenException cex = new ContainerNotOpenException( - "Container " + containerID + " in " + containerState + " state"); + ContainerNotOpenException cex = new ContainerNotOpenException(log); audit(action, eventType, params, AuditEventStatus.FAILURE, cex); throw cex; } } else if (HddsUtils.isReadOnly(msg) && containerState == State.INVALID) { - InvalidContainerStateException iex = new InvalidContainerStateException( - "Container " + containerID + " in " + containerState + " state"); + InvalidContainerStateException iex = new InvalidContainerStateException(log); audit(action, eventType, params, AuditEventStatus.FAILURE, iex); throw iex; } @@ -605,7 +614,7 @@ private boolean isVolumeFull(Container container) { long volumeCapacity = precomputedVolumeSpace.getCapacity(); long volumeFreeSpaceToSpare = VolumeUsage.getMinVolumeFreeSpace(conf, volumeCapacity); - long volumeFree = volume.getAvailable(precomputedVolumeSpace); + long volumeFree = precomputedVolumeSpace.getAvailable(); long volumeCommitted = volume.getCommittedBytes(); long volumeAvailable = volumeFree - volumeCommitted; return (volumeAvailable <= volumeFreeSpaceToSpare); @@ -807,6 +816,7 @@ private static DNAction getAuditAction(Type cmdType) { case GetCommittedBlockLength : return DNAction.GET_COMMITTED_BLOCK_LENGTH; case StreamInit : return DNAction.STREAM_INIT; case FinalizeBlock : return DNAction.FINALIZE_BLOCK; + case Echo : return DNAction.ECHO; default : LOG.debug("Invalid command type - {}", cmdType); return null; @@ -820,36 +830,36 @@ private static Map getAuditParams( String containerID = String.valueOf(msg.getContainerID()); switch (cmdType) { case CreateContainer: - auditParams.put("containerID", containerID); - auditParams.put("containerType", + auditParams.put(AUDIT_PARAM_CONTAINER_ID, containerID); + auditParams.put(AUDIT_PARAM_CONTAINER_TYPE, msg.getCreateContainer().getContainerType().toString()); return auditParams; case ReadContainer: - auditParams.put("containerID", containerID); + auditParams.put(AUDIT_PARAM_CONTAINER_ID, containerID); return auditParams; case UpdateContainer: - auditParams.put("containerID", containerID); - auditParams.put("forceUpdate", + auditParams.put(AUDIT_PARAM_CONTAINER_ID, containerID); + auditParams.put(AUDIT_PARAM_FORCE_UPDATE, String.valueOf(msg.getUpdateContainer().getForceUpdate())); return auditParams; case DeleteContainer: - auditParams.put("containerID", containerID); - auditParams.put("forceDelete", + auditParams.put(AUDIT_PARAM_CONTAINER_ID, containerID); + auditParams.put(AUDIT_PARAM_FORCE_DELETE, String.valueOf(msg.getDeleteContainer().getForceDelete())); return auditParams; case ListContainer: - auditParams.put("startContainerID", containerID); - auditParams.put("count", + auditParams.put(AUDIT_PARAM_START_CONTAINER_ID, containerID); + auditParams.put(AUDIT_PARAM_COUNT, String.valueOf(msg.getListContainer().getCount())); return auditParams; case PutBlock: try { - auditParams.put("blockData", + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockData.getFromProtoBuf(msg.getPutBlock().getBlockData()) .toString()); } catch (IOException ex) { @@ -862,58 +872,58 @@ private static Map getAuditParams( return auditParams; case GetBlock: - auditParams.put("blockData", + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockID.getFromProtobuf(msg.getGetBlock().getBlockID()).toString()); return auditParams; case DeleteBlock: - auditParams.put("blockData", + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockID.getFromProtobuf(msg.getDeleteBlock().getBlockID()) 
.toString()); return auditParams; case ListBlock: - auditParams.put("startLocalID", + auditParams.put(AUDIT_PARAM_START_LOCAL_ID, String.valueOf(msg.getListBlock().getStartLocalID())); - auditParams.put("count", String.valueOf(msg.getListBlock().getCount())); + auditParams.put(AUDIT_PARAM_COUNT, String.valueOf(msg.getListBlock().getCount())); return auditParams; case ReadChunk: - auditParams.put("blockData", + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockID.getFromProtobuf(msg.getReadChunk().getBlockID()).toString()); - auditParams.put("blockDataSize", + auditParams.put(AUDIT_PARAM_BLOCK_DATA_SIZE, String.valueOf(msg.getReadChunk().getChunkData().getLen())); return auditParams; case DeleteChunk: - auditParams.put("blockData", + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockID.getFromProtobuf(msg.getDeleteChunk().getBlockID()) .toString()); return auditParams; case WriteChunk: - auditParams.put("blockData", + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockID.getFromProtobuf(msg.getWriteChunk().getBlockID()) .toString()); - auditParams.put("blockDataSize", + auditParams.put(AUDIT_PARAM_BLOCK_DATA_SIZE, String.valueOf(msg.getWriteChunk().getChunkData().getLen())); return auditParams; case ListChunk: - auditParams.put("blockData", + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockID.getFromProtobuf(msg.getListChunk().getBlockID()).toString()); - auditParams.put("prevChunkName", msg.getListChunk().getPrevChunkName()); - auditParams.put("count", String.valueOf(msg.getListChunk().getCount())); + auditParams.put(AUDIT_PARAM_PREV_CHUNKNAME, msg.getListChunk().getPrevChunkName()); + auditParams.put(AUDIT_PARAM_COUNT, String.valueOf(msg.getListChunk().getCount())); return auditParams; case CompactChunk: return null; //CompactChunk operation case PutSmallFile: try { - auditParams.put("blockData", + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockData.getFromProtoBuf(msg.getPutSmallFile() .getBlock().getBlockData()).toString()); - auditParams.put("blockDataSize", + auditParams.put(AUDIT_PARAM_BLOCK_DATA_SIZE, String.valueOf(msg.getPutSmallFile().getChunkInfo().getLen())); } catch (IOException ex) { if (LOG.isTraceEnabled()) { @@ -924,17 +934,17 @@ private static Map getAuditParams( return auditParams; case GetSmallFile: - auditParams.put("blockData", + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockID.getFromProtobuf(msg.getGetSmallFile().getBlock().getBlockID()) .toString()); return auditParams; case CloseContainer: - auditParams.put("containerID", containerID); + auditParams.put(AUDIT_PARAM_CONTAINER_ID, containerID); return auditParams; case GetCommittedBlockLength: - auditParams.put("blockData", + auditParams.put(AUDIT_PARAM_BLOCK_DATA, BlockID.getFromProtobuf(msg.getGetCommittedBlockLength().getBlockID()) .toString()); return auditParams; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java index 009e6396e0d2..346b05ebb4c1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java @@ -99,11 +99,11 @@ public XceiverServerGrpc(DatanodeDetails datanodeDetails, this.id = datanodeDetails.getUuid(); this.datanodeDetails = datanodeDetails; - this.port = 
conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + this.port = conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); - if (conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT)) { + if (conf.getBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT_DEFAULT)) { this.port = 0; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java index b776dc903de4..87572768e4af 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.ozone.container.common.transport.server.ratis; -import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.metrics2.MetricsSystem; @@ -132,67 +131,55 @@ public void incNumApplyTransactionsFails() { numApplyTransactionFails.incr(); } - @VisibleForTesting public long getNumWriteStateMachineOps() { return numWriteStateMachineOps.value(); } - @VisibleForTesting public long getNumQueryStateMachineOps() { return numQueryStateMachineOps.value(); } - @VisibleForTesting public long getNumApplyTransactionsOps() { return numApplyTransactionOps.value(); } - @VisibleForTesting public long getNumWriteStateMachineFails() { return numWriteStateMachineFails.value(); } - @VisibleForTesting public long getNumWriteDataFails() { return numWriteDataFails.value(); } - @VisibleForTesting public long getNumQueryStateMachineFails() { return numQueryStateMachineFails.value(); } - @VisibleForTesting public long getNumApplyTransactionsFails() { return numApplyTransactionFails.value(); } - @VisibleForTesting public long getNumReadStateMachineFails() { return numReadStateMachineFails.value(); } - @VisibleForTesting public long getNumReadStateMachineMissCount() { return numReadStateMachineMissCount.value(); } - @VisibleForTesting public long getNumReadStateMachineOps() { return numReadStateMachineOps.value(); } - @VisibleForTesting public long getNumBytesWrittenCount() { return numBytesWrittenCount.value(); } - @VisibleForTesting public long getNumBytesCommittedCount() { return numBytesCommittedCount.value(); } - public MutableRate getApplyTransactionLatencyNs() { + MutableRate getApplyTransactionLatencyNs() { return applyTransactionNs; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index 30496ce51a02..9eb5b909ccea 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -219,8 +219,8 @@ public 
ContainerStateMachine(RaftGroupId gid, this.writeChunkFutureMap = new ConcurrentHashMap<>(); applyTransactionCompletionMap = new ConcurrentHashMap<>(); long pendingRequestsBytesLimit = (long)conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT, StorageUnit.BYTES); // cache with FIFO eviction, and if element not found, this needs // to be obtained from disk for slow follower @@ -238,13 +238,13 @@ public ContainerStateMachine(RaftGroupId gid, this.container2BCSIDMap = new ConcurrentHashMap<>(); final int numContainerOpExecutors = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT); int maxPendingApplyTransactions = conf.getInt( ScmConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS, + HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS, ScmConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT); + HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT); applyTransactionSemaphore = new Semaphore(maxPendingApplyTransactions); stateMachineHealthy = new AtomicBoolean(true); @@ -432,11 +432,10 @@ public TransactionContext startTransaction(RaftClientRequest request) if (!blockAlreadyFinalized) { // create the log entry proto final WriteChunkRequestProto commitWriteChunkProto = - WriteChunkRequestProto.newBuilder() - .setBlockID(write.getBlockID()) - .setChunkData(write.getChunkData()) + WriteChunkRequestProto.newBuilder(write) // skipping the data field as it is // already set in statemachine data proto + .clearData() .build(); ContainerCommandRequestProto commitContainerCommandProto = ContainerCommandRequestProto diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index fcc611ea3f10..53ae98f50c01 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -110,12 +110,12 @@ import org.slf4j.LoggerFactory; import static org.apache.hadoop.hdds.DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; +import static 
org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; import static org.apache.ratis.util.Preconditions.assertTrue; /** @@ -189,8 +189,8 @@ private XceiverServerRatis(DatanodeDetails dd, ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); assignPorts(); this.streamEnable = conf.getBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT); RaftProperties serverProperties = newRaftProperties(); this.context = context; this.dispatcher = dispatcher; @@ -217,17 +217,17 @@ private XceiverServerRatis(DatanodeDetails dd, private void assignPorts() { clientPort = determinePort( - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT_DEFAULT); if (DatanodeVersion.fromProtoValue(datanodeDetails.getInitialVersion()) .compareTo(SEPARATE_RATIS_PORTS_AVAILABLE) >= 0) { adminPort = determinePort( - OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT); serverPort = determinePort( - OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT_DEFAULT); } else { adminPort = clientPort; serverPort = clientPort; @@ -236,8 +236,8 @@ private void assignPorts() { private int determinePort(String key, int defaultValue) { boolean randomPort = conf.getBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT); return randomPort ? 0 : conf.getInt(key, defaultValue); } @@ -249,14 +249,14 @@ private ContainerStateMachine getStateMachine(RaftGroupId gid) { private void setUpRatisStream(RaftProperties properties) { // set the datastream config if (conf.getBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, OzoneConfigKeys. 
- DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT)) { + HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT)) { dataStreamPort = 0; } else { dataStreamPort = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT); } RatisHelper.enableNettyStreaming(properties); NettyConfigKeys.DataStream.setPort(properties, dataStreamPort); @@ -327,8 +327,8 @@ public RaftProperties newRaftProperties() { } long snapshotThreshold = - conf.getLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, - OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT); + conf.getLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, + OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT); RaftServerConfigKeys.Snapshot. setAutoTriggerEnabled(properties, true); RaftServerConfigKeys.Snapshot. @@ -338,11 +338,11 @@ public RaftProperties newRaftProperties() { setPendingRequestsLimits(properties); int logQueueNumElements = - conf.getInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT); + conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT); final long logQueueByteLimit = (long) conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT, StorageUnit.BYTES); RaftServerConfigKeys.Log.setQueueElementLimit( properties, logQueueNumElements); @@ -353,8 +353,8 @@ public RaftProperties newRaftProperties() { false); int purgeGap = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT); RaftServerConfigKeys.Log.setPurgeGap(properties, purgeGap); //Set the number of Snapshots Retained. @@ -375,12 +375,12 @@ private void setRatisLeaderElectionTimeout(RaftProperties properties) { long duration; TimeUnit leaderElectionMinTimeoutUnit = OzoneConfigKeys. - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT .getUnit(); duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, OzoneConfigKeys. 
- DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT .getDuration(), leaderElectionMinTimeoutUnit); final TimeDuration leaderElectionMinTimeout = TimeDuration.valueOf(duration, leaderElectionMinTimeoutUnit); @@ -396,11 +396,11 @@ private void setTimeoutForRetryCache(RaftProperties properties) { TimeUnit timeUnit; long duration; timeUnit = - OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT + OzoneConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT .getUnit(); duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY, - OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT + OzoneConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT .getDuration(), timeUnit); final TimeDuration retryCacheTimeout = TimeDuration.valueOf(duration, timeUnit); @@ -410,8 +410,8 @@ private void setTimeoutForRetryCache(RaftProperties properties) { private long setRaftSegmentPreallocatedSize(RaftProperties properties) { final long raftSegmentPreallocatedSize = (long) conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT, StorageUnit.BYTES); RaftServerConfigKeys.Log.setPreallocatedSize(properties, SizeInBytes.valueOf(raftSegmentPreallocatedSize)); @@ -420,23 +420,23 @@ private long setRaftSegmentPreallocatedSize(RaftProperties properties) { private void setRaftSegmentAndWriteBufferSize(RaftProperties properties) { final int logAppenderQueueNumElements = conf.getInt( - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS, - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT); + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS, + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT); final int logAppenderQueueByteLimit = (int) conf.getStorageSize( - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT, - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT, + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT, + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT, StorageUnit.BYTES); final long raftSegmentSize = (long) conf.getStorageSize( - DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY, - DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT, + HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY, + HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT, StorageUnit.BYTES); final long raftSegmentBufferSize = logAppenderQueueByteLimit + 8; assertTrue(raftSegmentBufferSize <= raftSegmentSize, - () -> DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT + " = " + () -> HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT + " = " + logAppenderQueueByteLimit - + " must be <= (" + DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY + " - 8" + + " must be <= (" + HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY + " - 8" + " = " + (raftSegmentSize - 8) + ")"); RaftServerConfigKeys.Log.Appender.setBufferElementLimit(properties, @@ -454,11 +454,11 @@ private void setStateMachineDataConfigurations(RaftProperties properties) { RaftServerConfigKeys.Log.StateMachineData.setSync(properties, true); TimeUnit timeUnit = OzoneConfigKeys. 
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT.getUnit(); + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT.getUnit(); long duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT, OzoneConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT .getDuration(), timeUnit); final TimeDuration dataSyncTimeout = TimeDuration.valueOf(duration, timeUnit); @@ -479,7 +479,7 @@ private void setStateMachineDataConfigurations(RaftProperties properties) { int syncTimeoutRetryDefault = (int) nodeFailureTimeoutMs / dataSyncTimeout.toIntExact(TimeUnit.MILLISECONDS); int numSyncRetries = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES, syncTimeoutRetryDefault); RaftServerConfigKeys.Log.StateMachineData.setSyncTimeoutRetry(properties, numSyncRetries); @@ -507,8 +507,8 @@ private void setStateMachineDataConfigurations(RaftProperties properties) { private RpcType setRpcType(RaftProperties properties) { final String rpcType = conf.get( - OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); final RpcType rpc = SupportedRpcType.valueOfIgnoreCase(rpcType); RatisHelper.setRpcType(properties, rpc); return rpc; @@ -517,8 +517,8 @@ private RpcType setRpcType(RaftProperties properties) { private void setPendingRequestsLimits(RaftProperties properties) { long pendingRequestsBytesLimit = (long) conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT, StorageUnit.BYTES); final int pendingRequestsMegaBytesLimit = HddsUtils.roundupMb(pendingRequestsBytesLimit); @@ -990,9 +990,9 @@ private static List createChunkExecutors( // TODO create single pool with N threads if using non-incremental chunks final int threadCountPerDisk = conf.getInt( OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); final int numberOfDisks = HddsServerUtil.getDatanodeStorageDirs(conf).size(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java deleted file mode 100644 index 0a2375b4f44e..000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java +++ /dev/null @@ -1,1295 +0,0 @@ -/* - * Copyright (C) 2007 The Guava Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * Some portions of this class have been modified to make it functional in this - * package. - */ -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.annotations.Beta; -import com.google.common.annotations.GwtCompatible; -import com.google.common.base.Preconditions; -import static com.google.common.base.Preconditions.checkNotNull; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListeningExecutorService; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; -import com.google.common.util.concurrent.Uninterruptibles; -import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater - .newUpdater; - -import jakarta.annotation.Nullable; -import java.security.AccessController; -import java.security.PrivilegedActionException; -import java.security.PrivilegedExceptionAction; -import java.util.concurrent.CancellationException; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executor; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; -import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy; -import java.util.concurrent.locks.LockSupport; -import java.util.logging.Level; -import java.util.logging.Logger; - -/** - * An abstract implementation of {@link ListenableFuture}, intended for - * advanced users only. More common ways to create a {@code ListenableFuture} - * include instantiating a {@link SettableFuture}, submitting a task to a - * {@link ListeningExecutorService}, and deriving a {@code Future} from an - * existing one, typically using methods like {@link Futures#transform - * (ListenableFuture, com.google.common.base.Function) Futures.transform} - * and its overloaded versions. - *

- * <p>
This class implements all methods in {@code ListenableFuture}. - * Subclasses should provide a way to set the result of the computation - * through the protected methods {@link #set(Object)}, - * {@link #setFuture(ListenableFuture)} and {@link #setException(Throwable)}. - * Subclasses may also override {@link #interruptTask()}, which will be - * invoked automatically if a call to {@link #cancel(boolean) cancel(true)} - * succeeds in canceling the future. Subclasses should rarely override other - * methods. - */ - -@GwtCompatible(emulated = true) -public abstract class AbstractFuture implements ListenableFuture { - // NOTE: Whenever both tests are cheap and functional, it's faster to use &, - // | instead of &&, || - - private static final boolean GENERATE_CANCELLATION_CAUSES = - Boolean.parseBoolean( - System.getProperty("guava.concurrent.generate_cancellation_cause", - "false")); - - /** - * A less abstract subclass of AbstractFuture. This can be used to optimize - * setFuture by ensuring that {@link #get} calls exactly the implementation - * of {@link AbstractFuture#get}. - */ - abstract static class TrustedFuture extends AbstractFuture { - @Override - public final V get() throws InterruptedException, ExecutionException { - return super.get(); - } - - @Override - public final V get(long timeout, TimeUnit unit) - throws InterruptedException, ExecutionException, TimeoutException { - return super.get(timeout, unit); - } - - @Override - public final boolean isDone() { - return super.isDone(); - } - - @Override - public final boolean isCancelled() { - return super.isCancelled(); - } - - @Override - public final void addListener(Runnable listener, Executor executor) { - super.addListener(listener, executor); - } - - @Override - public final boolean cancel(boolean mayInterruptIfRunning) { - return super.cancel(mayInterruptIfRunning); - } - } - - // Logger to log exceptions caught when running listeners. - private static final Logger LOG = Logger - .getLogger(AbstractFuture.class.getName()); - - // A heuristic for timed gets. If the remaining timeout is less than this, - // spin instead of - // blocking. This value is what AbstractQueuedSynchronizer uses. - private static final long SPIN_THRESHOLD_NANOS = 1000L; - - private static final AtomicHelper ATOMIC_HELPER; - - static { - AtomicHelper helper; - - try { - helper = new UnsafeAtomicHelper(); - } catch (Throwable unsafeFailure) { - // catch absolutely everything and fall through to our 'SafeAtomicHelper' - // The access control checks that ARFU does means the caller class has - // to be AbstractFuture - // instead of SafeAtomicHelper, so we annoyingly define these here - try { - helper = - new SafeAtomicHelper( - newUpdater(Waiter.class, Thread.class, "thread"), - newUpdater(Waiter.class, Waiter.class, "next"), - newUpdater(AbstractFuture.class, Waiter.class, "waiters"), - newUpdater(AbstractFuture.class, Listener.class, "listeners"), - newUpdater(AbstractFuture.class, Object.class, "value")); - } catch (Throwable atomicReferenceFieldUpdaterFailure) { - // Some Android 5.0.x Samsung devices have bugs in JDK reflection APIs - // that cause getDeclaredField to throw a NoSuchFieldException when - // the field is definitely there. - // For these users fallback to a suboptimal implementation, based on - // synchronized. This will be a definite performance hit to those users. 
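For readers of the fallback logic described in the comment above: the "SafeAtomicHelper" path of the deleted class is the standard AtomicReferenceFieldUpdater pattern. The following is a self-contained illustration only, not Ozone code; the Node class and field names are invented for the example.

import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

// Standalone illustration of the updater-based CAS fallback described above.
final class Node {
  volatile Node next;

  private static final AtomicReferenceFieldUpdater<Node, Node> NEXT =
      AtomicReferenceFieldUpdater.newUpdater(Node.class, Node.class, "next");

  boolean casNext(Node expect, Node update) {
    // Atomically swings 'next' from expect to update, analogous to casWaiters/casListeners.
    return NEXT.compareAndSet(this, expect, update);
  }
}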
- LOG.log(Level.SEVERE, "UnsafeAtomicHelper is broken!", unsafeFailure); - LOG.log( - Level.SEVERE, "SafeAtomicHelper is broken!", - atomicReferenceFieldUpdaterFailure); - helper = new SynchronizedHelper(); - } - } - ATOMIC_HELPER = helper; - - // Prevent rare disastrous classloading in first call to LockSupport.park. - // See: https://bugs.openjdk.java.net/browse/JDK-8074773 - @SuppressWarnings("unused") - Class ensureLoaded = LockSupport.class; - } - - /** - * Waiter links form a Treiber stack, in the {@link #waiters} field. - */ - @SuppressWarnings("visibilitymodifier") - private static final class Waiter { - static final Waiter TOMBSTONE = new Waiter(false /* ignored param */); - - @Nullable volatile Thread thread; - @Nullable volatile Waiter next; - - /** - * Constructor for the TOMBSTONE, avoids use of ATOMIC_HELPER in case this - * class is loaded before the ATOMIC_HELPER. Apparently this is possible - * on some android platforms. - */ - Waiter(boolean unused) { - } - - Waiter() { - // avoid volatile write, write is made visible by subsequent CAS on - // waiters field - ATOMIC_HELPER.putThread(this, Thread.currentThread()); - } - - // non-volatile write to the next field. Should be made visible by - // subsequent CAS on waiters field. - void setNext(Waiter next) { - ATOMIC_HELPER.putNext(this, next); - } - - void unpark() { - // This is racy with removeWaiter. The consequence of the race is that - // we may spuriously call unpark even though the thread has already - // removed itself from the list. But even if we did use a CAS, that - // race would still exist (it would just be ever so slightly smaller). - Thread w = thread; - if (w != null) { - thread = null; - LockSupport.unpark(w); - } - } - } - - /** - * Marks the given node as 'deleted' (null waiter) and then scans the list - * to unlink all deleted nodes. This is an O(n) operation in the common - * case (and O(n^2) in the worst), but we are saved by two things. - *

- * <ul>
- * <li>This is only called when a waiting thread times out or is
- *     interrupted. Both of which should be rare.
- * <li>The waiters list should be very short.
- * </ul>
- */ - private void removeWaiter(Waiter node) { - node.thread = null; // mark as 'deleted' - restart: - while (true) { - Waiter pred = null; - Waiter curr = waiters; - if (curr == Waiter.TOMBSTONE) { - return; // give up if someone is calling complete - } - Waiter succ; - while (curr != null) { - succ = curr.next; - if (curr.thread != null) { // we aren't unlinking this node, update - // pred. - pred = curr; - } else if (pred != null) { // We are unlinking this node and it has a - // predecessor. - pred.next = succ; - if (pred.thread == null) { // We raced with another node that - // unlinked pred. Restart. - continue restart; - } - } else if (!ATOMIC_HELPER - .casWaiters(this, curr, succ)) { // We are unlinking head - continue restart; // We raced with an add or complete - } - curr = succ; - } - break; - } - } - - /** - * Listeners also form a stack through the {@link #listeners} field. - */ - @SuppressWarnings("visibilitymodifier") - private static final class Listener { - static final Listener TOMBSTONE = new Listener(null, null); - final Runnable task; - final Executor executor; - - // writes to next are made visible by subsequent CAS's on the listeners - // field - @Nullable Listener next; - - Listener(Runnable task, Executor executor) { - this.task = task; - this.executor = executor; - } - } - - /** - * A special value to represent {@code null}. - */ - private static final Object NULL = new Object(); - - /** - * A special value to represent failure, when {@link #setException} is - * called successfully. - */ - @SuppressWarnings("visibilitymodifier") - private static final class Failure { - static final Failure FALLBACK_INSTANCE = - new Failure( - new Throwable("Failure occurred while trying to finish a future.") { - @Override - public synchronized Throwable fillInStackTrace() { - return this; // no stack trace - } - }); - final Throwable exception; - - Failure(Throwable exception) { - this.exception = checkNotNull(exception); - } - } - - /** - * A special value to represent cancellation and the 'wasInterrupted' bit. - */ - @SuppressWarnings("visibilitymodifier") - private static final class Cancellation { - final boolean wasInterrupted; - @Nullable final Throwable cause; - - Cancellation(boolean wasInterrupted, @Nullable Throwable cause) { - this.wasInterrupted = wasInterrupted; - this.cause = cause; - } - } - - /** - * A special value that encodes the 'setFuture' state. - */ - @SuppressWarnings("visibilitymodifier") - private static final class SetFuture implements Runnable { - final AbstractFuture owner; - final ListenableFuture future; - - SetFuture(AbstractFuture owner, ListenableFuture future) { - this.owner = owner; - this.future = future; - } - - @Override - public void run() { - if (owner.value != this) { - // nothing to do, we must have been cancelled, don't bother inspecting - // the future. - return; - } - Object valueToSet = getFutureValue(future); - if (ATOMIC_HELPER.casValue(owner, this, valueToSet)) { - complete(owner); - } - } - } - - /** - * This field encodes the current state of the future. - *

- * <p>
- * <p>The valid values are:
- * <ul>
- * <li>{@code null} initial state, nothing has happened.
- * <li>{@link Cancellation} terminal state, {@code cancel} was called.
- * <li>{@link Failure} terminal state, {@code setException} was called.
- * <li>{@link SetFuture} intermediate state, {@code setFuture} was called.
- * <li>{@link #NULL} terminal state, {@code set(null)} was called.
- * <li>Any other non-null value, terminal state, {@code set} was called with
- *     a non-null argument.
- * </ul>
- */ - private volatile Object value; - - /** - * All listeners. - */ - private volatile Listener listeners; - - /** - * All waiting threads. - */ - private volatile Waiter waiters; - - /** - * Constructor for use by subclasses. - */ - protected AbstractFuture() { - } - - // Gets and Timed Gets - // - // * Be responsive to interruption - // * Don't create Waiter nodes if you aren't going to park, this helps - // reduce contention on the waiters field. - // * Future completion is defined by when #value becomes non-null/non - // SetFuture - // * Future completion can be observed if the waiters field contains a - // TOMBSTONE - - // Timed Get - // There are a few design constraints to consider - // * We want to be responsive to small timeouts, unpark() has non trivial - // latency overheads (I have observed 12 micros on 64 bit linux systems to - // wake up a parked thread). So if the timeout is small we shouldn't park(). - // This needs to be traded off with the cpu overhead of spinning, so we use - // SPIN_THRESHOLD_NANOS which is what AbstractQueuedSynchronizer uses for - // similar purposes. - // * We want to behave reasonably for timeouts of 0 - // * We are more responsive to completion than timeouts. This is because - // parkNanos depends on system scheduling and as such we could either miss - // our deadline, or unpark() could be delayed so that it looks like we - // timed out even though we didn't. For comparison FutureTask respects - // completion preferably and AQS is non-deterministic (depends on where in - // the queue the waiter is). If we wanted to be strict about it, we could - // store the unpark() time in the Waiter node and we could use that to make - // a decision about whether or not we timed out prior to being unparked. - - /* - * Improve the documentation of when InterruptedException is thrown. Our - * behavior matches the JDK's, but the JDK's documentation is misleading. - */ - - /** - * {@inheritDoc} - *

- * <p>
The default {@link AbstractFuture} implementation throws {@code - * InterruptedException} if the current thread is interrupted before or - * during the call, even if the value is already available. - * - * @throws InterruptedException if the current thread was interrupted - * before or during the call - * (optional but recommended). - * @throws CancellationException {@inheritDoc} - */ - @Override - public V get(long timeout, TimeUnit unit) - throws InterruptedException, TimeoutException, ExecutionException { - // NOTE: if timeout < 0, remainingNanos will be < 0 and we will fall into - // the while(true) loop at the bottom and throw a timeoutexception. - long remainingNanos = unit - .toNanos(timeout); // we rely on the implicit null check on unit. - if (Thread.interrupted()) { - throw new InterruptedException(); - } - Object localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - // we delay calling nanoTime until we know we will need to either park or - // spin - final long endNanos = remainingNanos > 0 ? System - .nanoTime() + remainingNanos : 0; - long_wait_loop: - if (remainingNanos >= SPIN_THRESHOLD_NANOS) { - Waiter oldHead = waiters; - if (oldHead != Waiter.TOMBSTONE) { - Waiter node = new Waiter(); - do { - node.setNext(oldHead); - if (ATOMIC_HELPER.casWaiters(this, oldHead, node)) { - while (true) { - LockSupport.parkNanos(this, remainingNanos); - // Check interruption first, if we woke up due to interruption - // we need to honor that. - if (Thread.interrupted()) { - removeWaiter(node); - throw new InterruptedException(); - } - - // Otherwise re-read and check doneness. If we loop then it must - // have been a spurious wakeup - localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - - // timed out? - remainingNanos = endNanos - System.nanoTime(); - if (remainingNanos < SPIN_THRESHOLD_NANOS) { - // Remove the waiter, one way or another we are done parking - // this thread. - removeWaiter(node); - break long_wait_loop; // jump down to the busy wait loop - } - } - } - oldHead = waiters; // re-read and loop. - } while (oldHead != Waiter.TOMBSTONE); - } - // re-read value, if we get here then we must have observed a TOMBSTONE - // while trying to add a waiter. - return getDoneValue(value); - } - // If we get here then we have remainingNanos < SPIN_THRESHOLD_NANOS and - // there is no node on the waiters list - while (remainingNanos > 0) { - localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - if (Thread.interrupted()) { - throw new InterruptedException(); - } - remainingNanos = endNanos - System.nanoTime(); - } - throw new TimeoutException(); - } - - /* - * Improve the documentation of when InterruptedException is thrown. Our - * behavior matches the JDK's, but the JDK's documentation is misleading. - */ - - /** - * {@inheritDoc} - *

- * <p>
The default {@link AbstractFuture} implementation throws {@code - * InterruptedException} if the current thread is interrupted before or - * during the call, even if the value is already available. - * - * @throws InterruptedException if the current thread was interrupted - * before or during the call - * (optional but recommended). - * @throws CancellationException {@inheritDoc} - */ - @Override - public V get() throws InterruptedException, ExecutionException { - if (Thread.interrupted()) { - throw new InterruptedException(); - } - Object localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - Waiter oldHead = waiters; - if (oldHead != Waiter.TOMBSTONE) { - Waiter node = new Waiter(); - do { - node.setNext(oldHead); - if (ATOMIC_HELPER.casWaiters(this, oldHead, node)) { - // we are on the stack, now wait for completion. - while (true) { - LockSupport.park(this); - // Check interruption first, if we woke up due to interruption we - // need to honor that. - if (Thread.interrupted()) { - removeWaiter(node); - throw new InterruptedException(); - } - // Otherwise re-read and check doneness. If we loop then it must - // have been a spurious wakeup - localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - } - } - oldHead = waiters; // re-read and loop. - } while (oldHead != Waiter.TOMBSTONE); - } - // re-read value, if we get here then we must have observed a TOMBSTONE - // while trying to add a waiter. - return getDoneValue(value); - } - - /** - * Unboxes {@code obj}. Assumes that obj is not {@code null} or a - * {@link SetFuture}. - */ - private V getDoneValue(Object obj) throws ExecutionException { - // While this seems like it might be too branch-y, simple benchmarking - // proves it to be unmeasurable (comparing done AbstractFutures with - // immediateFuture) - if (obj instanceof Cancellation) { - throw cancellationExceptionWithCause( - "Task was cancelled.", ((Cancellation) obj).cause); - } else if (obj instanceof Failure) { - throw new ExecutionException(((Failure) obj).exception); - } else if (obj == NULL) { - return null; - } else { - @SuppressWarnings("unchecked") // this is the only other option - V asV = (V) obj; - return asV; - } - } - - @Override - public boolean isDone() { - final Object localValue = value; - return localValue != null & !(localValue instanceof SetFuture); - } - - @Override - public boolean isCancelled() { - final Object localValue = value; - return localValue instanceof Cancellation; - } - - /** - * {@inheritDoc} - *
- * <p>
If a cancellation attempt succeeds on a {@code Future} that had - * previously been {@linkplain#setFuture set asynchronously}, then the - * cancellation will also be propagated to the delegate {@code Future} that - * was supplied in the {@code setFuture} call. - */ - @Override - public boolean cancel(boolean mayInterruptIfRunning) { - Object localValue = value; - boolean rValue = false; - if (localValue == null | localValue instanceof SetFuture) { - // Try to delay allocating the exception. At this point we may still - // lose the CAS, but it is certainly less likely. - Throwable cause = - GENERATE_CANCELLATION_CAUSES - ? new CancellationException("Future.cancel() was called.") - : null; - Object valueToSet = new Cancellation(mayInterruptIfRunning, cause); - AbstractFuture abstractFuture = this; - while (true) { - if (ATOMIC_HELPER.casValue(abstractFuture, localValue, valueToSet)) { - rValue = true; - // We call interuptTask before calling complete(), which is - // consistent with FutureTask - if (mayInterruptIfRunning) { - abstractFuture.interruptTask(); - } - complete(abstractFuture); - if (localValue instanceof SetFuture) { - // propagate cancellation to the future set in setfuture, this is - // racy, and we don't care if we are successful or not. - ListenableFuture futureToPropagateTo = ((SetFuture) localValue) - .future; - if (futureToPropagateTo instanceof TrustedFuture) { - // If the future is a TrustedFuture then we specifically avoid - // calling cancel() this has 2 benefits - // 1. for long chains of futures strung together with setFuture - // we consume less stack - // 2. we avoid allocating Cancellation objects at every level of - // the cancellation chain - // We can only do this for TrustedFuture, because - // TrustedFuture.cancel is final and does nothing but delegate - // to this method. - AbstractFuture trusted = (AbstractFuture) - futureToPropagateTo; - localValue = trusted.value; - if (localValue == null | localValue instanceof SetFuture) { - abstractFuture = trusted; - continue; // loop back up and try to complete the new future - } - } else { - // not a TrustedFuture, call cancel directly. - futureToPropagateTo.cancel(mayInterruptIfRunning); - } - } - break; - } - // obj changed, reread - localValue = abstractFuture.value; - if (!(localValue instanceof SetFuture)) { - // obj cannot be null at this point, because value can only change - // from null to non-null. So if value changed (and it did since we - // lost the CAS), then it cannot be null and since it isn't a - // SetFuture, then the future must be done and we should exit the loop - break; - } - } - } - return rValue; - } - - /** - * Subclasses can override this method to implement interruption of the - * future's computation. The method is invoked automatically by a - * successful call to {@link #cancel(boolean) cancel(true)}. - *
- * <p>
The default implementation does nothing. - * - * @since 10.0 - */ - protected void interruptTask() { - } - - /** - * Returns true if this future was cancelled with {@code - * mayInterruptIfRunning} set to {@code true}. - * - * @since 14.0 - */ - protected final boolean wasInterrupted() { - final Object localValue = value; - return (localValue instanceof Cancellation) && ((Cancellation) localValue) - .wasInterrupted; - } - - /** - * {@inheritDoc} - * - * @since 10.0 - */ - @Override - public void addListener(Runnable listener, Executor executor) { - checkNotNull(listener, "Runnable was null."); - checkNotNull(executor, "Executor was null."); - Listener oldHead = listeners; - if (oldHead != Listener.TOMBSTONE) { - Listener newNode = new Listener(listener, executor); - do { - newNode.next = oldHead; - if (ATOMIC_HELPER.casListeners(this, oldHead, newNode)) { - return; - } - oldHead = listeners; // re-read - } while (oldHead != Listener.TOMBSTONE); - } - // If we get here then the Listener TOMBSTONE was set, which means the - // future is done, call the listener. - executeListener(listener, executor); - } - - /** - * Sets the result of this {@code Future} unless this {@code Future} has - * already been cancelled or set (including - * {@linkplain #setFuture set asynchronously}). When a call to this method - * returns, the {@code Future} is guaranteed to be - * {@linkplain #isDone done} only if the call was accepted (in which - * case it returns {@code true}). If it returns {@code false}, the {@code - * Future} may have previously been set asynchronously, in which case its - * result may not be known yet. That result, though not yet known, cannot - * be overridden by a call to a {@code set*} method, only by a call to - * {@link #cancel}. - * - * @param value the value to be used as the result - * @return true if the attempt was accepted, completing the {@code Future} - */ - protected boolean set(@Nullable V val) { - Object valueToSet = value == null ? NULL : val; - if (ATOMIC_HELPER.casValue(this, null, valueToSet)) { - complete(this); - return true; - } - return false; - } - - /** - * Sets the failed result of this {@code Future} unless this {@code Future} - * has already been cancelled or set (including - * {@linkplain #setFuture set asynchronously}). When a call to this method - * returns, the {@code Future} is guaranteed to be - * {@linkplain #isDone done} only if the call was accepted (in which - * case it returns {@code true}). If it returns {@code false}, the - * {@code Future} may have previously been set asynchronously, in which case - * its result may not be known yet. That result, though not yet known, - * cannot be overridden by a call to a {@code set*} method, only by a call - * to {@link #cancel}. - * - * @param throwable the exception to be used as the failed result - * @return true if the attempt was accepted, completing the {@code Future} - */ - protected boolean setException(Throwable throwable) { - Object valueToSet = new Failure(checkNotNull(throwable)); - if (ATOMIC_HELPER.casValue(this, null, valueToSet)) { - complete(this); - return true; - } - return false; - } - - /** - * Sets the result of this {@code Future} to match the supplied input - * {@code Future} once the supplied {@code Future} is done, unless this - * {@code Future} has already been cancelled or set (including "set - * asynchronously," defined below). - *
- * <p>
If the supplied future is {@linkplain #isDone done} when this method - * is called and the call is accepted, then this future is guaranteed to - * have been completed with the supplied future by the time this method - * returns. If the supplied future is not done and the call is accepted, then - * the future will be set asynchronously. Note that such a result, - * though not yet known, cannot be overridden by a call to a {@code set*} - * method, only by a call to {@link #cancel}. - *
- * <p>
If the call {@code setFuture(delegate)} is accepted and this {@code - * Future} is later cancelled, cancellation will be propagated to {@code - * delegate}. Additionally, any call to {@code setFuture} after any - * cancellation will propagate cancellation to the supplied {@code Future}. - * - * @param future the future to delegate to - * @return true if the attempt was accepted, indicating that the {@code - * Future} was not previously cancelled or set. - * @since 19.0 - */ - @Beta - @SuppressWarnings("deadstore") - protected boolean setFuture(ListenableFuture future) { - checkNotNull(future); - Object localValue = value; - if (localValue == null) { - if (future.isDone()) { - Object val = getFutureValue(future); - if (ATOMIC_HELPER.casValue(this, null, val)) { - complete(this); - return true; - } - return false; - } - SetFuture valueToSet = new SetFuture(this, future); - if (ATOMIC_HELPER.casValue(this, null, valueToSet)) { - // the listener is responsible for calling completeWithFuture, - // directExecutor is appropriate since all we are doing is unpacking - // a completed future which should be fast. - try { - future.addListener(valueToSet, directExecutor()); - } catch (Throwable t) { - // addListener has thrown an exception! SetFuture.run can't throw - // any exceptions so this must have been caused by addListener - // itself. The most likely explanation is a misconfigured mock. Try - // to switch to Failure. - Failure failure; - try { - failure = new Failure(t); - } catch (Throwable oomMostLikely) { - failure = Failure.FALLBACK_INSTANCE; - } - // Note: The only way this CAS could fail is if cancel() has raced - // with us. That is ok. - boolean unused = ATOMIC_HELPER.casValue(this, valueToSet, failure); - } - return true; - } - localValue = value; // we lost the cas, fall through and maybe cancel - } - // The future has already been set to something. If it is cancellation we - // should cancel the incoming future. - if (localValue instanceof Cancellation) { - // we don't care if it fails, this is best-effort. - future.cancel(((Cancellation) localValue).wasInterrupted); - } - return false; - } - - /** - * Returns a value, suitable for storing in the {@link #value} field. From - * the given future, which is assumed to be done. - *
- * <p>
This is approximately the inverse of {@link #getDoneValue(Object)} - */ - private static Object getFutureValue(ListenableFuture future) { - Object valueToSet; - if (future instanceof TrustedFuture) { - // Break encapsulation for TrustedFuture instances since we know that - // subclasses cannot override .get() (since it is final) and therefore - // this is equivalent to calling .get() and unpacking the exceptions - // like we do below (just much faster because it is a single field read - // instead of a read, several branches and possibly creating exceptions). - return ((AbstractFuture) future).value; - } else { - // Otherwise calculate valueToSet by calling .get() - try { - Object v = getDone(future); - valueToSet = v == null ? NULL : v; - } catch (ExecutionException exception) { - valueToSet = new Failure(exception.getCause()); - } catch (CancellationException cancellation) { - valueToSet = new Cancellation(false, cancellation); - } catch (Throwable t) { - valueToSet = new Failure(t); - } - } - return valueToSet; - } - - /** - * Unblocks all threads and runs all listeners. - */ - private static void complete(AbstractFuture future) { - Listener next = null; - outer: - while (true) { - future.releaseWaiters(); - // We call this before the listeners in order to avoid needing to manage - // a separate stack data structure for them. afterDone() should be - // generally fast and only used for cleanup work... but in theory can - // also be recursive and create StackOverflowErrors - future.afterDone(); - // push the current set of listeners onto next - next = future.clearListeners(next); - future = null; - while (next != null) { - Listener curr = next; - next = next.next; - Runnable task = curr.task; - if (task instanceof SetFuture) { - SetFuture setFuture = (SetFuture) task; - // We unwind setFuture specifically to avoid StackOverflowErrors in - // the case of long chains of SetFutures - // Handling this special case is important because there is no way - // to pass an executor to setFuture, so a user couldn't break the - // chain by doing this themselves. It is also potentially common - // if someone writes a recursive Futures.transformAsync transformer. - future = setFuture.owner; - if (future.value == setFuture) { - Object valueToSet = getFutureValue(setFuture.future); - if (ATOMIC_HELPER.casValue(future, setFuture, valueToSet)) { - continue outer; - } - } - // other wise the future we were trying to set is already done. - } else { - executeListener(task, curr.executor); - } - } - break; - } - } - - public static V getDone(Future future) throws ExecutionException { - /* - * We throw IllegalStateException, since the call could succeed later. - * Perhaps we "should" throw IllegalArgumentException, since the call - * could succeed with a different argument. Those exceptions' docs - * suggest that either is acceptable. Google's Java Practices page - * recommends IllegalArgumentException here, in part to keep its - * recommendation simple: Static methods should throw - * IllegalStateException only when they use static state. - * - * - * Why do we deviate here? The answer: We want for fluentFuture.getDone() - * to throw the same exception as Futures.getDone(fluentFuture). - */ - Preconditions.checkState(future.isDone(), "Future was expected to be " + - "done:" + - " %s", future); - return Uninterruptibles.getUninterruptibly(future); - } - - /** - * Callback method that is called exactly once after the future is completed. - *
- * <p>
If {@link #interruptTask} is also run during completion, - * {@link #afterDone} runs after it. - *
- * <p>
The default implementation of this method in {@code AbstractFuture} - * does nothing. This is intended for very lightweight cleanup work, for - * example, timing statistics or clearing fields. - * If your task does anything heavier consider, just using a listener with - * an executor. - * - * @since 20.0 - */ - @Beta - protected void afterDone() { - } - - /** - * If this future has been cancelled (and possibly interrupted), cancels - * (and possibly interrupts) the given future (if available). - *
- * <p>
This method should be used only when this future is completed. It is - * designed to be called from {@code done}. - */ - final void maybePropagateCancellation(@Nullable Future related) { - if (related != null & isCancelled()) { - related.cancel(wasInterrupted()); - } - } - - /** - * Releases all threads in the {@link #waiters} list, and clears the list. - */ - private void releaseWaiters() { - Waiter head; - do { - head = waiters; - } while (!ATOMIC_HELPER.casWaiters(this, head, Waiter.TOMBSTONE)); - for (Waiter currentWaiter = head; - currentWaiter != null; currentWaiter = currentWaiter.next) { - currentWaiter.unpark(); - } - } - - /** - * Clears the {@link #listeners} list and prepends its contents to {@code - * onto}, least recently added first. - */ - private Listener clearListeners(Listener onto) { - // We need to - // 1. atomically swap the listeners with TOMBSTONE, this is because - // addListener uses that to to synchronize with us - // 2. reverse the linked list, because despite our rather clear contract, - // people depend on us executing listeners in the order they were added - // 3. push all the items onto 'onto' and return the new head of the stack - Listener head; - do { - head = listeners; - } while (!ATOMIC_HELPER.casListeners(this, head, Listener.TOMBSTONE)); - Listener reversedList = onto; - while (head != null) { - Listener tmp = head; - head = head.next; - tmp.next = reversedList; - reversedList = tmp; - } - return reversedList; - } - - /** - * Submits the given runnable to the given {@link Executor} catching and - * logging all {@linkplain RuntimeException runtime exceptions} thrown by - * the executor. - */ - private static void executeListener(Runnable runnable, Executor executor) { - try { - executor.execute(runnable); - } catch (RuntimeException e) { - // Log it and keep going -- bad runnable and/or executor. Don't punish - // the other runnables if we're given a bad one. We only catch - // RuntimeException because we want Errors to propagate up. - LOG.log( - Level.SEVERE, - "RuntimeException while executing runnable " + runnable + " with " + - "executor " + executor, - e); - } - } - - private abstract static class AtomicHelper { - /** - * Non volatile write of the thread to the {@link Waiter#thread} field. - */ - abstract void putThread(Waiter waiter, Thread newValue); - - /** - * Non volatile write of the waiter to the {@link Waiter#next} field. - */ - abstract void putNext(Waiter waiter, Waiter newValue); - - /** - * Performs a CAS operation on the {@link #waiters} field. - */ - abstract boolean casWaiters( - AbstractFuture future, Waiter expect, - Waiter update); - - /** - * Performs a CAS operation on the {@link #listeners} field. - */ - abstract boolean casListeners( - AbstractFuture future, Listener expect, - Listener update); - - /** - * Performs a CAS operation on the {@link #value} field. - */ - abstract boolean casValue( - AbstractFuture future, Object expect, Object update); - } - - /** - * {@link AtomicHelper} based on {@link sun.misc.Unsafe}. - *
- * <p>
Static initialization of this class will fail if the - * {@link sun.misc.Unsafe} object cannot be accessed. - */ - private static final class UnsafeAtomicHelper extends AtomicHelper { - static final sun.misc.Unsafe UNSAFE; - static final long LISTENERS_OFFSET; - static final long WAITERS_OFFSET; - static final long VALUE_OFFSET; - static final long WAITER_THREAD_OFFSET; - static final long WAITER_NEXT_OFFSET; - - static { - sun.misc.Unsafe unsafe = null; - try { - unsafe = sun.misc.Unsafe.getUnsafe(); - } catch (SecurityException tryReflectionInstead) { - try { - unsafe = - AccessController.doPrivileged( - new PrivilegedExceptionAction() { - @Override - public sun.misc.Unsafe run() throws Exception { - Class k = sun.misc.Unsafe.class; - for (java.lang.reflect.Field f : k.getDeclaredFields()) { - f.setAccessible(true); - Object x = f.get(null); - if (k.isInstance(x)) { - return k.cast(x); - } - } - throw new NoSuchFieldError("the Unsafe"); - } - }); - } catch (PrivilegedActionException e) { - throw new RuntimeException( - "Could not initialize intrinsics", e.getCause()); - } - } - try { - Class abstractFuture = AbstractFuture.class; - WAITERS_OFFSET = unsafe - .objectFieldOffset(abstractFuture.getDeclaredField("waiters")); - LISTENERS_OFFSET = unsafe - .objectFieldOffset(abstractFuture.getDeclaredField("listeners")); - VALUE_OFFSET = unsafe - .objectFieldOffset(abstractFuture.getDeclaredField("value")); - WAITER_THREAD_OFFSET = unsafe - .objectFieldOffset(Waiter.class.getDeclaredField("thread")); - WAITER_NEXT_OFFSET = unsafe - .objectFieldOffset(Waiter.class.getDeclaredField("next")); - UNSAFE = unsafe; - } catch (Exception e) { - throwIfUnchecked(e); - throw new RuntimeException(e); - } - } - - public static void throwIfUnchecked(Throwable throwable) { - checkNotNull(throwable); - if (throwable instanceof RuntimeException) { - throw (RuntimeException) throwable; - } - if (throwable instanceof Error) { - throw (Error) throwable; - } - } - - @Override - void putThread(Waiter waiter, Thread newValue) { - UNSAFE.putObject(waiter, WAITER_THREAD_OFFSET, newValue); - } - - @Override - void putNext(Waiter waiter, Waiter newValue) { - UNSAFE.putObject(waiter, WAITER_NEXT_OFFSET, newValue); - } - - /** - * Performs a CAS operation on the {@link #waiters} field. - */ - @Override - boolean casWaiters(AbstractFuture future, Waiter expect, Waiter - update) { - return UNSAFE - .compareAndSwapObject(future, WAITERS_OFFSET, expect, update); - } - - /** - * Performs a CAS operation on the {@link #listeners} field. - */ - @Override - boolean casListeners( - AbstractFuture future, Listener expect, Listener update) { - return UNSAFE - .compareAndSwapObject(future, LISTENERS_OFFSET, expect, update); - } - - /** - * Performs a CAS operation on the {@link #value} field. - */ - @Override - boolean casValue(AbstractFuture future, Object expect, Object update) { - return UNSAFE.compareAndSwapObject(future, VALUE_OFFSET, expect, update); - } - } - - /** - * {@link AtomicHelper} based on {@link AtomicReferenceFieldUpdater}. 
- */ - @SuppressWarnings("visibilitymodifier") - private static final class SafeAtomicHelper extends AtomicHelper { - final AtomicReferenceFieldUpdater waiterThreadUpdater; - final AtomicReferenceFieldUpdater waiterNextUpdater; - final AtomicReferenceFieldUpdater waitersUpdater; - final AtomicReferenceFieldUpdater - listenersUpdater; - final AtomicReferenceFieldUpdater valueUpdater; - - SafeAtomicHelper( - AtomicReferenceFieldUpdater waiterThreadUpdater, - AtomicReferenceFieldUpdater waiterNextUpdater, - AtomicReferenceFieldUpdater waitersUpdater, - AtomicReferenceFieldUpdater listenersUpdater, - AtomicReferenceFieldUpdater valueUpdater) { - this.waiterThreadUpdater = waiterThreadUpdater; - this.waiterNextUpdater = waiterNextUpdater; - this.waitersUpdater = waitersUpdater; - this.listenersUpdater = listenersUpdater; - this.valueUpdater = valueUpdater; - } - - @Override - void putThread(Waiter waiter, Thread newValue) { - waiterThreadUpdater.lazySet(waiter, newValue); - } - - @Override - void putNext(Waiter waiter, Waiter newValue) { - waiterNextUpdater.lazySet(waiter, newValue); - } - - @Override - boolean casWaiters(AbstractFuture future, Waiter expect, Waiter - update) { - return waitersUpdater.compareAndSet(future, expect, update); - } - - @Override - boolean casListeners( - AbstractFuture future, Listener expect, Listener update) { - return listenersUpdater.compareAndSet(future, expect, update); - } - - @Override - boolean casValue(AbstractFuture future, Object expect, Object update) { - return valueUpdater.compareAndSet(future, expect, update); - } - } - - /** - * {@link AtomicHelper} based on {@code synchronized} and volatile writes. - *
- * <p>
This is an implementation of last resort for when certain basic VM - * features are broken (like AtomicReferenceFieldUpdater). - */ - private static final class SynchronizedHelper extends AtomicHelper { - @Override - void putThread(Waiter waiter, Thread newValue) { - waiter.thread = newValue; - } - - @Override - void putNext(Waiter waiter, Waiter newValue) { - waiter.next = newValue; - } - - @Override - boolean casWaiters(AbstractFuture future, Waiter expect, Waiter - update) { - synchronized (future) { - if (future.waiters == expect) { - future.waiters = update; - return true; - } - return false; - } - } - - @Override - boolean casListeners( - AbstractFuture future, Listener expect, Listener update) { - synchronized (future) { - if (future.listeners == expect) { - future.listeners = update; - return true; - } - return false; - } - } - - @Override - boolean casValue(AbstractFuture future, Object expect, Object update) { - synchronized (future) { - if (future.value == expect) { - future.value = update; - return true; - } - return false; - } - } - } - - private static CancellationException cancellationExceptionWithCause( - @Nullable String message, @Nullable Throwable cause) { - CancellationException exception = new CancellationException(message); - exception.initCause(cause); - return exception; - } - - /** - * Returns an {@link Executor} that runs each task in the thread that invokes - * {@link Executor#execute execute}, as in {@link CallerRunsPolicy}. - *
- * <p>
This instance is equivalent to:

   {@code
-   *   final class DirectExecutor implements Executor {
-   *     public void execute(Runnable r) {
-   *       r.run();
-   *     }
-   *   }}
- */ - public static Executor directExecutor() { - return DirectExecutor.INSTANCE; - } - - /** - * See {@link #directExecutor} for behavioral notes. - */ - private enum DirectExecutor implements Executor { - INSTANCE; - - @Override - public void execute(Runnable command) { - command.run(); - } - - @Override - public String toString() { - return "MoreExecutors.directExecutor()"; - } - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index d8ba919cefb5..d4cdaf2cfe41 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -92,6 +92,7 @@ public class HddsVolume extends StorageVolume { private File dbParentDir; private File deletedContainerDir; private AtomicBoolean dbLoaded = new AtomicBoolean(false); + private final AtomicBoolean dbLoadFailure = new AtomicBoolean(false); /** * Builder for HddsVolume. @@ -257,6 +258,11 @@ public synchronized VolumeCheckResult check(@Nullable Boolean unused) VolumeCheckResult result = super.check(unused); DatanodeConfiguration df = getConf().getObject(DatanodeConfiguration.class); + if (isDbLoadFailure()) { + LOG.warn("Volume {} failed to access RocksDB: RocksDB parent directory is null, " + + "the volume might not have been loaded properly.", getStorageDir()); + return VolumeCheckResult.FAILED; + } if (result != VolumeCheckResult.HEALTHY || !df.getContainerSchemaV3Enabled() || !isDbLoaded()) { return result; @@ -313,6 +319,11 @@ public File getDbParentDir() { return this.dbParentDir; } + @VisibleForTesting + public void setDbParentDir(File dbParentDir) { + this.dbParentDir = dbParentDir; + } + public File getDeletedContainerDir() { return this.deletedContainerDir; } @@ -326,6 +337,10 @@ public boolean isDbLoaded() { return dbLoaded.get(); } + public boolean isDbLoadFailure() { + return dbLoadFailure.get(); + } + public void loadDbStore(boolean readOnly) throws IOException { // DN startup for the first time, not registered yet, // so the DbVolume is not formatted. 
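Note on the HddsVolume changes above: the new dbLoadFailure flag lets the periodic volume check fail fast when the per-volume RocksDB could not be opened, instead of relying only on filesystem-level checks. Below is a minimal sketch of that pattern under simplified, placeholder names (DbBackedVolume and openRocksDb are illustrative only, not Ozone classes):

import java.util.concurrent.atomic.AtomicBoolean;

// Sketch: remember a DB load failure and surface it from the health check.
final class DbBackedVolume {
  private final AtomicBoolean dbLoaded = new AtomicBoolean(false);
  private final AtomicBoolean dbLoadFailure = new AtomicBoolean(false);

  void loadDbStore() {
    try {
      openRocksDb();              // placeholder for the real DB initialization
      dbLoaded.set(true);
      dbLoadFailure.set(false);
    } catch (Throwable t) {       // Throwable, so Errors also mark the volume bad
      dbLoadFailure.set(true);
    }
  }

  // The volume check consults the flag before any deeper I/O checks.
  boolean checkHealthy() {
    return !dbLoadFailure.get();
  }

  private void openRocksDb() {
    // intentionally empty in this sketch
  }
}

The real HddsVolume.check() additionally logs a warning and returns VolumeCheckResult.FAILED when the flag is set, as shown in the hunk above.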
@@ -363,7 +378,8 @@ public void loadDbStore(boolean readOnly) throws IOException { String containerDBPath = containerDBFile.getAbsolutePath(); try { initPerDiskDBStore(containerDBPath, getConf(), readOnly); - } catch (IOException e) { + } catch (Throwable e) { + dbLoadFailure.set(true); throw new IOException("Can't init db instance under path " + containerDBPath + " for volume " + getStorageID(), e); } @@ -417,9 +433,11 @@ public void createDbStore(MutableVolumeSet dbVolumeSet) throws IOException { try { HddsVolumeUtil.initPerDiskDBStore(containerDBPath, getConf(), false); dbLoaded.set(true); + dbLoadFailure.set(false); LOG.info("SchemaV3 db is created and loaded at {} for volume {}", containerDBPath, getStorageID()); } catch (IOException e) { + dbLoadFailure.set(true); String errMsg = "Can't create db instance under path " + containerDBPath + " for volume " + getStorageID(); LOG.error(errMsg, e); @@ -448,6 +466,7 @@ private void closeDbStore() { .getAbsolutePath(); DatanodeStoreCache.getInstance().removeDB(containerDBPath); dbLoaded.set(false); + dbLoadFailure.set(false); LOG.info("SchemaV3 db is stopped at {} for volume {}", containerDBPath, getStorageID()); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index 3c0b6e618ee1..e195b127d499 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -442,12 +442,20 @@ public Map> getVolumeStateMap() { public boolean hasEnoughVolumes() { // Max number of bad volumes allowed, should have at least // 1 good volume + boolean hasEnoughVolumes; if (maxVolumeFailuresTolerated == StorageVolumeChecker.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) { - return getVolumesList().size() >= 1; + hasEnoughVolumes = getVolumesList().size() >= 1; } else { - return getFailedVolumesList().size() <= maxVolumeFailuresTolerated; + hasEnoughVolumes = getFailedVolumesList().size() <= maxVolumeFailuresTolerated; } + if (!hasEnoughVolumes) { + LOG.error("Not enough volumes in MutableVolumeSet. 
DatanodeUUID: {}, VolumeType: {}, " + + "MaxVolumeFailuresTolerated: {}, ActiveVolumes: {}, FailedVolumes: {}", + datanodeUuid, volumeType, maxVolumeFailuresTolerated, + getVolumesList().size(), getFailedVolumesList().size()); + } + return hasEnoughVolumes; } public StorageLocationReport[] getStorageReport() { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java index d9d5a667b30b..b85ac15c54e4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java @@ -456,11 +456,6 @@ public long getAvailable() { } - public long getAvailable(SpaceUsageSource precomputedVolumeSpace) { - return volumeInfo.map(info -> info.getAvailable(precomputedVolumeSpace)) - .orElse(0L); - } - public SpaceUsageSource getCurrentUsage() { return volumeInfo.map(VolumeInfo::getCurrentUsage) .orElse(SpaceUsageSource.UNKNOWN); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolumeChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolumeChecker.java index 4917810bd97c..e81fd1008ff6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolumeChecker.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolumeChecker.java @@ -18,6 +18,11 @@ package org.apache.hadoop.ozone.container.common.volume; +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.common.util.concurrent.ThreadFactoryBuilder; import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.ArrayList; @@ -38,7 +43,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import com.google.common.util.concurrent.MoreExecutors; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; @@ -46,10 +50,6 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Sets; -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ThreadFactoryBuilder; import jakarta.annotation.Nullable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java index 991f105d15b2..1548b30c9fb6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java @@ -144,8 +144,7 @@ 
public Optional> schedule( final ListenableFuture lf; if (diskCheckTimeout > 0) { - lf = TimeoutFuture - .create(lfWithoutTimeout, diskCheckTimeout, TimeUnit.MILLISECONDS, + lf = Futures.withTimeout(lfWithoutTimeout, diskCheckTimeout, TimeUnit.MILLISECONDS, scheduledExecutorService); } else { lf = lfWithoutTimeout; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java deleted file mode 100644 index 42e2ed5758eb..000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright (C) 2007 The Guava Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * Some portions of this class have been modified to make it functional in this - * package. - */ -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ListenableFuture; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import jakarta.annotation.Nullable; -import java.util.concurrent.Future; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** - * Implementation of {@code Futures#withTimeout}. - *
- * <p>
Future that delegates to another but will finish early (via a - * {@link TimeoutException} wrapped in an {@link ExecutionException}) if the - * specified duration expires. The delegate future is interrupted and - * cancelled if it times out. - */ -final class TimeoutFuture extends AbstractFuture.TrustedFuture { - public static final Logger LOG = LoggerFactory.getLogger( - TimeoutFuture.class); - - static ListenableFuture create( - ListenableFuture delegate, - long time, - TimeUnit unit, - ScheduledExecutorService scheduledExecutor) { - TimeoutFuture result = new TimeoutFuture(delegate); - TimeoutFuture.Fire fire = new TimeoutFuture.Fire(result); - result.timer = scheduledExecutor.schedule(fire, time, unit); - delegate.addListener(fire, directExecutor()); - return result; - } - - /* - * Memory visibility of these fields. There are two cases to consider. - * - * 1. visibility of the writes to these fields to Fire.run: - * - * The initial write to delegateRef is made definitely visible via the - * semantics of addListener/SES.schedule. The later racy write in cancel() - * is not guaranteed to be observed, however that is fine since the - * correctness is based on the atomic state in our base class. The initial - * write to timer is never definitely visible to Fire.run since it is - * assigned after SES.schedule is called. Therefore Fire.run has to check - * for null. However, it should be visible if Fire.run is called by - * delegate.addListener since addListener is called after the assignment - * to timer, and importantly this is the main situation in which we need to - * be able to see the write. - * - * 2. visibility of the writes to an afterDone() call triggered by cancel(): - * - * Since these fields are non-final that means that TimeoutFuture is not - * being 'safely published', thus a motivated caller may be able to expose - * the reference to another thread that would then call cancel() and be - * unable to cancel the delegate. There are a number of ways to solve this, - * none of which are very pretty, and it is currently believed to be a - * purely theoretical problem (since the other actions should supply - * sufficient write-barriers). - */ - - @Nullable private ListenableFuture delegateRef; - @Nullable private Future timer; - - private TimeoutFuture(ListenableFuture delegate) { - this.delegateRef = Preconditions.checkNotNull(delegate); - } - - /** - * A runnable that is called when the delegate or the timer completes. - */ - private static final class Fire implements Runnable { - @Nullable - private TimeoutFuture timeoutFutureRef; - - Fire( - TimeoutFuture timeoutFuture) { - this.timeoutFutureRef = timeoutFuture; - } - - @Override - public void run() { - // If either of these reads return null then we must be after a - // successful cancel or another call to this method. - TimeoutFuture timeoutFuture = timeoutFutureRef; - if (timeoutFuture == null) { - return; - } - ListenableFuture delegate = timeoutFuture.delegateRef; - if (delegate == null) { - return; - } - - /* - * If we're about to complete the TimeoutFuture, we want to release our - * reference to it. Otherwise, we'll pin it (and its result) in memory - * until the timeout task is GCed. (The need to clear our reference to - * the TimeoutFuture is the reason we use a *static* nested class with - * a manual reference back to the "containing" class.) - * - * This has the nice-ish side effect of limiting reentrancy: run() calls - * timeoutFuture.setException() calls run(). 
That reentrancy would - * already be harmless, since timeoutFuture can be set (and delegate - * cancelled) only once. (And "set only once" is important for other - * reasons: run() can still be invoked concurrently in different threads, - * even with the above null checks.) - */ - timeoutFutureRef = null; - if (delegate.isDone()) { - timeoutFuture.setFuture(delegate); - } else { - try { - timeoutFuture.setException( - new TimeoutException("Future timed out: " + delegate)); - } finally { - delegate.cancel(true); - } - } - } - } - - @Override - protected void afterDone() { - maybePropagateCancellation(delegateRef); - - Future localTimer = timer; - // Try to cancel the timer as an optimization. - // timer may be null if this call to run was by the timer task since there - // is no happens-before edge between the assignment to timer and an - // execution of the timer task. - if (localTimer != null) { - localTimer.cancel(false); - } - - delegateRef = null; - timer = null; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java index 6ee35ba6b096..af890269255d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java @@ -20,11 +20,9 @@ import java.io.File; import java.io.IOException; -import java.util.Collection; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.conf.StorageSize; import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; import org.apache.hadoop.hdds.fs.SpaceUsageCheckParams; @@ -33,10 +31,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT; - /** * Stores information about a disk/volume. * @@ -100,13 +94,6 @@ public final class VolumeInfo { // Space usage calculator private final VolumeUsage usage; - // Capacity configured. This is useful when we want to - // limit the visible capacity for tests. If negative, then we just - // query from the filesystem. - private long configuredCapacity; - - private long reservedInBytes; - /** * Builder for VolumeInfo. */ @@ -115,7 +102,6 @@ public static class Builder { private final String rootDir; private SpaceUsageCheckFactory usageCheckFactory; private StorageType storageType; - private long configuredCapacity; public Builder(String root, ConfigurationSource config) { this.rootDir = root; @@ -127,11 +113,6 @@ public Builder storageType(StorageType st) { return this; } - public Builder configuredCapacity(long capacity) { - this.configuredCapacity = capacity; - return this; - } - public Builder usageCheckFactory(SpaceUsageCheckFactory factory) { this.usageCheckFactory = factory; return this; @@ -142,55 +123,6 @@ public VolumeInfo build() throws IOException { } } - private long getReserved(ConfigurationSource conf) { - if (conf.isConfigured(HDDS_DATANODE_DIR_DU_RESERVED_PERCENT) - && conf.isConfigured(HDDS_DATANODE_DIR_DU_RESERVED)) { - LOG.error("Both {} and {} are set. Set either one, not both. 
If the " + - "volume matches with volume parameter in former config, it is set " + - "as reserved space. If not it fall backs to the latter config.", - HDDS_DATANODE_DIR_DU_RESERVED, HDDS_DATANODE_DIR_DU_RESERVED_PERCENT); - } - - // 1. If hdds.datanode.dir.du.reserved is set for a volume then make it - // as the reserved bytes. - Collection reserveList = conf.getTrimmedStringCollection( - HDDS_DATANODE_DIR_DU_RESERVED); - for (String reserve : reserveList) { - String[] words = reserve.split(":"); - if (words.length < 2) { - LOG.error("Reserved space should config in pair, but current is {}", - reserve); - continue; - } - - if (words[0].trim().equals(rootDir)) { - try { - StorageSize size = StorageSize.parse(words[1].trim()); - return (long) size.getUnit().toBytes(size.getValue()); - } catch (Exception e) { - LOG.error("Failed to parse StorageSize: {}", words[1].trim(), e); - break; - } - } - } - - // 2. If hdds.datanode.dir.du.reserved not set and - // hdds.datanode.dir.du.reserved.percent is set, fall back to this config. - if (conf.isConfigured(HDDS_DATANODE_DIR_DU_RESERVED_PERCENT)) { - float percentage = conf.getFloat(HDDS_DATANODE_DIR_DU_RESERVED_PERCENT, - HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT); - if (0 <= percentage && percentage <= 1) { - return (long) Math.ceil(this.usage.getCapacity() * percentage); - } - //If it comes here then the percentage is not between 0-1. - LOG.error("The value of {} should be between 0 to 1. Defaulting to 0.", - HDDS_DATANODE_DIR_DU_RESERVED_PERCENT); - } - - //Both configs are not set, return 0. - return 0; - } - private VolumeInfo(Builder b) throws IOException { this.rootDir = b.rootDir; @@ -206,9 +138,6 @@ private VolumeInfo(Builder b) throws IOException { this.storageType = (b.storageType != null ? b.storageType : StorageType.DEFAULT); - this.configuredCapacity = (b.configuredCapacity != 0 ? 
- b.configuredCapacity : -1); - SpaceUsageCheckFactory usageCheckFactory = b.usageCheckFactory; if (usageCheckFactory == null) { usageCheckFactory = SpaceUsageCheckFactory.create(b.conf); @@ -216,16 +145,11 @@ private VolumeInfo(Builder b) throws IOException { SpaceUsageCheckParams checkParams = usageCheckFactory.paramsFor(root); - this.usage = new VolumeUsage(checkParams); - this.reservedInBytes = getReserved(b.conf); - this.usage.setReserved(reservedInBytes); + usage = new VolumeUsage(checkParams, b.conf); } public long getCapacity() { - if (configuredCapacity < 0) { - return Math.max(usage.getCapacity() - reservedInBytes, 0); - } - return configuredCapacity; + return usage.getCapacity(); } /** @@ -236,17 +160,11 @@ public long getCapacity() { * A) avail = capacity - used */ public long getAvailable() { - long avail = getCapacity() - usage.getUsedSpace(); - return Math.max(Math.min(avail, usage.getAvailable()), 0); - } - - public long getAvailable(SpaceUsageSource precomputedValues) { - long avail = precomputedValues.getCapacity() - usage.getUsedSpace(); - return Math.max(Math.min(avail, usage.getAvailable(precomputedValues)), 0); + return usage.getAvailable(); } public SpaceUsageSource getCurrentUsage() { - return usage.snapshot(); + return usage.getCurrentUsage(); } public void incrementUsedSpace(long usedSpace) { @@ -285,8 +203,7 @@ public VolumeUsage getUsageForTesting() { return usage; } - @VisibleForTesting public long getReservedInBytes() { - return reservedInBytes; + return usage.getReservedBytes(); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java index 18e7354ec1da..e59cab0d539f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java @@ -32,15 +32,13 @@ public class VolumeInfoMetrics { private String metricsSourceName = VolumeInfoMetrics.class.getSimpleName(); - private String volumeRootStr; - private HddsVolume volume; + private final HddsVolume volume; /** - * @param identifier Typically, path to volume root. e.g. /data/hdds + * @param identifier Typically, path to volume root. E.g. 
/data/hdds */ public VolumeInfoMetrics(String identifier, HddsVolume ref) { this.metricsSourceName += '-' + identifier; - this.volumeRootStr = identifier; this.volume = ref; init(); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java index b2a66ba16b4a..be86cdaeadf5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java @@ -18,38 +18,60 @@ package org.apache.hadoop.ozone.container.common.volume; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.StorageSize; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.fs.CachingSpaceUsageSource; import org.apache.hadoop.hdds.fs.SpaceUsageCheckParams; import org.apache.hadoop.hdds.fs.SpaceUsageSource; +import org.apache.ratis.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collection; + import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_DEFAULT; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_MIN_FREE_SPACE_PERCENT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT; /** * Class that wraps the space df of the Datanode Volumes used by SCM * containers. 
*/ -public class VolumeUsage implements SpaceUsageSource { +public class VolumeUsage { private final CachingSpaceUsageSource source; private boolean shutdownComplete; - private long reservedInBytes; + private final long reservedInBytes; private static final Logger LOG = LoggerFactory.getLogger(VolumeUsage.class); - VolumeUsage(SpaceUsageCheckParams checkParams) { + VolumeUsage(SpaceUsageCheckParams checkParams, ConfigurationSource conf) { source = new CachingSpaceUsageSource(checkParams); + reservedInBytes = getReserved(conf, checkParams.getPath(), source.getCapacity()); + Preconditions.assertTrue(reservedInBytes >= 0, reservedInBytes + " < 0"); start(); // TODO should start only on demand } - @Override + @VisibleForTesting + SpaceUsageSource realUsage() { + return source.snapshot(); + } + public long getCapacity() { - return Math.max(source.getCapacity(), 0); + return getCurrentUsage().getCapacity(); + } + + public long getAvailable() { + return getCurrentUsage().getAvailable(); + } + + public long getUsedSpace() { + return getCurrentUsage().getUsedSpace(); } /** @@ -60,24 +82,15 @@ public long getCapacity() { * remainingReserved * B) avail = fsAvail - Max(reserved - other, 0); */ - @Override - public long getAvailable() { - return source.getAvailable() - getRemainingReserved(); - } - - public long getAvailable(SpaceUsageSource precomputedVolumeSpace) { - long available = precomputedVolumeSpace.getAvailable(); - return available - getRemainingReserved(precomputedVolumeSpace); - } - - @Override - public long getUsedSpace() { - return source.getUsedSpace(); - } + public SpaceUsageSource getCurrentUsage() { + SpaceUsageSource real = realUsage(); - @Override - public SpaceUsageSource snapshot() { - return source.snapshot(); + return reservedInBytes == 0 + ? real + : new SpaceUsageSource.Fixed( + Math.max(real.getCapacity() - reservedInBytes, 0), + Math.max(real.getAvailable() - getRemainingReserved(real), 0), + real.getUsedSpace()); } public void incrementUsedSpace(long usedSpace) { @@ -94,19 +107,10 @@ public void decrementUsedSpace(long reclaimedSpace) { * so there could be that DU value > totalUsed when there are deletes. * @return other used space */ - private long getOtherUsed() { - long totalUsed = source.getCapacity() - source.getAvailable(); - return Math.max(totalUsed - source.getUsedSpace(), 0L); - } - - private long getOtherUsed(SpaceUsageSource precomputedVolumeSpace) { + private static long getOtherUsed(SpaceUsageSource precomputedVolumeSpace) { long totalUsed = precomputedVolumeSpace.getCapacity() - precomputedVolumeSpace.getAvailable(); - return Math.max(totalUsed - source.getUsedSpace(), 0L); - } - - private long getRemainingReserved() { - return Math.max(reservedInBytes - getOtherUsed(), 0L); + return Math.max(totalUsed - precomputedVolumeSpace.getUsedSpace(), 0L); } private long getRemainingReserved( @@ -129,8 +133,8 @@ public void refreshNow() { source.refreshNow(); } - public void setReserved(long reserved) { - this.reservedInBytes = reserved; + public long getReservedBytes() { + return reservedInBytes; } /** @@ -174,4 +178,55 @@ public static boolean hasVolumeEnoughSpace(long volumeAvailableSpace, return (volumeAvailableSpace - volumeCommittedBytesCount) > Math.max(requiredSpace, volumeFreeSpaceToSpare); } + + private static long getReserved(ConfigurationSource conf, String rootDir, + long capacity) { + if (conf.isConfigured(HDDS_DATANODE_DIR_DU_RESERVED_PERCENT) + && conf.isConfigured(HDDS_DATANODE_DIR_DU_RESERVED)) { + LOG.error("Both {} and {} are set. 
Set either one, not both. If the " + + "volume matches with volume parameter in former config, it is set " + + "as reserved space. If not it fall backs to the latter config.", + HDDS_DATANODE_DIR_DU_RESERVED, HDDS_DATANODE_DIR_DU_RESERVED_PERCENT); + } + + // 1. If hdds.datanode.dir.du.reserved is set for a volume then make it + // as the reserved bytes. + Collection reserveList = conf.getTrimmedStringCollection( + HDDS_DATANODE_DIR_DU_RESERVED); + for (String reserve : reserveList) { + String[] words = reserve.split(":"); + if (words.length < 2) { + LOG.error("Reserved space should config in pair, but current is {}", + reserve); + continue; + } + + if (words[0].trim().equals(rootDir)) { + try { + StorageSize size = StorageSize.parse(words[1].trim()); + return (long) size.getUnit().toBytes(size.getValue()); + } catch (Exception e) { + LOG.error("Failed to parse StorageSize: {}", words[1].trim(), e); + break; + } + } + } + + // 2. If hdds.datanode.dir.du.reserved not set and + // hdds.datanode.dir.du.reserved.percent is set, fall back to this config. + if (conf.isConfigured(HDDS_DATANODE_DIR_DU_RESERVED_PERCENT)) { + float percentage = conf.getFloat(HDDS_DATANODE_DIR_DU_RESERVED_PERCENT, + HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT); + if (0 <= percentage && percentage <= 1) { + return (long) Math.ceil(capacity * percentage); + } + //If it comes here then the percentage is not between 0-1. + LOG.error("The value of {} should be between 0 to 1. Defaulting to 0.", + HDDS_DATANODE_DIR_DU_RESERVED_PERCENT); + } + + //Both configs are not set, return 0. + return 0; + } + } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java index 234439a00c24..dccc271f6de2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java @@ -35,8 +35,8 @@ import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; import org.apache.hadoop.hdds.scm.storage.BufferPool; import org.apache.hadoop.hdds.scm.storage.ECBlockOutputStream; -import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient; import org.apache.hadoop.hdds.security.SecurityConfig; +import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient; import org.apache.hadoop.hdds.security.token.ContainerTokenIdentifier; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.utils.IOUtils; @@ -50,6 +50,7 @@ import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.security.token.Token; +import org.apache.ratis.util.MemoizedSupplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -70,7 +71,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.SynchronousQueue; -import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -101,12 +101,14 @@ public class ECReconstructionCoordinator implements Closeable { private static final int 
EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE = 3; + private static final int EC_RECONSTRUCT_STRIPE_WRITE_POOL_MIN_SIZE = 5; + private final ECContainerOperationClient containerOperationClient; private final ByteBufferPool byteBufferPool; - private final ExecutorService ecReconstructExecutor; - + private final ExecutorService ecReconstructReadExecutor; + private final MemoizedSupplier ecReconstructWriteExecutor; private final BlockInputStreamFactory blockInputStreamFactory; private final TokenHelper tokenHelper; private final ContainerClientMetrics clientMetrics; @@ -123,20 +125,18 @@ public ECReconstructionCoordinator( this.containerOperationClient = new ECContainerOperationClient(conf, certificateClient); this.byteBufferPool = new ElasticByteBufferPool(); - ThreadFactory threadFactory = new ThreadFactoryBuilder() - .setNameFormat(threadNamePrefix + "ec-reconstruct-reader-TID-%d") - .build(); ozoneClientConfig = conf.getObject(OzoneClientConfig.class); - this.ecReconstructExecutor = - new ThreadPoolExecutor(EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE, - ozoneClientConfig.getEcReconstructStripeReadPoolLimit(), - 60, - TimeUnit.SECONDS, - new SynchronousQueue<>(), - threadFactory, - new ThreadPoolExecutor.CallerRunsPolicy()); + this.ecReconstructReadExecutor = createThreadPoolExecutor( + EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE, + ozoneClientConfig.getEcReconstructStripeReadPoolLimit(), + threadNamePrefix + "ec-reconstruct-reader-TID-%d"); + this.ecReconstructWriteExecutor = MemoizedSupplier.valueOf( + () -> createThreadPoolExecutor( + EC_RECONSTRUCT_STRIPE_WRITE_POOL_MIN_SIZE, + ozoneClientConfig.getEcReconstructStripeWritePoolLimit(), + threadNamePrefix + "ec-reconstruct-writer-TID-%d")); this.blockInputStreamFactory = BlockInputStreamFactoryImpl - .getInstance(byteBufferPool, () -> ecReconstructExecutor); + .getInstance(byteBufferPool, () -> ecReconstructReadExecutor); tokenHelper = new TokenHelper(new SecurityConfig(conf), secretKeyClient); this.clientMetrics = ContainerClientMetrics.acquire(); this.metrics = metrics; @@ -232,7 +232,7 @@ private ECBlockOutputStream getECBlockOutputStream( containerOperationClient.singleNodePipeline(datanodeDetails, repConfig, replicaIndex), BufferPool.empty(), ozoneClientConfig, - blockLocationInfo.getToken(), clientMetrics, streamBufferArgs); + blockLocationInfo.getToken(), clientMetrics, streamBufferArgs, ecReconstructWriteExecutor); } @VisibleForTesting @@ -267,12 +267,15 @@ public void reconstructECBlockGroup(BlockLocationInfo blockLocationInfo, return; } + OzoneClientConfig clientConfig = this.ozoneClientConfig; + clientConfig.setChecksumVerify(true); try (ECBlockReconstructedStripeInputStream sis = new ECBlockReconstructedStripeInputStream( - repConfig, blockLocationInfo, true, + repConfig, blockLocationInfo, this.containerOperationClient.getXceiverClientManager(), null, this.blockInputStreamFactory, byteBufferPool, - this.ecReconstructExecutor)) { + this.ecReconstructReadExecutor, + clientConfig)) { ECBlockOutputStream[] targetBlockStreams = new ECBlockOutputStream[toReconstructIndexes.size()]; @@ -457,6 +460,9 @@ public void close() throws IOException { if (containerOperationClient != null) { containerOperationClient.close(); } + if (ecReconstructWriteExecutor.isInitialized()) { + ecReconstructWriteExecutor.get().shutdownNow(); + } } private Pipeline rebuildInputPipeline(ECReplicationConfig repConfig, @@ -590,4 +596,12 @@ OptionalLong getTermOfLeaderSCM() { .map(StateContext::getTermOfLeaderSCM) .orElse(OptionalLong.empty()); } + + private static 
ExecutorService createThreadPoolExecutor( + int corePoolSize, int maximumPoolSize, String threadNameFormat) { + return new ThreadPoolExecutor(corePoolSize, maximumPoolSize, + 60, TimeUnit.SECONDS, new SynchronousQueue<>(), + new ThreadFactoryBuilder().setNameFormat(threadNameFormat).build(), + new ThreadPoolExecutor.CallerRunsPolicy()); + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java index ab78c6055cdf..70539111fb99 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java @@ -20,6 +20,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdfs.util.Canceler; @@ -45,7 +46,6 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; -import java.util.Arrays; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.slf4j.Logger; @@ -345,7 +345,7 @@ private ScanResult scanBlock(BlockData block, DataTransferThrottler throttler, File chunkFile; try { chunkFile = layout.getChunkFile(onDiskContainerData, - block.getBlockID(), ChunkInfo.getFromProtoBuf(chunk)); + block.getBlockID(), chunk.getChunkName()); } catch (IOException ex) { return ScanResult.unhealthy( ScanResult.FailureType.MISSING_CHUNK_FILE, @@ -421,8 +421,8 @@ private static ScanResult verifyChecksum(BlockData block, " for block %s", ChunkInfo.getFromProtoBuf(chunk), i, - Arrays.toString(expected.toByteArray()), - Arrays.toString(actual.toByteArray()), + StringUtils.bytes2Hex(expected.asReadOnlyByteBuffer()), + StringUtils.bytes2Hex(actual.asReadOnlyByteBuffer()), block.getBlockID()); return ScanResult.unhealthy( ScanResult.FailureType.CORRUPT_CHUNK, chunkFile, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 59009ef9dfef..ed13ebc93b99 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -103,6 +103,7 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.PUT_SMALL_FILE_ERROR; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getBlockDataResponse; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getBlockLengthResponse; +import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getEchoResponse; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getFinalizeBlockResponse; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getGetSmallFileResponseSuccess; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getListBlockResponse; @@ -111,6 +112,7 @@ import static 
org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getReadContainerResponse; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getSuccessResponse; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getSuccessResponseBuilder; +import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getWriteChunkResponseSuccess; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.malformedRequest; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.putBlockResponseSuccess; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.unsupportedRequest; @@ -167,9 +169,9 @@ public KeyValueHandler(ConfigurationSource config, // Requests. final int threadCountPerDisk = conf.getInt( OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); final int numberOfDisks = HddsServerUtil.getDatanodeStorageDirs(conf).size(); containerCreationLocks = Striped.lazyWeakLock( @@ -279,6 +281,8 @@ static ContainerCommandResponseProto dispatchRequest(KeyValueHandler handler, return handler.handleGetCommittedBlockLength(request, kvContainer); case FinalizeBlock: return handler.handleFinalizeBlock(request, kvContainer); + case Echo: + return handler.handleEcho(request, kvContainer); default: return null; } @@ -611,6 +615,11 @@ ContainerCommandResponseProto handleFinalizeBlock( return getFinalizeBlockResponse(request, responseData); } + ContainerCommandResponseProto handleEcho( + ContainerCommandRequestProto request, KeyValueContainer kvContainer) { + return getEchoResponse(request); + } + /** * Handle Get Block operation. Calls BlockManager to process the request. */ @@ -772,11 +781,14 @@ ContainerCommandResponseProto handleReadChunk( data = chunkManager.readChunk(kvContainer, blockID, chunkInfo, dispatcherContext); + LOG.debug("read chunk from block {} chunk {}", blockID, chunkInfo); // Validate data only if the read chunk is issued by Ratis for its // internal logic. // For client reads, the client is expected to validate. 
if (DispatcherContext.op(dispatcherContext).readFromTmpFile()) { validateChunkChecksumData(data, chunkInfo); + metrics.incBytesReadStateMachine(chunkInfo.getLen()); + metrics.incNumReadStateMachine(); } metrics.incContainerBytesStats(Type.ReadChunk, chunkInfo.getLen()); } catch (StorageContainerException ex) { @@ -809,7 +821,7 @@ private void validateChunkChecksumData(ChunkBuffer data, ChunkInfo info) throws StorageContainerException { if (validateChunkChecksumData) { try { - Checksum.verifyChecksum(data, info.getChecksumData(), 0); + Checksum.verifyChecksum(data.duplicate(data.position(), data.limit()), info.getChecksumData(), 0); } catch (OzoneChecksumException ex) { throw ChunkUtils.wrapInStorageContainerException(ex); } @@ -831,6 +843,7 @@ ContainerCommandResponseProto handleWriteChunk( return malformedRequest(request); } + ContainerProtos.BlockData blockDataProto = null; try { checkContainerOpen(kvContainer); @@ -854,6 +867,28 @@ ContainerCommandResponseProto handleWriteChunk( chunkManager .writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext); + final boolean isCommit = dispatcherContext.getStage().isCommit(); + if (isCommit && writeChunk.hasBlock()) { + metrics.incContainerOpsMetrics(Type.PutBlock); + BlockData blockData = BlockData.getFromProtoBuf( + writeChunk.getBlock().getBlockData()); + // optimization for hsync when WriteChunk is in commit phase: + // + // block metadata is piggybacked in the same message. + // there will not be an additional PutBlock request. + // + // End of block will always be sent as a standalone PutBlock. + // the PutBlock piggybacked in WriteChunk is never end of block. + // + // do not do this in WRITE_DATA phase otherwise PutBlock will be out + // of order. + blockData.setBlockCommitSequenceId(dispatcherContext.getLogIndex()); + blockManager.putBlock(kvContainer, blockData, false); + blockDataProto = blockData.getProtoBufMessage(); + final long numBytes = blockDataProto.getSerializedSize(); + metrics.incContainerBytesStats(Type.PutBlock, numBytes); + } + // We should increment stats after writeChunk if (isWrite) { metrics.incContainerBytesStats(Type.WriteChunk, writeChunk @@ -867,7 +902,7 @@ ContainerCommandResponseProto handleWriteChunk( request); } - return getSuccessResponse(request); + return getWriteChunkResponseSuccess(request, blockDataProto); } /** @@ -910,9 +945,9 @@ ContainerCommandResponseProto handlePutSmallFile( // chunks will be committed as a part of handling putSmallFile // here. There is no need to maintain this info in openContainerBlockMap. 
+ validateChunkChecksumData(data, chunkInfo); chunkManager .writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext); - validateChunkChecksumData(data, chunkInfo); chunkManager.finishWriteChunks(kvContainer, blockData); List chunks = new LinkedList<>(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java index a45055821a41..e966a0bed862 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java @@ -124,15 +124,13 @@ private static String getContainerSubDirectory(long containerId) { */ public static File getContainerDBFile(KeyValueContainerData containerData) { if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { + Preconditions.checkNotNull(containerData.getVolume().getDbParentDir(), "Base Directory cannot be null"); return new File(containerData.getVolume().getDbParentDir(), OzoneConsts.CONTAINER_DB_NAME); } - return getContainerDBFile(containerData.getMetadataPath(), containerData); - } - - public static File getContainerDBFile(String baseDir, - KeyValueContainerData containerData) { - return new File(baseDir, containerData.getContainerID() + + Preconditions.checkNotNull(containerData.getMetadataPath(), "Metadata Directory cannot be null"); + return new File(containerData.getMetadataPath(), containerData.getContainerID() + OzoneConsts.DN_CONTAINER_DB); } + } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java index 55e35f5741ed..b7d5b5fa59eb 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java @@ -23,7 +23,6 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.List; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -32,7 +31,6 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; @@ -65,12 +63,6 @@ private KeyValueContainerUtil() { private static final Logger LOG = LoggerFactory.getLogger( KeyValueContainerUtil.class); - /** - * - * @param containerMetaDataPath - * @throws IOException - */ - /** * creates metadata path, chunks path and metadata DB for the specified * container. 
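The getContainerDBFile change above spells out the two lookup rules and fails fast with an explicit message when the expected parent directory has not been resolved: schema V3 containers share one RocksDB per volume under the volume's DB parent directory, while older schemas keep a per-container DB under the container's metadata path. A minimal, self-contained sketch of that rule is shown below; ContainerDbPathSketch, its parameters, and the file names are illustrative stand-ins, not the project's actual API.

import java.io.File;
import java.util.Objects;

/** Simplified illustration of resolving the container DB location by schema version. */
public final class ContainerDbPathSketch {

  /** Schema V3: one shared RocksDB per volume. Older schemas: one DB per container. */
  static File resolveDbFile(String schemaVersion, File dbParentDir,
      File metadataDir, long containerId) {
    if ("3".equals(schemaVersion)) {
      Objects.requireNonNull(dbParentDir, "Base Directory cannot be null");
      return new File(dbParentDir, "container.db");
    }
    Objects.requireNonNull(metadataDir, "Metadata Directory cannot be null");
    return new File(metadataDir, containerId + "-dn-container.db");
  }

  public static void main(String[] args) {
    // Schema V3: the per-container metadata dir is not consulted.
    System.out.println(resolveDbFile("3", new File("/data/hdds/db"), null, 12L));
    // Schema V1/V2: the volume-level DB parent dir is not consulted.
    System.out.println(resolveDbFile("2", null, new File("/data/hdds/12/metadata"), 12L));
  }
}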
@@ -434,46 +426,9 @@ private static void initializeUsedBytesAndBlockCount(DatanodeStore store, } public static long getBlockLength(BlockData block) throws IOException { - long blockLen = 0; - List chunkInfoList = block.getChunks(); - - for (ContainerProtos.ChunkInfo chunk : chunkInfoList) { - ChunkInfo info = ChunkInfo.getFromProtoBuf(chunk); - blockLen += info.getLen(); - } - - return blockLen; - } - - /** - * Returns the path where data or chunks live for a given container. - * - * @param kvContainerData - KeyValueContainerData - * @return - Path to the chunks directory - */ - public static Path getDataDirectory(KeyValueContainerData kvContainerData) { - - String chunksPath = kvContainerData.getChunksPath(); - Preconditions.checkNotNull(chunksPath); - - return Paths.get(chunksPath); - } - - /** - * Container metadata directory -- here is where the RocksDB and - * .container file lives. - * - * @param kvContainerData - KeyValueContainerData - * @return Path to the metadata directory - */ - public static Path getMetadataDirectory( - KeyValueContainerData kvContainerData) { - - String metadataPath = kvContainerData.getMetadataPath(); - Preconditions.checkNotNull(metadataPath); - - return Paths.get(metadataPath); - + return block.getChunks().stream() + .mapToLong(ContainerProtos.ChunkInfo::getLen) + .sum(); } public static boolean isSameSchemaVersion(String schema, String other) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java index e40434f508e6..413f36a7616b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java @@ -42,7 +42,6 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.BCSID_MISMATCH; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNKNOWN_BCSID; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL_DEFAULT; @@ -55,7 +54,8 @@ */ public class BlockManagerImpl implements BlockManager { - static final Logger LOG = LoggerFactory.getLogger(BlockManagerImpl.class); + public static final Logger LOG = + LoggerFactory.getLogger(BlockManagerImpl.class); private ConfigurationSource config; @@ -66,6 +66,7 @@ public class BlockManagerImpl implements BlockManager { // Default Read Buffer capacity when Checksum is not present private final int defaultReadBufferCapacity; private final int readMappedBufferThreshold; + private boolean incrementalEnabled; /** * Constructs a Block Manager. 
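The getBlockLength rewrite above drops the loop that converted each protobuf ChunkInfo into a helper object and instead sums the chunk lengths directly with a stream. A small self-contained sketch of the same pattern follows; the Chunk class is a hypothetical stand-in for the protobuf chunk type.

import java.util.Arrays;
import java.util.List;

/** Simplified illustration: a block's length is the sum of its chunk lengths. */
public final class BlockLengthSketch {

  /** Hypothetical stand-in for ContainerProtos.ChunkInfo; only the length matters here. */
  static final class Chunk {
    private final long len;

    Chunk(long len) {
      this.len = len;
    }

    long getLen() {
      return len;
    }
  }

  /** Sum chunk lengths without materializing intermediate helper objects. */
  static long getBlockLength(List<Chunk> chunks) {
    return chunks.stream()
        .mapToLong(Chunk::getLen)
        .sum();
  }

  public static void main(String[] args) {
    List<Chunk> chunks = Arrays.asList(new Chunk(4096), new Chunk(4096), new Chunk(1024));
    System.out.println(getBlockLength(chunks)); // prints 9216
  }
}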
@@ -81,6 +82,15 @@ public BlockManagerImpl(ConfigurationSource conf) { this.readMappedBufferThreshold = config.getBufferSize( ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_THRESHOLD_KEY, ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_THRESHOLD_DEFAULT); + incrementalEnabled = + config.getBoolean(OZONE_CHUNK_LIST_INCREMENTAL, + OZONE_CHUNK_LIST_INCREMENTAL_DEFAULT); + if (incrementalEnabled && !VersionedDatanodeFeatures.isFinalized( + HDDSLayoutFeature.HBASE_SUPPORT)) { + LOG.warn("DataNode has not finalized upgrading to a version that " + + "supports incremental chunk list. Fallback to full chunk list"); + incrementalEnabled = false; + } } @Override @@ -93,23 +103,12 @@ public long putBlock(Container container, BlockData data, boolean endOfBlock) throws IOException { return persistPutBlock( (KeyValueContainer) container, - data, - config, - endOfBlock); + data, endOfBlock); } - public static long persistPutBlock(KeyValueContainer container, - BlockData data, ConfigurationSource config, boolean endOfBlock) + public long persistPutBlock(KeyValueContainer container, + BlockData data, boolean endOfBlock) throws IOException { - boolean incrementalEnabled = - config.getBoolean(OZONE_CHUNK_LIST_INCREMENTAL, - OZONE_CHUNK_LIST_INCREMENTAL_DEFAULT); - if (incrementalEnabled && !VersionedDatanodeFeatures.isFinalized( - HDDSLayoutFeature.HBASE_SUPPORT)) { - throw new StorageContainerException("DataNode has not finalized " + - "upgrading to a version that supports incremental chunk list.", - UNSUPPORTED_REQUEST); - } Preconditions.checkNotNull(data, "BlockData cannot be null for put " + "operation."); Preconditions.checkState(data.getContainerID() >= 0, "Container Id " + diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java index 1267ed786892..288a2d3e3312 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java @@ -51,8 +51,8 @@ private ChunkManagerFactory() { public static ChunkManager createChunkManager(ConfigurationSource conf, BlockManager manager, VolumeSet volSet) { boolean sync = - conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY, - OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT); + conf.getBoolean(OzoneConfigKeys.HDDS_CONTAINER_CHUNK_WRITE_SYNC_KEY, + OzoneConfigKeys.HDDS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT); boolean persist = conf.getBoolean(HDDS_CONTAINER_PERSISTDATA, HDDS_CONTAINER_PERSISTDATA_DEFAULT); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java index 99b68670fadf..a87b184ccecf 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java @@ -96,7 +96,7 @@ private static void checkLayoutVersion(Container container) { public String streamInit(Container container, BlockID blockID) throws StorageContainerException { checkLayoutVersion(container); - File chunkFile = getChunkFile(container, 
blockID, null); + final File chunkFile = getChunkFile(container, blockID); return chunkFile.getAbsolutePath(); } @@ -105,7 +105,7 @@ public StateMachine.DataChannel getStreamDataChannel( Container container, BlockID blockID, ContainerMetrics metrics) throws StorageContainerException { checkLayoutVersion(container); - File chunkFile = getChunkFile(container, blockID, null); + final File chunkFile = getChunkFile(container, blockID); return new KeyValueStreamDataChannel(chunkFile, container.getContainerData(), metrics); } @@ -137,7 +137,7 @@ public void writeChunk(Container container, BlockID blockID, ChunkInfo info, KeyValueContainerData containerData = (KeyValueContainerData) container .getContainerData(); - File chunkFile = getChunkFile(container, blockID, info); + final File chunkFile = getChunkFile(container, blockID); long len = info.getLen(); long offset = info.getOffset(); @@ -188,7 +188,7 @@ public ChunkBuffer readChunk(Container container, BlockID blockID, HddsVolume volume = containerData.getVolume(); - File chunkFile = getChunkFile(container, blockID, info); + final File chunkFile = getChunkFile(container, blockID); final long len = info.getLen(); long offset = info.getOffset(); @@ -213,7 +213,7 @@ public void deleteChunks(Container container, BlockData blockData) @Override public void finishWriteChunks(KeyValueContainer container, BlockData blockData) throws IOException { - File chunkFile = getChunkFile(container, blockData.getBlockID(), null); + final File chunkFile = getChunkFile(container, blockData.getBlockID()); try { files.close(chunkFile); verifyChunkFileExists(chunkFile); @@ -227,7 +227,7 @@ public void finishWriteChunks(KeyValueContainer container, public void finalizeWriteChunk(KeyValueContainer container, BlockID blockId) throws IOException { synchronized (container) { - File chunkFile = getChunkFile(container, blockId, null); + File chunkFile = getChunkFile(container, blockId); try { if (files.isOpen(chunkFile)) { files.close(chunkFile); @@ -247,7 +247,7 @@ private void deleteChunk(Container container, BlockID blockID, Preconditions.checkNotNull(blockID, "Block ID cannot be null."); - File file = getChunkFile(container, blockID, info); + final File file = getChunkFile(container, blockID); // if the chunk file does not exist, it might have already been deleted. 
// The call might be because of reapply of transactions on datanode @@ -267,10 +267,8 @@ private void deleteChunk(Container container, BlockID blockID, LOG.info("Deleted block file: {}", file); } - private File getChunkFile(Container container, BlockID blockID, - ChunkInfo info) throws StorageContainerException { - return FILE_PER_BLOCK.getChunkFile(container.getContainerData(), blockID, - info); + private static File getChunkFile(Container container, BlockID blockID) throws StorageContainerException { + return FILE_PER_BLOCK.getChunkFile(container.getContainerData(), blockID, null); } private static void checkFullDelete(ChunkInfo info, File chunkFile) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java index 31a340f310b8..a649f573bf08 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java @@ -342,8 +342,7 @@ public void deleteChunks(Container container, BlockData blockData) private static File getChunkFile(KeyValueContainer container, BlockID blockID, ChunkInfo info) throws StorageContainerException { - return FILE_PER_CHUNK.getChunkFile(container.getContainerData(), blockID, - info); + return FILE_PER_CHUNK.getChunkFile(container.getContainerData(), blockID, info.getChunkName()); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeDBDefinition.java index b2c62dfcbd17..cc83f453ebdf 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeDBDefinition.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; import org.apache.hadoop.hdds.utils.db.DBDefinition; import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList; import java.io.File; @@ -71,9 +70,6 @@ public ConfigurationSource getConfig() { public abstract DBColumnFamilyDefinition getMetadataColumnFamily(); - public abstract DBColumnFamilyDefinition - getDeletedBlocksColumnFamily(); - public DBColumnFamilyDefinition getFinalizeBlocksColumnFamily() { return null; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java index c5a59da537ea..26719d7f035a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java @@ -62,13 +62,11 @@ public abstract class AbstractDatanodeStore implements DatanodeStore { private Table blockDataTableWithIterator; - private Table deletedBlocksTable; - private Table finalizeBlocksTable; private Table finalizeBlocksTableWithIterator; - static final Logger LOG = + public 
static final Logger LOG = LoggerFactory.getLogger(AbstractDatanodeStore.class); private volatile DBStore store; private final AbstractDatanodeDBDefinition dbDef; @@ -161,10 +159,6 @@ public void start(ConfigurationSource config) blockDataTable = new DatanodeTable<>(blockDataTableWithIterator); checkTableStatus(blockDataTable, blockDataTable.getName()); - deletedBlocksTable = new DatanodeTable<>( - dbDef.getDeletedBlocksColumnFamily().getTable(this.store)); - checkTableStatus(deletedBlocksTable, deletedBlocksTable.getName()); - if (dbDef.getFinalizeBlocksColumnFamily() != null) { finalizeBlocksTableWithIterator = dbDef.getFinalizeBlocksColumnFamily().getTable(this.store); @@ -217,7 +211,7 @@ public Table getLastChunkInfoTable() { @Override public Table getDeletedBlocksTable() { - return deletedBlocksTable; + throw new UnsupportedOperationException("DeletedBlocksTable is only supported in Container Schema One"); } @Override @@ -292,7 +286,7 @@ protected Table getFinalizeBlocksTableWithIterator() { return this.finalizeBlocksTableWithIterator; } - private static void checkTableStatus(Table table, String name) + protected static void checkTableStatus(Table table, String name) throws IOException { String logMessage = "Unable to get a reference to %s table. Cannot " + "continue."; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java index d34edb3a48a7..4f54e85da2b1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java @@ -96,7 +96,6 @@ public DBColumnFamilyDefinition getMetadataColumnFamily() { return METADATA; } - @Override public DBColumnFamilyDefinition getDeletedBlocksColumnFamily() { return DELETED_BLOCKS; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java index 4d01ae781f29..d47446d49b0f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hdds.utils.db.Proto2Codec; import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile; @@ -74,15 +73,6 @@ public class DatanodeSchemaThreeDBDefinition Long.class, LongCodec.get()); - public static final DBColumnFamilyDefinition - DELETED_BLOCKS = - new DBColumnFamilyDefinition<>( - "deleted_blocks", - String.class, - FixedLengthStringCodec.get(), - ChunkInfoList.class, - ChunkInfoList.getCodec()); - public static final DBColumnFamilyDefinition DELETE_TRANSACTION = new DBColumnFamilyDefinition<>( @@ -116,7 +106,6 @@ public class DatanodeSchemaThreeDBDefinition COLUMN_FAMILIES = 
DBColumnFamilyDefinition.newUnmodifiableMap( BLOCK_DATA, METADATA, - DELETED_BLOCKS, DELETE_TRANSACTION, FINALIZE_BLOCKS, LAST_CHUNK_INFO); @@ -140,7 +129,6 @@ public DatanodeSchemaThreeDBDefinition(String dbPath, BLOCK_DATA.setCfOptions(cfOptions); METADATA.setCfOptions(cfOptions); - DELETED_BLOCKS.setCfOptions(cfOptions); DELETE_TRANSACTION.setCfOptions(cfOptions); FINALIZE_BLOCKS.setCfOptions(cfOptions); LAST_CHUNK_INFO.setCfOptions(cfOptions); @@ -162,12 +150,6 @@ public DBColumnFamilyDefinition getMetadataColumnFamily() { return METADATA; } - @Override - public DBColumnFamilyDefinition - getDeletedBlocksColumnFamily() { - return DELETED_BLOCKS; - } - @Override public DBColumnFamilyDefinition getLastChunkInfoColumnFamily() { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java index 8d293aba989f..b9e7ec7bd5bf 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hdds.utils.db.Proto2Codec; import org.apache.hadoop.hdds.utils.db.StringCodec; import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; @@ -59,15 +58,6 @@ public class DatanodeSchemaTwoDBDefinition Long.class, LongCodec.get()); - public static final DBColumnFamilyDefinition - DELETED_BLOCKS = - new DBColumnFamilyDefinition<>( - "deleted_blocks", - String.class, - StringCodec.get(), - ChunkInfoList.class, - ChunkInfoList.getCodec()); - public static final DBColumnFamilyDefinition DELETE_TRANSACTION = new DBColumnFamilyDefinition<>( @@ -104,7 +94,6 @@ public DatanodeSchemaTwoDBDefinition(String dbPath, COLUMN_FAMILIES = DBColumnFamilyDefinition.newUnmodifiableMap( BLOCK_DATA, METADATA, - DELETED_BLOCKS, DELETE_TRANSACTION, FINALIZE_BLOCKS, LAST_CHUNK_INFO); @@ -125,12 +114,6 @@ public DBColumnFamilyDefinition getMetadataColumnFamily() { return METADATA; } - @Override - public DBColumnFamilyDefinition - getDeletedBlocksColumnFamily() { - return DELETED_BLOCKS; - } - @Override public DBColumnFamilyDefinition getLastChunkInfoColumnFamily() { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java index 4b514c04e44e..f5eb1a3d8ec5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java @@ -28,6 +28,9 @@ * places all data in the default column family. */ public class DatanodeStoreSchemaOneImpl extends AbstractDatanodeStore { + + private Table deletedBlocksTable; + /** * Constructs the metadata store and starts the DB Services. 
* @@ -38,12 +41,15 @@ public DatanodeStoreSchemaOneImpl(ConfigurationSource config, String dbPath, boolean openReadOnly) throws IOException { super(config, new DatanodeSchemaOneDBDefinition(dbPath, config), openReadOnly); + deletedBlocksTable = new DatanodeTable<>( + ((DatanodeSchemaOneDBDefinition) getDbDef()).getDeletedBlocksColumnFamily().getTable(getStore())); + checkTableStatus(deletedBlocksTable, deletedBlocksTable.getName()); } @Override public Table getDeletedBlocksTable() { // Return a wrapper around the deleted blocks table to handle prefixes // when all data is stored in a single table. - return new SchemaOneDeletedBlocksTable(super.getDeletedBlocksTable()); + return new SchemaOneDeletedBlocksTable(deletedBlocksTable); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java index 25479a7a9c14..7bdc7f1dbd5e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java @@ -106,7 +106,6 @@ public void removeKVContainerData(long containerID) throws IOException { try (BatchOperation batch = getBatchHandler().initBatchOperation()) { getMetadataTable().deleteBatchWithPrefix(batch, prefix); getBlockDataTable().deleteBatchWithPrefix(batch, prefix); - getDeletedBlocksTable().deleteBatchWithPrefix(batch, prefix); getDeleteTransactionTable().deleteBatchWithPrefix(batch, prefix); getBatchHandler().commitBatchOperation(batch); } @@ -119,8 +118,6 @@ public void dumpKVContainerData(long containerID, File dumpDir) getTableDumpFile(getMetadataTable(), dumpDir), prefix); getBlockDataTable().dumpToFileWithPrefix( getTableDumpFile(getBlockDataTable(), dumpDir), prefix); - getDeletedBlocksTable().dumpToFileWithPrefix( - getTableDumpFile(getDeletedBlocksTable(), dumpDir), prefix); getDeleteTransactionTable().dumpToFileWithPrefix( getTableDumpFile(getDeleteTransactionTable(), dumpDir), prefix); @@ -132,8 +129,6 @@ public void loadKVContainerData(File dumpDir) getTableDumpFile(getMetadataTable(), dumpDir)); getBlockDataTable().loadFromFile( getTableDumpFile(getBlockDataTable(), dumpDir)); - getDeletedBlocksTable().loadFromFile( - getTableDumpFile(getDeletedBlocksTable(), dumpDir)); getDeleteTransactionTable().loadFromFile( getTableDumpFile(getDeleteTransactionTable(), dumpDir)); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScannerConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScannerConfiguration.java index f2b879706b7b..5941cc6cf89c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScannerConfiguration.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScannerConfiguration.java @@ -71,10 +71,10 @@ public class ContainerScannerConfiguration { @Config(key = "enabled", type = ConfigType.BOOLEAN, - defaultValue = "false", + defaultValue = "true", tags = {ConfigTag.STORAGE}, description = "Config parameter to enable all container scanners.") - private boolean enabled = false; + private boolean enabled = true; @Config(key = "dev.data.scan.enabled", type = 
ConfigType.BOOLEAN, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java index 1929c16089b0..f20094079c9e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java @@ -95,18 +95,17 @@ public void importContainer(long containerID, Path tarFilePath, throws IOException { if (!importContainerProgress.add(containerID)) { deleteFileQuietely(tarFilePath); - LOG.warn("Container import in progress with container Id {}", - containerID); - throw new StorageContainerException("Container " + - "import in progress with container Id " + containerID, + String log = "Container import in progress with container Id " + containerID; + LOG.warn(log); + throw new StorageContainerException(log, ContainerProtos.Result.CONTAINER_EXISTS); } try { if (containerSet.getContainer(containerID) != null) { - LOG.warn("Container already exists with container Id {}", containerID); - throw new StorageContainerException("Container already exists " + - "with container Id " + containerID, + String log = "Container already exists with container Id " + containerID; + LOG.warn(log); + throw new StorageContainerException(log, ContainerProtos.Result.CONTAINER_EXISTS); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java index 20c36b4d1fcf..6bc237207b37 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java @@ -19,7 +19,10 @@ package org.apache.hadoop.ozone.container.replication; import java.io.IOException; +import java.io.InputStream; import java.io.OutputStream; +import java.util.HashSet; +import java.util.Set; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.CopyContainerRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.CopyContainerResponseProto; @@ -28,11 +31,18 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.IntraDatanodeProtocolServiceGrpc; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.ratis.grpc.util.ZeroCopyMessageMarshaller; +import org.apache.ratis.thirdparty.com.google.protobuf.MessageLite; +import org.apache.ratis.thirdparty.io.grpc.MethodDescriptor; +import org.apache.ratis.thirdparty.io.grpc.ServerCallHandler; +import org.apache.ratis.thirdparty.io.grpc.ServerServiceDefinition; import org.apache.ratis.thirdparty.io.grpc.stub.CallStreamObserver; import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.hdds.protocol.datanode.proto.IntraDatanodeProtocolServiceGrpc.getDownloadMethod; +import static org.apache.hadoop.hdds.protocol.datanode.proto.IntraDatanodeProtocolServiceGrpc.getUploadMethod; import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.fromProto; /** @@ -49,10 +59,79 @@ public class GrpcReplicationService extends private final 
ContainerReplicationSource source; private final ContainerImporter importer; + private final boolean zeroCopyEnabled; + + private final ZeroCopyMessageMarshaller + sendContainerZeroCopyMessageMarshaller; + + private final ZeroCopyMessageMarshaller + copyContainerZeroCopyMessageMarshaller; + public GrpcReplicationService(ContainerReplicationSource source, - ContainerImporter importer) { + ContainerImporter importer, boolean zeroCopyEnabled) { this.source = source; this.importer = importer; + this.zeroCopyEnabled = zeroCopyEnabled; + + if (zeroCopyEnabled) { + sendContainerZeroCopyMessageMarshaller = new ZeroCopyMessageMarshaller<>( + SendContainerRequest.getDefaultInstance()); + copyContainerZeroCopyMessageMarshaller = new ZeroCopyMessageMarshaller<>( + CopyContainerRequestProto.getDefaultInstance()); + } else { + sendContainerZeroCopyMessageMarshaller = null; + copyContainerZeroCopyMessageMarshaller = null; + } + } + + public ServerServiceDefinition bindServiceWithZeroCopy() { + ServerServiceDefinition orig = super.bindService(); + if (!zeroCopyEnabled) { + LOG.info("Zerocopy is not enabled."); + return orig; + } + + Set methodNames = new HashSet<>(); + ServerServiceDefinition.Builder builder = + ServerServiceDefinition.builder(orig.getServiceDescriptor().getName()); + + // Add `upload` method with zerocopy marshaller. + MethodDescriptor uploadMethod = + getUploadMethod(); + addZeroCopyMethod(orig, builder, uploadMethod, + sendContainerZeroCopyMessageMarshaller); + methodNames.add(uploadMethod.getFullMethodName()); + + // Add `download` method with zerocopy marshaller. + MethodDescriptor + downloadMethod = getDownloadMethod(); + addZeroCopyMethod(orig, builder, downloadMethod, + copyContainerZeroCopyMessageMarshaller); + methodNames.add(downloadMethod.getFullMethodName()); + + // Add other methods as is. 
+ orig.getMethods().stream().filter( + x -> !methodNames.contains(x.getMethodDescriptor().getFullMethodName()) + ).forEach( + builder::addMethod + ); + + return builder.build(); + } + + private static void addZeroCopyMethod( + ServerServiceDefinition orig, + ServerServiceDefinition.Builder newServiceBuilder, + MethodDescriptor origMethod, + ZeroCopyMessageMarshaller zeroCopyMarshaller) { + MethodDescriptor newMethod = origMethod.toBuilder() + .setRequestMarshaller(zeroCopyMarshaller) + .build(); + @SuppressWarnings("unchecked") + ServerCallHandler serverCallHandler = + (ServerCallHandler) orig.getMethod( + newMethod.getFullMethodName()).getServerCallHandler(); + newServiceBuilder.addMethod(newMethod, serverCallHandler); } @Override @@ -76,13 +155,21 @@ public void download(CopyContainerRequestProto request, } finally { // output may have already been closed, ignore such errors IOUtils.cleanupWithLogger(LOG, outputStream); + + if (copyContainerZeroCopyMessageMarshaller != null) { + InputStream popStream = + copyContainerZeroCopyMessageMarshaller.popStream(request); + if (popStream != null) { + IOUtils.cleanupWithLogger(LOG, popStream); + } + } } } @Override public StreamObserver upload( StreamObserver responseObserver) { - - return new SendContainerRequestHandler(importer, responseObserver); + return new SendContainerRequestHandler(importer, responseObserver, + sendContainerZeroCopyMessageMarshaller); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/MeasuredReplicator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/MeasuredReplicator.java index fa3763d88067..7becbe752189 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/MeasuredReplicator.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/MeasuredReplicator.java @@ -27,7 +27,6 @@ import org.apache.hadoop.metrics2.lib.MutableGaugeLong; import org.apache.hadoop.ozone.container.replication.AbstractReplicationTask.Status; -import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.util.Time; /** @@ -98,38 +97,31 @@ public void close() throws Exception { DefaultMetricsSystem.instance().unregisterSource(metricsName()); } - @VisibleForTesting - public MutableCounterLong getSuccess() { + MutableCounterLong getSuccess() { return success; } - @VisibleForTesting - public MutableGaugeLong getSuccessTime() { + MutableGaugeLong getSuccessTime() { return successTime; } - @VisibleForTesting - public MutableGaugeLong getFailureTime() { + MutableGaugeLong getFailureTime() { return failureTime; } - @VisibleForTesting - public MutableCounterLong getFailure() { + MutableCounterLong getFailure() { return failure; } - @VisibleForTesting - public MutableGaugeLong getQueueTime() { + MutableGaugeLong getQueueTime() { return queueTime; } - @VisibleForTesting - public MutableGaugeLong getTransferredBytes() { + MutableGaugeLong getTransferredBytes() { return transferredBytes; } - @VisibleForTesting - public MutableGaugeLong getFailureBytes() { + MutableGaugeLong getFailureBytes() { return failureBytes; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java index d2407a61d0b5..f72ca2a6881d 100644 --- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java @@ -99,16 +99,18 @@ public ReplicationServer(ContainerController controller, new LinkedBlockingQueue<>(replicationQueueLimit), threadFactory); - init(); + init(replicationConfig.isZeroCopyEnable()); } - public void init() { + public void init(boolean enableZeroCopy) { + GrpcReplicationService grpcReplicationService = new GrpcReplicationService( + new OnDemandContainerReplicationSource(controller), importer, + enableZeroCopy); NettyServerBuilder nettyServerBuilder = NettyServerBuilder.forPort(port) .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE) - .addService(ServerInterceptors.intercept(new GrpcReplicationService( - new OnDemandContainerReplicationSource(controller), - importer - ), new GrpcServerInterceptor())) + .addService(ServerInterceptors.intercept( + grpcReplicationService.bindServiceWithZeroCopy(), + new GrpcServerInterceptor())) .executor(executor); if (secConf.isSecurityEnabled() && secConf.isGrpcTlsEnabled()) { @@ -203,6 +205,11 @@ public static final class ReplicationConfig { static final String REPLICATION_OUTOFSERVICE_FACTOR_KEY = PREFIX + "." + OUTOFSERVICE_FACTOR_KEY; + public static final String ZEROCOPY_ENABLE_KEY = "zerocopy.enabled"; + private static final boolean ZEROCOPY_ENABLE_DEFAULT = true; + private static final String ZEROCOPY_ENABLE_DEFAULT_VALUE = + "true"; + /** * The maximum number of replication commands a single datanode can execute * simultaneously. @@ -244,6 +251,15 @@ public static final class ReplicationConfig { ) private double outOfServiceFactor = OUTOFSERVICE_FACTOR_DEFAULT; + @Config(key = ZEROCOPY_ENABLE_KEY, + type = ConfigType.BOOLEAN, + defaultValue = ZEROCOPY_ENABLE_DEFAULT_VALUE, + tags = {DATANODE, SCM}, + description = "Specify if zero-copy should be enabled for " + + "replication protocol." 
+ ) + private boolean zeroCopyEnable = ZEROCOPY_ENABLE_DEFAULT; + public double getOutOfServiceFactor() { return outOfServiceFactor; } @@ -277,6 +293,14 @@ public void setReplicationQueueLimit(int limit) { this.replicationQueueLimit = limit; } + public boolean isZeroCopyEnable() { + return zeroCopyEnable; + } + + public void setZeroCopyEnable(boolean zeroCopyEnable) { + this.zeroCopyEnable = zeroCopyEnable; + } + @PostConstruct public void validate() { if (replicationMaxStreams < 1) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java index ee51463309b8..5ceea125e814 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java @@ -106,7 +106,6 @@ public static class Builder { private Clock clock; private IntConsumer executorThreadUpdater = threadCount -> { }; - private String threadNamePrefix; public Builder clock(Clock newClock) { clock = newClock; @@ -138,11 +137,6 @@ public Builder executorThreadUpdater(IntConsumer newUpdater) { return this; } - public Builder threadNamePrefix(String threadPrefix) { - this.threadNamePrefix = threadPrefix; - return this; - } - public ReplicationSupervisor build() { if (replicationConfig == null || datanodeConfig == null) { ConfigurationSource conf = new OzoneConfiguration(); @@ -162,6 +156,7 @@ public ReplicationSupervisor build() { if (executor == null) { LOG.info("Initializing replication supervisor with thread count = {}", replicationConfig.getReplicationMaxStreams()); + String threadNamePrefix = context != null ? 
context.getThreadNamePrefix() : ""; ThreadFactory threadFactory = new ThreadFactoryBuilder() .setDaemon(true) .setNameFormat(threadNamePrefix + "ContainerReplicationThread-%d") diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SendContainerRequestHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SendContainerRequestHandler.java index 6bcd46ba0a7a..506a96fe0514 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SendContainerRequestHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SendContainerRequestHandler.java @@ -24,11 +24,13 @@ import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; +import org.apache.ratis.grpc.util.ZeroCopyMessageMarshaller; import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; +import java.io.InputStream; import java.io.OutputStream; import java.nio.file.Files; import java.nio.file.Path; @@ -53,12 +55,15 @@ class SendContainerRequestHandler private HddsVolume volume; private Path path; private CopyContainerCompression compression; + private final ZeroCopyMessageMarshaller marshaller; SendContainerRequestHandler( ContainerImporter importer, - StreamObserver responseObserver) { + StreamObserver responseObserver, + ZeroCopyMessageMarshaller marshaller) { this.importer = importer; this.responseObserver = responseObserver; + this.marshaller = marshaller; } @Override @@ -98,6 +103,13 @@ public void onNext(SendContainerRequest req) { nextOffset += length; } catch (Throwable t) { onError(t); + } finally { + if (marshaller != null) { + InputStream popStream = marshaller.popStream(req); + if (popStream != null) { + IOUtils.cleanupWithLogger(LOG, popStream); + } + } } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java index 33bc4a851664..c63f82025e09 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java @@ -344,7 +344,7 @@ public static ContainerController getEmptyContainerController() { public static XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); return XceiverServerRatis.newXceiverServerRatis(dn, conf, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java index 7917a4ce55cd..21775245efb2 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java @@ -155,8 +155,8 @@ public static HddsProtos.ReplicationFactor 
getReplicationFactor( private static boolean isUseRatis(ConfigurationSource c) { return c.getBoolean( - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT); + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_DEFAULT); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java index 5738f5c1106e..e1e1ee9172a8 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java @@ -82,12 +82,12 @@ void setUp() throws Exception { conf = SCMTestUtils.getConf(testRoot); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500, TimeUnit.MILLISECONDS); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, true); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); conf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); serverAddresses = new ArrayList<>(); scmServers = new ArrayList<>(); mockServers = new ArrayList<>(); @@ -200,7 +200,7 @@ public void testDatanodeStateContext() throws IOException, DatanodeDetails datanodeDetails = getNewDatanodeDetails(); DatanodeDetails.Port port = DatanodeDetails.newPort( DatanodeDetails.Port.Name.STANDALONE, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); datanodeDetails.setPort(port); ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath, conf); try (DatanodeStateMachine stateMachine = @@ -327,7 +327,7 @@ public void testDatanodeStateMachineWithIdWriteFail() throws Exception { DatanodeDetails datanodeDetails = getNewDatanodeDetails(); DatanodeDetails.Port port = DatanodeDetails.newPort( DatanodeDetails.Port.Name.STANDALONE, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); datanodeDetails.setPort(port); try (DatanodeStateMachine stateMachine = diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index 95df6c647f8b..e5f6dc7edefd 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -20,11 +20,13 @@ import com.google.common.collect.Maps; import org.apache.commons.codec.digest.DigestUtils; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory; 
+import org.apache.hadoop.hdds.fs.MockSpaceUsageSource; import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; import org.apache.hadoop.hdds.fs.SpaceUsageSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -39,6 +41,8 @@ import org.apache.hadoop.hdds.security.token.TokenVerifier; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.common.Checksum; +import org.apache.hadoop.ozone.common.ChecksumData; +import org.apache.hadoop.ozone.common.OzoneChecksumException; import org.apache.hadoop.ozone.common.utils.BufferUtils; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; @@ -46,10 +50,12 @@ import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.Handler; import org.apache.hadoop.ozone.container.common.report.IncrementalReportSender; +import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.Op; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.WriteChunkStage; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; @@ -68,6 +74,7 @@ import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.time.Duration; import java.util.Collections; import java.util.HashMap; @@ -78,7 +85,6 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.fs.MockSpaceUsagePersistence.inMemory; -import static org.apache.hadoop.hdds.fs.MockSpaceUsageSource.fixed; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getContainerCommandResponse; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMMIT_STAGE; @@ -99,6 +105,9 @@ * Test-cases to verify the functionality of HddsDispatcher. 
*/ public class TestHddsDispatcher { + @TempDir + private Path tempDir; + private static final Logger LOG = LoggerFactory.getLogger( TestHddsDispatcher.class); @TempDir @@ -129,6 +138,8 @@ public void testContainerCloseActionWhenFull( (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), dd.getUuidString()); Container container = new KeyValueContainer(containerData, conf); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId.toString()); containerSet.addContainer(container); @@ -164,6 +175,72 @@ public void testContainerCloseActionWhenFull( } } + @Test + public void testSmallFileChecksum() throws IOException { + String testDirPath = testDir.getPath(); + try { + UUID scmId = UUID.randomUUID(); + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(HDDS_DATANODE_DIR_KEY, testDirPath); + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath); + DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class); + dnConf.setChunkDataValidationCheck(true); + conf.setFromObject(dnConf); + DatanodeDetails dd = randomDatanodeDetails(); + HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf); + + ContainerCommandResponseProto smallFileResponse = + hddsDispatcher.dispatch(newPutSmallFile(1L, 1L), null); + + assertEquals(ContainerProtos.Result.SUCCESS, smallFileResponse.getResult()); + } finally { + ContainerMetrics.remove(); + } + } + + @Test + public void testWriteChunkChecksum() throws IOException { + String testDirPath = testDir.getPath(); + try { + UUID scmId = UUID.randomUUID(); + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(HDDS_DATANODE_DIR_KEY, testDirPath); + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath); + DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class); + dnConf.setChunkDataValidationCheck(true); + conf.setFromObject(dnConf); + DatanodeDetails dd = randomDatanodeDetails(); + HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf); + //Send a few WriteChunkRequests + ContainerCommandResponseProto response; + ContainerCommandRequestProto writeChunkRequest0 = getWriteChunkRequest0(dd.getUuidString(), 1L, 1L, 0); + hddsDispatcher.dispatch(writeChunkRequest0, null); + hddsDispatcher.dispatch(getWriteChunkRequest0(dd.getUuidString(), 1L, 1L, 1), null); + response = hddsDispatcher.dispatch(getWriteChunkRequest0(dd.getUuidString(), 1L, 1L, 2), null); + + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + // Send Read Chunk request for written chunk. 
+ response = + hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest0), null); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + + ByteString responseData = BufferUtils.concatByteStrings( + response.getReadChunk().getDataBuffers().getBuffersList()); + assertEquals(writeChunkRequest0.getWriteChunk().getData(), + responseData); + + // Test checksum on Read: + final DispatcherContext context = DispatcherContext + .newBuilder(DispatcherContext.Op.READ_STATE_MACHINE_DATA) + .build(); + response = + hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest0), context); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + } finally { + ContainerMetrics.remove(); + } + } + @ContainerLayoutTestInfo.ContainerTest public void testContainerCloseActionWhenVolumeFull( ContainerLayoutVersion layoutVersion) throws Exception { @@ -178,7 +255,8 @@ public void testContainerCloseActionWhenVolumeFull( .conf(conf).usageCheckFactory(MockSpaceUsageCheckFactory.NONE); // state of cluster : available (140) > 100 ,datanode volume // utilisation threshold not yet reached. container creates are successful. - SpaceUsageSource spaceUsage = fixed(500, 140, 360); + AtomicLong usedSpace = new AtomicLong(360); + SpaceUsageSource spaceUsage = MockSpaceUsageSource.of(500, usedSpace); SpaceUsageCheckFactory factory = MockSpaceUsageCheckFactory.of( spaceUsage, Duration.ZERO, inMemory(new AtomicLong(0))); @@ -196,6 +274,8 @@ public void testContainerCloseActionWhenVolumeFull( 50, UUID.randomUUID().toString(), dd.getUuidString()); Container container = new KeyValueContainer(containerData, conf); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId.toString()); containerSet.addContainer(container); @@ -212,6 +292,7 @@ public void testContainerCloseActionWhenVolumeFull( hddsDispatcher.setClusterId(scmId.toString()); containerData.getVolume().getVolumeInfo() .ifPresent(volumeInfo -> volumeInfo.incrementUsedSpace(50)); + usedSpace.addAndGet(50); ContainerCommandResponseProto response = hddsDispatcher .dispatch(getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null); assertEquals(ContainerProtos.Result.SUCCESS, @@ -512,6 +593,84 @@ private ContainerCommandRequestProto getWriteChunkRequest( .build(); } + static ChecksumData checksum(ByteString data) { + try { + return new Checksum(ContainerProtos.ChecksumType.CRC32, 256) + .computeChecksum(data.asReadOnlyByteBuffer()); + } catch (OzoneChecksumException e) { + throw new IllegalStateException(e); + } + } + + private ContainerCommandRequestProto getWriteChunkRequest0( + String datanodeId, Long containerId, Long localId, int chunkNum) { + final int lenOfBytes = 32; + ByteString chunkData = ByteString.copyFrom(RandomUtils.nextBytes(32)); + + ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo + .newBuilder() + .setChunkName( + DigestUtils.md5Hex("dummy-key") + "_stream_" + + containerId + "_chunk_" + localId) + .setOffset((long) chunkNum * lenOfBytes) + .setLen(lenOfBytes) + .setChecksumData(checksum(chunkData).getProtoBufMessage()) + .build(); + + WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto + .newBuilder() + .setBlockID(new BlockID(containerId, localId) + .getDatanodeBlockIDProtobuf()) + .setChunkData(chunk) + .setData(chunkData); + + return ContainerCommandRequestProto + .newBuilder() + .setContainerID(containerId) + 
.setCmdType(ContainerProtos.Type.WriteChunk) + .setDatanodeUuid(datanodeId) + .setWriteChunk(writeChunkRequest) + .build(); + } + + static ContainerCommandRequestProto newPutSmallFile(Long containerId, Long localId) { + ByteString chunkData = ByteString.copyFrom(RandomUtils.nextBytes(32)); + return newPutSmallFile(new BlockID(containerId, localId), chunkData); + } + + static ContainerCommandRequestProto newPutSmallFile( + BlockID blockID, ByteString data) { + final ContainerProtos.BlockData.Builder blockData + = ContainerProtos.BlockData.newBuilder() + .setBlockID(blockID.getDatanodeBlockIDProtobuf()); + final ContainerProtos.PutBlockRequestProto.Builder putBlockRequest + = ContainerProtos.PutBlockRequestProto.newBuilder() + .setBlockData(blockData); + final ContainerProtos.KeyValue keyValue = ContainerProtos.KeyValue.newBuilder() + .setKey("OverWriteRequested") + .setValue("true") + .build(); + final ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo.newBuilder() + .setChunkName(blockID.getLocalID() + "_chunk") + .setOffset(0) + .setLen(data.size()) + .addMetadata(keyValue) + .setChecksumData(checksum(data).getProtoBufMessage()) + .build(); + final ContainerProtos.PutSmallFileRequestProto putSmallFileRequest + = ContainerProtos.PutSmallFileRequestProto.newBuilder() + .setChunkInfo(chunk) + .setBlock(putBlockRequest) + .setData(data) + .build(); + return ContainerCommandRequestProto.newBuilder() + .setCmdType(ContainerProtos.Type.PutSmallFile) + .setContainerID(blockID.getContainerID()) + .setDatanodeUuid(UUID.randomUUID().toString()) + .setPutSmallFile(putSmallFileRequest) + .build(); + } + /** * Creates container read chunk request using input container write chunk * request. diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java index 565853c22dde..657afc38874a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java @@ -178,7 +178,7 @@ public void isCreatedWitDefaultValues() { public void testConf() throws Exception { final OzoneConfiguration conf = new OzoneConfiguration(); final String dir = "dummy/dir"; - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final DatanodeRatisServerConfig ratisConf = conf.getObject( DatanodeRatisServerConfig.class); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java index d05c127838f1..387997db736d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult; import org.apache.hadoop.ozone.OzoneConfigKeys; import 
org.apache.hadoop.ozone.container.common.ContainerTestUtils; import org.apache.hadoop.ozone.container.common.volume.DbVolume; @@ -43,7 +44,13 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrowsExactly; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mockStatic; + +import org.mockito.MockedStatic; +import org.mockito.Mockito; + /** * Test for {@link HddsVolumeUtil}. @@ -95,6 +102,34 @@ public void teardown() { dbVolumeSet.shutdown(); } + @Test + public void testLoadHDDVolumeWithInitDBException() + throws Exception { + // Create db instances for all HDDsVolumes. + for (HddsVolume hddsVolume : StorageVolumeUtil.getHddsVolumesList( + hddsVolumeSet.getVolumesList())) { + hddsVolume.format(clusterId); + hddsVolume.createWorkingDir(clusterId, null); + } + + try (MockedStatic mocked = mockStatic(HddsVolumeUtil.class, Mockito.CALLS_REAL_METHODS)) { + // Simulating the init DB Exception + mocked.when(() -> HddsVolumeUtil.initPerDiskDBStore(Mockito.anyString(), Mockito.any(), Mockito.anyBoolean())) + .thenThrow(new IOException("Mocked Exception")); + + reinitVolumes(); + for (HddsVolume hddsVolume : StorageVolumeUtil.getHddsVolumesList( + hddsVolumeSet.getVolumesList())) { + assertThrowsExactly(IOException.class, () -> hddsVolume.loadDbStore(true)); + // If the Volume init DB is abnormal, the Volume should be recognized as a failed Volume + assertEquals(VolumeCheckResult.FAILED, hddsVolume.check(false)); + assertTrue(hddsVolume.isDbLoadFailure()); + assertFalse(hddsVolume.isDbLoaded()); + } + } + + } + @Test public void testLoadAllHddsVolumeDbStoreWithoutDbVolumes() throws IOException { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java index 3859cd47c9b9..46b8cc6772e8 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java @@ -59,7 +59,7 @@ public class TestPeriodicVolumeChecker { public void setup() throws IOException { conf = new OzoneConfiguration(); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.toString()); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, Files.createDirectory(folder.resolve("VolumeCheckerDir")).toString()); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java index c5d127446bfc..2a7cae57dbf9 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java @@ -66,20 +66,15 @@ public void testVolumeCapacityAfterReserve() throws Exception { HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT); long volumeCapacity = hddsVolume.getCapacity(); - 
//Gets the actual total capacity - long totalCapacity = hddsVolume.getVolumeInfo().get() - .getUsageForTesting().getCapacity(); - long reservedCapacity = hddsVolume.getVolumeInfo().get() - .getReservedInBytes(); - //Volume Capacity with Reserved - long volumeCapacityReserved = totalCapacity - reservedCapacity; + VolumeUsage usage = hddsVolume.getVolumeInfo().get().getUsageForTesting(); - long reservedFromVolume = hddsVolume.getVolumeInfo().get() - .getReservedInBytes(); + //Gets the actual total capacity + long totalCapacity = usage.realUsage().getCapacity(); + long reservedCapacity = usage.getReservedBytes(); long reservedCalculated = (long) Math.ceil(totalCapacity * percentage); - assertEquals(reservedFromVolume, reservedCalculated); - assertEquals(volumeCapacity, volumeCapacityReserved); + assertEquals(reservedCalculated, reservedCapacity); + assertEquals(totalCapacity - reservedCapacity, volumeCapacity); } /** @@ -119,16 +114,15 @@ public void testFallbackToPercentConfig() throws Exception { temp.toString() + ":500B"); HddsVolume hddsVolume = volumeBuilder.conf(conf).build(); - long reservedFromVolume = hddsVolume.getVolumeInfo().get() - .getReservedInBytes(); - assertNotEquals(reservedFromVolume, 0); + VolumeUsage usage = hddsVolume.getVolumeInfo().get().getUsageForTesting(); + long reservedFromVolume = usage.getReservedBytes(); + assertNotEquals(0, reservedFromVolume); - long totalCapacity = hddsVolume.getVolumeInfo().get() - .getUsageForTesting().getCapacity(); + long totalCapacity = usage.realUsage().getCapacity(); float percentage = conf.getFloat(HDDS_DATANODE_DIR_DU_RESERVED_PERCENT, HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT); long reservedCalculated = (long) Math.ceil(totalCapacity * percentage); - assertEquals(reservedFromVolume, reservedCalculated); + assertEquals(reservedCalculated, reservedFromVolume); } @Test diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java index 1159d4277c78..68e687fefade 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java @@ -82,7 +82,7 @@ public void setup() throws Exception { volumes.add(volume1); volumes.add(volume2); conf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, dataDirKey); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dataDirKey); initializeVolumeSet(); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java index e3c610bfe47a..55df5f43b6b8 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java @@ -21,6 +21,7 @@ import java.io.File; import java.io.IOException; import java.net.InetSocketAddress; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; @@ -45,6 +46,7 @@ import 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; @@ -76,6 +78,9 @@ */ @Timeout(30) public class TestVolumeSetDiskChecks { + @TempDir + private Path tempDir; + public static final Logger LOG = LoggerFactory.getLogger( TestVolumeSetDiskChecks.class); @TempDir @@ -228,7 +233,7 @@ private OzoneConfiguration getConfWithDataNodeDirs(int numDirs) { for (int i = 0; i < numDirs; ++i) { metaDirs.add(new File(dir, randomAlphanumeric(10)).toString()); } - ozoneConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + ozoneConf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, String.join(",", metaDirs)); final List dbDirs = new ArrayList<>(); @@ -302,11 +307,15 @@ public void testVolumeFailure() throws IOException { dummyChecker); KeyValueContainer container = new KeyValueContainer(data, conf); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID.randomUUID().toString()); conSet.addContainer(container); KeyValueContainer container1 = new KeyValueContainer(data1, conf); + StorageVolumeUtil.getHddsVolumesList(volumeSet1.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); container1.create(volumeSet1, new RoundRobinVolumeChoosingPolicy(), UUID.randomUUID().toString()); conSet.addContainer(container1); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java index 60dfe8509bda..b24a6f04c488 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; @@ -123,9 +122,7 @@ public void testKeyValueContainerCheckCorruption( assertFalse(block.getChunks().isEmpty()); ContainerProtos.ChunkInfo c = block.getChunks().get(0); BlockID blockID = block.getBlockID(); - ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(c); - File chunkFile = getChunkLayout() - .getChunkFile(containerData, blockID, chunkInfo); + File chunkFile = getChunkLayout().getChunkFile(containerData, blockID, c.getChunkName()); long length = chunkFile.length(); assertThat(length).isGreaterThan(0); // forcefully truncate the file to induce failure. 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java index d2206a7fd680..47d24874749e 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java @@ -76,7 +76,7 @@ public void testReadOversizeChunk() throws IOException { // write chunk bypassing size limit File chunkFile = getStrategy().getLayout() - .getChunkFile(getKeyValueContainerData(), blockID, chunkInfo); + .getChunkFile(getKeyValueContainerData(), blockID, chunkInfo.getChunkName()); FileUtils.writeByteArrayToFile(chunkFile, array); // WHEN+THEN diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java index f83216b7126e..27a0bc81d6f6 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java @@ -67,7 +67,7 @@ public void testWriteChunkStageWriteAndCommit() throws Exception { long term = 0; long index = 0; File chunkFile = ContainerLayoutVersion.FILE_PER_CHUNK - .getChunkFile(container.getContainerData(), blockID, chunkInfo); + .getChunkFile(container.getContainerData(), blockID, chunkInfo.getChunkName()); File tempChunkFile = new File(chunkFile.getParent(), chunkFile.getName() + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER + OzoneConsts.CONTAINER_TEMPORARY_CHUNK_PREFIX @@ -109,7 +109,7 @@ public void deletesChunkFileWithLengthIncludingOffset() throws Exception { ChunkInfo oldDatanodeChunkInfo = new ChunkInfo(chunkInfo.getChunkName(), offset, chunkInfo.getLen()); File file = ContainerLayoutVersion.FILE_PER_CHUNK.getChunkFile( - container.getContainerData(), blockID, chunkInfo); + container.getContainerData(), blockID, chunkInfo.getChunkName()); ChunkUtils.writeData(file, ChunkBuffer.wrap(getData()), offset, chunkInfo.getLen(), null, true); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java index 7f38eab785b8..8fd7b6280b62 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java @@ -378,7 +378,7 @@ public void testMultipleContainerReader(ContainerTestVersionInfo versionInfo) BlockUtils.shutdownCache(conf); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, datanodeDirs.toString()); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, datanodeDirs.toString()); MutableVolumeSet volumeSets = new MutableVolumeSet(datanodeId.toString(), clusterId, conf, null, diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScannerConfiguration.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScannerConfiguration.java index f11a7f5522c1..c4dca7c3498a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScannerConfiguration.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScannerConfiguration.java @@ -35,7 +35,7 @@ import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScannerConfiguration.ON_DEMAND_VOLUME_BYTES_PER_SECOND_KEY; import static org.apache.hadoop.ozone.container.ozoneimpl.ContainerScannerConfiguration.VOLUME_BYTES_PER_SECOND_KEY; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Test for {@link ContainerScannerConfiguration}. @@ -103,7 +103,7 @@ public void isCreatedWitDefaultValues() { ContainerScannerConfiguration csConf = conf.getObject(ContainerScannerConfiguration.class); - assertFalse(csConf.isEnabled()); + assertTrue(csConf.isEnabled()); assertEquals(METADATA_SCAN_INTERVAL_DEFAULT, csConf.getMetadataScanInterval()); assertEquals(DATA_SCAN_INTERVAL_DEFAULT, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 497418dcdcb9..07804c2a20bd 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -165,7 +165,7 @@ public void testBuildNodeReport(ContainerTestVersionInfo versionInfo) throws Exception { initTest(versionInfo); String path = folder.toString(); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, String.join(",", path + "/ratis1", path + "/ratis2", path + "ratis3")); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java index f479ff93372d..03901b99be3b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java @@ -17,14 +17,51 @@ */ package org.apache.hadoop.ozone.container.replication; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.CopyContainerRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.CopyContainerResponseProto; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.security.SecurityConfig; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; 
+import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerSet; +import org.apache.hadoop.ozone.container.common.interfaces.Handler; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; +import org.apache.hadoop.ozone.container.common.volume.HddsVolume; +import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; +import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; +import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.ratis.thirdparty.io.grpc.stub.CallStreamObserver; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import java.io.File; import java.io.IOException; import java.io.OutputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicLong; +import static org.apache.hadoop.ozone.OzoneConsts.GB; +import static org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand.toTarget; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -34,6 +71,148 @@ */ class TestGrpcReplicationService { + @TempDir + private Path tempDir; + + private ReplicationServer replicationServer; + private OzoneConfiguration conf; + private ContainerController containerController; + private DatanodeDetails datanode; + private static final long CONTAINER_ID = 123456L; + private final AtomicLong pushContainerId = new AtomicLong(); + + @BeforeEach + public void setUp() throws Exception { + init(false); + } + + public void init(boolean isZeroCopy) throws Exception { + conf = new OzoneConfiguration(); + + ReplicationServer.ReplicationConfig replicationConfig = + conf.getObject(ReplicationServer.ReplicationConfig.class); + + replicationConfig.setZeroCopyEnable(isZeroCopy); + + SecurityConfig secConf = new SecurityConfig(conf); + + ContainerSet containerSet = new ContainerSet(1000); + + DatanodeDetails.Builder dn = + DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()) + .setHostName("localhost").setIpAddress("127.0.0.1") + .setPersistedOpState(HddsProtos.NodeOperationalState.IN_SERVICE) + .setPersistedOpStateExpiry(0); + DatanodeDetails.Port containerPort = + DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); + DatanodeDetails.Port ratisPort = + DatanodeDetails.newPort(DatanodeDetails.Port.Name.RATIS, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT_DEFAULT); + DatanodeDetails.Port replicationPort = + DatanodeDetails.newPort(DatanodeDetails.Port.Name.REPLICATION, + replicationConfig.getPort()); + DatanodeDetails.Port streamPort = + 
DatanodeDetails.newPort(DatanodeDetails.Port.Name.RATIS_DATASTREAM, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT); + dn.addPort(containerPort); + dn.addPort(ratisPort); + dn.addPort(replicationPort); + dn.addPort(streamPort); + + datanode = dn.build(); + + final String testDir = + Files.createDirectory(tempDir.resolve("VolumeDir")).toString(); + + MutableVolumeSet volumeSet = mock(MutableVolumeSet.class); + when(volumeSet.getVolumesList()).thenReturn(Collections.singletonList( + new HddsVolume.Builder(testDir).conf(conf).build())); + + ContainerMetrics metrics = ContainerMetrics.create(conf); + Handler containerHandler = + new KeyValueHandler(conf, datanode.getUuidString(), containerSet, + volumeSet, metrics, c -> { + }); + + containerController = new ContainerController(containerSet, + Collections.singletonMap( + ContainerProtos.ContainerType.KeyValueContainer, containerHandler)); + + KeyValueContainerData data = new KeyValueContainerData( + CONTAINER_ID, + ContainerLayoutVersion.FILE_PER_BLOCK, GB, UUID.randomUUID().toString(), + datanode.getUuidString()); + KeyValueContainer container = new KeyValueContainer(data, conf); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); + container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), + "test-replication"); + containerSet.addContainer(container); + container.close(); + + ContainerImporter importer = mock(ContainerImporter.class); + doAnswer(invocation -> { + pushContainerId.set((long) invocation.getArguments()[0]); + return null; + }).when(importer).importContainer(anyLong(), any(), any(), any()); + doReturn(true).when(importer).isAllowedContainerImport(eq( + CONTAINER_ID)); + when(importer.chooseNextVolume()).thenReturn(new HddsVolume.Builder( + Files.createDirectory(tempDir.resolve("ImporterDir")).toString()).conf( + conf).build()); + + replicationServer = + new ReplicationServer(containerController, replicationConfig, secConf, + null, importer, datanode.threadNamePrefix()); + replicationServer.start(); + } + + @AfterEach + public void cleanup() { + replicationServer.stop(); + } + + @Test + public void testDownload() throws IOException { + SimpleContainerDownloader downloader = + new SimpleContainerDownloader(conf, null); + Path downloadDir = Files.createDirectory(tempDir.resolve("DownloadDir")); + Path result = downloader.getContainerDataFromReplicas( + CONTAINER_ID, + Collections.singletonList(datanode), downloadDir, + CopyContainerCompression.NO_COMPRESSION); + + assertTrue(result.toString().startsWith(downloadDir.toString())); + + File[] files = downloadDir.toFile().listFiles(); + + assertNotNull(files); + assertEquals(files.length, 1); + + assertTrue(files[0].getName().startsWith("container-" + + CONTAINER_ID + "-")); + + downloader.close(); + } + + @Test + public void testUpload() { + ContainerReplicationSource source = + new OnDemandContainerReplicationSource(containerController); + + GrpcContainerUploader uploader = new GrpcContainerUploader(conf, null); + + PushReplicator pushReplicator = new PushReplicator(conf, source, uploader); + + ReplicationTask task = + new ReplicationTask(toTarget(CONTAINER_ID, datanode), pushReplicator); + + pushReplicator.replicate(task); + + assertEquals(pushContainerId.get(), CONTAINER_ID); + } + @Test void closesStreamOnError() { // GIVEN @@ -51,7 +230,7 @@ public void copyData(long containerId, OutputStream destination, }; ContainerImporter importer = mock(ContainerImporter.class); 
GrpcReplicationService subject = - new GrpcReplicationService(source, importer); + new GrpcReplicationService(source, importer, false); CopyContainerRequestProto request = CopyContainerRequestProto.newBuilder() .setContainerID(1) diff --git a/hadoop-hdds/rocks-native/src/main/native/Pipe.cpp b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationServiceWithZeroCopy.java similarity index 65% rename from hadoop-hdds/rocks-native/src/main/native/Pipe.cpp rename to hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationServiceWithZeroCopy.java index f1dd54438700..00891cf3e24d 100644 --- a/hadoop-hdds/rocks-native/src/main/native/Pipe.cpp +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationServiceWithZeroCopy.java @@ -6,32 +6,26 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ +package org.apache.hadoop.ozone.container.replication; -#include "Pipe.h" -#include +import org.junit.jupiter.api.BeforeEach; -const int Pipe::READ_FILE_DESCRIPTOR_IDX = 0; -const int Pipe::WRITE_FILE_DESCRIPTOR_IDX = 1; - -Pipe::Pipe() { - pipe(p); - open = true; -} - -Pipe::~Pipe() { - ::close(p[Pipe::READ_FILE_DESCRIPTOR_IDX]); - ::close(p[Pipe::WRITE_FILE_DESCRIPTOR_IDX]); -} - -void Pipe::close() { - open = false; +/** + * Tests {@link GrpcReplicationService}. + */ +class TestGrpcReplicationServiceWithZeroCopy + extends TestGrpcReplicationService { + @BeforeEach + public void setUp() throws Exception { + init(true); + } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java index 26c6853b64a6..f42d6afd6814 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java @@ -79,8 +79,8 @@ import static org.apache.hadoop.ozone.container.replication.AbstractReplicationTask.Status.DONE; import static org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand.fromSources; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority.LOW; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority.NORMAL; import static org.mockito.Mockito.any; @@ -478,11 +478,8 @@ private static class BlockingTask extends AbstractReplicationTask { @Override public void runTask() { runningLatch.countDown(); - try { - waitForCompleteLatch.await(); - } catch (InterruptedException e) { - fail("Interrupted waiting for the completion latch to be released"); - } + assertDoesNotThrow(() -> waitForCompleteLatch.await(), + "Interrupted waiting for the completion latch to be released"); setStatus(DONE); } } @@ -607,13 +604,10 @@ public void replicate(ReplicationTask task) { UUID.randomUUID().toString(), UUID.randomUUID().toString()); KeyValueContainer kvc = new KeyValueContainer(kvcd, conf); - - try { + assertDoesNotThrow(() -> { set.addContainer(kvc); task.setStatus(DONE); - } catch (Exception e) { - fail("Unexpected error: " + e.getMessage()); - } + }); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java index f054358b35b4..baaf296f02ba 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java +++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java @@ -75,7 +75,7 @@ void testReceiveDataForExistingContainer() throws Exception { return null; }).when(observer).onError(any()); SendContainerRequestHandler sendContainerRequestHandler - = new SendContainerRequestHandler(containerImporter, observer); + = new SendContainerRequestHandler(containerImporter, observer, null); ByteString data = ByteString.copyFromUtf8("test"); ContainerProtos.SendContainerRequest request = ContainerProtos.SendContainerRequest.newBuilder() diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java index 383e76dcc72a..23b7da263465 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java @@ -99,9 +99,9 @@ private void initTests(Boolean enable) throws Exception { conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED, schemaV3Enabled); conf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); conf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); setup(); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java index 137214aa1cd6..59b88bcbea46 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java @@ -38,6 +38,7 @@ import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; import org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask; import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.replication.ContainerImporter; import org.apache.hadoop.ozone.container.replication.ContainerReplicationSource; @@ -532,6 +533,8 @@ public void restartDatanode(int expectedMlv, boolean exactMatch) // Start new datanode with the same configuration. 
dsm = new DatanodeStateMachine(dd, conf); + StorageVolumeUtil.getHddsVolumesList(dsm.getContainer().getVolumeSet().getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempFolder.toFile())); int mlv = dsm.getLayoutVersionManager().getMetadataLayoutVersion(); if (exactMatch) { assertEquals(expectedMlv, mlv); diff --git a/hadoop-hdds/docs/.gitignore b/hadoop-hdds/docs/.gitignore new file mode 100644 index 000000000000..07b56370087f --- /dev/null +++ b/hadoop-hdds/docs/.gitignore @@ -0,0 +1,2 @@ +public +.hugo_build.lock diff --git a/hadoop-hdds/docs/content/feature/Decommission.md b/hadoop-hdds/docs/content/feature/Decommission.md index 86a345a460be..8058c0c0902e 100644 --- a/hadoop-hdds/docs/content/feature/Decommission.md +++ b/hadoop-hdds/docs/content/feature/Decommission.md @@ -51,6 +51,14 @@ ozone admin datanode decommission [-hV] [-id=] ``` You can enter multiple hosts to decommission multiple datanodes together. +To view the status of a decommissioning datanode, you can execute the following command: + +```shell +ozone admin datanode status decommission [-hV] [-id=] [--scm=] [--id=] [--ip=] +``` +You can pass the IP address or UUID of one datanode to view only the details related to that datanode. + + **Note:** To recommission a datanode you may execute the below command in cli, ```shell ozone admin datanode recommission [-hV] [-id=] diff --git a/hadoop-hdds/docs/content/feature/Decommission.zh.md b/hadoop-hdds/docs/content/feature/Decommission.zh.md index ad959469b953..231539fe0d1b 100644 --- a/hadoop-hdds/docs/content/feature/Decommission.zh.md +++ b/hadoop-hdds/docs/content/feature/Decommission.zh.md @@ -50,6 +50,14 @@ ozone admin datanode decommission [-hV] [-id=] ``` 您可以输入多个主机,以便一起Decommission多个DataNode。 +查看 Decommission时datanode 的状态,可以执行下面的命令, + +```shell +ozone admin datanode status decommission [-hV] [-id=] [--scm=] [--id=] [--ip=] +``` +您可以指定一个 Datanode 的 IP address 或 UUID 以查看该 Datanode 相关的详细信息。 + + **Note:** 要Recommission某台DataNode的时候,可在命令行执行以下命令, ```shell ozone admin datanode recommission [-hV] [-id=] diff --git a/hadoop-hdds/docs/content/feature/ErasureCoding.md b/hadoop-hdds/docs/content/feature/ErasureCoding.md index 77866762f6d3..c4d3739f1dcd 100644 --- a/hadoop-hdds/docs/content/feature/ErasureCoding.md +++ b/hadoop-hdds/docs/content/feature/ErasureCoding.md @@ -174,7 +174,9 @@ the configuration keys `ozone.server.default.replication.type` and `ozone.server ozone.server.default.replication.type EC +``` +```XML ozone.server.default.replication RS-6-3-1024k @@ -208,6 +210,22 @@ We can pass the EC Replication Config while creating the keys irrespective of bu ozone sh key put --type EC --replication rs-6-3-1024k ``` +When using ofs/o3fs, we can pass the EC Replication Config by setting the configuration keys `ozone.replication.type` and `ozone.replication`. + +```XML + + ozone.replication.type + EC + +``` + +```XML + + ozone.replication + rs-3-2-1024k + +``` + In the case bucket already has default EC Replication Config, there is no need of passing EC Replication Config while creating key. 
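As a rough illustration of the `ozone.replication.type` and `ozone.replication` keys described above, the same settings can also be supplied programmatically through the standard Hadoop `Configuration` API when creating an ofs client. The sketch below is not part of this patch and rests on assumptions: `omservice`, the volume/bucket names, and the class name are placeholders, and it presumes the ozone-filesystem client jar is on the classpath.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class EcOverOfsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Same keys as in the XML snippets above; the values are illustrative.
    conf.set("ozone.replication.type", "EC");
    conf.set("ozone.replication", "rs-3-2-1024k");

    // "omservice", "volume1" and "bucket1" are placeholders for a real cluster.
    try (FileSystem fs = FileSystem.get(URI.create("ofs://omservice/"), conf)) {
      // Keys created through this client pick up the EC config above, unless the
      // bucket already defines its own default replication config.
      fs.create(new Path("/volume1/bucket1/ec-key")).close();
    }
  }
}
```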
### Enable Intel ISA-L diff --git a/hadoop-hdds/docs/content/feature/Reconfigurability.md b/hadoop-hdds/docs/content/feature/Reconfigurability.md index af220554ba82..8aa0579139de 100644 --- a/hadoop-hdds/docs/content/feature/Reconfigurability.md +++ b/hadoop-hdds/docs/content/feature/Reconfigurability.md @@ -28,10 +28,11 @@ If a property is reconfigurable, you can modify it in the configuration file (`o command: ```shell -ozone admin reconfig --address= start|status|properties +ozone admin reconfig --service=[OM|SCM|DATANODE] --address= start|status|properties ``` The meaning of command options: +- **--service**: The node type of the server specified with --address - **--address**: RPC address for one server - Three operations are provided: - **start**: Execute the reconfig operation asynchronously @@ -40,60 +41,60 @@ The meaning of command options: ## Retrieve the reconfigurable properties list To retrieve all the reconfigurable properties list for a specific component in Ozone, -you can use the command: `ozone admin reconfig --address= properties`. +you can use the command: `ozone admin reconfig --service=[OM|SCM|DATANODE] --address= properties`. This command will list all the properties that can be dynamically reconfigured at runtime for specific component.
> For example, get the Ozone OM reconfigurable properties list. > ->$ `ozone admin reconfig --address=hadoop1:9862 properties`
+>$ `ozone admin reconfig --service=OM --address=hadoop1:9862 properties`
OM: Node [hadoop1:9862] Reconfigurable properties:
ozone.administrators ## OM Reconfigurability >For example, modify `ozone.administrators` in ozone-site.xml and execute: > -> $ `ozone admin reconfig --address=hadoop1:9862 start`
+> $ `ozone admin reconfig --service=OM --address=hadoop1:9862 start`
OM: Started OM reconfiguration task on node [hadoop1:9862]. > ->$ `ozone admin reconfig --address=hadoop1:9862 status`
+>$ `ozone admin reconfig --service=OM --address=hadoop1:9862 status`
OM: Reconfiguring status for node [hadoop1:9862]: started at Wed Dec 28 19:04:44 CST 2022 and finished at Wed Dec 28 19:04:44 CST 2022.
SUCCESS: Changed property ozone.administrators
From: "hadoop"
To: "hadoop,bigdata" > -> $ `ozone admin reconfig -address=hadoop1:9862 properties`
+> $ `ozone admin reconfig --service=OM --address=hadoop1:9862 properties`
OM: Node [hadoop1:9862] Reconfigurable properties:
ozone.administrators ## SCM Reconfigurability >For example, modify `ozone.administrators` in ozone-site.xml and execute: > -> $ `ozone admin reconfig --address=hadoop1:9860 start`
+> $ `ozone admin reconfig --service=SCM --address=hadoop1:9860 start`
SCM: Started OM reconfiguration task on node [hadoop1:9860]. > ->$ `ozone admin reconfig --address=hadoop1:9860 status`
+>$ `ozone admin reconfig --service=SCM --address=hadoop1:9860 status`
SCM: Reconfiguring status for node [hadoop1:9860]: started at Wed Dec 28 19:04:44 CST 2022 and finished at Wed Dec 28 19:04:44 CST 2022.
SUCCESS: Changed property ozone.administrators
From: "hadoop"
To: "hadoop,bigdata" > -> $ `ozone admin reconfig -address=hadoop1:9860 properties`
+> $ `ozone admin reconfig --service=SCM --address=hadoop1:9860 properties`
SCM: Node [hadoop1:9860] Reconfigurable properties:
ozone.administrators ## Datanode Reconfigurability >For example, modify `ozone.example.config` in ozone-site.xml and execute: > -> $ `ozone admin reconfig --address=hadoop1:19864 start`
+> $ `ozone admin reconfig --service=DATANODE --address=hadoop1:19864 start`
Datanode: Started reconfiguration task on node [hadoop1:19864]. > ->$ `ozone admin reconfig --address=hadoop1:19864 status`
+>$ `ozone admin reconfig --service=DATANODE --address=hadoop1:19864 status`
Datanode: Reconfiguring status for node [hadoop1:19864]: started at Wed Dec 28 19:04:44 CST 2022 and finished at Wed Dec 28 19:04:44 CST 2022.
SUCCESS: Changed property ozone.example.config
From: "old"
To: "new" > -> $ `ozone admin reconfig -address=hadoop1:19864 properties`
+> $ `ozone admin reconfig --service=DATANODE --address=hadoop1:19864 properties`
Datanode: Node [hadoop1:19864] Reconfigurable properties:
ozone.example.config @@ -104,7 +105,7 @@ Currently, only Datanode supports batch operations
>For example, to list the reconfigurable properties of all Datanodes:
-> $ `ozone admin reconfig --in-service-datanodes properties`
+> $ `ozone admin reconfig --service=DATANODE --in-service-datanodes properties`
Datanode: Node [hadoop1:19864] Reconfigurable properties:
ozone.example.config
Datanode: Node [hadoop2:19864] Reconfigurable properties:
diff --git a/hadoop-hdds/docs/content/feature/Reconfigurability.zh.md b/hadoop-hdds/docs/content/feature/Reconfigurability.zh.md index 8e983a98ab8d..957f0510548e 100644 --- a/hadoop-hdds/docs/content/feature/Reconfigurability.zh.md +++ b/hadoop-hdds/docs/content/feature/Reconfigurability.zh.md @@ -27,10 +27,11 @@ Ozone支持在不重启服务的情况下动态加载某些配置。如果某个 命令: ```shell -ozone admin reconfig --address= start|status|properties +ozone admin reconfig --service=[OM|SCM|DATANODE] --address= start|status|properties ``` 命令项的含义: +- **--service**: --address 指定节点的Ozone服务类型 - **--address**: 一台服务所在的主机与客户端通信的RPC地址 - 提供3中操作: - **start**: 开始异步执行动态加载配置 @@ -38,44 +39,44 @@ ozone admin reconfig --address= start|status|properties - **properties**: 列出支持动态加载的配置项 ## 获取可动态加载的属性列表 -要获取 Ozone 中指定组件的可动态加载属性列表, 可以使用命令 `ozone admin reconfig --address= properties`。 +要获取 Ozone 中指定组件的可动态加载属性列表, 可以使用命令 `ozone admin reconfig --service=[OM|SCM|DATANODE] --address= properties`。 这个命令将会列出所有可以在运行时动态加载的属性。 > 例如, 获取 Ozone OM 可动态加载属性列表 > ->$ `ozone admin reconfig --address=hadoop1:9862 properties`
+>$ `ozone admin reconfig --service=OM --address=hadoop1:9862 properties`
OM: Node [hadoop1:9862] Reconfigurable properties:
ozone.administrators ## OM动态配置 >例如, 在`ozone-site.xml`文件中修改`ozone.administrators`的值并执行: > -> $ `ozone admin reconfig --address=hadoop1:9862 start`
+> $ `ozone admin reconfig --service=OM --address=hadoop1:9862 start`
OM: Started reconfiguration task on node [hadoop1:9862]. > ->$ `ozone admin reconfig --address=hadoop1:9862 status`
+>$ `ozone admin reconfig --service=OM --address=hadoop1:9862 status`
OM: Reconfiguring status for node [hadoop1:9862]: started at Wed Dec 28 19:04:44 CST 2022 and finished at Wed Dec 28 19:04:44 CST 2022.
SUCCESS: Changed property ozone.administrators
From: "hadoop"
To: "hadoop,bigdata" > -> $ `ozone admin reconfig --address=hadoop1:9862 properties`
+> $ `ozone admin reconfig --service=OM --address=hadoop1:9862 properties`
OM: Node [hadoop1:9862] Reconfigurable properties:
ozone.administrators ## SCM动态配置 >例如, 在`ozone-site.xml`文件中修改`ozone.administrators`的值并执行: > -> $ `ozone admin reconfig --address=hadoop1:9860 start`
+> $ `ozone admin reconfig --service=SCM --address=hadoop1:9860 start`
SCM: Started reconfiguration task on node [hadoop1:9860]. > ->$ `ozone admin reconfig --address=hadoop1:9860 status`
+>$ `ozone admin reconfig --service=SCM --address=hadoop1:9860 status`
SCM: Reconfiguring status for node [hadoop1:9860]: started at Wed Dec 28 19:04:44 CST 2022 and finished at Wed Dec 28 19:04:44 CST 2022.
SUCCESS: Changed property ozone.administrators
From: "hadoop"
To: "hadoop,bigdata" > -> $ `ozone admin reconfig --address=hadoop1:9860 properties`
+> $ `ozone admin reconfig --service=SCM --address=hadoop1:9860 properties`
SCM: Node [hadoop1:9860] Reconfigurable properties:
ozone.administrators @@ -83,16 +84,16 @@ ozone.administrators ## Datanode 动态配置 >例如, 在`ozone-site.xml`文件中修改`ozone.example.config`的值并执行: > -> $ `ozone admin reconfig --address=hadoop1:19864 start`
+> $ `ozone admin reconfig --service=DATANODE --address=hadoop1:19864 start`
Datanode: Started reconfiguration task on node [hadoop1:19864]. > ->$ `ozone admin reconfig --address=hadoop1:19864 status`
+>$ `ozone admin reconfig --service=DATANODE --address=hadoop1:19864 status`
Datanode: Reconfiguring status for node [hadoop1:19864]: started at Wed Dec 28 19:04:44 CST 2022 and finished at Wed Dec 28 19:04:44 CST 2022.
SUCCESS: Changed property ozone.example.config
From: "old"
To: "new" > -> $ `ozone admin reconfig --address=hadoop1:19864 properties`
+> $ `ozone admin reconfig --service=DATANODE --address=hadoop1:19864 properties`
Datanode: Node [hadoop1:19864] Reconfigurable properties:
ozone.example.config @@ -104,7 +105,7 @@ ozone.example.config >例如, 列出 Datanode 所有可配置的属性:
-> $ `ozone admin reconfig --in-service-datanodes properties`
+> $ `ozone admin reconfig --service=DATANODE --in-service-datanodes properties`
Datanode: Node [hadoop1:19864] Reconfigurable properties:
ozone.example.config
Datanode: Node [hadoop2:19864] Reconfigurable properties:
diff --git a/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md b/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md index f9ea5f608461..23c015515035 100644 --- a/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md +++ b/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md @@ -432,7 +432,7 @@ bash-4.2$ aws s3api --endpoint-url http://s3g:9878 list-objects --bucket bucket- { "Key": "file1", "LastModified": "2022-02-16T00:10:00.000Z", - "ETag": "2022-02-16T00:10:00.000Z", + "ETag": "e99f93dedfe22e9a133dc3c634f14634", "Size": 3811, "StorageClass": "STANDARD" } diff --git a/hadoop-hdds/docs/content/feature/Snapshot.md b/hadoop-hdds/docs/content/feature/Snapshot.md index 880176ec669e..143a1a5f918f 100644 --- a/hadoop-hdds/docs/content/feature/Snapshot.md +++ b/hadoop-hdds/docs/content/feature/Snapshot.md @@ -73,5 +73,5 @@ Ozone also provides SnapshotDiff API. Whenever a user issues a SnapshotDiff betw Snapshot feature places additional demands on the cluster in terms of CPU, memory and storage. Cluster nodes running Ozone Managers and Ozone Datanodes should be configured with extra storage capacity depending on the number of active snapshots that the user wants to keep. Ozone Snapshots consume incremental amount of space per snapshot. e.g. if the active object store has 100 GB data (before replication) and a snapshot is taken, then the 100 GB of space will be locked in that snapshot. If the active object store consumes another 10 GB of space (before replication) subsequently then overall space requirement would be 100 GB + 10 GB = 110 GB in total (before replication). This is because common keys between Ozone snapshots and the active object store will share the storage space. -Similarly, nodes running Ozone Manager should be configured with extra memory depending on how many snapshots are concurrently read from. This also depepnds on how many concurrent SnapshotDiff jobs are expected in the cluster. By default, an Ozone Manager allows 10 concurrent SnapshotDiff jobs at a time, which can be increased in config. +Similarly, nodes running Ozone Manager should be configured with extra memory depending on how many snapshots are concurrently read from. This also depends on how many concurrent SnapshotDiff jobs are expected in the cluster. By default, an Ozone Manager allows 10 concurrent SnapshotDiff jobs at a time, which can be increased in config. diff --git a/hadoop-hdds/docs/content/feature/Streaming-Write-Pipeline.md b/hadoop-hdds/docs/content/feature/Streaming-Write-Pipeline.md index 5f55afebc3c8..e48a95c8bb9c 100644 --- a/hadoop-hdds/docs/content/feature/Streaming-Write-Pipeline.md +++ b/hadoop-hdds/docs/content/feature/Streaming-Write-Pipeline.md @@ -43,7 +43,7 @@ Set the following properties to the Ozone configuration file `ozone-site.xml`. - To enable the Streaming Write Pipeline feature, set the following property to true. ```XML - dfs.container.ratis.datastream.enabled + hdds.container.ratis.datastream.enabled false OZONE, CONTAINER, RATIS, DATASTREAM It specifies whether to enable data stream of container. @@ -52,7 +52,7 @@ Set the following properties to the Ozone configuration file `ozone-site.xml`. - Datanodes listen to the following port for the streaming traffic. ```XML - dfs.container.ratis.datastream.port + hdds.container.ratis.datastream.port 9855 OZONE, CONTAINER, RATIS, DATASTREAM The datastream port number of container. 
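Because the two datastream keys above were renamed from the `dfs.*` prefix to `hdds.*`, the same values can also be set in code, for example in a test harness. The sketch below is only an assumption-based illustration (the class name and usage are placeholders), mirroring the `OzoneConfiguration` usage that appears elsewhere in this patch.

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public final class DatastreamConfigSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Renamed property names as documented above; 9855 is the documented default port.
    conf.setBoolean("hdds.container.ratis.datastream.enabled", true);
    conf.setInt("hdds.container.ratis.datastream.port", 9855);

    // Read the values back to show these are ordinary Hadoop configuration entries.
    System.out.println(conf.get("hdds.container.ratis.datastream.enabled"));
    System.out.println(conf.get("hdds.container.ratis.datastream.port"));
  }
}
```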
diff --git a/hadoop-hdds/docs/content/interface/Cli.zh.md b/hadoop-hdds/docs/content/interface/Cli.zh.md new file mode 100644 index 000000000000..aa34a9245710 --- /dev/null +++ b/hadoop-hdds/docs/content/interface/Cli.zh.md @@ -0,0 +1,311 @@ +--- +title: 命令行接口 +weight: 4 +menu: + main: + parent: "客户端接口" +--- + + + +Ozone shell 是从命令行与 Ozone 交互的主要接口。在后台,它调用 [Java API]({{< ref "interface/JavaApi.md">}}). + +有些功能只能通过使用 `ozone sh` 命令才能访问。例如: + +1. 创建带有配额的卷 +2. 管理内部 ACL +3. 创建带有加密的键的桶 + +所有这些命令都是一次性的管理任务。应用程序也可以使用其他接口,如 Hadoop 兼容文件系统(o3fs 或 ofs)或 S3 接口来实现相同功能而无需使用 Ozone 命令行接口。 + + +Ozone shell 的帮助菜单可以在 _对象_ 级别 或者 _动作_ 级别被调出. + +示例命令: + +```bash +ozone sh volume --help +``` + +这条命令展示了卷的所有可用的 _动作_ 命令 + +或者也可以用来解释具体的某个 _动作_ ,例如: + +```bash +ozone sh volume create --help +``` + +这条命令输出卷的`create`动作的所有命令行选项 + +## 通用命令格式 + +Ozone shell 命令采取以下形式: + +> _ozone sh object action url_ + +**ozone** 脚本用于调用所有 Ozone 子命令。通过 ```sh``` 命令调用 ozone shell 命令。 + +对象可以是卷、桶或键。动作可以是创建、列出、删除等。 + +根据动作,Ozone URL 可以指向以下格式的卷、桶或键: + +_\[schema\]\[server:port\]/volume/bucket/key_ + + +其中, + +1. **Schema** - 应为 `o3`,这是访问 Ozone API 的原生 RPC 协议。是否指定 schema 是可选的。 + +2. **Server:Port** - 应为 Ozone Manager 的地址。如果不指定端口,则将使用 ozone-site.xml 中的默认端口。 + +请查看卷命令、桶命令和键命令部分了解更多详情。 + +## 卷操作 + +卷位于层次结构的顶层,仅由管理员管理。也可以指定所有者用户和配额。 + +示例命令: + +```shell +$ ozone sh volume create /vol1 +``` + +```shell +$ ozone sh volume info /vol1 +{ + "metadata" : { }, + "name" : "vol1", + "admin" : "hadoop", + "owner" : "hadoop", + "creationTime" : "2020-07-28T12:31:50.112Z", + "modificationTime" : "2020-07-28T12:31:50.112Z", + "acls" : [ { + "type" : "USER", + "name" : "hadoop", + "aclScope" : "ACCESS", + "aclList" : [ "ALL" ] + }, { + "type" : "GROUP", + "name" : "users", + "aclScope" : "ACCESS", + "aclList" : [ "ALL" ] + } ], + "quota" : 1152921504606846976 +} +``` + +```shell +$ ozone sh volume list / +[ { + "metadata" : { }, + "name" : "s3v", + "admin" : "hadoop", + "owner" : "hadoop", + "creationTime" : "2020-07-27T11:32:22.314Z", + "modificationTime" : "2020-07-27T11:32:22.314Z", + "acls" : [ { + "type" : "USER", + "name" : "hadoop", + "aclScope" : "ACCESS", + "aclList" : [ "ALL" ] + }, { + "type" : "GROUP", + "name" : "users", + "aclScope" : "ACCESS", + "aclList" : [ "ALL" ] + } ], + "quota" : 1152921504606846976 +}, { + .... +} ] +``` + +如果卷为空,我们可以使用以下命令删除卷。 + +```shell +$ ozone sh volume delete /vol1 +Volume vol1 is deleted +``` +如果卷包含任意桶或键,我们可以递归地删除卷。这将删除卷中所有的桶和键,然后删除卷本身。在运行这个命令后,将无法恢复已删除的内容。 + +```shell +$ ozone sh volume delete -r /vol1 +This command will delete volume recursively. +There is no recovery option after using this command, and no trash for FSO buckets. +Delay is expected running this command. +Enter 'yes' to proceed': yes +Volume vol1 is deleted +``` + +## 桶操作 + +桶是层次结构的第二层级,与 AWS S3 桶相似。如果用户有必要的权限,可以在卷中创建桶。 + +示例命令: + +```shell +$ ozone sh bucket create /vol1/bucket1 +``` + +```shell +$ ozone sh bucket info /vol1/bucket1 +{ + "metadata" : { }, + "volumeName" : "vol1", + "name" : "bucket1", + "storageType" : "DISK", + "versioning" : false, + "creationTime" : "2020-07-28T13:14:45.091Z", + "modificationTime" : "2020-07-28T13:14:45.091Z", + "encryptionKeyName" : null, + "sourceVolume" : null, + "sourceBucket" : null +} +``` + +如果桶是空的,我们可以用以下命令来删除桶。 + +```shell +$ ozone sh bucket delete /vol1/bucket1 +Bucket bucket1 is deleted +``` + +如果桶包含任意键,我们可以递归地删除桶。这将删除桶中的所有键,然后删除桶本身。在运行这个命令后,将无法恢复已删除的内容。 + +```shell +$ ozone sh bucket delete -r /vol1/bucket1 +This command will delete bucket recursively. 
+There is no recovery option after using this command, and deleted keys won't move to trash. +Enter 'yes' to proceed': yes +Bucket bucket1 is deleted +``` +[透明数据加密]({{< ref "security/SecuringTDE.md" >}}) 可以在桶层级被启用。 + +## 键操作 + +键是可以存储数据的对象。 + +```shell +$ ozone sh key put /vol1/bucket1/README.md README.md +``` + +

+ + + +```shell +$ ozone sh key info /vol1/bucket1/README.md +{ + "volumeName" : "vol1", + "bucketName" : "bucket1", + "name" : "README.md", + "dataSize" : 3841, + "creationTime" : "2020-07-28T13:17:20.749Z", + "modificationTime" : "2020-07-28T13:17:21.979Z", + "replicationType" : "RATIS", + "replicationFactor" : 1, + "ozoneKeyLocations" : [ { + "containerID" : 1, + "localID" : 104591670688743424, + "length" : 3841, + "offset" : 0 + } ], + "metadata" : { }, + "fileEncryptionInfo" : null +} +``` + +```shell +$ ozone sh key get /vol1/bucket1/README.md /tmp/ +``` + +```shell +$ ozone sh key delete /vol1/bucket1/key1 +``` + + +如果键是在 [FSO]({{< ref "feature/PrefixFSO.zh.md">}}) 桶中,当删除键时它会被移动到回收站,回收站的位置是: +```shell +$ ///.Trash/ +``` +如果键是在OBS桶中,它将被永久删除。 + +## 查询命令行结果 + +Ozone命令行返回JSON响应。[jq](https://stedolan.github.io/jq/manual/) 是一个命令行JSON处理器,可以用来过滤CLI结果以获取所需信息. + +示例命令: + +* 列出不是链接的 FSO 桶。 +```shell +$ ozone sh bucket list /s3v | jq '.[] | select(.link==false and .bucketLayout=="FILE_SYSTEM_OPTIMIZED")' +{ + "metadata": {}, + "volumeName": "s3v", + "name": "fso-bucket", + "storageType": "DISK", + "versioning": false, + "usedBytes": 0, + "usedNamespace": 0, + "creationTime": "2023-02-01T05:18:46.974Z", + "modificationTime": "2023-02-01T05:18:46.974Z", + "quotaInBytes": -1, + "quotaInNamespace": -1, + "bucketLayout": "FILE_SYSTEM_OPTIMIZED", + "owner": "om", + "link": false +} +``` + +* 列出 EC 桶以及它们的复制策略配置。 +```shell +$ ozone sh bucket list /vol1 | jq -r '.[] | select(.replicationConfig.replicationType == "EC") | {"name": .name, "replicationConfig": .replicationConfig}' +{ + "name": "ec5", + "replicationConfig": { + "data": 3, + "parity": 2, + "ecChunkSize": 1048576, + "codec": "RS", + "replicationType": "EC", + "requiredNodes": 5 + } +} +{ + "name": "ec9", + "replicationConfig": { + "data": 6, + "parity": 3, + "ecChunkSize": 1048576, + "codec": "RS", + "replicationType": "EC", + "requiredNodes": 9 + } +} +``` + +* 以制表符分隔的格式列出加密桶的名字以及它们的加密的键名。 +```shell + +$ ozone sh bucket list /vol1 | jq -r '.[] | select(.encryptionKeyName != null) | [.name, .encryptionKeyName] | @tsv' +ec5 key1 +encrypted-bucket key1 +``` diff --git a/hadoop-hdds/docs/content/interface/HttpFS.md b/hadoop-hdds/docs/content/interface/HttpFS.md index e413faf03cde..cebe0d315b02 100644 --- a/hadoop-hdds/docs/content/interface/HttpFS.md +++ b/hadoop-hdds/docs/content/interface/HttpFS.md @@ -84,7 +84,7 @@ Truncate a File | not implemented in Ozone Status of a File/Directory | supported List a Directory | supported List a File | supported -Iteratively List a Directory | supported +Iteratively List a Directory | unsupported ### Other File System Operations diff --git a/hadoop-hdds/docs/content/interface/Ofs.zh.md b/hadoop-hdds/docs/content/interface/Ofs.zh.md new file mode 100644 index 000000000000..25d7039f49ac --- /dev/null +++ b/hadoop-hdds/docs/content/interface/Ofs.zh.md @@ -0,0 +1,249 @@ +--- +title: Ofs (兼容 Hadoop 的文件系统) +date: 2017-09-14 +weight: 1 +menu: + main: + parent: "编程接口" +summary: Hadoop Compatible file system allows any application that expects an HDFS like interface to work against Ozone with zero changes. Frameworks like Apache Spark, YARN and Hive work against Ozone without needing any change. 
**Global level view.** +--- + + +兼容 Hadoop 的文件系统 (HCFS) 接口允许像 Ozone 这样的存储后端轻松集成到 Hadoop 生态系统中。Ozone 文件系统 (OFS) 是一个兼容 Hadoop 的文件系统。 + + + + +## 基础知识 + +有效的 OFS 路径示例: + +``` +ofs://om1/ +ofs://om3:9862/ +ofs://omservice/ +ofs://omservice/volume1/ +ofs://omservice/volume1/bucket1/ +ofs://omservice/volume1/bucket1/dir1 +ofs://omservice/volume1/bucket1/dir1/key1 + +ofs://omservice/tmp/ +ofs://omservice/tmp/key1 +``` + +在 OFS 文件系统中,卷和挂载点位于根目录级别。卷的下一级是桶。每个桶下面是键和目录。 + +请注意,对于挂载点,目前仅支持临时挂载 /tmp。 + +## 配置 + +请在 `core-site.xml` 添加下列配置。 + +{{< highlight xml >}} + + fs.ofs.impl + org.apache.hadoop.fs.ozone.RootedOzoneFileSystem + + + fs.defaultFS + ofs://om-host.example.com/ + +{{< /highlight >}} + +这将使所有的卷和桶成为默认的 Hadoop 兼容文件系统,并注册 ofs 文件系统类型。 + +您还需要将 ozone-filesystem-hadoop3.jar 文件添加到 classpath 中: + +{{< highlight bash >}} +export HADOOP_CLASSPATH=/opt/ozone/share/ozone/lib/ozone-filesystem-hadoop3-*.jar:$HADOOP_CLASSPATH +{{< /highlight >}} + +(请注意: 在 Hadoop 2.x 中, 请使用 `ozone-filesystem-hadoop2-*.jar`) + +当默认的文件系统被建立,用户可以运行命令例如ls,put,mkdir等。 +例如: + +{{< highlight bash >}} +hdfs dfs -ls / +{{< /highlight >}} + +请注意,ofs 对所有桶和卷都有效。用户可以使用 mkdir 创建桶和卷,例如创建名为 volume1 的卷和名为 bucket1 的桶 + +{{< highlight bash >}} +hdfs dfs -mkdir /volume1 +hdfs dfs -mkdir /volume1/bucket1 +{{< /highlight >}} + +或者使用 put 命令向桶中写入一个文件 + +{{< highlight bash >}} +hdfs dfs -put /etc/hosts /volume1/bucket1/test +{{< /highlight >}} + +有关更多用法,请参见: https://issues.apache.org/jira/secure/attachment/12987636/Design%20ofs%20v1.pdf + +## 与 [o3fs]({{< ref "interface/O3fs.md" >}}) 的区别 + +### 创建文件 + +OFS 不允许直接在根目录或卷下创建键(文件)。 +当用户尝试这样做时,他们将收到一个错误消息: + +```bash +$ ozone fs -touch /volume1/key1 +touch: Cannot create file under root or volume. +``` + +### 简化 fs.defaultFS + +使用 OFS 时,fs.defaultFS(在 core-site.xml 中)不再需要像 o3fs 那样在其路径中具有特定的卷和桶。 +只需设置 OM 主机名或 service ID(在 HA 的情况下): + + +```xml + + fs.defaultFS + ofs://omservice + +``` + +客户端将能够访问集群上的所有卷和桶,而无需指定主机名或 service ID。 + +```bash +$ ozone fs -mkdir -p /volume1/bucket1 +``` + +### 通过 FileSystem shell 直接管理卷和桶 + +管理员可以通过 Hadoop FS shell 轻松创建和删除卷和桶。卷和桶被视为类似于目录,因此如果它们不存在,可以使用 `-p` 创建: + +```bash +$ ozone fs -mkdir -p ofs://omservice/volume1/bucket1/dir1/ +``` +请注意,卷和桶名称字符集规则仍然适用。例如,桶和卷名称不接受下划线(`_`): + +```bash +$ ozone fs -mkdir -p /volume_1 +mkdir: Bucket or Volume name has an unsupported character : _ +``` + +## 挂载点和设置 /tmp + +为了与使用 /tmp/ 的传统 Hadoop 应用程序兼容,我们在 FS 的根目录有一个特殊的临时目录挂载点。 +这个功能将来可能会扩展,以支持自定义挂载路径。 + +目前 Ozone 支持两种 /tmp 的配置。第一种(默认)是每个用户的临时目录, +由一个挂载卷和一个用户特定的临时桶组成。第二种(通过 ozone-site.xml 配置) +是一个类似粘滞位的临时目录,对所有用户共用,由一个挂载卷和一个共用的临时桶组成。 + +重要提示:要使用它,首先,**管理员** 需要创建名为 tmp 的卷(卷名目前是硬编码的)并将其 ACL 设置为 world ALL 访问权限。 + +具体来说: + +```bash +$ ozone sh volume create tmp +$ ozone sh volume setacl tmp -al world::a +``` + +每个集群中这些命令**仅需要执行一次** + +### 对于每个用户的 /tmp 目录 (默认) + +**每个用户** 都需要先创建并初始化他们自己的 temp 桶一次 + +```bash +$ ozone fs -mkdir /tmp +2020-06-04 00:00:00,050 [main] INFO rpc.RpcClient: Creating Bucket: tmp/0238 ... 
+``` + +在此之后用户可以向该目录写入,就和向其他常规目录写入一样。例如: + +```bash +$ ozone fs -touch /tmp/key1 +``` + +### 对于所有用户共享的 /tmp 目录 + +要启用类似粘滞位的共享 /tmp 目录,请在 ozone-site.xml 中更新以下配置: + +```xml + + ozone.om.enable.ofs.shared.tmp.dir + true + +``` + +然后,在以**管理员**身份设置好 tmp 卷之后,还需要配置一个 tmp 桶,作为所有用户的共享 /tmp 目录,例如: + +```bash +$ ozone sh bucket create /tmp/tmp +$ ozone sh volume setacl tmp -a user:anyuser:rwlc \ + user:adminuser:a,group:anyuser:rwlc,group:adminuser:a tmp/tmp +``` + +在这里,anyuser 是管理员希望授予访问权限的用户名,而 adminuser 是管理员的用户名。 + +然后用户可以访问 tmp 目录: + +```bash +$ ozone fs -put ./NOTICE.txt ofs://om/tmp/key1 +``` + +## 启用回收站的删除操作 + +为了在 Ozone 中启用回收站,请将这些配置添加到 core-site.xml: + +{{< highlight xml >}} + + fs.trash.interval + 10 + + + fs.trash.classname + org.apache.hadoop.ozone.om.TrashPolicyOzone + +{{< /highlight >}} + +当启用回收站功能后删除键时,这些键会被移动到每个桶下的一个回收站目录中,因为在 Ozone 中不允许将键在桶之间移动(重命名)。 + +```bash +$ ozone fs -rm /volume1/bucket1/key1 +2020-06-04 00:00:00,100 [main] INFO fs.TrashPolicyDefault: Moved: 'ofs://id1/volume1/bucket1/key1' to trash at: ofs://id1/volume1/bucket1/.Trash/hadoop/Current/volume1/bucket1/key1 +``` + +这与 HDFS encryption zone 处理回收站位置的方式非常相似。 + +**请注意** + +1. 可以使用标志 `-skipTrash` 来永久删除文件,而不将其移动到回收站。 +2. 启用回收站时,不允许在桶或卷级别进行删除操作。在这种情况下,必须使用 skipTrash。 +即,不使用 skipTrash 的情况下,不允许使用 `ozone fs -rm -R ofs://vol1/bucket1` 或 `ozone fs -rm -R o3fs://bucket1.vol1` 进行操作。 + +## 递归地列出 + +OFS 支持递归地列出卷、桶和键。 + +例如,如果启用了 ACL 的话, 命令 `ozone fs -ls -R ofs://omservice/` 会递归地列出用户有 LIST 权限的所有卷、桶和键。 +如果禁用了 ACL,这个命令会列出该集群上的所有内容。 + +这个功能不会降低服务器性能,因为循环操作是在客户端上进行的。可以将其视为客户端向服务器发出多个请求以获取所有信息的过程。 diff --git a/hadoop-hdds/docs/content/interface/ReconApi.zh.md b/hadoop-hdds/docs/content/interface/ReconApi.zh.md index 7fa4b27b0dee..586fef16b2ff 100644 --- a/hadoop-hdds/docs/content/interface/ReconApi.zh.md +++ b/hadoop-hdds/docs/content/interface/ReconApi.zh.md @@ -222,7 +222,497 @@ Recon API v1 是一组 HTTP 端点,可以帮助您了解 Ozone 集群的当前 回传处于给定状态的容器的 UnhealthyContainerMetadata 对象。 不健康的容器状态可能为`MISSING`, `MIS_REPLICATED`, `UNDER_REPLICATED`,`OVER_REPLICATED`。 响应结构与`/containers/unhealthy`相同。 - + + +### GET /api/v1/containers/mismatch + +**回传** + +回传 OM 和 SCM 之间不匹配容器的列表。 +* 容器存在于 OM 中,但不存在于 SCM 中。 +* 容器存在于 SCM 中,但不存在于 OM 中。 + +```json +[ + { + "containerId" : 1, + "numberOfKeys" : 3, + "pipelines" : [ + "pipelineId" : "1423ghjds832403232", + "pipelineId" : "32vds94943fsdh4443", + "pipelineId" : "32vds94943fsdhs443" + ], + "existsAt" : "OM" + } + ... +] +``` + +### GET /api/v1/containers/mismatch/deleted + + +**参数** + +* prevKey (可选) + +返回在SCM中,给定prevKey(容器ID) 后被标记为已删除状态,且在OM中存在的容器集合, +以便找出映射到这些已删除状态容器的键列表。例如:prevKey=5,跳过直到准确地定位到前一个容器ID。 + +* limit (可选) + + 仅返回有限数量的结果。默认限制为1000。 + +**回传** + +返回在SCM中已删除但在OM中存在的容器集合,以找出映射到这些已删除状态容器的键列表。 + +```json +[ + { + "containerId": 2, + "numberOfKeys": 2, + "pipelines": [] + } + ... 
+] +``` + +### GET /api/v1/keys/open + + +**参数** + +* prevKey (可选) + + 返回给定 prevKey id 之后仍然处于打开状态且存在的键/文件集合。 + 例如:prevKey=/vol1/bucket1/key1,这将跳过键,直到成功定位到给定的 prevKey。 + +* limit (可选) + + 仅返回有限数量的结果。默认限制为1000。 + +**回传** + +返回处于打开状态的键/文件集合。 + +```json +{ + "lastKey": "/vol1/fso-bucket/dir1/dir2/file2", + "replicatedTotal": 13824, + "unreplicatedTotal": 4608, + "entities": [ + { + "path": "/vol1/bucket1/key1", + "keyState": "Open", + "inStateSince": 1667564193026, + "size": 1024, + "replicatedSize": 3072, + "unreplicatedSize": 1024, + "replicationType": "RATIS", + "replicationFactor": "THREE" + }, + { + "path": "/vol1/bucket1/key2", + "keyState": "Open", + "inStateSince": 1667564193026, + "size": 512, + "replicatedSize": 1536, + "unreplicatedSize": 512, + "replicationType": "RATIS", + "replicationFactor": "THREE" + }, + { + "path": "/vol1/fso-bucket/dir1/file1", + "keyState": "Open", + "inStateSince": 1667564193026, + "size": 1024, + "replicatedSize": 3072, + "unreplicatedSize": 1024, + "replicationType": "RATIS", + "replicationFactor": "THREE" + }, + { + "path": "/vol1/fso-bucket/dir1/dir2/file2", + "keyState": "Open", + "inStateSince": 1667564193026, + "size": 2048, + "replicatedSize": 6144, + "unreplicatedSize": 2048, + "replicationType": "RATIS", + "replicationFactor": "THREE" + } + ] +} +``` + +### GET /api/v1/keys/deletePending + + +**参数** + +* prevKey (可选) + + 返回给定 prevKey id 之后处于待删除状态的键/文件集合。 + 例如:prevKey=/vol1/bucket1/key1,这将跳过键,直到成功定位到给定的 prevKey。 + +* limit (可选) + + 仅返回有限数量的结果。默认限制为1000。 + +**回传** + +返回处于待删除状态的键/文件集合。 + +```json +{ + "lastKey": "sampleVol/bucketOne/key_one", + "replicatedTotal": -1530804718628866300, + "unreplicatedTotal": -1530804718628866300, + "deletedkeyinfo": [ + { + "omKeyInfoList": [ + { + "metadata": {}, + "objectID": 0, + "updateID": 0, + "parentObjectID": 0, + "volumeName": "sampleVol", + "bucketName": "bucketOne", + "keyName": "key_one", + "dataSize": -1530804718628866300, + "keyLocationVersions": [], + "creationTime": 0, + "modificationTime": 0, + "replicationConfig": { + "replicationFactor": "ONE", + "requiredNodes": 1, + "replicationType": "STANDALONE" + }, + "fileChecksum": null, + "fileName": "key_one", + "acls": [], + "path": "0/key_one", + "file": false, + "latestVersionLocations": null, + "replicatedSize": -1530804718628866300, + "fileEncryptionInfo": null, + "objectInfo": "OMKeyInfo{volume='sampleVol', bucket='bucketOne', key='key_one', dataSize='-1530804718628866186', creationTime='0', objectID='0', parentID='0', replication='STANDALONE/ONE', fileChecksum='null}", + "updateIDset": false + } + ] + } + ], + "status": "OK" +} +``` + +### GET /api/v1/keys/deletePending/dirs + + +**参数** + +* prevKey (可选) + + 返回给定 prevKey id 之后处于待删除状态的目录集合。 + 例如:prevKey=/vol1/bucket1/bucket1/dir1,这将跳过目录,直到成功定位到给定的 prevKey。 + +* limit (可选) + + 仅返回有限数量的结果。默认限制为1000。 + +**回传** + +返回处于待删除状态的目录集合。 + +```json +{ + "lastKey": "vol1/bucket1/bucket1/dir1", + "replicatedTotal": -1530804718628866300, + "unreplicatedTotal": -1530804718628866300, + "deletedkeyinfo": [ + { + "omKeyInfoList": [ + { + "metadata": {}, + "objectID": 0, + "updateID": 0, + "parentObjectID": 0, + "volumeName": "sampleVol", + "bucketName": "bucketOne", + "keyName": "key_one", + "dataSize": -1530804718628866300, + "keyLocationVersions": [], + "creationTime": 0, + "modificationTime": 0, + "replicationConfig": { + "replicationFactor": "ONE", + "requiredNodes": 1, + "replicationType": "STANDALONE" + }, + "fileChecksum": null, + "fileName": "key_one", + "acls": [], + "path": "0/key_one", + "file": false, + 
"latestVersionLocations": null, + "replicatedSize": -1530804718628866300, + "fileEncryptionInfo": null, + "objectInfo": "OMKeyInfo{volume='sampleVol', bucket='bucketOne', key='key_one', dataSize='-1530804718628866186', creationTime='0', objectID='0', parentID='0', replication='STANDALONE/ONE', fileChecksum='null}", + "updateIDset": false + } + ] + } + ], + "status": "OK" +} +``` + +## Blocks Metadata (admin only) +### GET /api/v1/blocks/deletePending + + +**参数** + +* prevKey (可选) + + 仅返回给定块ID(prevKey)之后处于待删除状态的块列表。 + 例如:prevKey=4,这将跳过 deletedBlocks 表中的键以跳过 prevKey 之前的记录。 + +* limit (可选) + + 仅返回有限数量的结果。默认限制为1000。 + +**回传** + +返回待删除的块列表。 + +```json +{ + "OPEN": [ + { + "containerId": 100, + "localIDList": [ + 1, + 2, + 3, + 4 + ], + "localIDCount": 4, + "txID": 1 + } + ] +} +``` + +## Namespace Metadata (仅 admin) + +### GET /api/v1/namespace/summary + +**参数** + +* path + + 字符串形式的路径请求,不包含任何协议前缀。 + +**回传** + +返回路径的基本信息汇总,包括实体类型和路径下对象的聚合计数。 + +如果路径存在,则 `status` 为 `OK`,否则为 `PATH_NOT_FOUND`。 + +示例: /api/v1/namespace/summary?path=/ +```json + { + "status": OK, + "type": ROOT, + "numVolume": 10, + "numBucket": 100, + "numDir": 1000, + "numKey": 10000 + } +``` + +示例: /api/v1/namespace/summary?path=/volume1 +```json + { + "status": OK, + "type": VOLUME, + "numVolume": -1, + "numBucket": 10, + "numDir": 100, + "numKey": 1000 + } +``` + +示例: /api/v1/namespace/summary?path=/volume1/bucket1 +```json + { + "status": OK, + "type": BUCKET, + "numVolume": -1, + "numBucket": -1, + "numDir": 50, + "numKey": 500 + } +``` + +示例: /api/v1/namespace/summary?path=/volume1/bucket1/dir +```json + { + "status": OK, + "type": DIRECTORY, + "numVolume": -1, + "numBucket": -1, + "numDir": 10, + "numKey": 100 + } +``` + +示例: /api/v1/namespace/summary?path=/volume1/bucket1/dir/nestedDir +```json + { + "status": OK, + "type": DIRECTORY, + "numVolume": -1, + "numBucket": -1, + "numDir": 5, + "numKey": 50 + } +``` + +如果任何 `num` 字段为 `-1`,则该路径请求不适用于该实体类型。 + +### GET /api/v1/namespace/du + +**参数** + +* path + + 字符串形式的路径请求,不包含任何协议前缀。 + +* files (可选) + + 一个布尔值,默认值为 `false`。如果设置为 `true`,则会计算路径下键的磁盘使用情况。 + +* replica (可选) + + 一个布尔值,默认为 `false`。如果设置为 `true`,则会计算键的副本大小的磁盘使用情况。 + +**回传** + +返回路径下所有子路径的磁盘使用情况。规范化 `path` 字段,返回路径下直接健的总大小作为 +`sizeDirectKey`,并以字节为单位返回 `size/sizeWithReplica`。 + +如果路径存在,则 `status` 为 `OK`,否则为 `PATH_NOT_FOUND`。 + +示例: /api/v1/namespace/du?path=/vol1/bucket1&files=true&replica=true +```json + { + "status": OK, + "path": "/vol1/bucket1", + "size": 100000, + "sizeWithReplica": 300000, + "subPathCount": 4, + "subPaths": [ + { + "path": "/vol1/bucket1/dir1-1", + "size": 30000, + "sizeWithReplica": 90000, + "isKey": false + }, + { + "path": "/vol1/bucket1/dir1-2", + "size": 30000, + "sizeWithReplica": 90000, + "isKey": false + }, + { + "path": "/vol1/bucket1/dir1-3", + "size": 30000, + "sizeWithReplica": 90000, + "isKey": false + }, + { + "path": "/vol1/bucket1/key1-1", + "size": 10000, + "sizeWithReplica": 30000, + "isKey": true + } + ], + "sizeDirectKey": 10000 + } +``` +如果 `files` 设置为 `false`,则子路径 `/vol1/bucket1/key1-1` 将被省略。 +如果 `replica` 设置为 `false`,则 `sizeWithReplica` 返回 `-1`。 +如果路径的实体类型无法具有直接键(例如根目录、卷),则 `sizeDirectKey` 返回 `-1`。 + +### GET /api/v1/namespace/quota + +**参数** + +* path + + 路径请求为字符串,不包含任何协议前缀。 + +**回传** + +返回路径下允许的配额和已使用的配额。 +只有卷和存储桶具有配额。其他类型不适用于配额请求 + +如果请求有效,则 `status` 为 `OK`;如果路径不存在,则为 `PATH_NOT_FOUND`; +如果路径存在但路径的实体类型不适用于请求,则为 `TYPE_NOT_APPLICABLE`。 + +示例: /api/v1/namespace/quota?path=/vol +```json + { + "status": OK, + "allowed": 200000, + "used": 160000 + } +``` + +如果未设置配额,则 `allowed` 返回 
`-1`。详情请参阅 [Ozone 中的配额]。 +(https://ci-hadoop.apache.org/view/Hadoop%20Ozone/job/ozone-doc-master/lastSuccessfulBuild/artifact/hadoop-hdds/docs/public/feature/quota.html) + + +### GET /api/v1/namespace/dist + +**参数** + +* path + + 路径请求为字符串,不包含任何协议前缀。 + +**回传** + +返回路径下所有键的文件大小分布。 + +如果请求有效,则 `status` 为 `OK`;如果路径不存在,则为 `PATH_NOT_FOUND`; +如果路径存在,但该路径是一个键,键不具有文件大小分布,则为 `TYPE_NOT_APPLICABLE`。 + +示例: /api/v1/namespace/dist?path=/ +```json + { + "status": OK, + "dist": [ + 0, + 0, + 10, + 20, + 0, + 30, + 0, + 100, + ... + ] + } +``` + +Recon跟踪所有大小从`1 KB`到`1 PB`的键。对于小于`1 KB`的键,映射到第一个箱(索引); +对于大于`1 PB`的键,映射到最后一个箱(索引)。 + +`dist` 的每个索引都映射到一个文件大小范围(例如 `1 MB` 到 `2 MB`)。 + ## 集群状态 ### GET /api/v1/clusterState @@ -251,6 +741,114 @@ Recon API v1 是一组 HTTP 端点,可以帮助您了解 Ozone 集群的当前 "keys": 25 } ``` + +## Volumes (仅 admin) + +### GET /api/v1/volumes + +**参数** + +* prevKey (可选) + + 仅返回给定 prevKey 之后的卷。 + 示例: prevKey=vol1 + +* limit (可选) + + 仅返回有限数量的结果。默认限制为1000。 + +**回传** + +返回集群中的所有卷。 + +```json + { + "totalCount": 4, + "volumes": [{ + "volume": "vol1", + "owner": "testuser", + "admin": "ozone", + "creationTime": 1665588176660 , + "modificationTime": 1665590397315, + "quotaInNamespace": 2048, + "quotaInBytes": 1073741824, + "usedNamespace": 10, + "acls": [ + { + "type": "USER", + "name": "testuser", + "scope": "ACCESS", + "aclList": [ + "WRITE", + "READ", + "DELETE" + ] + } + ] + }, + ... + ] + } +``` + +## Buckets (仅 admin) + +### GET /api/v1/buckets + +**参数** + +* volume (可选) + + 卷以字符串形式表示,不包含任何协议前缀。 + +* prevKey (可选) + + 返回给定 prevKey 之后的存储桶。 如果未指定卷,则忽略 prevKey。 + 示例: prevKey=bucket1 + +* limit (可选) + + 仅返回有限数量的结果。默认限制为1000。 + + +**回传** + +如果未指定卷或指定的卷是一个空字符串,则返回集群中的所有存储桶。 +如果指定了 `volume`,则仅返回 `volume` 下的存储桶。 + +```json + { + "totalCount": 5, + "buckets": [{ + "volumeName": "vol1", + "bucketName": "buck1", + "versioning": false, + "storageType": "DISK", + "creationTime": 1665588176616, + "modificationTime": 1665590392293, + "usedBytes": 943718400, + "usedNamespace": 40000, + "quotaInBytes": 1073741824, + "quotaInNamespace": 50000, + "owner": "testuser", + "bucketLayout": "OBJECT_STORE", + "acls": [ + { + "type": "USER", + "name": "testuser", + "scope": "ACCESS", + "aclList": [ + "WRITE", + "READ", + "DELETE" + ] + } + ] + }, + ... + ] + } +``` ## 数据节点 diff --git a/hadoop-hdds/docs/content/interface/_index.zh.md b/hadoop-hdds/docs/content/interface/_index.zh.md index fd435aad5dce..82c5e1fb9c97 100644 --- a/hadoop-hdds/docs/content/interface/_index.zh.md +++ b/hadoop-hdds/docs/content/interface/_index.zh.md @@ -1,5 +1,5 @@ --- -title: "编程接口" +title: "客户端接口" menu: main: weight: 5 diff --git a/hadoop-hdds/docs/content/recipe/BotoClient.zh.md b/hadoop-hdds/docs/content/recipe/BotoClient.zh.md new file mode 100644 index 000000000000..64a1d8748a64 --- /dev/null +++ b/hadoop-hdds/docs/content/recipe/BotoClient.zh.md @@ -0,0 +1,188 @@ +--- +title: 使用 Boto3 客户端访问 Ozone 对象存储 +linktitle: Boto3 +summary: 如何使用 Boto3 客户端访问 Ozone 对象存储? 
+--- + + +这个指南展示了如何从 Boto3 客户端访问 Ozone 对象存储。以下 API 已经过验证: + +- Create bucket +- List bucket +- Head bucket +- Delete bucket +- Upload file +- Download file +- Delete objects(keys) +- Head object +- Multipart upload + + +## 要求 + +您将需要较高版本的 Python3 来运行 Boto3 客户端,请参考 Boto3 的安装需求: + +https://boto3.amazonaws.com/v1/documentation/api/latest/index.html + +## 获取对 Ozone 的资源访问 +您可以参考 Amazon Boto3 文档,关于创建 `s3` 资源的内容在此处: +https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html + + s3 = boto3.resource('s3', + endpoint_url='http://localhost:9878', + aws_access_key_id='testuser/scm@EXAMPLE.COM', + aws_secret_access_key='c261b6ecabf7d37d5f9ded654b1c724adac9bd9f13e247a235e567e8296d2999' + ) + 'endpoint_url' is pointing to Ozone s3 endpoint. + + +## 通过 session 获取对 Ozone 的客户端访问 +您可以参考 Amazon Boto3 文档,关于 session 的内容在此处: +https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html + + Create a session + session = boto3.session.Session() + + Obtain s3 client to Ozone via session: + + s3_client = session.client( + service_name='s3', + aws_access_key_id='testuser/scm@EXAMPLE.COM', + aws_secret_access_key='c261b6ecabf7d37d5f9ded654b1c724adac9bd9f13e247a235e567e8296d2999', + endpoint_url='http://localhost:9878', + ) + 'endpoint_url' is pointing to Ozone s3 endpoint. + + In our code sample below, we're demonstrating the usage of both s3 and s3_client. + +如果您连接到一个安全的集群,有多种方式配置 Boto3 客户端凭证。在这些情况下,创建 Ozone s3 客户端时传递 `aws_access_key_id` 和 `aws_secret_access_key` 的上述步骤应该被跳过。 + +请参考 Boto3 文档以获取详细信息,在此处: +https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html + + +### 创建桶 + response = s3_client.create_bucket(Bucket='bucket1') + print(response) + +这将在一个名为 `s3v` 的卷中创建一个名为 `bucket1` 的桶 + +### 列出所有桶 + response = s3_client.list_buckets() + print('Existing buckets:') + for bucket in response['Buckets']: + print(f' {bucket["Name"]}') + +这将列出 Ozone `s3v` 卷中的所有桶 + +### 查看桶信息 + response = s3_client.head_bucket(Bucket='bucket1') + print(response) + +这将在 Ozone 卷 `s3v` 中查看桶 `bucket1` 的信息。 + +### 删除桶 + response = s3_client.delete_bucket(Bucket='bucket1') + print(response) + +这将从 Ozone 卷 `s3v` 中删除一个桶 `bucket1`。 + +### 上传文件 + response = s3.Bucket('bucket1').upload_file('./README.md','README.md') + print(response) + +这将从向 Ozone 卷 `s3v` 和桶 `bucket1` 中上传 `README.md` 文件并创建一个 `README.md` 键。 + +### 下载文件 + response = s3.Bucket('bucket1').download_file('README.md', 'download.md') + print(response) + +这将从从 Ozone 卷 `s3v` 和桶 `bucket1` 中下载 `README.md` 并创建一个 `README.md` 文件到本地。 + +### 查看对象信息 + response = s3_client.head_object(Bucket='bucket1', Key='README.md') + print(response) + +这将查看一个位于 Ozone 卷 `s3v` 和桶 `bucket1` 中的 `README.md` 文件的信息。 + +### 删除多个对象 + response = s3_client.delete_objects( + Bucket='bucket1', + Delete={ + 'Objects': [ + { + 'Key': 'README4.md', + }, + { + 'Key': 'README3.md', + }, + ], + 'Quiet': False, + }, + ) + +这将从 Ozone 卷 `s3v` 和桶 `bucket1` 中删除多个对象 `README3.md` 和 `README4.md` + +### 分片上传 + response = s3_client.create_multipart_upload(Bucket='bucket1', Key='key1') + print(response) + uid=response['UploadId'] + print(uid) + + response = s3_client.upload_part_copy( + Bucket='bucket1', + CopySource='/bucket1/maven.gz', + Key='key1', + PartNumber=1, + UploadId=str(uid) + ) + print(response) + etag1=response.get('CopyPartResult').get('ETag') + print(etag1) + + response = s3_client.upload_part_copy( + Bucket='bucket1', + CopySource='/bucket1/maven1.gz', + Key='key1', + PartNumber=2, + UploadId=str(uid) + ) + print(response) + 
etag2=response.get('CopyPartResult').get('ETag') + print(etag2) + + response = s3_client.complete_multipart_upload( + Bucket='bucket1', + Key='key1', + MultipartUpload={ + 'Parts': [ + { + 'ETag': str(etag1), + 'PartNumber': 1, + }, + { + 'ETag': str(etag2), + 'PartNumber': 2, + }, + ], + }, + UploadId=str(uid), + ) + print(response) + +这将使用来自 Ozone 卷 `s3v` 的 `maven.gz` 和 `maven1.gz` 作为复制源,以创建 Ozone 卷 `s3v` 中的新对象 `key1`。请注意,`ETag` 是必需的且对于使用分片上传 API 非常重要。 diff --git a/hadoop-hdds/docs/pom.xml b/hadoop-hdds/docs/pom.xml index 6281fd749591..94f60ea4aa0c 100644 --- a/hadoop-hdds/docs/pom.xml +++ b/hadoop-hdds/docs/pom.xml @@ -51,30 +51,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - org.apache.rat - apache-rat-plugin - - - static/slides/* - themes/ozonedoc/static/js/bootstrap.min.js - themes/ozonedoc/static/js/jquery-3.5.1.min.js - themes/ozonedoc/static/js/swagger-ui-bundle.js - themes/ozonedoc/static/css/bootstrap-theme.min.css - - themes/ozonedoc/static/css/bootstrap.min.css.map - themes/ozonedoc/static/css/bootstrap.min.css - themes/ozonedoc/static/css/bootstrap-theme.min.css.map - - themes/ozonedoc/static/css/swagger-ui.css - - themes/ozonedoc/static/fonts/glyphicons-halflings-regular.svg - - themes/ozonedoc/layouts/index.html - themes/ozonedoc/theme.toml - - - diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml index 5ead355066d0..af0887ad16bf 100644 --- a/hadoop-hdds/framework/pom.xml +++ b/hadoop-hdds/framework/pom.xml @@ -83,8 +83,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> log4j-core + com.lmax disruptor + runtime org.eclipse.jetty diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolClientSideTranslatorPB.java index 85acc1431fc8..0ab92cfee02c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolClientSideTranslatorPB.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.ReconfigureProtocol; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos.GetServerNameRequestProto; import org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos.GetServerNameResponseProto; import org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos.GetConfigurationChangeProto; @@ -82,26 +83,45 @@ public class ReconfigureProtocolClientSideTranslatorPB implements private final ReconfigureProtocolPB rpcProxy; - public ReconfigureProtocolClientSideTranslatorPB(InetSocketAddress addr, + public ReconfigureProtocolClientSideTranslatorPB(HddsProtos.NodeType nodeType, InetSocketAddress addr, UserGroupInformation ugi, OzoneConfiguration conf) throws IOException { - rpcProxy = createReconfigureProtocolProxy(addr, ugi, conf); + rpcProxy = createReconfigureProtocolProxy(nodeType, addr, ugi, conf); } - static ReconfigureProtocolPB createReconfigureProtocolProxy( + static ReconfigureProtocolPB createReconfigureProtocolProxy(HddsProtos.NodeType nodeType, InetSocketAddress addr, UserGroupInformation ugi, OzoneConfiguration conf) throws IOException { - - RPC.setProtocolEngine(OzoneConfiguration.of(conf), - ReconfigureProtocolPB.class, 
ProtobufRpcEngine.class); Configuration hadoopConf = LegacyHadoopConfigurationSource .asHadoopConfiguration(conf); - return RPC.getProtocolProxy( - ReconfigureProtocolPB.class, - RPC.getProtocolVersion(ReconfigureProtocolPB.class), - addr, ugi, hadoopConf, - NetUtils.getDefaultSocketFactory(hadoopConf)) - .getProxy(); + if (nodeType == HddsProtos.NodeType.OM) { + RPC.setProtocolEngine(OzoneConfiguration.of(conf), + ReconfigureProtocolOmPB.class, ProtobufRpcEngine.class); + return RPC.getProtocolProxy( + ReconfigureProtocolOmPB.class, + RPC.getProtocolVersion(ReconfigureProtocolOmPB.class), + addr, ugi, hadoopConf, + NetUtils.getDefaultSocketFactory(hadoopConf)) + .getProxy(); + } else if (nodeType == HddsProtos.NodeType.DATANODE) { + RPC.setProtocolEngine(OzoneConfiguration.of(conf), + ReconfigureProtocolDatanodePB.class, ProtobufRpcEngine.class); + return RPC.getProtocolProxy( + ReconfigureProtocolDatanodePB.class, + RPC.getProtocolVersion(ReconfigureProtocolDatanodePB.class), + addr, ugi, hadoopConf, + NetUtils.getDefaultSocketFactory(hadoopConf)) + .getProxy(); + } else { + RPC.setProtocolEngine(OzoneConfiguration.of(conf), + ReconfigureProtocolPB.class, ProtobufRpcEngine.class); + return RPC.getProtocolProxy( + ReconfigureProtocolPB.class, + RPC.getProtocolVersion(ReconfigureProtocolPB.class), + addr, ugi, hadoopConf, + NetUtils.getDefaultSocketFactory(hadoopConf)) + .getProxy(); + } } @Override diff --git a/hadoop-hdds/rocks-native/src/main/native/Pipe.h b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolDatanodePB.java similarity index 52% rename from hadoop-hdds/rocks-native/src/main/native/Pipe.h rename to hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolDatanodePB.java index aa75c6311cbc..49e95b9c26f2 100644 --- a/hadoop-hdds/rocks-native/src/main/native/Pipe.h +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolDatanodePB.java @@ -1,4 +1,4 @@ -/* +/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,41 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +package org.apache.hadoop.hdds.protocolPB; -#ifndef ROCKS_NATIVE_PIPE_H -#define ROCKS_NATIVE_PIPE_H +import org.apache.hadoop.hdds.DFSConfigKeysLegacy; +import org.apache.hadoop.ipc.ProtocolInfo; +import org.apache.hadoop.security.KerberosInfo; -#include - -class Pipe { - public: - static const int READ_FILE_DESCRIPTOR_IDX; - static const int WRITE_FILE_DESCRIPTOR_IDX; - Pipe(); - ~Pipe(); - void close(); - int getReadFd() { - return getPipeFileDescriptorIndex(READ_FILE_DESCRIPTOR_IDX); - } - - int getWriteFd() { - return getPipeFileDescriptorIndex(WRITE_FILE_DESCRIPTOR_IDX); - } - - int getPipeFileDescriptorIndex(int idx) { - return p[idx]; - } - - bool isOpen() { - return open; - } - - - private: - int p[2]; - FILE* wr; - bool open; - -}; - -#endif //ROCKS_NATIVE_PIPE_H +/** + * Protocol that clients use to communicate with the DN to do + * reconfiguration on the fly. 
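+ * The server principal is the datanode Kerberos principal
+ * (DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY).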
+ */ +@ProtocolInfo( + protocolName = "org.apache.hadoop.hdds.protocol.ReconfigureProtocol", + protocolVersion = 1) +@KerberosInfo(serverPrincipal = DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY) +public interface ReconfigureProtocolDatanodePB extends ReconfigureProtocolPB { +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolOmPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolOmPB.java new file mode 100644 index 000000000000..2775e71efa74 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolOmPB.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.protocolPB; + +import org.apache.hadoop.ipc.ProtocolInfo; +import org.apache.hadoop.security.KerberosInfo; + +/** + * Protocol that clients use to communicate with the OM to do + * reconfiguration on the fly. + */ +@ProtocolInfo( + protocolName = "org.apache.hadoop.hdds.protocol.ReconfigureProtocol", + protocolVersion = 1) +// TODO: move OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY to hdds-common. +@KerberosInfo(serverPrincipal = "ozone.om.kerberos.principal") +public interface ReconfigureProtocolOmPB extends ReconfigureProtocolPB { +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolPB.java index e1702ce0ada8..cb31a366ad7d 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolPB.java @@ -23,7 +23,7 @@ import org.apache.hadoop.security.KerberosInfo; /** - * Protocol that clients use to communicate with the OM/SCM to do + * Protocol that clients use to communicate with the SCM to do * reconfiguration on the fly. */ @ProtocolInfo( diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolServerSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolServerSideTranslatorPB.java index 8db07cbc80f3..7a6a5a904244 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolServerSideTranslatorPB.java @@ -45,7 +45,7 @@ * ReconfigureProtocol. 
*/ public class ReconfigureProtocolServerSideTranslatorPB implements - ReconfigureProtocolPB { + ReconfigureProtocolPB, ReconfigureProtocolOmPB, ReconfigureProtocolDatanodePB { private final ReconfigureProtocol impl; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/ScmTopologyClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/ScmTopologyClient.java new file mode 100644 index 000000000000..5e33eefde6c5 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/ScmTopologyClient.java @@ -0,0 +1,137 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.client; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; +import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.time.Duration; +import java.time.Instant; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static java.util.Objects.requireNonNull; +import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_NETWORK_TOPOLOGY_REFRESH_DURATION; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_NETWORK_TOPOLOGY_REFRESH_DURATION_DEFAULT; + +/** + * This client implements a background thread which periodically checks and + * gets the latest network topology cluster tree from SCM. + */ +public class ScmTopologyClient { + private static final Logger LOG = + LoggerFactory.getLogger(ScmTopologyClient.class); + + private final ScmBlockLocationProtocol scmBlockLocationProtocol; + private final AtomicReference cache = + new AtomicReference<>(); + private ScheduledExecutorService executorService; + + public ScmTopologyClient( + ScmBlockLocationProtocol scmBlockLocationProtocol) { + this.scmBlockLocationProtocol = scmBlockLocationProtocol; + } + + public NetworkTopology getClusterMap() { + return requireNonNull(cache.get(), + "ScmBlockLocationClient must have been initialized already."); + } + + public void start(ConfigurationSource conf) throws IOException { + final InnerNode initialTopology = + scmBlockLocationProtocol.getNetworkTopology(); + LOG.info("Initial network topology fetched from SCM: {}.", + initialTopology); + cache.set(new NetworkTopologyImpl(conf.get( + ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, + ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT), + initialTopology)); + scheduleNetworkTopologyPoller(conf, Instant.now()); + } + + public void stop() { + if (executorService != null) { + executorService.shutdown(); + try { + if (executorService.awaitTermination(5, TimeUnit.SECONDS)) { + executorService.shutdownNow(); + } + } catch (InterruptedException e) { + LOG.error("Interrupted while shutting down executor service.", e); + Thread.currentThread().interrupt(); + } + } + } + + private void scheduleNetworkTopologyPoller(ConfigurationSource conf, + Instant initialInvocation) { + Duration refreshDuration = parseRefreshDuration(conf); + Instant nextRefresh = initialInvocation.plus(refreshDuration); + ThreadFactory threadFactory = new ThreadFactoryBuilder() + .setNameFormat("NetworkTopologyPoller") + .setDaemon(true) + .build(); + executorService = Executors.newScheduledThreadPool(1, threadFactory); + Duration 
initialDelay = Duration.between(Instant.now(), nextRefresh); + + LOG.debug("Scheduling NetworkTopologyPoller with an initial delay of {}.", + initialDelay); + executorService.scheduleAtFixedRate(() -> checkAndRefresh(conf), + initialDelay.toMillis(), refreshDuration.toMillis(), + TimeUnit.MILLISECONDS); + } + + public static Duration parseRefreshDuration(ConfigurationSource conf) { + long refreshDurationInMs = conf.getTimeDuration( + OZONE_OM_NETWORK_TOPOLOGY_REFRESH_DURATION, + OZONE_OM_NETWORK_TOPOLOGY_REFRESH_DURATION_DEFAULT, + TimeUnit.MILLISECONDS); + return Duration.ofMillis(refreshDurationInMs); + } + + private synchronized void checkAndRefresh(ConfigurationSource conf) { + InnerNode current = (InnerNode) cache.get().getNode(ROOT); + try { + InnerNode newTopology = scmBlockLocationProtocol.getNetworkTopology(); + if (!newTopology.equals(current)) { + cache.set(new NetworkTopologyImpl(conf.get( + ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, + ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT), + newTopology)); + LOG.info("Updated network topology fetched from SCM: {}.", newTopology); + } + } catch (IOException e) { + throw new UncheckedIOException( + "Error fetching updated network topology from SCM", e); + } + } +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java new file mode 100644 index 000000000000..8dc9cb3cca2f --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java @@ -0,0 +1,24 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *

+ * Freon related helper classes used for load testing. + */ + +/** + * Contains SCM client related classes. + */ +package org.apache.hadoop.hdds.scm.client; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java index ef2585488faa..8c84af859b4a 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.scm.AddSCMRequest; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.hdds.scm.net.InnerNode; import org.apache.hadoop.security.KerberosInfo; import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; @@ -138,4 +139,11 @@ List allocateBlock(long size, int numBlocks, */ List sortDatanodes(List nodes, String clientMachine) throws IOException; + + /** + * Retrieves the hierarchical cluster tree representing the network topology. + * @return the root node of the network topology cluster tree. + * @throws IOException + */ + InnerNode getNetworkTopology() throws IOException; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java index 2e724969998b..1f114304ccaa 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; import org.apache.commons.lang3.StringUtils; @@ -39,6 +40,8 @@ import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockResponseProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksRequestProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksResponseProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.GetClusterTreeRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.GetClusterTreeResponseProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.KeyBlocks; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos .SortDatanodesRequestProto; @@ -49,6 +52,9 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.net.InnerNodeImpl; +import org.apache.hadoop.hdds.scm.net.Node; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.proxy.SCMBlockLocationFailoverProxyProvider; @@ -328,6 +334,43 @@ public List sortDatanodes(List 
nodes, return results; } + @Override + public InnerNode getNetworkTopology() throws IOException { + GetClusterTreeRequestProto request = + GetClusterTreeRequestProto.newBuilder().build(); + SCMBlockLocationRequest wrapper = createSCMBlockRequest(Type.GetClusterTree) + .setGetClusterTreeRequest(request) + .build(); + + final SCMBlockLocationResponse wrappedResponse = + handleError(submitRequest(wrapper)); + GetClusterTreeResponseProto resp = + wrappedResponse.getGetClusterTreeResponse(); + + return (InnerNode) setParent( + InnerNodeImpl.fromProtobuf(resp.getClusterTree())); + } + + /** + * Sets the parent field for the clusterTree nodes recursively. + * + * @param node cluster tree without parents set. + * @return updated cluster tree with parents set. + */ + private Node setParent(Node node) { + if (node instanceof InnerNodeImpl) { + InnerNodeImpl innerNode = (InnerNodeImpl) node; + if (innerNode.getChildrenMap() != null) { + for (Map.Entry child : innerNode.getChildrenMap() + .entrySet()) { + child.getValue().setParent(innerNode); + setParent(child.getValue()); + } + } + } + return node; + } + @Override public Object getUnderlyingProxyObject() { return rpcProxy; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index 84a0fa4886ce..b573ee0d040c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -68,6 +68,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetPipelineResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerCountRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerCountResponseProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetMetricsRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetMetricsResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineResponseProto; @@ -526,15 +528,16 @@ public HddsProtos.Node queryNode(UUID uuid) throws IOException { /** * Attempts to decommission the list of nodes. 
* @param nodes The list of hostnames or hostname:ports to decommission + * @param force true to skip fail-early checks and try to decommission nodes * @throws IOException */ @Override - public List decommissionNodes(List nodes) + public List decommissionNodes(List nodes, boolean force) throws IOException { Preconditions.checkNotNull(nodes); DecommissionNodesRequestProto request = DecommissionNodesRequestProto.newBuilder() - .addAllHosts(nodes) + .addAllHosts(nodes).setForce(force) .build(); DecommissionNodesResponseProto response = submitRequest(Type.DecommissionNodes, @@ -900,7 +903,13 @@ public StartContainerBalancerResponseProto startContainerBalancer( Optional maxDatanodesPercentageToInvolvePerIteration, Optional maxSizeToMovePerIterationInGB, Optional maxSizeEnteringTargetInGB, - Optional maxSizeLeavingSourceInGB) throws IOException { + Optional maxSizeLeavingSourceInGB, + Optional balancingInterval, + Optional moveTimeout, + Optional moveReplicationTimeout, + Optional networkTopologyEnable, + Optional includeNodes, + Optional excludeNodes) throws IOException { StartContainerBalancerRequestProto.Builder builder = StartContainerBalancerRequestProto.newBuilder(); builder.setTraceID(TracingUtil.exportCurrentSpan()); @@ -909,29 +918,29 @@ public StartContainerBalancerResponseProto startContainerBalancer( if (threshold.isPresent()) { double tsd = threshold.get(); Preconditions.checkState(tsd >= 0.0D && tsd < 100D, - "threshold should be specified in range [0.0, 100.0)."); + "Threshold should be specified in the range [0.0, 100.0)."); builder.setThreshold(tsd); } if (maxSizeToMovePerIterationInGB.isPresent()) { long mstm = maxSizeToMovePerIterationInGB.get(); Preconditions.checkState(mstm > 0, - "maxSizeToMovePerIterationInGB must be positive."); + "Max Size To Move Per Iteration In GB must be positive."); builder.setMaxSizeToMovePerIterationInGB(mstm); } if (maxDatanodesPercentageToInvolvePerIteration.isPresent()) { int mdti = maxDatanodesPercentageToInvolvePerIteration.get(); Preconditions.checkState(mdti >= 0, - "maxDatanodesPercentageToInvolvePerIteration must be " + + "Max Datanodes Percentage To Involve Per Iteration must be " + "greater than equal to zero."); Preconditions.checkState(mdti <= 100, - "maxDatanodesPercentageToInvolvePerIteration must be " + + "Max Datanodes Percentage To Involve Per Iteration must be " + "lesser than equal to hundred."); builder.setMaxDatanodesPercentageToInvolvePerIteration(mdti); } if (iterations.isPresent()) { int i = iterations.get(); Preconditions.checkState(i > 0 || i == -1, - "number of iterations must be positive or" + + "Number of Iterations must be positive or" + " -1 (for running container balancer infinitely)."); builder.setIterations(i); } @@ -939,17 +948,53 @@ public StartContainerBalancerResponseProto startContainerBalancer( if (maxSizeEnteringTargetInGB.isPresent()) { long mset = maxSizeEnteringTargetInGB.get(); Preconditions.checkState(mset > 0, - "maxSizeEnteringTargetInGB must be positive."); + "Max Size Entering Target In GB must be positive."); builder.setMaxSizeEnteringTargetInGB(mset); } if (maxSizeLeavingSourceInGB.isPresent()) { long msls = maxSizeLeavingSourceInGB.get(); Preconditions.checkState(msls > 0, - "maxSizeLeavingSourceInGB must be positive."); + "Max Size Leaving Source In GB must be positive."); builder.setMaxSizeLeavingSourceInGB(msls); } + if (balancingInterval.isPresent()) { + int bi = balancingInterval.get(); + Preconditions.checkState(bi > 0, + "Balancing Interval must be greater than zero."); + 
builder.setBalancingInterval(bi); + } + + if (moveTimeout.isPresent()) { + int mt = moveTimeout.get(); + Preconditions.checkState(mt > 0, + "Move Timeout must be greater than zero."); + builder.setMoveTimeout(mt); + } + + if (moveReplicationTimeout.isPresent()) { + int mrt = moveReplicationTimeout.get(); + Preconditions.checkState(mrt > 0, + "Move Replication Timeout must be greater than zero."); + builder.setMoveReplicationTimeout(mrt); + } + + if (networkTopologyEnable.isPresent()) { + Boolean nt = networkTopologyEnable.get(); + builder.setNetworkTopologyEnable(nt); + } + + if (includeNodes.isPresent()) { + String in = includeNodes.get(); + builder.setIncludeNodes(in); + } + + if (excludeNodes.isPresent()) { + String ex = excludeNodes.get(); + builder.setExcludeNodes(ex); + } + StartContainerBalancerRequestProto request = builder.build(); return submitRequest(Type.StartContainerBalancer, builder1 -> builder1.setStartContainerBalancerRequest(request)) @@ -1143,4 +1188,13 @@ public DecommissionScmResponseProto decommissionScm( .getDecommissionScmResponse(); return response; } + + @Override + public String getMetrics(String query) throws IOException { + GetMetricsRequestProto request = GetMetricsRequestProto.newBuilder().setQuery(query).build(); + GetMetricsResponseProto response = submitRequest(Type.GetMetrics, + builder -> builder.setGetMetricsRequest(request)).getGetMetricsResponse(); + String metricsJsonStr = response.getMetricsJson(); + return metricsJsonStr; + } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java index dc217476a60c..047386730818 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java @@ -21,8 +21,6 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.apache.hadoop.metrics2.lib.MutableRate; -import com.google.common.annotations.VisibleForTesting; - /** * Metrics for any event watcher. */ @@ -56,23 +54,19 @@ public void updateFinishingTime(long duration) { completionTime.add(duration); } - @VisibleForTesting - public MutableCounterLong getTrackedEvents() { + MutableCounterLong getTrackedEvents() { return trackedEvents; } - @VisibleForTesting - public MutableCounterLong getTimedOutEvents() { + MutableCounterLong getTimedOutEvents() { return timedOutEvents; } - @VisibleForTesting - public MutableCounterLong getCompletedEvents() { + MutableCounterLong getCompletedEvents() { return completedEvents; } - @VisibleForTesting - public MutableRate getCompletionTime() { + MutableRate getCompletionTime() { return completionTime; } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java index 70d394e73b31..4fae3686c93c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java @@ -382,14 +382,14 @@ public static int getLogWarnInterval(ConfigurationSource conf) { * @return port number. 
*/ public static int getContainerPort(ConfigurationSource conf) { - return conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + return conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); } public static Collection getOzoneDatanodeRatisDirectory( ConfigurationSource conf) { Collection rawLocations = conf.getTrimmedStringCollection( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); if (rawLocations.isEmpty()) { rawLocations = new ArrayList<>(1); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java index 32fcbfec6e44..31089bc1c0b6 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedLogger; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.hdds.utils.db.managed.ManagedStatistics; import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteOptions; @@ -405,12 +406,7 @@ private ManagedDBOptions getDefaultDBOptions( // Apply logging settings. if (rocksDBConfiguration.isRocksdbLoggingEnabled()) { - org.rocksdb.Logger logger = new org.rocksdb.Logger(dbOptions) { - @Override - protected void log(InfoLogLevel infoLogLevel, String s) { - ROCKS_DB_LOGGER.info(s); - } - }; + ManagedLogger logger = new ManagedLogger(dbOptions, (infoLogLevel, s) -> ROCKS_DB_LOGGER.info(s)); InfoLogLevel level = InfoLogLevel.valueOf(rocksDBConfiguration .getRocksdbLogLevel() + "_LEVEL"); logger.setInfoLogLevel(level); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java index 504c3dd47f32..8095c1cbb1f4 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java @@ -48,6 +48,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -55,7 +56,6 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; /** * Tests for RocksDBTable Store. 
@@ -96,11 +96,7 @@ public static void initConstants() { private static boolean consume(Table.KeyValue keyValue) { count++; - try { - assertNotNull(keyValue.getKey()); - } catch (IOException ex) { - fail("Unexpected Exception " + ex); - } + assertNotNull(assertDoesNotThrow(keyValue::getKey)); return true; } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java index 9e16ebb99e19..f437d6518c5f 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java @@ -21,6 +21,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -29,7 +30,6 @@ import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.io.File; @@ -217,11 +217,7 @@ public void batchDelete() throws Exception { private static boolean consume(Table.KeyValue keyValue) { count++; - try { - assertNotNull(keyValue.getKey()); - } catch (IOException ex) { - fail(ex.toString()); - } + assertNotNull(assertDoesNotThrow(keyValue::getKey)); return true; } diff --git a/hadoop-hdds/hadoop-dependency-client/pom.xml b/hadoop-hdds/hadoop-dependency-client/pom.xml index 85ae7bd4b201..5d0ca946aeed 100644 --- a/hadoop-hdds/hadoop-dependency-client/pom.xml +++ b/hadoop-hdds/hadoop-dependency-client/pom.xml @@ -43,6 +43,18 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-common ${hadoop.version} + + com.nimbusds + nimbus-jose-jwt + + + org.xerial.snappy + snappy-java + + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + org.apache.hadoop hadoop-annotations @@ -99,6 +111,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> com.github.pjfanning jersey-json + + com.google.code.findbugs + jsr305 + com.sun.jersey jersey-core @@ -194,12 +210,24 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + + + com.nimbusds + nimbus-jose-jwt + org.apache.hadoop hadoop-hdfs ${hadoop.version} compile + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + com.google.guava guava @@ -286,5 +314,9 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + org.xerial.snappy + snappy-java + diff --git a/hadoop-hdds/hadoop-dependency-server/pom.xml b/hadoop-hdds/hadoop-dependency-server/pom.xml index abee8cc400fb..f50048a0182f 100644 --- a/hadoop-hdds/hadoop-dependency-server/pom.xml +++ b/hadoop-hdds/hadoop-dependency-server/pom.xml @@ -43,6 +43,18 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-common ${hadoop.version} + + com.nimbusds + nimbus-jose-jwt + + + org.xerial.snappy + snappy-java + + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + org.apache.curator * @@ -79,6 +91,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> com.github.pjfanning jersey-json + + com.google.code.findbugs + jsr305 + com.sun.jersey 
jersey-json @@ -101,6 +117,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + + + com.nimbusds + nimbus-jose-jwt + commons-cli @@ -112,6 +136,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ${hadoop.version} compile + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + io.netty * @@ -134,5 +162,9 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + org.xerial.snappy + snappy-java + diff --git a/hadoop-hdds/hadoop-dependency-test/pom.xml b/hadoop-hdds/hadoop-dependency-test/pom.xml index d194670acc36..86ba8d5a715c 100644 --- a/hadoop-hdds/hadoop-dependency-test/pom.xml +++ b/hadoop-hdds/hadoop-dependency-test/pom.xml @@ -76,6 +76,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.junit.jupiter junit-jupiter-api + + + org.junit.jupiter + junit-jupiter-engine + org.junit.jupiter junit-jupiter-params diff --git a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto index 6adca817ed1d..eff95099371c 100644 --- a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto +++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto @@ -83,6 +83,7 @@ message ScmContainerLocationRequest { optional DecommissionScmRequestProto decommissionScmRequest = 44; optional SingleNodeQueryRequestProto singleNodeQueryRequest = 45; optional GetContainersOnDecomNodeRequestProto getContainersOnDecomNodeRequest = 46; + optional GetMetricsRequestProto getMetricsRequest = 47; } message ScmContainerLocationResponse { @@ -137,6 +138,7 @@ message ScmContainerLocationResponse { optional DecommissionScmResponseProto decommissionScmResponse = 44; optional SingleNodeQueryResponseProto singleNodeQueryResponse = 45; optional GetContainersOnDecomNodeResponseProto getContainersOnDecomNodeResponse = 46; + optional GetMetricsResponseProto getMetricsResponse = 47; enum Status { OK = 1; @@ -190,6 +192,7 @@ enum Type { DecommissionScm = 40; SingleNodeQuery = 41; GetContainersOnDecomNode = 42; + GetMetrics = 43; } /** @@ -359,6 +362,7 @@ message DatanodeUsageInfoResponseProto { */ message DecommissionNodesRequestProto { repeated string hosts = 1; + optional bool force = 2; } @@ -574,6 +578,12 @@ message StartContainerBalancerRequestProto { optional int64 maxSizeLeavingSourceInGB = 7; optional int32 maxDatanodesPercentageToInvolvePerIteration = 8; optional int32 iterations = 9; + optional int32 balancingInterval = 10; + optional int32 moveTimeout = 11; + optional int32 moveReplicationTimeout = 12; + optional bool networkTopologyEnable = 13; + optional string includeNodes = 14; + optional string excludeNodes = 15; } message StartContainerBalancerResponseProto { @@ -618,6 +628,14 @@ message GetContainersOnDecomNodeResponseProto { repeated ContainersOnDecomNodeProto containersOnDecomNode = 1; } +message GetMetricsRequestProto { + optional string query = 1; +} + +message GetMetricsResponseProto { + optional string metricsJson = 1; +} + /** * Protocol used from an HDFS node to StorageContainerManager. See the request * and response messages for details of the RPC calls. 
diff --git a/hadoop-hdds/interface-client/pom.xml b/hadoop-hdds/interface-client/pom.xml index 76fdfad111a9..2160f7c5edbf 100644 --- a/hadoop-hdds/interface-client/pom.xml +++ b/hadoop-hdds/interface-client/pom.xml @@ -51,11 +51,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> javax.annotation javax.annotation-api - - com.google.code.findbugs - jsr305 - compile - diff --git a/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto b/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto index 0206a8ea71d4..f5cac299238d 100644 --- a/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto +++ b/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto @@ -107,6 +107,7 @@ enum Type { StreamWrite = 20; FinalizeBlock = 21; + Echo = 22; } @@ -215,6 +216,7 @@ message ContainerCommandRequestProto { optional uint32 version = 24; optional FinalizeBlockRequestProto finalizeBlock = 25; + optional EchoRequestProto echo = 26; } message ContainerCommandResponseProto { @@ -247,6 +249,7 @@ message ContainerCommandResponseProto { optional GetCommittedBlockLengthResponseProto getCommittedBlockLength = 21; optional FinalizeBlockResponseProto finalizeBlock = 22; + optional EchoResponseProto echo = 23; } message ContainerDataProto { @@ -390,6 +393,16 @@ message ListBlockResponseProto { repeated BlockData blockData = 1; } +message EchoRequestProto { + optional bytes payload = 1; + optional int32 payloadSizeResp = 2; + optional int32 sleepTimeMs = 3; +} + +message EchoResponseProto { + optional bytes payload = 1; +} + // Chunk Operations message ChunkInfo { @@ -423,9 +436,11 @@ message WriteChunkRequestProto { required DatanodeBlockID blockID = 1; optional ChunkInfo chunkData = 2; optional bytes data = 3; + optional PutBlockRequestProto block = 4; } message WriteChunkResponseProto { + optional GetCommittedBlockLengthResponseProto committedBlockLength = 1; } enum ReadChunkVersion { diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto index 3f346300b3ed..4555d1cf4a39 100644 --- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto +++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto @@ -45,8 +45,10 @@ message DatanodeDetailsProto { optional string networkLocation = 7; // Network topology location optional NodeOperationalState persistedOpState = 8; // The Operational state persisted in the datanode.id file optional int64 persistedOpStateExpiry = 9; // The seconds after the epoch when the OpState should expire + optional int32 currentVersion = 10; // Current datanode wire version // TODO(runzhiwang): when uuid is gone, specify 1 as the index of uuid128 and mark as required optional UUID uuid128 = 100; // UUID with 128 bits assigned to the Datanode. 
+ optional uint32 level = 101; } /** @@ -497,3 +499,26 @@ message CompactionLogEntryProto { repeated CompactionFileInfoProto outputFileIntoList = 4; optional string compactionReason = 5; } + +message NodeTopology { + optional string name = 1; + optional string location = 2; + optional uint32 cost = 3; + optional uint32 level = 4; +} + +message NetworkNode { + optional DatanodeDetailsProto datanodeDetails = 1; + optional InnerNode innerNode = 3; +} + +message ChildrenMap { + optional string networkName = 1; + optional NetworkNode networkNode = 2; +} + +message InnerNode { + optional NodeTopology nodeTopology = 1; + optional uint32 numOfLeaves = 2; + repeated ChildrenMap childrenMap = 3; +} diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto index 307c23a56202..3d281975f2b4 100644 --- a/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto +++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto @@ -30,7 +30,6 @@ package hadoop.hdds.block; import "hdds.proto"; - // SCM Block protocol enum Type { @@ -39,6 +38,7 @@ enum Type { GetScmInfo = 13; SortDatanodes = 14; AddScm = 15; + GetClusterTree = 16; } message SCMBlockLocationRequest { @@ -56,6 +56,7 @@ message SCMBlockLocationRequest { optional hadoop.hdds.GetScmInfoRequestProto getScmInfoRequest = 13; optional SortDatanodesRequestProto sortDatanodesRequest = 14; optional hadoop.hdds.AddScmRequestProto addScmRequestProto = 15; + optional GetClusterTreeRequestProto getClusterTreeRequest = 16; } message SCMBlockLocationResponse { @@ -80,6 +81,7 @@ message SCMBlockLocationResponse { optional hadoop.hdds.GetScmInfoResponseProto getScmInfoResponse = 13; optional SortDatanodesResponseProto sortDatanodesResponse = 14; optional hadoop.hdds.AddScmResponseProto addScmResponse = 15; + optional GetClusterTreeResponseProto getClusterTreeResponse = 16; } /** @@ -230,6 +232,13 @@ message SortDatanodesResponseProto{ repeated DatanodeDetailsProto node = 1; } +message GetClusterTreeRequestProto { +} + +message GetClusterTreeResponseProto { + required InnerNode clusterTree = 1; +} + /** * Protocol used from OzoneManager to StorageContainerManager. * See request and response messages for details of the RPC calls. diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/JniLibNamePropertyWriter.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/JniLibNamePropertyWriter.java new file mode 100644 index 000000000000..0d79a1c833d0 --- /dev/null +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/JniLibNamePropertyWriter.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.utils.db.managed; + + +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.io.Writer; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Paths; + +/** + * Class to write the rocksdb lib name to a file. + * This would be used to build native ozone_rocksdb_tools library. + */ +public final class JniLibNamePropertyWriter { + + private JniLibNamePropertyWriter() { + } + + public static void main(String[] args) { + String filePath = args[0]; + try (Writer writer = new OutputStreamWriter( + Files.newOutputStream(Paths.get(filePath)), StandardCharsets.UTF_8)) { + String libName = ManagedRocksObjectUtils.getRocksDBLibFileName(); + writer.write("rocksdbLibName=" + libName); + } catch (IOException e) { + e.printStackTrace(); + } + } +} diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java index 638739ff557e..4eb2a0d2bc36 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java @@ -18,20 +18,34 @@ */ package org.apache.hadoop.hdds.utils.db.managed; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.ratis.util.UncheckedAutoCloseable; import org.rocksdb.DBOptions; +import org.rocksdb.Logger; +import java.util.concurrent.atomic.AtomicReference; + +import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.LOG; import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track; /** * Managed DBOptions. */ public class ManagedDBOptions extends DBOptions { + private final UncheckedAutoCloseable leakTracker = track(this); + private final AtomicReference loggerRef = new AtomicReference<>(); + + @Override + public DBOptions setLogger(Logger logger) { + IOUtils.close(LOG, loggerRef.getAndSet(logger)); + return super.setLogger(logger); + } @Override public void close() { try { + IOUtils.close(LOG, loggerRef.getAndSet(null)); super.close(); } finally { leakTracker.close(); diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLogger.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLogger.java new file mode 100644 index 000000000000..d04f91cd4e29 --- /dev/null +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLogger.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.utils.db.managed; + +import org.apache.ratis.util.UncheckedAutoCloseable; +import org.rocksdb.InfoLogLevel; +import org.rocksdb.Logger; + +import java.util.function.BiConsumer; + +import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track; + +/** Managed {@link Logger}. */ +public class ManagedLogger extends Logger { + + private final UncheckedAutoCloseable leakTracker = track(this); + private final BiConsumer delegate; + + public ManagedLogger(ManagedDBOptions dbOptions, BiConsumer delegate) { + super(dbOptions); + this.delegate = delegate; + } + + @Override + protected void log(InfoLogLevel infoLogLevel, String logMsg) { + delegate.accept(infoLogLevel, logMsg); + } + + @Override + public void close() { + try { + super.close(); + } finally { + leakTracker.close(); + } + } +} diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java index 9c86a47d7401..148abee7fc0e 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.utils.LeakDetector; import org.apache.ratis.util.UncheckedAutoCloseable; import org.rocksdb.RocksDB; +import org.rocksdb.util.Environment; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -94,4 +95,11 @@ public static void waitForFileDelete(File file, Duration maxDuration) public static void loadRocksDBLibrary() { RocksDB.loadLibrary(); } + + /** + * Returns RocksDB library file name. 
+ */ + public static String getRocksDBLibFileName() { + return Environment.getJniLibraryFileName("rocksdb"); + } } diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index a308158c404c..567f432b3881 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -234,45 +234,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - - org.apache.rat - apache-rat-plugin - - - **/*.json - **/hs_err*.log - **/.attach_* - **/**.rej - **/.factorypath - public - **/*.iml - **/target/** - **/output.xml - **/log.html - **/report.html - .gitattributes - .idea/** - src/main/resources/webapps/static/angular-1.8.0.min.js - src/main/resources/webapps/static/angular-nvd3-1.0.9.min.js - src/main/resources/webapps/static/angular-route-1.8.0.min.js - src/main/resources/webapps/static/d3-3.5.17.min.js - src/main/resources/webapps/static/nvd3-1.8.5.min.css.map - src/main/resources/webapps/static/nvd3-1.8.5.min.css - src/main/resources/webapps/static/nvd3-1.8.5.min.js.map - src/main/resources/webapps/static/nvd3-1.8.5.min.js - src/main/resources/webapps/static/jquery-3.5.1.min.js - src/main/resources/webapps/static/bootstrap-3.4.1/** - src/test/resources/additionalfields.container - src/test/resources/incorrect.checksum.container - src/test/resources/incorrect.container - src/test/resources/test.db.ini - src/test/resources/123-dn-container.db/** - src/test/resources/123.container - src/main/resources/proto.lock - - - org.apache.maven.plugins maven-jar-plugin diff --git a/hadoop-hdds/rocks-native/pom.xml b/hadoop-hdds/rocks-native/pom.xml index c12ddbb091bd..de8c68a4801e 100644 --- a/hadoop-hdds/rocks-native/pom.xml +++ b/hadoop-hdds/rocks-native/pom.xml @@ -55,8 +55,6 @@ 8 8 - https://sourceware.org/pub/bzip2/bzip2-${bzip2.version}.tar.gz - https://zlib.net/fossils/zlib-${zlib.version}.tar.gz @@ -113,79 +111,80 @@ - com.googlecode.maven-download-plugin - download-maven-plugin + org.codehaus.mojo + exec-maven-plugin - rocksdb source download - generate-sources - - wget - - - https://github.com/facebook/rocksdb/archive/refs/tags/v${rocksdb.version}.tar.gz - rocksdb-v${rocksdb.version}.tar.gz - ${project.build.directory}/rocksdb - - - - zlib source download - generate-sources - - wget - - - ${zlib.url} - zlib-${zlib.version}.tar.gz - ${project.build.directory}/zlib - - - - bzip2 source download - generate-sources + set-property + initialize - wget + java - ${bzip2.url} - bzip2-v${bzip2.version}.tar.gz - ${project.build.directory}/bzip2 + org.apache.hadoop.hdds.utils.db.managed.JniLibNamePropertyWriter + + ${project.build.directory}/propertyFile.txt + + + + + org.codehaus.mojo + properties-maven-plugin + - lz4 source download - generate-sources + read-property-from-file + initialize - wget + read-project-properties - https://github.com/lz4/lz4/archive/refs/tags/v${lz4.version}.tar.gz - lz4-v${lz4.version}.tar.gz - ${project.build.directory}/lz4 + + ${project.build.directory}/propertyFile.txt + + + + + org.apache.maven.plugins + maven-dependency-plugin + - snappy source download - generate-sources + unpack-dependency + initialize - wget + unpack - https://github.com/google/snappy/archive/refs/tags/${snappy.version}.tar.gz - snappy-v${snappy.version}.tar.gz - ${project.build.directory}/snappy + + + org.rocksdb + rocksdbjni + jar + false + ${project.build.directory}/rocksdbjni + + + + + + com.googlecode.maven-download-plugin + download-maven-plugin + - zstd source download + rocksdb source download generate-sources wget - https://github.com/facebook/zstd/archive/refs/tags/v${zstd.version}.tar.gz - 
zstd-v${zstd.version}.tar.gz - ${project.build.directory}/zstd + https://github.com/facebook/rocksdb/archive/refs/tags/v${rocksdb.version}.tar.gz + rocksdb-v${rocksdb.version}.tar.gz + ${project.build.directory}/rocksdb @@ -219,89 +218,6 @@ - - - - - - - - - - run - - - - build-zlib - process-sources - - - - - - - - - - - - run - - - - build-bzip2 - process-sources - - - - - - - - - run - - - - build-lz4 - process-sources - - - - - - - - - run - - - - build-zstd - process-sources - - - - - - - - - run - - - - build-snappy - process-sources - - - - - - - - - @@ -319,11 +235,11 @@ + - - + - + @@ -337,6 +253,8 @@ + @@ -346,14 +264,12 @@ - - - - - - + + + @@ -423,8 +339,8 @@ ${env.JAVA_HOME}/bin/javah - org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool - org.apache.hadoop.hdds.utils.db.managed.PipeInputStream + org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader + org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator ${project.build.directory}/native/javah @@ -479,8 +395,8 @@ ${project.build.outputDirectory}:${project.build.directory}/dependency/* -h ${project.build.directory}/native/javah - ${project.basedir}/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpTool.java - ${project.basedir}/src/main/java/org/apache/hadoop/hdds/utils/db/managed/PipeInputStream.java + ${project.basedir}/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java + ${project.basedir}/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java diff --git a/hadoop-hdds/rocks-native/src/CMakeLists.txt b/hadoop-hdds/rocks-native/src/CMakeLists.txt index 051660777493..eb4fb8d46fd7 100644 --- a/hadoop-hdds/rocks-native/src/CMakeLists.txt +++ b/hadoop-hdds/rocks-native/src/CMakeLists.txt @@ -21,6 +21,7 @@ # cmake_minimum_required(VERSION 2.8) +add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC") project(ozone_native) @@ -29,50 +30,33 @@ find_package(JNI REQUIRED) include_directories(${JNI_INCLUDE_DIRS}) set(CMAKE_CXX_STANDARD ${CMAKE_STANDARDS}) -set(linked_libraries "") +set(CMAKE_SKIP_BUILD_RPATH FALSE) + +set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE) + +set(CMAKE_INSTALL_RPATH "") + +set(CMAKE_INSTALL_RPATH_USE_LINK_PATH FALSE) + if(NOT GENERATED_JAVAH) message(FATAL_ERROR "You must set the CMake variable GENERATED_JAVAH") endif() include_directories(${GENERATED_JAVAH}) if(${SST_DUMP_INCLUDE}) include_directories(${ROCKSDB_HEADERS}) - set(SOURCE_FILES ${NATIVE_DIR}/SSTDumpTool.cpp ${NATIVE_DIR}/PipeInputStream.cpp ${NATIVE_DIR}/Pipe.h ${NATIVE_DIR}/Pipe.cpp ${NATIVE_DIR}/cplusplus_to_java_convert.h) - ADD_LIBRARY(rocksdb STATIC IMPORTED) - set_target_properties( - rocksdb - PROPERTIES - IMPORTED_LOCATION ${ROCKSDB_LIB}/librocksdb.a) + set(SOURCE_FILES ${NATIVE_DIR}/ManagedRawSSTFileReader.cpp ${NATIVE_DIR}/ManagedRawSSTFileIterator.cpp ${NATIVE_DIR}/cplusplus_to_java_convert.h) ADD_LIBRARY(rocks_tools STATIC IMPORTED) set_target_properties( rocks_tools PROPERTIES - IMPORTED_LOCATION ${ROCKSDB_LIB}/librocksdb_tools.a) - ADD_LIBRARY(bz2 STATIC IMPORTED) - set_target_properties( - bz2 - PROPERTIES - IMPORTED_LOCATION ${BZIP2_LIB}/libbz2.a) - ADD_LIBRARY(zlib STATIC IMPORTED) - set_target_properties( - zlib - PROPERTIES - IMPORTED_LOCATION ${ZLIB_LIB}/libz.a) - ADD_LIBRARY(lz4 STATIC IMPORTED) - set_target_properties( - lz4 - PROPERTIES - IMPORTED_LOCATION ${LZ4_LIB}/liblz4.a) - ADD_LIBRARY(snappy STATIC IMPORTED) - set_target_properties( - snappy 
- PROPERTIES - IMPORTED_LOCATION ${SNAPPY_LIB}/libsnappy.a) - ADD_LIBRARY(zstd STATIC IMPORTED) - set_target_properties( - zstd - PROPERTIES - IMPORTED_LOCATION ${ZSTD_LIB}/libzstd.a) - set(linked_libraries ${linked_libraries} bz2 zlib rocks_tools rocksdb lz4 snappy zstd) + IMPORTED_LOCATION ${ROCKSDB_TOOLS_LIB}/librocksdb_tools.a) endif() + add_library(ozone_rocksdb_tools SHARED ${SOURCE_FILES}) -target_link_libraries(ozone_rocksdb_tools ${linked_libraries}) + + +target_link_libraries(ozone_rocksdb_tools PRIVATE ${ROCKSDB_LIB}) +target_link_libraries(ozone_rocksdb_tools PRIVATE rocks_tools) +set_target_properties(ozone_rocksdb_tools PROPERTIES + BUILD_WITH_INSTALL_RPATH FALSE + LINK_FLAGS "-Wl,-rpath -Wl,'$ORIGIN'") diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java index d3121144d37a..8937f0803a18 100644 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java +++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java @@ -26,6 +26,5 @@ public final class NativeConstants { private NativeConstants() { } - public static final String ROCKS_TOOLS_NATIVE_LIBRARY_NAME - = "ozone_rocksdb_tools"; + public static final String ROCKS_TOOLS_NATIVE_LIBRARY_NAME = "ozone_rocksdb_tools"; } diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java index 10df236f88d4..ce424c930e1c 100644 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java +++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.utils; import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.ozone.util.ShutdownHookManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -28,6 +29,8 @@ import java.io.InputStream; import java.nio.file.Files; import java.nio.file.StandardCopyOption; +import java.util.ArrayList; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Optional; @@ -101,7 +104,7 @@ public static boolean isLibraryLoaded(final String libraryName) { .getOrDefault(libraryName, false); } - public synchronized boolean loadLibrary(final String libraryName) { + public synchronized boolean loadLibrary(final String libraryName, final List dependentFiles) { if (isLibraryLoaded(libraryName)) { return true; } @@ -116,9 +119,9 @@ public synchronized boolean loadLibrary(final String libraryName) { } if (!loaded) { - Optional file = copyResourceFromJarToTemp(libraryName); - if (file.isPresent()) { - System.load(file.get().getAbsolutePath()); + Pair, List> files = copyResourceFromJarToTemp(libraryName, dependentFiles); + if (files.getKey().isPresent()) { + System.load(files.getKey().get().getAbsolutePath()); loaded = true; } } @@ -137,19 +140,20 @@ static String getSystemProperty(String property) { // Added function to make this testable @VisibleForTesting - static InputStream getResourceStream(String libraryFileName) { + static InputStream getResourceStream(String libraryFileName) throws IOException { return NativeLibraryLoader.class.getClassLoader() .getResourceAsStream(libraryFileName); } - private Optional copyResourceFromJarToTemp(final 
String libraryName) + private Pair, List> copyResourceFromJarToTemp(final String libraryName, + final List dependentFileNames) throws IOException { final String libraryFileName = getJniLibraryFileName(libraryName); InputStream is = null; try { is = getResourceStream(libraryFileName); if (is == null) { - return Optional.empty(); + return Pair.of(Optional.empty(), null); } final String nativeLibDir = @@ -160,15 +164,28 @@ private Optional copyResourceFromJarToTemp(final String libraryName) // create a temporary file to copy the library to final File temp = File.createTempFile(libraryName, getLibOsSuffix(), dir); if (!temp.exists()) { - return Optional.empty(); + return Pair.of(Optional.empty(), null); } else { temp.deleteOnExit(); } Files.copy(is, temp.toPath(), StandardCopyOption.REPLACE_EXISTING); + List dependentFiles = new ArrayList<>(); + for (String fileName : dependentFileNames) { + if (is != null) { + is.close(); + } + is = getResourceStream(fileName); + File file = new File(dir, fileName); + Files.copy(is, file.toPath(), StandardCopyOption.REPLACE_EXISTING); + if (file.exists()) { + file.deleteOnExit(); + } + dependentFiles.add(file); + } ShutdownHookManager.get().addShutdownHook(temp::delete, LIBRARY_SHUTDOWN_HOOK_PRIORITY); - return Optional.of(temp); + return Pair.of(Optional.of(temp), dependentFiles); } finally { if (is != null) { is.close(); diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java new file mode 100644 index 000000000000..02125951c1fe --- /dev/null +++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.utils.db.managed; + +import com.google.common.primitives.UnsignedLong; +import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.util.ClosableIterator; + +import java.util.Arrays; +import java.util.NoSuchElementException; +import java.util.function.Function; + +/** + * Iterator for SSTFileReader which would read all entries including tombstones. + */ +public class ManagedRawSSTFileIterator implements ClosableIterator { + // Native address of pointer to the object. + private final long nativeHandle; + private final Function transformer; + + ManagedRawSSTFileIterator(long nativeHandle, Function transformer) { + this.nativeHandle = nativeHandle; + this.transformer = transformer; + } + + private native boolean hasNext(long handle); + private native void next(long handle); + private native byte[] getKey(long handle); + private native byte[] getValue(long handle); + private native long getSequenceNumber(long handle); + private native int getType(long handle); + + @Override + public boolean hasNext() { + return this.hasNext(nativeHandle); + } + + @Override + public T next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + + KeyValue keyValue = new KeyValue(this.getKey(nativeHandle), + UnsignedLong.fromLongBits(this.getSequenceNumber(this.nativeHandle)), + this.getType(nativeHandle), + this.getValue(nativeHandle)); + this.next(nativeHandle); + return this.transformer.apply(keyValue); + } + + private native void closeInternal(long handle); + + @Override + public void close() { + this.closeInternal(this.nativeHandle); + } + + /** + * Class containing Parsed KeyValue Record from RawSstReader output. + */ + public static final class KeyValue { + + private final byte[] key; + private final UnsignedLong sequence; + private final Integer type; + private final byte[] value; + + private KeyValue(byte[] key, UnsignedLong sequence, Integer type, + byte[] value) { + this.key = key; + this.sequence = sequence; + this.type = type; + this.value = value; + } + + public byte[] getKey() { + return Arrays.copyOf(key, key.length); + } + + public UnsignedLong getSequence() { + return sequence; + } + + public Integer getType() { + return type; + } + + public byte[] getValue() { + return Arrays.copyOf(value, value.length); + } + + @Override + public String toString() { + return "KeyValue{" + + "key=" + StringUtils.bytes2String(key) + + ", sequence=" + sequence + + ", type=" + type + + ", value=" + StringUtils.bytes2String(value) + + '}'; + } + } +} diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java new file mode 100644 index 000000000000..2a58dfce4c4c --- /dev/null +++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.utils.db.managed; + +import org.apache.hadoop.hdds.utils.NativeLibraryLoader; +import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.util.Arrays; +import java.util.function.Function; + +import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; + +/** + * JNI for RocksDB RawSSTFileReader. + */ +public class ManagedRawSSTFileReader implements Closeable { + + public static boolean loadLibrary() throws NativeLibraryNotLoadedException { + ManagedRocksObjectUtils.loadRocksDBLibrary(); + if (!NativeLibraryLoader.getInstance().loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME, Arrays.asList( + ManagedRocksObjectUtils.getRocksDBLibFileName()))) { + throw new NativeLibraryNotLoadedException(ROCKS_TOOLS_NATIVE_LIBRARY_NAME); + } + return true; + } + + private final String fileName; + // Native address of pointer to the object. + private final long nativeHandle; + private static final Logger LOG = LoggerFactory.getLogger(ManagedRawSSTFileReader.class); + + public ManagedRawSSTFileReader(final ManagedOptions options, final String fileName, final int readAheadSize) { + this.fileName = fileName; + this.nativeHandle = this.newRawSSTFileReader(options.getNativeHandle(), fileName, readAheadSize); + } + + public ManagedRawSSTFileIterator newIterator( + Function transformerFunction, + ManagedSlice fromSlice, ManagedSlice toSlice) { + long fromNativeHandle = fromSlice == null ? 0 : fromSlice.getNativeHandle(); + long toNativeHandle = toSlice == null ? 0 : toSlice.getNativeHandle(); + LOG.info("Iterating SST file: {} with native lib. " + + "LowerBound: {}, UpperBound: {}", fileName, fromSlice, toSlice); + return new ManagedRawSSTFileIterator<>( + newIterator(this.nativeHandle, fromSlice != null, + fromNativeHandle, toSlice != null, toNativeHandle), + transformerFunction); + } + + private native long newRawSSTFileReader(long optionsHandle, String filePath, int readSize); + + + private native long newIterator(long handle, boolean hasFrom, long fromSliceHandle, boolean hasTo, + long toSliceHandle); + + private native void disposeInternal(long handle); + + @Override + public void close() { + disposeInternal(nativeHandle); + } +} diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpIterator.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpIterator.java deleted file mode 100644 index d8844eaacbcd..000000000000 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpIterator.java +++ /dev/null @@ -1,314 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.utils.db.managed; - -import com.google.common.collect.Maps; -import com.google.common.primitives.UnsignedLong; -import org.apache.hadoop.hdds.StringUtils; -import org.apache.hadoop.util.ClosableIterator; -import org.eclipse.jetty.io.RuntimeIOException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.UncheckedIOException; -import java.nio.ByteBuffer; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.Arrays; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; - -/** - * Iterator to Parse output of RocksDBSSTDumpTool. - */ -public abstract class ManagedSSTDumpIterator implements ClosableIterator { - - private static final Logger LOG = - LoggerFactory.getLogger(ManagedSSTDumpIterator.class); - // Since we don't have any restriction on the key & value, we are prepending - // the length of the pattern in the sst dump tool output. - // The first token in the pattern is the key. - // The second tells the sequence number of the key. - // The third token gives the type of key in the sst file. - // The fourth token - private InputStream processOutput; - private Optional currentKey; - private byte[] intBuffer; - private Optional nextKey; - - private ManagedSSTDumpTool.SSTDumpToolTask sstDumpToolTask; - private AtomicBoolean open; - private StackTraceElement[] stackTrace; - - public ManagedSSTDumpIterator(ManagedSSTDumpTool sstDumpTool, - String sstFilePath, ManagedOptions options) - throws IOException { - this(sstDumpTool, sstFilePath, options, null, null); - } - - public ManagedSSTDumpIterator(ManagedSSTDumpTool sstDumpTool, - String sstFilePath, ManagedOptions options, - ManagedSlice lowerKeyBound, - ManagedSlice upperKeyBound) - throws IOException { - File sstFile = new File(sstFilePath); - if (!sstFile.exists()) { - throw new IOException(String.format("File in path : %s doesn't exist", - sstFile.getAbsolutePath())); - } - if (!sstFile.isFile()) { - throw new IOException(String.format("Path given: %s is not a file", - sstFile.getAbsolutePath())); - } - init(sstDumpTool, sstFile, options, lowerKeyBound, upperKeyBound); - this.stackTrace = Thread.currentThread().getStackTrace(); - } - - /** - * Parses next occuring number in the stream. 
- * - * @return Optional of the integer empty if no integer exists - */ - private Optional getNextNumberInStream() throws IOException { - int n = processOutput.read(intBuffer, 0, 4); - if (n == 4) { - return Optional.of(ByteBuffer.wrap(intBuffer).getInt()); - } else if (n >= 0) { - throw new IllegalStateException(String.format("Integer expects " + - "4 bytes to be read from the stream, but read only %d bytes", n)); - } - return Optional.empty(); - } - - private Optional getNextByteArray() throws IOException { - Optional size = getNextNumberInStream(); - if (size.isPresent()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Allocating byte array, size: {}", size.get()); - } - byte[] b = new byte[size.get()]; - int n = processOutput.read(b); - if (n >= 0 && n != size.get()) { - throw new IllegalStateException(String.format("Integer expects " + - "4 bytes to be read from the stream, but read only %d bytes", n)); - } - return Optional.of(b); - } - return Optional.empty(); - } - - private Optional getNextUnsignedLong() throws IOException { - long val = 0; - for (int i = 0; i < 8; i++) { - val = val << 8; - int nextByte = processOutput.read(); - if (nextByte < 0) { - if (i == 0) { - return Optional.empty(); - } - throw new IllegalStateException(String.format("Long expects " + - "8 bytes to be read from the stream, but read only %d bytes", i)); - } - val += nextByte; - } - return Optional.of(UnsignedLong.fromLongBits(val)); - } - - private void init(ManagedSSTDumpTool sstDumpTool, File sstFile, - ManagedOptions options, ManagedSlice lowerKeyBound, - ManagedSlice upperKeyBound) { - Map argMap = Maps.newHashMap(); - argMap.put("file", sstFile.getAbsolutePath()); - argMap.put("silent", null); - argMap.put("command", "scan"); - // strings containing '\0' do not have the same value when encode UTF-8 on - // java which is 0. But in jni the utf-8 encoded value for '\0' - // becomes -64 -128. Thus the value becomes different. - // In order to support this, changes have been made on the rocks-tools - // to pass the address of the ManagedSlice and the jni can use the object - // of slice directly from there. - if (Objects.nonNull(lowerKeyBound)) { - argMap.put("from", String.valueOf(lowerKeyBound.getNativeHandle())); - } - if (Objects.nonNull(upperKeyBound)) { - argMap.put("to", String.valueOf(upperKeyBound.getNativeHandle())); - } - this.sstDumpToolTask = sstDumpTool.run(argMap, options); - processOutput = sstDumpToolTask.getPipedOutput(); - intBuffer = new byte[4]; - open = new AtomicBoolean(true); - currentKey = Optional.empty(); - nextKey = Optional.empty(); - next(); - } - - /** - * Throws Runtime exception in the case iterator is closed or - * the native Dumptool exited with non zero exit value. - */ - private void checkSanityOfProcess() { - if (!this.open.get()) { - throw new RuntimeException("Iterator has been closed"); - } - if (sstDumpToolTask.getFuture().isDone() && - sstDumpToolTask.exitValue() != 0) { - throw new RuntimeException("Process Terminated with non zero " + - String.format("exit value %d", sstDumpToolTask.exitValue())); - } - } - - /** - * Checks the status of the process & sees if there is another record. - * - * @return True if next exists & false otherwise - * Throws Runtime Exception in case of SST File read failure - */ - - @Override - public boolean hasNext() { - checkSanityOfProcess(); - return nextKey.isPresent(); - } - - /** - * Transforms Key to a certain value. 
- * - * @param value - * @return transformed Value - */ - protected abstract T getTransformedValue(Optional value); - - /** - * Returns the next record from SSTDumpTool. - * - * @return next Key - * Throws Runtime Exception incase of failure. - */ - @Override - public T next() { - checkSanityOfProcess(); - currentKey = nextKey; - nextKey = Optional.empty(); - try { - Optional key = getNextByteArray(); - if (!key.isPresent()) { - return getTransformedValue(currentKey); - } - UnsignedLong sequenceNumber = getNextUnsignedLong() - .orElseThrow(() -> new IllegalStateException( - String.format("Error while trying to read sequence number" + - " for key %s", StringUtils.bytes2String(key.get())))); - - Integer type = getNextNumberInStream() - .orElseThrow(() -> new IllegalStateException( - String.format("Error while trying to read sequence number for " + - "key %s with sequence number %s", - StringUtils.bytes2String(key.get()), - sequenceNumber.toString()))); - byte[] val = getNextByteArray().orElseThrow(() -> - new IllegalStateException( - String.format("Error while trying to read sequence number for " + - "key %s with sequence number %s of type %d", - StringUtils.bytes2String(key.get()), - sequenceNumber.toString(), type))); - nextKey = Optional.of(new KeyValue(key.get(), sequenceNumber, type, val)); - } catch (IOException e) { - // TODO [SNAPSHOT] Throw custom snapshot exception - throw new RuntimeIOException(e); - } - return getTransformedValue(currentKey); - } - - @Override - public synchronized void close() throws UncheckedIOException { - if (this.sstDumpToolTask != null) { - if (!this.sstDumpToolTask.getFuture().isDone()) { - this.sstDumpToolTask.getFuture().cancel(true); - } - try { - this.processOutput.close(); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - open.compareAndSet(true, false); - } - - @Override - protected void finalize() throws Throwable { - if (open.get()) { - LOG.warn("{} is not closed properly." + - " StackTrace for unclosed instance: {}", - this.getClass().getName(), - Arrays.stream(stackTrace) - .map(StackTraceElement::toString).collect( - Collectors.joining("\n"))); - } - this.close(); - super.finalize(); - } - - /** - * Class containing Parsed KeyValue Record from Sst Dumptool output. 
- */ - public static final class KeyValue { - - private final byte[] key; - private final UnsignedLong sequence; - private final Integer type; - private final byte[] value; - - private KeyValue(byte[] key, UnsignedLong sequence, Integer type, - byte[] value) { - this.key = key; - this.sequence = sequence; - this.type = type; - this.value = value; - } - - public byte[] getKey() { - return key; - } - - public UnsignedLong getSequence() { - return sequence; - } - - public Integer getType() { - return type; - } - - public byte[] getValue() { - return value; - } - - @Override - public String toString() { - return "KeyValue{" + - "key=" + StringUtils.bytes2String(key) + - ", sequence=" + sequence + - ", type=" + type + - ", value=" + StringUtils.bytes2String(value) + - '}'; - } - } -} diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpTool.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpTool.java deleted file mode 100644 index 5d965d7398e0..000000000000 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpTool.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db.managed; - -import org.apache.hadoop.hdds.utils.NativeLibraryLoader; -import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; - -import java.io.InputStream; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; - -import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; - -/** - * JNI for RocksDB SSTDumpTool. 
Pipes the output to an output stream - */ -public class ManagedSSTDumpTool { - - private int bufferCapacity; - private ExecutorService executorService; - - public ManagedSSTDumpTool(ExecutorService executorService, - int bufferCapacity) - throws NativeLibraryNotLoadedException { - if (!NativeLibraryLoader.getInstance() - .loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)) { - throw new NativeLibraryNotLoadedException( - ROCKS_TOOLS_NATIVE_LIBRARY_NAME); - } - this.bufferCapacity = bufferCapacity; - this.executorService = executorService; - } - - public SSTDumpToolTask run(String[] args, ManagedOptions options) { - PipeInputStream pipeInputStream = new PipeInputStream(bufferCapacity); - return new SSTDumpToolTask(this.executorService.submit(() -> - this.runInternal(args, options.getNativeHandle(), - pipeInputStream.getNativeHandle())), pipeInputStream); - } - - public SSTDumpToolTask run(Map args, ManagedOptions options) { - return this.run(args.entrySet().stream().map(e -> "--" - + (e.getValue() == null || e.getValue().isEmpty() ? e.getKey() : - e.getKey() + "=" + e.getValue())).toArray(String[]::new), options); - } - - private native int runInternal(String[] args, long optionsHandle, - long pipeHandle); - - /** - * Class holding piped output of SST Dumptool & future of command. - */ - static class SSTDumpToolTask { - private Future future; - private InputStream pipedOutput; - - SSTDumpToolTask(Future future, InputStream pipedOutput) { - this.future = future; - this.pipedOutput = pipedOutput; - } - - public Future getFuture() { - return future; - } - - public InputStream getPipedOutput() { - return pipedOutput; - } - - public int exitValue() { - if (this.future.isDone()) { - try { - return future.get(); - } catch (InterruptedException | ExecutionException e) { - return 1; - } - } - return 0; - } - } -} diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/PipeInputStream.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/PipeInputStream.java deleted file mode 100644 index df4f613f98e2..000000000000 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/PipeInputStream.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db.managed; - -import java.io.InputStream; -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * JNI for reading data from pipe. 
- */ -public class PipeInputStream extends InputStream { - - private byte[] byteBuffer; - private long nativeHandle; - private int numberOfBytesLeftToRead; - private int index = 0; - private int capacity; - - private AtomicBoolean cleanup; - - PipeInputStream(int capacity) { - this.byteBuffer = new byte[capacity]; - this.numberOfBytesLeftToRead = 0; - this.capacity = capacity; - this.nativeHandle = newPipe(); - this.cleanup = new AtomicBoolean(false); - } - - long getNativeHandle() { - return nativeHandle; - } - - @Override - public int read() { - if (numberOfBytesLeftToRead < 0) { - this.close(); - return -1; - } - while (numberOfBytesLeftToRead == 0) { - numberOfBytesLeftToRead = readInternal(byteBuffer, capacity, - nativeHandle); - index = 0; - if (numberOfBytesLeftToRead != 0) { - return read(); - } - } - numberOfBytesLeftToRead--; - int ret = byteBuffer[index] & 0xFF; - index += 1; - return ret; - } - - private native long newPipe(); - - private native int readInternal(byte[] buff, int numberOfBytes, - long pipeHandle); - - private native void closeInternal(long pipeHandle); - - @Override - public void close() { - if (this.cleanup.compareAndSet(false, true)) { - closeInternal(this.nativeHandle); - } - } - - @Override - protected void finalize() throws Throwable { - close(); - super.finalize(); - } -} diff --git a/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp new file mode 100644 index 000000000000..1cf222528379 --- /dev/null +++ b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator.h" +#include "rocksdb/options.h" +#include "rocksdb/raw_iterator.h" +#include +#include "cplusplus_to_java_convert.h" +#include + +jboolean Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_hasNext(JNIEnv *env, jobject obj, + jlong native_handle) { + return static_cast(reinterpret_cast(native_handle)->Valid()); +} + +void Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_next(JNIEnv *env, jobject obj, + jlong native_handle) { + reinterpret_cast(native_handle)->Next(); +} + +jbyteArray Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getKey(JNIEnv *env, + jobject obj, + jlong native_handle) { + ROCKSDB_NAMESPACE::Slice slice = reinterpret_cast(native_handle)->key(); + jbyteArray jkey = env->NewByteArray(static_cast(slice.size())); + if (jkey == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + env->SetByteArrayRegion( + jkey, 0, static_cast(slice.size()), + const_cast(reinterpret_cast(slice.data()))); + return jkey; +} + + +jbyteArray Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getValue(JNIEnv *env, + jobject obj, + jlong native_handle) { + ROCKSDB_NAMESPACE::Slice slice = reinterpret_cast(native_handle)->value(); + jbyteArray jkey = env->NewByteArray(static_cast(slice.size())); + if (jkey == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + env->SetByteArrayRegion( + jkey, 0, static_cast(slice.size()), + const_cast(reinterpret_cast(slice.data()))); + return jkey; +} + +jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getSequenceNumber(JNIEnv *env, + jobject obj, + jlong native_handle) { + uint64_t sequence_number = + reinterpret_cast(native_handle)->sequenceNumber(); + jlong result; + std::memcpy(&result, &sequence_number, sizeof(jlong)); + return result; +} + + +jint Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getType(JNIEnv *env, + jobject obj, + jlong native_handle) { + uint32_t type = reinterpret_cast(native_handle)->type(); + return static_cast(type); +} + + +void Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_closeInternal(JNIEnv *env, + jobject obj, + jlong native_handle) { + delete reinterpret_cast(native_handle); +} diff --git a/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp new file mode 100644 index 000000000000..f3b8dc02639d --- /dev/null +++ b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader.h" +#include "rocksdb/options.h" +#include "rocksdb/raw_sst_file_reader.h" +#include "rocksdb/raw_iterator.h" +#include +#include "cplusplus_to_java_convert.h" +#include + +jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_newRawSSTFileReader(JNIEnv *env, jobject obj, + jlong options_handle, + jstring jfilename, + jint readahead_size) { + ROCKSDB_NAMESPACE::Options *options = reinterpret_cast(options_handle); + const char *file_path = env->GetStringUTFChars(jfilename, nullptr); + size_t read_ahead_size_value = static_cast(readahead_size); + ROCKSDB_NAMESPACE::RawSstFileReader* raw_sst_file_reader = + new ROCKSDB_NAMESPACE::RawSstFileReader(*options, file_path, read_ahead_size_value, true, true); + env->ReleaseStringUTFChars(jfilename, file_path); + return GET_CPLUSPLUS_POINTER(raw_sst_file_reader); +} + +jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_newIterator(JNIEnv *env, jobject obj, + jlong native_handle, + jboolean jhas_from, + jlong from_slice_handle, + jboolean jhas_to, + jlong to_slice_handle) { + ROCKSDB_NAMESPACE::Slice* from_slice = nullptr; + ROCKSDB_NAMESPACE::Slice* to_slice = nullptr; + ROCKSDB_NAMESPACE::RawSstFileReader* raw_sst_file_reader = + reinterpret_cast(native_handle); + bool has_from = static_cast(jhas_from); + bool has_to = static_cast(jhas_to); + if (has_from) { + from_slice = reinterpret_cast(from_slice_handle); + } + if (has_to) { + to_slice = reinterpret_cast(to_slice_handle); + } + ROCKSDB_NAMESPACE::RawIterator* iterator = raw_sst_file_reader->newIterator(has_from, from_slice, has_to, to_slice); + return GET_CPLUSPLUS_POINTER(iterator); +} + +void Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_disposeInternal(JNIEnv *env, jobject obj, + jlong native_handle) { + delete reinterpret_cast(native_handle); +} diff --git a/hadoop-hdds/rocks-native/src/main/native/PipeInputStream.cpp b/hadoop-hdds/rocks-native/src/main/native/PipeInputStream.cpp deleted file mode 100644 index 53f60cdd65af..000000000000 --- a/hadoop-hdds/rocks-native/src/main/native/PipeInputStream.cpp +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include -#include -#include "Pipe.h" -#include "cplusplus_to_java_convert.h" -#include "org_apache_hadoop_hdds_utils_db_managed_PipeInputStream.h" - - -jlong Java_org_apache_hadoop_hdds_utils_db_managed_PipeInputStream_newPipe(JNIEnv *, jobject) { - Pipe *pipe = new Pipe(); - return GET_CPLUSPLUS_POINTER(pipe); -} - -jint Java_org_apache_hadoop_hdds_utils_db_managed_PipeInputStream_readInternal(JNIEnv *env, jobject object, jbyteArray jbyteArray, jint capacity, jlong nativeHandle) { - int cap_int = capacity; - Pipe *pipe = reinterpret_cast(nativeHandle); - jbyte *b = (env)->GetByteArrayElements(jbyteArray, JNI_FALSE); - cap_int = read(pipe->getReadFd(), b, cap_int); - if (cap_int == 0) { - if (!pipe->isOpen()) { - cap_int = -1; - } - } - env->ReleaseByteArrayElements(jbyteArray, b, 0); - return cap_int; -} - -void Java_org_apache_hadoop_hdds_utils_db_managed_PipeInputStream_closeInternal(JNIEnv *env, jobject object, jlong nativeHandle) { - delete reinterpret_cast(nativeHandle); -} - diff --git a/hadoop-hdds/rocks-native/src/main/native/SSTDumpTool.cpp b/hadoop-hdds/rocks-native/src/main/native/SSTDumpTool.cpp deleted file mode 100644 index 285c5906c2d8..000000000000 --- a/hadoop-hdds/rocks-native/src/main/native/SSTDumpTool.cpp +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "org_apache_hadoop_hdds_utils_db_managed_ManagedSSTDumpTool.h" -#include "rocksdb/options.h" -#include "rocksdb/sst_dump_tool.h" -#include -#include "cplusplus_to_java_convert.h" -#include "Pipe.h" -#include - -jint Java_org_apache_hadoop_hdds_utils_db_managed_ManagedSSTDumpTool_runInternal(JNIEnv *env, jobject obj, - jobjectArray argsArray, jlong optionsHandle, jlong pipeHandle) { - ROCKSDB_NAMESPACE::SSTDumpTool dumpTool; - ROCKSDB_NAMESPACE::Options options; - Pipe *pipe = reinterpret_cast(pipeHandle); - int length = env->GetArrayLength(argsArray); - char *args[length + 1]; - for (int i = 0; i < length; i++) { - jstring str_val = (jstring)env->GetObjectArrayElement(argsArray, (jsize)i); - char *utf_str = (char *)env->GetStringUTFChars(str_val, JNI_FALSE); - args[i + 1] = utf_str; - } - FILE *wr = fdopen(pipe->getWriteFd(), "w"); - int ret = dumpTool.Run(length + 1, args, options, wr); - for (int i = 1; i < length + 1; i++) { - jstring str_val = (jstring)env->GetObjectArrayElement(argsArray, (jsize)(i - 1)); - env->ReleaseStringUTFChars(str_val, args[i]); - } - fclose(wr); - pipe->close(); - return ret; -} diff --git a/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h b/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h index efe9d4a5be24..4862ea12a1b9 100644 --- a/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h +++ b/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h @@ -16,7 +16,7 @@ * limitations under the License. */ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). diff --git a/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch b/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch index 841c2533b863..12dc74614a45 100644 --- a/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch +++ b/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch @@ -16,592 +16,531 @@ * limitations under the License. 
*/ -diff --git a/include/rocksdb/sst_dump_tool.h b/include/rocksdb/sst_dump_tool.h -index 9261ba47d..1e62b88a3 100644 ---- a/include/rocksdb/sst_dump_tool.h -+++ b/include/rocksdb/sst_dump_tool.h -@@ -11,7 +11,8 @@ namespace ROCKSDB_NAMESPACE { - - class SSTDumpTool { - public: -- int Run(int argc, char const* const* argv, Options options = Options()); -+ int Run(int argc, char const* const* argv, Options options = Options(), -+ FILE* out = stdout, FILE* err = stderr); - }; - - } // namespace ROCKSDB_NAMESPACE -diff --git a/table/sst_file_dumper.cc b/table/sst_file_dumper.cc -index eefbaaeee..734a2f0dd 100644 ---- a/table/sst_file_dumper.cc -+++ b/table/sst_file_dumper.cc -@@ -45,7 +45,7 @@ SstFileDumper::SstFileDumper(const Options& options, - Temperature file_temp, size_t readahead_size, - bool verify_checksum, bool output_hex, - bool decode_blob_index, const EnvOptions& soptions, -- bool silent) -+ bool silent, FILE* out, FILE* err) - : file_name_(file_path), - read_num_(0), - file_temp_(file_temp), -@@ -57,10 +57,13 @@ SstFileDumper::SstFileDumper(const Options& options, - ioptions_(options_), - moptions_(ColumnFamilyOptions(options_)), - read_options_(verify_checksum, false), -- internal_comparator_(BytewiseComparator()) { -+ internal_comparator_(BytewiseComparator()), -+ out_(out), -+ err_(err) -+ { - read_options_.readahead_size = readahead_size; - if (!silent_) { -- fprintf(stdout, "Process %s\n", file_path.c_str()); -+ fprintf(out_, "Process %s\n", file_path.c_str()); - } - init_result_ = GetTableReader(file_name_); - } -@@ -253,17 +256,17 @@ Status SstFileDumper::ShowAllCompressionSizes( - int32_t compress_level_from, int32_t compress_level_to, - uint32_t max_dict_bytes, uint32_t zstd_max_train_bytes, - uint64_t max_dict_buffer_bytes, bool use_zstd_dict_trainer) { -- fprintf(stdout, "Block Size: %" ROCKSDB_PRIszt "\n", block_size); -+ fprintf(out_, "Block Size: %" ROCKSDB_PRIszt "\n", block_size); - for (auto& i : compression_types) { - if (CompressionTypeSupported(i.first)) { -- fprintf(stdout, "Compression: %-24s\n", i.second); -+ fprintf(out_, "Compression: %-24s\n", i.second); - CompressionOptions compress_opt; - compress_opt.max_dict_bytes = max_dict_bytes; - compress_opt.zstd_max_train_bytes = zstd_max_train_bytes; - compress_opt.max_dict_buffer_bytes = max_dict_buffer_bytes; - compress_opt.use_zstd_dict_trainer = use_zstd_dict_trainer; - for (int32_t j = compress_level_from; j <= compress_level_to; j++) { -- fprintf(stdout, "Compression level: %d", j); -+ fprintf(out_, "Compression level: %d", j); - compress_opt.level = j; - Status s = ShowCompressionSize(block_size, i.first, compress_opt); - if (!s.ok()) { -@@ -271,7 +274,7 @@ Status SstFileDumper::ShowAllCompressionSizes( - } - } - } else { -- fprintf(stdout, "Unsupported compression type: %s.\n", i.second); -+ fprintf(err_, "Unsupported compression type: %s.\n", i.second); - } - } - return Status::OK(); -@@ -307,9 +310,9 @@ Status SstFileDumper::ShowCompressionSize( - } - - std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now(); -- fprintf(stdout, " Size: %10" PRIu64, file_size); -- fprintf(stdout, " Blocks: %6" PRIu64, num_data_blocks); -- fprintf(stdout, " Time Taken: %10s microsecs", -+ fprintf(out_, " Size: %10" PRIu64, file_size); -+ fprintf(out_, " Blocks: %6" PRIu64, num_data_blocks); -+ fprintf(out_, " Time Taken: %10s microsecs", - std::to_string( - std::chrono::duration_cast(end - start) - .count()) -@@ -342,11 +345,11 @@ Status SstFileDumper::ShowCompressionSize( - : 
((static_cast(not_compressed_blocks) / - static_cast(num_data_blocks)) * - 100.0); -- fprintf(stdout, " Compressed: %6" PRIu64 " (%5.1f%%)", compressed_blocks, -+ fprintf(out_, " Compressed: %6" PRIu64 " (%5.1f%%)", compressed_blocks, - compressed_pcnt); -- fprintf(stdout, " Not compressed (ratio): %6" PRIu64 " (%5.1f%%)", -+ fprintf(out_, " Not compressed (ratio): %6" PRIu64 " (%5.1f%%)", - ratio_not_compressed_blocks, ratio_not_compressed_pcnt); -- fprintf(stdout, " Not compressed (abort): %6" PRIu64 " (%5.1f%%)\n", -+ fprintf(out_, " Not compressed (abort): %6" PRIu64 " (%5.1f%%)\n", - not_compressed_blocks, not_compressed_pcnt); - return Status::OK(); - } -@@ -362,7 +365,7 @@ Status SstFileDumper::ReadTableProperties(uint64_t table_magic_number, - /* memory_allocator= */ nullptr, prefetch_buffer); - if (!s.ok()) { - if (!silent_) { -- fprintf(stdout, "Not able to read table properties\n"); -+ fprintf(err_, "Not able to read table properties\n"); - } - } - return s; -@@ -410,7 +413,7 @@ Status SstFileDumper::SetTableOptionsByMagicNumber( - - options_.table_factory.reset(NewPlainTableFactory(plain_table_options)); - if (!silent_) { -- fprintf(stdout, "Sst file format: plain table\n"); -+ fprintf(out_, "Sst file format: plain table\n"); - } - } else { - char error_msg_buffer[80]; -@@ -427,15 +430,56 @@ Status SstFileDumper::SetOldTableOptions() { - assert(table_properties_ == nullptr); - options_.table_factory = std::make_shared(); - if (!silent_) { -- fprintf(stdout, "Sst file format: block-based(old version)\n"); -+ fprintf(out_, "Sst file format: block-based(old version)\n"); - } - - return Status::OK(); - } - -+void write(int value, FILE* file) { -+ char b[4]; -+ b[3] = value & 0x000000ff; -+ b[2] = (value & 0x0000ff00) >> 8; -+ b[1] = (value & 0x00ff0000) >> 16; -+ b[0] = (value & 0xff000000) >> 24; -+ std::fwrite(b, 4, 1, file); +diff --git a/include/rocksdb/raw_iterator.h b/include/rocksdb/raw_iterator.h +new file mode 100644 +index 000000000..21242ed15 +--- /dev/null ++++ b/include/rocksdb/raw_iterator.h +@@ -0,0 +1,25 @@ ++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. ++// This source code is licensed under both the GPLv2 (found in the ++// COPYING file in the root directory) and Apache 2.0 License ++// (found in the LICENSE.Apache file in the root directory). ++#pragma once ++#ifndef ROCKSDB_LITE ++ ++ ++#include "rocksdb/advanced_options.h" ++namespace ROCKSDB_NAMESPACE { ++ ++class RawIterator { ++ public: ++ virtual ~RawIterator() {} ++ virtual bool Valid() const = 0; ++ virtual Slice key() const = 0; ++ virtual Slice value() const = 0; ++ virtual uint64_t sequenceNumber() const = 0; ++ virtual uint32_t type() const = 0; ++ virtual void Next() = 0; ++}; ++ ++} // namespace ROCKSDB_NAMESPACE ++ ++#endif // ROCKSDB_LITE +diff --git a/include/rocksdb/raw_sst_file_reader.h b/include/rocksdb/raw_sst_file_reader.h +new file mode 100644 +index 000000000..09e748208 +--- /dev/null ++++ b/include/rocksdb/raw_sst_file_reader.h +@@ -0,0 +1,62 @@ ++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. ++// This source code is licensed under both the GPLv2 (found in the ++// COPYING file in the root directory) and Apache 2.0 License ++// (found in the LICENSE.Apache file in the root directory). 
++#pragma once ++#ifndef ROCKSDB_LITE ++ ++#include ++#include ++ ++#include "rocksdb/raw_iterator.h" ++#include "rocksdb/advanced_options.h" ++#include "rocksdb/options.h" ++ ++ ++ ++namespace ROCKSDB_NAMESPACE { ++ ++class RawSstFileReader { ++ public: ++ ++ RawSstFileReader(const Options& options, const std::string& file_name, ++ size_t readahead_size, bool verify_checksum, ++ bool silent = false); ++ ~RawSstFileReader(); ++ ++ RawIterator* newIterator(bool has_from, Slice* from, ++ bool has_to, Slice *to); ++ Status getStatus() { return init_result_; } ++ ++ private: ++ // Get the TableReader implementation for the sst file ++ Status GetTableReader(const std::string& file_path); ++ Status ReadTableProperties(uint64_t table_magic_number, ++ uint64_t file_size); ++ ++ Status SetTableOptionsByMagicNumber(uint64_t table_magic_number); ++ Status SetOldTableOptions(); ++ ++ // Helper function to call the factory with settings specific to the ++ // factory implementation ++ Status NewTableReader(uint64_t file_size); ++ ++ std::string file_name_; ++ Temperature file_temp_; ++ ++ // less verbose in stdout/stderr ++ bool silent_; ++ ++ // options_ and internal_comparator_ will also be used in ++ // ReadSequential internally (specifically, seek-related operations) ++ Options options_; ++ ++ Status init_result_; ++ ++ struct Rep; ++ std::unique_ptr rep_; ++}; ++ ++} // namespace ROCKSDB_NAMESPACE ++ ++#endif // ROCKSDB_LITE +diff --git a/src.mk b/src.mk +index b94bc43ca..c13e5cde6 100644 +--- a/src.mk ++++ b/src.mk +@@ -338,11 +338,8 @@ RANGE_TREE_SOURCES =\ + utilities/transactions/lock/range/range_tree/range_tree_lock_tracker.cc + + TOOL_LIB_SOURCES = \ +- tools/io_tracer_parser_tool.cc \ +- tools/ldb_cmd.cc \ +- tools/ldb_tool.cc \ +- tools/sst_dump_tool.cc \ +- utilities/blob_db/blob_dump_tool.cc \ ++ tools/raw_sst_file_reader.cc \ ++ tools/raw_sst_file_iterator.cc \ + + ANALYZER_LIB_SOURCES = \ + tools/block_cache_analyzer/block_cache_trace_analyzer.cc \ +diff --git a/tools/raw_sst_file_iterator.cc b/tools/raw_sst_file_iterator.cc +new file mode 100644 +index 000000000..3051637a3 +--- /dev/null ++++ b/tools/raw_sst_file_iterator.cc +@@ -0,0 +1,76 @@ ++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. ++// This source code is licensed under both the GPLv2 (found in the ++// COPYING file in the root directory) and Apache 2.0 License ++// (found in the LICENSE.Apache file in the root directory). 
++// ++#ifndef ROCKSDB_LITE ++ ++ ++#include ++#include ++ ++#include "db/memtable.h" ++#include "db/write_batch_internal.h" ++#include "rocksdb/status.h" ++#include "rocksdb/utilities/ldb_cmd.h" ++#include "table/block_based/block.h" ++#include "table/block_based/block_based_table_builder.h" ++#include "table/block_based/block_based_table_factory.h" ++#include "table/meta_blocks.h" ++#include "table/plain/plain_table_factory.h" ++#include "tools/raw_sst_file_iterator.h" ++ ++namespace ROCKSDB_NAMESPACE { ++ ++RawSstFileIterator::RawSstFileIterator(InternalIterator* iterator, ++ bool has_from, Slice* from_key, ++ bool has_to, Slice* to_key) ++ : iter_(iterator), ++ ikey(new ParsedInternalKey()), ++ has_to_(has_to), ++ to_key_(to_key) { ++ if (has_from) { ++ InternalKey k; ++ k.SetMinPossibleForUserKey(*from_key); ++ iter_->Seek(k.Encode()); ++ } else { ++ iter_->SeekToFirst(); ++ } ++ initKey(); ++} ++ ++bool RawSstFileIterator::Valid() const { ++ return iter_->Valid() && (!has_to_ || ++ BytewiseComparator()->Compare( ++ key(), *to_key_) < 0); ++} ++ ++void RawSstFileIterator::initKey() { ++ if (iter_->Valid()) { ++ ParseInternalKey(iter_->key(), ikey, true /* log_err_key */); ++ } +} ++void RawSstFileIterator::Next() { ++ iter_->Next(); ++ initKey(); ++ ++} ++ ++Slice RawSstFileIterator::key() const { ++ return ikey->user_key; ++} ++ ++uint64_t RawSstFileIterator::sequenceNumber() const { ++ return ikey->sequence; ++} ++ ++uint32_t RawSstFileIterator::type() const { ++ return static_cast(ikey->type); ++} ++ ++Slice RawSstFileIterator::value() const { ++ return iter_->value(); ++} ++} // namespace ROCKSDB_NAMESPACE ++ ++#endif // ROCKSDB_LITE +diff --git a/tools/raw_sst_file_iterator.h b/tools/raw_sst_file_iterator.h +new file mode 100644 +index 000000000..58e34b260 +--- /dev/null ++++ b/tools/raw_sst_file_iterator.h +@@ -0,0 +1,45 @@ ++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. ++// This source code is licensed under both the GPLv2 (found in the ++// COPYING file in the root directory) and Apache 2.0 License ++// (found in the LICENSE.Apache file in the root directory). ++#pragma once ++#ifndef ROCKSDB_LITE + -+void write(const char* value, int length, FILE* file) { -+ write(length, file); -+ fwrite(value, length, 1, file); ++#include ++#include ++#include "file/writable_file_writer.h" ++#include "rocksdb/advanced_options.h" ++#include "rocksdb/raw_iterator.h" ++ ++namespace ROCKSDB_NAMESPACE { ++ ++class RawSstFileIterator : public RawIterator { ++ public: ++ explicit RawSstFileIterator(InternalIterator* iterator, ++ bool has_from, ++ Slice* from_key, ++ bool has_to, ++ Slice* to_key); ++ ++ bool Valid() const override; ++ Slice key() const override; ++ Slice value() const override; ++ uint64_t sequenceNumber() const override; ++ uint32_t type() const override; ++ void Next() final override; ++ ++ ~RawSstFileIterator(){ ++ delete iter_; ++ } ++ ++ private: ++ void initKey(); ++ InternalIterator* iter_; ++ ParsedInternalKey* ikey; ++ bool has_to_; ++ Slice* to_key_; ++}; ++ ++} // namespace ROCKSDB_NAMESPACE ++ ++#endif // ROCKSDB_LITE +diff --git a/tools/raw_sst_file_reader.cc b/tools/raw_sst_file_reader.cc +new file mode 100644 +index 000000000..5ba8a82ee +--- /dev/null ++++ b/tools/raw_sst_file_reader.cc +@@ -0,0 +1,272 @@ ++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
++// This source code is licensed under both the GPLv2 (found in the ++// COPYING file in the root directory) and Apache 2.0 License ++// (found in the LICENSE.Apache file in the root directory). ++// ++#ifndef ROCKSDB_LITE ++ ++#include "rocksdb/raw_sst_file_reader.h" ++ ++#include ++#include ++#include ++#include ++ ++ ++#include "db/memtable.h" ++#include "db/write_batch_internal.h" ++#include "options/cf_options.h" ++#include "rocksdb/env.h" ++#include "rocksdb/slice_transform.h" ++#include "rocksdb/status.h" ++#include "rocksdb/table_properties.h" ++#include "rocksdb/utilities/ldb_cmd.h" ++#include "table/block_based/block.h" ++#include "table/block_based/block_based_table_builder.h" ++#include "table/block_based/block_based_table_factory.h" ++#include "table/format.h" ++#include "table/meta_blocks.h" ++#include "table/plain/plain_table_factory.h" ++#include "table/table_reader.h" ++#include "tools/raw_sst_file_iterator.h" ++#include "db/dbformat.h" ++ ++namespace ROCKSDB_NAMESPACE { ++ ++struct RawSstFileReader::Rep { ++ Options options; ++ EnvOptions soptions_; ++ ReadOptions read_options_; ++ ImmutableOptions ioptions_; ++ MutableCFOptions moptions_; ++ InternalKeyComparator internal_comparator_; ++ std::unique_ptr table_properties_; ++ std::unique_ptr table_reader_; ++ std::unique_ptr file_; ++ ++ Rep(const Options& opts, bool verify_checksum, size_t readahead_size) ++ : options(opts), ++ soptions_(EnvOptions()), ++ read_options_(verify_checksum, false), ++ ioptions_(options), ++ moptions_(ColumnFamilyOptions(options)), ++ internal_comparator_(InternalKeyComparator(BytewiseComparator())) { ++ read_options_.readahead_size = readahead_size; ++ } ++}; ++ ++RawSstFileReader::RawSstFileReader(const Options& options, ++ const std::string& file_name, ++ size_t readahead_size, ++ bool verify_checksum, ++ bool silent) :rep_(new Rep(options, ++ verify_checksum, ++ readahead_size)) { ++ file_name_ = file_name; ++ silent_ = silent; ++ options_ = options; ++ file_temp_ = Temperature::kUnknown; ++ init_result_ = GetTableReader(file_name_); +} + -+void write(const std::string& value, FILE* file) { -+ write(value.data(), (int)value.length(), file); ++RawSstFileReader::~RawSstFileReader() {} ++ ++ ++ ++extern const uint64_t kBlockBasedTableMagicNumber; ++extern const uint64_t kLegacyBlockBasedTableMagicNumber; ++extern const uint64_t kPlainTableMagicNumber; ++extern const uint64_t kLegacyPlainTableMagicNumber; ++ ++Status RawSstFileReader::GetTableReader(const std::string& file_path) { ++ // Warning about 'magic_number' being uninitialized shows up only in UBsan ++ // builds. Though access is guarded by 's.ok()' checks, fix the issue to ++ // avoid any warnings. 
++ uint64_t magic_number = Footer::kNullTableMagicNumber; ++ ++ // read table magic number ++ Footer footer; ++ ++ const auto& fs = options_.env->GetFileSystem(); ++ std::unique_ptr file; ++ uint64_t file_size = 0; ++ FileOptions fopts = rep_->soptions_; ++ fopts.temperature = file_temp_; ++ Status s = fs->NewRandomAccessFile(file_path, fopts, &file, nullptr); ++ if (s.ok()) { ++ s = fs->GetFileSize(file_path, IOOptions(), &file_size, nullptr); ++ } ++ ++ // check empty file ++ // if true, skip further processing of this file ++ if (file_size == 0) { ++ return Status::Aborted(file_path, "Empty file"); ++ } ++ ++ rep_->file_.reset(new RandomAccessFileReader(std::move(file), file_path)); ++ ++ FilePrefetchBuffer prefetch_buffer( ++ 0 /* readahead_size */, 0 /* max_readahead_size */, true /* enable */, ++ false /* track_min_offset */); ++ if (s.ok()) { ++ const uint64_t kSstDumpTailPrefetchSize = 512 * 1024; ++ uint64_t prefetch_size = (file_size > kSstDumpTailPrefetchSize) ++ ? kSstDumpTailPrefetchSize ++ : file_size; ++ uint64_t prefetch_off = file_size - prefetch_size; ++ IOOptions opts; ++ s = prefetch_buffer.Prefetch(opts, rep_->file_.get(), prefetch_off, ++ static_cast(prefetch_size), ++ Env::IO_TOTAL /* rate_limiter_priority */); ++ ++ s = ReadFooterFromFile(opts, rep_->file_.get(), &prefetch_buffer, file_size, ++ &footer); ++ } ++ if (s.ok()) { ++ magic_number = footer.table_magic_number(); ++ } ++ ++ if (s.ok()) { ++ if (magic_number == kPlainTableMagicNumber || ++ magic_number == kLegacyPlainTableMagicNumber) { ++ rep_->soptions_.use_mmap_reads = true; ++ ++ fs->NewRandomAccessFile(file_path, fopts, &file, nullptr); ++ rep_->file_.reset(new RandomAccessFileReader(std::move(file), file_path)); ++ } ++ ++ s = ROCKSDB_NAMESPACE::ReadTableProperties( ++ rep_->file_.get(), file_size, magic_number, rep_->ioptions_, &(rep_->table_properties_), ++ /* memory_allocator= */ nullptr, (magic_number == kBlockBasedTableMagicNumber) ++ ? &prefetch_buffer ++ : nullptr); ++ // For old sst format, ReadTableProperties might fail but file can be read ++ if (s.ok()) { ++ s = SetTableOptionsByMagicNumber(magic_number); ++ if (s.ok()) { ++ if (rep_->table_properties_ && !rep_->table_properties_->comparator_name.empty()) { ++ ConfigOptions config_options; ++ const Comparator* user_comparator = nullptr; ++ s = Comparator::CreateFromString(config_options, ++ rep_->table_properties_->comparator_name, ++ &user_comparator); ++ if (s.ok()) { ++ assert(user_comparator); ++ rep_->internal_comparator_ = InternalKeyComparator(user_comparator); ++ } ++ } ++ } ++ } else { ++ if (!silent_) { ++ fprintf(stderr, "Not able to read table properties\n"); ++ } ++ s = SetOldTableOptions(); ++ } ++ options_.comparator = rep_->internal_comparator_.user_comparator(); ++ } ++ ++ if (s.ok()) { ++ s = NewTableReader(file_size); ++ } ++ return s; +} + -+void write(Slice &slice, FILE* file) { -+ int size = (int)slice.size(); -+ write(slice.data(), size, file); ++Status RawSstFileReader::NewTableReader(uint64_t file_size) { ++ auto t_opt = ++ TableReaderOptions(rep_->ioptions_, rep_->moptions_.prefix_extractor, rep_->soptions_, ++ rep_->internal_comparator_, false /* skip_filters */, ++ false /* imortal */, true /* force_direct_prefetch */); ++ // Allow open file with global sequence number for backward compatibility. 
++ t_opt.largest_seqno = kMaxSequenceNumber; ++ ++ // We need to turn off pre-fetching of index and filter nodes for ++ // BlockBasedTable ++ if (options_.table_factory->IsInstanceOf( ++ TableFactory::kBlockBasedTableName())) { ++ return options_.table_factory->NewTableReader(t_opt, std::move(rep_->file_), ++ file_size, &(rep_->table_reader_), ++ /*enable_prefetch=*/false); ++ } ++ ++ // For all other factory implementation ++ return options_.table_factory->NewTableReader(t_opt, std::move(rep_->file_), ++ file_size, &(rep_->table_reader_)); +} + -+void write(SequenceNumber sequenceNumber, FILE* file) { ++Status RawSstFileReader::SetTableOptionsByMagicNumber( ++ uint64_t table_magic_number) { ++ assert(rep_->table_properties_); ++ if (table_magic_number == kBlockBasedTableMagicNumber || ++ table_magic_number == kLegacyBlockBasedTableMagicNumber) { ++ BlockBasedTableFactory* bbtf = new BlockBasedTableFactory(); ++ // To force tail prefetching, we fake reporting two useful reads of 512KB ++ // from the tail. ++ // It needs at least two data points to warm up the stats. ++ bbtf->tail_prefetch_stats()->RecordEffectiveSize(512 * 1024); ++ bbtf->tail_prefetch_stats()->RecordEffectiveSize(512 * 1024); ++ ++ options_.table_factory.reset(bbtf); ++ if (!silent_) { ++ fprintf(stdout, "Sst file format: block-based\n"); ++ } ++ ++ auto& props = rep_->table_properties_->user_collected_properties; ++ auto pos = props.find(BlockBasedTablePropertyNames::kIndexType); ++ if (pos != props.end()) { ++ auto index_type_on_file = static_cast( ++ DecodeFixed32(pos->second.c_str())); ++ if (index_type_on_file == ++ BlockBasedTableOptions::IndexType::kHashSearch) { ++ options_.prefix_extractor.reset(NewNoopTransform()); ++ } ++ } ++ } else if (table_magic_number == kPlainTableMagicNumber || ++ table_magic_number == kLegacyPlainTableMagicNumber) { ++ options_.allow_mmap_reads = true; + -+ char b[8]; -+ int idx = 7; -+ while (idx >= 0) { -+ b[idx] = sequenceNumber % 256; -+ sequenceNumber /= 256; -+ idx -= 1; ++ PlainTableOptions plain_table_options; ++ plain_table_options.user_key_len = kPlainTableVariableLength; ++ plain_table_options.bloom_bits_per_key = 0; ++ plain_table_options.hash_table_ratio = 0; ++ plain_table_options.index_sparseness = 1; ++ plain_table_options.huge_page_tlb_size = 0; ++ plain_table_options.encoding_type = kPlain; ++ plain_table_options.full_scan_mode = true; ++ ++ options_.table_factory.reset(NewPlainTableFactory(plain_table_options)); ++ if (!silent_) { ++ fprintf(stdout, "Sst file format: plain table\n"); ++ } ++ } else { ++ char error_msg_buffer[80]; ++ snprintf(error_msg_buffer, sizeof(error_msg_buffer) - 1, ++ "Unsupported table magic number --- %lx", ++ (long)table_magic_number); ++ return Status::InvalidArgument(error_msg_buffer); + } -+ fwrite(b, 8, 1, file); ++ ++ return Status::OK(); +} + -+void write(ParsedInternalKey &key, FILE* file) { -+ write(key.user_key, file); -+ write(key.sequence, file); -+ write(static_cast(key.type), file); ++Status RawSstFileReader::SetOldTableOptions() { ++ assert(rep_->table_properties_ == nullptr); ++ options_.table_factory = std::make_shared(); ++ if (!silent_) { ++ fprintf(stdout, "Sst file format: block-based(old version)\n"); ++ } ++ ++ return Status::OK(); +} + - Status SstFileDumper::ReadSequential(bool print_kv, uint64_t read_num, -- bool has_from, const std::string& from_key, -- bool has_to, const std::string& to_key, -+ bool has_from, const Slice& from_key, -+ bool has_to, const Slice& to_key, - bool use_from_as_prefix) { - if 
(!table_reader_) { - return init_result_; -@@ -446,6 +490,7 @@ Status SstFileDumper::ReadSequential(bool print_kv, uint64_t read_num, - /*arena=*/nullptr, /*skip_filters=*/false, - TableReaderCaller::kSSTDumpTool); - uint64_t i = 0; -+ - if (has_from) { - InternalKey ikey; - ikey.SetMinPossibleForUserKey(from_key); -@@ -453,6 +498,7 @@ Status SstFileDumper::ReadSequential(bool print_kv, uint64_t read_num, - } else { - iter->SeekToFirst(); - } -+ - for (; iter->Valid(); iter->Next()) { - Slice key = iter->key(); - Slice value = iter->value(); -@@ -478,22 +524,19 @@ Status SstFileDumper::ReadSequential(bool print_kv, uint64_t read_num, - - if (print_kv) { - if (!decode_blob_index_ || ikey.type != kTypeBlobIndex) { -- fprintf(stdout, "%s => %s\n", -- ikey.DebugString(true, output_hex_).c_str(), -- value.ToString(output_hex_).c_str()); -+ write(ikey, out_); -+ write(value, out_); - } else { - BlobIndex blob_index; -- - const Status s = blob_index.DecodeFrom(value); - if (!s.ok()) { -- fprintf(stderr, "%s => error decoding blob index\n", -- ikey.DebugString(true, output_hex_).c_str()); -+ write(ikey, err_); -+ write("error decoding blob index", err_); - continue; - } -- -- fprintf(stdout, "%s => %s\n", -- ikey.DebugString(true, output_hex_).c_str(), -- blob_index.DebugString(output_hex_).c_str()); -+ write(ikey, out_); -+ std::string v = blob_index.DebugString(output_hex_); -+ write(v, out_); - } - } - } -diff --git a/table/sst_file_dumper.h b/table/sst_file_dumper.h -index 7be876390..768c5b1e2 100644 ---- a/table/sst_file_dumper.h -+++ b/table/sst_file_dumper.h -@@ -22,11 +22,13 @@ class SstFileDumper { - bool verify_checksum, bool output_hex, - bool decode_blob_index, - const EnvOptions& soptions = EnvOptions(), -- bool silent = false); -+ bool silent = false, -+ FILE* out = stdout, -+ FILE* err = stderr); - - Status ReadSequential(bool print_kv, uint64_t read_num, bool has_from, -- const std::string& from_key, bool has_to, -- const std::string& to_key, -+ const Slice& from_key, bool has_to, -+ const Slice& to_key, - bool use_from_as_prefix = false); - - Status ReadTableProperties( -@@ -94,6 +96,8 @@ class SstFileDumper { - ReadOptions read_options_; - InternalKeyComparator internal_comparator_; - std::unique_ptr table_properties_; -+ FILE* out_; -+ FILE* err_; - }; - - } // namespace ROCKSDB_NAMESPACE -diff --git a/tools/sst_dump_tool.cc b/tools/sst_dump_tool.cc -index 7053366e7..8f248ddf3 100644 ---- a/tools/sst_dump_tool.cc -+++ b/tools/sst_dump_tool.cc -@@ -31,7 +31,7 @@ static const std::vector> - - namespace { - --void print_help(bool to_stderr) { -+void print_help(bool to_stderr, FILE* err_, FILE* out_) { - std::string supported_compressions; - for (CompressionType ct : GetSupportedCompressions()) { - if (!supported_compressions.empty()) { -@@ -43,7 +43,7 @@ void print_help(bool to_stderr) { - supported_compressions += str; - } - fprintf( -- to_stderr ? stderr : stdout, -+ to_stderr ? 
err_ : out_, - R"(sst_dump --file= [--command=check|scan|raw|recompress|identify] - --file= - Path to SST file or directory containing SST files -@@ -149,7 +149,13 @@ bool ParseIntArg(const char* arg, const std::string arg_name, - } - } // namespace - --int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { -+Slice* AssignSlicePrependedWithLength(const char* buf) { -+ long val = std::stol(buf); -+ return reinterpret_cast(val); ++RawIterator* RawSstFileReader::newIterator( ++ bool has_from, Slice* from, bool has_to, Slice* to) { ++ InternalIterator* iter = rep_->table_reader_->NewIterator( ++ rep_->read_options_, rep_->moptions_.prefix_extractor.get(), ++ /*arena=*/nullptr, /*skip_filters=*/false, ++ TableReaderCaller::kSSTDumpTool); ++ return new RawSstFileIterator(iter, has_from, from, has_to, to); ++ +} ++} // namespace ROCKSDB_NAMESPACE + -+int SSTDumpTool::Run(int argc, char const* const* argv, Options options, -+ FILE* out, FILE* err) { - std::string env_uri, fs_uri; - const char* dir_or_file = nullptr; - uint64_t read_num = std::numeric_limits::max(); -@@ -170,8 +176,9 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - bool has_compression_level_from = false; - bool has_compression_level_to = false; - bool has_specified_compression_types = false; -- std::string from_key; -- std::string to_key; -+ bool silent = false; -+ Slice* from_key = nullptr; -+ Slice* to_key = nullptr; - std::string block_size_str; - std::string compression_level_from_str; - std::string compression_level_to_str; -@@ -197,7 +204,9 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - int64_t tmp_val; - - for (int i = 1; i < argc; i++) { -- if (strncmp(argv[i], "--env_uri=", 10) == 0) { -+ if (strncmp(argv[i], "--silent", 8) == 0) { -+ silent = true; -+ } else if (strncmp(argv[i], "--env_uri=", 10) == 0) { - env_uri = argv[i] + 10; - } else if (strncmp(argv[i], "--fs_uri=", 9) == 0) { - fs_uri = argv[i] + 9; -@@ -217,13 +226,13 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - } else if (strncmp(argv[i], "--command=", 10) == 0) { - command = argv[i] + 10; - } else if (strncmp(argv[i], "--from=", 7) == 0) { -- from_key = argv[i] + 7; -+ from_key = AssignSlicePrependedWithLength(argv[i] + 7); - has_from = true; - } else if (strncmp(argv[i], "--to=", 5) == 0) { -- to_key = argv[i] + 5; -+ to_key = AssignSlicePrependedWithLength(argv[i] + 5); - has_to = true; - } else if (strncmp(argv[i], "--prefix=", 9) == 0) { -- from_key = argv[i] + 9; -+ from_key = AssignSlicePrependedWithLength( argv[i] + 9); - use_from_as_prefix = true; - } else if (strcmp(argv[i], "--show_properties") == 0) { - show_properties = true; -@@ -273,7 +282,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - std::cerr << pik_status.getState() << "\n"; - retc = -1; - } -- fprintf(stdout, "key=%s\n", ikey.DebugString(true, true).c_str()); -+ fprintf(out, "key=%s\n", ikey.DebugString(true, true).c_str()); - return retc; - } else if (ParseIntArg(argv[i], "--compression_level_from=", - "compression_level_from must be numeric", -@@ -288,9 +297,9 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - "compression_max_dict_bytes must be numeric", - &tmp_val)) { - if (tmp_val < 0 || tmp_val > std::numeric_limits::max()) { -- fprintf(stderr, "compression_max_dict_bytes must be a uint32_t: '%s'\n", -+ fprintf(err, "compression_max_dict_bytes must be a uint32_t: '%s'\n", - argv[i]); -- 
print_help(/*to_stderr*/ true); -+ print_help(/*to_stderr*/ true, err, out); - return 1; - } - compression_max_dict_bytes = static_cast(tmp_val); -@@ -298,10 +307,10 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - "compression_zstd_max_train_bytes must be numeric", - &tmp_val)) { - if (tmp_val < 0 || tmp_val > std::numeric_limits::max()) { -- fprintf(stderr, -+ fprintf(err, - "compression_zstd_max_train_bytes must be a uint32_t: '%s'\n", - argv[i]); -- print_help(/*to_stderr*/ true); -+ print_help(/*to_stderr*/ true, err, out); - return 1; - } - compression_zstd_max_train_bytes = static_cast(tmp_val); -@@ -309,56 +318,56 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - "compression_max_dict_buffer_bytes must be numeric", - &tmp_val)) { - if (tmp_val < 0) { -- fprintf(stderr, -+ fprintf(err, - "compression_max_dict_buffer_bytes must be positive: '%s'\n", - argv[i]); -- print_help(/*to_stderr*/ true); -+ print_help(/*to_stderr*/ true, err, out); - return 1; - } - compression_max_dict_buffer_bytes = static_cast(tmp_val); - } else if (strcmp(argv[i], "--compression_use_zstd_finalize_dict") == 0) { - compression_use_zstd_finalize_dict = true; - } else if (strcmp(argv[i], "--help") == 0) { -- print_help(/*to_stderr*/ false); -+ print_help(/*to_stderr*/ false, err, out); - return 0; - } else if (strcmp(argv[i], "--version") == 0) { - printf("%s\n", GetRocksBuildInfoAsString("sst_dump").c_str()); - return 0; - } else { -- fprintf(stderr, "Unrecognized argument '%s'\n\n", argv[i]); -- print_help(/*to_stderr*/ true); -+ fprintf(err, "Unrecognized argument '%s'\n\n", argv[i]); -+ print_help(/*to_stderr*/ true, err, out); - return 1; - } - } - - if(has_compression_level_from && has_compression_level_to) { - if(!has_specified_compression_types || compression_types.size() != 1) { -- fprintf(stderr, "Specify one compression type.\n\n"); -+ fprintf(err, "Specify one compression type.\n\n"); - exit(1); - } - } else if(has_compression_level_from || has_compression_level_to) { -- fprintf(stderr, "Specify both --compression_level_from and " -+ fprintf(err, "Specify both --compression_level_from and " - "--compression_level_to.\n\n"); - exit(1); - } - - if (use_from_as_prefix && has_from) { -- fprintf(stderr, "Cannot specify --prefix and --from\n\n"); -+ fprintf(err, "Cannot specify --prefix and --from\n\n"); - exit(1); - } - - if (input_key_hex) { - if (has_from || use_from_as_prefix) { -- from_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(from_key); -+ *from_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(from_key -> ToString()); - } - if (has_to) { -- to_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(to_key); -+ *to_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(to_key->ToString()); - } - } - - if (dir_or_file == nullptr) { -- fprintf(stderr, "file or directory must be specified.\n\n"); -- print_help(/*to_stderr*/ true); -+ fprintf(err, "file or directory must be specified.\n\n"); -+ print_help(/*to_stderr*/ true, err, out); - exit(1); - } - -@@ -373,10 +382,10 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - Status s = Env::CreateFromUri(config_options, env_uri, fs_uri, &options.env, - &env_guard); - if (!s.ok()) { -- fprintf(stderr, "CreateEnvFromUri: %s\n", s.ToString().c_str()); -+ fprintf(err, "CreateEnvFromUri: %s\n", s.ToString().c_str()); - exit(1); -- } else { -- fprintf(stdout, "options.env is %p\n", options.env); -+ } else if (!silent){ -+ fprintf(out, "options.env is %p\n", options.env); - } - } 
- -@@ -390,7 +399,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - Status s = env->FileExists(dir_or_file); - // dir_or_file does not exist - if (!s.ok()) { -- fprintf(stderr, "%s%s: No such file or directory\n", s.ToString().c_str(), -+ fprintf(err, "%s%s: No such file or directory\n", s.ToString().c_str(), - dir_or_file); - return 1; - } -@@ -421,10 +430,11 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - - ROCKSDB_NAMESPACE::SstFileDumper dumper( - options, filename, Temperature::kUnknown, readahead_size, -- verify_checksum, output_hex, decode_blob_index); -+ verify_checksum, output_hex, decode_blob_index, EnvOptions(), -+ silent, out, err); - // Not a valid SST - if (!dumper.getStatus().ok()) { -- fprintf(stderr, "%s: %s\n", filename.c_str(), -+ fprintf(err, "%s: %s\n", filename.c_str(), - dumper.getStatus().ToString().c_str()); - continue; - } else { -@@ -433,10 +443,11 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - // where there is at least one valid SST - if (valid_sst_files.size() == 1) { - // from_key and to_key are only used for "check", "scan", or "" -- if (command == "check" || command == "scan" || command == "") { -- fprintf(stdout, "from [%s] to [%s]\n", -- ROCKSDB_NAMESPACE::Slice(from_key).ToString(true).c_str(), -- ROCKSDB_NAMESPACE::Slice(to_key).ToString(true).c_str()); -+ if (!silent && (command == "check" || command == "scan" || -+ command == "")) { -+ fprintf(out, "from [%s] to [%s]\n", -+ from_key->ToString(true).c_str(), -+ to_key->ToString(true).c_str()); - } - } - } -@@ -449,7 +460,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - compression_zstd_max_train_bytes, compression_max_dict_buffer_bytes, - !compression_use_zstd_finalize_dict); - if (!st.ok()) { -- fprintf(stderr, "Failed to recompress: %s\n", st.ToString().c_str()); -+ fprintf(err, "Failed to recompress: %s\n", st.ToString().c_str()); - exit(1); - } - return 0; -@@ -461,10 +472,10 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - - st = dumper.DumpTable(out_filename); - if (!st.ok()) { -- fprintf(stderr, "%s: %s\n", filename.c_str(), st.ToString().c_str()); -+ fprintf(err, "%s: %s\n", filename.c_str(), st.ToString().c_str()); - exit(1); - } else { -- fprintf(stdout, "raw dump written to file %s\n", &out_filename[0]); -+ fprintf(out, "raw dump written to file %s\n", &out_filename[0]); - } - continue; - } -@@ -473,10 +484,10 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - if (command == "" || command == "scan" || command == "check") { - st = dumper.ReadSequential( - command == "scan", read_num > 0 ? 
(read_num - total_read) : read_num, -- has_from || use_from_as_prefix, from_key, has_to, to_key, -+ has_from || use_from_as_prefix, *from_key, has_to, *to_key, - use_from_as_prefix); - if (!st.ok()) { -- fprintf(stderr, "%s: %s\n", filename.c_str(), -+ fprintf(err, "%s: %s\n", filename.c_str(), - st.ToString().c_str()); - } - total_read += dumper.GetReadNumber(); -@@ -488,10 +499,10 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - if (command == "verify") { - st = dumper.VerifyChecksum(); - if (!st.ok()) { -- fprintf(stderr, "%s is corrupted: %s\n", filename.c_str(), -+ fprintf(err, "%s is corrupted: %s\n", filename.c_str(), - st.ToString().c_str()); - } else { -- fprintf(stdout, "The file is ok\n"); -+ fprintf(out, "The file is ok\n"); - } - continue; - } -@@ -503,15 +514,15 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - table_properties_from_reader; - st = dumper.ReadTableProperties(&table_properties_from_reader); - if (!st.ok()) { -- fprintf(stderr, "%s: %s\n", filename.c_str(), st.ToString().c_str()); -- fprintf(stderr, "Try to use initial table properties\n"); -+ fprintf(err, "%s: %s\n", filename.c_str(), st.ToString().c_str()); -+ fprintf(err, "Try to use initial table properties\n"); - table_properties = dumper.GetInitTableProperties(); - } else { - table_properties = table_properties_from_reader.get(); - } - if (table_properties != nullptr) { - if (show_properties) { -- fprintf(stdout, -+ fprintf(out, - "Table Properties:\n" - "------------------------------\n" - " %s", -@@ -523,18 +534,18 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - total_index_block_size += table_properties->index_size; - total_filter_block_size += table_properties->filter_size; - if (show_properties) { -- fprintf(stdout, -+ fprintf(out, - "Raw user collected properties\n" - "------------------------------\n"); - for (const auto& kv : table_properties->user_collected_properties) { - std::string prop_name = kv.first; - std::string prop_val = Slice(kv.second).ToString(true); -- fprintf(stdout, " # %s: 0x%s\n", prop_name.c_str(), -+ fprintf(out, " # %s: 0x%s\n", prop_name.c_str(), - prop_val.c_str()); - } - } - } else { -- fprintf(stderr, "Reader unexpectedly returned null properties\n"); -+ fprintf(err, "Reader unexpectedly returned null properties\n"); - } - } - } -@@ -555,9 +566,9 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - // Exit with an error state - if (dir) { - fprintf(stdout, "------------------------------\n"); -- fprintf(stderr, "No valid SST files found in %s\n", dir_or_file); -+ fprintf(err, "No valid SST files found in %s\n", dir_or_file); - } else { -- fprintf(stderr, "%s is not a valid SST file\n", dir_or_file); -+ fprintf(err, "%s is not a valid SST file\n", dir_or_file); - } - return 1; - } else { ++#endif // ROCKSDB_LITE diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java index 8fc4e83e7a1d..f0074e0a1ac9 100644 --- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java +++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.utils; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.ozone.test.tag.Native; import 
org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; @@ -27,6 +28,7 @@ import java.io.ByteArrayInputStream; import java.io.File; import java.nio.file.Path; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.stream.Stream; @@ -56,37 +58,27 @@ private static Stream nativeLibraryDirectoryLocations() { @Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) @ParameterizedTest @MethodSource("nativeLibraryDirectoryLocations") - public void testNativeLibraryLoader( - String nativeLibraryDirectoryLocation) { + public void testNativeLibraryLoader(String nativeLibraryDirectoryLocation) throws NativeLibraryNotLoadedException { Map libraryLoadedMap = new HashMap<>(); NativeLibraryLoader loader = new NativeLibraryLoader(libraryLoadedMap); - try (MockedStatic mockedNativeLibraryLoader = - mockStatic(NativeLibraryLoader.class, - CALLS_REAL_METHODS)) { - mockedNativeLibraryLoader.when(() -> - NativeLibraryLoader.getSystemProperty(same(NATIVE_LIB_TMP_DIR))) + try (MockedStatic mockedNativeLibraryLoader = mockStatic(NativeLibraryLoader.class, + CALLS_REAL_METHODS)) { + mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getSystemProperty(same(NATIVE_LIB_TMP_DIR))) .thenReturn(nativeLibraryDirectoryLocation); - mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getInstance()) - .thenReturn(loader); - assertTrue(NativeLibraryLoader.getInstance() - .loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); - assertTrue(NativeLibraryLoader - .isLibraryLoaded(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); + mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getInstance()).thenReturn(loader); + ManagedRawSSTFileReader.loadLibrary(); + assertTrue(NativeLibraryLoader.isLibraryLoaded(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); // Mocking to force copy random bytes to create a lib file to // nativeLibraryDirectoryLocation. But load library will fail. - mockedNativeLibraryLoader.when(() -> - NativeLibraryLoader.getResourceStream(anyString())) + mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getResourceStream(anyString())) .thenReturn(new ByteArrayInputStream(new byte[]{0, 1, 2, 3})); String dummyLibraryName = "dummy_lib"; - NativeLibraryLoader.getInstance().loadLibrary(dummyLibraryName); + NativeLibraryLoader.getInstance().loadLibrary(dummyLibraryName, Collections.emptyList()); NativeLibraryLoader.isLibraryLoaded(dummyLibraryName); // Checking if the resource with random was copied to a temp file. - File[] libPath = - new File(nativeLibraryDirectoryLocation == null ? "" : - nativeLibraryDirectoryLocation) - .getAbsoluteFile().listFiles((dir, name) -> - name.startsWith(dummyLibraryName) && - name.endsWith(NativeLibraryLoader.getLibOsSuffix())); + File[] libPath = new File(nativeLibraryDirectoryLocation == null ? 
"" : nativeLibraryDirectoryLocation) + .getAbsoluteFile().listFiles((dir, name) -> name.startsWith(dummyLibraryName) && + name.endsWith(NativeLibraryLoader.getLibOsSuffix())); assertNotNull(libPath); assertEquals(1, libPath.length); assertTrue(libPath[0].delete()); diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedRawSSTFileIterator.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedRawSSTFileIterator.java new file mode 100644 index 000000000000..00816e60d7f2 --- /dev/null +++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedRawSSTFileIterator.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.utils.db.managed; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; +import org.apache.hadoop.hdds.utils.TestUtils; +import org.apache.ozone.test.tag.Native; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Named; +import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.File; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.TreeMap; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * Test for ManagedRawSSTFileReaderIterator. + */ +@Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) +class TestManagedRawSSTFileIterator { + + @TempDir + private Path tempDir; + + private File createSSTFileWithKeys( + TreeMap, String> keys) throws Exception { + File file = Files.createFile(tempDir.resolve("tmp_sst_file.sst")).toFile(); + try (ManagedEnvOptions envOptions = new ManagedEnvOptions(); + ManagedOptions managedOptions = new ManagedOptions(); + ManagedSstFileWriter sstFileWriter = new ManagedSstFileWriter(envOptions, managedOptions)) { + sstFileWriter.open(file.getAbsolutePath()); + for (Map.Entry, String> entry : keys.entrySet()) { + if (entry.getKey().getValue() == 0) { + sstFileWriter.delete(entry.getKey().getKey().getBytes(StandardCharsets.UTF_8)); + } else { + sstFileWriter.put(entry.getKey().getKey().getBytes(StandardCharsets.UTF_8), + entry.getValue().getBytes(StandardCharsets.UTF_8)); + } + } + sstFileWriter.finish(); + } + return file; + } + + private static Stream keyValueFormatArgs() { + return Stream.of(Arguments.of(Named.of("Key starting with a single quote", "'key%1$d=>"), + Named.of("Value starting with a number ending with a single quote", "%1$dvalue'")), + Arguments.of(Named.of("Key ending with a number", "key%1$d"), + Named.of("Value starting & ending with a number", "%1$dvalue%1$d")), + Arguments.of(Named.of("Key starting with a single quote & ending with a number", "'key%1$d"), + Named.of("Value starting & ending with a number & elosed within quotes", "%1$d'value%1$d'")), + Arguments.of(Named.of("Key starting with a single quote & ending with a number", "'key%1$d"), + Named.of("Value starting & ending with a number & elosed within quotes", "%1$d'value%1$d'")), + Arguments.of(Named.of("Key ending with a number", "key%1$d"), + Named.of("Value starting & ending with a number & containing null character & new line character", + "%1$dvalue\n\0%1$d")), + Arguments.of(Named.of("Key ending with a number & containing a null character", "key\0%1$d"), + Named.of("Value starting & ending with a number 
& elosed within quotes", "%1$dvalue\r%1$d"))); + } + + @BeforeAll + public static void init() throws NativeLibraryNotLoadedException { + ManagedRawSSTFileReader.loadLibrary(); + } + + + @ParameterizedTest + @MethodSource("keyValueFormatArgs") + public void testSSTDumpIteratorWithKeyFormat(String keyFormat, String valueFormat) throws Exception { + TreeMap, String> keys = IntStream.range(0, 100).boxed().collect(Collectors.toMap( + i -> Pair.of(String.format(keyFormat, i), i % 2), + i -> i % 2 == 0 ? "" : String.format(valueFormat, i), + (v1, v2) -> v2, + TreeMap::new)); + File file = createSSTFileWithKeys(keys); + try (ManagedOptions options = new ManagedOptions(); + ManagedRawSSTFileReader reader = new ManagedRawSSTFileReader<>( + options, file.getAbsolutePath(), 2 * 1024 * 1024)) { + List> testBounds = TestUtils.getTestingBounds(keys.keySet().stream() + .collect(Collectors.toMap(Pair::getKey, Pair::getValue, (v1, v2) -> v1, TreeMap::new))); + for (Optional keyStart : testBounds) { + for (Optional keyEnd : testBounds) { + Map, String> expectedKeys = keys.entrySet().stream() + .filter(e -> keyStart.map(s -> e.getKey().getKey().compareTo(s) >= 0).orElse(true)) + .filter(e -> keyEnd.map(s -> e.getKey().getKey().compareTo(s) < 0).orElse(true)) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + Optional lowerBound = keyStart.map(s -> new ManagedSlice(StringUtils.string2Bytes(s))); + Optional upperBound = keyEnd.map(s -> new ManagedSlice(StringUtils.string2Bytes(s))); + try (ManagedRawSSTFileIterator iterator + = reader.newIterator(Function.identity(), lowerBound.orElse(null), upperBound.orElse(null))) { + while (iterator.hasNext()) { + ManagedRawSSTFileIterator.KeyValue r = iterator.next(); + String key = StringUtils.bytes2String(r.getKey()); + Pair recordKey = Pair.of(key, r.getType()); + assertThat(expectedKeys).containsKey(recordKey); + assertEquals(Optional.ofNullable(expectedKeys.get(recordKey)).orElse(""), + StringUtils.bytes2String(r.getValue())); + expectedKeys.remove(recordKey); + } + assertEquals(0, expectedKeys.size()); + } finally { + lowerBound.ifPresent(ManagedSlice::close); + upperBound.ifPresent(ManagedSlice::close); + } + } + } + } + } +} diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java deleted file mode 100644 index d2796c19fc50..000000000000 --- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java +++ /dev/null @@ -1,282 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db.managed; - -import com.google.common.primitives.Bytes; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.hdds.StringUtils; -import org.apache.hadoop.hdds.utils.NativeLibraryLoader; -import org.apache.hadoop.hdds.utils.TestUtils; -import org.apache.ozone.test.tag.Native; -import org.apache.ozone.test.tag.Unhealthy; -import org.junit.jupiter.api.Named; -import org.junit.jupiter.api.io.TempDir; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.MethodSource; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.TreeMap; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; -import java.util.stream.IntStream; -import java.util.stream.Stream; - -import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assumptions.assumeTrue; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -/** - * Test for ManagedSSTDumpIterator. 
- */ -class TestManagedSSTDumpIterator { - - @TempDir - private Path tempDir; - - private File createSSTFileWithKeys( - TreeMap, String> keys) throws Exception { - File file = Files.createFile(tempDir.resolve("tmp_sst_file.sst")).toFile(); - try (ManagedEnvOptions envOptions = new ManagedEnvOptions(); - ManagedOptions managedOptions = new ManagedOptions(); - ManagedSstFileWriter sstFileWriter = new ManagedSstFileWriter( - envOptions, managedOptions)) { - sstFileWriter.open(file.getAbsolutePath()); - for (Map.Entry, String> entry : keys.entrySet()) { - if (entry.getKey().getValue() == 0) { - sstFileWriter.delete(entry.getKey().getKey() - .getBytes(StandardCharsets.UTF_8)); - } else { - sstFileWriter.put(entry.getKey().getKey() - .getBytes(StandardCharsets.UTF_8), - entry.getValue().getBytes(StandardCharsets.UTF_8)); - } - } - sstFileWriter.finish(); - } - return file; - } - - private static Stream keyValueFormatArgs() { - return Stream.of( - Arguments.of( - Named.of("Key starting with a single quote", - "'key%1$d=>"), - Named.of("Value starting with a number ending with a" + - " single quote", "%1$dvalue'") - ), - Arguments.of( - Named.of("Key ending with a number", "key%1$d"), - Named.of("Value starting & ending with a number", "%1$dvalue%1$d") - ), - Arguments.of( - Named.of("Key starting with a single quote & ending" + - " with a number", "'key%1$d"), - Named.of("Value starting & ending with a number " + - "& elosed within quotes", "%1$d'value%1$d'")), - Arguments.of( - Named.of("Key starting with a single quote & ending" + - " with a number", "'key%1$d"), - Named.of("Value starting & ending with a number " + - "& elosed within quotes", "%1$d'value%1$d'") - ), - Arguments.of( - Named.of("Key ending with a number", "key%1$d"), - Named.of("Value starting & ending with a number " + - "& containing null character & new line character", - "%1$dvalue\n\0%1$d") - ), - Arguments.of( - Named.of("Key ending with a number & containing" + - " a null character", "key\0%1$d"), - Named.of("Value starting & ending with a number " + - "& elosed within quotes", "%1$dvalue\r%1$d") - ) - ); - } - - private static byte[] getBytes(Integer val) { - ByteBuffer destByteBuffer = ByteBuffer.allocate(4); - destByteBuffer.order(ByteOrder.BIG_ENDIAN); - destByteBuffer.putInt(val); - return destByteBuffer.array(); - } - - private static byte[] getBytes(Long val) { - ByteBuffer destByteBuffer = ByteBuffer.allocate(8); - destByteBuffer.order(ByteOrder.BIG_ENDIAN); - destByteBuffer.putLong(val); - return destByteBuffer.array(); - } - - private static byte[] getBytes(String val) { - byte[] b = new byte[val.length()]; - for (int i = 0; i < val.length(); i++) { - b[i] = (byte) val.charAt(i); - } - return b; - } - - private static Stream invalidPipeInputStreamBytes() { - return Stream.of( - Arguments.of(Named.of("Invalid 3 byte integer", - new byte[]{0, 0, 0})), - Arguments.of(Named.of("Invalid 2 byte integer", - new byte[]{0, 0})), - Arguments.of(Named.of("Invalid 1 byte integer", - new byte[]{0, 0})), - Arguments.of(Named.of("Invalid key name length", - Bytes.concat(getBytes(4), getBytes("key")))), - Arguments.of(Named.of("Invalid Unsigned Long length", - Bytes.concat(getBytes(4), getBytes("key1"), - new byte[]{0, 0}))), - Arguments.of(Named.of("Invalid Sequence number", - Bytes.concat(getBytes(4), getBytes("key1")))), - Arguments.of(Named.of("Invalid Type", - Bytes.concat(getBytes(4), getBytes("key1"), - getBytes(4L)))), - Arguments.of(Named.of("Invalid Value", - Bytes.concat(getBytes(4), getBytes("key"), - getBytes(4L), 
getBytes(0)))), - Arguments.of(Named.of("Invalid Value length", - Bytes.concat(getBytes(4), getBytes("key"), - getBytes(4L), getBytes(1), getBytes(6), - getBytes("val")))) - ); - } - - @Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) - @ParameterizedTest - @MethodSource("keyValueFormatArgs") - @Unhealthy("HDDS-9274") - public void testSSTDumpIteratorWithKeyFormat(String keyFormat, - String valueFormat) - throws Exception { - assumeTrue(NativeLibraryLoader.getInstance().loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); - - TreeMap, String> keys = - IntStream.range(0, 100).boxed().collect( - Collectors.toMap( - i -> Pair.of(String.format(keyFormat, i), i % 2), - i -> i % 2 == 0 ? "" : String.format(valueFormat, i), - (v1, v2) -> v2, - TreeMap::new)); - File file = createSSTFileWithKeys(keys); - ExecutorService executorService = - new ThreadPoolExecutor(1, 1, 0, TimeUnit.SECONDS, - new ArrayBlockingQueue<>(1), - new ThreadPoolExecutor.CallerRunsPolicy()); - ManagedSSTDumpTool tool = new ManagedSSTDumpTool(executorService, 8192); - List> testBounds = TestUtils.getTestingBounds( - keys.keySet().stream().collect(Collectors.toMap(Pair::getKey, - Pair::getValue, (v1, v2) -> v1, TreeMap::new))); - for (Optional keyStart : testBounds) { - for (Optional keyEnd : testBounds) { - Map, String> expectedKeys = keys.entrySet() - .stream().filter(e -> keyStart.map(s -> e.getKey().getKey() - .compareTo(s) >= 0).orElse(true)) - .filter(e -> keyEnd.map(s -> e.getKey().getKey().compareTo(s) < 0) - .orElse(true)) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - Optional lowerBound = keyStart - .map(s -> new ManagedSlice(StringUtils.string2Bytes(s))); - Optional upperBound = keyEnd - .map(s -> new ManagedSlice(StringUtils.string2Bytes(s))); - try (ManagedOptions options = new ManagedOptions(); - ManagedSSTDumpIterator iterator = - new ManagedSSTDumpIterator(tool, - file.getAbsolutePath(), options, lowerBound.orElse(null), - upperBound.orElse(null)) { - @Override - protected KeyValue getTransformedValue( - Optional value) { - return value.orElse(null); - } - } - ) { - while (iterator.hasNext()) { - ManagedSSTDumpIterator.KeyValue r = iterator.next(); - String key = new String(r.getKey(), StandardCharsets.UTF_8); - Pair recordKey = Pair.of(key, r.getType()); - assertThat(expectedKeys).containsKey(recordKey); - assertEquals(Optional.ofNullable(expectedKeys - .get(recordKey)).orElse(""), - new String(r.getValue(), StandardCharsets.UTF_8)); - expectedKeys.remove(recordKey); - } - assertEquals(0, expectedKeys.size()); - } finally { - lowerBound.ifPresent(ManagedSlice::close); - upperBound.ifPresent(ManagedSlice::close); - } - } - } - executorService.shutdown(); - } - - - @ParameterizedTest - @MethodSource("invalidPipeInputStreamBytes") - public void testInvalidSSTDumpIteratorWithKeyFormat(byte[] inputBytes) - throws ExecutionException, - InterruptedException, IOException { - ByteArrayInputStream byteArrayInputStream = - new ByteArrayInputStream(inputBytes); - ManagedSSTDumpTool tool = mock(ManagedSSTDumpTool.class); - File file = Files.createFile(tempDir.resolve("tmp_file.sst")).toFile(); - Future future = mock(Future.class); - when(future.isDone()).thenReturn(false); - when(future.get()).thenReturn(0); - when(tool.run(any(Map.class), - any(ManagedOptions.class))) - .thenReturn(new ManagedSSTDumpTool.SSTDumpToolTask(future, - byteArrayInputStream)); - try (ManagedOptions options = new ManagedOptions()) { - assertThrows(IllegalStateException.class, - () -> new ManagedSSTDumpIterator( - tool, 
file.getAbsolutePath(), options) { - @Override - protected KeyValue getTransformedValue( - Optional value) { - return value.orElse(null); - } - }); - } - } -} diff --git a/hadoop-hdds/rocks-native/src/test/resources/auditlog.properties b/hadoop-hdds/rocks-native/src/test/resources/auditlog.properties new file mode 100644 index 000000000000..959da047fb7f --- /dev/null +++ b/hadoop-hdds/rocks-native/src/test/resources/auditlog.properties @@ -0,0 +1,76 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with this +# work for additional information regarding copyright ownership. The ASF +# licenses this file to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +#
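The SST fixtures in these tests are produced with ManagedSstFileWriter, Ozone's managed wrapper around RocksDB's SST writer. As a point of reference, a minimal sketch of the underlying org.rocksdb pattern looks roughly like this (the file path and keys are placeholders, not values from the patch):

    import java.nio.charset.StandardCharsets;
    import org.rocksdb.EnvOptions;
    import org.rocksdb.Options;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.SstFileWriter;

    public final class SstWriteSketch {
      public static void main(String[] args) throws RocksDBException {
        try (EnvOptions envOptions = new EnvOptions();
             Options options = new Options();
             SstFileWriter writer = new SstFileWriter(envOptions, options)) {
          writer.open("/tmp/example.sst");                         // placeholder path
          // Entries must be added in ascending key order, which is why the tests build a TreeMap first.
          writer.put("key1".getBytes(StandardCharsets.UTF_8),
              "value1".getBytes(StandardCharsets.UTF_8));
          writer.delete("key2".getBytes(StandardCharsets.UTF_8));  // records a delete tombstone
          writer.finish();
        }
      }
    }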
+# http://www.apache.org/licenses/LICENSE-2.0 +#
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# +name=PropertiesConfig + +# Checks for config change periodically and reloads +monitorInterval=5 + +filter=read, write +# filter.read.onMatch = DENY avoids logging all READ events +# filter.read.onMatch = ACCEPT permits logging all READ events +# The above two settings ignore the log levels in configuration +# filter.read.onMatch = NEUTRAL permits logging of only those READ events +# which are attempted at log level equal or greater than log level specified +# in the configuration +filter.read.type = MarkerFilter +filter.read.marker = READ +filter.read.onMatch = NEUTRAL +filter.read.onMismatch = NEUTRAL + +# filter.write.onMatch = DENY avoids logging all WRITE events +# filter.write.onMatch = ACCEPT permits logging all WRITE events +# The above two settings ignore the log levels in configuration +# filter.write.onMatch = NEUTRAL permits logging of only those WRITE events +# which are attempted at log level equal or greater than log level specified +# in the configuration +filter.write.type = MarkerFilter +filter.write.marker = WRITE +filter.write.onMatch = NEUTRAL +filter.write.onMismatch = NEUTRAL + +# Log Levels are organized from most specific to least: +# OFF (most specific, no logging) +# FATAL (most specific, little data) +# ERROR +# WARN +# INFO +# DEBUG +# TRACE (least specific, a lot of data) +# ALL (least specific, all data) + +appenders = console, audit +appender.console.type = Console +appender.console.name = STDOUT +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = %-5level | %c{1} | %msg%n + +appender.audit.type = File +appender.audit.name = AUDITLOG +appender.audit.fileName=audit.log +appender.audit.layout.type=PatternLayout +appender.audit.layout.pattern= %-5level | %c{1} | %C | %msg%n + +loggers=audit +logger.audit.type=AsyncLogger +logger.audit.name=OMAudit +logger.audit.level = INFO +logger.audit.appenderRefs = audit +logger.audit.appenderRef.file.ref = AUDITLOG + +rootLogger.level = INFO +rootLogger.appenderRefs = stdout +rootLogger.appenderRef.stdout.ref = STDOUT diff --git a/hadoop-hdds/rocks-native/src/test/resources/log4j.properties b/hadoop-hdds/rocks-native/src/test/resources/log4j.properties new file mode 100644 index 000000000000..398786689af3 --- /dev/null +++ b/hadoop-hdds/rocks-native/src/test/resources/log4j.properties @@ -0,0 +1,23 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
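The auditlog.properties file added above gates audit events on the READ and WRITE markers and routes them to the AUDITLOG file appender. A rough sketch of log statements that those MarkerFilters would match, using the plain Log4j 2 API (the logger name OMAudit comes from the config; the message contents are made up):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.Marker;
    import org.apache.logging.log4j.MarkerManager;

    public final class AuditLogSketch {
      private static final Logger AUDIT = LogManager.getLogger("OMAudit");
      private static final Marker READ = MarkerManager.getMarker("READ");
      private static final Marker WRITE = MarkerManager.getMarker("WRITE");

      public static void main(String[] args) {
        // Both filters are NEUTRAL on match, so INFO-level events reach the audit appender.
        AUDIT.info(READ, "user=alice | op=GET_KEY | status=SUCCESS");
        AUDIT.info(WRITE, "user=alice | op=CREATE_KEY | status=SUCCESS");
      }
    }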
+# +# log4j configuration used during build and unit tests + +log4j.rootLogger=INFO,stdout +log4j.threshold=ALL +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java index be949cd4fbdd..913eeb73384a 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java @@ -20,12 +20,12 @@ import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator; import org.apache.hadoop.hdds.utils.db.managed.ManagedSlice; import org.apache.hadoop.util.ClosableIterator; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedReadOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpIterator; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool; import org.rocksdb.ReadOptions; import org.rocksdb.RocksDBException; import org.rocksdb.SstFileReader; @@ -37,9 +37,9 @@ import java.util.Iterator; import java.util.NoSuchElementException; import java.util.Objects; -import java.util.Optional; import java.util.Spliterator; import java.util.Spliterators; +import java.util.function.Function; import java.util.stream.Stream; import java.util.stream.StreamSupport; @@ -91,111 +91,96 @@ public long getEstimatedTotalKeys() throws RocksDBException { } public Stream getKeyStream(String lowerBound, - String upperBound) throws RocksDBException { + String upperBound) throws RocksDBException { // TODO: [SNAPSHOT] Check if default Options and ReadOptions is enough. 
- final MultipleSstFileIterator itr = - new MultipleSstFileIterator(sstFiles) { - private ManagedOptions options; - private ReadOptions readOptions; - - private ManagedSlice lowerBoundSLice; - - private ManagedSlice upperBoundSlice; - - @Override - protected void init() { - this.options = new ManagedOptions(); - this.readOptions = new ManagedReadOptions(); - if (Objects.nonNull(lowerBound)) { - this.lowerBoundSLice = new ManagedSlice( - StringUtils.string2Bytes(lowerBound)); - readOptions.setIterateLowerBound(lowerBoundSLice); - } - - if (Objects.nonNull(upperBound)) { - this.upperBoundSlice = new ManagedSlice( - StringUtils.string2Bytes(upperBound)); - readOptions.setIterateUpperBound(upperBoundSlice); - } - } + final MultipleSstFileIterator itr = new MultipleSstFileIterator(sstFiles) { + private ManagedOptions options; + private ReadOptions readOptions; + + private ManagedSlice lowerBoundSLice; + + private ManagedSlice upperBoundSlice; + + @Override + protected void init() { + this.options = new ManagedOptions(); + this.readOptions = new ManagedReadOptions(); + if (Objects.nonNull(lowerBound)) { + this.lowerBoundSLice = new ManagedSlice( + StringUtils.string2Bytes(lowerBound)); + readOptions.setIterateLowerBound(lowerBoundSLice); + } - @Override - protected ClosableIterator getKeyIteratorForFile(String file) - throws RocksDBException { - return new ManagedSstFileIterator(file, options, readOptions) { - @Override - protected String getIteratorValue( - SstFileReaderIterator iterator) { - return new String(iterator.key(), UTF_8); - } - }; - } + if (Objects.nonNull(upperBound)) { + this.upperBoundSlice = new ManagedSlice( + StringUtils.string2Bytes(upperBound)); + readOptions.setIterateUpperBound(upperBoundSlice); + } + } + @Override + protected ClosableIterator getKeyIteratorForFile(String file) throws RocksDBException { + return new ManagedSstFileIterator(file, options, readOptions) { @Override - public void close() throws UncheckedIOException { - super.close(); - options.close(); - readOptions.close(); - IOUtils.closeQuietly(lowerBoundSLice, upperBoundSlice); + protected String getIteratorValue( + SstFileReaderIterator iterator) { + return new String(iterator.key(), UTF_8); } }; + } + + @Override + public void close() throws UncheckedIOException { + super.close(); + options.close(); + readOptions.close(); + IOUtils.closeQuietly(lowerBoundSLice, upperBoundSlice); + } + }; return getStreamFromIterator(itr); } - public Stream getKeyStreamWithTombstone( - ManagedSSTDumpTool sstDumpTool, String lowerBound, - String upperBound) throws RocksDBException { - final MultipleSstFileIterator itr = - new MultipleSstFileIterator(sstFiles) { - //TODO: [SNAPSHOT] Check if default Options is enough. - private ManagedOptions options; - private ManagedSlice lowerBoundSlice; - private ManagedSlice upperBoundSlice; - - @Override - protected void init() { - this.options = new ManagedOptions(); - if (Objects.nonNull(lowerBound)) { - this.lowerBoundSlice = new ManagedSlice( - StringUtils.string2Bytes(lowerBound)); - } - if (Objects.nonNull(upperBound)) { - this.upperBoundSlice = new ManagedSlice( - StringUtils.string2Bytes(upperBound)); - } - } + public Stream getKeyStreamWithTombstone(String lowerBound, String upperBound) throws RocksDBException { + final MultipleSstFileIterator itr = new MultipleSstFileIterator(sstFiles) { + //TODO: [SNAPSHOT] Check if default Options is enough. 
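The refactored getKeyStream above wraps each SST file in RocksDB's SstFileReader and limits the scan with iterate bounds set on ReadOptions. A standalone sketch of that pattern against the plain org.rocksdb API (the path and bound values are placeholders):

    import java.nio.charset.StandardCharsets;
    import org.rocksdb.Options;
    import org.rocksdb.ReadOptions;
    import org.rocksdb.RocksDBException;
    import org.rocksdb.Slice;
    import org.rocksdb.SstFileReader;
    import org.rocksdb.SstFileReaderIterator;

    public final class SstBoundedScanSketch {
      public static void main(String[] args) throws RocksDBException {
        try (Options options = new Options();
             Slice lower = new Slice("key-a".getBytes(StandardCharsets.UTF_8));  // placeholder lower bound
             Slice upper = new Slice("key-z".getBytes(StandardCharsets.UTF_8));  // placeholder upper bound
             ReadOptions readOptions = new ReadOptions()
                 .setIterateLowerBound(lower)
                 .setIterateUpperBound(upper);
             SstFileReader reader = new SstFileReader(options)) {
          reader.open("/tmp/example.sst");                                       // placeholder path
          try (SstFileReaderIterator it = reader.newIterator(readOptions)) {
            // Only keys in [lower, upper) are visited, mirroring the bounds handling above.
            for (it.seekToFirst(); it.isValid(); it.next()) {
              System.out.println(new String(it.key(), StandardCharsets.UTF_8));
            }
          }
        }
      }
    }

Note that the plain SstFileReader, like getKeyStream, only returns live entries; that is why getKeyStreamWithTombstone below switches to the native ManagedRawSSTFileReader to surface tombstones as well.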
+ private ManagedOptions options; + private ManagedSlice lowerBoundSlice; + private ManagedSlice upperBoundSlice; + + @Override + protected void init() { + this.options = new ManagedOptions(); + if (Objects.nonNull(lowerBound)) { + this.lowerBoundSlice = new ManagedSlice( + StringUtils.string2Bytes(lowerBound)); + } + if (Objects.nonNull(upperBound)) { + this.upperBoundSlice = new ManagedSlice( + StringUtils.string2Bytes(upperBound)); + } + } - @Override - protected ClosableIterator getKeyIteratorForFile(String file) - throws IOException { - return new ManagedSSTDumpIterator(sstDumpTool, file, - options, lowerBoundSlice, upperBoundSlice) { - @Override - protected String getTransformedValue(Optional value) { - return value.map(v -> StringUtils.bytes2String(v.getKey())) - .orElse(null); - } - }; - } + @Override + protected ClosableIterator getKeyIteratorForFile(String file) { + return new ManagedRawSstFileIterator(file, options, lowerBoundSlice, upperBoundSlice, + keyValue -> StringUtils.bytes2String(keyValue.getKey())); + } - @Override - public void close() throws UncheckedIOException { - super.close(); - options.close(); - IOUtils.closeQuietly(lowerBoundSlice, upperBoundSlice); - } - }; + @Override + public void close() throws UncheckedIOException { + super.close(); + options.close(); + IOUtils.closeQuietly(lowerBoundSlice, upperBoundSlice); + } + }; return getStreamFromIterator(itr); } - private abstract static class ManagedSstFileIterator implements - ClosableIterator { - private SstFileReader fileReader; - private SstFileReaderIterator fileReaderIterator; + private abstract static class ManagedSstFileIterator implements ClosableIterator { + private final SstFileReader fileReader; + private final SstFileReaderIterator fileReaderIterator; - ManagedSstFileIterator(String path, ManagedOptions options, - ReadOptions readOptions) - throws RocksDBException { + ManagedSstFileIterator(String path, ManagedOptions options, ReadOptions readOptions) throws RocksDBException { this.fileReader = new SstFileReader(options); this.fileReader.open(path); this.fileReaderIterator = fileReader.newIterator(readOptions); @@ -223,8 +208,35 @@ public String next() { } } - private abstract static class MultipleSstFileIterator implements - ClosableIterator { + private static class ManagedRawSstFileIterator implements ClosableIterator { + private final ManagedRawSSTFileReader fileReader; + private final ManagedRawSSTFileIterator fileReaderIterator; + private static final int READ_AHEAD_SIZE = 2 * 1024 * 1024; + + ManagedRawSstFileIterator(String path, ManagedOptions options, ManagedSlice lowerBound, ManagedSlice upperBound, + Function keyValueFunction) { + this.fileReader = new ManagedRawSSTFileReader<>(options, path, READ_AHEAD_SIZE); + this.fileReaderIterator = fileReader.newIterator(keyValueFunction, lowerBound, upperBound); + } + + @Override + public void close() { + this.fileReaderIterator.close(); + this.fileReader.close(); + } + + @Override + public boolean hasNext() { + return fileReaderIterator.hasNext(); + } + + @Override + public String next() { + return fileReaderIterator.next(); + } + } + + private abstract static class MultipleSstFileIterator implements ClosableIterator { private final Iterator fileNameIterator; @@ -238,16 +250,13 @@ private MultipleSstFileIterator(Collection files) { protected abstract void init(); - protected abstract ClosableIterator getKeyIteratorForFile(String file) - throws RocksDBException, - IOException; + protected abstract ClosableIterator getKeyIteratorForFile(String file) 
throws RocksDBException, IOException; @Override public boolean hasNext() { try { do { - if (Objects.nonNull(currentFileIterator) && - currentFileIterator.hasNext()) { + if (Objects.nonNull(currentFileIterator) && currentFileIterator.hasNext()) { return true; } } while (moveToNextFile()); diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/graph/TestPrintableGraph.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/graph/TestPrintableGraph.java new file mode 100644 index 000000000000..8031eca7b0db --- /dev/null +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/graph/TestPrintableGraph.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ozone.graph; + +import com.google.common.graph.MutableGraph; +import org.apache.ozone.rocksdiff.CompactionNode; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.when; + +/** + * This class is used for testing the PrintableGraph class. + * It contains methods to test the generation and printing of graphs with different types. 
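TestPrintableGraph below exercises PrintableGraph with a mocked MutableGraph. For orientation, the compaction DAG it renders is a Guava graph of CompactionNode objects; a tiny hand-built equivalent might look like this (edge direction and node values are illustrative only):

    import com.google.common.graph.GraphBuilder;
    import com.google.common.graph.MutableGraph;
    import org.apache.ozone.rocksdiff.CompactionNode;

    public final class CompactionGraphSketch {
      public static void main(String[] args) {
        MutableGraph<CompactionNode> graph = GraphBuilder.directed().build();
        CompactionNode input = new CompactionNode("fileName1", 100, 100, "startKey1", "endKey1", "columnFamily1");
        CompactionNode output = new CompactionNode("fileName2", 200, 200, "startKey2", "endKey2", "columnFamily1");
        graph.putEdge(input, output);  // an illustrative edge from an input SST to a compaction output SST
        System.out.println(graph.nodes());
      }
    }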
+ */ +@ExtendWith(MockitoExtension.class) +public class TestPrintableGraph { + @TempDir + private Path dir; + + @Mock + private MutableGraph mutableGraph; + + @ParameterizedTest + @EnumSource(PrintableGraph.GraphType.class) + void testPrintNoGraphMessage(PrintableGraph.GraphType graphType) { + PrintableGraph graph = new PrintableGraph(mutableGraph, graphType); + try { + graph.generateImage(dir.resolve(graphType.name()).toString()); + } catch (IOException e) { + assertEquals("Graph is empty.", e.getMessage()); + } + } + + @ParameterizedTest + @EnumSource(PrintableGraph.GraphType.class) + void testPrintActualGraph(PrintableGraph.GraphType graphType) throws IOException { + Set nodes = Stream.of( + new CompactionNode("fileName1", + 100, 100, "startKey1", "endKey1", "columnFamily1"), + new CompactionNode("fileName2", + 200, 200, "startKey2", "endKey2", null), + new CompactionNode("fileName3", + 300, 300, null, "endKey3", "columnFamily3"), + new CompactionNode("fileName4", + 400, 400, "startKey4", null, "columnFamily4") + ).collect(Collectors.toSet()); + when(mutableGraph.nodes()).thenReturn(nodes); + + PrintableGraph graph = new PrintableGraph(mutableGraph, graphType); + graph.generateImage(dir.resolve(graphType.name()).toString()); + + assertTrue(Files.exists(dir.resolve(graphType.name())), "Graph hasn't been generated"); + } +} diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java index edc491e7c8da..1031992f3b5d 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java @@ -17,18 +17,15 @@ */ package org.apache.ozone.rocksdb.util; -import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.StringUtils; -import org.apache.hadoop.hdds.utils.NativeLibraryLoader; import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; import org.apache.hadoop.hdds.utils.TestUtils; import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileWriter; import org.apache.ozone.test.tag.Native; -import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; @@ -42,10 +39,6 @@ import java.util.Optional; import java.util.SortedMap; import java.util.TreeMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.SynchronousQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -164,51 +157,38 @@ public void testGetKeyStream(int numberOfFiles) @Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) @ParameterizedTest @ValueSource(ints = {0, 1, 2, 3, 7, 10}) - @Unhealthy("HDDS-9274") public void testGetKeyStreamWithTombstone(int numberOfFiles) throws RocksDBException, IOException, NativeLibraryNotLoadedException { - 
assumeTrue(NativeLibraryLoader.getInstance() - .loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); + assumeTrue(ManagedRawSSTFileReader.loadLibrary()); Pair, List> data = createDummyData(numberOfFiles); List files = data.getRight(); SortedMap keys = data.getLeft(); - ExecutorService executorService = new ThreadPoolExecutor(0, - 2, 60, TimeUnit.SECONDS, - new SynchronousQueue<>(), new ThreadFactoryBuilder() - .setNameFormat("snapshot-diff-manager-sst-dump-tool-TID-%d") - .build(), new ThreadPoolExecutor.DiscardPolicy()); - ManagedSSTDumpTool sstDumpTool = - new ManagedSSTDumpTool(executorService, 256); // Getting every possible combination of 2 elements from the sampled keys. // Reading the sst file lying within the given bounds and // validating the keys read from the sst file. List> bounds = TestUtils.getTestingBounds(keys); - try { - for (Optional lowerBound : bounds) { - for (Optional upperBound : bounds) { - // Calculating the expected keys which lie in the given boundary. - Map keysInBoundary = - keys.entrySet().stream().filter(entry -> lowerBound - .map(l -> entry.getKey().compareTo(l) >= 0) - .orElse(true) && - upperBound.map(u -> entry.getKey().compareTo(u) < 0) - .orElse(true)) - .collect(Collectors.toMap(Map.Entry::getKey, - Map.Entry::getValue)); - try (Stream keyStream = new SstFileSetReader(files) - .getKeyStreamWithTombstone(sstDumpTool, lowerBound.orElse(null), - upperBound.orElse(null))) { - keyStream.forEach( - key -> { - assertNotNull(keysInBoundary.remove(key)); - }); - } - assertEquals(0, keysInBoundary.size()); + for (Optional lowerBound : bounds) { + for (Optional upperBound : bounds) { + // Calculating the expected keys which lie in the given boundary. + Map keysInBoundary = + keys.entrySet().stream().filter(entry -> lowerBound + .map(l -> entry.getKey().compareTo(l) >= 0) + .orElse(true) && + upperBound.map(u -> entry.getKey().compareTo(u) < 0) + .orElse(true)) + .collect(Collectors.toMap(Map.Entry::getKey, + Map.Entry::getValue)); + try (Stream keyStream = new SstFileSetReader(files) + .getKeyStreamWithTombstone(lowerBound.orElse(null), + upperBound.orElse(null))) { + keyStream.forEach( + key -> { + assertNotNull(keysInBoundary.remove(key)); + }); } + assertEquals(0, keysInBoundary.size()); } - } finally { - executorService.shutdown(); } } } diff --git a/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml index 50f349186089..dc08720c9687 100644 --- a/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml @@ -51,4 +51,9 @@ + + + + + diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/FetchMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/FetchMetrics.java new file mode 100644 index 000000000000..0778b9a30dc3 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/FetchMetrics.java @@ -0,0 +1,220 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
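The FetchMetrics class introduced in this file walks the platform MBeanServer and serializes every readable attribute to JSON. A minimal sketch of the same JMX query pattern (the java.lang:* query is only an example; FetchMetrics falls back to *:* when no query is given):

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanAttributeInfo;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public final class JmxQuerySketch {
      public static void main(String[] args) throws Exception {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        for (ObjectName name : server.queryNames(new ObjectName("java.lang:*"), null)) {
          for (MBeanAttributeInfo attr : server.getMBeanInfo(name).getAttributes()) {
            if (!attr.isReadable()) {
              continue;
            }
            try {
              System.out.println(name + " / " + attr.getName() + " = "
                  + server.getAttribute(name, attr.getName()));
            } catch (Exception e) {
              // Some attributes cannot be fetched at runtime; skip them, as FetchMetrics does.
            }
          }
        }
      }
    }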
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm; + +import com.fasterxml.jackson.core.JsonEncoding; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonGenerator; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.lang.reflect.Array; +import java.nio.charset.StandardCharsets; +import java.util.Iterator; +import java.util.Set; +import javax.management.AttributeNotFoundException; +import javax.management.InstanceNotFoundException; +import javax.management.IntrospectionException; +import javax.management.MBeanAttributeInfo; +import javax.management.MBeanException; +import javax.management.MBeanInfo; +import javax.management.MBeanServer; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; +import javax.management.QueryExp; +import javax.management.ReflectionException; +import javax.management.RuntimeErrorException; +import javax.management.RuntimeMBeanException; +import javax.management.openmbean.CompositeData; +import javax.management.openmbean.CompositeType; +import javax.management.openmbean.TabularData; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Class used to fetch metrics from MBeanServer. + */ +public class FetchMetrics { + private static final Logger LOG = LoggerFactory.getLogger(FetchMetrics.class); + private transient MBeanServer mBeanServer; + private transient JsonFactory jsonFactory; + + public FetchMetrics() { + this.mBeanServer = ManagementFactory.getPlatformMBeanServer(); + this.jsonFactory = new JsonFactory(); + } + + public String getMetrics(String qry) { + try { + JsonGenerator jg = null; + ByteArrayOutputStream opStream = new ByteArrayOutputStream(); + + try { + jg = this.jsonFactory.createGenerator(opStream, JsonEncoding.UTF8); + jg.disable(JsonGenerator.Feature.AUTO_CLOSE_TARGET); + jg.useDefaultPrettyPrinter(); + jg.writeStartObject(); + if (qry == null) { + qry = "*:*"; + } + this.listBeans(jg, new ObjectName(qry)); + } finally { + if (jg != null) { + jg.close(); + } + } + return new String(opStream.toByteArray(), StandardCharsets.UTF_8); + } catch (IOException | MalformedObjectNameException ex) { + LOG.error("Caught an exception while processing getMetrics request", ex); + } + return null; + } + + private void listBeans(JsonGenerator jg, ObjectName qry) + throws IOException { + LOG.debug("Listing beans for " + qry); + Set names = null; + names = this.mBeanServer.queryNames(qry, (QueryExp) null); + jg.writeArrayFieldStart("beans"); + Iterator it = names.iterator(); + + while (it.hasNext()) { + ObjectName oname = (ObjectName) it.next(); + String code = ""; + + MBeanInfo minfo; + try { + minfo = this.mBeanServer.getMBeanInfo(oname); + code = minfo.getClassName(); + String prs = ""; + + try { + if ("org.apache.commons.modeler.BaseModelMBean".equals(code)) { + prs = "modelerType"; + code = (String) this.mBeanServer.getAttribute(oname, prs); + } + } catch (AttributeNotFoundException | MBeanException | RuntimeException | ReflectionException ex) { + LOG.error("getting attribute " + prs + " of " + oname + " threw an exception", ex); + } + } catch (InstanceNotFoundException var17) { + 
continue; + } catch (IntrospectionException | ReflectionException ex) { + LOG.error("Problem while trying to process JMX query: " + qry + " with MBean " + oname, ex); + continue; + } + jg.writeStartObject(); + jg.writeStringField("name", oname.toString()); + jg.writeStringField("modelerType", code); + MBeanAttributeInfo[] attrs = minfo.getAttributes(); + for (int i = 0; i < attrs.length; ++i) { + this.writeAttribute(jg, oname, attrs[i]); + } + jg.writeEndObject(); + } + jg.writeEndArray(); + } + + private void writeAttribute(JsonGenerator jg, ObjectName oname, MBeanAttributeInfo attr) throws IOException { + if (attr.isReadable()) { + String attName = attr.getName(); + if (!"modelerType".equals(attName)) { + if (attName.indexOf("=") < 0 && attName.indexOf(":") < 0 && attName.indexOf(" ") < 0) { + Object value = null; + + try { + value = this.mBeanServer.getAttribute(oname, attName); + } catch (RuntimeMBeanException var7) { + if (var7.getCause() instanceof UnsupportedOperationException) { + LOG.debug("getting attribute " + attName + " of " + oname + " threw an exception", var7); + } else { + LOG.error("getting attribute " + attName + " of " + oname + " threw an exception", var7); + } + return; + } catch (RuntimeErrorException var8) { + LOG.error("getting attribute {} of {} threw an exception", new Object[]{attName, oname, var8}); + return; + } catch (MBeanException | RuntimeException | ReflectionException ex) { + LOG.error("getting attribute " + attName + " of " + oname + " threw an exception", ex); + return; + } catch (AttributeNotFoundException | InstanceNotFoundException ex) { + return; + } + this.writeAttribute(jg, attName, value); + } + } + } + } + + private void writeAttribute(JsonGenerator jg, String attName, Object value) throws IOException { + jg.writeFieldName(attName); + this.writeObject(jg, value); + } + + private void writeObject(JsonGenerator jg, Object value) throws IOException { + if (value == null) { + jg.writeNull(); + } else { + Class c = value.getClass(); + Object entry; + if (c.isArray()) { + jg.writeStartArray(); + int len = Array.getLength(value); + + for (int j = 0; j < len; ++j) { + entry = Array.get(value, j); + this.writeObject(jg, entry); + } + + jg.writeEndArray(); + } else if (value instanceof Number) { + Number n = (Number) value; + jg.writeNumber(n.toString()); + } else if (value instanceof Boolean) { + Boolean b = (Boolean) value; + jg.writeBoolean(b); + } else if (value instanceof CompositeData) { + CompositeData cds = (CompositeData) value; + CompositeType comp = cds.getCompositeType(); + Set keys = comp.keySet(); + jg.writeStartObject(); + Iterator var7 = keys.iterator(); + + while (var7.hasNext()) { + String key = (String) var7.next(); + this.writeAttribute(jg, key, cds.get(key)); + } + + jg.writeEndObject(); + } else if (value instanceof TabularData) { + TabularData tds = (TabularData) value; + jg.writeStartArray(); + Iterator var14 = tds.values().iterator(); + + while (var14.hasNext()) { + entry = var14.next(); + this.writeObject(jg, entry); + } + jg.writeEndArray(); + } else { + jg.writeString(value.toString()); + } + } + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java index 7b5cbe9f21fc..f47abe65befd 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java +++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java @@ -342,10 +342,13 @@ private static void blockTillTaskStop(Thread balancingThread) { // NOTE: join should be called outside the lock in hierarchy // to avoid locking others waiting // wait for balancingThread to die with interrupt - balancingThread.interrupt(); LOG.info("Container Balancer waiting for {} to stop", balancingThread); try { - balancingThread.join(); + while (balancingThread.isAlive()) { + // retry interrupt every 5ms to avoid waiting when thread is sleeping + balancingThread.interrupt(); + balancingThread.join(5); + } } catch (InterruptedException exception) { Thread.currentThread().interrupt(); } @@ -383,6 +386,11 @@ public void saveConfiguration(ContainerBalancerConfiguration configuration, .build()); } + @VisibleForTesting + public ContainerBalancerConfiguration getConfig() { + return this.config; + } + private void validateConfiguration(ContainerBalancerConfiguration conf) throws InvalidContainerBalancerConfigurationException { // maxSizeEnteringTarget and maxSizeLeavingSource should by default be diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java index 7e2ba2fd0125..e275d345a5a7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java @@ -338,6 +338,10 @@ public Duration getMoveReplicationTimeout() { return Duration.ofMillis(moveReplicationTimeout); } + public void setMoveReplicationTimeout(Duration duration) { + this.moveReplicationTimeout = duration.toMillis(); + } + public void setMoveReplicationTimeout(long millis) { this.moveReplicationTimeout = millis; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java index 6350c3c76194..684df784c279 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java @@ -152,11 +152,16 @@ public boolean canSizeLeaveSource(DatanodeDetails source, long size) { if (sizeLeavingNode.containsKey(source)) { long sizeLeavingAfterMove = sizeLeavingNode.get(source) + size; //size can be moved out of source datanode only when the following - //two condition are met. - //1 sizeLeavingAfterMove does not succeed the configured + //three conditions are met. 
+ //1 size should be greater than zero bytes + //2 sizeLeavingAfterMove does not succeed the configured // MaxSizeLeavingTarget - //2 after subtracting sizeLeavingAfterMove, the usage is bigger + //3 after subtracting sizeLeavingAfterMove, the usage is bigger // than or equal to lowerLimit + if (size <= 0) { + LOG.debug("{} bytes container cannot leave datanode {}", size, source.getUuidString()); + return false; + } if (sizeLeavingAfterMove > config.getMaxSizeLeavingSource()) { LOG.debug("{} bytes cannot leave datanode {} because 'size.leaving" + ".source.max' limit is {} and {} bytes have already left.", diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java index a3661243be69..32310ef9e7bf 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java @@ -294,6 +294,7 @@ public synchronized void start() { if (!isRunning()) { LOG.info("Starting Replication Monitor Thread."); running = true; + metrics = ReplicationManagerMetrics.create(this); if (rmConf.isLegacyEnabled()) { legacyReplicationManager.setMetrics(metrics); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java index 5c3ee4e29aec..eb75db9bd504 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java @@ -235,10 +235,15 @@ public ReplicationManagerMetrics(ReplicationManager manager) { } public static ReplicationManagerMetrics create(ReplicationManager manager) { - return DefaultMetricsSystem.instance().register(METRICS_SOURCE_NAME, - "SCM Replication manager (closed container replication) related " - + "metrics", - new ReplicationManagerMetrics(manager)); + ReplicationManagerMetrics replicationManagerMetrics = (ReplicationManagerMetrics) + DefaultMetricsSystem.instance().getSource(METRICS_SOURCE_NAME); + if (replicationManagerMetrics == null) { + return DefaultMetricsSystem.instance().register(METRICS_SOURCE_NAME, + "SCM Replication manager (closed container replication) related " + + "metrics", + new ReplicationManagerMetrics(manager)); + } + return replicationManagerMetrics; } @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java index 15163bf3e6a6..9d65eae06b15 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java @@ -17,12 +17,13 @@ package org.apache.hadoop.hdds.scm.ha; -import com.google.protobuf.ByteString; import com.google.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.SCMRatisResponseProto; import org.apache.hadoop.hdds.scm.ha.io.CodecFactory; import org.apache.ratis.protocol.Message; import org.apache.ratis.protocol.RaftClientReply; +import 
org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations; /** * Represents the response from RatisServer. @@ -72,13 +73,11 @@ public static Message encode(final Object result) } final Class type = result.getClass(); - final ByteString value = CodecFactory.getCodec(type).serialize(result); - final SCMRatisResponseProto response = SCMRatisResponseProto.newBuilder() - .setType(type.getName()).setValue(value).build(); - return Message.valueOf( - org.apache.ratis.thirdparty.com.google.protobuf.ByteString.copyFrom( - response.toByteArray())); + .setType(type.getName()) + .setValue(CodecFactory.getCodec(type).serialize(result)) + .build(); + return Message.valueOf(UnsafeByteOperations.unsafeWrap(response.toByteString().asReadOnlyByteBuffer())); } public static SCMRatisResponse decode(RaftClientReply reply) @@ -87,14 +86,13 @@ public static SCMRatisResponse decode(RaftClientReply reply) return new SCMRatisResponse(reply.getException()); } - final byte[] response = reply.getMessage().getContent().toByteArray(); + final ByteString response = reply.getMessage().getContent(); - if (response.length == 0) { + if (response.isEmpty()) { return new SCMRatisResponse(); } - final SCMRatisResponseProto responseProto = SCMRatisResponseProto - .parseFrom(response); + final SCMRatisResponseProto responseProto = SCMRatisResponseProto.parseFrom(response.toByteArray()); try { final Class type = ReflectionUtil.getClass(responseProto.getType()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java index a5583b48b107..9d9bf07fda3b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java @@ -440,7 +440,7 @@ public void close() throws IOException { transactionBuffer.close(); HadoopExecutors. 
shutdown(installSnapshotExecutor, LOG, 5, TimeUnit.SECONDS); - } else { + } else if (!scm.isStopped()) { scm.shutDown("scm statemachine is closed by ratis, terminate SCM"); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java index fbfbb49c2521..7b10f60a5755 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java @@ -35,6 +35,6 @@ public interface DatanodeAdminMonitor extends Runnable { void stopMonitoring(DatanodeDetails dn); Set getTrackedNodes(); void setMetrics(NodeDecommissionMetrics metrics); - Map> getContainersReplicatedOnNode(DatanodeDetails dn) + Map> getContainersPendingReplication(DatanodeDetails dn) throws NodeNotFoundException; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java index d7975ff1e58e..23bf41dc83e8 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java @@ -494,7 +494,8 @@ private boolean checkContainersReplicatedOnNode(TrackedNode dn) return underReplicated == 0 && unclosed == 0; } - public Map> getContainersReplicatedOnNode(DatanodeDetails dn) { + @Override + public Map> getContainersPendingReplication(DatanodeDetails dn) { Iterator iterator = trackedNodes.iterator(); while (iterator.hasNext()) { TrackedNode trackedNode = iterator.next(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java index 7893e90812dc..ab296fc52bf8 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java @@ -354,5 +354,4 @@ public int hashCode() { public boolean equals(Object obj) { return super.equals(obj); } - } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java index 38e59b89e767..42a43ad589d8 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java @@ -21,10 +21,14 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; import 
org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; @@ -42,6 +46,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; @@ -56,6 +61,7 @@ public class NodeDecommissionManager { private final DatanodeAdminMonitor monitor; private final NodeManager nodeManager; + private ContainerManager containerManager; private final SCMContext scmContext; private final boolean useHostnames; @@ -252,10 +258,11 @@ private boolean validateDNPortMatch(int port, DatanodeDetails dn) { return false; } - public NodeDecommissionManager(OzoneConfiguration config, NodeManager nm, + public NodeDecommissionManager(OzoneConfiguration config, NodeManager nm, ContainerManager cm, SCMContext scmContext, EventPublisher eventQueue, ReplicationManager rm) { this.nodeManager = nm; + this.containerManager = cm; this.scmContext = scmContext; executor = Executors.newScheduledThreadPool(1, @@ -294,9 +301,9 @@ public NodeDecommissionManager(OzoneConfiguration config, NodeManager nm, TimeUnit.SECONDS); } - public Map> getContainersReplicatedOnNode(DatanodeDetails dn) + public Map> getContainersPendingReplication(DatanodeDetails dn) throws NodeNotFoundException { - return getMonitor().getContainersReplicatedOnNode(dn); + return getMonitor().getContainersPendingReplication(dn); } @VisibleForTesting @@ -305,9 +312,21 @@ public DatanodeAdminMonitor getMonitor() { } public synchronized List decommissionNodes( - List nodes) { + List nodes, boolean force) { List errors = new ArrayList<>(); List dns = mapHostnamesToDatanodes(nodes, errors); + // add check for fail-early if force flag is not set + if (!force) { + LOG.info("Force flag = {}. Checking if decommission is possible for dns: {}", force, dns); + boolean decommissionPossible = checkIfDecommissionPossible(dns, errors); + if (!decommissionPossible) { + LOG.error("Cannot decommission nodes as sufficient node are not available."); + errors.add(new DatanodeAdminError("AllHosts", "Sufficient nodes are not available.")); + return errors; + } + } else { + LOG.info("Force flag = {}. Skip checking if decommission is possible for dns: {}", force, dns); + } for (DatanodeDetails dn : dns) { try { startDecommission(dn); @@ -368,6 +387,61 @@ public synchronized void startDecommission(DatanodeDetails dn) } } + private synchronized boolean checkIfDecommissionPossible(List dns, List errors) { + int numDecom = dns.size(); + List validDns = new ArrayList<>(dns); + int inServiceTotal = nodeManager.getNodeCount(NodeStatus.inServiceHealthy()); + for (DatanodeDetails dn : dns) { + try { + NodeStatus nodeStatus = getNodeStatus(dn); + NodeOperationalState opState = nodeStatus.getOperationalState(); + if (opState != NodeOperationalState.IN_SERVICE) { + numDecom--; + validDns.remove(dn); + } + } catch (NodeNotFoundException ex) { + numDecom--; + validDns.remove(dn); + } + } + + for (DatanodeDetails dn : validDns) { + Set containers; + try { + containers = nodeManager.getContainers(dn); + } catch (NodeNotFoundException ex) { + LOG.warn("The host {} was not found in SCM. 
Ignoring the request to " + + "decommission it", dn.getHostName()); + continue; // ignore the DN and continue to next one + } + + for (ContainerID cid : containers) { + ContainerInfo cif; + try { + cif = containerManager.getContainer(cid); + } catch (ContainerNotFoundException ex) { + LOG.warn("Could not find container info for container {}.", cid); + continue; // ignore the container and continue to next one + } + synchronized (cif) { + if (cif.getState().equals(HddsProtos.LifeCycleState.DELETED) || + cif.getState().equals(HddsProtos.LifeCycleState.DELETING)) { + continue; + } + int reqNodes = cif.getReplicationConfig().getRequiredNodes(); + if ((inServiceTotal - numDecom) < reqNodes) { + LOG.info("Cannot decommission nodes. Tried to decommission {} nodes of which valid nodes = {}. " + + "Cluster state: In-service nodes = {}, nodes required for replication = {}. " + + "Failing due to datanode : {}, container : {}", + dns.size(), numDecom, inServiceTotal, reqNodes, dn, cid); + return false; + } + } + } + } + return true; + } + public synchronized List recommissionNodes( List nodes) { List errors = new ArrayList<>(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java index 7b1d6dd27d3a..3aff2f456e4f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java @@ -261,5 +261,4 @@ public int compareTo(NodeStatus o) { } return order; } - } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java deleted file mode 100644 index c0f46f15fe20..000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -package org.apache.hadoop.hdds.scm.node.states; - -import java.util.HashSet; -import java.util.Set; -import java.util.UUID; - -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .NO_SUCH_DATANODE; - -/** - * This data structure maintains the list of containers that is on a datanode. - * This information is built from the DN container reports. 
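The fail-early path added to NodeDecommissionManager.decommissionNodes above boils down to a per-container comparison between the required replica count and the in-service nodes that would remain after decommissioning. A stripped-down sketch of that rule (the helper name and the numbers are purely illustrative, not part of the Ozone API):

    public final class DecommissionCheckSketch {
      // Decommission can proceed only if every container would still have enough
      // in-service datanodes left for its required replica count.
      static boolean decommissionPossible(int inServiceHealthy, int nodesToDecommission, int requiredReplicas) {
        return inServiceHealthy - nodesToDecommission >= requiredReplicas;
      }

      public static void main(String[] args) {
        // 5 healthy datanodes, 2 leaving, a RATIS/THREE container needs 3 replicas -> allowed.
        System.out.println(decommissionPossible(5, 2, 3));  // true
        // 4 healthy datanodes, 2 leaving, 3 replicas still required -> rejected unless the force flag is set.
        System.out.println(decommissionPossible(4, 2, 3));  // false
      }
    }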
- */ -public class Node2ContainerMap extends Node2ObjectsMap { - - /** - * Constructs a Node2ContainerMap Object. - */ - public Node2ContainerMap() { - super(); - } - - /** - * Returns null if there no containers associated with this datanode ID. - * - * @param datanode - UUID - * @return Set of containers or Null. - */ - public Set getContainers(UUID datanode) { - return getObjects(datanode); - } - - /** - * Insert a new datanode into Node2Container Map. - * - * @param datanodeID -- Datanode UUID - * @param containerIDs - List of ContainerIDs. - */ - @Override - public void insertNewDatanode(UUID datanodeID, Set containerIDs) - throws SCMException { - super.insertNewDatanode(datanodeID, containerIDs); - } - - /** - * Updates the Container list of an existing DN. - * - * @param datanodeID - UUID of DN. - * @param containers - Set of Containers tht is present on DN. - * @throws SCMException - if we don't know about this datanode, for new DN - * use addDatanodeInContainerMap. - */ - public void setContainersForDatanode(UUID datanodeID, - Set containers) throws SCMException { - Preconditions.checkNotNull(datanodeID); - Preconditions.checkNotNull(containers); - if (dn2ObjectMap - .computeIfPresent(datanodeID, (k, v) -> new HashSet<>(containers)) - == null) { - throw new SCMException("No such datanode", NO_SUCH_DATANODE); - } - } - - @VisibleForTesting - @Override - public int size() { - return dn2ObjectMap.size(); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java index 6533cb807642..35107829f883 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java @@ -18,10 +18,14 @@ package org.apache.hadoop.hdds.scm.node.states; +import jakarta.annotation.Nonnull; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; @@ -34,11 +38,13 @@ *

TODO: this information needs to be regenerated from pipeline reports * on SCM restart */ -public class Node2PipelineMap extends Node2ObjectsMap { +public class Node2PipelineMap { + private final Map> dn2PipelineMap = new ConcurrentHashMap<>(); - /** Constructs a Node2PipelineMap Object. */ + /** + * Constructs a Node2PipelineMap Object. + */ public Node2PipelineMap() { - super(); } /** @@ -47,17 +53,19 @@ public Node2PipelineMap() { * @param datanode - UUID * @return Set of pipelines or Null. */ - public Set getPipelines(UUID datanode) { - return getObjects(datanode); + public Set getPipelines(@Nonnull UUID datanode) { + final Set s = dn2PipelineMap.get(datanode); + return s != null ? new HashSet<>(s) : Collections.emptySet(); } /** * Return 0 if there are no pipelines associated with this datanode ID. + * * @param datanode - UUID * @return Number of pipelines or 0. */ public int getPipelinesCount(UUID datanode) { - return getObjects(datanode).size(); + return getPipelines(datanode).size(); } /** @@ -65,18 +73,18 @@ public int getPipelinesCount(UUID datanode) { * * @param pipeline Pipeline to be added */ - public synchronized void addPipeline(Pipeline pipeline) { + public void addPipeline(Pipeline pipeline) { for (DatanodeDetails details : pipeline.getNodes()) { UUID dnId = details.getUuid(); - dn2ObjectMap.computeIfAbsent(dnId, k -> ConcurrentHashMap.newKeySet()) + dn2PipelineMap.computeIfAbsent(dnId, k -> ConcurrentHashMap.newKeySet()) .add(pipeline.getId()); } } - public synchronized void removePipeline(Pipeline pipeline) { + public void removePipeline(Pipeline pipeline) { for (DatanodeDetails details : pipeline.getNodes()) { UUID dnId = details.getUuid(); - dn2ObjectMap.computeIfPresent(dnId, + dn2PipelineMap.computeIfPresent(dnId, (k, v) -> { v.remove(pipeline.getId()); return v; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java index 8336bce5eae7..163f42351032 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.Collections; -import java.util.Comparator; import java.util.List; import java.util.Set; import java.util.stream.Collectors; @@ -244,7 +243,6 @@ private List filterPipelineEngagement() { getPipelineStateManager(), d))) .filter(d -> (d.getPipelines() >= getNodeManager().pipelineLimit(d.getDn()))) - .sorted(Comparator.comparingInt(DnWithPipelines::getPipelines)) .map(d -> d.getDn()) .collect(Collectors.toList()); return excluded; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java index 0914cdd90b22..e77e2aebb31f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteKeyBlocksResultProto; import 
org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksRequestProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksResponseProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.GetClusterTreeResponseProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationRequest; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationResponse; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SortDatanodesRequestProto; @@ -43,6 +44,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.ha.RatisUtil; +import org.apache.hadoop.hdds.scm.net.InnerNode; import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; @@ -159,6 +161,10 @@ private SCMBlockLocationResponse processMessage( request.getSortDatanodesRequest(), request.getVersion() )); break; + case GetClusterTree: + response.setGetClusterTreeResponse( + getClusterTree(request.getVersion())); + break; default: // Should never happen throw new IOException("Unknown Operation " + request.getCmdType() + @@ -276,4 +282,13 @@ public SortDatanodesResponseProto sortDatanodes( throw new ServiceException(ex); } } + + public GetClusterTreeResponseProto getClusterTree(int clientVersion) + throws IOException { + GetClusterTreeResponseProto.Builder resp = + GetClusterTreeResponseProto.newBuilder(); + InnerNode clusterTree = impl.getNetworkTopology(); + resp.setClusterTree(clusterTree.toProtobuf(clientVersion).getInnerNode()); + return resp.build(); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java index f402b9309fe4..16a8cbd5a4f5 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -72,6 +72,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetPipelineResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetSafeModeRuleStatusesRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetSafeModeRuleStatusesResponseProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetMetricsRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetMetricsResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineRequestProto; @@ -714,6 +716,12 @@ public ScmContainerLocationResponse processRequest( .setDecommissionScmResponse(decommissionScm( 
request.getDecommissionScmRequest())) .build(); + case GetMetrics: + return ScmContainerLocationResponse.newBuilder() + .setCmdType(request.getCmdType()) + .setStatus(Status.OK) + .setGetMetricsResponse(getMetrics(request.getGetMetricsRequest())) + .build(); default: throw new IllegalArgumentException( "Unknown command type: " + request.getCmdType()); @@ -1099,6 +1107,12 @@ public StartContainerBalancerResponseProto startContainerBalancer( Optional maxSizeToMovePerIterationInGB = Optional.empty(); Optional maxSizeEnteringTargetInGB = Optional.empty(); Optional maxSizeLeavingSourceInGB = Optional.empty(); + Optional balancingInterval = Optional.empty(); + Optional moveTimeout = Optional.empty(); + Optional moveReplicationTimeout = Optional.empty(); + Optional networkTopologyEnable = Optional.empty(); + Optional includeNodes = Optional.empty(); + Optional excludeNodes = Optional.empty(); if (request.hasThreshold()) { threshold = Optional.of(request.getThreshold()); @@ -1124,19 +1138,47 @@ public StartContainerBalancerResponseProto startContainerBalancer( maxSizeToMovePerIterationInGB = Optional.of(request.getMaxSizeToMovePerIterationInGB()); } + if (request.hasMaxSizeEnteringTargetInGB()) { maxSizeEnteringTargetInGB = Optional.of(request.getMaxSizeEnteringTargetInGB()); } + if (request.hasMaxSizeLeavingSourceInGB()) { maxSizeLeavingSourceInGB = Optional.of(request.getMaxSizeLeavingSourceInGB()); } + if (request.hasBalancingInterval()) { + balancingInterval = Optional.of(request.getBalancingInterval()); + } + + if (request.hasMoveTimeout()) { + moveTimeout = Optional.of(request.getMoveTimeout()); + } + + if (request.hasMoveReplicationTimeout()) { + moveReplicationTimeout = Optional.of(request.getMoveReplicationTimeout()); + } + + if (request.hasNetworkTopologyEnable()) { + networkTopologyEnable = Optional.of(request.getNetworkTopologyEnable()); + } + + if (request.hasIncludeNodes()) { + includeNodes = Optional.of(request.getIncludeNodes()); + } + + if (request.hasExcludeNodes()) { + excludeNodes = Optional.of(request.getExcludeNodes()); + } + return impl.startContainerBalancer(threshold, iterations, maxDatanodesPercentageToInvolvePerIteration, maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, - maxSizeLeavingSourceInGB); + maxSizeLeavingSourceInGB, balancingInterval, moveTimeout, + moveReplicationTimeout, networkTopologyEnable, includeNodes, + excludeNodes); } public StopContainerBalancerResponseProto stopContainerBalancer( @@ -1157,7 +1199,7 @@ public ContainerBalancerStatusResponseProto getContainerBalancerStatus( public DecommissionNodesResponseProto decommissionNodes( DecommissionNodesRequestProto request) throws IOException { List errors = - impl.decommissionNodes(request.getHostsList()); + impl.decommissionNodes(request.getHostsList(), request.getForce()); DecommissionNodesResponseProto.Builder response = DecommissionNodesResponseProto.newBuilder(); for (DatanodeAdminError e : errors) { @@ -1287,4 +1329,8 @@ public DecommissionScmResponseProto decommissionScm( return impl.decommissionScm( request.getScmId()); } + + public GetMetricsResponseProto getMetrics(GetMetricsRequestProto request) throws IOException { + return GetMetricsResponseProto.newBuilder().setMetricsJson(impl.getMetrics(request.getQuery())).build(); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java index 80b8257c40b2..02bc10ba6e40 100644 --- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java @@ -22,6 +22,7 @@ import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MutableCounterLong; +import org.apache.hadoop.metrics2.lib.MutableGaugeLong; /** * This class is used for maintaining SafeMode metric information, which can @@ -33,16 +34,16 @@ public class SafeModeMetrics { // These all values will be set to some values when safemode is enabled. - private @Metric MutableCounterLong + private @Metric MutableGaugeLong numContainerWithOneReplicaReportedThreshold; private @Metric MutableCounterLong currentContainersWithOneReplicaReportedCount; // When hdds.scm.safemode.pipeline-availability.check is set then only // below metrics will have some values, otherwise they will be zero. - private @Metric MutableCounterLong numHealthyPipelinesThreshold; + private @Metric MutableGaugeLong numHealthyPipelinesThreshold; private @Metric MutableCounterLong currentHealthyPipelinesCount; - private @Metric MutableCounterLong + private @Metric MutableGaugeLong numPipelinesWithAtleastOneReplicaReportedThreshold; private @Metric MutableCounterLong currentPipelinesWithAtleastOneReplicaReportedCount; @@ -55,7 +56,7 @@ public static SafeModeMetrics create() { } public void setNumHealthyPipelinesThreshold(long val) { - this.numHealthyPipelinesThreshold.incr(val); + this.numHealthyPipelinesThreshold.set(val); } public void incCurrentHealthyPipelinesCount() { @@ -63,7 +64,7 @@ public void incCurrentHealthyPipelinesCount() { } public void setNumPipelinesWithAtleastOneReplicaReportedThreshold(long val) { - this.numPipelinesWithAtleastOneReplicaReportedThreshold.incr(val); + this.numPipelinesWithAtleastOneReplicaReportedThreshold.set(val); } public void incCurrentHealthyPipelinesWithAtleastOneReplicaReportedCount() { @@ -71,35 +72,35 @@ public void incCurrentHealthyPipelinesWithAtleastOneReplicaReportedCount() { } public void setNumContainerWithOneReplicaReportedThreshold(long val) { - this.numContainerWithOneReplicaReportedThreshold.incr(val); + this.numContainerWithOneReplicaReportedThreshold.set(val); } public void incCurrentContainersWithOneReplicaReportedCount() { this.currentContainersWithOneReplicaReportedCount.incr(); } - public MutableCounterLong getNumHealthyPipelinesThreshold() { + MutableGaugeLong getNumHealthyPipelinesThreshold() { return numHealthyPipelinesThreshold; } - public MutableCounterLong getCurrentHealthyPipelinesCount() { + MutableCounterLong getCurrentHealthyPipelinesCount() { return currentHealthyPipelinesCount; } - public MutableCounterLong + MutableGaugeLong getNumPipelinesWithAtleastOneReplicaReportedThreshold() { return numPipelinesWithAtleastOneReplicaReportedThreshold; } - public MutableCounterLong getCurrentPipelinesWithAtleastOneReplicaCount() { + MutableCounterLong getCurrentPipelinesWithAtleastOneReplicaCount() { return currentPipelinesWithAtleastOneReplicaReportedCount; } - public MutableCounterLong getNumContainerWithOneReplicaReportedThreshold() { + MutableGaugeLong getNumContainerWithOneReplicaReportedThreshold() { return numContainerWithOneReplicaReportedThreshold; } - public MutableCounterLong getCurrentContainersWithOneReplicaReportedCount() { + MutableCounterLong getCurrentContainersWithOneReplicaReportedCount() { return currentContainersWithOneReplicaReportedCount; } diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationMetrics.java index fcd52d0ebd76..1c1a1c624502 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationMetrics.java @@ -67,7 +67,7 @@ private RootCARotationMetrics(MetricsSystem ms) { this.ms = ms; } - public MutableGaugeLong getSuccessTimeInNs() { + MutableGaugeLong getSuccessTimeInNs() { return successTimeInNs; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java index 69f190c7fbd8..79002e27a2e6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java @@ -27,6 +27,7 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.concurrent.TimeoutException; import org.apache.commons.lang3.StringUtils; @@ -73,6 +74,7 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_HANDLER_COUNT_KEY; import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.IO_EXCEPTION; import static org.apache.hadoop.hdds.scm.net.NetConstants.NODE_COST_DEFAULT; +import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; import static org.apache.hadoop.hdds.scm.server.StorageContainerManager.startRpcServer; import static org.apache.hadoop.hdds.server.ServerUtils.getRemoteUserName; import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress; @@ -203,15 +205,19 @@ public List allocateBlock( AllocatedBlock block = scm.getScmBlockManager() .allocateBlock(size, replicationConfig, owner, excludeList); if (block != null) { - blocks.add(block); // Sort the datanodes if client machine is specified final Node client = getClientNode(clientMachine); if (client != null) { final List nodes = block.getPipeline().getNodes(); final List sorted = scm.getClusterMap() .sortByDistanceCost(client, nodes, nodes.size()); - block.getPipeline().setNodesInOrder(sorted); + if (!Objects.equals(sorted, block.getPipeline().getNodesInOrder())) { + block = block.toBuilder() + .setPipeline(block.getPipeline().copyWithNodesInOrder(sorted)) + .build(); + } } + blocks.add(block); } } @@ -412,6 +418,11 @@ private Node getOtherNode(String clientMachine) { return null; } + @Override + public InnerNode getNetworkTopology() { + return (InnerNode) scm.getClusterMap().getNode(ROOT); + } + @Override public AuditMessage buildAuditMessageForSuccess( AuditAction op, Map auditMap) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index 13bef8590b79..ecfb92104da2 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -22,7 +22,6 @@ package org.apache.hadoop.hdds.scm.server; import com.google.common.annotations.VisibleForTesting; -import 
com.google.common.base.Preconditions; import com.google.common.base.Strings; import com.google.common.collect.Maps; import com.google.protobuf.BlockingService; @@ -63,6 +62,7 @@ import org.apache.hadoop.hdds.scm.ha.SCMRatisServer; import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl; import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo; +import org.apache.hadoop.hdds.scm.FetchMetrics; import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -99,6 +99,7 @@ import java.io.IOException; import java.net.InetSocketAddress; +import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -591,7 +592,7 @@ public void deleteContainer(long containerID) throws IOException { @Override public Map> getContainersOnDecomNode(DatanodeDetails dn) throws IOException { try { - return scm.getScmDecommissionManager().getContainersReplicatedOnNode(dn); + return scm.getScmDecommissionManager().getContainersPendingReplication(dn); } catch (NodeNotFoundException e) { throw new IOException("Failed to get containers list. Unable to find required node", e); } @@ -645,11 +646,11 @@ public HddsProtos.Node queryNode(UUID uuid) } @Override - public List decommissionNodes(List nodes) + public List decommissionNodes(List nodes, boolean force) throws IOException { try { getScm().checkAdminAccess(getRemoteUser(), false); - return scm.getScmDecommissionManager().decommissionNodes(nodes); + return scm.getScmDecommissionManager().decommissionNodes(nodes, force); } catch (Exception ex) { LOG.error("Failed to decommission nodes", ex); throw ex; @@ -1046,67 +1047,130 @@ public StartContainerBalancerResponseProto startContainerBalancer( Optional maxDatanodesPercentageToInvolvePerIteration, Optional maxSizeToMovePerIterationInGB, Optional maxSizeEnteringTarget, - Optional maxSizeLeavingSource) throws IOException { + Optional maxSizeLeavingSource, + Optional balancingInterval, + Optional moveTimeout, + Optional moveReplicationTimeout, + Optional networkTopologyEnable, + Optional includeNodes, + Optional excludeNodes) throws IOException { getScm().checkAdminAccess(getRemoteUser(), false); ContainerBalancerConfiguration cbc = scm.getConfiguration().getObject(ContainerBalancerConfiguration.class); Map auditMap = Maps.newHashMap(); - if (threshold.isPresent()) { - double tsd = threshold.get(); - auditMap.put("threshold", String.valueOf(tsd)); - Preconditions.checkState(tsd >= 0.0D && tsd < 100.0D, - "threshold should be specified in range [0.0, 100.0)."); - cbc.setThreshold(tsd); - } - if (maxSizeToMovePerIterationInGB.isPresent()) { - long mstm = maxSizeToMovePerIterationInGB.get(); - auditMap.put("maxSizeToMovePerIterationInGB", String.valueOf(mstm)); - Preconditions.checkState(mstm > 0, - "maxSizeToMovePerIterationInGB must be positive."); - cbc.setMaxSizeToMovePerIteration(mstm * OzoneConsts.GB); - } - if (maxDatanodesPercentageToInvolvePerIteration.isPresent()) { - int mdti = maxDatanodesPercentageToInvolvePerIteration.get(); - auditMap.put("maxDatanodesPercentageToInvolvePerIteration", - String.valueOf(mdti)); - Preconditions.checkState(mdti >= 0, - "maxDatanodesPercentageToInvolvePerIteration must be " + - "greater than equal to zero."); - Preconditions.checkState(mdti <= 100, - "maxDatanodesPercentageToInvolvePerIteration must be " + - "lesser than or equal to 100."); - cbc.setMaxDatanodesPercentageToInvolvePerIteration(mdti); - } - if 
(iterations.isPresent()) { - int i = iterations.get(); - auditMap.put("iterations", String.valueOf(i)); - Preconditions.checkState(i > 0 || i == -1, - "number of iterations must be positive or" + + try { + if (threshold.isPresent()) { + double tsd = threshold.get(); + auditMap.put("threshold", String.valueOf(tsd)); + if (tsd < 0.0D || tsd >= 100.0D) { + throw new IOException("Threshold should be specified in the range [0.0, 100.0)."); + } + cbc.setThreshold(tsd); + } + + if (maxSizeToMovePerIterationInGB.isPresent()) { + long mstm = maxSizeToMovePerIterationInGB.get(); + auditMap.put("maxSizeToMovePerIterationInGB", String.valueOf(mstm)); + if (mstm <= 0) { + throw new IOException("Max Size To Move Per Iteration In GB must be positive."); + } + cbc.setMaxSizeToMovePerIteration(mstm * OzoneConsts.GB); + } + + if (maxDatanodesPercentageToInvolvePerIteration.isPresent()) { + int mdti = maxDatanodesPercentageToInvolvePerIteration.get(); + auditMap.put("maxDatanodesPercentageToInvolvePerIteration", + String.valueOf(mdti)); + if (mdti < 0 || mdti > 100) { + throw new IOException("Max Datanodes Percentage To Involve Per Iteration" + + "should be specified in the range [0, 100]"); + } + cbc.setMaxDatanodesPercentageToInvolvePerIteration(mdti); + } + + if (iterations.isPresent()) { + int i = iterations.get(); + auditMap.put("iterations", String.valueOf(i)); + if (i < -1 || i == 0) { + throw new IOException("Number of Iterations must be positive or" + " -1 (for running container balancer infinitely)."); - cbc.setIterations(i); - } + } + cbc.setIterations(i); + } - if (maxSizeEnteringTarget.isPresent()) { - long mset = maxSizeEnteringTarget.get(); - auditMap.put("maxSizeEnteringTarget", String.valueOf(mset)); - Preconditions.checkState(mset > 0, - "maxSizeEnteringTarget must be " + + if (maxSizeEnteringTarget.isPresent()) { + long mset = maxSizeEnteringTarget.get(); + auditMap.put("maxSizeEnteringTarget", String.valueOf(mset)); + if (mset <= 0) { + throw new IOException("Max Size Entering Target must be " + "greater than zero."); - cbc.setMaxSizeEnteringTarget(mset * OzoneConsts.GB); - } + } + cbc.setMaxSizeEnteringTarget(mset * OzoneConsts.GB); + } - if (maxSizeLeavingSource.isPresent()) { - long msls = maxSizeLeavingSource.get(); - auditMap.put("maxSizeLeavingSource", String.valueOf(msls)); - Preconditions.checkState(msls > 0, - "maxSizeLeavingSource must be " + + if (maxSizeLeavingSource.isPresent()) { + long msls = maxSizeLeavingSource.get(); + auditMap.put("maxSizeLeavingSource", String.valueOf(msls)); + if (msls <= 0) { + throw new IOException("Max Size Leaving Source must be " + "greater than zero."); - cbc.setMaxSizeLeavingSource(msls * OzoneConsts.GB); - } + } + cbc.setMaxSizeLeavingSource(msls * OzoneConsts.GB); + } - ContainerBalancer containerBalancer = scm.getContainerBalancer(); - try { + if (balancingInterval.isPresent()) { + int bi = balancingInterval.get(); + auditMap.put("balancingInterval", String.valueOf(bi)); + if (bi <= 0) { + throw new IOException("Balancing Interval must be greater than zero."); + } + cbc.setBalancingInterval(Duration.ofMinutes(bi)); + } + + if (moveTimeout.isPresent()) { + int mt = moveTimeout.get(); + auditMap.put("moveTimeout", String.valueOf(mt)); + if (mt <= 0) { + throw new IOException("Move Timeout must be greater than zero."); + } + cbc.setMoveTimeout(Duration.ofMinutes(mt)); + } + + if (moveReplicationTimeout.isPresent()) { + int mrt = moveReplicationTimeout.get(); + auditMap.put("moveReplicationTimeout", String.valueOf(mrt)); + if (mrt <= 0) { + 
throw new IOException("Move Replication Timeout must be greater than zero."); + } + cbc.setMoveReplicationTimeout(Duration.ofMinutes(mrt)); + } + + if (networkTopologyEnable.isPresent()) { + Boolean nt = networkTopologyEnable.get(); + auditMap.put("networkTopologyEnable", String.valueOf(nt)); + cbc.setNetworkTopologyEnable(nt); + } + + if (includeNodes.isPresent()) { + String in = includeNodes.get(); + auditMap.put("includeNodes", (in)); + cbc.setIncludeNodes(in); + } + + if (excludeNodes.isPresent()) { + String ex = excludeNodes.get(); + auditMap.put("excludeNodes", (ex)); + cbc.setExcludeNodes(ex); + } + + ContainerBalancer containerBalancer = scm.getContainerBalancer(); containerBalancer.startBalancer(cbc); + + AUDIT.logWriteSuccess(buildAuditMessageForSuccess( + SCMAction.START_CONTAINER_BALANCER, auditMap)); + return StartContainerBalancerResponseProto.newBuilder() + .setStart(true) + .build(); } catch (IllegalContainerBalancerStateException | IOException | InvalidContainerBalancerConfigurationException e) { AUDIT.logWriteFailure(buildAuditMessageForFailure( @@ -1116,11 +1180,6 @@ public StartContainerBalancerResponseProto startContainerBalancer( .setMessage(e.getMessage()) .build(); } - AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - SCMAction.START_CONTAINER_BALANCER, auditMap)); - return StartContainerBalancerResponseProto.newBuilder() - .setStart(true) - .build(); } @Override @@ -1373,4 +1432,10 @@ public DecommissionScmResponseProto decommissionScm( } return decommissionScmResponseBuilder.build(); } + + @Override + public String getMetrics(String query) throws IOException { + FetchMetrics fetchMetrics = new FetchMetrics(); + return fetchMetrics.getMetrics(query); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java index d7d47a78b778..dab66cc51543 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMContainerMetrics.java @@ -63,7 +63,6 @@ public void unRegister() { } @Override - @SuppressWarnings("SuspiciousMethodCalls") public void getMetrics(MetricsCollector collector, boolean all) { Map stateCount = scmmxBean.getContainerStateCount(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java index b6dc6f599bd6..484a1e6f0f4b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java @@ -213,7 +213,7 @@ public static class ReportFromDatanode { private final DatanodeDetails datanodeDetails; - private final T report; + private T report; public ReportFromDatanode(DatanodeDetails datanodeDetails, T report) { this.datanodeDetails = datanodeDetails; @@ -227,6 +227,10 @@ public DatanodeDetails getDatanodeDetails() { public T getReport() { return report; } + + public void setReport(T report) { + this.report = report; + } } /** @@ -381,9 +385,11 @@ public String getEventId() { @Override public void mergeReport(ContainerReport nextReport) { if (nextReport.getType() == ContainerReportType.ICR) { - getReport().getReportList().addAll( - ((ReportFromDatanode) 
nextReport) - .getReport().getReportList()); + // To update existing report list , need to create a builder and then + // merge new reports to existing report list. + IncrementalContainerReportProto reportProto = getReport().toBuilder().addAllReport( + ((ReportFromDatanode) nextReport).getReport().getReportList()).build(); + setReport(reportProto); } } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java index 0ea2d0e9559b..9cbd6d97deda 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience.Private; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.annotation.InterfaceStability.Unstable; +import org.apache.hadoop.hdds.protocol.ReconfigureProtocol; import org.apache.hadoop.hdds.protocol.SecretKeyProtocolDatanode; import org.apache.hadoop.hdds.protocol.SecretKeyProtocolOm; import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; @@ -43,6 +44,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_DATANODE_PROTOCOL_ACL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_OM_PROTOCOL_ACL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_SCM_PROTOCOL_ACL; +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL; /** * {@link PolicyProvider} for SCM protocols. @@ -85,7 +87,10 @@ public static SCMPolicyProvider getInstance() { SecretKeyProtocolScm.class), new Service( HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_DATANODE_PROTOCOL_ACL, - SecretKeyProtocolDatanode.class) + SecretKeyProtocolDatanode.class), + new Service( + OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL, + ReconfigureProtocol.class) ); @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 11fdc0d16d79..fa67dd68dedc 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -845,7 +845,7 @@ private void initializeSystemManagers(OzoneConfiguration conf, pipelineManager, eventQueue, serviceManager, scmContext); } - scmDecommissionManager = new NodeDecommissionManager(conf, scmNodeManager, + scmDecommissionManager = new NodeDecommissionManager(conf, scmNodeManager, containerManager, scmContext, eventQueue, replicationManager); statefulServiceStateManager = StatefulServiceStateManagerImpl.newBuilder() @@ -1796,6 +1796,10 @@ public void shutDown(String message) { ExitUtils.terminate(0, message, LOG); } + public boolean isStopped() { + return isStopped.get(); + } + /** * Wait until service has completed shutdown. 
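Note on the mergeReport change in SCMDatanodeHeartbeatDispatcher above: generated protobuf messages are immutable, and the list returned by a repeated-field getter such as getReportList() on a built message cannot be modified, so the old addAll call could not work; the patch instead rebuilds the merged IncrementalContainerReportProto through toBuilder().addAllReport(...).build() and stores it with setReport. The following is only a minimal, self-contained sketch of that rebuild-through-a-builder pattern, using a plain Java stand-in class rather than the real generated proto (which is not part of this hunk).

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/** Stand-in for a generated protobuf message: immutable once built. */
final class ReportProtoSketch {
  private final List<String> reports;

  private ReportProtoSketch(List<String> reports) {
    this.reports = Collections.unmodifiableList(new ArrayList<>(reports));
  }

  List<String> getReportList() {
    return reports;  // unmodifiable, like protobuf's repeated-field getter
  }

  Builder toBuilder() {
    return new Builder(reports);
  }

  static final class Builder {
    private final List<String> reports;

    Builder(List<String> existing) {
      this.reports = new ArrayList<>(existing);
    }

    Builder addAllReport(List<String> more) {
      reports.addAll(more);
      return this;
    }

    ReportProtoSketch build() {
      return new ReportProtoSketch(reports);
    }
  }

  public static void main(String[] args) {
    ReportProtoSketch current = new Builder(Collections.singletonList("icr-1")).build();
    ReportProtoSketch next = new Builder(Collections.singletonList("icr-2")).build();

    // current.getReportList().addAll(next.getReportList());  // would throw UnsupportedOperationException

    // Rebuild through the builder instead, mirroring the dispatcher's mergeReport fix.
    ReportProtoSketch merged = current.toBuilder().addAllReport(next.getReportList()).build();
    System.out.println(merged.getReportList());  // [icr-1, icr-2]
  }
}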
*/ diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index 84f3684ab7cc..21c3f1c9a8ab 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.node.NodeStatus; -import org.apache.hadoop.hdds.scm.node.states.Node2ContainerMap; import org.apache.hadoop.hdds.scm.node.states.Node2PipelineMap; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/Node2ContainerMap.java similarity index 63% rename from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java rename to hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/Node2ContainerMap.java index 5269a7aaeb3e..507eb75c5d78 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/Node2ContainerMap.java @@ -16,37 +16,47 @@ * */ -package org.apache.hadoop.hdds.scm.node.states; +package org.apache.hadoop.hdds.scm.container; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import jakarta.annotation.Nonnull; import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.node.states.ReportResult; -import java.util.UUID; -import java.util.Set; +import java.util.Collections; +import java.util.HashSet; import java.util.Map; +import java.util.Set; import java.util.TreeSet; -import java.util.HashSet; -import java.util.Collections; - +import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.DUPLICATE_DATANODE; +import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.NO_SUCH_DATANODE; /** * This data structure maintains the list of containers that is on a datanode. * This information is built from the DN container reports. */ -public class Node2ObjectsMap { +class Node2ContainerMap { + private final Map> dn2ContainerMap = new ConcurrentHashMap<>(); - @SuppressWarnings("visibilitymodifier") - protected final Map> dn2ObjectMap; /** * Constructs a Node2ContainerMap Object. */ - public Node2ObjectsMap() { - dn2ObjectMap = new ConcurrentHashMap<>(); + Node2ContainerMap() { + super(); + } + + /** + * Returns null if there no containers associated with this datanode ID. + * + * @param datanode - UUID + * @return Set of containers or Null. + */ + public @Nonnull Set getContainers(@Nonnull UUID datanode) { + final Set s = dn2ContainerMap.get(datanode); + return s != null ? new HashSet<>(s) : Collections.emptySet(); } /** @@ -56,9 +66,8 @@ public Node2ObjectsMap() { * @param datanodeID - UUID of the Datanode. * @return True if this is tracked, false if this map does not know about it. 
*/ - public boolean isKnownDatanode(UUID datanodeID) { - Preconditions.checkNotNull(datanodeID); - return dn2ObjectMap.containsKey(datanodeID); + public boolean isKnownDatanode(@Nonnull UUID datanodeID) { + return dn2ContainerMap.containsKey(datanodeID); } /** @@ -67,15 +76,10 @@ public boolean isKnownDatanode(UUID datanodeID) { * @param datanodeID -- Datanode UUID * @param containerIDs - List of ContainerIDs. */ - @VisibleForTesting - public void insertNewDatanode(UUID datanodeID, Set containerIDs) + public void insertNewDatanode(@Nonnull UUID datanodeID, @Nonnull Set containerIDs) throws SCMException { - Preconditions.checkNotNull(containerIDs); - Preconditions.checkNotNull(datanodeID); - if (dn2ObjectMap.putIfAbsent(datanodeID, new HashSet<>(containerIDs)) - != null) { - throw new SCMException("Node already exists in the map", - DUPLICATE_DATANODE); + if (dn2ContainerMap.putIfAbsent(datanodeID, new HashSet<>(containerIDs)) != null) { + throw new SCMException("Node already exists in the map", DUPLICATE_DATANODE); } } @@ -84,32 +88,15 @@ public void insertNewDatanode(UUID datanodeID, Set containerIDs) * * @param datanodeID - Datanode ID. */ - @VisibleForTesting - public void removeDatanode(UUID datanodeID) { - Preconditions.checkNotNull(datanodeID); - dn2ObjectMap.computeIfPresent(datanodeID, (k, v) -> null); + public void removeDatanode(@Nonnull UUID datanodeID) { + dn2ContainerMap.computeIfPresent(datanodeID, (k, v) -> null); } - /** - * Returns null if there no containers associated with this datanode ID. - * - * @param datanode - UUID - * @return Set of containers or Null. - */ - Set getObjects(UUID datanode) { - Preconditions.checkNotNull(datanode); - final Set s = dn2ObjectMap.get(datanode); - return s != null ? new HashSet<>(s) : Collections.emptySet(); - } - - public ReportResult.ReportResultBuilder newBuilder() { + public @Nonnull ReportResult.ReportResultBuilder newBuilder() { return new ReportResult.ReportResultBuilder<>(); } - public ReportResult processReport(UUID datanodeID, Set objects) { - Preconditions.checkNotNull(datanodeID); - Preconditions.checkNotNull(objects); - + public @Nonnull ReportResult processReport(@Nonnull UUID datanodeID, @Nonnull Set objects) { if (!isKnownDatanode(datanodeID)) { return newBuilder() .setStatus(ReportResult.ReportStatus.NEW_DATANODE_FOUND) @@ -118,11 +105,11 @@ public ReportResult processReport(UUID datanodeID, Set objects) { } // Conditions like Zero length containers should be handled by removeAll. - Set currentSet = dn2ObjectMap.get(datanodeID); - TreeSet newObjects = new TreeSet<>(objects); + Set currentSet = dn2ContainerMap.get(datanodeID); + TreeSet newObjects = new TreeSet<>(objects); newObjects.removeAll(currentSet); - TreeSet missingObjects = new TreeSet<>(currentSet); + TreeSet missingObjects = new TreeSet<>(currentSet); missingObjects.removeAll(objects); if (newObjects.isEmpty() && missingObjects.isEmpty()) { @@ -159,8 +146,22 @@ public ReportResult processReport(UUID datanodeID, Set objects) { .build(); } - @VisibleForTesting + /** + * Updates the Container list of an existing DN. + * + * @param datanodeID - UUID of DN. + * @param containers - Set of Containers tht is present on DN. + * @throws SCMException - if we don't know about this datanode, for new DN + * use addDatanodeInContainerMap. 
+ */ + public void setContainersForDatanode(@Nonnull UUID datanodeID, @Nonnull Set containers) + throws SCMException { + if (dn2ContainerMap.computeIfPresent(datanodeID, (k, v) -> new HashSet<>(containers)) == null) { + throw new SCMException("No such datanode", NO_SUCH_DATANODE); + } + } + public int size() { - return dn2ObjectMap.size(); + return dn2ContainerMap.size(); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestNode2ContainerMap.java similarity index 99% rename from hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java rename to hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestNode2ContainerMap.java index 0aab0aeca837..92e0a2c494f5 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestNode2ContainerMap.java @@ -17,10 +17,10 @@ * */ -package org.apache.hadoop.hdds.scm.node.states; +package org.apache.hadoop.hdds.scm.container; -import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.node.states.ReportResult; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -32,9 +32,9 @@ import java.util.concurrent.ConcurrentHashMap; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.assertFalse; /** * Test classes for Node2ContainerMap. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java index 3bed3878123d..a4d7f3761202 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java @@ -55,6 +55,7 @@ import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -132,7 +133,7 @@ public class TestContainerBalancerTask { * Sets up configuration values and creates a mock cluster. 
*/ @BeforeEach - public void setup() throws IOException, NodeNotFoundException, + public void setup(TestInfo testInfo) throws IOException, NodeNotFoundException, TimeoutException { conf = new OzoneConfiguration(); rmConf = new ReplicationManagerConfiguration(); @@ -164,7 +165,11 @@ public void setup() throws IOException, NodeNotFoundException, conf.setFromObject(balancerConfiguration); GenericTestUtils.setLogLevel(ContainerBalancerTask.LOG, Level.DEBUG); - averageUtilization = createCluster(); + int[] sizeArray = testInfo.getTestMethod() + .filter(method -> method.getName().equals("balancerShouldMoveOnlyPositiveSizeContainers")) + .map(method -> new int[]{0, 0, 0, 0, 0, 1, 2, 3, 4, 5}) + .orElse(null); + averageUtilization = createCluster(sizeArray); mockNodeManager = new MockNodeManager(datanodeToContainersMap); NetworkTopology clusterMap = mockNodeManager.getClusterNetworkTopologyMap(); @@ -1114,6 +1119,34 @@ public void balancerShouldExcludeECContainersWhenLegacyRmIsEnabled() } } + /** + * Test to check if balancer picks up only positive size + * containers to move from source to destination. + */ + @Test + public void balancerShouldMoveOnlyPositiveSizeContainers() + throws IllegalContainerBalancerStateException, IOException, + InvalidContainerBalancerConfigurationException, TimeoutException { + + startBalancer(balancerConfiguration); + /* + Get all containers that were selected by balancer and assert none of + them is a zero or negative size container. + */ + Map containerToSource = + containerBalancerTask.getContainerToSourceMap(); + assertFalse(containerToSource.isEmpty()); + boolean zeroOrNegSizeContainerMoved = false; + for (Map.Entry entry : + containerToSource.entrySet()) { + ContainerInfo containerInfo = cidToInfoMap.get(entry.getKey()); + if (containerInfo.getUsedBytes() <= 0) { + zeroOrNegSizeContainerMoved = true; + } + } + assertFalse(zeroOrNegSizeContainerMoved); + } + /** * Determines unBalanced nodes, that is, over and under utilized nodes, * according to the generated utilization values for nodes and the threshold. @@ -1169,8 +1202,8 @@ private void generateUtilizations(int count) throws IllegalArgumentException { * cluster have utilization values determined by generateUtilizations method. * @return average utilization (used space / capacity) of the cluster */ - private double createCluster() { - generateData(); + private double createCluster(int[] sizeArray) { + generateData(sizeArray); createReplicasForContainers(); long clusterCapacity = 0, clusterUsedSpace = 0; @@ -1204,7 +1237,7 @@ private double createCluster() { /** * Create some datanodes and containers for each node. 
*/ - private void generateData() { + private void generateData(int[] sizeArray) { this.numberOfNodes = 10; generateUtilizations(numberOfNodes); nodesInCluster = new ArrayList<>(nodeUtilizations.size()); @@ -1216,13 +1249,19 @@ private void generateData() { new DatanodeUsageInfo(MockDatanodeDetails.randomDatanodeDetails(), new SCMNodeStat()); - // create containers with varying used space int sizeMultiple = 0; + if (sizeArray == null) { + sizeArray = new int[10]; + for (int j = 0; j < numberOfNodes; j++) { + sizeArray[j] = sizeMultiple; + sizeMultiple %= 5; + sizeMultiple++; + } + } + // create containers with varying used space for (int j = 0; j < i; j++) { - sizeMultiple %= 5; - sizeMultiple++; ContainerInfo container = - createContainer((long) i * i + j, sizeMultiple); + createContainer((long) i * i + j, sizeArray[j]); cidToInfoMap.put(container.containerID(), container); containerIDSet.add(container.containerID()); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestContainerReplicaPendingOps.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestContainerReplicaPendingOps.java index a97cdbddb8af..3775531d30d1 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestContainerReplicaPendingOps.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestContainerReplicaPendingOps.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.ozone.test.TestClock; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -74,6 +75,13 @@ public void setup() { dn3 = MockDatanodeDetails.randomDatanodeDetails(); } + @AfterEach + void cleanup() { + if (metrics != null) { + metrics.unRegister(); + } + } + @Test public void testGetPendingOpsReturnsEmptyList() { List ops = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java index 22c3630e0c6b..f69822129365 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java @@ -47,6 +47,7 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.assertj.core.util.Lists; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -174,6 +175,13 @@ public NodeStatus getNodeStatus(DatanodeDetails dd) { .thenReturn(new ContainerPlacementStatusDefault(2, 2, 3)); } + @AfterEach + void cleanup() { + if (metrics != null) { + metrics.unRegister(); + } + } + @ParameterizedTest @ValueSource(strings = {"rs-6-3-1024k", "rs-10-4-1024k"}) void defersNonCriticalPartialReconstruction(String rep) throws IOException { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java index 
47844f32fb0d..ecb3ce4b039d 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java @@ -55,6 +55,7 @@ import org.apache.hadoop.util.Lists; import org.apache.ozone.test.TestClock; import org.apache.ratis.protocol.exceptions.NotLeaderException; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; @@ -190,6 +191,13 @@ public void setup() throws IOException { when(scmContext.getScm()).thenReturn(scm); } + @AfterEach + void cleanup() { + if (replicationManager.getMetrics() != null) { + replicationManager.getMetrics().unRegister(); + } + } + private ReplicationManager createReplicationManager() throws IOException { return new ReplicationManager( configuration, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java index b241ac0f2d28..f3a303cad738 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java @@ -186,7 +186,7 @@ public void testContainerPlacementCapacity() throws IOException, testDir.getAbsolutePath()); conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementCapacity.class, PlacementPolicy.class); - conf.setBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true); + conf.setBoolean(ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, true); SCMNodeManager scmNodeManager = createNodeManager(conf); containerManager = createContainerManager(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java index 06565e1b7e5a..5c04ad63210e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java @@ -864,8 +864,8 @@ public void testContainersReplicatedOnDecomDnAPI() assertEquals(1, monitor.getTrackedNodeCount()); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(dn1).getOperationalState()); - assertEquals(monitor.getContainersReplicatedOnNode(dn1).get("UnderReplicated").size(), 2); - assertEquals(monitor.getContainersReplicatedOnNode(dn1).get("UnClosed").size(), 0); + assertEquals(monitor.getContainersPendingReplication(dn1).get("UnderReplicated").size(), 2); + assertEquals(monitor.getContainersPendingReplication(dn1).get("UnClosed").size(), 0); DatanodeAdminMonitorTestUtil .mockGetContainerReplicaCount(repManager, @@ -877,8 +877,8 @@ public void testContainersReplicatedOnDecomDnAPI() assertEquals(1, monitor.getTrackedNodeCount()); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(dn1).getOperationalState()); - assertEquals(monitor.getContainersReplicatedOnNode(dn1).get("UnderReplicated").size(), 0); - assertEquals(monitor.getContainersReplicatedOnNode(dn1).get("UnClosed").size(), 2); + assertEquals(monitor.getContainersPendingReplication(dn1).get("UnderReplicated").size(), 0); 
+ assertEquals(monitor.getContainersPendingReplication(dn1).get("UnClosed").size(), 2); } /** diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestFetchMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestFetchMetrics.java new file mode 100644 index 000000000000..ede005745e5e --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestFetchMetrics.java @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.node; + +import org.apache.hadoop.hdds.scm.FetchMetrics; +import org.junit.jupiter.api.Test; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import static org.junit.jupiter.api.Assertions.assertTrue; + +class TestFetchMetrics { + private static FetchMetrics fetchMetrics = new FetchMetrics(); + + @Test + public void testFetchAll() { + String result = fetchMetrics.getMetrics(null); + Pattern p = Pattern.compile("beans", Pattern.MULTILINE); + Matcher m = p.matcher(result); + assertTrue(m.find()); + } + + @Test + public void testFetchFiltered() { + String result = fetchMetrics.getMetrics("Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics"); + Pattern p = Pattern.compile("beans", Pattern.MULTILINE); + Matcher m = p.matcher(result); + assertTrue(m.find()); + } +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java index 09f0dd59b9f9..a0c0280d4083 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java @@ -18,20 +18,27 @@ package org.apache.hadoop.hdds.scm.node; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.DatanodeAdminError; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; +import org.mockito.Mockito; import java.io.File; import java.io.IOException; @@ -39,13 +46,21 @@ import java.util.UUID; import java.util.Arrays; import java.util.ArrayList; +import java.util.Set; +import java.util.HashSet; import static java.util.Collections.singletonList; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import 
static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** * Unit tests for the decommission manager. @@ -56,15 +71,42 @@ public class TestNodeDecommissionManager { private NodeDecommissionManager decom; private StorageContainerManager scm; private NodeManager nodeManager; + private ContainerManager containerManager; private OzoneConfiguration conf; + private static int id = 1; @BeforeEach void setup(@TempDir File dir) throws Exception { conf = new OzoneConfiguration(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.getAbsolutePath()); - nodeManager = createNodeManager(conf); - decom = new NodeDecommissionManager(conf, nodeManager, + scm = HddsTestUtils.getScm(conf); + nodeManager = scm.getScmNodeManager(); + containerManager = mock(ContainerManager.class); + decom = new NodeDecommissionManager(conf, nodeManager, containerManager, SCMContext.emptyContext(), new EventQueue(), null); + when(containerManager.allocateContainer(any(ReplicationConfig.class), anyString())) + .thenAnswer(invocation -> createMockContainer((ReplicationConfig)invocation.getArguments()[0], + (String) invocation.getArguments()[1])); + } + + private ContainerInfo createMockContainer(ReplicationConfig rep, String owner) { + ContainerInfo.Builder builder = new ContainerInfo.Builder() + .setReplicationConfig(rep) + .setContainerID(id) + .setPipelineID(PipelineID.randomId()) + .setState(OPEN) + .setOwner(owner); + id++; + return builder.build(); + } + private ContainerInfo getMockContainer(ReplicationConfig rep, ContainerID conId) { + ContainerInfo.Builder builder = new ContainerInfo.Builder() + .setReplicationConfig(rep) + .setContainerID(conId.getId()) + .setPipelineID(PipelineID.randomId()) + .setState(OPEN) + .setOwner("admin"); + return builder.build(); } @Test @@ -99,37 +141,37 @@ public void testAnyInvalidHostThrowsException() { // Try to decommission a host that does exist, but give incorrect port List error = decom.decommissionNodes( - singletonList(dns.get(1).getIpAddress() + ":10")); + singletonList(dns.get(1).getIpAddress() + ":10"), false); assertEquals(1, error.size()); assertThat(error.get(0).getHostname()).contains(dns.get(1).getIpAddress()); // Try to decommission a host that does not exist - error = decom.decommissionNodes(singletonList("123.123.123.123")); + error = decom.decommissionNodes(singletonList("123.123.123.123"), false); assertEquals(1, error.size()); assertThat(error.get(0).getHostname()).contains("123.123.123.123"); // Try to decommission a host that does exist and a host that does not error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), - "123,123,123,123")); + "123,123,123,123"), false); assertEquals(1, error.size()); assertThat(error.get(0).getHostname()).contains("123,123,123,123"); // Try to decommission a host with many DNs on the address with no port - error = decom.decommissionNodes(singletonList(dns.get(0).getIpAddress())); + error = decom.decommissionNodes(singletonList(dns.get(0).getIpAddress()), false); assertEquals(1, error.size()); assertThat(error.get(0).getHostname()).contains(dns.get(0).getIpAddress()); // Try to decommission a host with many DNs on the address with a port // that does not exist error = decom.decommissionNodes(singletonList(dns.get(0).getIpAddress() - + ":10")); + + ":10"), false); assertEquals(1, 
error.size()); assertThat(error.get(0).getHostname()).contains(dns.get(0).getIpAddress() + ":10"); // Try to decommission 2 hosts with address that does not exist // Both should return error error = decom.decommissionNodes(Arrays.asList( - "123.123.123.123", "234.234.234.234")); + "123.123.123.123", "234.234.234.234"), false); assertEquals(2, error.size()); assertTrue(error.get(0).getHostname().contains("123.123.123.123") && error.get(1).getHostname().contains("234.234.234.234")); @@ -142,7 +184,7 @@ public void testNodesCanBeDecommissionedAndRecommissioned() // Decommission 2 valid nodes decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), - dns.get(2).getIpAddress())); + dns.get(2).getIpAddress()), false); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, @@ -151,14 +193,14 @@ public void testNodesCanBeDecommissionedAndRecommissioned() // Running the command again gives no error - nodes already decommissioning // are silently ignored. decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), - dns.get(2).getIpAddress())); + dns.get(2).getIpAddress()), false); // Attempt to decommission dn(10) which has multiple hosts on the same IP // and we hardcoded ports to 3456, 4567, 5678 DatanodeDetails multiDn = dns.get(10); String multiAddr = multiDn.getIpAddress() + ":" + multiDn.getPorts().get(0).getValue(); - decom.decommissionNodes(singletonList(multiAddr)); + decom.decommissionNodes(singletonList(multiAddr), false); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(multiDn).getOperationalState()); @@ -166,7 +208,7 @@ public void testNodesCanBeDecommissionedAndRecommissioned() // dn(11) with identical ports. nodeManager.processHeartbeat(dns.get(9)); DatanodeDetails duplicatePorts = dns.get(9); - decom.decommissionNodes(singletonList(duplicatePorts.getIpAddress())); + decom.decommissionNodes(singletonList(duplicatePorts.getIpAddress()), false); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(duplicatePorts).getOperationalState()); @@ -217,13 +259,13 @@ public void testNodesCanBeDecommissionedAndRecommissionedMixedPorts() // Attempt to decommission with just the IP, which should fail. 
List error = - decom.decommissionNodes(singletonList(extraDN.getIpAddress())); + decom.decommissionNodes(singletonList(extraDN.getIpAddress()), false); assertEquals(1, error.size()); assertThat(error.get(0).getHostname()).contains(extraDN.getIpAddress()); // Now try the one with the unique port decom.decommissionNodes( - singletonList(extraDN.getIpAddress() + ":" + ratisPort + 1)); + singletonList(extraDN.getIpAddress() + ":" + ratisPort + 1), false); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(extraDN).getOperationalState()); @@ -239,7 +281,7 @@ public void testNodesCanBeDecommissionedAndRecommissionedMixedPorts() nodeManager.processHeartbeat(expectedDN); decom.decommissionNodes(singletonList( - expectedDN.getIpAddress() + ":" + ratisPort)); + expectedDN.getIpAddress() + ":" + ratisPort), false); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(expectedDN).getOperationalState()); // The other duplicate is still in service @@ -323,7 +365,7 @@ public void testNodesCannotTransitionFromDecomToMaint() throws Exception { // Try to go from maint to decom: List dn = new ArrayList<>(); dn.add(dns.get(1).getIpAddress()); - List errors = decom.decommissionNodes(dn); + List errors = decom.decommissionNodes(dn, false); assertEquals(1, errors.size()); assertEquals(dns.get(1).getHostName(), errors.get(0).getHostname()); @@ -369,10 +411,268 @@ public void testNodeDecommissionManagerOnBecomeLeader() throws Exception { assertEquals(decom.getMonitor().getTrackedNodes().size(), 3); } - private SCMNodeManager createNodeManager(OzoneConfiguration config) - throws IOException, AuthenticationException { - scm = HddsTestUtils.getScm(config); - return (SCMNodeManager) scm.getScmNodeManager(); + @Test + public void testInsufficientNodeDecommissionThrowsExceptionForRatis() throws + NodeNotFoundException, IOException { + when(containerManager.getContainer(any(ContainerID.class))) + .thenAnswer(invocation -> getMockContainer(RatisReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.THREE), (ContainerID)invocation.getArguments()[0])); + List error; + List dns = new ArrayList<>(); + + for (int i = 0; i < 5; i++) { + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dns.add(dn); + nodeManager.register(dn, null, null); + } + + Set idsRatis = new HashSet<>(); + for (int i = 0; i < 5; i++) { + ContainerInfo container = containerManager.allocateContainer( + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), "admin"); + idsRatis.add(container.containerID()); + } + + for (DatanodeDetails dn : nodeManager.getAllNodes().subList(0, 3)) { + nodeManager.setContainers(dn, idsRatis); + } + + error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), + dns.get(2).getIpAddress(), dns.get(3).getIpAddress(), dns.get(4).getIpAddress()), false); + assertTrue(error.get(0).getHostname().contains("AllHosts")); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(3)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(4)).getOperationalState()); + + error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), + dns.get(2).getIpAddress(), 
dns.get(3).getIpAddress(), dns.get(4).getIpAddress()), true); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(3)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(4)).getOperationalState()); + } + + @Test + public void testInsufficientNodeDecommissionThrowsExceptionForEc() throws + NodeNotFoundException, IOException { + when(containerManager.getContainer(any(ContainerID.class))) + .thenAnswer(invocation -> getMockContainer(new ECReplicationConfig(3, 2), + (ContainerID)invocation.getArguments()[0])); + List error; + List dns = new ArrayList<>(); + + for (int i = 0; i < 5; i++) { + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dns.add(dn); + nodeManager.register(dn, null, null); + } + + Set idsEC = new HashSet<>(); + for (int i = 0; i < 5; i++) { + ContainerInfo container = containerManager.allocateContainer(new ECReplicationConfig(3, 2), "admin"); + idsEC.add(container.containerID()); + } + + for (DatanodeDetails dn : nodeManager.getAllNodes()) { + nodeManager.setContainers(dn, idsEC); + } + + error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), false); + assertTrue(error.get(0).getHostname().contains("AllHosts")); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), true); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + } + + @Test + public void testInsufficientNodeDecommissionThrowsExceptionRatisAndEc() throws + NodeNotFoundException, IOException { + List error; + List dns = new ArrayList<>(); + + for (int i = 0; i < 5; i++) { + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dns.add(dn); + nodeManager.register(dn, null, null); + } + + Set idsRatis = new HashSet<>(); + ContainerInfo containerRatis = containerManager.allocateContainer( + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), "admin"); + idsRatis.add(containerRatis.containerID()); + Set idsEC = new HashSet<>(); + ContainerInfo containerEC = containerManager.allocateContainer(new ECReplicationConfig(3, 2), "admin"); + idsEC.add(containerEC.containerID()); + + when(containerManager.getContainer(any(ContainerID.class))) + .thenAnswer(invocation -> { + ContainerID containerID = (ContainerID)invocation.getArguments()[0]; + if (idsEC.contains(containerID)) { + return getMockContainer(new ECReplicationConfig(3, 2), + (ContainerID)invocation.getArguments()[0]); + } + return getMockContainer(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), + (ContainerID)invocation.getArguments()[0]); + }); + + for (DatanodeDetails dn : nodeManager.getAllNodes().subList(0, 3)) { + nodeManager.setContainers(dn, idsRatis); + } + for (DatanodeDetails dn : nodeManager.getAllNodes()) { + nodeManager.setContainers(dn, idsEC); + } + + error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), false); + assertTrue(error.get(0).getHostname().contains("AllHosts")); + 
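The tests above stub ContainerManager.getContainer and allocateContainer with Mockito's thenAnswer so the returned ContainerInfo is derived from the actual call arguments. A minimal, self-contained sketch of that stubbing pattern, using a hypothetical ContainerStore interface in place of ContainerManager (only the Mockito APIs are real):

import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class ThenAnswerSketch {
  // Hypothetical stand-in for the mocked ContainerManager.
  interface ContainerStore {
    String lookup(int containerId);
  }

  public static void main(String[] args) {
    ContainerStore store = mock(ContainerStore.class);
    // thenAnswer derives the stubbed result from the actual argument,
    // mirroring how getContainer(any(ContainerID.class)) is answered above.
    when(store.lookup(anyInt()))
        .thenAnswer(invocation -> "container-" + invocation.getArgument(0, Integer.class));
    System.out.println(store.lookup(7));   // prints: container-7
  }
}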
assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), true); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + } + + @Test + public void testInsufficientNodeDecommissionChecksNotInService() throws + NodeNotFoundException, IOException { + when(containerManager.getContainer(any(ContainerID.class))) + .thenAnswer(invocation -> getMockContainer(RatisReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.THREE), (ContainerID)invocation.getArguments()[0])); + + List error; + List dns = new ArrayList<>(); + + for (int i = 0; i < 5; i++) { + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dns.add(dn); + nodeManager.register(dn, null, null); + } + + Set idsRatis = new HashSet<>(); + for (int i = 0; i < 5; i++) { + ContainerInfo container = containerManager.allocateContainer( + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), "admin"); + idsRatis.add(container.containerID()); + } + + for (DatanodeDetails dn : nodeManager.getAllNodes().subList(0, 3)) { + nodeManager.setContainers(dn, idsRatis); + } + + // decommission one node successfully + error = decom.decommissionNodes(Arrays.asList(dns.get(0).getIpAddress()), false); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(0)).getOperationalState()); + // try to decommission 2 nodes, one in service and one in decommissioning state, should be successful. + error = decom.decommissionNodes(Arrays.asList(dns.get(0).getIpAddress(), + dns.get(1).getIpAddress()), false); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(0)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + } + + @Test + public void testInsufficientNodeDecommissionChecksForNNF() throws + NodeNotFoundException, IOException { + List error; + List dns = new ArrayList<>(); + + for (int i = 0; i < 5; i++) { + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dns.add(dn); + } + Set idsRatis = new HashSet<>(); + for (int i = 0; i < 3; i++) { + ContainerInfo container = containerManager.allocateContainer( + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), "admin"); + idsRatis.add(container.containerID()); + } + + nodeManager = mock(NodeManager.class); + decom = new NodeDecommissionManager(conf, nodeManager, containerManager, + SCMContext.emptyContext(), new EventQueue(), null); + when(containerManager.getContainer(any(ContainerID.class))) + .thenAnswer(invocation -> getMockContainer(RatisReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.THREE), (ContainerID)invocation.getArguments()[0])); + when(nodeManager.getNodesByAddress(any())).thenAnswer(invocation -> + getDatanodeDetailsList((String)invocation.getArguments()[0], dns)); + when(nodeManager.getContainers(any())).thenReturn(idsRatis); + when(nodeManager.getNodeCount(any())).thenReturn(5); + + when(nodeManager.getNodeStatus(any())).thenAnswer(invocation -> + getNodeOpState((DatanodeDetails) invocation.getArguments()[0], dns)); + Mockito.doAnswer(invocation -> { + setNodeOpState((DatanodeDetails)invocation.getArguments()[0], + 
(HddsProtos.NodeOperationalState)invocation.getArguments()[1], dns); + return null; + }).when(nodeManager).setNodeOperationalState(any(DatanodeDetails.class), any( + HddsProtos.NodeOperationalState.class)); + + error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), + dns.get(2).getIpAddress(), dns.get(3).getIpAddress()), false); + assertTrue(error.get(0).getHostname().contains("AllHosts")); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(3)).getOperationalState()); + + error = decom.decommissionNodes(Arrays.asList(dns.get(0).getIpAddress(), + dns.get(1).getIpAddress(), dns.get(2).getIpAddress()), false); + assertFalse(error.get(0).getHostname().contains("AllHosts")); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + } + + private List getDatanodeDetailsList(String ipaddress, List dns) { + List datanodeDetails = new ArrayList<>(); + for (DatanodeDetails dn : dns) { + if (dn.getIpAddress().equals(ipaddress)) { + datanodeDetails.add(dn); + break; + } + } + return datanodeDetails; + } + + private void setNodeOpState(DatanodeDetails dn, HddsProtos.NodeOperationalState newState, List dns) { + for (DatanodeDetails datanode : dns) { + if (datanode.equals(dn)) { + datanode.setPersistedOpState(newState); + break; + } + } + } + + private NodeStatus getNodeOpState(DatanodeDetails dn, List dns) throws NodeNotFoundException { + if (dn.equals(dns.get(0))) { + throw new NodeNotFoundException(); + } + for (DatanodeDetails datanode : dns) { + if (datanode.equals(dn)) { + return new NodeStatus(datanode.getPersistedOpState(), HddsProtos.NodeState.HEALTHY); + } + } + return null; } /** diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java index 58f65df8fd85..6a4cebe9c7a9 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java @@ -151,10 +151,10 @@ public void testGetVersionTask() throws Exception { try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, serverAddress, 1000)) { ozoneConf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); ozoneConf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); OzoneContainer ozoneContainer = new OzoneContainer(dnDetails, ozoneConf, ContainerTestUtils.getMockContext(dnDetails, ozoneConf)); rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); @@ -179,14 +179,14 @@ public void testGetVersionTask() throws Exception { */ @Test public void testDeletedContainersClearedOnStartup() throws Exception { - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, true); - 
ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); ozoneConf.setFromObject(new ReplicationConfig().setPort(0)); + OzoneContainer ozoneContainer = createVolume(ozoneConf); try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, serverAddress, 1000)) { - OzoneContainer ozoneContainer = createVolume(ozoneConf); HddsVolume hddsVolume = (HddsVolume) ozoneContainer.getVolumeSet() .getVolumesList().get(0); KeyValueContainer kvContainer = addContainer(ozoneConf, hddsVolume); @@ -212,17 +212,19 @@ public void testDeletedContainersClearedOnStartup() throws Exception { hddsVolume.getDeletedContainerDir().listFiles(); assertNotNull(leftoverContainers); assertEquals(0, leftoverContainers.length); + } finally { + ozoneContainer.stop(); } } @Test public void testCheckVersionResponse() throws Exception { - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, true); - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); ozoneConf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); ozoneConf.setFromObject(new ReplicationConfig().setPort(0)); try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, serverAddress, 1000)) { @@ -267,7 +269,7 @@ public void testCheckVersionResponse() throws Exception { */ @Test public void testDnLayoutVersionFile() throws Exception { - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, serverAddress, 1000)) { @@ -579,7 +581,7 @@ private StateContext heartbeatTaskHelper( // Mini Ozone cluster will not come up if the port is not true, since // Ratis will exit if the server port cannot be bound. We can remove this // hard coding once we fix the Ratis default behaviour. 
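The ContainerBalancerStartSubcommand options a little further below bind their flags to java.util.Optional fields through picocli and escape literal percent signs as '%%' in descriptions. A minimal sketch under the assumption of picocli 4.6+ on the classpath; the class name is illustrative and only the '--threshold' option is mirrored:

import java.util.Optional;
import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;

@Command(name = "balancer-options-sketch")
public class BalancerOptionsSketch implements Runnable {

  @Option(names = {"-t", "--threshold"},
      description = "Percentage deviation from average utilization (specify '10' for 10%%).")
  private Optional<Double> threshold;

  @Override
  public void run() {
    // When the flag is supplied on the command line, the Optional carries the parsed value.
    System.out.println("threshold = "
        + (threshold == null ? "<not set>" : threshold.map(String::valueOf).orElse("<empty>")));
  }

  public static void main(String[] args) {
    new CommandLine(new BalancerOptionsSketch()).execute("--threshold", "12.5");
  }
}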
- conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); // Create a datanode state machine for stateConext used by endpoint task try (DatanodeStateMachine stateMachine = new DatanodeStateMachine( diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java index 093dd93430b9..cc496a28e777 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java @@ -22,13 +22,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.NativeCodeLoader; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Level; -import org.apache.log4j.LogManager; -import org.apache.log4j.Logger; -import org.apache.log4j.PatternLayout; import picocli.CommandLine; /** @@ -75,12 +69,6 @@ public UserGroupInformation getUser() throws IOException { * @param argv - System Args Strings[] */ public static void main(String[] argv) { - LogManager.resetConfiguration(); - Logger.getRootLogger().setLevel(Level.INFO); - Logger.getRootLogger() - .addAppender(new ConsoleAppender(new PatternLayout("%m%n"))); - Logger.getLogger(NativeCodeLoader.class).setLevel(Level.ERROR); - new OzoneAdmin().run(argv); } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStartSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStartSubcommand.java index 158bc6da7b89..17885eecc975 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStartSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStartSubcommand.java @@ -38,49 +38,97 @@ public class ContainerBalancerStartSubcommand extends ScmSubcommand { @Option(names = {"-t", "--threshold"}, description = "Percentage deviation from average utilization of " + - "the cluster after which a datanode will be rebalanced (for " + - "example, '10' for 10%%).") + "the cluster after which a datanode will be rebalanced. The value " + + "should be in the range [0.0, 100.0), with a default of 10 " + + "(specify '10' for 10%%).") private Optional threshold; @Option(names = {"-i", "--iterations"}, - description = "Maximum consecutive iterations that" + - " balancer will run for.") + description = "Maximum consecutive iterations that " + + "balancer will run for. The value should be positive " + + "or -1, with a default of 10 (specify '10' for 10 iterations).") private Optional iterations; @Option(names = {"-d", "--max-datanodes-percentage-to-involve-per-iteration", "--maxDatanodesPercentageToInvolvePerIteration"}, description = "Max percentage of healthy, in service datanodes " + - "that can be involved in balancing in one iteration (for example, " + + "that can be involved in balancing in one iteration. The value " + + "should be in the range [0,100], with a default of 20 (specify " + "'20' for 20%%).") private Optional maxDatanodesPercentageToInvolvePerIteration; @Option(names = {"-s", "--max-size-to-move-per-iteration-in-gb", "--maxSizeToMovePerIterationInGB"}, description = "Maximum size that can be moved per iteration of " + - "balancing (for example, '500' for 500GB).") + "balancing. 
The value should be positive, with a default of 500 " + + "(specify '500' for 500GB).") private Optional maxSizeToMovePerIterationInGB; @Option(names = {"-e", "--max-size-entering-target-in-gb", "--maxSizeEnteringTargetInGB"}, description = "Maximum size that can enter a target datanode while " + - "balancing. This is the sum of data from multiple sources (for " + - "example, '26' for 26GB).") + "balancing. This is the sum of data from multiple sources. The value " + + "should be positive, with a default of 26 (specify '26' for 26GB).") private Optional maxSizeEnteringTargetInGB; @Option(names = {"-l", "--max-size-leaving-source-in-gb", "--maxSizeLeavingSourceInGB"}, description = "Maximum size that can leave a source datanode while " + - "balancing. This is the sum of data moving to multiple targets " + - "(for example, '26' for 26GB).") + "balancing. This is the sum of data moving to multiple targets. " + + "The value should be positive, with a default of 26 " + + "(specify '26' for 26GB).") private Optional maxSizeLeavingSourceInGB; + @Option(names = {"--balancing-iteration-interval-minutes"}, + description = "The interval period in minutes between each iteration of Container Balancer. " + + "The value should be positive, with a default of 70 (specify '70' for 70 minutes).") + private Optional balancingInterval; + + @Option(names = {"--move-timeout-minutes"}, + description = "The amount of time in minutes to allow a single container to move " + + "from source to target. The value should be positive, with a default of 65 " + + "(specify '65' for 65 minutes).") + private Optional moveTimeout; + + @Option(names = {"--move-replication-timeout-minutes"}, + description = "The " + + "amount of time in minutes to allow a single container's replication from source " + + "to target as part of container move. The value should be positive, with " + + "a default of 50. For example, if \"hdds.container" + + ".balancer.move.timeout\" is 65 minutes, then out of those 65 minutes " + + "50 minutes will be the deadline for replication to complete (specify " + + "'50' for 50 minutes).") + private Optional moveReplicationTimeout; + + @Option(names = {"--move-network-topology-enable"}, + description = "Whether to take network topology into account when " + + "selecting a target for a source. " + + "This configuration is false by default.") + private Optional networkTopologyEnable; + + @Option(names = {"--include-datanodes"}, + description = "A list of Datanode " + + "hostnames or ip addresses separated by commas. Only the Datanodes " + + "specified in this list are balanced. This configuration is empty by " + + "default and is applicable only if it is non-empty (specify \"hostname1,hostname2,hostname3\").") + private Optional includeNodes; + + @Option(names = {"--exclude-datanodes"}, + description = "A list of Datanode " + + "hostnames or ip addresses separated by commas. The Datanodes specified " + + "in this list are excluded from balancing. This configuration is empty " + + "by default (specify \"hostname1,hostname2,hostname3\").") + private Optional excludeNodes; + @Override public void execute(ScmClient scmClient) throws IOException { StartContainerBalancerResponseProto response = scmClient. 
startContainerBalancer(threshold, iterations, maxDatanodesPercentageToInvolvePerIteration, maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, - maxSizeLeavingSourceInGB); + maxSizeLeavingSourceInGB, balancingInterval, moveTimeout, + moveReplicationTimeout, networkTopologyEnable, includeNodes, + excludeNodes); if (response.getStart()) { System.out.println("Container Balancer started successfully."); } else { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStopSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStopSubcommand.java index 89e7680f31c5..c15109a32784 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStopSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStopSubcommand.java @@ -34,7 +34,8 @@ public class ContainerBalancerStopSubcommand extends ScmSubcommand { @Override public void execute(ScmClient scmClient) throws IOException { + System.out.println("Sending stop command. Waiting for Container Balancer to stop..."); scmClient.stopContainerBalancer(); - System.out.println("Stopping ContainerBalancer..."); + System.out.println("Container Balancer stopped."); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index d07e696e7ef0..f334f1a03e90 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -98,8 +98,8 @@ public ContainerOperationClient(OzoneConfiguration conf) throws IOException { containerSizeB = (int) conf.getStorageSize(OZONE_SCM_CONTAINER_SIZE, OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES); boolean useRatis = conf.getBoolean( - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT); + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_DEFAULT); if (useRatis) { replicationFactor = HddsProtos.ReplicationFactor.THREE; replicationType = HddsProtos.ReplicationType.RATIS; @@ -184,7 +184,7 @@ public void createContainer(XceiverClientSpi client, } } - private String getEncodedContainerToken(long containerId) throws IOException { + public String getEncodedContainerToken(long containerId) throws IOException { if (!containerTokenEnabled) { return ""; } @@ -237,9 +237,9 @@ public HddsProtos.Node queryNode(UUID uuid) throws IOException { } @Override - public List decommissionNodes(List hosts) + public List decommissionNodes(List hosts, boolean force) throws IOException { - return storageContainerLocationClient.decommissionNodes(hosts); + return storageContainerLocationClient.decommissionNodes(hosts, force); } @Override @@ -483,12 +483,19 @@ public StartContainerBalancerResponseProto startContainerBalancer( Optional maxDatanodesPercentageToInvolvePerIteration, Optional maxSizeToMovePerIterationInGB, Optional maxSizeEnteringTargetInGB, - Optional maxSizeLeavingSourceInGB) - throws IOException { + Optional maxSizeLeavingSourceInGB, + Optional balancingInterval, + Optional moveTimeout, + Optional moveReplicationTimeout, + Optional networkTopologyEnable, + Optional includeNodes, + Optional excludeNodes) throws IOException { return storageContainerLocationClient.startContainerBalancer(threshold, iterations, 
maxDatanodesPercentageToInvolvePerIteration, maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, - maxSizeLeavingSourceInGB); + maxSizeLeavingSourceInGB, balancingInterval, moveTimeout, + moveReplicationTimeout, networkTopologyEnable, includeNodes, + excludeNodes); } @Override @@ -563,4 +570,9 @@ public DecommissionScmResponseProto decommissionScm( return storageContainerLocationClient.decommissionScm(scmId); } + @Override + public String getMetrics(String query) throws IOException { + return storageContainerLocationClient.getMetrics(query); + } + } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java index ff82b82ec87a..29f2f3d45727 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java @@ -19,8 +19,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import java.io.IOException; @@ -35,12 +33,9 @@ versionProvider = HddsVersionProvider.class) public class ReplicationManagerStartSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(ReplicationManagerStartSubcommand.class); - @Override public void execute(ScmClient scmClient) throws IOException { scmClient.startReplicationManager(); - LOG.info("Starting ReplicationManager..."); + System.out.println("Starting ReplicationManager..."); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java index 9bc3649dd9f0..b2e308e14227 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java @@ -19,8 +19,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import java.io.IOException; @@ -35,18 +33,15 @@ versionProvider = HddsVersionProvider.class) public class ReplicationManagerStatusSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(ReplicationManagerStatusSubcommand.class); - @Override public void execute(ScmClient scmClient) throws IOException { boolean execReturn = scmClient.getReplicationManagerStatus(); // Output data list if (execReturn) { - LOG.info("ReplicationManager is Running."); + System.out.println("ReplicationManager is Running."); } else { - LOG.info("ReplicationManager is Not Running."); + System.out.println("ReplicationManager is Not Running."); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java index 7d3063a7636c..12de13c07d26 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java +++ 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java @@ -19,8 +19,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import java.io.IOException; @@ -35,14 +33,11 @@ versionProvider = HddsVersionProvider.class) public class ReplicationManagerStopSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(ReplicationManagerStopSubcommand.class); - @Override public void execute(ScmClient scmClient) throws IOException { scmClient.stopReplicationManager(); - LOG.info("Stopping ReplicationManager..."); - LOG.info("Requested SCM to stop ReplicationManager, " + + System.out.println("Stopping ReplicationManager..."); + System.out.println("Requested SCM to stop ReplicationManager, " + "it might take sometime for the ReplicationManager to stop."); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java index db2f02c5e125..747215dcac71 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java @@ -24,8 +24,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine; import picocli.CommandLine.Command; @@ -39,9 +37,6 @@ versionProvider = HddsVersionProvider.class) public class SafeModeCheckSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(SafeModeCheckSubcommand.class); - @CommandLine.Option(names = {"--verbose"}, description = "Show detailed status of rules.") private boolean verbose; @@ -52,17 +47,17 @@ public void execute(ScmClient scmClient) throws IOException { // Output data list if (execReturn) { - LOG.info("SCM is in safe mode."); + System.out.println("SCM is in safe mode."); if (verbose) { for (Map.Entry> entry : scmClient.getSafeModeRuleStatuses().entrySet()) { Pair value = entry.getValue(); - LOG.info("validated:{}, {}, {}", + System.out.printf("validated:%s, %s, %s%n", value.getLeft(), entry.getKey(), value.getRight()); } } } else { - LOG.info("SCM is out of safe mode."); + System.out.println("SCM is out of safe mode."); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java index bcf64deb85e2..e4173c9767e3 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java @@ -22,8 +22,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; /** @@ -36,14 +34,11 @@ versionProvider = HddsVersionProvider.class) public class SafeModeExitSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(SafeModeExitSubcommand.class); - @Override public void execute(ScmClient scmClient) throws IOException { boolean execReturn = 
scmClient.forceExitSafeMode(); if (execReturn) { - LOG.info("SCM exit safe mode successfully."); + System.out.println("SCM exit safe mode successfully."); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java index abaca08cfbb9..ad94d4fffd0d 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java @@ -23,8 +23,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import picocli.CommandLine.Option; import picocli.CommandLine.Mixin; @@ -39,9 +37,6 @@ versionProvider = HddsVersionProvider.class) public class SafeModeWaitSubcommand implements Callable { - private static final Logger LOG = - LoggerFactory.getLogger(SafeModeWaitSubcommand.class); - @Option(description = "Define timeout (in second) to wait until (exit code 1) " + "or until safemode is ended (exit code 0).", defaultValue = "30", @@ -62,26 +57,26 @@ public Void call() throws Exception { long remainingTime; do { if (!scmClient.inSafeMode()) { - LOG.info("SCM is out of safe mode."); + System.out.println("SCM is out of safe mode."); return null; } remainingTime = getRemainingTimeInSec(); if (remainingTime > 0) { - LOG.info( + System.out.printf( "SCM is in safe mode. Will retry in 1 sec. Remaining time " - + "(sec): {}", + + "(sec): %s%n", remainingTime); Thread.sleep(1000); } else { - LOG.info("SCM is in safe mode. No more retries."); + System.out.println("SCM is in safe mode. No more retries."); } } while (remainingTime > 0); } catch (InterruptedException ex) { - LOG.info( - "SCM is not available (yet?). Error is {}. Will retry in 1 sec. " - + "Remaining time (sec): {}", + System.out.printf( + "SCM is not available (yet?). Error is %s. Will retry in 1 sec. 
" + + "Remaining time (sec): %s%n", ex.getMessage(), getRemainingTimeInSec()); Thread.sleep(1000); Thread.currentThread().interrupt(); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CleanExpiredCertsSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CleanExpiredCertsSubcommand.java index cab7a29a4ea6..09caf8147ad4 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CleanExpiredCertsSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CleanExpiredCertsSubcommand.java @@ -19,8 +19,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine; import java.io.IOException; @@ -36,13 +34,10 @@ versionProvider = HddsVersionProvider.class) public class CleanExpiredCertsSubcommand extends ScmCertSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(CleanExpiredCertsSubcommand.class); - @Override protected void execute(SCMSecurityProtocol client) throws IOException { List pemEncodedCerts = client.removeExpiredCertificates(); - LOG.info("List of removed expired certificates:"); - printCertList(LOG, pemEncodedCerts); + System.out.println("List of removed expired certificates:"); + printCertList(pemEncodedCerts); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/InfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/InfoSubcommand.java index 6177c8f7ff4e..c708d424d9c9 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/InfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/InfoSubcommand.java @@ -26,12 +26,8 @@ import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; -import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Parameters; -import picocli.CommandLine.Spec; /** * This is the handler that process certificate info command. @@ -44,12 +40,6 @@ class InfoSubcommand extends ScmCertSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(InfoSubcommand.class); - - @Spec - private CommandSpec spec; - @Parameters(description = "Serial id of the certificate in decimal.") private String serialId; @@ -61,12 +51,12 @@ public void execute(SCMSecurityProtocol client) throws IOException { "Certificate can't be found"); // Print container report info. 
- LOG.info("Certificate id: {}", serialId); + System.out.printf("Certificate id: %s%n", serialId); try { X509Certificate cert = CertificateCodec.getX509Certificate(certPemStr); - LOG.info(cert.toString()); + System.out.println(cert); } catch (CertificateException ex) { - LOG.error("Failed to get certificate id " + serialId); + System.err.println("Failed to get certificate id " + serialId); throw new IOException("Fail to get certificate id " + serialId, ex); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ListSubcommand.java index c2e0bd7fadff..ea0898381478 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ListSubcommand.java @@ -36,8 +36,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; import org.apache.hadoop.hdds.server.JsonUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import picocli.CommandLine.Help.Visibility; import picocli.CommandLine.Option; @@ -54,9 +52,6 @@ versionProvider = HddsVersionProvider.class) public class ListSubcommand extends ScmCertSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(ListSubcommand.class); - @Option(names = {"-s", "--start"}, description = "Certificate serial id to start the iteration", defaultValue = "0", showDefaultValue = Visibility.ALWAYS) @@ -114,7 +109,7 @@ protected void execute(SCMSecurityProtocol client) throws IOException { CertificateCodec.getX509Certificate(certPemStr); certList.add(new Certificate(cert)); } catch (CertificateException ex) { - LOG.error("Failed to parse certificate."); + err.println("Failed to parse certificate."); } } System.out.println( @@ -122,9 +117,9 @@ protected void execute(SCMSecurityProtocol client) throws IOException { return; } - LOG.info("Certificate list:(Type={}, BatchSize={}, CertCount={})", + System.out.printf("Certificate list:(Type=%s, BatchSize=%s, CertCount=%s)%n", type.toUpperCase(), count, certPemList.size()); - printCertList(LOG, certPemList); + printCertList(certPemList); } private static class BigIntJsonSerializer extends JsonSerializer { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ScmCertSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ScmCertSubcommand.java index d7ebb44e0ffc..354adbb5d6ba 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ScmCertSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ScmCertSubcommand.java @@ -20,7 +20,6 @@ import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; import org.apache.hadoop.hdds.scm.cli.ScmOption; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.slf4j.Logger; import picocli.CommandLine; import java.io.IOException; @@ -37,29 +36,29 @@ public abstract class ScmCertSubcommand implements Callable { @CommandLine.Mixin private ScmOption scmOption; - private static final String OUTPUT_FORMAT = "%-17s %-30s %-30s %-110s %-110s"; + private static final String OUTPUT_FORMAT = "%-17s %-30s %-30s %-110s %-110s%n"; - protected void printCertList(Logger log, List pemEncodedCerts) { + protected void printCertList(List pemEncodedCerts) { if (pemEncodedCerts.isEmpty()) { 
- log.info("No certificates to list"); + System.out.println("No certificates to list"); return; } - log.info(String.format(OUTPUT_FORMAT, "SerialNumber", "Valid From", - "Expiry", "Subject", "Issuer")); + System.out.printf(OUTPUT_FORMAT, "SerialNumber", "Valid From", + "Expiry", "Subject", "Issuer"); for (String certPemStr : pemEncodedCerts) { try { X509Certificate cert = CertificateCodec.getX509Certificate(certPemStr); - printCert(cert, log); + printCert(cert); } catch (CertificateException e) { - log.error("Failed to parse certificate.", e); + System.err.println("Failed to parse certificate: " + e.getMessage()); } } } - protected void printCert(X509Certificate cert, Logger log) { - log.info(String.format(OUTPUT_FORMAT, cert.getSerialNumber(), + protected void printCert(X509Certificate cert) { + System.out.printf(OUTPUT_FORMAT, cert.getSerialNumber(), cert.getNotBefore(), cert.getNotAfter(), cert.getSubjectDN(), - cert.getIssuerDN())); + cert.getIssuerDN()); } protected abstract void execute(SCMSecurityProtocol client) diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java index 9eedbf858958..313dc64c9fc9 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java @@ -25,8 +25,6 @@ import org.apache.hadoop.hdds.scm.container.common.helpers .ContainerWithPipeline; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import picocli.CommandLine.Option; @@ -40,9 +38,6 @@ versionProvider = HddsVersionProvider.class) public class CreateSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(CreateSubcommand.class); - @Option(description = "Owner of the new container", defaultValue = "OZONE", names = { "-o", "--owner"}) private String owner; @@ -50,7 +45,7 @@ public class CreateSubcommand extends ScmSubcommand { @Override public void execute(ScmClient scmClient) throws IOException { ContainerWithPipeline container = scmClient.createContainer(owner); - LOG.info("Container {} is created.", + System.out.printf("Container %s is created.%n", container.getContainerInfo().getContainerID()); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java index 8ed9f520b29d..0e67661bba1d 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java @@ -45,8 +45,6 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; import org.apache.hadoop.hdds.server.JsonUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine; import picocli.CommandLine.Command; import picocli.CommandLine.Model.CommandSpec; @@ -63,9 +61,6 @@ versionProvider = HddsVersionProvider.class) public class InfoSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(InfoSubcommand.class); - @Spec private CommandSpec spec; @@ -126,13 +121,13 @@ private void printOutput(ScmClient scmClient, String id, boolean first) private void printHeader() { 
if (json && multiContainer) { - LOG.info("["); + System.out.println("["); } } private void printFooter() { if (json && multiContainer) { - LOG.info("]"); + System.out.println("]"); } } @@ -142,9 +137,9 @@ private void printError(String error) { private void printBreak() { if (json) { - LOG.info(","); + System.out.println(","); } else { - LOG.info(""); + System.out.println(""); } } @@ -175,47 +170,47 @@ private void printDetails(ScmClient scmClient, long containerID, new ContainerWithPipelineAndReplicas(container.getContainerInfo(), container.getPipeline(), replicas, container.getContainerInfo().getPipelineID()); - LOG.info(JsonUtils.toJsonStringWithDefaultPrettyPrinter(wrapper)); + System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(wrapper)); } else { ContainerWithoutDatanodes wrapper = new ContainerWithoutDatanodes(container.getContainerInfo(), container.getPipeline(), replicas, container.getContainerInfo().getPipelineID()); - LOG.info(JsonUtils.toJsonStringWithDefaultPrettyPrinter(wrapper)); + System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(wrapper)); } } else { // Print container report info. - LOG.info("Container id: {}", containerID); + System.out.printf("Container id: %s%n", containerID); boolean verbose = spec != null && spec.root().userObject() instanceof GenericParentCommand && ((GenericParentCommand) spec.root().userObject()).isVerbose(); if (verbose) { - LOG.info("Pipeline Info: {}", container.getPipeline()); + System.out.printf("Pipeline Info: %s%n", container.getPipeline()); } else { - LOG.info("Pipeline id: {}", container.getPipeline().getId().getId()); + System.out.printf("Pipeline id: %s%n", container.getPipeline().getId().getId()); } - LOG.info("Write PipelineId: {}", + System.out.printf("Write PipelineId: %s%n", container.getContainerInfo().getPipelineID().getId()); try { String pipelineState = scmClient.getPipeline( container.getContainerInfo().getPipelineID().getProtobuf()) .getPipelineState().toString(); - LOG.info("Write Pipeline State: {}", pipelineState); + System.out.printf("Write Pipeline State: %s%n", pipelineState); } catch (IOException ioe) { if (SCMHAUtils.unwrapException( ioe) instanceof PipelineNotFoundException) { - LOG.info("Write Pipeline State: CLOSED"); + System.out.println("Write Pipeline State: CLOSED"); } else { printError("Failed to retrieve pipeline info"); } } - LOG.info("Container State: {}", container.getContainerInfo().getState()); + System.out.printf("Container State: %s%n", container.getContainerInfo().getState()); // Print pipeline of an existing container. 
String machinesStr = container.getPipeline().getNodes().stream().map( InfoSubcommand::buildDatanodeDetails) .collect(Collectors.joining(",\n")); - LOG.info("Datanodes: [{}]", machinesStr); + System.out.printf("Datanodes: [%s]%n", machinesStr); // Print the replica details if available if (replicas != null) { @@ -223,7 +218,7 @@ private void printDetails(ScmClient scmClient, long containerID, .sorted(Comparator.comparing(ContainerReplicaInfo::getReplicaIndex)) .map(InfoSubcommand::buildReplicaDetails) .collect(Collectors.joining(",\n")); - LOG.info("Replicas: [{}]", replicaStr); + System.out.printf("Replicas: [%s]%n", replicaStr); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java index b120fe4169da..ecc43d04087a 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java @@ -36,8 +36,6 @@ import com.fasterxml.jackson.databind.ObjectWriter; import com.fasterxml.jackson.databind.SerializationFeature; import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import picocli.CommandLine.Help.Visibility; import picocli.CommandLine.Option; @@ -52,9 +50,6 @@ versionProvider = HddsVersionProvider.class) public class ListSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(ListSubcommand.class); - @Option(names = {"-s", "--start"}, description = "Container id to start the iteration") private long startId; @@ -94,7 +89,7 @@ public class ListSubcommand extends ScmSubcommand { private void outputContainerInfo(ContainerInfo containerInfo) throws IOException { // Print container report info. 
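ListSubcommand above now prints each container as JSON through a Jackson ObjectWriter. A minimal Jackson sketch of that output path; the real WRITER also registers JavaTimeModule and other serialization settings not shown here, and the map below is illustrative only:

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
import java.util.LinkedHashMap;
import java.util.Map;

public class JsonOutputSketch {
  public static void main(String[] args) throws Exception {
    ObjectWriter writer = new ObjectMapper().writerWithDefaultPrettyPrinter();
    Map<String, Object> container = new LinkedHashMap<>();
    container.put("containerID", 1);
    container.put("state", "CLOSED");
    System.out.println(writer.writeValueAsString(container));
  }
}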
- LOG.info("{}", WRITER.writeValueAsString(containerInfo)); + System.out.println(WRITER.writeValueAsString(containerInfo)); } @Override diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java index b53632f8eec5..b146d68a587f 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hdds.scm.cli.datanode; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Strings; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -24,9 +28,15 @@ import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.server.JsonUtils; import picocli.CommandLine; import java.io.IOException; +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.LinkedHashMap; +import java.util.ArrayList; +import java.util.Date; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -55,6 +65,11 @@ public class DecommissionStatusSubCommand extends ScmSubcommand { defaultValue = "") private String ipAddress; + @CommandLine.Option(names = { "--json" }, + description = "Show output in json format", + defaultValue = "false") + private boolean json; + @Override public void execute(ScmClient scmClient) throws IOException { List decommissioningNodes; @@ -77,21 +92,115 @@ public void execute(ScmClient scmClient) throws IOException { } } else { decommissioningNodes = allNodes.collect(Collectors.toList()); - System.out.println("\nDecommission Status: DECOMMISSIONING - " + - decommissioningNodes.size() + " node(s)"); + if (!json) { + System.out.println("\nDecommission Status: DECOMMISSIONING - " + + decommissioningNodes.size() + " node(s)"); + } + } + + String metricsJson = scmClient.getMetrics("Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics"); + int numDecomNodes = -1; + JsonNode jsonNode = null; + if (metricsJson != null) { + ObjectMapper objectMapper = new ObjectMapper(); + JsonFactory factory = objectMapper.getFactory(); + JsonParser parser = factory.createParser(metricsJson); + jsonNode = (JsonNode) objectMapper.readTree(parser).get("beans").get(0); + JsonNode totalDecom = jsonNode.get("DecommissioningMaintenanceNodesTotal"); + numDecomNodes = (totalDecom == null ? 
-1 : Integer.parseInt(totalDecom.toString())); + } + + if (json) { + List> decommissioningNodesDetails = new ArrayList<>(); + + for (HddsProtos.Node node : decommissioningNodes) { + DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf( + node.getNodeID()); + Map datanodeMap = new LinkedHashMap<>(); + datanodeMap.put("datanodeDetails", datanode); + datanodeMap.put("metrics", getCounts(datanode, jsonNode, numDecomNodes)); + datanodeMap.put("containers", getContainers(scmClient, datanode)); + decommissioningNodesDetails.add(datanodeMap); + } + System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(decommissioningNodesDetails)); + return; } for (HddsProtos.Node node : decommissioningNodes) { DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf( node.getNodeID()); printDetails(datanode); + printCounts(datanode, jsonNode, numDecomNodes); Map> containers = scmClient.getContainersOnDecomNode(datanode); System.out.println(containers); } } + + private String errorMessage = "Error getting pipeline and container metrics for "; + + public String getErrorMessage() { + return errorMessage; + } + + public void setErrorMessage(String errorMessage) { + this.errorMessage = errorMessage; + } + private void printDetails(DatanodeDetails datanode) { System.out.println("\nDatanode: " + datanode.getUuid().toString() + " (" + datanode.getNetworkLocation() + "/" + datanode.getIpAddress() + "/" + datanode.getHostName() + ")"); } + + private void printCounts(DatanodeDetails datanode, JsonNode counts, int numDecomNodes) { + Map countsMap = getCounts(datanode, counts, numDecomNodes); + System.out.println("Decommission Started At : " + countsMap.get("decommissionStartTime")); + System.out.println("No. of Unclosed Pipelines: " + countsMap.get("numOfUnclosedPipelines")); + System.out.println("No. of UnderReplicated Containers: " + countsMap.get("numOfUnderReplicatedContainers")); + System.out.println("No. of Unclosed Containers: " + countsMap.get("numOfUnclosedContainers")); + } + + private Map getCounts(DatanodeDetails datanode, JsonNode counts, int numDecomNodes) { + Map countsMap = new LinkedHashMap<>(); + String errMsg = getErrorMessage() + datanode.getHostName(); + try { + for (int i = 1; i <= numDecomNodes; i++) { + if (datanode.getHostName().equals(counts.get("tag.datanode." + i).asText())) { + JsonNode pipelinesDN = counts.get("PipelinesWaitingToCloseDN." + i); + JsonNode underReplicatedDN = counts.get("UnderReplicatedDN." + i); + JsonNode unclosedDN = counts.get("UnclosedContainersDN." + i); + JsonNode startTimeDN = counts.get("StartTimeDN." 
+ i); + if (pipelinesDN == null || underReplicatedDN == null || unclosedDN == null || startTimeDN == null) { + throw new IOException(errMsg); + } + + int pipelines = Integer.parseInt(pipelinesDN.toString()); + double underReplicated = Double.parseDouble(underReplicatedDN.toString()); + double unclosed = Double.parseDouble(unclosedDN.toString()); + long startTime = Long.parseLong(startTimeDN.toString()); + Date date = new Date(startTime); + DateFormat formatter = new SimpleDateFormat("dd/MM/yyyy hh:mm:ss z"); + countsMap.put("decommissionStartTime", formatter.format(date)); + countsMap.put("numOfUnclosedPipelines", pipelines); + countsMap.put("numOfUnderReplicatedContainers", underReplicated); + countsMap.put("numOfUnclosedContainers", unclosed); + return countsMap; + } + } + System.err.println(errMsg); + } catch (IOException e) { + System.err.println(errMsg); + } + return countsMap; + } + + private Map getContainers(ScmClient scmClient, DatanodeDetails datanode) throws IOException { + Map> containers = scmClient.getContainersOnDecomNode(datanode); + return containers.entrySet().stream() + .collect(Collectors.toMap( + Map.Entry::getKey, + entry -> entry.getValue().stream(). + map(ContainerID::toString). + collect(Collectors.toList()))); + } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java index e7d3a4443831..31123ae81b51 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java @@ -48,6 +48,11 @@ public class DecommissionSubCommand extends ScmSubcommand { paramLabel = "") private List parameters = new ArrayList<>(); + @CommandLine.Option(names = { "--force" }, + defaultValue = "false", + description = "Forcefully try to decommission the datanode(s)") + private boolean force; + @Override public void execute(ScmClient scmClient) throws IOException { if (parameters.size() > 0) { @@ -62,7 +67,7 @@ public void execute(ScmClient scmClient) throws IOException { } else { hosts = parameters; } - List errors = scmClient.decommissionNodes(hosts); + List errors = scmClient.decommissionNodes(hosts, force); System.out.println("Started decommissioning datanode(s):\n" + String.join("\n", hosts)); if (errors.size() > 0) { diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java index d8c1addb78e0..efc11d550f55 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java @@ -28,9 +28,6 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.Logger; -import org.apache.log4j.spi.LoggingEvent; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -44,6 +41,7 @@ import java.io.UnsupportedEncodingException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import 
java.util.UUID; import java.util.regex.Matcher; @@ -52,6 +50,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -69,8 +68,6 @@ public class TestInfoSubCommand { private ScmClient scmClient; private InfoSubcommand cmd; private List datanodes; - private Logger logger; - private TestAppender appender; private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); @@ -88,18 +85,12 @@ public void setup() throws IOException { when(scmClient.getContainerWithPipeline(anyLong())).then(i -> getContainerWithPipeline(i.getArgument(0))); when(scmClient.getPipeline(any())).thenThrow(new PipelineNotFoundException("Pipeline not found.")); - appender = new TestAppender(); - logger = Logger.getLogger( - org.apache.hadoop.hdds.scm.cli.container.InfoSubcommand.class); - logger.addAppender(appender); - System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); } @AfterEach public void after() { - logger.removeAppender(appender); System.setOut(originalOut); System.setErr(originalErr); System.setIn(originalIn); @@ -150,10 +141,8 @@ public void testContainersCanBeReadFromStdin() throws IOException { private void validateMultiOutput() throws UnsupportedEncodingException { // Ensure we have a log line for each containerID - List logs = appender.getLog(); - List replica = logs.stream() - .filter(m -> m.getRenderedMessage() - .matches("(?s)^Container id: (1|123|456|789).*")) + List replica = Arrays.stream(outContent.toString(DEFAULT_ENCODING).split("\n")) + .filter(m -> m.matches("(?s)^Container id: (1|123|456|789).*")) .collect(Collectors.toList()); assertEquals(4, replica.size()); @@ -191,10 +180,8 @@ public void testMultipleContainersCanBePassedJson() throws Exception { private void validateJsonMultiOutput() throws UnsupportedEncodingException { // Ensure we have a log line for each containerID - List logs = appender.getLog(); - List replica = logs.stream() - .filter(m -> m.getRenderedMessage() - .matches("(?s)^.*\"containerInfo\".*")) + List replica = Arrays.stream(outContent.toString(DEFAULT_ENCODING).split("\n")) + .filter(m -> m.matches("(?s)^.*\"containerInfo\".*")) .collect(Collectors.toList()); assertEquals(4, replica.size()); @@ -213,34 +200,33 @@ private void testReplicaIncludedInOutput(boolean includeIndex) cmd.execute(scmClient); // Ensure we have a line for Replicas: - List logs = appender.getLog(); - List replica = logs.stream() - .filter(m -> m.getRenderedMessage().matches("(?s)^Replicas:.*")) - .collect(Collectors.toList()); - assertEquals(1, replica.size()); + String output = outContent.toString(DEFAULT_ENCODING); + Pattern pattern = Pattern.compile("Replicas: \\[.*\\]", Pattern.DOTALL); + Matcher matcher = pattern.matcher(output); + assertTrue(matcher.find()); + String replica = matcher.group(); // Ensure each DN UUID is mentioned in the message: for (DatanodeDetails dn : datanodes) { - Pattern pattern = Pattern.compile(".*" + dn.getUuid().toString() + ".*", + Pattern uuidPattern = Pattern.compile(".*" + dn.getUuid().toString() + ".*", Pattern.DOTALL); - Matcher 
matcher = pattern.matcher(replica.get(0).getRenderedMessage()); - assertTrue(matcher.matches()); + assertThat(replica).matches(uuidPattern); } // Ensure the replicaIndex output is in order if (includeIndex) { List indexList = new ArrayList<>(); for (int i = 1; i < datanodes.size() + 1; i++) { String temp = "ReplicaIndex: " + i; - indexList.add(replica.get(0).getRenderedMessage().indexOf(temp)); + indexList.add(replica.indexOf(temp)); } assertEquals(datanodes.size(), indexList.size()); assertTrue(inSort(indexList)); } // Ensure ReplicaIndex is not mentioned as it was not passed in the proto: - Pattern pattern = Pattern.compile(".*ReplicaIndex.*", - Pattern.DOTALL); - Matcher matcher = pattern.matcher(replica.get(0).getRenderedMessage()); - assertEquals(includeIndex, matcher.matches()); + assertEquals(includeIndex, + Pattern.compile(".*ReplicaIndex.*", Pattern.DOTALL) + .matcher(replica) + .matches()); } @Test @@ -253,9 +239,8 @@ public void testReplicasNotOutputIfError() throws IOException { cmd.execute(scmClient); // Ensure we have no lines for Replicas: - List logs = appender.getLog(); - List replica = logs.stream() - .filter(m -> m.getRenderedMessage().matches("(?s)^Replicas:.*")) + List replica = Arrays.stream(outContent.toString(DEFAULT_ENCODING).split("\n")) + .filter(m -> m.matches("(?s)^Replicas:.*")) .collect(Collectors.toList()); assertEquals(0, replica.size()); @@ -274,9 +259,7 @@ public void testReplicasNotOutputIfErrorWithJson() throws IOException { c.parseArgs("1", "--json"); cmd.execute(scmClient); - List logs = appender.getLog(); - assertEquals(1, logs.size()); - String json = logs.get(0).getRenderedMessage(); + String json = outContent.toString(DEFAULT_ENCODING); assertFalse(json.matches("(?s).*replicas.*")); } @@ -310,11 +293,8 @@ private void testJsonOutput() throws IOException { c.parseArgs("1", "--json"); cmd.execute(scmClient); - List logs = appender.getLog(); - assertEquals(1, logs.size()); - // Ensure each DN UUID is mentioned in the message after replicas: - String json = logs.get(0).getRenderedMessage(); + String json = outContent.toString(DEFAULT_ENCODING); assertTrue(json.matches("(?s).*replicas.*")); for (DatanodeDetails dn : datanodes) { Pattern pattern = Pattern.compile( @@ -409,25 +389,4 @@ private List createDatanodeDetails(int count) { return dns; } - private static class TestAppender extends AppenderSkeleton { - private final List log = new ArrayList<>(); - - @Override - public boolean requiresLayout() { - return false; - } - - @Override - protected void append(final LoggingEvent loggingEvent) { - log.add(loggingEvent); - } - - @Override - public void close() { - } - - public List getLog() { - return new ArrayList<>(log); - } - } } diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java index 3be931c13211..b3c15a46f76f 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java @@ -22,6 +22,8 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.utils.db.CodecBuffer; +import org.apache.hadoop.hdds.utils.db.CodecTestUtil; import org.apache.hadoop.hdds.utils.db.Table; 
import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.Checksum; @@ -44,6 +46,8 @@ import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition; import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -135,6 +139,16 @@ public void setup() throws Exception { chunkManager = new FilePerBlockStrategy(true, blockManager, null); } + @BeforeAll + public static void beforeClass() { + CodecBuffer.enableLeakDetection(); + } + + @AfterEach + public void after() throws Exception { + CodecTestUtil.gc(); + } + @Test public void testUpgrade() throws IOException { int num = 2; @@ -187,7 +201,7 @@ private Map putAnyBlockData(KeyValueContainerData data, private void putChunksInBlock(int numOfChunksPerBlock, int i, List chunks, KeyValueContainer container, BlockID blockID) { - long chunkLength = 100; + final long chunkLength = 100; try { for (int k = 0; k < numOfChunksPerBlock; k++) { final String chunkName = String.format("%d_chunk_%d_block_%d", @@ -199,11 +213,10 @@ private void putChunksInBlock(int numOfChunksPerBlock, int i, .setChecksumData(Checksum.getNoChecksumDataProto()).build(); chunks.add(info); ChunkInfo chunkInfo = new ChunkInfo(chunkName, offset, chunkLength); - final ChunkBuffer chunkData = ChunkBuffer.allocate((int) chunkLength); - chunkManager - .writeChunk(container, blockID, chunkInfo, chunkData, WRITE_STAGE); - chunkManager - .writeChunk(container, blockID, chunkInfo, chunkData, COMMIT_STAGE); + try (ChunkBuffer chunkData = ChunkBuffer.allocate((int) chunkLength)) { + chunkManager.writeChunk(container, blockID, chunkInfo, chunkData, WRITE_STAGE); + chunkManager.writeChunk(container, blockID, chunkInfo, chunkData, COMMIT_STAGE); + } } } catch (IOException ex) { LOG.warn("Putting chunks in blocks was not successful for BlockID: " diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java index e271cdfe0298..27c360e72743 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java @@ -104,7 +104,10 @@ public void testContainerBalancerStopSubcommand() throws IOException { ScmClient scmClient = mock(ScmClient.class); stopCmd.execute(scmClient); - Pattern p = Pattern.compile("^Stopping\\sContainerBalancer..."); + Pattern p = Pattern.compile("^Sending\\sstop\\scommand." 
+ + "\\sWaiting\\sfor\\sContainer\\sBalancer\\sto\\sstop...\\n" + + "Container\\sBalancer\\sstopped."); + Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); } @@ -114,7 +117,7 @@ public void testContainerBalancerStartSubcommandWhenBalancerIsNotRunning() throws IOException { ScmClient scmClient = mock(ScmClient.class); when(scmClient.startContainerBalancer( - null, null, null, null, null, null)) + null, null, null, null, null, null, null, null, null, null, null, null)) .thenReturn( StorageContainerLocationProtocolProtos .StartContainerBalancerResponseProto.newBuilder() @@ -133,7 +136,7 @@ public void testContainerBalancerStartSubcommandWhenBalancerIsRunning() throws IOException { ScmClient scmClient = mock(ScmClient.class); when(scmClient.startContainerBalancer( - null, null, null, null, null, null)) + null, null, null, null, null, null, null, null, null, null, null, null)) .thenReturn(StorageContainerLocationProtocolProtos .StartContainerBalancerResponseProto.newBuilder() .setStart(false) diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java index 41c31caf1f0a..fce593ab8c35 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java @@ -60,6 +60,7 @@ public class TestDecommissionStatusSubCommand { private DecommissionStatusSubCommand cmd; private List nodes = getNodeDetails(2); private Map> containerOnDecom = getContainersOnDecomNodes(); + private ArrayList metrics = getMetrics(); @BeforeEach public void setup() throws UnsupportedEncodingException { @@ -80,6 +81,7 @@ public void testSuccessWhenDecommissionStatus() throws IOException { when(scmClient.queryNode(any(), any(), any(), any())) .thenAnswer(invocation -> nodes); // 2 nodes decommissioning when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom); + when(scmClient.getMetrics(any())).thenReturn(metrics.get(1)); cmd.execute(scmClient); Pattern p = Pattern.compile("Decommission\\sStatus:\\s" + @@ -91,15 +93,17 @@ public void testSuccessWhenDecommissionStatus() throws IOException { p = Pattern.compile("Datanode:\\s.*host0\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); - p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host1\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); - p = Pattern.compile("Datanode:\\s.*host1\\)"); + p = Pattern.compile("No\\. 
of Unclosed Pipelines:"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); - p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + assertTrue(m.find()); // metrics for both are shown + p = Pattern.compile("UnderReplicated=.* UnClosed="); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); + assertTrue(m.find()); // container lists for both are shown } @Test @@ -109,6 +113,7 @@ public void testNoNodesWhenDecommissionStatus() throws IOException { when(scmClient.queryNode(any(), any(), any(), any())) .thenReturn(new ArrayList<>()); when(scmClient.getContainersOnDecomNode(any())).thenReturn(new HashMap<>()); + when(scmClient.getMetrics(any())).thenReturn(metrics.get(0)); cmd.execute(scmClient); Pattern p = Pattern.compile("Decommission\\sStatus:\\s" + @@ -117,10 +122,10 @@ public void testNoNodesWhenDecommissionStatus() throws IOException { assertTrue(m.find()); // no host details are shown - p = Pattern.compile("Datanode:\\s.*host0\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host0\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); - p = Pattern.compile("Datanode:\\s.*host1.\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host1.\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); } @@ -131,24 +136,22 @@ public void testIdOptionDecommissionStatusSuccess() throws IOException { when(scmClient.queryNode(any(), any(), any(), any())) .thenAnswer(invocation -> nodes); // 2 nodes decommissioning when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom); + when(scmClient.getMetrics(any())).thenReturn(metrics.get(1)); CommandLine c = new CommandLine(cmd); c.parseArgs("--id", nodes.get(0).getNodeID().getUuid()); cmd.execute(scmClient); // check status of host0 - Pattern p = Pattern.compile("Datanode:\\s.*host0\\)", Pattern.MULTILINE); + Pattern p = Pattern.compile("Datanode:\\s.*host0\\)"); Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); - p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); - m = p.matcher(outContent.toString(DEFAULT_ENCODING)); - assertTrue(m.find()); - // as uuid of only host0 is passed, host1 should NOT be displayed - p = Pattern.compile("Datanode:\\s.*host1.\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host1.\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); - p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + p = Pattern.compile("UnderReplicated=.*UnClosed="); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); assertFalse(m.find()); } @@ -161,6 +164,7 @@ public void testIdOptionDecommissionStatusFail() throws IOException { .thenReturn(containerOnDecom); when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(1).getNodeID()))) .thenReturn(new HashMap<>()); + when(scmClient.getMetrics(any())).thenReturn(metrics.get(2)); CommandLine c = new CommandLine(cmd); c.parseArgs("--id", nodes.get(1).getNodeID().getUuid()); @@ -172,10 +176,10 @@ public void testIdOptionDecommissionStatusFail() throws IOException { assertTrue(m.find()); // no host details are shown - p = Pattern.compile("Datanode:\\s.*host0\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host0\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); - p = 
Pattern.compile("Datanode:\\s.*host1\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host1\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); } @@ -186,24 +190,22 @@ public void testIpOptionDecommissionStatusSuccess() throws IOException { when(scmClient.queryNode(any(), any(), any(), any())) .thenAnswer(invocation -> nodes); // 2 nodes decommissioning when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom); + when(scmClient.getMetrics(any())).thenReturn(metrics.get(1)); CommandLine c = new CommandLine(cmd); c.parseArgs("--ip", nodes.get(1).getNodeID().getIpAddress()); cmd.execute(scmClient); // check status of host1 - Pattern p = Pattern.compile("Datanode:\\s.*host1\\)", Pattern.MULTILINE); + Pattern p = Pattern.compile("Datanode:\\s.*host1\\)"); Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); - p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); - m = p.matcher(outContent.toString(DEFAULT_ENCODING)); - assertTrue(m.find()); - // as IpAddress of only host1 is passed, host0 should NOT be displayed - p = Pattern.compile("Datanode:\\s.*host0.\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host0.\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); - p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + p = Pattern.compile("UnderReplicated=.*UnClosed="); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); assertFalse(m.find()); } @@ -216,6 +218,7 @@ public void testIpOptionDecommissionStatusFail() throws IOException { .thenReturn(containerOnDecom); when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(1).getNodeID()))) .thenReturn(new HashMap<>()); + when(scmClient.getMetrics(any())).thenReturn(metrics.get(2)); CommandLine c = new CommandLine(cmd); c.parseArgs("--ip", nodes.get(1).getNodeID().getIpAddress()); @@ -226,11 +229,11 @@ public void testIpOptionDecommissionStatusFail() throws IOException { Matcher m = p.matcher(errContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); - p = Pattern.compile("Datanode:\\s.*host0\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host0\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); - p = Pattern.compile("Datanode:\\s.*host1\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host1\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); } @@ -275,4 +278,38 @@ private Map> getContainersOnDecomNodes() { return containerMap; } + private ArrayList getMetrics() { + ArrayList result = new ArrayList<>(); + // no nodes decommissioning + result.add("{ \"beans\" : [ { " + + "\"name\" : \"Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics\", " + + "\"modelerType\" : \"NodeDecommissionMetrics\", \"DecommissioningMaintenanceNodesTotal\" : 0, " + + "\"RecommissionNodesTotal\" : 0, \"PipelinesWaitingToCloseTotal\" : 0, " + + "\"ContainersUnderReplicatedTotal\" : 0, \"ContainersUnClosedTotal\" : 0, " + + "\"ContainersSufficientlyReplicatedTotal\" : 0 } ]}"); + // 2 nodes in decommisioning + result.add("{ \"beans\" : [ { " + + "\"name\" : \"Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics\", " + + "\"modelerType\" : \"NodeDecommissionMetrics\", \"DecommissioningMaintenanceNodesTotal\" : 2, " + + "\"RecommissionNodesTotal\" : 0, \"PipelinesWaitingToCloseTotal\" : 2, " + + 
"\"ContainersUnderReplicatedTotal\" : 6, \"ContainersUnclosedTotal\" : 6, " + + "\"ContainersSufficientlyReplicatedTotal\" : 10, " + + "\"tag.datanode.1\" : \"host0\", \"tag.Hostname.1\" : \"host0\", " + + "\"PipelinesWaitingToCloseDN.1\" : 1, \"UnderReplicatedDN.1\" : 3, " + + "\"SufficientlyReplicatedDN.1\" : 0, \"UnclosedContainersDN.1\" : 3, \"StartTimeDN.1\" : 111211, " + + "\"tag.datanode.2\" : \"host1\", \"tag.Hostname.2\" : \"host1\", " + + "\"PipelinesWaitingToCloseDN.2\" : 1, \"UnderReplicatedDN.2\" : 3, " + + "\"SufficientlyReplicatedDN.2\" : 0, \"UnclosedContainersDN.2\" : 3, \"StartTimeDN.2\" : 221221} ]}"); + // only host 1 decommissioning + result.add("{ \"beans\" : [ { " + + "\"name\" : \"Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics\", " + + "\"modelerType\" : \"NodeDecommissionMetrics\", \"DecommissioningMaintenanceNodesTotal\" : 1, " + + "\"RecommissionNodesTotal\" : 0, \"PipelinesWaitingToCloseTotal\" : 1, " + + "\"ContainersUnderReplicatedTotal\" : 3, \"ContainersUnclosedTotal\" : 3, " + + "\"ContainersSufficientlyReplicatedTotal\" : 10, " + + "\"tag.datanode.1\" : \"host0\",\n \"tag.Hostname.1\" : \"host0\",\n " + + "\"PipelinesWaitingToCloseDN.1\" : 1,\n \"UnderReplicatedDN.1\" : 3,\n " + + "\"SufficientlyReplicatedDN.1\" : 0,\n \"UnclosedContainersDN.1\" : 3, \"StartTimeDN.1\" : 221221} ]}"); + return result; + } } diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java index e7e01ffaa1af..d6f0f8ae8267 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java @@ -37,6 +37,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -71,7 +72,7 @@ public void tearDown() { @Test public void testMultipleHostnamesCanBeReadFromStdin() throws Exception { - when(scmClient.decommissionNodes(anyList())) + when(scmClient.decommissionNodes(anyList(), anyBoolean())) .thenAnswer(invocation -> new ArrayList()); String input = "host1\nhost2\nhost3\n"; @@ -100,7 +101,7 @@ public void testMultipleHostnamesCanBeReadFromStdin() throws Exception { @Test public void testNoErrorsWhenDecommissioning() throws IOException { - when(scmClient.decommissionNodes(anyList())) + when(scmClient.decommissionNodes(anyList(), anyBoolean())) .thenAnswer(invocation -> new ArrayList()); CommandLine c = new CommandLine(cmd); @@ -123,7 +124,7 @@ public void testNoErrorsWhenDecommissioning() throws IOException { @Test public void testErrorsReportedWhenDecommissioning() throws IOException { - when(scmClient.decommissionNodes(anyList())) + when(scmClient.decommissionNodes(anyList(), anyBoolean())) .thenAnswer(invocation -> { ArrayList e = new ArrayList<>(); e.add(new DatanodeAdminError("host1", "host1 error")); diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java index d2a4c54b8bf2..a6225d1b5da6 100644 --- 
a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java @@ -37,6 +37,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.mock; @@ -72,7 +73,7 @@ public void tearDown() { @Test public void testMultipleHostnamesCanBeReadFromStdin() throws Exception { - when(scmClient.decommissionNodes(anyList())) + when(scmClient.decommissionNodes(anyList(), anyBoolean())) .thenAnswer(invocation -> new ArrayList()); String input = "host1\nhost2\nhost3\n"; diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java index e274cd4fd544..083ada8a4207 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java @@ -37,6 +37,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -71,7 +72,7 @@ public void tearDown() { @Test public void testMultipleHostnamesCanBeReadFromStdin() throws Exception { - when(scmClient.decommissionNodes(anyList())) + when(scmClient.decommissionNodes(anyList(), anyBoolean())) .thenAnswer(invocation -> new ArrayList()); String input = "host1\nhost2\nhost3\n"; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java index 687605987a68..fee94c55f9ab 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java @@ -18,12 +18,15 @@ package org.apache.hadoop.ozone.client; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -37,80 +40,60 @@ public final class BucketArgs { /** * ACL Information. */ - private List acls; + private final ImmutableList acls; /** * Bucket Version flag. */ - private Boolean versioning; + private final boolean versioning; /** * Type of storage to be used for this bucket. * [RAM_DISK, SSD, DISK, ARCHIVE] */ - private StorageType storageType; + private final StorageType storageType; /** * Custom key/value metadata. */ - private Map metadata; + private final Map metadata; /** * Bucket encryption key name. 
*/ - private String bucketEncryptionKey; - private DefaultReplicationConfig defaultReplicationConfig; + private final String bucketEncryptionKey; + private final DefaultReplicationConfig defaultReplicationConfig; private final String sourceVolume; private final String sourceBucket; - private long quotaInBytes; - private long quotaInNamespace; + private final long quotaInBytes; + private final long quotaInNamespace; - private String owner; + private final String owner; /** * Bucket Layout. */ - private BucketLayout bucketLayout = BucketLayout.DEFAULT; - - /** - * Private constructor, constructed via builder. - * @param versioning Bucket version flag. - * @param storageType Storage type to be used. - * @param acls list of ACLs. - * @param metadata map of bucket metadata - * @param bucketEncryptionKey bucket encryption key name - * @param sourceVolume - * @param sourceBucket - * @param quotaInBytes Bucket quota in bytes. - * @param quotaInNamespace Bucket quota in counts. - * @param bucketLayout bucket layout. - * @param owner owner of the bucket. - * @param defaultReplicationConfig default replication config. - */ - @SuppressWarnings("parameternumber") - private BucketArgs(Boolean versioning, StorageType storageType, - List acls, Map metadata, - String bucketEncryptionKey, String sourceVolume, String sourceBucket, - long quotaInBytes, long quotaInNamespace, BucketLayout bucketLayout, - String owner, DefaultReplicationConfig defaultReplicationConfig) { - this.acls = acls; - this.versioning = versioning; - this.storageType = storageType; - this.metadata = metadata; - this.bucketEncryptionKey = bucketEncryptionKey; - this.sourceVolume = sourceVolume; - this.sourceBucket = sourceBucket; - this.quotaInBytes = quotaInBytes; - this.quotaInNamespace = quotaInNamespace; - this.bucketLayout = bucketLayout; - this.owner = owner; - this.defaultReplicationConfig = defaultReplicationConfig; + private final BucketLayout bucketLayout; + + private BucketArgs(Builder b) { + acls = b.acls == null ? ImmutableList.of() : ImmutableList.copyOf(b.acls); + versioning = b.versioning; + storageType = b.storageType; + metadata = b.metadata == null ? ImmutableMap.of() : ImmutableMap.copyOf(b.metadata); + bucketEncryptionKey = b.bucketEncryptionKey; + sourceVolume = b.sourceVolume; + sourceBucket = b.sourceBucket; + quotaInBytes = b.quotaInBytes; + quotaInNamespace = b.quotaInNamespace; + bucketLayout = b.bucketLayout; + owner = b.owner; + defaultReplicationConfig = b.defaultReplicationConfig; } /** * Returns true if bucket version is enabled, else false. * @return isVersionEnabled */ - public Boolean getVersioning() { + public boolean getVersioning() { return versioning; } @@ -206,7 +189,7 @@ public String getOwner() { * Builder for OmBucketInfo. 
*/ public static class Builder { - private Boolean versioning; + private boolean versioning; private StorageType storageType; private List acls; private Map metadata; @@ -220,12 +203,11 @@ public static class Builder { private DefaultReplicationConfig defaultReplicationConfig; public Builder() { - metadata = new HashMap<>(); quotaInBytes = OzoneConsts.QUOTA_RESET; quotaInNamespace = OzoneConsts.QUOTA_RESET; } - public BucketArgs.Builder setVersioning(Boolean versionFlag) { + public BucketArgs.Builder setVersioning(boolean versionFlag) { this.versioning = versionFlag; return this; } @@ -235,13 +217,19 @@ public BucketArgs.Builder setStorageType(StorageType storage) { return this; } - public BucketArgs.Builder setAcls(List listOfAcls) { - this.acls = listOfAcls; + public BucketArgs.Builder addAcl(OzoneAcl acl) { + if (acls == null) { + acls = new ArrayList<>(); + } + acls.add(acl); return this; } public BucketArgs.Builder addMetadata(String key, String value) { - this.metadata.put(key, value); + if (metadata == null) { + metadata = new HashMap<>(); + } + metadata.put(key, value); return this; } @@ -291,9 +279,7 @@ public BucketArgs.Builder setDefaultReplicationConfig( * @return instance of BucketArgs. */ public BucketArgs build() { - return new BucketArgs(versioning, storageType, acls, metadata, - bucketEncryptionKey, sourceVolume, sourceBucket, quotaInBytes, - quotaInNamespace, bucketLayout, owner, defaultReplicationConfig); + return new BucketArgs(this); } } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java index 481bdbbd5c2a..e96d0f84a437 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java @@ -565,6 +565,21 @@ public String createSnapshot(String volumeName, return proxy.createSnapshot(volumeName, bucketName, snapshotName); } + /** + * Rename snapshot. + * + * @param volumeName vol to be used + * @param bucketName bucket to be used + * @param snapshotOldName Old name of the snapshot + * @param snapshotNewName New name of the snapshot + * + * @throws IOException + */ + public void renameSnapshot(String volumeName, + String bucketName, String snapshotOldName, String snapshotNewName) throws IOException { + proxy.renameSnapshot(volumeName, bucketName, snapshotOldName, snapshotNewName); + } + /** * Delete snapshot. * @param volumeName vol to be used diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index ca885b3b6b06..112c76f8c0a8 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -154,7 +154,7 @@ public class OzoneBucket extends WithMetadata { private String owner; protected OzoneBucket(Builder builder) { - this.metadata = builder.metadata; + super(builder); this.proxy = builder.proxy; this.volumeName = builder.volumeName; this.name = builder.name; // bucket name @@ -954,8 +954,7 @@ public static Builder newBuilder(ConfigurationSource conf, /** * Inner builder for OzoneBucket. 
*/ - public static class Builder { - private Map metadata; + public static class Builder extends WithMetadata.Builder { private ConfigurationSource conf; private ClientProtocol proxy; private String volumeName; @@ -983,8 +982,9 @@ private Builder(ConfigurationSource conf, ClientProtocol proxy) { this.proxy = proxy; } + @Override public Builder setMetadata(Map metadata) { - this.metadata = metadata; + super.setMetadata(metadata); return this; } @@ -1253,7 +1253,7 @@ List getNextShallowListOfKeys(String prevKey) proxy.listStatusLight(volumeName, name, delimiterKeyPrefix, false, startKey, listCacheSize, false); - if (addedKeyPrefix) { + if (addedKeyPrefix && statuses.size() > 0) { // previous round already include the startKey, so remove it statuses.remove(0); } else { diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java index c1902cdb60d2..c085720d1918 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java @@ -98,18 +98,21 @@ public ReplicationConfig getReplicationConfig() { /** * Class that represents each Part information of a multipart upload part. */ - public static class PartInfo { + public static final class PartInfo { - private int partNumber; - private String partName; - private long modificationTime; - private long size; + private final int partNumber; + private final String partName; + private final long modificationTime; + private final long size; + private final String eTag; - public PartInfo(int number, String name, long time, long size) { + public PartInfo(int number, String name, long time, long size, + String eTag) { this.partNumber = number; this.partName = name; this.modificationTime = time; this.size = size; + this.eTag = eTag; } public int getPartNumber() { @@ -127,5 +130,9 @@ public long getModificationTime() { public long getSize() { return size; } + + public String getETag() { + return eTag; + } } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java index 47b50c042a27..9ab110aa2b55 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java @@ -106,7 +106,7 @@ public class OzoneVolume extends WithMetadata { private long refCount; protected OzoneVolume(Builder builder) { - this.metadata = builder.metadata; + super(builder); this.proxy = builder.proxy; this.name = builder.name; this.admin = builder.admin; @@ -409,8 +409,7 @@ public static Builder newBuilder(ConfigurationSource conf, /** * Inner builder for OzoneVolume. 
*/ - public static class Builder { - private Map metadata; + public static class Builder extends WithMetadata.Builder { private ConfigurationSource conf; private ClientProtocol proxy; private String name; @@ -482,8 +481,9 @@ public Builder setRefCount(long refCount) { return this; } + @Override public Builder setMetadata(Map metadata) { - this.metadata = metadata; + super.setMetadata(metadata); return this; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java index 9d683c5393c2..a1c9cd55bb3f 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java @@ -18,10 +18,13 @@ package org.apache.hadoop.ozone.client; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import java.io.IOException; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -36,8 +39,8 @@ public final class VolumeArgs { private final String owner; private final long quotaInBytes; private final long quotaInNamespace; - private final List acls; - private Map metadata; + private final ImmutableList acls; + private final ImmutableMap metadata; /** * Private constructor, constructed via builder. @@ -58,8 +61,8 @@ private VolumeArgs(String admin, this.owner = owner; this.quotaInBytes = quotaInBytes; this.quotaInNamespace = quotaInNamespace; - this.acls = acls; - this.metadata = metadata; + this.acls = acls == null ? ImmutableList.of() : ImmutableList.copyOf(acls); + this.metadata = metadata == null ? ImmutableMap.of() : ImmutableMap.copyOf(metadata); } /** @@ -107,34 +110,20 @@ public List getAcls() { return acls; } - /** - * Returns new builder class that builds a OmVolumeArgs. - * - * @return Builder - */ public static VolumeArgs.Builder newBuilder() { return new VolumeArgs.Builder(); } /** - * Builder for OmVolumeArgs. + * Builder for VolumeArgs. */ - @SuppressWarnings("checkstyle:hiddenfield") public static class Builder { private String adminName; private String ownerName; - private long quotaInBytes; - private long quotaInNamespace; - private List listOfAcls; - private Map metadata = new HashMap<>(); - - /** - * Constructs a builder. 
- */ - public Builder() { - quotaInBytes = OzoneConsts.QUOTA_RESET; - quotaInNamespace = OzoneConsts.QUOTA_RESET; - } + private long quotaInBytes = OzoneConsts.QUOTA_RESET; + private long quotaInNamespace = OzoneConsts.QUOTA_RESET; + private List acls; + private Map metadata; public VolumeArgs.Builder setAdmin(String admin) { this.adminName = admin; @@ -157,12 +146,18 @@ public VolumeArgs.Builder setQuotaInNamespace(long quota) { } public VolumeArgs.Builder addMetadata(String key, String value) { + if (metadata == null) { + metadata = new HashMap<>(); + } metadata.put(key, value); return this; } - public VolumeArgs.Builder setAcls(List acls) + public VolumeArgs.Builder addAcl(OzoneAcl acl) throws IOException { - this.listOfAcls = acls; + if (acls == null) { + acls = new ArrayList<>(); + } + acls.add(acl); return this; } @@ -172,7 +167,7 @@ public VolumeArgs.Builder setAcls(List acls) */ public VolumeArgs build() { return new VolumeArgs(adminName, ownerName, quotaInBytes, - quotaInNamespace, listOfAcls, metadata); + quotaInNamespace, acls, metadata); } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java index e0b82bebc3a8..220bef71491a 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECBlockChecksumComputer.java @@ -25,12 +25,13 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.util.DataChecksum; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.ByteArrayOutputStream; import java.io.IOException; import java.nio.ByteBuffer; +import java.security.MessageDigest; import java.util.List; @@ -42,8 +43,8 @@ public class ECBlockChecksumComputer extends AbstractBlockChecksumComputer { private static final Logger LOG = LoggerFactory.getLogger(ECBlockChecksumComputer.class); - private List chunkInfoList; - private OmKeyInfo keyInfo; + private final List chunkInfoList; + private final OmKeyInfo keyInfo; public ECBlockChecksumComputer( @@ -68,7 +69,7 @@ public void compute(OzoneClientConfig.ChecksumCombineMode combineMode) } - private void computeMd5Crc() throws IOException { + private void computeMd5Crc() { Preconditions.checkArgument(chunkInfoList.size() > 0); final ContainerProtos.ChunkInfo firstChunkInfo = chunkInfoList.get(0); @@ -77,32 +78,28 @@ private void computeMd5Crc() throws IOException { // Total parity checksum bytes per stripe to remove int parityBytes = getParityBytes(chunkSize, bytesPerCrc); - ByteArrayOutputStream out = new ByteArrayOutputStream(); + final MessageDigest digester = MD5Hash.getDigester(); for (ContainerProtos.ChunkInfo chunkInfo : chunkInfoList) { ByteString stripeChecksum = chunkInfo.getStripeChecksum(); Preconditions.checkNotNull(stripeChecksum); - byte[] checksumBytes = stripeChecksum.toByteArray(); - - Preconditions.checkArgument(checksumBytes.length % 4 == 0, + final int checksumSize = stripeChecksum.size(); + Preconditions.checkArgument(checksumSize % 4 == 0, "Checksum Bytes size does not match"); - ByteBuffer byteWrap = ByteBuffer - .wrap(checksumBytes, 0, checksumBytes.length - parityBytes); - byte[] currentChecksum = new byte[4]; - - while (byteWrap.hasRemaining()) { - 
byteWrap.get(currentChecksum); - out.write(currentChecksum); - } + final ByteBuffer byteWrap = stripeChecksum.asReadOnlyByteBuffer(); + byteWrap.limit(checksumSize - parityBytes); + digester.update(byteWrap); } - MD5Hash fileMD5 = MD5Hash.digest(out.toByteArray()); - setOutBytes(fileMD5.getDigest()); + final byte[] fileMD5 = digester.digest(); + setOutBytes(fileMD5); - LOG.debug("Number of chunks={}, md5hash={}", - chunkInfoList.size(), fileMD5); + if (LOG.isDebugEnabled()) { + LOG.debug("Number of chunks={}, md5hash={}", + chunkInfoList.size(), StringUtils.bytes2HexString(fileMD5)); + } } private void computeCompositeCrc() throws IOException { @@ -149,17 +146,15 @@ private void computeCompositeCrc() throws IOException { ByteString stripeChecksum = chunkInfo.getStripeChecksum(); Preconditions.checkNotNull(stripeChecksum); - byte[] checksumBytes = stripeChecksum.toByteArray(); - - Preconditions.checkArgument(checksumBytes.length % 4 == 0, + final int checksumSize = stripeChecksum.size(); + Preconditions.checkArgument(checksumSize % 4 == 0, "Checksum Bytes size does not match"); CrcComposer chunkCrcComposer = CrcComposer.newCrcComposer(dataChecksumType, bytesPerCrc); // Limit parity bytes as they do not contribute to fileChecksum - ByteBuffer byteWrap = ByteBuffer - .wrap(checksumBytes, 0, checksumBytes.length - parityBytes); - byte[] currentChecksum = new byte[4]; + final ByteBuffer byteWrap = stripeChecksum.asReadOnlyByteBuffer(); + byteWrap.limit(checksumSize - parityBytes); long chunkOffsetIndex = 1; while (byteWrap.hasRemaining()) { @@ -177,8 +172,7 @@ private void computeCompositeCrc() throws IOException { currentChunkOffset = bytesPerCrcOffset; } - byteWrap.get(currentChecksum); - int checksumDataCrc = CrcUtil.readInt(currentChecksum, 0); + final int checksumDataCrc = byteWrap.getInt(); //To handle last chunk when it size is lower than 1524K in the case // of rs-3-2-1524k.
long chunkSizePerChecksum = Math.min(Math.min(keySize, bytesPerCrc), diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedBlockChecksumComputer.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedBlockChecksumComputer.java index cf976e3bd39c..2c0fc0c0d36a 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedBlockChecksumComputer.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedBlockChecksumComputer.java @@ -26,8 +26,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.nio.ByteBuffer; +import java.security.MessageDigest; import java.util.List; /** @@ -39,7 +40,13 @@ public class ReplicatedBlockChecksumComputer extends private static final Logger LOG = LoggerFactory.getLogger(ReplicatedBlockChecksumComputer.class); - private List chunkInfoList; + static MD5Hash digest(ByteBuffer data) { + final MessageDigest digester = MD5Hash.getDigester(); + digester.update(data); + return new MD5Hash(digester.digest()); + } + + private final List chunkInfoList; public ReplicatedBlockChecksumComputer( List chunkInfoList) { @@ -62,20 +69,20 @@ public void compute(OzoneClientConfig.ChecksumCombineMode combineMode) } // compute the block checksum, which is the md5 of chunk checksums - private void computeMd5Crc() throws IOException { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - + private void computeMd5Crc() { + ByteString bytes = ByteString.EMPTY; for (ContainerProtos.ChunkInfo chunkInfo : chunkInfoList) { ContainerProtos.ChecksumData checksumData = chunkInfo.getChecksumData(); List checksums = checksumData.getChecksumsList(); for (ByteString checksum : checksums) { - baos.write(checksum.toByteArray()); + bytes = bytes.concat(checksum); } } - MD5Hash fileMD5 = MD5Hash.digest(baos.toByteArray()); + final MD5Hash fileMD5 = digest(bytes.asReadOnlyByteBuffer()); + setOutBytes(fileMD5.getDigest()); LOG.debug("number of chunks={}, md5out={}", @@ -121,7 +128,7 @@ private void computeCompositeCrc() throws IOException { Preconditions.checkArgument(remainingChunkSize <= checksums.size() * chunkSize); for (ByteString checksum : checksums) { - int checksumDataCrc = CrcUtil.readInt(checksum.toByteArray(), 0); + final int checksumDataCrc = checksum.asReadOnlyByteBuffer().getInt(); chunkCrcComposer.update(checksumDataCrc, Math.min(bytesPerCrc, remainingChunkSize)); remainingChunkSize -= bytesPerCrc; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java index 9bdec27f534f..ba3850ff3947 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java @@ -21,6 +21,8 @@ import java.io.OutputStream; import java.util.Collection; import java.util.Collections; +import java.util.concurrent.ExecutorService; +import java.util.function.Supplier; import org.apache.hadoop.fs.Syncable; import org.apache.hadoop.hdds.client.BlockID; @@ -37,6 +39,7 @@ import org.apache.hadoop.security.token.Token; import com.google.common.annotations.VisibleForTesting; +import org.apache.ratis.util.JavaUtils; /** * A BlockOutputStreamEntry manages the data writes into the DataNodes. 
@@ -60,33 +63,30 @@ public class BlockOutputStreamEntry extends OutputStream { private long currentPosition; private final Token token; - private BufferPool bufferPool; - private ContainerClientMetrics clientMetrics; - private StreamBufferArgs streamBufferArgs; - - @SuppressWarnings({"parameternumber", "squid:S00107"}) - BlockOutputStreamEntry( - BlockID blockID, String key, - XceiverClientFactory xceiverClientManager, - Pipeline pipeline, - long length, - BufferPool bufferPool, - Token token, - OzoneClientConfig config, - ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs - ) { - this.config = config; + private final BufferPool bufferPool; + private final ContainerClientMetrics clientMetrics; + private final StreamBufferArgs streamBufferArgs; + private final Supplier executorServiceSupplier; + + BlockOutputStreamEntry(Builder b) { + this.config = b.config; this.outputStream = null; - this.blockID = blockID; - this.key = key; - this.xceiverClientManager = xceiverClientManager; - this.pipeline = pipeline; - this.token = token; - this.length = length; + this.blockID = b.blockID; + this.key = b.key; + this.xceiverClientManager = b.xceiverClientManager; + this.pipeline = b.pipeline; + this.token = b.token; + this.length = b.length; this.currentPosition = 0; - this.bufferPool = bufferPool; - this.clientMetrics = clientMetrics; - this.streamBufferArgs = streamBufferArgs; + this.bufferPool = b.bufferPool; + this.clientMetrics = b.clientMetrics; + this.streamBufferArgs = b.streamBufferArgs; + this.executorServiceSupplier = b.executorServiceSupplier; + } + + @Override + public String toString() { + return JavaUtils.getClassSimpleName(getClass()) + ":" + key + " " + blockID; } /** @@ -108,13 +108,18 @@ void checkStream() throws IOException { */ void createOutputStream() throws IOException { outputStream = new RatisBlockOutputStream(blockID, xceiverClientManager, - pipeline, bufferPool, config, token, clientMetrics, streamBufferArgs); + pipeline, bufferPool, config, token, clientMetrics, streamBufferArgs, + executorServiceSupplier); } ContainerClientMetrics getClientMetrics() { return clientMetrics; } + Supplier getExecutorServiceSupplier() { + return executorServiceSupplier; + } + StreamBufferArgs getStreamBufferArgs() { return streamBufferArgs; } @@ -361,6 +366,15 @@ public static class Builder { private OzoneClientConfig config; private ContainerClientMetrics clientMetrics; private StreamBufferArgs streamBufferArgs; + private Supplier executorServiceSupplier; + + public Pipeline getPipeline() { + return pipeline; + } + + public long getLength() { + return length; + } public Builder setBlockID(BlockID bID) { this.blockID = bID; @@ -402,23 +416,24 @@ public Builder setToken(Token bToken) { this.token = bToken; return this; } + public Builder setClientMetrics(ContainerClientMetrics clientMetrics) { this.clientMetrics = clientMetrics; return this; } + public Builder setStreamBufferArgs(StreamBufferArgs streamBufferArgs) { this.streamBufferArgs = streamBufferArgs; return this; } + public Builder setExecutorServiceSupplier(Supplier executorServiceSupplier) { + this.executorServiceSupplier = executorServiceSupplier; + return this; + } + public BlockOutputStreamEntry build() { - return new BlockOutputStreamEntry(blockID, - key, - xceiverClientManager, - pipeline, - length, - bufferPool, - token, config, clientMetrics, streamBufferArgs); + return new BlockOutputStreamEntry(this); } } } diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java index 52ef31daf590..1b7918a45a71 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java @@ -25,9 +25,10 @@ import java.util.List; import java.util.ListIterator; import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.function.Supplier; import org.apache.hadoop.hdds.client.ContainerBlockID; -import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.scm.ByteStringConversion; import org.apache.hadoop.hdds.scm.ContainerClientMetrics; import org.apache.hadoop.hdds.scm.OzoneClientConfig; @@ -63,7 +64,7 @@ public class BlockOutputStreamEntryPool implements KeyMetadataAware { /** * List of stream entries that are used to write a block of data. */ - private final List streamEntries; + private final List streamEntries = new ArrayList<>(); private final OzoneClientConfig config; /** * The actual stream entry we are writing into. Note that a stream entry is @@ -74,7 +75,6 @@ public class BlockOutputStreamEntryPool implements KeyMetadataAware { private final OzoneManagerProtocol omClient; private final OmKeyArgs keyArgs; private final XceiverClientFactory xceiverClientFactory; - private final String requestID; /** * A {@link BufferPool} shared between all * {@link org.apache.hadoop.hdds.scm.storage.BlockOutputStream}s managed by @@ -86,42 +86,36 @@ public class BlockOutputStreamEntryPool implements KeyMetadataAware { private final ExcludeList excludeList; private final ContainerClientMetrics clientMetrics; private final StreamBufferArgs streamBufferArgs; + private final Supplier executorServiceSupplier; // update blocks on OM private ContainerBlockID lastUpdatedBlockId = new ContainerBlockID(-1, -1); - @SuppressWarnings({"parameternumber", "squid:S00107"}) - public BlockOutputStreamEntryPool( - OzoneClientConfig config, - OzoneManagerProtocol omClient, - String requestId, ReplicationConfig replicationConfig, - String uploadID, int partNumber, - boolean isMultipart, OmKeyInfo info, - boolean unsafeByteBufferConversion, - XceiverClientFactory xceiverClientFactory, long openID, - ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs - ) { - this.config = config; - this.xceiverClientFactory = xceiverClientFactory; - streamEntries = new ArrayList<>(); + public BlockOutputStreamEntryPool(KeyOutputStream.Builder b) { + this.config = b.getClientConfig(); + this.xceiverClientFactory = b.getXceiverManager(); currentStreamIndex = 0; - this.omClient = omClient; + this.omClient = b.getOmClient(); + final OmKeyInfo info = b.getOpenHandler().getKeyInfo(); this.keyArgs = new OmKeyArgs.Builder().setVolumeName(info.getVolumeName()) .setBucketName(info.getBucketName()).setKeyName(info.getKeyName()) - .setReplicationConfig(replicationConfig).setDataSize(info.getDataSize()) - .setIsMultipartKey(isMultipart).setMultipartUploadID(uploadID) - .setMultipartUploadPartNumber(partNumber).build(); - this.requestID = requestId; - this.openID = openID; + .setReplicationConfig(b.getReplicationConfig()) + .setDataSize(info.getDataSize()) + .setIsMultipartKey(b.isMultipartKey()) + .setMultipartUploadID(b.getMultipartUploadID()) + .setMultipartUploadPartNumber(b.getMultipartNumber()) + .build(); + 
this.openID = b.getOpenHandler().getId(); this.excludeList = createExcludeList(); + this.streamBufferArgs = b.getStreamBufferArgs(); this.bufferPool = new BufferPool(streamBufferArgs.getStreamBufferSize(), (int) (streamBufferArgs.getStreamBufferMaxSize() / streamBufferArgs .getStreamBufferSize()), ByteStringConversion - .createByteBufferConversion(unsafeByteBufferConversion)); - this.clientMetrics = clientMetrics; - this.streamBufferArgs = streamBufferArgs; + .createByteBufferConversion(b.isUnsafeByteBufferConversionEnabled())); + this.clientMetrics = b.getClientMetrics(); + this.executorServiceSupplier = b.getExecutorServiceSupplier(); } ExcludeList createExcludeList() { @@ -129,25 +123,6 @@ ExcludeList createExcludeList() { Clock.system(ZoneOffset.UTC)); } - BlockOutputStreamEntryPool(ContainerClientMetrics clientMetrics, - OzoneClientConfig clientConfig, StreamBufferArgs streamBufferArgs) { - streamEntries = new ArrayList<>(); - omClient = null; - keyArgs = null; - xceiverClientFactory = null; - config = clientConfig; - streamBufferArgs.setStreamBufferFlushDelay(false); - requestID = null; - int chunkSize = 0; - bufferPool = new BufferPool(chunkSize, 1); - - currentStreamIndex = 0; - openID = -1; - excludeList = createExcludeList(); - this.clientMetrics = clientMetrics; - this.streamBufferArgs = null; - } - /** * When a key is opened, it is possible that there are some blocks already * allocated to it for this open session. In this case, to make use of these @@ -159,10 +134,8 @@ ExcludeList createExcludeList() { * * @param version the set of blocks that are pre-allocated. * @param openVersion the version corresponding to the pre-allocation. - * @throws IOException */ - public void addPreallocateBlocks(OmKeyLocationInfoGroup version, - long openVersion) throws IOException { + public void addPreallocateBlocks(OmKeyLocationInfoGroup version, long openVersion) { // server may return any number of blocks, (0 to any) // only the blocks allocated in this open session (block createVersion // equals to open session version) @@ -193,6 +166,7 @@ BlockOutputStreamEntry createStreamEntry(OmKeyLocationInfo subKeyInfo) { .setToken(subKeyInfo.getToken()) .setClientMetrics(clientMetrics) .setStreamBufferArgs(streamBufferArgs) + .setExecutorServiceSupplier(executorServiceSupplier) .build(); } @@ -263,6 +237,10 @@ StreamBufferArgs getStreamBufferArgs() { return streamBufferArgs; } + public Supplier getExecutorServiceSupplier() { + return executorServiceSupplier; + } + /** * Discards the subsequent pre allocated blocks and removes the streamEntries * from the streamEntries list for the container which is closed. 
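Illustrative sketch (not part of the patch; hypothetical names, JDK classes only): the hunks above replace the telescoping constructors with builder-driven construction and thread a Supplier of ExecutorService from the KeyOutputStream builder down to each stream entry, so a shared executor can be created lazily and reused by every entry. A minimal standalone example of that supplier pattern might look like this:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

public final class SharedExecutorSketch {

  // Returns a supplier that creates the thread pool on first get() and then reuses it.
  static Supplier<ExecutorService> lazyShared(int threads) {
    final AtomicReference<ExecutorService> ref = new AtomicReference<>();
    return () -> {
      ExecutorService current = ref.get();
      if (current == null) {
        ExecutorService created = Executors.newFixedThreadPool(threads);
        if (ref.compareAndSet(null, created)) {
          current = created;
        } else {
          created.shutdown();               // lost the race, reuse the winner's pool
          current = ref.get();
        }
      }
      return current;
    };
  }

  // Stand-in for a stream entry: the executor is only resolved when work is submitted.
  static final class Entry {
    private final Supplier<ExecutorService> executorServiceSupplier;

    Entry(Supplier<ExecutorService> executorServiceSupplier) {
      this.executorServiceSupplier = executorServiceSupplier;
    }

    void writeAsync(Runnable work) {
      executorServiceSupplier.get().execute(work);   // pool created on demand
    }
  }

  public static void main(String[] args) throws InterruptedException {
    Supplier<ExecutorService> supplier = lazyShared(2);
    Entry a = new Entry(supplier);
    Entry b = new Entry(supplier);                   // both entries share the same pool
    a.writeAsync(() -> System.out.println("write from entry A"));
    b.writeAsync(() -> System.out.println("write from entry B"));
    ExecutorService pool = supplier.get();
    pool.shutdown();
    pool.awaitTermination(5, TimeUnit.SECONDS);
  }
}

Passing the supplier rather than an ExecutorService keeps entry construction cheap and leaves the executor's lifecycle to whoever owns the supplier.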
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntry.java index 07d0f46069ca..241754a57f19 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntry.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntry.java @@ -23,17 +23,10 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.ContainerClientMetrics; -import org.apache.hadoop.hdds.scm.OzoneClientConfig; -import org.apache.hadoop.hdds.scm.StreamBufferArgs; -import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.BlockOutputStream; -import org.apache.hadoop.hdds.scm.storage.BufferPool; import org.apache.hadoop.hdds.scm.storage.ECBlockOutputStream; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; import org.apache.hadoop.hdds.utils.IOUtils; -import org.apache.hadoop.security.token.Token; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -75,19 +68,10 @@ public class ECBlockOutputStreamEntry extends BlockOutputStreamEntry { private int currentStreamIdx = 0; private long successfulBlkGrpAckedLen; - @SuppressWarnings({"parameternumber", "squid:S00107"}) - ECBlockOutputStreamEntry(BlockID blockID, String key, - XceiverClientFactory xceiverClientManager, Pipeline pipeline, long length, - BufferPool bufferPool, Token token, - OzoneClientConfig config, ContainerClientMetrics clientMetrics, - StreamBufferArgs streamBufferArgs) { - super(blockID, key, xceiverClientManager, pipeline, length, bufferPool, - token, config, clientMetrics, streamBufferArgs); - assertInstanceOf( - pipeline.getReplicationConfig(), ECReplicationConfig.class); - this.replicationConfig = - (ECReplicationConfig) pipeline.getReplicationConfig(); - this.length = replicationConfig.getData() * length; + ECBlockOutputStreamEntry(Builder b) { + super(b); + this.replicationConfig = assertInstanceOf(b.getPipeline().getReplicationConfig(), ECReplicationConfig.class); + this.length = replicationConfig.getData() * b.getLength(); } @Override @@ -101,7 +85,8 @@ void checkStream() throws IOException { streams[i] = new ECBlockOutputStream(getBlockID(), getXceiverClientManager(), createSingleECBlockPipeline(getPipeline(), nodes.get(i), i + 1), - getBufferPool(), getConf(), getToken(), getClientMetrics(), getStreamBufferArgs()); + getBufferPool(), getConf(), getToken(), getClientMetrics(), getStreamBufferArgs(), + getExecutorServiceSupplier()); } blockOutputStreams = streams; } @@ -433,82 +418,9 @@ public ByteString calculateChecksum() throws IOException { /** * Builder class for ChunkGroupOutputStreamEntry. 
* */ - public static class Builder { - private BlockID blockID; - private String key; - private XceiverClientFactory xceiverClientManager; - private Pipeline pipeline; - private long length; - private BufferPool bufferPool; - private Token token; - private OzoneClientConfig config; - private ContainerClientMetrics clientMetrics; - private StreamBufferArgs streamBufferArgs; - - public ECBlockOutputStreamEntry.Builder setBlockID(BlockID bID) { - this.blockID = bID; - return this; - } - - public ECBlockOutputStreamEntry.Builder setKey(String keys) { - this.key = keys; - return this; - } - - public ECBlockOutputStreamEntry.Builder setXceiverClientManager( - XceiverClientFactory - xClientManager) { - this.xceiverClientManager = xClientManager; - return this; - } - - public ECBlockOutputStreamEntry.Builder setPipeline(Pipeline ppln) { - this.pipeline = ppln; - return this; - } - - public ECBlockOutputStreamEntry.Builder setLength(long len) { - this.length = len; - return this; - } - - public ECBlockOutputStreamEntry.Builder setBufferPool(BufferPool pool) { - this.bufferPool = pool; - return this; - } - - public ECBlockOutputStreamEntry.Builder setConfig( - OzoneClientConfig clientConfig) { - this.config = clientConfig; - return this; - } - - public ECBlockOutputStreamEntry.Builder setToken( - Token bToken) { - this.token = bToken; - return this; - } - - public ECBlockOutputStreamEntry.Builder setClientMetrics( - ContainerClientMetrics containerClientMetrics) { - this.clientMetrics = containerClientMetrics; - return this; - } - - public ECBlockOutputStreamEntry.Builder setStreamBufferArgs( - StreamBufferArgs args) { - this.streamBufferArgs = args; - return this; - } - + public static class Builder extends BlockOutputStreamEntry.Builder { public ECBlockOutputStreamEntry build() { - return new ECBlockOutputStreamEntry(blockID, - key, - xceiverClientManager, - pipeline, - length, - bufferPool, - token, config, clientMetrics, streamBufferArgs); + return new ECBlockOutputStreamEntry(this); } } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntryPool.java index e551605d842d..6eb9aed0d3ad 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntryPool.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntryPool.java @@ -17,19 +17,7 @@ */ package org.apache.hadoop.ozone.client.io; -import org.apache.hadoop.hdds.client.ECReplicationConfig; -import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.hdds.scm.ContainerClientMetrics; -import org.apache.hadoop.hdds.scm.OzoneClientConfig; -import org.apache.hadoop.hdds.scm.StreamBufferArgs; -import org.apache.hadoop.hdds.scm.XceiverClientFactory; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; - -import java.time.Clock; -import java.time.ZoneOffset; /** * {@link BlockOutputStreamEntryPool} is responsible to manage OM communication @@ -44,37 +32,14 @@ * @see ECBlockOutputStreamEntry */ public class ECBlockOutputStreamEntryPool extends BlockOutputStreamEntryPool { - - @SuppressWarnings({"parameternumber", "squid:S00107"}) - public ECBlockOutputStreamEntryPool(OzoneClientConfig config, - 
OzoneManagerProtocol omClient, - String requestId, - ReplicationConfig replicationConfig, - String uploadID, - int partNumber, - boolean isMultipart, - OmKeyInfo info, - boolean unsafeByteBufferConversion, - XceiverClientFactory xceiverClientFactory, - long openID, - ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs) { - super(config, omClient, requestId, replicationConfig, uploadID, partNumber, - isMultipart, info, unsafeByteBufferConversion, xceiverClientFactory, - openID, clientMetrics, streamBufferArgs); - assert replicationConfig instanceof ECReplicationConfig; - } - - @Override - ExcludeList createExcludeList() { - return new ExcludeList(getConfig().getExcludeNodesExpiryTime(), - Clock.system(ZoneOffset.UTC)); + public ECBlockOutputStreamEntryPool(ECKeyOutputStream.Builder builder) { + super(builder); } @Override - BlockOutputStreamEntry createStreamEntry(OmKeyLocationInfo subKeyInfo) { - return - new ECBlockOutputStreamEntry.Builder() - .setBlockID(subKeyInfo.getBlockID()) + ECBlockOutputStreamEntry createStreamEntry(OmKeyLocationInfo subKeyInfo) { + final ECBlockOutputStreamEntry.Builder b = new ECBlockOutputStreamEntry.Builder(); + b.setBlockID(subKeyInfo.getBlockID()) .setKey(getKeyName()) .setXceiverClientManager(getXceiverClientFactory()) .setPipeline(subKeyInfo.getPipeline()) @@ -84,7 +49,8 @@ BlockOutputStreamEntry createStreamEntry(OmKeyLocationInfo subKeyInfo) { .setToken(subKeyInfo.getToken()) .setClientMetrics(getClientMetrics()) .setStreamBufferArgs(getStreamBufferArgs()) - .build(); + .setExecutorServiceSupplier(getExecutorServiceSupplier()); + return b.build(); } @Override diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java index b5c36474ff9e..0cb3973e0411 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java @@ -17,41 +17,16 @@ */ package org.apache.hadoop.ozone.client.io; -import java.io.IOException; -import java.nio.Buffer; -import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; import org.apache.hadoop.fs.FSExceptionMessages; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.scm.OzoneClientConfig; -import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.ECBlockOutputStream; import org.apache.hadoop.io.ByteBufferPool; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import 
org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; import org.apache.hadoop.ozone.om.protocol.S3Auth; import org.apache.ozone.erasurecode.rawcoder.RawErasureEncoder; import org.apache.ozone.erasurecode.rawcoder.util.CodecUtil; @@ -59,6 +34,21 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.IOException; +import java.nio.Buffer; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + /** * ECKeyOutputStream handles the EC writes by writing the data into underlying * block output streams chunk by chunk. @@ -74,7 +64,6 @@ public final class ECKeyOutputStream extends KeyOutputStream private final int numParityBlks; private final ByteBufferPool bufferPool; private final RawErasureEncoder encoder; - private final ExecutorService flushExecutor; private final Future flushFuture; private final AtomicLong flushCheckpoint; @@ -100,22 +89,6 @@ private enum StripeWriteStatus { private long offset; // how much data has been ingested into the stream private long writeOffset; - private final ECBlockOutputStreamEntryPool blockOutputStreamEntryPool; - - @VisibleForTesting - public List getStreamEntries() { - return blockOutputStreamEntryPool.getStreamEntries(); - } - - @VisibleForTesting - public XceiverClientFactory getXceiverClientFactory() { - return blockOutputStreamEntryPool.getXceiverClientFactory(); - } - - @VisibleForTesting - public List getLocationInfoList() { - return blockOutputStreamEntryPool.getLocationInfoList(); - } @VisibleForTesting public void insertFlushCheckpoint(long version) throws IOException { @@ -128,8 +101,7 @@ public long getFlushCheckpoint() { } private ECKeyOutputStream(Builder builder) { - super(builder.getReplicationConfig(), builder.getClientMetrics(), - builder.getClientConfig(), builder.getStreamBufferArgs()); + super(builder.getReplicationConfig(), new ECBlockOutputStreamEntryPool(builder)); this.config = builder.getClientConfig(); this.bufferPool = builder.getByteBufferPool(); // For EC, cell/chunk size and buffer size can be same for now. 
@@ -140,46 +112,24 @@ private ECKeyOutputStream(Builder builder) { ecChunkSize, numDataBlks, numParityBlks, bufferPool); chunkIndex = 0; ecStripeQueue = new ArrayBlockingQueue<>(config.getEcStripeQueueSize()); - OmKeyInfo info = builder.getOpenHandler().getKeyInfo(); - blockOutputStreamEntryPool = - new ECBlockOutputStreamEntryPool(config, - builder.getOmClient(), builder.getRequestID(), - builder.getReplicationConfig(), - builder.getMultipartUploadID(), builder.getMultipartNumber(), - builder.isMultipartKey(), - info, builder.isUnsafeByteBufferConversionEnabled(), - builder.getXceiverManager(), builder.getOpenHandler().getId(), - builder.getClientMetrics(), builder.getStreamBufferArgs()); this.writeOffset = 0; this.encoder = CodecUtil.createRawEncoderWithFallback( builder.getReplicationConfig()); - this.flushExecutor = Executors.newSingleThreadExecutor(); S3Auth s3Auth = builder.getS3CredentialsProvider().get(); ThreadLocal s3CredentialsProvider = builder.getS3CredentialsProvider(); - flushExecutor.submit(() -> s3CredentialsProvider.set(s3Auth)); - this.flushFuture = this.flushExecutor.submit(this::flushStripeFromQueue); + this.flushFuture = builder.getExecutorServiceSupplier().get().submit(() -> { + s3CredentialsProvider.set(s3Auth); + return flushStripeFromQueue(); + }); this.flushCheckpoint = new AtomicLong(0); this.atomicKeyCreation = builder.getAtomicKeyCreation(); } - /** - * When a key is opened, it is possible that there are some blocks already - * allocated to it for this open session. In this case, to make use of these - * blocks, we need to add these blocks to stream entries. But, a key's version - * also includes blocks from previous versions, we need to avoid adding these - * old blocks to stream entries, because these old blocks should not be picked - * for write. To do this, the following method checks that, only those - * blocks created in this particular open version are added to stream entries. - * - * @param version the set of blocks that are pre-allocated. - * @param openVersion the version corresponding to the pre-allocation. 
- * @throws IOException - */ - public void addPreallocateBlocks(OmKeyLocationInfoGroup version, - long openVersion) throws IOException { - blockOutputStreamEntryPool.addPreallocateBlocks(version, openVersion); + @Override + protected ECBlockOutputStreamEntryPool getBlockOutputStreamEntryPool() { + return (ECBlockOutputStreamEntryPool) super.getBlockOutputStreamEntryPool(); } /** @@ -218,6 +168,7 @@ private void rollbackAndReset(ECChunkBuffers stripe) throws IOException { final ByteBuffer[] dataBuffers = stripe.getDataBuffers(); offset -= Arrays.stream(dataBuffers).mapToInt(Buffer::limit).sum(); + final ECBlockOutputStreamEntryPool blockOutputStreamEntryPool = getBlockOutputStreamEntryPool(); final ECBlockOutputStreamEntry failedStreamEntry = blockOutputStreamEntryPool.getCurrentStreamEntry(); failedStreamEntry.resetToFirstEntry(); @@ -256,8 +207,7 @@ private void logStreamError(List failedStreams, private StripeWriteStatus commitStripeWrite(ECChunkBuffers stripe) throws IOException { - ECBlockOutputStreamEntry streamEntry = - blockOutputStreamEntryPool.getCurrentStreamEntry(); + final ECBlockOutputStreamEntry streamEntry = getBlockOutputStreamEntryPool().getCurrentStreamEntry(); List failedStreams = streamEntry.streamsWithWriteFailure(); if (!failedStreams.isEmpty()) { @@ -297,6 +247,7 @@ private void excludePipelineAndFailedDN(Pipeline pipeline, List failedStreams) { // Exclude the failed pipeline + final ECBlockOutputStreamEntryPool blockOutputStreamEntryPool = getBlockOutputStreamEntryPool(); blockOutputStreamEntryPool.getExcludeList().addPipeline(pipeline.getId()); // If the failure is NOT caused by other reasons (e.g. container full), @@ -362,6 +313,7 @@ private void generateParityCells() throws IOException { } private void writeDataCells(ECChunkBuffers stripe) throws IOException { + final ECBlockOutputStreamEntryPool blockOutputStreamEntryPool = getBlockOutputStreamEntryPool(); blockOutputStreamEntryPool.allocateBlockIfNeeded(); ByteBuffer[] dataCells = stripe.getDataBuffers(); for (int i = 0; i < numDataBlks; i++) { @@ -374,6 +326,7 @@ private void writeDataCells(ECChunkBuffers stripe) throws IOException { private void writeParityCells(ECChunkBuffers stripe) { // Move the stream entry cursor to parity block index + final ECBlockOutputStreamEntryPool blockOutputStreamEntryPool = getBlockOutputStreamEntryPool(); blockOutputStreamEntryPool .getCurrentStreamEntry().forceToFirstParityBlock(); ByteBuffer[] parityCells = stripe.getParityBuffers(); @@ -413,7 +366,7 @@ private void handleOutputStreamWrite(ByteBuffer buffer, boolean isParity) { // The len cannot be bigger than cell buffer size. 
assert buffer.limit() <= ecChunkSize : "The buffer size: " + buffer.limit() + " should not exceed EC chunk size: " + ecChunkSize; - writeToOutputStream(blockOutputStreamEntryPool.getCurrentStreamEntry(), + writeToOutputStream(getBlockOutputStreamEntryPool().getCurrentStreamEntry(), buffer.array(), buffer.limit(), 0, isParity); } catch (Exception e) { markStreamAsFailed(e); @@ -449,8 +402,7 @@ private void handleException(BlockOutputStreamEntry streamEntry, Preconditions.checkNotNull(t); boolean containerExclusionException = checkIfContainerToExclude(t); if (containerExclusionException) { - blockOutputStreamEntryPool.getExcludeList() - .addPipeline(streamEntry.getPipeline().getId()); + getBlockOutputStreamEntryPool().getExcludeList().addPipeline(streamEntry.getPipeline().getId()); } markStreamAsFailed(exception); } @@ -460,7 +412,7 @@ private void markStreamClosed() { } private void markStreamAsFailed(Exception e) { - blockOutputStreamEntryPool.getCurrentStreamEntry().markFailed(e); + getBlockOutputStreamEntryPool().getCurrentStreamEntry().markFailed(e); } @Override @@ -470,6 +422,7 @@ public void flush() { private void closeCurrentStreamEntry() throws IOException { + final ECBlockOutputStreamEntryPool blockOutputStreamEntryPool = getBlockOutputStreamEntryPool(); if (!blockOutputStreamEntryPool.isEmpty()) { while (true) { try { @@ -503,6 +456,7 @@ public void close() throws IOException { return; } closed = true; + final ECBlockOutputStreamEntryPool blockOutputStreamEntryPool = getBlockOutputStreamEntryPool(); try { if (!closing) { // If stripe buffer is not empty, encode and flush the stripe. @@ -539,7 +493,6 @@ public void close() throws IOException { } catch (InterruptedException e) { throw new IOException("Flushing thread was interrupted", e); } finally { - flushExecutor.shutdownNow(); closeCurrentStreamEntry(); blockOutputStreamEntryPool.cleanup(); } @@ -614,20 +567,6 @@ public static void padBufferToLimit(ByteBuffer buf, int limit) { buf.position(limit); } - public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() { - return blockOutputStreamEntryPool.getCommitUploadPartInfo(); - } - - @VisibleForTesting - public ExcludeList getExcludeList() { - return blockOutputStreamEntryPool.getExcludeList(); - } - - @Override - public Map getMetadata() { - return this.blockOutputStreamEntryPool.getMetadata(); - } - /** * Builder class of ECKeyOutputStream. 
*/ @@ -682,9 +621,8 @@ public ECKeyOutputStream build() { */ private void checkNotClosed() throws IOException { if (closing || closed) { - throw new IOException( - ": " + FSExceptionMessages.STREAM_IS_CLOSED + " Key: " - + blockOutputStreamEntryPool.getKeyName()); + throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED + " Key: " + + getBlockOutputStreamEntryPool().getKeyName()); } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java index 6b6be1abd40e..15e84cf37037 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java @@ -25,6 +25,7 @@ import java.util.stream.Collectors; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; @@ -59,9 +60,9 @@ private static List createStreams( OmKeyInfo keyInfo, List blockInfos, XceiverClientFactory xceiverClientFactory, - boolean verifyChecksum, Function retryFunction, - BlockInputStreamFactory blockStreamFactory) { + BlockInputStreamFactory blockStreamFactory, + OzoneClientConfig config) { boolean isHsyncFile = keyInfo.getMetadata().containsKey(OzoneConsts.HSYNC_CLIENT_ID); List partStreams = new ArrayList<>(); for (int i = 0; i < blockInfos.size(); i++) { @@ -99,9 +100,9 @@ private static List createStreams( omKeyLocationInfo, omKeyLocationInfo.getPipeline(), omKeyLocationInfo.getToken(), - verifyChecksum, xceiverClientFactory, - retry); + retry, + config); partStreams.add(stream); } return partStreams; @@ -125,13 +126,13 @@ private static BlockLocationInfo getBlockLocationInfo(OmKeyInfo newKeyInfo, private static LengthInputStream getFromOmKeyInfo( OmKeyInfo keyInfo, XceiverClientFactory xceiverClientFactory, - boolean verifyChecksum, Function retryFunction, BlockInputStreamFactory blockStreamFactory, - List locationInfos) { + List locationInfos, + OzoneClientConfig config) { List streams = createStreams(keyInfo, - locationInfos, xceiverClientFactory, verifyChecksum, retryFunction, - blockStreamFactory); + locationInfos, xceiverClientFactory, retryFunction, + blockStreamFactory, config); KeyInputStream keyInputStream = new KeyInputStream(keyInfo.getKeyName(), streams); return new LengthInputStream(keyInputStream, keyInputStream.getLength()); @@ -142,20 +143,22 @@ private static LengthInputStream getFromOmKeyInfo( */ public static LengthInputStream getFromOmKeyInfo(OmKeyInfo keyInfo, XceiverClientFactory xceiverClientFactory, - boolean verifyChecksum, Function retryFunction, - BlockInputStreamFactory blockStreamFactory) { + Function retryFunction, + BlockInputStreamFactory blockStreamFactory, + OzoneClientConfig config) { List keyLocationInfos = keyInfo .getLatestVersionLocations().getBlocksLatestVersionOnly(); - return getFromOmKeyInfo(keyInfo, xceiverClientFactory, verifyChecksum, - retryFunction, blockStreamFactory, keyLocationInfos); + return getFromOmKeyInfo(keyInfo, xceiverClientFactory, + retryFunction, blockStreamFactory, keyLocationInfos, config); } public static List getStreamsFromKeyInfo(OmKeyInfo keyInfo, - XceiverClientFactory xceiverClientFactory, boolean verifyChecksum, + XceiverClientFactory xceiverClientFactory, Function 
retryFunction, - BlockInputStreamFactory blockStreamFactory) { + BlockInputStreamFactory blockStreamFactory, + OzoneClientConfig config) { List keyLocationInfos = keyInfo .getLatestVersionLocations().getBlocksLatestVersionOnly(); @@ -170,7 +173,8 @@ public static List getStreamsFromKeyInfo(OmKeyInfo keyInfo, // Create a KeyInputStream for each part. for (List locationInfo : partsToBlocksMap.values()) { lengthInputStreams.add(getFromOmKeyInfo(keyInfo, xceiverClientFactory, - verifyChecksum, retryFunction, blockStreamFactory, locationInfo)); + retryFunction, blockStreamFactory, locationInfo, + config)); } return lengthInputStreams; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java index 8b128e9cd945..d9e735cd7c8c 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java @@ -24,7 +24,9 @@ import java.util.List; import java.util.Map; import java.util.UUID; +import java.util.concurrent.ExecutorService; import java.util.function.Function; +import java.util.function.Supplier; import java.util.stream.Collectors; import org.apache.hadoop.fs.FSExceptionMessages; @@ -69,7 +71,6 @@ public class KeyOutputStream extends OutputStream implements Syncable, KeyMetadataAware { - private OzoneClientConfig config; private final ReplicationConfig replication; /** @@ -105,11 +106,8 @@ enum StreamAction { */ private boolean atomicKeyCreation; - public KeyOutputStream(ReplicationConfig replicationConfig, - ContainerClientMetrics clientMetrics, OzoneClientConfig clientConfig, - StreamBufferArgs streamBufferArgs) { + public KeyOutputStream(ReplicationConfig replicationConfig, BlockOutputStreamEntryPool blockOutputStreamEntryPool) { this.replication = replicationConfig; - this.config = clientConfig; closed = false; this.retryPolicyMap = HddsClientUtils.getExceptionList() .stream() @@ -117,18 +115,16 @@ public KeyOutputStream(ReplicationConfig replicationConfig, e -> RetryPolicies.TRY_ONCE_THEN_FAIL)); retryCount = 0; offset = 0; - blockOutputStreamEntryPool = - new BlockOutputStreamEntryPool(clientMetrics, clientConfig, streamBufferArgs); + this.blockOutputStreamEntryPool = blockOutputStreamEntryPool; } - @VisibleForTesting - public List getStreamEntries() { - return blockOutputStreamEntryPool.getStreamEntries(); + protected BlockOutputStreamEntryPool getBlockOutputStreamEntryPool() { + return blockOutputStreamEntryPool; } @VisibleForTesting - public XceiverClientFactory getXceiverClientFactory() { - return blockOutputStreamEntryPool.getXceiverClientFactory(); + public List getStreamEntries() { + return blockOutputStreamEntryPool.getStreamEntries(); } @VisibleForTesting @@ -146,39 +142,18 @@ public long getClientID() { return clientID; } - @SuppressWarnings({"parameternumber", "squid:S00107"}) - public KeyOutputStream( - OzoneClientConfig config, - OpenKeySession handler, - XceiverClientFactory xceiverClientManager, - OzoneManagerProtocol omClient, - String requestId, ReplicationConfig replicationConfig, - String uploadID, int partNumber, boolean isMultipart, - boolean unsafeByteBufferConversion, - ContainerClientMetrics clientMetrics, - boolean atomicKeyCreation, StreamBufferArgs streamBufferArgs - ) { - this.config = config; - this.replication = replicationConfig; - blockOutputStreamEntryPool = - new BlockOutputStreamEntryPool( - 
config, - omClient, - requestId, replicationConfig, - uploadID, partNumber, - isMultipart, handler.getKeyInfo(), - unsafeByteBufferConversion, - xceiverClientManager, - handler.getId(), - clientMetrics, streamBufferArgs); + public KeyOutputStream(Builder b) { + this.replication = b.replicationConfig; + this.blockOutputStreamEntryPool = new BlockOutputStreamEntryPool(b); + final OzoneClientConfig config = b.getClientConfig(); this.retryPolicyMap = HddsClientUtils.getRetryPolicyByException( config.getMaxRetryCount(), config.getRetryInterval()); this.retryCount = 0; this.isException = false; this.writeOffset = 0; - this.clientID = handler.getId(); - this.atomicKeyCreation = atomicKeyCreation; - this.streamBufferArgs = streamBufferArgs; + this.clientID = b.getOpenHandler().getId(); + this.atomicKeyCreation = b.getAtomicKeyCreation(); + this.streamBufferArgs = b.getStreamBufferArgs(); } /** @@ -192,10 +167,8 @@ public KeyOutputStream( * * @param version the set of blocks that are pre-allocated. * @param openVersion the version corresponding to the pre-allocation. - * @throws IOException */ - public synchronized void addPreallocateBlocks(OmKeyLocationInfoGroup version, - long openVersion) throws IOException { + public synchronized void addPreallocateBlocks(OmKeyLocationInfoGroup version, long openVersion) { blockOutputStreamEntryPool.addPreallocateBlocks(version, openVersion); } @@ -615,6 +588,7 @@ public static class Builder { private ContainerClientMetrics clientMetrics; private boolean atomicKeyCreation = false; private StreamBufferArgs streamBufferArgs; + private Supplier executorServiceSupplier; public String getMultipartUploadID() { return multipartUploadID; @@ -728,21 +702,17 @@ public boolean getAtomicKeyCreation() { return atomicKeyCreation; } + public Builder setExecutorServiceSupplier(Supplier executorServiceSupplier) { + this.executorServiceSupplier = executorServiceSupplier; + return this; + } + + public Supplier getExecutorServiceSupplier() { + return executorServiceSupplier; + } + public KeyOutputStream build() { - return new KeyOutputStream( - clientConfig, - openHandler, - xceiverManager, - omClient, - requestID, - replicationConfig, - multipartUploadID, - multipartNumber, - isMultipartKey, - unsafeByteBufferConversion, - clientMetrics, - atomicKeyCreation, - streamBufferArgs); + return new KeyOutputStream(this); } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index 46e7e20b51b0..492cd31b6722 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -1092,6 +1092,19 @@ Map> getKeysEveryReplicas( String createSnapshot(String volumeName, String bucketName, String snapshotName) throws IOException; + /** + * Rename snapshot. + * + * @param volumeName Vol to be used + * @param bucketName Bucket to be used + * @param snapshotOldName Old name of the snapshot + * @param snapshotNewName New name of the snapshot + * + * @throws IOException + */ + void renameSnapshot(String volumeName, + String bucketName, String snapshotOldName, String snapshotNewName) throws IOException; + /** * Delete snapshot. 
* @param volumeName vol to be used diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 7e1e6fe45602..0806ffb84725 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -145,6 +145,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.util.MemoizedSupplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -155,7 +156,6 @@ import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; -import java.util.BitSet; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; @@ -195,6 +195,8 @@ public class RpcClient implements ClientProtocol { // for reconstruction. private static final int EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE = 3; + private static final int WRITE_POOL_MIN_SIZE = 1; + private final ConfigurationSource conf; private final OzoneManagerClientProtocol ozoneManagerClient; private final XceiverClientFactory xceiverClientManager; @@ -213,8 +215,9 @@ public class RpcClient implements ClientProtocol { private final ByteBufferPool byteBufferPool; private final BlockInputStreamFactory blockInputStreamFactory; private final OzoneManagerVersion omVersion; - private volatile ExecutorService ecReconstructExecutor; + private final MemoizedSupplier ecReconstructExecutor; private final ContainerClientMetrics clientMetrics; + private final MemoizedSupplier writeExecutor; private final AtomicBoolean isS3GRequest = new AtomicBoolean(false); /** @@ -237,6 +240,11 @@ public RpcClient(ConfigurationSource conf, String omServiceId) this.groupRights = aclConfig.getGroupDefaultRights(); this.clientConfig = conf.getObject(OzoneClientConfig.class); + this.ecReconstructExecutor = MemoizedSupplier.valueOf(() -> createThreadPoolExecutor( + EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE, clientConfig.getEcReconstructStripeReadPoolLimit(), + "ec-reconstruct-reader-TID-%d")); + this.writeExecutor = MemoizedSupplier.valueOf(() -> createThreadPoolExecutor( + WRITE_POOL_MIN_SIZE, Integer.MAX_VALUE, "client-write-TID-%d")); OmTransport omTransport = createOmTransport(omServiceId); OzoneManagerProtocolClientSideTranslatorPB @@ -311,8 +319,10 @@ public void onRemoval( }).build(); this.byteBufferPool = new ElasticByteBufferPool(); this.blockInputStreamFactory = BlockInputStreamFactoryImpl - .getInstance(byteBufferPool, this::getECReconstructExecutor); + .getInstance(byteBufferPool, ecReconstructExecutor); this.clientMetrics = ContainerClientMetrics.acquire(); + + TracingUtil.initTracing("client", conf); } public XceiverClientFactory getXceiverClientManager() { @@ -423,15 +433,16 @@ public void createVolume(String volumeName, VolumeArgs volArgs) List listOfAcls = new ArrayList<>(); //User ACL listOfAcls.add(new OzoneAcl(ACLIdentityType.USER, - owner, userRights, ACCESS)); + owner, ACCESS, userRights)); //Group ACLs of the User List userGroups = Arrays.asList(UserGroupInformation .createRemoteUser(owner).getGroupNames()); userGroups.stream().forEach((group) -> listOfAcls.add( - new OzoneAcl(ACLIdentityType.GROUP, group, groupRights, ACCESS))); + new OzoneAcl(ACLIdentityType.GROUP, group, ACCESS, groupRights))); //ACLs from VolumeArgs - if (volArgs.getAcls() != 
null) { - listOfAcls.addAll(volArgs.getAcls()); + List volumeAcls = volArgs.getAcls(); + if (volumeAcls != null) { + listOfAcls.addAll(volumeAcls); } OmVolumeArgs.Builder builder = OmVolumeArgs.newBuilder(); @@ -626,8 +637,7 @@ public void createBucket( ugi.getShortUserName() : bucketArgs.getOwner(); } - Boolean isVersionEnabled = bucketArgs.getVersioning() == null ? - Boolean.FALSE : bucketArgs.getVersioning(); + boolean isVersionEnabled = bucketArgs.getVersioning(); StorageType storageType = bucketArgs.getStorageType() == null ? StorageType.DEFAULT : bucketArgs.getStorageType(); BucketLayout bucketLayout = bucketArgs.getBucketLayout(); @@ -748,10 +758,7 @@ private List getAclList() { * @return OzoneAcl */ private OzoneAcl linkBucketDefaultAcl() { - BitSet aclRights = new BitSet(); - aclRights.set(READ.ordinal()); - aclRights.set(WRITE.ordinal()); - return new OzoneAcl(ACLIdentityType.WORLD, "", aclRights, ACCESS); + return new OzoneAcl(ACLIdentityType.WORLD, "", ACCESS, READ, WRITE); } /** @@ -964,6 +971,31 @@ public String createSnapshot(String volumeName, bucketName, snapshotName); } + /** + * Rename Snapshot. + * + * @param volumeName vol to be used + * @param bucketName bucket to be used + * @param snapshotOldName Old name of the snapshot + * @param snapshotNewName New name of the snapshot + * + * @throws IOException + */ + @Override + public void renameSnapshot(String volumeName, + String bucketName, String snapshotOldName, String snapshotNewName) throws IOException { + Preconditions.checkArgument(StringUtils.isNotBlank(volumeName), + "volume can't be null or empty."); + Preconditions.checkArgument(StringUtils.isNotBlank(bucketName), + "bucket can't be null or empty."); + Preconditions.checkArgument(StringUtils.isNotBlank(snapshotOldName), + "old snapshot name can't be null or empty."); + Preconditions.checkArgument(StringUtils.isNotBlank(snapshotNewName), + "new snapshot name can't be null or empty."); + + ozoneManagerClient.renameSnapshot(volumeName, bucketName, snapshotOldName, snapshotNewName); + } + /** * Delete Snapshot. 
* @param volumeName vol to be used @@ -1397,7 +1429,7 @@ public OzoneDataStreamOutput createStreamKey( if (checkKeyNameEnabled) { HddsClientUtils.verifyKeyName(keyName); } - HddsClientUtils.checkNotNull(keyName, replicationConfig); + HddsClientUtils.checkNotNull(keyName); OmKeyArgs.Builder builder = new OmKeyArgs.Builder() .setVolumeName(volumeName) @@ -1752,9 +1784,11 @@ private OmKeyInfo getKeyInfo(OmKeyArgs keyArgs) throws IOException { @Override public void close() throws IOException { - if (ecReconstructExecutor != null) { - ecReconstructExecutor.shutdownNow(); - ecReconstructExecutor = null; + if (ecReconstructExecutor.isInitialized()) { + ecReconstructExecutor.get().shutdownNow(); + } + if (writeExecutor.isInitialized()) { + writeExecutor.get().shutdownNow(); } IOUtils.cleanupWithLogger(LOG, ozoneManagerClient, xceiverClientManager); keyProviderCache.invalidateAll(); @@ -1782,7 +1816,7 @@ public OmMultipartInfo initiateMultipartUpload(String volumeName, HddsClientUtils.checkNotNull(keyName); if (omVersion .compareTo(OzoneManagerVersion.ERASURE_CODED_STORAGE_SUPPORT) < 0) { - if (replicationConfig.getReplicationType() + if (replicationConfig != null && replicationConfig.getReplicationType() == HddsProtos.ReplicationType.EC) { throw new IOException("Can not set the replication of the file to" + " Erasure Coded replication, as OzoneManager does not support" @@ -1952,7 +1986,8 @@ public OzoneMultipartUploadPartListParts listParts(String volumeName, ozoneMultipartUploadPartListParts.addPart( new OzoneMultipartUploadPartListParts.PartInfo( omPartInfo.getPartNumber(), omPartInfo.getPartName(), - omPartInfo.getModificationTime(), omPartInfo.getSize())); + omPartInfo.getModificationTime(), omPartInfo.getSize(), + omPartInfo.getETag())); } return ozoneMultipartUploadPartListParts; @@ -2224,9 +2259,8 @@ private OzoneInputStream createInputStream( if (feInfo == null) { LengthInputStream lengthInputStream = KeyInputStream - .getFromOmKeyInfo(keyInfo, xceiverClientManager, - clientConfig.isChecksumVerify(), retryFunction, - blockInputStreamFactory); + .getFromOmKeyInfo(keyInfo, xceiverClientManager, retryFunction, + blockInputStreamFactory, clientConfig); try { final GDPRSymmetricKey gk = getGDPRSymmetricKey( keyInfo.getMetadata(), Cipher.DECRYPT_MODE); @@ -2241,9 +2275,8 @@ private OzoneInputStream createInputStream( } else if (!keyInfo.getLatestVersionLocations().isMultipartKey()) { // Regular Key with FileEncryptionInfo LengthInputStream lengthInputStream = KeyInputStream - .getFromOmKeyInfo(keyInfo, xceiverClientManager, - clientConfig.isChecksumVerify(), retryFunction, - blockInputStreamFactory); + .getFromOmKeyInfo(keyInfo, xceiverClientManager, retryFunction, + blockInputStreamFactory, clientConfig); final KeyProvider.KeyVersion decrypted = getDEK(feInfo); final CryptoInputStream cryptoIn = new CryptoInputStream(lengthInputStream.getWrappedStream(), @@ -2253,9 +2286,8 @@ private OzoneInputStream createInputStream( } else { // Multipart Key with FileEncryptionInfo List lengthInputStreams = KeyInputStream - .getStreamsFromKeyInfo(keyInfo, xceiverClientManager, - clientConfig.isChecksumVerify(), retryFunction, - blockInputStreamFactory); + .getStreamsFromKeyInfo(keyInfo, xceiverClientManager, retryFunction, + blockInputStreamFactory, clientConfig); final KeyProvider.KeyVersion decrypted = getDEK(feInfo); List cryptoInputStreams = new ArrayList<>(); @@ -2374,6 +2406,7 @@ private KeyOutputStream.Builder createKeyOutputStream( .setConfig(clientConfig) .setAtomicKeyCreation(isS3GRequest.get()) 
.setClientMetrics(clientMetrics) + .setExecutorServiceSupplier(writeExecutor) .setStreamBufferArgs(streamBufferArgs); } @@ -2495,26 +2528,11 @@ public void setTimes(OzoneObj obj, String keyName, long mtime, long atime) ozoneManagerClient.setTimes(builder.build(), mtime, atime); } - public ExecutorService getECReconstructExecutor() { - // local ref to a volatile to ensure access - // to a completed initialized object - ExecutorService executor = ecReconstructExecutor; - if (executor == null) { - synchronized (this) { - executor = ecReconstructExecutor; - if (executor == null) { - ecReconstructExecutor = new ThreadPoolExecutor( - EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE, - clientConfig.getEcReconstructStripeReadPoolLimit(), - 60, TimeUnit.SECONDS, new SynchronousQueue<>(), - new ThreadFactoryBuilder() - .setNameFormat("ec-reconstruct-reader-TID-%d") - .build(), - new ThreadPoolExecutor.CallerRunsPolicy()); - executor = ecReconstructExecutor; - } - } - } - return executor; + private static ExecutorService createThreadPoolExecutor( + int corePoolSize, int maximumPoolSize, String threadNameFormat) { + return new ThreadPoolExecutor(corePoolSize, maximumPoolSize, + 60, TimeUnit.SECONDS, new SynchronousQueue<>(), + new ThreadFactoryBuilder().setNameFormat(threadNameFormat).setDaemon(true).build(), + new ThreadPoolExecutor.CallerRunsPolicy()); } } diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockDatanodeStorage.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockDatanodeStorage.java index 393e8cdb3112..caa3996a09ff 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockDatanodeStorage.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockDatanodeStorage.java @@ -162,10 +162,10 @@ public void writeChunk( if (data.containsKey(blockKey)) { block = data.get(blockKey); assert block.size() == chunkInfo.getOffset(); - data.put(blockKey, block.concat(bytes)); + data.put(blockKey, block.concat(ByteString.copyFrom(bytes.asReadOnlyByteBuffer()))); } else { assert chunkInfo.getOffset() == 0; - data.put(blockKey, bytes); + data.put(blockKey, ByteString.copyFrom(bytes.asReadOnlyByteBuffer())); } fullBlockData diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockXceiverClientSpi.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockXceiverClientSpi.java index 7e5de329d129..0d82f0f8bbb2 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockXceiverClientSpi.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockXceiverClientSpi.java @@ -129,21 +129,26 @@ private ContainerProtos.ListBlockResponseProto listBlock(long containerID) { } private PutBlockResponseProto putBlock(PutBlockRequestProto putBlock) { + return PutBlockResponseProto.newBuilder() + .setCommittedBlockLength( + doPutBlock(putBlock.getBlockData())) + .build(); + } + + private GetCommittedBlockLengthResponseProto doPutBlock( + ContainerProtos.BlockData blockData) { long length = 0; - for (ChunkInfo chunk : putBlock.getBlockData().getChunksList()) { + for (ChunkInfo chunk : blockData.getChunksList()) { length += chunk.getLen(); } - datanodeStorage.putBlock(putBlock.getBlockData().getBlockID(), - putBlock.getBlockData()); + datanodeStorage.putBlock(blockData.getBlockID(), + blockData); - return PutBlockResponseProto.newBuilder() - .setCommittedBlockLength( - GetCommittedBlockLengthResponseProto.newBuilder() - 
.setBlockID(putBlock.getBlockData().getBlockID()) + return GetCommittedBlockLengthResponseProto.newBuilder() + .setBlockID(blockData.getBlockID()) .setBlockLength(length) - .build()) - .build(); + .build(); } private XceiverClientReply result( @@ -166,8 +171,15 @@ private WriteChunkResponseProto writeChunk( datanodeStorage .writeChunk(writeChunk.getBlockID(), writeChunk.getChunkData(), writeChunk.getData()); - return WriteChunkResponseProto.newBuilder() - .build(); + + WriteChunkResponseProto.Builder builder = + WriteChunkResponseProto.newBuilder(); + if (writeChunk.hasBlock()) { + ContainerProtos.BlockData + blockData = writeChunk.getBlock().getBlockData(); + builder.setCommittedBlockLength(doPutBlock(blockData)); + } + return builder.build(); } @Override diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockOutputStreamEntry.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockOutputStreamEntry.java index 7760e88e484a..718e724e5854 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockOutputStreamEntry.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockOutputStreamEntry.java @@ -63,10 +63,10 @@ public class TestECBlockOutputStreamEntry { try (XceiverClientManager manager = new XceiverClientManager(new OzoneConfiguration())) { HashSet clients = new HashSet<>(); - ECBlockOutputStreamEntry entry = new ECBlockOutputStreamEntry.Builder() - .setXceiverClientManager(manager) - .setPipeline(anECPipeline) - .build(); + final ECBlockOutputStreamEntry.Builder b = new ECBlockOutputStreamEntry.Builder(); + b.setXceiverClientManager(manager) + .setPipeline(anECPipeline); + final ECBlockOutputStreamEntry entry = b.build(); for (int i = 0; i < nodes.size(); i++) { clients.add( manager.acquireClient( @@ -101,10 +101,10 @@ public class TestECBlockOutputStreamEntry { try (XceiverClientManager manager = new XceiverClientManager(new OzoneConfiguration())) { HashSet clients = new HashSet<>(); - ECBlockOutputStreamEntry entry = new ECBlockOutputStreamEntry.Builder() - .setXceiverClientManager(manager) - .setPipeline(anECPipeline) - .build(); + final ECBlockOutputStreamEntry.Builder b = new ECBlockOutputStreamEntry.Builder(); + b.setXceiverClientManager(manager) + .setPipeline(anECPipeline); + final ECBlockOutputStreamEntry entry = b.build(); for (int i = 0; i < nodes.size(); i++) { clients.add( manager.acquireClient( diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java index 6af5c4b4e0d8..4d4a1ab4cbae 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java @@ -20,8 +20,10 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; @@ -39,7 +41,6 @@ import static 
org.apache.hadoop.ozone.OzoneConsts.MB; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -49,6 +50,8 @@ */ public class TestKeyInputStreamEC { + private OzoneConfiguration conf = new OzoneConfiguration(); + @Test public void testReadAgainstLargeBlockGroup() throws IOException { int dataBlocks = 10; @@ -68,10 +71,13 @@ public void testReadAgainstLargeBlockGroup() throws IOException { BlockInputStreamFactory mockStreamFactory = mock(BlockInputStreamFactory.class); when(mockStreamFactory.create(any(), any(), any(), any(), - anyBoolean(), any(), any())).thenReturn(blockInputStream); + any(), any(), any())).thenReturn(blockInputStream); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); try (LengthInputStream kis = KeyInputStream.getFromOmKeyInfo(keyInfo, - null, true, null, mockStreamFactory)) { + null, null, mockStreamFactory, + clientConfig)) { byte[] buf = new byte[100]; int readBytes = kis.read(buf, 0, 100); assertEquals(100, readBytes); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index c6e410bb45bd..d2f68f1e4d81 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -320,6 +320,7 @@ public static boolean isReadOnly( case SetRangerServiceVersion: case CreateSnapshot: case DeleteSnapshot: + case RenameSnapshot: case SnapshotMoveDeletedKeys: case SnapshotPurge: case RecoverLease: @@ -743,6 +744,47 @@ public static String normalizeKey(String keyName, return keyName; } + /** + * Normalizes a given path up to the bucket level. + * + * This method takes a path as input and normalises uptil the bucket level. + * It handles empty, removes leading slashes, and splits the path into + * segments. It then extracts the volume and bucket names, forming a + * normalized path with a single slash. Finally, any remaining segments are + * joined as the key name, returning the complete standardized path. + * + * @param path The path string to be normalized. + * @return The normalized path string. + */ + public static String normalizePathUptoBucket(String path) { + if (path == null || path.isEmpty()) { + return OM_KEY_PREFIX; // Handle empty path + } + + // Remove leading slashes + path = path.replaceAll("^/*", ""); + + String[] segments = path.split(OM_KEY_PREFIX, -1); + + String volumeName = segments[0]; + String bucketName = segments.length > 1 ? segments[1] : ""; + + // Combine volume and bucket. + StringBuilder normalizedPath = new StringBuilder(volumeName); + if (!bucketName.isEmpty()) { + normalizedPath.append(OM_KEY_PREFIX).append(bucketName); + } + + // Add remaining segments as the key + if (segments.length > 2) { + normalizedPath.append(OM_KEY_PREFIX).append( + String.join(OM_KEY_PREFIX, + Arrays.copyOfRange(segments, 2, segments.length))); + } + + return normalizedPath.toString(); + } + /** * For a given service ID, return list of configured OM hosts. 
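The new OmUtils#normalizePathUptoBucket above strips leading slashes, then treats the first two "/"-separated segments as the volume and bucket names and re-joins any remaining segments as the key. A small, non-authoritative JUnit sketch of the behaviour that follows from the code as written, assuming OM_KEY_PREFIX is "/"; the test class name is made up for illustration and is not part of the patch.

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.apache.hadoop.ozone.OmUtils;
import org.junit.jupiter.api.Test;

/** Illustrative expectations for the new helper; not included in this change. */
class TestNormalizePathUptoBucket {
  @Test
  void normalizesVolumeBucketAndKey() {
    // leading slashes are dropped; volume and bucket are joined by a single "/"
    assertEquals("vol1/bucket1", OmUtils.normalizePathUptoBucket("///vol1/bucket1"));
    // segments after the bucket are re-joined verbatim as the key portion
    assertEquals("vol1/bucket1/dir1/key1",
        OmUtils.normalizePathUptoBucket("/vol1/bucket1/dir1/key1"));
    // null or empty input falls back to the key prefix
    assertEquals("/", OmUtils.normalizePathUptoBucket(""));
    assertEquals("/", OmUtils.normalizePathUptoBucket(null));
  }
}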
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java index 7ca0634949c0..8ab39a9ff99b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java @@ -19,7 +19,8 @@ package org.apache.hadoop.ozone; -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.ByteString; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo.OzoneAclScope; @@ -29,10 +30,15 @@ import java.util.ArrayList; import java.util.BitSet; +import java.util.EnumSet; import java.util.List; import java.util.Objects; +import java.util.function.Consumer; import java.util.stream.Collectors; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.NONE; + /** * OzoneACL classes define bucket ACLs used in OZONE. * @@ -43,93 +49,80 @@ *

  • world::rw * */ -@JsonIgnoreProperties(value = {"aclBitSet"}) public class OzoneAcl { private static final String ACL_SCOPE_REGEX = ".*\\[(ACCESS|DEFAULT)\\]"; - private ACLIdentityType type; - private String name; - private BitSet aclBitSet; - private AclScope aclScope; + private final ACLIdentityType type; + private final String name; + @JsonIgnore + private final BitSet aclBitSet; + private final AclScope aclScope; private static final List EMPTY_LIST = new ArrayList<>(0); - public static final BitSet ZERO_BITSET = new BitSet(0); - /** - * Default constructor. - */ - public OzoneAcl() { + public OzoneAcl(ACLIdentityType type, String name, AclScope scope, ACLType... acls) { + this(type, name, scope, bitSetOf(acls)); } - /** - * Constructor for OzoneAcl. - * - * @param type - Type - * @param name - Name of user - * @param acl - Rights - * @param scope - AclScope - */ - public OzoneAcl(ACLIdentityType type, String name, ACLType acl, - AclScope scope) { - this.name = name; - this.aclBitSet = new BitSet(ACLType.getNoOfAcls()); - aclBitSet.set(acl.ordinal(), true); + public OzoneAcl(ACLIdentityType type, String name, AclScope scope, EnumSet acls) { + this(type, name, scope, bitSetOf(acls.toArray(new ACLType[0]))); + } + + private OzoneAcl(ACLIdentityType type, String name, AclScope scope, BitSet acls) { + this.name = validateNameAndType(type, name); this.type = type; - if (type == ACLIdentityType.WORLD || type == ACLIdentityType.ANONYMOUS) { - if (!name.equals(ACLIdentityType.WORLD.name()) && - !name.equals(ACLIdentityType.ANONYMOUS.name()) && - name.length() != 0) { - throw new IllegalArgumentException("Unexpected name:{" + name + - "} for type WORLD, ANONYMOUS. It should be WORLD & " + - "ANONYMOUS respectively."); + this.aclScope = scope; + this.aclBitSet = acls; + } + + private static BitSet bitSetOf(ACLType... acls) { + BitSet bits = new BitSet(); + if (acls != null && acls.length > 0) { + for (ACLType acl : acls) { + bits.set(acl.ordinal()); } - // For type WORLD and ANONYMOUS we allow only one acl to be set. - this.name = type.name(); } - if (((type == ACLIdentityType.USER) || (type == ACLIdentityType.GROUP)) - && (name.length() == 0)) { - throw new IllegalArgumentException("User or group name is required"); - } - aclScope = scope; + return bits; } - /** - * Constructor for OzoneAcl. - * - * @param type - Type - * @param name - Name of user - * @param acls - Rights - * @param scope - AclScope - */ - public OzoneAcl(ACLIdentityType type, String name, BitSet acls, - AclScope scope) { - Objects.requireNonNull(type); + private static BitSet validateAndCopy(BitSet acls) { Objects.requireNonNull(acls); if (acls.cardinality() > ACLType.getNoOfAcls()) { throw new IllegalArgumentException("Acl bitset passed has unexpected " + - "size. bitset size:" + acls.cardinality() + ", bitset:" - + acls.toString()); + "size. bitset size:" + acls.cardinality() + ", bitset:" + acls); } - this.aclBitSet = (BitSet) acls.clone(); - this.name = name; - this.type = type; + return copyBitSet(acls); + } + + private static BitSet copyBitSet(BitSet acls) { + return (BitSet) acls.clone(); + } + + private static String validateNameAndType(ACLIdentityType type, String name) { + Objects.requireNonNull(type); + if (type == ACLIdentityType.WORLD || type == ACLIdentityType.ANONYMOUS) { if (!name.equals(ACLIdentityType.WORLD.name()) && !name.equals(ACLIdentityType.ANONYMOUS.name()) && name.length() != 0) { - throw new IllegalArgumentException("Unexpected name:{" + name + - "} for type WORLD, ANONYMOUS. 
It should be WORLD & " + - "ANONYMOUS respectively."); + throw new IllegalArgumentException("Expected name " + type.name() + ", but was: " + name); } // For type WORLD and ANONYMOUS we allow only one acl to be set. - this.name = type.name(); + return type.name(); } + if (((type == ACLIdentityType.USER) || (type == ACLIdentityType.GROUP)) && (name.length() == 0)) { - throw new IllegalArgumentException("User or group name is required"); + throw new IllegalArgumentException(type + " name is required"); } - aclScope = scope; + + return name; + } + + public OzoneAcl withScope(final AclScope scope) { + return scope == aclScope ? this + : new OzoneAcl(type, name, scope, copyBitSet(aclBitSet)); } /** @@ -151,7 +144,6 @@ public static OzoneAcl parseAcl(String acl) } ACLIdentityType aclType = ACLIdentityType.valueOf(parts[0].toUpperCase()); - BitSet acls = new BitSet(ACLType.getNoOfAcls()); String bits = parts[2]; @@ -166,14 +158,14 @@ public static OzoneAcl parseAcl(String acl) parts[2].indexOf("]"))); } - // Set all acl bits. + EnumSet acls = EnumSet.noneOf(ACLType.class); for (char ch : bits.toCharArray()) { - acls.set(ACLType.getACLRight(String.valueOf(ch)).ordinal()); + acls.add(ACLType.getACLRight(String.valueOf(ch))); } // TODO : Support sanitation of these user names by calling into // userAuth Interface. - return new OzoneAcl(aclType, parts[1], acls, aclScope); + return new OzoneAcl(aclType, parts[1], aclScope, acls); } /** @@ -205,44 +197,14 @@ public static OzoneAclInfo toProtobuf(OzoneAcl acl) { .setName(acl.getName()) .setType(OzoneAclType.valueOf(acl.getType().name())) .setAclScope(OzoneAclScope.valueOf(acl.getAclScope().name())) - .setRights(ByteString.copyFrom(acl.getAclBitSet().toByteArray())); + .setRights(ByteString.copyFrom(acl.getAclByteArray())); return builder.build(); } public static OzoneAcl fromProtobuf(OzoneAclInfo protoAcl) { BitSet aclRights = BitSet.valueOf(protoAcl.getRights().toByteArray()); - return new OzoneAcl(ACLIdentityType.valueOf(protoAcl.getType().name()), - protoAcl.getName(), aclRights, - AclScope.valueOf(protoAcl.getAclScope().name())); - } - - /** - * Helper function to convert a proto message of type {@link OzoneAclInfo} - * to {@link OzoneAcl} with acl scope of type ACCESS. - * - * @param protoAcl - * @return OzoneAcl - * */ - public static OzoneAcl fromProtobufWithAccessType(OzoneAclInfo protoAcl) { - BitSet aclRights = BitSet.valueOf(protoAcl.getRights().toByteArray()); - return new OzoneAcl(ACLIdentityType.valueOf(protoAcl.getType().name()), - protoAcl.getName(), aclRights, AclScope.ACCESS); - } - - /** - * Helper function to convert an {@link OzoneAcl} to proto message of type - * {@link OzoneAclInfo} with acl scope of type ACCESS. 
- * - * @param acl - * @return OzoneAclInfo - * */ - public static OzoneAclInfo toProtobufWithAccessType(OzoneAcl acl) { - OzoneAclInfo.Builder builder = OzoneAclInfo.newBuilder() - .setName(acl.getName()) - .setType(OzoneAclType.valueOf(acl.getType().name())) - .setAclScope(OzoneAclScope.ACCESS) - .setRights(ByteString.copyFrom(acl.getAclBitSet().toByteArray())); - return builder.build(); + return new OzoneAcl(ACLIdentityType.valueOf(protoAcl.getType().name()), protoAcl.getName(), + AclScope.valueOf(protoAcl.getAclScope().name()), validateAndCopy(aclRights)); } public AclScope getAclScope() { @@ -266,7 +228,7 @@ public String toString() { */ @Override public int hashCode() { - return Objects.hash(this.getName(), this.getAclBitSet(), + return Objects.hash(this.getName(), aclBitSet, this.getType().toString(), this.getAclScope()); } @@ -279,17 +241,45 @@ public String getName() { return name; } - /** - * Returns Rights. - * - * @return - Rights - */ - public BitSet getAclBitSet() { - return aclBitSet; + @JsonIgnore + public boolean isEmpty() { + return aclBitSet.isEmpty(); + } + + @VisibleForTesting + public boolean isSet(ACLType acl) { + return aclBitSet.get(acl.ordinal()); + } + + public boolean checkAccess(ACLType acl) { + return (isSet(acl) || isSet(ALL)) && !isSet(NONE); + } + + public OzoneAcl add(OzoneAcl other) { + return apply(bits -> bits.or(other.aclBitSet)); + } + + public OzoneAcl remove(OzoneAcl other) { + return apply(bits -> bits.andNot(other.aclBitSet)); + } + + /** @return copy of this {@code OzoneAcl} after applying the given {@code op}, + * or this instance if {@code op} makes no difference */ + private OzoneAcl apply(Consumer op) { + final BitSet cloneBits = copyBitSet(aclBitSet); + op.accept(cloneBits); + return cloneBits.equals(aclBitSet) + ? this + : new OzoneAcl(type, name, aclScope, cloneBits); + } + + @JsonIgnore + public byte[] getAclByteArray() { + return aclBitSet.toByteArray(); } public List getAclList() { - if (aclBitSet != null) { + if (aclBitSet != null) { return aclBitSet.stream().mapToObj(a -> ACLType.values()[a]).collect(Collectors.toList()); } @@ -315,24 +305,19 @@ public ACLIdentityType getType() { */ @Override public boolean equals(Object obj) { - if (obj == null) { - return false; + if (obj == this) { + return true; } - if (getClass() != obj.getClass()) { + if (obj == null || getClass() != obj.getClass()) { return false; } OzoneAcl otherAcl = (OzoneAcl) obj; return otherAcl.getName().equals(this.getName()) && otherAcl.getType().equals(this.getType()) && - otherAcl.getAclBitSet().equals(this.getAclBitSet()) && + Objects.equals(aclBitSet, otherAcl.aclBitSet) && otherAcl.getAclScope().equals(this.getAclScope()); } - public OzoneAcl setAclScope(AclScope scope) { - this.aclScope = scope; - return this; - } - /** * Scope of ozone acl. * */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index ec001587de54..b1f9f5bd6890 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -28,21 +28,9 @@ * Ozone Manager Constants. 
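// Usage sketch, not part of the patch: how the reworked, immutable OzoneAcl
// API from the hunks above is meant to be used. Rights now follow the scope
// and are passed as varargs or an EnumSet instead of a pre-built BitSet, and
// withScope/add/remove return new instances instead of mutating. The ACLType
// constants READ and WRITE are assumed to exist unchanged; everything else is
// taken from the diff.
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;

public class OzoneAclUsageSketch {
  public static void main(String[] args) {
    // Scope comes before the rights in the new constructor order.
    OzoneAcl readOnly = new OzoneAcl(ACLIdentityType.USER, "alice",
        OzoneAcl.AclScope.ACCESS, ACLType.READ);

    // Immutable updates: each call returns a new OzoneAcl (or this same
    // instance when the operation makes no difference).
    OzoneAcl readWrite = readOnly.add(new OzoneAcl(ACLIdentityType.USER,
        "alice", OzoneAcl.AclScope.ACCESS, ACLType.WRITE));
    OzoneAcl inherited = readWrite.withScope(OzoneAcl.AclScope.DEFAULT);

    // checkAccess replaces direct inspection of the now-hidden BitSet.
    System.out.println(readWrite.checkAccess(ACLType.WRITE)); // true
    System.out.println(readOnly.checkAccess(ACLType.WRITE));  // false
    System.out.println(inherited.getAclScope());              // DEFAULT
  }
}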
*/ public final class OMConfigKeys { - public static final String - OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE = - "ozone.om.snapshot.sst_dumptool.pool.size"; - public static final int - OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE_DEFAULT = 1; - public static final String OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB = "ozone.om.snapshot.load.native.lib"; public static final boolean OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT = true; - public static final String - OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE = - "ozone.om.snapshot.sst_dumptool.buffer.size"; - public static final String - OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE_DEFAULT = "8KB"; - /** * Never constructed. */ @@ -294,6 +282,8 @@ private OMConfigKeys() { + "kerberos.keytab.file"; public static final String OZONE_OM_KERBEROS_PRINCIPAL_KEY = "ozone.om" + ".kerberos.principal"; + public static final String OZONE_OM_KERBEROS_PRINCIPAL_PATTERN_KEY = + "ozone.om.kerberos.principal.pattern"; public static final String OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE = "ozone.om.http.auth.kerberos.keytab"; public static final String OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java index 9c9a5027774f..044cc17f5e57 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java @@ -28,29 +28,37 @@ /** * Lightweight OmKeyInfo class. */ -public class BasicOmKeyInfo { - - private String volumeName; - private String bucketName; - private String keyName; - private long dataSize; - private long creationTime; - private long modificationTime; - private ReplicationConfig replicationConfig; - private boolean isFile; - - @SuppressWarnings("parameternumber") - public BasicOmKeyInfo(String volumeName, String bucketName, String keyName, - long dataSize, long creationTime, long modificationTime, - ReplicationConfig replicationConfig, boolean isFile) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.keyName = keyName; - this.dataSize = dataSize; - this.creationTime = creationTime; - this.modificationTime = modificationTime; - this.replicationConfig = replicationConfig; - this.isFile = isFile; +public final class BasicOmKeyInfo { + + private final String volumeName; + private final String bucketName; + private final String keyName; + private final long dataSize; + private final long creationTime; + private final long modificationTime; + private final ReplicationConfig replicationConfig; + private final boolean isFile; + + private BasicOmKeyInfo(Builder b) { + this.volumeName = b.volumeName; + this.bucketName = b.bucketName; + this.keyName = b.keyName; + this.dataSize = b.dataSize; + this.creationTime = b.creationTime; + this.modificationTime = b.modificationTime; + this.replicationConfig = b.replicationConfig; + this.isFile = b.isFile; + } + + private BasicOmKeyInfo(OmKeyInfo b) { + this.volumeName = b.getVolumeName(); + this.bucketName = b.getBucketName(); + this.keyName = b.getKeyName(); + this.dataSize = b.getDataSize(); + this.creationTime = b.getCreationTime(); + this.modificationTime = b.getModificationTime(); + this.replicationConfig = b.getReplicationConfig(); + this.isFile = b.isFile(); } public String getVolumeName() { @@ -139,8 +147,7 @@ public Builder setIsFile(boolean isFile) { } public BasicOmKeyInfo 
build() { - return new BasicOmKeyInfo(volumeName, bucketName, keyName, dataSize, - creationTime, modificationTime, replicationConfig, isFile); + return new BasicOmKeyInfo(this); } } @@ -233,14 +240,6 @@ public int hashCode() { } public static BasicOmKeyInfo fromOmKeyInfo(OmKeyInfo omKeyInfo) { - return new BasicOmKeyInfo( - omKeyInfo.getVolumeName(), - omKeyInfo.getBucketName(), - omKeyInfo.getKeyName(), - omKeyInfo.getDataSize(), - omKeyInfo.getCreationTime(), - omKeyInfo.getModificationTime(), - omKeyInfo.getReplicationConfig(), - omKeyInfo.isFile()); + return new BasicOmKeyInfo(omKeyInfo); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/LeaseKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/LeaseKeyInfo.java new file mode 100644 index 000000000000..a97ca6816828 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/LeaseKeyInfo.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.helpers; + +/** + * This class represents LeaseKeyInfo. + */ +public class LeaseKeyInfo { + private final OmKeyInfo keyInfo; + /** + * isKeyInfo = true indicates keyInfo is from keyTable. + * isKeyInfo = false indicates keyInfo is from openKeyTable. + */ + private boolean isKeyInfo; + + public LeaseKeyInfo(OmKeyInfo info, boolean isKeyInfo) { + this.keyInfo = info; + this.isKeyInfo = isKeyInfo; + } + + public boolean getIsKeyInfo() { + return this.isKeyInfo; + } + + public OmKeyInfo getKeyInfo() { + return keyInfo; + } +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java index e382377dff45..40c28ed5adee 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java @@ -44,44 +44,40 @@ public final class OmBucketArgs extends WithMetadata implements Auditable { /** * Bucket Version flag. */ - private Boolean isVersionEnabled; + private final Boolean isVersionEnabled; /** * Type of storage to be used for this bucket. * [RAM_DISK, SSD, DISK, ARCHIVE] */ - private StorageType storageType; + private final StorageType storageType; /** * Bucket encryption key info if encryption is enabled. 
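// Usage sketch, not part of the patch: the builder/copy-based BasicOmKeyInfo
// conversion and the new LeaseKeyInfo wrapper introduced above. The getKeyName
// and getDataSize getters on BasicOmKeyInfo are assumed to exist unchanged.
import org.apache.hadoop.ozone.om.helpers.BasicOmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;

public final class LeaseKeyInfoSketch {
  private LeaseKeyInfoSketch() { }

  /** Wraps a key that was looked up, remembering which table it came from. */
  public static LeaseKeyInfo describe(OmKeyInfo omKeyInfo, boolean fromKeyTable) {
    // fromOmKeyInfo now delegates to a private copy constructor instead of a
    // long positional constructor.
    BasicOmKeyInfo summary = BasicOmKeyInfo.fromOmKeyInfo(omKeyInfo);
    System.out.println(summary.getKeyName() + " (" + summary.getDataSize() + " bytes)");

    // isKeyInfo == true means the key came from the key table,
    // false means it came from the open-key table.
    return new LeaseKeyInfo(omKeyInfo, fromKeyTable);
  }
}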
*/ - private BucketEncryptionKeyInfo bekInfo; - private long quotaInBytes = OzoneConsts.QUOTA_RESET; - private long quotaInNamespace = OzoneConsts.QUOTA_RESET; - private boolean quotaInBytesSet = false; - private boolean quotaInNamespaceSet = false; - private DefaultReplicationConfig defaultReplicationConfig = null; + private final BucketEncryptionKeyInfo bekInfo; + private final long quotaInBytes; + private final long quotaInNamespace; + private final boolean quotaInBytesSet; + private final boolean quotaInNamespaceSet; + private final DefaultReplicationConfig defaultReplicationConfig; /** * Bucket Owner Name. */ - private String ownerName; - - /** - * Private constructor, constructed via builder. - * @param volumeName - Volume name. - * @param bucketName - Bucket name. - * @param isVersionEnabled - Bucket version flag. - * @param storageType - Storage type to be used. - */ - @SuppressWarnings("checkstyle:ParameterNumber") - private OmBucketArgs(String volumeName, String bucketName, - Boolean isVersionEnabled, StorageType storageType, - Map metadata, String ownerName) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.isVersionEnabled = isVersionEnabled; - this.storageType = storageType; - this.metadata = metadata; - this.ownerName = ownerName; + private final String ownerName; + + private OmBucketArgs(Builder b) { + super(b); + this.volumeName = b.volumeName; + this.bucketName = b.bucketName; + this.isVersionEnabled = b.isVersionEnabled; + this.storageType = b.storageType; + this.ownerName = b.ownerName; + this.defaultReplicationConfig = b.defaultReplicationConfig; + this.quotaInBytesSet = b.quotaInBytesSet; + this.quotaInBytes = quotaInBytesSet ? b.quotaInBytes : OzoneConsts.QUOTA_RESET; + this.quotaInNamespaceSet = b.quotaInNamespaceSet; + this.quotaInNamespace = quotaInNamespaceSet ? b.quotaInNamespace : OzoneConsts.QUOTA_RESET; + this.bekInfo = b.bekInfo; } /** @@ -149,7 +145,6 @@ public boolean hasQuotaInNamespace() { /** * Returns Bucket default replication config. - * @return */ public DefaultReplicationConfig getDefaultReplicationConfig() { return defaultReplicationConfig; @@ -159,30 +154,6 @@ public BucketEncryptionKeyInfo getBucketEncryptionKeyInfo() { return bekInfo; } - /** - * Sets the Bucket default replication config. - */ - private void setDefaultReplicationConfig( - DefaultReplicationConfig defaultReplicationConfig) { - this.defaultReplicationConfig = defaultReplicationConfig; - } - - private void setQuotaInBytes(long quotaInBytes) { - this.quotaInBytesSet = true; - this.quotaInBytes = quotaInBytes; - } - - private void setQuotaInNamespace(long quotaInNamespace) { - this.quotaInNamespaceSet = true; - this.quotaInNamespace = quotaInNamespace; - } - - @Deprecated - private void setBucketEncryptionKey( - BucketEncryptionKeyInfo bucketEncryptionKey) { - this.bekInfo = bucketEncryptionKey; - } - /** * Returns Bucket Owner Name. 
* @@ -206,7 +177,7 @@ public Map toAuditMap() { auditMap.put(OzoneConsts.VOLUME, this.volumeName); auditMap.put(OzoneConsts.BUCKET, this.bucketName); auditMap.put(OzoneConsts.GDPR_FLAG, - this.metadata.get(OzoneConsts.GDPR_FLAG)); + getMetadata().get(OzoneConsts.GDPR_FLAG)); auditMap.put(OzoneConsts.IS_VERSION_ENABLED, String.valueOf(this.isVersionEnabled)); if (this.storageType != null) { @@ -215,18 +186,38 @@ public Map toAuditMap() { if (this.ownerName != null) { auditMap.put(OzoneConsts.OWNER, this.ownerName); } + if (this.quotaInBytesSet && quotaInBytes > 0 || + (this.quotaInBytes != OzoneConsts.QUOTA_RESET)) { + auditMap.put(OzoneConsts.QUOTA_IN_BYTES, + String.valueOf(this.quotaInBytes)); + } + if (this.quotaInNamespaceSet && quotaInNamespace > 0 || + (this.quotaInNamespace != OzoneConsts.QUOTA_RESET)) { + auditMap.put(OzoneConsts.QUOTA_IN_NAMESPACE, + String.valueOf(this.quotaInNamespace)); + } + if (this.bekInfo != null) { + auditMap.put(OzoneConsts.BUCKET_ENCRYPTION_KEY, + this.bekInfo.getKeyName()); + } + if (this.defaultReplicationConfig != null) { + auditMap.put(OzoneConsts.REPLICATION_TYPE, String.valueOf( + this.defaultReplicationConfig.getType())); + auditMap.put(OzoneConsts.REPLICATION_CONFIG, + this.defaultReplicationConfig.getReplicationConfig() + .getReplication()); + } return auditMap; } /** * Builder for OmBucketArgs. */ - public static class Builder { + public static class Builder extends WithMetadata.Builder { private String volumeName; private String bucketName; private Boolean isVersionEnabled; private StorageType storageType; - private Map metadata; private boolean quotaInBytesSet = false; private long quotaInBytes; private boolean quotaInNamespaceSet = false; @@ -259,12 +250,15 @@ public Builder setIsVersionEnabled(Boolean versionFlag) { @Deprecated public Builder setBucketEncryptionKey(BucketEncryptionKeyInfo info) { - this.bekInfo = info; + if (info == null || info.getKeyName() != null) { + this.bekInfo = info; + } return this; } - public Builder addMetadata(Map metadataMap) { - this.metadata = metadataMap; + @Override + public Builder addAllMetadata(Map map) { + super.addAllMetadata(map); return this; } @@ -303,20 +297,7 @@ public Builder setOwnerName(String owner) { public OmBucketArgs build() { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); - OmBucketArgs omBucketArgs = - new OmBucketArgs(volumeName, bucketName, isVersionEnabled, - storageType, metadata, ownerName); - omBucketArgs.setDefaultReplicationConfig(defaultReplicationConfig); - if (quotaInBytesSet) { - omBucketArgs.setQuotaInBytes(quotaInBytes); - } - if (quotaInNamespaceSet) { - omBucketArgs.setQuotaInNamespace(quotaInNamespace); - } - if (bekInfo != null && bekInfo.getKeyName() != null) { - omBucketArgs.setBucketEncryptionKey(bekInfo); - } - return omBucketArgs; + return new OmBucketArgs(this); } } @@ -348,7 +329,7 @@ public BucketArgs getProtobuf() { builder.setOwnerName(ownerName); } - if (bekInfo != null && bekInfo.getKeyName() != null) { + if (bekInfo != null) { builder.setBekInfo(OMPBHelper.convert(bekInfo)); } @@ -357,39 +338,42 @@ public BucketArgs getProtobuf() { /** * Parses BucketInfo protobuf and creates OmBucketArgs. - * @param bucketArgs * @return instance of OmBucketArgs */ public static OmBucketArgs getFromProtobuf(BucketArgs bucketArgs) { - OmBucketArgs omBucketArgs = - new OmBucketArgs(bucketArgs.getVolumeName(), - bucketArgs.getBucketName(), - bucketArgs.hasIsVersionEnabled() ? 
- bucketArgs.getIsVersionEnabled() : null, - bucketArgs.hasStorageType() ? StorageType.valueOf( - bucketArgs.getStorageType()) : null, - KeyValueUtil.getFromProtobuf(bucketArgs.getMetadataList()), - bucketArgs.hasOwnerName() ? - bucketArgs.getOwnerName() : null); - // OmBucketArgs ctor already has more arguments, so setting the default - // replication config separately. + final OmBucketArgs.Builder builder = newBuilder() + .setVolumeName(bucketArgs.getVolumeName()) + .setBucketName(bucketArgs.getBucketName()) + .addAllMetadata(KeyValueUtil.getFromProtobuf(bucketArgs.getMetadataList())); + + if (bucketArgs.hasIsVersionEnabled()) { + builder.setIsVersionEnabled(bucketArgs.getIsVersionEnabled()); + } + if (bucketArgs.hasStorageType()) { + builder.setStorageType(StorageType.valueOf(bucketArgs.getStorageType())); + } + if (bucketArgs.hasOwnerName()) { + builder.setOwnerName(bucketArgs.getOwnerName()); + } + if (bucketArgs.hasDefaultReplicationConfig()) { - omBucketArgs.setDefaultReplicationConfig( + builder.setDefaultReplicationConfig( DefaultReplicationConfig.fromProto( bucketArgs.getDefaultReplicationConfig())); } if (bucketArgs.hasQuotaInBytes()) { - omBucketArgs.setQuotaInBytes(bucketArgs.getQuotaInBytes()); + builder.setQuotaInBytes(bucketArgs.getQuotaInBytes()); } if (bucketArgs.hasQuotaInNamespace()) { - omBucketArgs.setQuotaInNamespace(bucketArgs.getQuotaInNamespace()); + builder.setQuotaInNamespace(bucketArgs.getQuotaInNamespace()); } if (bucketArgs.hasBekInfo()) { - omBucketArgs.setBucketEncryptionKey( + builder.setBucketEncryptionKey( OMPBHelper.convert(bucketArgs.getBekInfo())); } - return omBucketArgs; + + return builder.build(); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java index cc811053eb27..8dfb2f88f98b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java @@ -19,14 +19,13 @@ import java.util.ArrayList; -import java.util.BitSet; -import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.stream.Collectors; +import com.google.common.collect.ImmutableList; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.hdds.utils.db.Codec; @@ -109,68 +108,25 @@ public static Codec getCodec() { private String owner; - /** - * Private constructor, constructed via builder. - * @param volumeName - Volume name. - * @param bucketName - Bucket name. - * @param acls - list of ACLs. - * @param isVersionEnabled - Bucket version flag. - * @param storageType - Storage type to be used. - * @param creationTime - Bucket creation time. - * @param modificationTime - Bucket modification time. - * @param metadata - metadata. - * @param bekInfo - bucket encryption key info. - * @param sourceVolume - source volume for bucket links, null otherwise - * @param sourceBucket - source bucket for bucket links, null otherwise - * @param usedBytes - Bucket Quota Usage in bytes. - * @param quotaInBytes Bucket quota in bytes. - * @param quotaInNamespace Bucket quota in counts. - * @param bucketLayout bucket layout. - * @param owner owner of the bucket. - * @param defaultReplicationConfig default replication config. - * @param bucketLayout Bucket Layout. 
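// Usage sketch, not part of the patch: OmBucketArgs is now assembled entirely
// through its Builder and is immutable afterwards; the removed private setters
// (setQuotaInBytes, setBucketEncryptionKey, ...) are no longer reachable. The
// Builder calls below are the ones exercised by getFromProtobuf above; the
// concrete values are only illustrative.
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;

public final class OmBucketArgsSketch {
  private OmBucketArgsSketch() { }

  public static OmBucketArgs quotaAndOwnerUpdate(String volume, String bucket) {
    return OmBucketArgs.newBuilder()
        .setVolumeName(volume)                      // required
        .setBucketName(bucket)                      // required
        .setStorageType(StorageType.SSD)
        .setOwnerName("hadoop")
        .setQuotaInBytes(10L * 1024 * 1024 * 1024)  // 10 GB space quota
        .setQuotaInNamespace(100_000L)              // key-count quota
        .build();
  }
}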
- */ - @SuppressWarnings("checkstyle:ParameterNumber") - private OmBucketInfo(String volumeName, - String bucketName, - List acls, - boolean isVersionEnabled, - StorageType storageType, - long creationTime, - long modificationTime, - long objectID, - long updateID, - Map metadata, - BucketEncryptionKeyInfo bekInfo, - String sourceVolume, - String sourceBucket, - long usedBytes, - long usedNamespace, - long quotaInBytes, - long quotaInNamespace, - BucketLayout bucketLayout, - String owner, - DefaultReplicationConfig defaultReplicationConfig) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.acls = acls; - this.isVersionEnabled = isVersionEnabled; - this.storageType = storageType; - this.creationTime = creationTime; - this.modificationTime = modificationTime; - this.objectID = objectID; - this.updateID = updateID; - this.metadata = metadata; - this.bekInfo = bekInfo; - this.sourceVolume = sourceVolume; - this.sourceBucket = sourceBucket; - this.usedBytes = usedBytes; - this.usedNamespace = usedNamespace; - this.quotaInBytes = quotaInBytes; - this.quotaInNamespace = quotaInNamespace; - this.bucketLayout = bucketLayout; - this.owner = owner; - this.defaultReplicationConfig = defaultReplicationConfig; + private OmBucketInfo(Builder b) { + super(b); + this.volumeName = b.volumeName; + this.bucketName = b.bucketName; + this.acls = b.acls; + this.isVersionEnabled = b.isVersionEnabled; + this.storageType = b.storageType; + this.creationTime = b.creationTime; + this.modificationTime = b.modificationTime; + this.bekInfo = b.bekInfo; + this.sourceVolume = b.sourceVolume; + this.sourceBucket = b.sourceBucket; + this.usedBytes = b.usedBytes; + this.usedNamespace = b.usedNamespace; + this.quotaInBytes = b.quotaInBytes; + this.quotaInNamespace = b.quotaInNamespace; + this.bucketLayout = b.bucketLayout; + this.owner = b.owner; + this.defaultReplicationConfig = b.defaultReplicationConfig; } /** @@ -194,7 +150,7 @@ public String getBucketName() { * @return {@literal List} */ public List getAcls() { - return acls; + return ImmutableList.copyOf(acls); } /** @@ -351,7 +307,7 @@ public Map toAuditMap() { auditMap.put(OzoneConsts.BUCKET, this.bucketName); auditMap.put(OzoneConsts.BUCKET_LAYOUT, String.valueOf(this.bucketLayout)); auditMap.put(OzoneConsts.GDPR_FLAG, - this.metadata.get(OzoneConsts.GDPR_FLAG)); + getMetadata().get(OzoneConsts.GDPR_FLAG)); auditMap.put(OzoneConsts.ACLS, (this.acls != null) ? this.acls.toString() : null); auditMap.put(OzoneConsts.IS_VERSION_ENABLED, @@ -370,6 +326,17 @@ public Map toAuditMap() { auditMap.put(OzoneConsts.USED_BYTES, String.valueOf(this.usedBytes)); auditMap.put(OzoneConsts.USED_NAMESPACE, String.valueOf(this.usedNamespace)); + auditMap.put(OzoneConsts.OWNER, this.owner); + auditMap.put(OzoneConsts.REPLICATION_TYPE, + (this.defaultReplicationConfig != null) ? + String.valueOf(this.defaultReplicationConfig.getType()) : null); + auditMap.put(OzoneConsts.REPLICATION_CONFIG, + (this.defaultReplicationConfig != null) ? 
+ this.defaultReplicationConfig.getReplicationConfig() + .getReplication() : null); + auditMap.put(OzoneConsts.QUOTA_IN_BYTES, String.valueOf(this.quotaInBytes)); + auditMap.put(OzoneConsts.QUOTA_IN_NAMESPACE, + String.valueOf(this.quotaInNamespace)); return auditMap; } @@ -383,11 +350,6 @@ public OmBucketInfo copyObject() { builder.setBucketEncryptionKey(bekInfo.copy()); } - builder.acls.clear(); - acls.forEach(acl -> builder.addAcl(new OzoneAcl(acl.getType(), - acl.getName(), (BitSet) acl.getAclBitSet().clone(), - acl.getAclScope()))); - if (defaultReplicationConfig != null) { builder.setDefaultReplicationConfig(defaultReplicationConfig.copy()); } @@ -396,20 +358,17 @@ public OmBucketInfo copyObject() { } public Builder toBuilder() { - return new Builder() + return new Builder(this) .setVolumeName(volumeName) .setBucketName(bucketName) .setStorageType(storageType) .setIsVersionEnabled(isVersionEnabled) .setCreationTime(creationTime) .setModificationTime(modificationTime) - .setObjectID(objectID) - .setUpdateID(updateID) .setBucketEncryptionKey(bekInfo) .setSourceVolume(sourceVolume) .setSourceBucket(sourceBucket) .setAcls(acls) - .addAllMetadata(metadata) .setUsedBytes(usedBytes) .setUsedNamespace(usedNamespace) .setQuotaInBytes(quotaInBytes) @@ -422,37 +381,30 @@ public Builder toBuilder() { /** * Builder for OmBucketInfo. */ - public static class Builder { + public static class Builder extends WithObjectID.Builder { private String volumeName; private String bucketName; - private List acls; - private Boolean isVersionEnabled; - private StorageType storageType; + private final List acls = new ArrayList<>(); + private boolean isVersionEnabled; + private StorageType storageType = StorageType.DISK; private long creationTime; private long modificationTime; - private long objectID; - private long updateID; - private Map metadata; private BucketEncryptionKeyInfo bekInfo; private String sourceVolume; private String sourceBucket; private long usedBytes; private long usedNamespace; - private long quotaInBytes; - private long quotaInNamespace; - private BucketLayout bucketLayout; + private long quotaInBytes = OzoneConsts.QUOTA_RESET; + private long quotaInNamespace = OzoneConsts.QUOTA_RESET; + private BucketLayout bucketLayout = BucketLayout.DEFAULT; private String owner; private DefaultReplicationConfig defaultReplicationConfig; public Builder() { - //Default values - this.acls = new ArrayList<>(); - this.isVersionEnabled = false; - this.storageType = StorageType.DISK; - this.metadata = new HashMap<>(); - this.quotaInBytes = OzoneConsts.QUOTA_RESET; - this.quotaInNamespace = OzoneConsts.QUOTA_RESET; - this.bucketLayout = BucketLayout.DEFAULT; + } + + private Builder(OmBucketInfo obj) { + super(obj); } public Builder setVolumeName(String volume) { @@ -483,7 +435,7 @@ public Builder addAcl(OzoneAcl ozoneAcl) { return this; } - public Builder setIsVersionEnabled(Boolean versionFlag) { + public Builder setIsVersionEnabled(boolean versionFlag) { this.isVersionEnabled = versionFlag; return this; } @@ -503,25 +455,27 @@ public Builder setModificationTime(long modifiedOn) { return this; } + @Override public Builder setObjectID(long obId) { - this.objectID = obId; + super.setObjectID(obId); return this; } + @Override public Builder setUpdateID(long id) { - this.updateID = id; + super.setUpdateID(id); return this; } + @Override public Builder addMetadata(String key, String value) { - metadata.put(key, value); + super.addMetadata(key, value); return this; } + @Override public Builder addAllMetadata(Map 
additionalMetadata) { - if (additionalMetadata != null) { - metadata.putAll(additionalMetadata); - } + super.addAllMetadata(additionalMetadata); return this; } @@ -531,31 +485,37 @@ public Builder setBucketEncryptionKey( return this; } + /** @param volume - source volume for bucket links, null otherwise */ public Builder setSourceVolume(String volume) { this.sourceVolume = volume; return this; } + /** @param bucket - source bucket for bucket links, null otherwise */ public Builder setSourceBucket(String bucket) { this.sourceBucket = bucket; return this; } + /** @param quotaUsage - Bucket Quota Usage in bytes. */ public Builder setUsedBytes(long quotaUsage) { this.usedBytes = quotaUsage; return this; } + /** @param quotaUsage - Bucket Quota Usage in counts. */ public Builder setUsedNamespace(long quotaUsage) { this.usedNamespace = quotaUsage; return this; } + /** @param quota Bucket quota in bytes. */ public Builder setQuotaInBytes(long quota) { this.quotaInBytes = quota; return this; } + /** @param quota Bucket quota in counts. */ public Builder setQuotaInNamespace(long quota) { this.quotaInNamespace = quota; return this; @@ -585,13 +545,8 @@ public OmBucketInfo build() { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); Preconditions.checkNotNull(acls); - Preconditions.checkNotNull(isVersionEnabled); Preconditions.checkNotNull(storageType); - return new OmBucketInfo(volumeName, bucketName, acls, isVersionEnabled, - storageType, creationTime, modificationTime, objectID, updateID, - metadata, bekInfo, sourceVolume, sourceBucket, usedBytes, - usedNamespace, quotaInBytes, quotaInNamespace, bucketLayout, owner, - defaultReplicationConfig); + return new OmBucketInfo(this); } } @@ -607,11 +562,11 @@ public BucketInfo getProtobuf() { .setStorageType(storageType.toProto()) .setCreationTime(creationTime) .setModificationTime(modificationTime) - .setObjectID(objectID) - .setUpdateID(updateID) + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()) .setUsedBytes(usedBytes) .setUsedNamespace(usedNamespace) - .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) + .addAllMetadata(KeyValueUtil.toProtobuf(getMetadata())) .setQuotaInBytes(quotaInBytes) .setQuotaInNamespace(quotaInNamespace); if (bucketLayout != null) { @@ -739,13 +694,13 @@ public boolean equals(Object o) { Objects.equals(acls, that.acls) && Objects.equals(isVersionEnabled, that.isVersionEnabled) && storageType == that.storageType && - objectID == that.objectID && - updateID == that.updateID && + getObjectID() == that.getObjectID() && + getUpdateID() == that.getUpdateID() && usedBytes == that.usedBytes && usedNamespace == that.usedNamespace && Objects.equals(sourceVolume, that.sourceVolume) && Objects.equals(sourceBucket, that.sourceBucket) && - Objects.equals(metadata, that.metadata) && + Objects.equals(getMetadata(), that.getMetadata()) && Objects.equals(bekInfo, that.bekInfo) && Objects.equals(owner, that.owner) && Objects.equals(defaultReplicationConfig, that.defaultReplicationConfig); @@ -768,9 +723,9 @@ public String toString() { ", bekInfo=" + bekInfo + ", sourceVolume='" + sourceVolume + "'" + ", sourceBucket='" + sourceBucket + "'" + - ", objectID=" + objectID + - ", updateID=" + updateID + - ", metadata=" + metadata + + ", objectID=" + getObjectID() + + ", updateID=" + getUpdateID() + + ", metadata=" + getMetadata() + ", usedBytes=" + usedBytes + ", usedNamespace=" + usedNamespace + ", quotaInBytes=" + quotaInBytes + diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java index 3d1940bd7ce2..1c4a37631e3b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java @@ -25,8 +25,6 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DirectoryInfo; -import java.util.BitSet; -import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -56,12 +54,9 @@ public static Codec getCodec() { private final List acls; public OmDirectoryInfo(Builder builder) { + super(builder); this.name = builder.name; this.acls = builder.acls; - this.metadata = builder.metadata; - this.objectID = builder.objectID; - this.updateID = builder.updateID; - this.parentObjectID = builder.parentObjectID; this.creationTime = builder.creationTime; this.modificationTime = builder.modificationTime; } @@ -78,38 +73,34 @@ public static OmDirectoryInfo.Builder newBuilder() { /** * Builder for Directory Info. */ - public static class Builder { - private long parentObjectID; // pointer to parent directory - - private long objectID; - private long updateID; - + public static class Builder extends WithParentObjectId.Builder { private String name; private long creationTime; private long modificationTime; private final List acls; - private final Map metadata; public Builder() { //Default values this.acls = new LinkedList<>(); - this.metadata = new HashMap<>(); } + @Override public Builder setParentObjectID(long parentObjectId) { - this.parentObjectID = parentObjectId; + super.setParentObjectID(parentObjectId); return this; } + @Override public Builder setObjectID(long objectId) { - this.objectID = objectId; + super.setObjectID(objectId); return this; } + @Override public Builder setUpdateID(long updateId) { - this.updateID = updateId; + super.setUpdateID(updateId); return this; } @@ -142,15 +133,15 @@ public Builder addAcl(OzoneAcl ozoneAcl) { return this; } + @Override public Builder addMetadata(String key, String value) { - metadata.put(key, value); + super.addMetadata(key, value); return this; } + @Override public Builder addAllMetadata(Map additionalMetadata) { - if (additionalMetadata != null) { - metadata.putAll(additionalMetadata); - } + super.addAllMetadata(additionalMetadata); return this; } @@ -164,10 +155,6 @@ public String toString() { return getPath() + ":" + getObjectID(); } - public long getParentObjectID() { - return parentObjectID; - } - public String getPath() { return getParentObjectID() + OzoneConsts.OM_KEY_PREFIX + getName(); } @@ -196,10 +183,10 @@ public DirectoryInfo getProtobuf() { DirectoryInfo.newBuilder().setName(name) .setCreationTime(creationTime) .setModificationTime(modificationTime) - .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) - .setObjectID(objectID) - .setUpdateID(updateID) - .setParentID(parentObjectID); + .addAllMetadata(KeyValueUtil.toProtobuf(getMetadata())) + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()) + .setParentID(getParentObjectID()); if (acls != null) { pib.addAllAcls(OzoneAclUtil.toProtobuf(acls)); } @@ -245,16 +232,16 @@ public boolean equals(Object o) { return creationTime == omDirInfo.creationTime && modificationTime == omDirInfo.modificationTime && name.equals(omDirInfo.name) && - Objects.equals(metadata, 
omDirInfo.metadata) && + Objects.equals(getMetadata(), omDirInfo.getMetadata()) && Objects.equals(acls, omDirInfo.acls) && - objectID == omDirInfo.objectID && - updateID == omDirInfo.updateID && - parentObjectID == omDirInfo.parentObjectID; + getObjectID() == omDirInfo.getObjectID() && + getUpdateID() == omDirInfo.getUpdateID() && + getParentObjectID() == omDirInfo.getParentObjectID(); } @Override public int hashCode() { - return Objects.hash(objectID, parentObjectID, name); + return Objects.hash(getObjectID(), getParentObjectID(), name); } /** @@ -266,16 +253,13 @@ public OmDirectoryInfo copyObject() { .setName(name) .setCreationTime(creationTime) .setModificationTime(modificationTime) - .setParentObjectID(parentObjectID) - .setObjectID(objectID) - .setUpdateID(updateID); - - acls.forEach(acl -> builder.addAcl(new OzoneAcl(acl.getType(), - acl.getName(), (BitSet) acl.getAclBitSet().clone(), - acl.getAclScope()))); + .setAcls(acls) + .setParentObjectID(getParentObjectID()) + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()); - if (metadata != null) { - builder.addAllMetadata(metadata); + if (getMetadata() != null) { + builder.addAllMetadata(getMetadata()); } return builder.build(); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java index 453dc3b957c0..132c39c4d00e 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java @@ -45,39 +45,31 @@ public final class OmKeyArgs implements Auditable { private final boolean isMultipartKey; private final String multipartUploadID; private final int multipartUploadPartNumber; - private Map metadata; - private boolean sortDatanodesInPipeline; - private List acls; - private boolean latestVersionLocation; - private boolean recursive; - private boolean headOp; - private boolean forceUpdateContainerCacheFromSCM; - - @SuppressWarnings("parameternumber") - private OmKeyArgs(String volumeName, String bucketName, String keyName, - long dataSize, ReplicationConfig replicationConfig, - List locationInfoList, boolean isMultipart, - String uploadID, int partNumber, - Map metadataMap, - List acls, boolean sortDatanode, - boolean latestVersionLocation, boolean recursive, boolean headOp, - boolean forceUpdateContainerCacheFromSCM) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.keyName = keyName; - this.dataSize = dataSize; - this.replicationConfig = replicationConfig; - this.locationInfoList = locationInfoList; - this.isMultipartKey = isMultipart; - this.multipartUploadID = uploadID; - this.multipartUploadPartNumber = partNumber; - this.metadata = metadataMap; - this.acls = acls; - this.sortDatanodesInPipeline = sortDatanode; - this.latestVersionLocation = latestVersionLocation; - this.recursive = recursive; - this.headOp = headOp; - this.forceUpdateContainerCacheFromSCM = forceUpdateContainerCacheFromSCM; + private final Map metadata; + private final boolean sortDatanodesInPipeline; + private final List acls; + private final boolean latestVersionLocation; + private final boolean recursive; + private final boolean headOp; + private final boolean forceUpdateContainerCacheFromSCM; + + private OmKeyArgs(Builder b) { + this.volumeName = b.volumeName; + this.bucketName = b.bucketName; + this.keyName = b.keyName; + this.dataSize = b.dataSize; + this.replicationConfig = 
b.replicationConfig; + this.locationInfoList = b.locationInfoList; + this.isMultipartKey = b.isMultipartKey; + this.multipartUploadID = b.multipartUploadID; + this.multipartUploadPartNumber = b.multipartUploadPartNumber; + this.metadata = b.metadata; + this.acls = b.acls; + this.sortDatanodesInPipeline = b.sortDatanodesInPipeline; + this.latestVersionLocation = b.latestVersionLocation; + this.recursive = b.recursive; + this.headOp = b.headOp; + this.forceUpdateContainerCacheFromSCM = b.forceUpdateContainerCacheFromSCM; } public boolean getIsMultipartKey() { @@ -124,10 +116,6 @@ public Map getMetadata() { return metadata; } - public void setMetadata(Map metadata) { - this.metadata = metadata; - } - public void setLocationInfoList(List locationInfoList) { this.locationInfoList = locationInfoList; } @@ -224,7 +212,7 @@ public static class Builder { private boolean isMultipartKey; private String multipartUploadID; private int multipartUploadPartNumber; - private Map metadata = new HashMap<>(); + private final Map metadata = new HashMap<>(); private boolean sortDatanodesInPipeline; private boolean latestVersionLocation; private List acls; @@ -326,12 +314,7 @@ public Builder setForceUpdateContainerCacheFromSCM(boolean value) { } public OmKeyArgs build() { - return new OmKeyArgs(volumeName, bucketName, keyName, dataSize, - replicationConfig, locationInfoList, isMultipartKey, - multipartUploadID, - multipartUploadPartNumber, metadata, acls, - sortDatanodesInPipeline, latestVersionLocation, recursive, headOp, - forceUpdateContainerCacheFromSCM); + return new OmKeyArgs(this); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index c3a1a4a3d77b..5186dd65fd3b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -19,12 +19,12 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.BitSet; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import com.google.common.collect.ImmutableList; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.fs.FileChecksum; @@ -98,47 +98,23 @@ public static Codec getCodec(boolean ignorePipeline) { /** * ACL Information. 
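// Usage sketch, not part of the patch: OmKeyArgs is now immutable, so the
// removed setMetadata() call path is gone and everything must be supplied
// while building. The setVolumeName/setBucketName/setKeyName/setHeadOp
// setters are assumed to be the pre-existing Builder methods, which this
// patch does not change.
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;

public final class OmKeyArgsSketch {
  private OmKeyArgsSketch() { }

  public static OmKeyArgs headRequest(String volume, String bucket, String key) {
    return new OmKeyArgs.Builder()
        .setVolumeName(volume)
        .setBucketName(bucket)
        .setKeyName(key)
        .setHeadOp(true)  // metadata-only lookup, no block locations needed
        .build();
  }
}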
*/ - private List acls; - - @SuppressWarnings("parameternumber") - OmKeyInfo(String volumeName, String bucketName, String keyName, - List versions, long dataSize, - long creationTime, long modificationTime, - ReplicationConfig replicationConfig, - Map metadata, - FileEncryptionInfo encInfo, List acls, - long objectID, long updateID, FileChecksum fileChecksum) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.keyName = keyName; - this.dataSize = dataSize; - this.keyLocationVersions = versions; - this.creationTime = creationTime; - this.modificationTime = modificationTime; - this.replicationConfig = replicationConfig; - this.metadata = metadata; - this.encInfo = encInfo; - this.acls = acls; - this.objectID = objectID; - this.updateID = updateID; - this.fileChecksum = fileChecksum; - } - - @SuppressWarnings("parameternumber") - OmKeyInfo(String volumeName, String bucketName, String keyName, - String fileName, List versions, - long dataSize, long creationTime, long modificationTime, - ReplicationConfig replicationConfig, - Map metadata, - FileEncryptionInfo encInfo, List acls, - long parentObjectID, long objectID, long updateID, - FileChecksum fileChecksum, boolean isFile) { - this(volumeName, bucketName, keyName, versions, dataSize, - creationTime, modificationTime, replicationConfig, metadata, - encInfo, acls, objectID, updateID, fileChecksum); - this.fileName = fileName; - this.parentObjectID = parentObjectID; - this.isFile = isFile; + private final List acls; + + private OmKeyInfo(Builder b) { + super(b); + this.volumeName = b.volumeName; + this.bucketName = b.bucketName; + this.keyName = b.keyName; + this.dataSize = b.dataSize; + this.keyLocationVersions = b.omKeyLocationInfoGroups; + this.creationTime = b.creationTime; + this.modificationTime = b.modificationTime; + this.replicationConfig = b.replicationConfig; + this.encInfo = b.encInfo; + this.acls = b.acls; + this.fileChecksum = b.fileChecksum; + this.fileName = b.fileName; + this.isFile = b.isFile; } public String getVolumeName() { @@ -181,11 +157,6 @@ public String getFileName() { return fileName; } - public long getParentObjectID() { - return parentObjectID; - } - - public synchronized OmKeyLocationInfoGroup getLatestVersionLocations() { return keyLocationVersions.size() == 0 ? null : keyLocationVersions.get(keyLocationVersions.size() - 1); @@ -200,10 +171,6 @@ public void setKeyLocationVersions( this.keyLocationVersions = keyLocationVersions; } - public void updateModifcationTime() { - this.modificationTime = Time.monotonicNow(); - } - public void setFile(boolean file) { isFile = file; } @@ -213,7 +180,7 @@ public boolean isFile() { } public boolean isHsync() { - return metadata.containsKey(OzoneConsts.HSYNC_CLIENT_ID); + return getMetadata().containsKey(OzoneConsts.HSYNC_CLIENT_ID); } /** @@ -398,7 +365,7 @@ public FileEncryptionInfo getFileEncryptionInfo() { } public List getAcls() { - return acls; + return ImmutableList.copyOf(acls); } public boolean addAcl(OzoneAcl acl) { @@ -413,10 +380,6 @@ public boolean setAcls(List newAcls) { return OzoneAclUtil.setAcl(acls, newAcls); } - public void setParentObjectID(long parentObjectID) { - this.parentObjectID = parentObjectID; - } - public void setReplicationConfig(ReplicationConfig repConfig) { this.replicationConfig = repConfig; } @@ -447,32 +410,29 @@ public String toString() { /** * Builder of OmKeyInfo. 
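// Sketch, not part of the patch, of the behavioral change above: getAcls()
// now returns an immutable copy, so callers must go through addAcl/setAcls
// instead of mutating the returned list. keyInfo stands for any existing
// OmKeyInfo instance.
import java.util.List;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;

public final class OmKeyInfoAclSketch {
  private OmKeyInfoAclSketch() { }

  public static void grantRead(OmKeyInfo keyInfo, String user) {
    List<OzoneAcl> snapshot = keyInfo.getAcls();
    // snapshot.add(...) would throw UnsupportedOperationException; use addAcl:
    boolean added = keyInfo.addAcl(new OzoneAcl(ACLIdentityType.USER, user,
        OzoneAcl.AclScope.ACCESS, ACLType.READ));
    System.out.println("acl added: " + added + ", previously had " + snapshot.size());
  }
}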
*/ - public static class Builder { + public static class Builder extends WithParentObjectId.Builder { private String volumeName; private String bucketName; private String keyName; private long dataSize; - private List omKeyLocationInfoGroups = + private final List omKeyLocationInfoGroups = new ArrayList<>(); private long creationTime; private long modificationTime; private ReplicationConfig replicationConfig; - private Map metadata; private FileEncryptionInfo encInfo; - private List acls; - private long objectID; - private long updateID; + private final List acls = new ArrayList<>(); // not persisted to DB. FileName will be the last element in path keyName. private String fileName; - private long parentObjectID; private FileChecksum fileChecksum; private boolean isFile; public Builder() { - this.metadata = new HashMap<>(); - omKeyLocationInfoGroups = new ArrayList<>(); - acls = new ArrayList<>(); + } + + public Builder(OmKeyInfo obj) { + super(obj); } public Builder setVolumeName(String volume) { @@ -526,13 +486,15 @@ public Builder setReplicationConfig(ReplicationConfig replConfig) { return this; } + @Override public Builder addMetadata(String key, String value) { - metadata.put(key, value); + super.addMetadata(key, value); return this; } + @Override public Builder addAllMetadata(Map newMetadata) { - metadata.putAll(newMetadata); + super.addAllMetadata(newMetadata); return this; } @@ -555,13 +517,15 @@ public Builder addAcl(OzoneAcl ozoneAcl) { return this; } + @Override public Builder setObjectID(long obId) { - this.objectID = obId; + super.setObjectID(obId); return this; } + @Override public Builder setUpdateID(long id) { - this.updateID = id; + super.setUpdateID(id); return this; } @@ -570,8 +534,9 @@ public Builder setFileName(String keyFileName) { return this; } + @Override public Builder setParentObjectID(long parentID) { - this.parentObjectID = parentID; + super.setParentObjectID(parentID); return this; } @@ -586,11 +551,7 @@ public Builder setFile(boolean isAFile) { } public OmKeyInfo build() { - return new OmKeyInfo( - volumeName, bucketName, keyName, fileName, - omKeyLocationInfoGroups, dataSize, creationTime, - modificationTime, replicationConfig, metadata, encInfo, acls, - parentObjectID, objectID, updateID, fileChecksum, isFile); + return new OmKeyInfo(this); } } @@ -674,11 +635,11 @@ private KeyInfo getProtobuf(boolean ignorePipeline, String fullKeyName, .addAllKeyLocationList(keyLocations) .setCreationTime(creationTime) .setModificationTime(modificationTime) - .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) + .addAllMetadata(KeyValueUtil.toProtobuf(getMetadata())) .addAllAcls(OzoneAclUtil.toProtobuf(acls)) - .setObjectID(objectID) - .setUpdateID(updateID) - .setParentID(parentObjectID); + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()) + .setParentID(getParentObjectID()); FileChecksumProto fileChecksumProto = OMPBHelper.convert(fileChecksum); if (fileChecksumProto != null) { @@ -753,8 +714,8 @@ public String getObjectInfo() { ", key='" + keyName + '\'' + ", dataSize='" + dataSize + '\'' + ", creationTime='" + creationTime + '\'' + - ", objectID='" + objectID + '\'' + - ", parentID='" + parentObjectID + '\'' + + ", objectID='" + getObjectID() + '\'' + + ", parentID='" + getParentObjectID() + '\'' + ", replication='" + replicationConfig + '\'' + ", fileChecksum='" + fileChecksum + '}'; @@ -770,12 +731,12 @@ public boolean isKeyInfoSame(OmKeyInfo omKeyInfo, boolean checkPath, volumeName.equals(omKeyInfo.volumeName) && bucketName.equals(omKeyInfo.bucketName) && 
replicationConfig.equals(omKeyInfo.replicationConfig) && - Objects.equals(metadata, omKeyInfo.metadata) && + Objects.equals(getMetadata(), omKeyInfo.getMetadata()) && Objects.equals(acls, omKeyInfo.acls) && - objectID == omKeyInfo.objectID; + getObjectID() == omKeyInfo.getObjectID(); if (isEqual && checkUpdateID) { - isEqual = updateID == omKeyInfo.updateID; + isEqual = getUpdateID() == omKeyInfo.getUpdateID(); } if (isEqual && checkModificationTime) { @@ -783,7 +744,7 @@ public boolean isKeyInfoSame(OmKeyInfo omKeyInfo, boolean checkPath, } if (isEqual && checkPath) { - isEqual = parentObjectID == omKeyInfo.parentObjectID && + isEqual = getParentObjectID() == omKeyInfo.getParentObjectID() && keyName.equals(omKeyInfo.keyName); } @@ -808,7 +769,7 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(volumeName, bucketName, keyName, parentObjectID); + return Objects.hash(volumeName, bucketName, keyName, getParentObjectID()); } /** @@ -816,7 +777,7 @@ public int hashCode() { */ @Override public OmKeyInfo copyObject() { - OmKeyInfo.Builder builder = new OmKeyInfo.Builder() + OmKeyInfo.Builder builder = new OmKeyInfo.Builder(this) .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) @@ -825,9 +786,7 @@ public OmKeyInfo copyObject() { .setDataSize(dataSize) .setReplicationConfig(replicationConfig) .setFileEncryptionInfo(encInfo) - .setObjectID(objectID) - .setUpdateID(updateID) - .setParentObjectID(parentObjectID) + .setAcls(acls) .setFileName(fileName) .setFile(isFile); @@ -837,12 +796,8 @@ public OmKeyInfo copyObject() { keyLocationVersion.getLocationList(), keyLocationVersion.isMultipartKey()))); - acls.forEach(acl -> builder.addAcl(new OzoneAcl(acl.getType(), - acl.getName(), (BitSet) acl.getAclBitSet().clone(), - acl.getAclScope()))); - - if (metadata != null) { - metadata.forEach((k, v) -> builder.addMetadata(k, v)); + if (getMetadata() != null) { + getMetadata().forEach((k, v) -> builder.addMetadata(k, v)); } if (fileChecksum != null) { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java index 646cb421e434..bbf1a1bdae53 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java @@ -24,8 +24,15 @@ public class OmMultipartCommitUploadPartInfo { private final String partName; - public OmMultipartCommitUploadPartInfo(String name) { - this.partName = name; + private final String eTag; + + public OmMultipartCommitUploadPartInfo(String partName, String eTag) { + this.partName = partName; + this.eTag = eTag; + } + + public String getETag() { + return eTag; } public String getPartName() { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java index 4f57e075bd70..d5bf7fa596f7 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java @@ -155,37 +155,33 @@ public PartKeyInfo lastEntry() { * multiKey1 | 1026 | 1025 | * ------------------------------------------| */ - private long parentID; + private final long 
parentID; /** * Construct OmMultipartKeyInfo object which holds multipart upload * information for a key. */ - @SuppressWarnings("parameternumber") - private OmMultipartKeyInfo(String id, long creationTime, - ReplicationConfig replicationConfig, - PartKeyInfoMap sortedMap, long objectID, long updateID, - long parentObjId) { - this.uploadID = id; - this.creationTime = creationTime; - this.replicationConfig = replicationConfig; - this.partKeyInfoMap = sortedMap; - this.objectID = objectID; - this.updateID = updateID; - this.parentID = parentObjId; + private OmMultipartKeyInfo(Builder b) { + super(b); + this.uploadID = b.uploadID; + this.creationTime = b.creationTime; + this.replicationConfig = b.replicationConfig; + this.partKeyInfoMap = new PartKeyInfoMap(b.partKeyInfoList); + this.parentID = b.parentID; } - /** - * Construct OmMultipartKeyInfo object which holds multipart upload - * information for a key. - */ - @SuppressWarnings("parameternumber") - private OmMultipartKeyInfo(String id, long creationTime, - ReplicationConfig replicationConfig, - SortedMap list, long objectID, long updateID, - long parentObjId) { - this(id, creationTime, replicationConfig, new PartKeyInfoMap(list), - objectID, updateID, parentObjId); + /** Copy constructor. */ + private OmMultipartKeyInfo(OmMultipartKeyInfo b) { + this.uploadID = b.uploadID; + this.creationTime = b.creationTime; + this.replicationConfig = b.replicationConfig; + // PartKeyInfoMap is an immutable data structure. Whenever a PartKeyInfo + // is added, it returns a new shallow copy of the PartKeyInfoMap Object + // so here we can directly pass in partKeyInfoMap + this.partKeyInfoMap = b.partKeyInfoMap; + setObjectID(b.getObjectID()); + setUpdateID(b.getUpdateID()); + this.parentID = b.parentID; } /** @@ -228,13 +224,11 @@ public ReplicationConfig getReplicationConfig() { /** * Builder of OmMultipartKeyInfo. 
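// Usage sketch, not part of the patch: OmMultipartKeyInfo is now created only
// through its Builder (getFromProto below uses the same chain of setters).
// replicationConfig, objectId and parentId are assumed to be supplied by the
// caller; the values here are only illustrative.
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;

public final class OmMultipartKeyInfoSketch {
  private OmMultipartKeyInfoSketch() { }

  public static OmMultipartKeyInfo newUpload(String uploadId,
      ReplicationConfig replicationConfig, long objectId, long parentId) {
    return new OmMultipartKeyInfo.Builder()
        .setUploadID(uploadId)
        .setCreationTime(System.currentTimeMillis())
        .setReplicationConfig(replicationConfig)
        .setObjectID(objectId)
        .setUpdateID(0L)
        .setParentID(parentId)
        .build();
  }
}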
*/ - public static class Builder { + public static class Builder extends WithObjectID.Builder { private String uploadID; private long creationTime; private ReplicationConfig replicationConfig; - private TreeMap partKeyInfoList; - private long objectID; - private long updateID; + private final TreeMap partKeyInfoList; private long parentID; public Builder() { @@ -271,12 +265,12 @@ public Builder addPartKeyInfoList(int partNum, PartKeyInfo partKeyInfo) { } public Builder setObjectID(long obId) { - this.objectID = obId; + super.setObjectID(obId); return this; } public Builder setUpdateID(long id) { - this.updateID = id; + super.setUpdateID(id); return this; } @@ -286,8 +280,7 @@ public Builder setParentID(long parentObjId) { } public OmMultipartKeyInfo build() { - return new OmMultipartKeyInfo(uploadID, creationTime, replicationConfig, - partKeyInfoList, objectID, updateID, parentID); + return new OmMultipartKeyInfo(this); } } @@ -308,10 +301,15 @@ public static OmMultipartKeyInfo getFromProto( multipartKeyInfo.getEcReplicationConfig() ); - return new OmMultipartKeyInfo(multipartKeyInfo.getUploadID(), - multipartKeyInfo.getCreationTime(), replicationConfig, - list, multipartKeyInfo.getObjectID(), - multipartKeyInfo.getUpdateID(), multipartKeyInfo.getParentID()); + return new Builder() + .setUploadID(multipartKeyInfo.getUploadID()) + .setCreationTime(multipartKeyInfo.getCreationTime()) + .setReplicationConfig(replicationConfig) + .setPartKeyInfoList(list) + .setObjectID(multipartKeyInfo.getObjectID()) + .setUpdateID(multipartKeyInfo.getUpdateID()) + .setParentID(multipartKeyInfo.getParentID()) + .build(); } /** @@ -323,8 +321,8 @@ public MultipartKeyInfo getProto() { .setUploadID(uploadID) .setCreationTime(creationTime) .setType(replicationConfig.getReplicationType()) - .setObjectID(objectID) - .setUpdateID(updateID) + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()) .setParentID(parentID); if (replicationConfig instanceof ECReplicationConfig) { @@ -358,11 +356,7 @@ public int hashCode() { } public OmMultipartKeyInfo copyObject() { - // PartKeyInfoMap is an immutable data structure. 
Whenever a PartKeyInfo - // is added, it returns a new shallow copy of the PartKeyInfoMap Object - // so here we can directly pass in partKeyInfoMap - return new OmMultipartKeyInfo(uploadID, creationTime, replicationConfig, - partKeyInfoMap, objectID, updateID, parentID); + return new OmMultipartKeyInfo(this); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java index 63e6353c1850..ff39661d01b5 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java @@ -56,8 +56,9 @@ public Map getMultipartMap() { */ public List getPartsList() { List partList = new ArrayList<>(); - multipartMap.forEach((partNumber, partName) -> partList.add(Part - .newBuilder().setPartName(partName).setPartNumber(partNumber).build())); + multipartMap.forEach((partNumber, eTag) -> partList.add(Part + // set partName equal to eTag for back compatibility (partName is a required property) + .newBuilder().setPartName(eTag).setETag(eTag).setPartNumber(partNumber).build())); return partList; } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java index fbf519c22682..0ba0e26acda2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java @@ -79,6 +79,7 @@ public void addPartList(List partInfos) { public void addProtoPartList(List partInfos) { partInfos.forEach(partInfo -> partInfoList.add(new OmPartInfo( partInfo.getPartNumber(), partInfo.getPartName(), - partInfo.getModificationTime(), partInfo.getSize()))); + partInfo.getModificationTime(), partInfo.getSize(), + partInfo.getETag()))); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java index 2d753a5caa5a..35d97cd4ffdc 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java @@ -23,17 +23,20 @@ /** * Class that defines information about each part of a multipart upload key. 
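// Sketch, not part of the patch: the part name and the S3 ETag are now carried
// separately through the multipart-upload path. The null check and the
// fallback to the part name are only an illustration of how a caller might
// stay compatible with parts committed before ETags were stored; the patch
// itself does not prescribe this.
import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;

public final class MultipartETagSketch {
  private MultipartETagSketch() { }

  public static OmMultipartCommitUploadPartInfo committed(String partName, String eTag) {
    return new OmMultipartCommitUploadPartInfo(partName, eTag);
  }

  public static String etagForResponse(OmMultipartCommitUploadPartInfo info) {
    // getETag() may be null when the part was committed by an older client.
    return info.getETag() != null ? info.getETag() : info.getPartName();
  }
}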
*/ -public class OmPartInfo { - private int partNumber; - private String partName; - private long modificationTime; - private long size; +public final class OmPartInfo { + private final int partNumber; + private final String partName; + private final long modificationTime; + private final long size; + private final String eTag; - public OmPartInfo(int number, String name, long time, long size) { + public OmPartInfo(int number, String name, long time, long size, + String eTag) { this.partNumber = number; this.partName = name; this.modificationTime = time; this.size = size; + this.eTag = eTag; } public int getPartNumber() { @@ -52,9 +55,19 @@ public long getSize() { return size; } + public String getETag() { + return eTag; + } + public PartInfo getProto() { - return PartInfo.newBuilder().setPartNumber(partNumber).setPartName(partName) - .setModificationTime(modificationTime) - .setSize(size).build(); + PartInfo.Builder builder = PartInfo.newBuilder() + .setPartNumber(partNumber) + .setPartName(partName) + .setModificationTime(modificationTime) + .setSize(size); + if (eTag != null) { + builder.setETag(eTag); + } + return builder.build(); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java index c5c8f5ca8e2b..8eb931410eff 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java @@ -18,13 +18,13 @@ package org.apache.hadoop.ozone.om.helpers; import java.util.ArrayList; -import java.util.BitSet; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import com.google.common.collect.ImmutableList; import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.CopyObject; import org.apache.hadoop.hdds.utils.db.DelegatedCodec; @@ -102,15 +102,29 @@ private OmVolumeArgs(String adminName, String ownerName, String volume, this.quotaInBytes = quotaInBytes; this.quotaInNamespace = quotaInNamespace; this.usedNamespace = usedNamespace; - this.metadata = metadata; + setMetadata(metadata); this.acls = acls; this.creationTime = creationTime; this.modificationTime = modificationTime; - this.objectID = objectID; - this.updateID = updateID; + setObjectID(objectID); + setUpdateID(updateID); this.refCount = refCount; } + private OmVolumeArgs(Builder b) { + super(b); + this.adminName = b.adminName; + this.ownerName = b.ownerName; + this.volume = b.volume; + this.quotaInBytes = b.quotaInBytes; + this.quotaInNamespace = b.quotaInNamespace; + this.usedNamespace = b.usedNamespace; + this.acls = b.acls; + this.creationTime = b.creationTime; + this.modificationTime = b.modificationTime; + this.refCount = b.refCount; + } + public long getRefCount() { Preconditions.checkState(refCount >= 0L, "refCount should not be negative"); return refCount; @@ -221,7 +235,7 @@ public long getQuotaInNamespace() { } public List getAcls() { - return acls; + return ImmutableList.copyOf(acls); } public List getDefaultAcls() { @@ -286,18 +300,18 @@ public boolean equals(Object o) { return false; } OmVolumeArgs that = (OmVolumeArgs) o; - return Objects.equals(this.objectID, that.objectID); + return Objects.equals(this.getObjectID(), that.getObjectID()); } @Override public int hashCode() { - return Objects.hash(this.objectID); + return Objects.hash(getObjectID()); } /** * 
Builder for OmVolumeArgs. */ - public static class Builder { + public static class Builder extends WithObjectID.Builder { private String adminName; private String ownerName; private String volume; @@ -306,30 +320,18 @@ public static class Builder { private long quotaInBytes; private long quotaInNamespace; private long usedNamespace; - private Map metadata; private List acls; - private long objectID; - private long updateID; private long refCount; - /** - * Sets the Object ID for this Object. - * Object ID are unique and immutable identifier for each object in the - * System. - * @param id - long - */ + @Override public Builder setObjectID(long id) { - this.objectID = id; + super.setObjectID(id); return this; } - /** - * Sets the update ID for this Object. Update IDs are monotonically - * increasing values which are updated each time there is an update. - * @param id - long - */ + @Override public Builder setUpdateID(long id) { - this.updateID = id; + super.setUpdateID(id); return this; } @@ -337,8 +339,7 @@ public Builder setUpdateID(long id) { * Constructs a builder. */ public Builder() { - metadata = new HashMap<>(); - acls = new ArrayList(); + acls = new ArrayList<>(); quotaInBytes = OzoneConsts.QUOTA_RESET; quotaInNamespace = OzoneConsts.QUOTA_RESET; } @@ -383,15 +384,15 @@ public Builder setUsedNamespace(long namespaceUsage) { return this; } + @Override public Builder addMetadata(String key, String value) { - metadata.put(key, value); // overwrite if present. + super.addMetadata(key, value); return this; } + @Override public Builder addAllMetadata(Map additionalMetaData) { - if (additionalMetaData != null) { - metadata.putAll(additionalMetaData); - } + super.addAllMetadata(additionalMetaData); return this; } @@ -406,17 +407,11 @@ public void setRefCount(long refCount) { this.refCount = refCount; } - /** - * Constructs a CreateVolumeArgument. - * @return CreateVolumeArgs. - */ public OmVolumeArgs build() { Preconditions.checkNotNull(adminName); Preconditions.checkNotNull(ownerName); Preconditions.checkNotNull(volume); - return new OmVolumeArgs(adminName, ownerName, volume, quotaInBytes, - quotaInNamespace, usedNamespace, metadata, acls, creationTime, - modificationTime, objectID, updateID, refCount); + return new OmVolumeArgs(this); } } @@ -430,13 +425,13 @@ public VolumeInfo getProtobuf() { .setQuotaInBytes(quotaInBytes) .setQuotaInNamespace(quotaInNamespace) .setUsedNamespace(usedNamespace) - .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) + .addAllMetadata(KeyValueUtil.toProtobuf(getMetadata())) .addAllVolumeAcls(aclList) .setCreationTime( creationTime == 0 ? 
System.currentTimeMillis() : creationTime) .setModificationTime(modificationTime) - .setObjectID(objectID) - .setUpdateID(updateID) + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()) .setRefCount(refCount) .build(); } @@ -476,18 +471,12 @@ public String getObjectInfo() { @Override public OmVolumeArgs copyObject() { Map cloneMetadata = new HashMap<>(); - if (metadata != null) { - metadata.forEach((k, v) -> cloneMetadata.put(k, v)); + if (getMetadata() != null) { + cloneMetadata.putAll(getMetadata()); } - List cloneAcls = new ArrayList(acls.size()); - - acls.forEach(acl -> cloneAcls.add(new OzoneAcl(acl.getType(), - acl.getName(), (BitSet) acl.getAclBitSet().clone(), - acl.getAclScope()))); - return new OmVolumeArgs(adminName, ownerName, volume, quotaInBytes, - quotaInNamespace, usedNamespace, cloneMetadata, cloneAcls, - creationTime, modificationTime, objectID, updateID, refCount); + quotaInNamespace, usedNamespace, cloneMetadata, new ArrayList<>(acls), + creationTime, modificationTime, getObjectID(), getUpdateID(), refCount); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java index 134675cdce84..517f0c14ce09 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java @@ -27,17 +27,17 @@ import java.util.ArrayList; import java.util.Arrays; -import java.util.BitSet; import java.util.List; +import java.util.Objects; import java.util.stream.Collectors; +import java.util.stream.Stream; + import org.apache.hadoop.security.UserGroupInformation; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.NONE; /** * Helper class for ozone acls operations. @@ -60,11 +60,11 @@ public static List getAclList(String userName, List listOfAcls = new ArrayList<>(); // User ACL. - listOfAcls.add(new OzoneAcl(USER, userName, userRights, ACCESS)); + listOfAcls.add(new OzoneAcl(USER, userName, ACCESS, userRights)); if (userGroups != null) { // Group ACLs of the User. 
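getAclList above switches to the new OzoneAcl argument order: identity type, name, scope, then the granted rights. A small sketch of constructing and querying ACLs with that order (a single right is passed here, matching the form used by the updated tests later in this patch):

    import org.apache.hadoop.ozone.OzoneAcl;
    import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
    import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;

    class AclOrderSketch {
      public static void main(String[] args) {
        // scope now comes before the rights in the constructor
        OzoneAcl userAcl = new OzoneAcl(ACLIdentityType.USER, "alice",
            OzoneAcl.AclScope.ACCESS, ACLType.READ);
        OzoneAcl groupAcl = new OzoneAcl(ACLIdentityType.GROUP, "analysts",
            OzoneAcl.AclScope.ACCESS, ACLType.WRITE);

        System.out.println(userAcl.isSet(ACLType.READ));   // true
        System.out.println(groupAcl.isSet(ACLType.READ));  // false
      }
    }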
Arrays.asList(userGroups).forEach((group) -> listOfAcls.add( - new OzoneAcl(GROUP, group, groupRights, ACCESS))); + new OzoneAcl(GROUP, group, ACCESS, groupRights))); } return listOfAcls; } @@ -91,23 +91,22 @@ public static List filterAclList(String identityName, private static boolean checkAccessInAcl(OzoneAcl a, UserGroupInformation ugi, ACLType aclToCheck) { - BitSet rights = a.getAclBitSet(); switch (a.getType()) { case USER: if (a.getName().equals(ugi.getShortUserName())) { - return checkIfAclBitIsSet(aclToCheck, rights); + return a.checkAccess(aclToCheck); } break; case GROUP: for (String grp : ugi.getGroupNames()) { if (a.getName().equals(grp)) { - return checkIfAclBitIsSet(aclToCheck, rights); + return a.checkAccess(aclToCheck); } } break; default: - return checkIfAclBitIsSet(aclToCheck, rights); + return a.checkAccess(aclToCheck); } return false; } @@ -137,56 +136,30 @@ public static boolean checkAclRights(List acls, } /** - * Helper function to check if bit for given acl is set. - * @param acl - * @param bitset - * @return True of acl bit is set else false. - * */ - public static boolean checkIfAclBitIsSet(IAccessAuthorizer.ACLType acl, - BitSet bitset) { - if (bitset == null) { - return false; - } - - return ((bitset.get(acl.ordinal()) - || bitset.get(ALL.ordinal())) - && !bitset.get(NONE.ordinal())); - } - - /** - * Helper function to inherit default ACL as access ACL for child object. - * 1. deep copy of OzoneAcl to avoid unexpected parent default ACL change - * 2. merge inherited access ACL with existing access ACL via - * OzoneUtils.addAcl(). - * @param acls - * @param parentAcls - * @return true if acls inherited DEFAULT acls from parentAcls successfully, - * false otherwise. + * Helper function to inherit default ACL with given {@code scope} for child object. + * @param acls child object ACL list + * @param parentAcls parent object ACL list + * @param scope scope applied to inherited ACL + * @return true if any ACL was inherited from parent, false otherwise */ public static boolean inheritDefaultAcls(List acls, - List parentAcls) { - List inheritedAcls = null; + List parentAcls, OzoneAcl.AclScope scope) { if (parentAcls != null && !parentAcls.isEmpty()) { - inheritedAcls = parentAcls.stream() - .filter(a -> a.getAclScope() == DEFAULT) - .map(acl -> new OzoneAcl(acl.getType(), acl.getName(), - acl.getAclBitSet(), ACCESS)) - .collect(Collectors.toList()); - } - if (inheritedAcls != null && !inheritedAcls.isEmpty()) { - inheritedAcls.stream().forEach(acl -> addAcl(acls, acl)); - return true; + Stream aclStream = parentAcls.stream() + .filter(a -> a.getAclScope() == DEFAULT); + + if (scope != DEFAULT) { + aclStream = aclStream.map(acl -> acl.withScope(scope)); + } + + List inheritedAcls = aclStream.collect(Collectors.toList()); + if (!inheritedAcls.isEmpty()) { + inheritedAcls.forEach(acl -> addAcl(acls, acl)); + return true; + } } - return false; - } - /** - * Helper function to convert the scope of ACLs to DEFAULT. - * This method is called in ACL inheritance scenarios. - * @param acls - */ - public static void toDefaultScope(List acls) { - acls.forEach(a -> a.setAclScope(DEFAULT)); + return false; } /** @@ -217,8 +190,6 @@ public static List toProtobuf(List protoAcls) { /** * Add an OzoneAcl to existing list of OzoneAcls. - * @param existingAcls - * @param acl * @return true if current OzoneAcls are changed, false otherwise. 
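inheritDefaultAcls now takes the target scope explicitly, replacing the separate toDefaultScope pass. A sketch of inheriting a parent's DEFAULT ACLs into a child object as ACCESS ACLs:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.ozone.OzoneAcl;
    import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
    import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
    import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;

    class InheritAclSketch {
      public static void main(String[] args) {
        List<OzoneAcl> parentAcls = new ArrayList<>();
        parentAcls.add(new OzoneAcl(ACLIdentityType.USER, "alice",
            OzoneAcl.AclScope.DEFAULT, ACLType.READ));
        parentAcls.add(new OzoneAcl(ACLIdentityType.USER, "bob",
            OzoneAcl.AclScope.ACCESS, ACLType.WRITE)); // ignored: not a DEFAULT ACL

        List<OzoneAcl> childAcls = new ArrayList<>();
        boolean inherited = OzoneAclUtil.inheritDefaultAcls(
            childAcls, parentAcls, OzoneAcl.AclScope.ACCESS);

        System.out.println(inherited);        // true: alice's DEFAULT ACL was copied
        System.out.println(childAcls.size()); // 1, now carrying ACCESS scope
      }
    }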
*/ public static boolean addAcl(List existingAcls, OzoneAcl acl) { @@ -226,17 +197,17 @@ public static boolean addAcl(List existingAcls, OzoneAcl acl) { return false; } - for (OzoneAcl a: existingAcls) { + for (int i = 0; i < existingAcls.size(); i++) { + final OzoneAcl a = existingAcls.get(i); if (a.getName().equals(acl.getName()) && a.getType().equals(acl.getType()) && a.getAclScope().equals(acl.getAclScope())) { - BitSet current = a.getAclBitSet(); - BitSet original = (BitSet) current.clone(); - current.or(acl.getAclBitSet()); - if (current.equals(original)) { - return false; + final OzoneAcl updated = a.add(acl); + final boolean changed = !Objects.equals(updated, a); + if (changed) { + existingAcls.set(i, updated); } - return true; + return changed; } } @@ -246,8 +217,6 @@ public static boolean addAcl(List existingAcls, OzoneAcl acl) { /** * remove OzoneAcl from existing list of OzoneAcls. - * @param existingAcls - * @param acl * @return true if current OzoneAcls are changed, false otherwise. */ public static boolean removeAcl(List existingAcls, OzoneAcl acl) { @@ -255,22 +224,19 @@ public static boolean removeAcl(List existingAcls, OzoneAcl acl) { return false; } - for (OzoneAcl a: existingAcls) { + for (int i = 0; i < existingAcls.size(); i++) { + final OzoneAcl a = existingAcls.get(i); if (a.getName().equals(acl.getName()) && a.getType().equals(acl.getType()) && a.getAclScope().equals(acl.getAclScope())) { - BitSet current = a.getAclBitSet(); - BitSet original = (BitSet) current.clone(); - current.andNot(acl.getAclBitSet()); - - if (current.equals(original)) { - return false; + final OzoneAcl updated = a.remove(acl); + final boolean changed = !Objects.equals(updated, a); + if (updated.isEmpty()) { + existingAcls.remove(i); + } else if (changed) { + existingAcls.set(i, updated); } - - if (current.isEmpty()) { - existingAcls.remove(a); - } - return true; + return changed; } } return false; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java index 56103ccb3ab8..47a48c37e8e0 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java @@ -49,8 +49,8 @@ * This class is used for storing info related to Snapshots. * * Each snapshot created has an associated SnapshotInfo entry - * containing the snapshotid, snapshot path, - * snapshot checkpoint directory, previous snapshotid + * containing the snapshotId, snapshot path, + * snapshot checkpoint directory, previous snapshotId * for the snapshot path & global amongst other necessary fields. */ public final class SnapshotInfo implements Auditable, CopyObject { @@ -125,65 +125,26 @@ public static SnapshotStatus valueOf(SnapshotStatusProto status) { private long exclusiveReplicatedSize; private boolean deepCleanedDeletedDir; - /** - * Private constructor, constructed via builder. - * @param snapshotId - Snapshot UUID. - * @param name - snapshot name. - * @param volumeName - volume name. - * @param bucketName - bucket name. - * @param snapshotStatus - status: SNAPSHOT_ACTIVE, SNAPSHOT_DELETED - * @param creationTime - Snapshot creation time. - * @param deletionTime - Snapshot deletion time. - * @param pathPreviousSnapshotId - Snapshot path previous snapshot id. - * @param globalPreviousSnapshotId - Snapshot global previous snapshot id. 
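With OzoneAcl entries now treated as immutable values, addAcl and removeAcl replace the matching list element with the merged or reduced instance rather than mutating a shared BitSet, and an entry whose last right is removed drops out of the list. A sketch of that behaviour:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.ozone.OzoneAcl;
    import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
    import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
    import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;

    class AddRemoveAclSketch {
      public static void main(String[] args) {
        List<OzoneAcl> acls = new ArrayList<>();
        acls.add(new OzoneAcl(ACLIdentityType.USER, "alice",
            OzoneAcl.AclScope.ACCESS, ACLType.READ));

        // Granting WRITE to the same (name, type, scope) merges into a new entry.
        boolean changed = OzoneAclUtil.addAcl(acls, new OzoneAcl(
            ACLIdentityType.USER, "alice", OzoneAcl.AclScope.ACCESS, ACLType.WRITE));
        System.out.println(changed);                          // true
        System.out.println(acls.get(0).isSet(ACLType.WRITE)); // true

        // Removing every right removes the entry itself.
        OzoneAclUtil.removeAcl(acls, new OzoneAcl(
            ACLIdentityType.USER, "alice", OzoneAcl.AclScope.ACCESS, ACLType.READ));
        OzoneAclUtil.removeAcl(acls, new OzoneAcl(
            ACLIdentityType.USER, "alice", OzoneAcl.AclScope.ACCESS, ACLType.WRITE));
        System.out.println(acls.isEmpty());                   // true
      }
    }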
- * @param snapshotPath - Snapshot path, bucket .snapshot path. - * @param checkpointDir - Snapshot checkpoint directory. - * @param dbTxSequenceNumber - RDB latest transaction sequence number. - * @param deepCleaned - To be deep cleaned status for snapshot. - * @param referencedSize - Snapshot referenced size. - * @param referencedReplicatedSize - Snapshot referenced size w/ replication. - * @param exclusiveSize - Snapshot exclusive size. - * @param exclusiveReplicatedSize - Snapshot exclusive size w/ replication. - */ - @SuppressWarnings("checkstyle:ParameterNumber") - private SnapshotInfo(UUID snapshotId, - String name, - String volumeName, - String bucketName, - SnapshotStatus snapshotStatus, - long creationTime, - long deletionTime, - UUID pathPreviousSnapshotId, - UUID globalPreviousSnapshotId, - String snapshotPath, - String checkpointDir, - long dbTxSequenceNumber, - boolean deepCleaned, - boolean sstFiltered, - long referencedSize, - long referencedReplicatedSize, - long exclusiveSize, - long exclusiveReplicatedSize, - boolean deepCleanedDeletedDir) { - this.snapshotId = snapshotId; - this.name = name; - this.volumeName = volumeName; - this.bucketName = bucketName; - this.snapshotStatus = snapshotStatus; - this.creationTime = creationTime; - this.deletionTime = deletionTime; - this.pathPreviousSnapshotId = pathPreviousSnapshotId; - this.globalPreviousSnapshotId = globalPreviousSnapshotId; - this.snapshotPath = snapshotPath; - this.checkpointDir = checkpointDir; - this.dbTxSequenceNumber = dbTxSequenceNumber; - this.deepClean = deepCleaned; - this.sstFiltered = sstFiltered; - this.referencedSize = referencedSize; - this.referencedReplicatedSize = referencedReplicatedSize; - this.exclusiveSize = exclusiveSize; - this.exclusiveReplicatedSize = exclusiveReplicatedSize; - this.deepCleanedDeletedDir = deepCleanedDeletedDir; + private SnapshotInfo(Builder b) { + this.snapshotId = b.snapshotId; + this.name = b.name; + this.volumeName = b.volumeName; + this.bucketName = b.bucketName; + this.snapshotStatus = b.snapshotStatus; + this.creationTime = b.creationTime; + this.deletionTime = b.deletionTime; + this.pathPreviousSnapshotId = b.pathPreviousSnapshotId; + this.globalPreviousSnapshotId = b.globalPreviousSnapshotId; + this.snapshotPath = b.snapshotPath; + this.checkpointDir = b.checkpointDir; + this.dbTxSequenceNumber = b.dbTxSequenceNumber; + this.deepClean = b.deepClean; + this.sstFiltered = b.sstFiltered; + this.referencedSize = b.referencedSize; + this.referencedReplicatedSize = b.referencedReplicatedSize; + this.exclusiveSize = b.exclusiveSize; + this.exclusiveReplicatedSize = b.exclusiveReplicatedSize; + this.deepCleanedDeletedDir = b.deepCleanedDeletedDir; } public void setName(String name) { @@ -338,66 +299,79 @@ public Builder() { this.snapshotStatus = SnapshotStatus.DEFAULT; } + /** @param snapshotId - Snapshot UUID. */ public Builder setSnapshotId(UUID snapshotId) { this.snapshotId = snapshotId; return this; } + /** @param name - snapshot name. */ public Builder setName(String name) { this.name = name; return this; } + /** @param volumeName - volume name. */ public Builder setVolumeName(String volumeName) { this.volumeName = volumeName; return this; } + /** @param bucketName - bucket name. 
*/ public Builder setBucketName(String bucketName) { this.bucketName = bucketName; return this; } + /** @param snapshotStatus - status: SNAPSHOT_ACTIVE, SNAPSHOT_DELETED */ public Builder setSnapshotStatus(SnapshotStatus snapshotStatus) { this.snapshotStatus = snapshotStatus; return this; } + /** @param crTime - Snapshot creation time. */ public Builder setCreationTime(long crTime) { this.creationTime = crTime; return this; } + /** @param delTime - Snapshot deletion time. */ public Builder setDeletionTime(long delTime) { this.deletionTime = delTime; return this; } + /** @param pathPreviousSnapshotId - Snapshot path previous snapshot id. */ public Builder setPathPreviousSnapshotId(UUID pathPreviousSnapshotId) { this.pathPreviousSnapshotId = pathPreviousSnapshotId; return this; } + /** @param globalPreviousSnapshotId - Snapshot global previous snapshot id. */ public Builder setGlobalPreviousSnapshotId(UUID globalPreviousSnapshotId) { this.globalPreviousSnapshotId = globalPreviousSnapshotId; return this; } + /** @param snapshotPath - Snapshot path, bucket .snapshot path. */ public Builder setSnapshotPath(String snapshotPath) { this.snapshotPath = snapshotPath; return this; } + /** @param checkpointDir - Snapshot checkpoint directory. */ public Builder setCheckpointDir(String checkpointDir) { this.checkpointDir = checkpointDir; return this; } + /** @param dbTxSequenceNumber - RDB latest transaction sequence number. */ public Builder setDbTxSequenceNumber(long dbTxSequenceNumber) { this.dbTxSequenceNumber = dbTxSequenceNumber; return this; } + /** @param deepClean - To be deep cleaned status for snapshot. */ public Builder setDeepClean(boolean deepClean) { this.deepClean = deepClean; return this; @@ -408,21 +382,25 @@ public Builder setSstFiltered(boolean sstFiltered) { return this; } + /** @param referencedSize - Snapshot referenced size. */ public Builder setReferencedSize(long referencedSize) { this.referencedSize = referencedSize; return this; } + /** @param referencedReplicatedSize - Snapshot referenced size w/ replication. */ public Builder setReferencedReplicatedSize(long referencedReplicatedSize) { this.referencedReplicatedSize = referencedReplicatedSize; return this; } + /** @param exclusiveSize - Snapshot exclusive size. */ public Builder setExclusiveSize(long exclusiveSize) { this.exclusiveSize = exclusiveSize; return this; } + /** @param exclusiveReplicatedSize - Snapshot exclusive size w/ replication. 
*/ public Builder setExclusiveReplicatedSize(long exclusiveReplicatedSize) { this.exclusiveReplicatedSize = exclusiveReplicatedSize; return this; @@ -435,27 +413,7 @@ public Builder setDeepCleanedDeletedDir(boolean deepCleanedDeletedDir) { public SnapshotInfo build() { Preconditions.checkNotNull(name); - return new SnapshotInfo( - snapshotId, - name, - volumeName, - bucketName, - snapshotStatus, - creationTime, - deletionTime, - pathPreviousSnapshotId, - globalPreviousSnapshotId, - snapshotPath, - checkpointDir, - dbTxSequenceNumber, - deepClean, - sstFiltered, - referencedSize, - referencedReplicatedSize, - exclusiveSize, - exclusiveReplicatedSize, - deepCleanedDeletedDir - ); + return new SnapshotInfo(this); } } @@ -756,4 +714,29 @@ public SnapshotInfo copyObject() { .setDeepCleanedDeletedDir(deepCleanedDeletedDir) .build(); } + + @Override + public String toString() { + return "SnapshotInfo{" + + "snapshotId: '" + snapshotId + '\'' + + ", name: '" + name + '\'' + + ", volumeName: '" + volumeName + '\'' + + ", bucketName: '" + bucketName + '\'' + + ", snapshotStatus: '" + snapshotStatus + '\'' + + ", creationTime: '" + creationTime + '\'' + + ", deletionTime: '" + deletionTime + '\'' + + ", pathPreviousSnapshotId: '" + pathPreviousSnapshotId + '\'' + + ", globalPreviousSnapshotId: '" + globalPreviousSnapshotId + '\'' + + ", snapshotPath: '" + snapshotPath + '\'' + + ", checkpointDir: '" + checkpointDir + '\'' + + ", dbTxSequenceNumber: '" + dbTxSequenceNumber + '\'' + + ", deepClean: '" + deepClean + '\'' + + ", sstFiltered: '" + sstFiltered + '\'' + + ", referencedSize: '" + referencedSize + '\'' + + ", referencedReplicatedSize: '" + referencedReplicatedSize + '\'' + + ", exclusiveSize: '" + exclusiveSize + '\'' + + ", exclusiveReplicatedSize: '" + exclusiveReplicatedSize + '\'' + + ", deepCleanedDeletedDir: '" + deepCleanedDeletedDir + '\'' + + '}'; + } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java index 5c49a15a12bf..c0481c212e5f 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java @@ -23,23 +23,65 @@ /** * Mixin class to handle custom metadata. */ -public class WithMetadata { +public abstract class WithMetadata { - @SuppressWarnings("visibilitymodifier") - protected Map metadata = new HashMap<>(); + private Map metadata; + + protected WithMetadata() { + metadata = new HashMap<>(); + } + + protected WithMetadata(Builder b) { + metadata = b.metadata; + } /** * Custom key value metadata. */ - public Map getMetadata() { + public final Map getMetadata() { return metadata; } /** * Set custom key value metadata. */ - public void setMetadata(Map metadata) { + public final void setMetadata(Map metadata) { this.metadata = metadata; } + /** Builder for {@link WithMetadata}. 
*/ + public static class Builder { + private final Map metadata; + + protected Builder() { + metadata = new HashMap<>(); + } + + protected Builder(WithObjectID obj) { + metadata = new HashMap<>(obj.getMetadata()); + } + + public Builder addMetadata(String key, String value) { + metadata.put(key, value); + return this; + } + + public Builder addAllMetadata(Map additionalMetadata) { + if (additionalMetadata != null) { + metadata.putAll(additionalMetadata); + } + return this; + } + + public Builder setMetadata(Map map) { + metadata.clear(); + addAllMetadata(map); + return this; + } + + protected Map getMetadata() { + return metadata; + } + } + } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java index 0ea1a1c0e6a7..af9508196260 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java @@ -22,34 +22,34 @@ /** * Mixin class to handle ObjectID and UpdateID. */ -public class WithObjectID extends WithMetadata { +public abstract class WithObjectID extends WithMetadata { + + private long objectID; + private long updateID; + + protected WithObjectID() { + super(); + } + + protected WithObjectID(Builder b) { + super(b); + objectID = b.objectID; + updateID = b.updateID; + } /** * ObjectIDs are unique and immutable identifier for each object in the * System. */ - @SuppressWarnings("visibilitymodifier") - protected long objectID; - /** - * UpdateIDs are monotonically increasing values which are updated - * each time there is an update. - */ - @SuppressWarnings("visibilitymodifier") - protected long updateID; - - /** - * Returns objectID. - * @return long - */ - public long getObjectID() { + public final long getObjectID() { return objectID; } /** - * Returns updateID. - * @return long + * UpdateIDs are monotonically increasing values which are updated + * each time there is an update. */ - public long getUpdateID() { + public final long getUpdateID() { return updateID; } @@ -62,7 +62,7 @@ public long getUpdateID() { * * @param obId - long */ - public void setObjectID(long obId) { + public final void setObjectID(long obId) { if (this.objectID != 0 && obId != OBJECT_ID_RECLAIM_BLOCKS) { throw new UnsupportedOperationException("Attempt to modify object ID " + "which is not zero. Current Object ID is " + this.objectID); @@ -76,7 +76,7 @@ public void setObjectID(long obId) { * @param updateId long * @param isRatisEnabled boolean */ - public void setUpdateID(long updateId, boolean isRatisEnabled) { + public final void setUpdateID(long updateId, boolean isRatisEnabled) { // Because in non-HA, we have multiple rpc handler threads and // transactionID is generated in OzoneManagerServerSideTranslatorPB. @@ -103,21 +103,65 @@ public void setUpdateID(long updateId, boolean isRatisEnabled) { // Main reason, in non-HA transaction Index after restart starts from 0. // And also because of this same reason we don't do replay checks in non-HA. 
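With the shared WithMetadata.Builder (and the WithObjectID.Builder that follows) in place, OmVolumeArgs.Builder only overrides these setters to keep its fluent return type, and the private constructor reads straight from the builder. A sketch of building a volume through the inherited methods; the setAdminName/setOwnerName/setVolume setters are assumed from the existing builder and are not part of this hunk:

    import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;

    class VolumeBuilderSketch {
      public static void main(String[] args) {
        OmVolumeArgs volume = new OmVolumeArgs.Builder()
            .setAdminName("ozone")          // assumed existing setter
            .setOwnerName("alice")          // assumed existing setter
            .setVolume("vol1")              // assumed existing setter
            .setObjectID(128L)              // overridden, backed by WithObjectID.Builder
            .setUpdateID(1L)                // overridden, backed by WithObjectID.Builder
            .addMetadata("team", "storage") // overridden, backed by WithMetadata.Builder
            .build();

        System.out.println(volume.getMetadata().get("team")); // storage
        System.out.println(volume.getObjectID());             // 128
      }
    }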
- if (isRatisEnabled && updateId < this.updateID) { + if (isRatisEnabled && updateId < this.getUpdateID()) { throw new IllegalArgumentException(String.format( "Trying to set updateID to %d which is not greater than the " + - "current value of %d for %s", updateId, this.updateID, + "current value of %d for %s", updateId, this.getUpdateID(), getObjectInfo())); } - this.updateID = updateId; - } - - public boolean isUpdateIDset() { - return this.updateID > 0; + this.setUpdateID(updateId); } + /** Hook method, customized in subclasses. */ public String getObjectInfo() { return this.toString(); } + + public final void setUpdateID(long updateID) { + this.updateID = updateID; + } + + /** Builder for {@link WithObjectID}. */ + public static class Builder extends WithMetadata.Builder { + private long objectID; + private long updateID; + + protected Builder() { + super(); + } + + protected Builder(WithObjectID obj) { + super(obj); + objectID = obj.getObjectID(); + updateID = obj.getUpdateID(); + } + + /** + * Sets the Object ID for this Object. + * Object ID are unique and immutable identifier for each object in the + * System. + */ + public Builder setObjectID(long obId) { + this.objectID = obId; + return this; + } + + /** + * Sets the update ID for this Object. Update IDs are monotonically + * increasing values which are updated each time there is an update. + */ + public Builder setUpdateID(long id) { + this.updateID = id; + return this; + } + + public long getObjectID() { + return objectID; + } + + public long getUpdateID() { + return updateID; + } + } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java index 79a135af1726..3e228e790405 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java @@ -22,6 +22,16 @@ * Object ID with additional parent ID field. */ public class WithParentObjectId extends WithObjectID { + private long parentObjectID; + + public WithParentObjectId() { + } + + public WithParentObjectId(Builder builder) { + super(builder); + parentObjectID = builder.getParentObjectID(); + } + /** * Object ID with additional parent ID field. * @@ -45,11 +55,34 @@ public class WithParentObjectId extends WithObjectID { * key1 | 1026 | 1025 | * ------------------------------------------| */ - @SuppressWarnings("visibilitymodifier") - protected long parentObjectID; - - public long getParentObjectID() { + public final long getParentObjectID() { return parentObjectID; } + public final void setParentObjectID(long parentObjectID) { + this.parentObjectID = parentObjectID; + } + + /** Builder for {@link WithParentObjectId}. 
*/ + public static class Builder extends WithObjectID.Builder { + private long parentObjectID; + + protected Builder() { + super(); + } + + protected Builder(WithParentObjectId obj) { + super(obj); + parentObjectID = obj.getParentObjectID(); + } + + public Builder setParentObjectID(long parentObjectId) { + this.parentObjectID = parentObjectId; + return this; + } + + protected long getParentObjectID() { + return parentObjectID; + } + } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index f41f89b181dd..b1f572358362 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -33,6 +33,7 @@ import org.apache.hadoop.ozone.om.helpers.DBUpdates; import org.apache.hadoop.ozone.om.helpers.DeleteTenantState; import org.apache.hadoop.ozone.om.helpers.KeyInfoWithVolumeContext; +import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.ListOpenFilesResult; import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -706,6 +707,21 @@ default String createSnapshot(String volumeName, "this to be implemented"); } + /** + * Rename snapshot. + * @param volumeName vol to be used + * @param bucketName bucket to be used + * @param snapshotOldName Old name of the snapshot + * @param snapshotNewName New name of the snapshot + * + * @throws IOException + */ + default void renameSnapshot(String volumeName, + String bucketName, String snapshotOldName, String snapshotNewName) throws IOException { + throw new UnsupportedOperationException("OzoneManager does not require " + + "this to be implemented"); + } + /** * Delete snapshot. * @param volumeName vol to be used @@ -1112,10 +1128,10 @@ EchoRPCResponse echoRPCReq(byte[] payloadReq, int payloadSizeResp, * @param bucketName - The bucket name. * @param keyName - The key user want to recover. * @param force - force recover the file. - * @return OmKeyInfo KeyInfo of file under recovery + * @return LeaseKeyInfo KeyInfo of file under recovery * @throws IOException if an error occurs */ - OmKeyInfo recoverLease(String volumeName, String bucketName, String keyName, boolean force) throws IOException; + LeaseKeyInfo recoverLease(String volumeName, String bucketName, String keyName, boolean force) throws IOException; /** * Update modification time and access time of a file. 
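Two protocol-level additions above: a RenameSnapshot RPC and recoverLease now returning a LeaseKeyInfo wrapper instead of a bare OmKeyInfo. A sketch written against the OzoneManagerProtocol interface only; obtaining the client instance is out of scope here:

    import java.io.IOException;
    import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo;
    import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;

    final class SnapshotAndLeaseSketch {
      static void renameAndRecover(OzoneManagerProtocol om) throws IOException {
        // Rename an existing snapshot in place.
        om.renameSnapshot("vol1", "bucket1", "snap-2024-01", "snap-january");

        // Recover the lease on an open file; the wrapper is returned as-is here
        // since its accessors are not shown in this patch.
        LeaseKeyInfo leaseKeyInfo = om.recoverLease("vol1", "bucket1", "dir/file.txt", false);
        System.out.println(leaseKeyInfo);
      }
    }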
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index bd40dfcf0240..0a17f4527fc3 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -47,6 +47,7 @@ import org.apache.hadoop.ozone.om.helpers.DeleteTenantState; import org.apache.hadoop.ozone.om.helpers.KeyInfoWithVolumeContext; import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; +import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.ListOpenFilesResult; import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -86,6 +87,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelDelegationTokenResponseProto; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelPrepareRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelPrepareResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CheckVolumeAccessRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateBucketRequest; @@ -107,6 +110,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteTenantRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteTenantResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteVolumeRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.EchoRPCRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.EchoRPCResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.FinalizeUpgradeProgressRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.FinalizeUpgradeProgressResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.FinalizeUpgradeRequest; @@ -122,7 +127,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3VolumeContextRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3VolumeContextResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotInfoRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoBucketRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoBucketResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoVolumeRequest; @@ -130,16 +134,16 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListBucketsRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListBucketsResponse; +import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysLightResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysLightResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListMultipartUploadsRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListMultipartUploadsResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListOpenFilesRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListOpenFilesResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusLightResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusLightResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTenantRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTenantResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTrashRequest; @@ -164,6 +168,11 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneFileStatusProto; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneFileStatusProtoLight; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareRequestArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrintCompactionLogDagRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RangerBGSyncRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RangerBGSyncResponse; @@ -175,12 +184,14 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RefetchSecretKeyResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysMap; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameSnapshotRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenewDelegationTokenResponseProto; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RevokeS3SecretRequest; +import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Authentication; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Secret; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SafeMode; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListRequest; @@ -188,12 +199,15 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetBucketPropertyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetBucketPropertyResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetS3SecretRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetS3SecretResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSafeModeRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSafeModeResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetTimesRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotInfoRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.TenantAssignAdminRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.TenantAssignUserAccessIdRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.TenantAssignUserAccessIdResponse; @@ -205,8 +219,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.TenantRevokeUserAccessIdRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.EchoRPCRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.EchoRPCResponse; import org.apache.hadoop.ozone.protocolPB.OMPBHelper; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.apache.hadoop.ozone.security.acl.OzoneObj; @@ -229,6 +241,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_S3_CALLER_CONTEXT_PREFIX; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.SCM_IN_SAFE_MODE; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelPrepareRequest; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelPrepareResponse; @@ -256,8 +269,11 @@ public final class OzoneManagerProtocolClientSideTranslatorPB private OmTransport transport; private ThreadLocal threadLocalS3Auth = new ThreadLocal<>(); - private boolean s3AuthCheck; + + public static final int BLOCK_ALLOCATION_RETRY_COUNT = 5; + public static final int BLOCK_ALLOCATION_RETRY_WAIT_TIME_MS = 3000; + public OzoneManagerProtocolClientSideTranslatorPB(OmTransport omTransport, String clientId) { this.clientID = clientId; @@ -728,8 +744,7 @@ public OpenKeySession openKey(OmKeyArgs args) 
throws IOException { .setCreateKeyRequest(req) .build(); - CreateKeyResponse keyResponse = - handleError(submitRequest(omRequest)).getCreateKeyResponse(); + CreateKeyResponse keyResponse = handleSubmitRequestAndSCMSafeModeRetry(omRequest).getCreateKeyResponse(); return new OpenKeySession(keyResponse.getID(), OmKeyInfo.getFromProtobuf(keyResponse.getKeyInfo()), keyResponse.getOpenVersion()); @@ -774,8 +789,7 @@ public OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientId, .setAllocateBlockRequest(req) .build(); - AllocateBlockResponse resp = handleError(submitRequest(omRequest)) - .getAllocateBlockResponse(); + AllocateBlockResponse resp = handleSubmitRequestAndSCMSafeModeRetry(omRequest).getAllocateBlockResponse(); return OmKeyLocationInfo.getFromProtobuf(resp.getKeyLocation()); } @@ -1037,7 +1051,7 @@ public ListKeysLightResult listKeysLight(String volumeName, reqBuilder.setBucketName(bucketName); reqBuilder.setCount(maxKeys); - if (StringUtils.isNotEmpty(startKey)) { + if (startKey != null) { reqBuilder.setStartKey(startKey); } @@ -1228,6 +1242,26 @@ public String createSnapshot(String volumeName, return snapshotInfo.getName(); } + /** + * {@inheritDoc} + */ + @Override + public void renameSnapshot(String volumeName, String bucketName, + String snapshotOldName, String snapshotNewName) throws IOException { + RenameSnapshotRequest.Builder requestBuilder = + RenameSnapshotRequest.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setSnapshotOldName(snapshotOldName) + .setSnapshotNewName(snapshotNewName); + + final OMRequest omRequest = createOMRequest(Type.RenameSnapshot) + .setRenameSnapshotRequest(requestBuilder) + .build(); + final OMResponse omResponse = submitRequest(omRequest); + handleError(omResponse); + } + /** * {@inheritDoc} */ @@ -1636,7 +1670,8 @@ public OmMultipartCommitUploadPartInfo commitMultipartUploadPart( .getCommitMultiPartUploadResponse(); OmMultipartCommitUploadPartInfo info = new - OmMultipartCommitUploadPartInfo(response.getPartName()); + OmMultipartCommitUploadPartInfo(response.getPartName(), + response.getETag()); return info; } @@ -2243,12 +2278,38 @@ public OpenKeySession createFile(OmKeyArgs args, OMRequest omRequest = createOMRequest(Type.CreateFile) .setCreateFileRequest(createFileRequest) .build(); - CreateFileResponse resp = - handleError(submitRequest(omRequest)).getCreateFileResponse(); + CreateFileResponse resp = handleSubmitRequestAndSCMSafeModeRetry(omRequest).getCreateFileResponse(); + return new OpenKeySession(resp.getID(), OmKeyInfo.getFromProtobuf(resp.getKeyInfo()), resp.getOpenVersion()); } + + @Nonnull + private OMResponse handleSubmitRequestAndSCMSafeModeRetry(OMRequest omRequest) throws IOException { + int retryCount = BLOCK_ALLOCATION_RETRY_COUNT; + while (true) { + try { + return handleError(submitRequest(omRequest)); + } catch (OMException e) { + if (e.getResult().equals(SCM_IN_SAFE_MODE) && retryCount > 0) { + System.err.println("SCM is in safe mode. 
Will retry in " + + BLOCK_ALLOCATION_RETRY_WAIT_TIME_MS + "ms"); + retryCount--; + try { + Thread.sleep(BLOCK_ALLOCATION_RETRY_WAIT_TIME_MS); + continue; + } catch (InterruptedException ex) { + throw new OMException(ex.getMessage(), ResultCodes.SCM_IN_SAFE_MODE); + } + } else if (e.getResult().equals(SCM_IN_SAFE_MODE) && retryCount == 0) { + throw new OMException(e.getMessage(), ResultCodes.SCM_IN_SAFE_MODE); + } + throw e; + } + } + } + @Override public List listStatus(OmKeyArgs args, boolean recursive, String startKey, long numEntries, boolean allowPartialPrefixes) @@ -2260,16 +2321,9 @@ public List listStatus(OmKeyArgs args, boolean recursive, .setSortDatanodes(args.getSortDatanodes()) .setLatestVersionLocation(args.getLatestVersionLocation()) .build(); - ListStatusRequest.Builder listStatusRequestBuilder = - ListStatusRequest.newBuilder() - .setKeyArgs(keyArgs) - .setRecursive(recursive) - .setStartKey(startKey) - .setNumEntries(numEntries); - if (allowPartialPrefixes) { - listStatusRequestBuilder.setAllowPartialPrefix(allowPartialPrefixes); - } + ListStatusRequest.Builder listStatusRequestBuilder = createListStatusRequestBuilder(keyArgs, recursive, startKey, + numEntries, allowPartialPrefixes); OMRequest omRequest = createOMRequest(Type.ListStatus) .setListStatusRequest(listStatusRequestBuilder.build()) @@ -2296,16 +2350,9 @@ public List listStatusLight(OmKeyArgs args, .setSortDatanodes(false) .setLatestVersionLocation(true) .build(); - ListStatusRequest.Builder listStatusRequestBuilder = - ListStatusRequest.newBuilder() - .setKeyArgs(keyArgs) - .setRecursive(recursive) - .setStartKey(startKey) - .setNumEntries(numEntries); - if (allowPartialPrefixes) { - listStatusRequestBuilder.setAllowPartialPrefix(allowPartialPrefixes); - } + ListStatusRequest.Builder listStatusRequestBuilder = createListStatusRequestBuilder(keyArgs, recursive, startKey, + numEntries, allowPartialPrefixes); OMRequest omRequest = createOMRequest(Type.ListStatusLight) .setListStatusRequest(listStatusRequestBuilder.build()) @@ -2322,6 +2369,26 @@ public List listStatusLight(OmKeyArgs args, return statusList; } + private ListStatusRequest.Builder createListStatusRequestBuilder(KeyArgs keyArgs, boolean recursive, String startKey, + long numEntries, boolean allowPartialPrefixes) { + ListStatusRequest.Builder listStatusRequestBuilder = + ListStatusRequest.newBuilder() + .setKeyArgs(keyArgs) + .setRecursive(recursive) + .setNumEntries(numEntries); + + if (startKey != null) { + listStatusRequestBuilder.setStartKey(startKey); + } else { + listStatusRequestBuilder.setStartKey(""); + } + + if (allowPartialPrefixes) { + listStatusRequestBuilder.setAllowPartialPrefix(allowPartialPrefixes); + } + return listStatusRequestBuilder; + } + @Override public List listStatus(OmKeyArgs args, boolean recursive, String startKey, long numEntries) throws IOException { @@ -2476,7 +2543,7 @@ public EchoRPCResponse echoRPCReq(byte[] payloadReq, int payloadSizeResp, } @Override - public OmKeyInfo recoverLease(String volumeName, String bucketName, String keyName, boolean force) + public LeaseKeyInfo recoverLease(String volumeName, String bucketName, String keyName, boolean force) throws IOException { RecoverLeaseRequest recoverLeaseRequest = RecoverLeaseRequest.newBuilder() @@ -2492,7 +2559,8 @@ public OmKeyInfo recoverLease(String volumeName, String bucketName, String keyNa RecoverLeaseResponse recoverLeaseResponse = handleError(submitRequest(omRequest)).getRecoverLeaseResponse(); - return 
OmKeyInfo.getFromProtobuf(recoverLeaseResponse.getKeyInfo()); + return new LeaseKeyInfo(OmKeyInfo.getFromProtobuf(recoverLeaseResponse.getKeyInfo()), + recoverLeaseResponse.getIsKeyInfo()); } @Override diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java index 09c8743137d4..ca32c96855dd 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java @@ -186,6 +186,16 @@ public static Builder fromKeyArgs(OmKeyArgs args) { .setResType(ResourceType.KEY); } + public static Builder fromOzoneObj(OzoneObj obj) { + return new Builder() + .setVolumeName(obj.getVolumeName()) + .setBucketName(obj.getBucketName()) + .setKeyName(obj.getKeyName()) + .setResType(obj.getResourceType()) + .setStoreType(obj.getStoreType()) + .setOzonePrefixPath(obj.getOzonePrefixPathViewer()); + } + public Builder setResType(OzoneObj.ResourceType res) { this.resType = res; return this; diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java index 08ae1fbc65b8..0b93404aff2f 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java @@ -130,132 +130,132 @@ void testAclParse() { void testAclValues() { OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); assertEquals(acl.getName(), "bilbo"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); - assertFalse(acl.getAclBitSet().get(READ_ACL.ordinal())); + assertTrue(acl.isSet(READ)); + assertTrue(acl.isSet(WRITE)); + assertFalse(acl.isSet(ALL)); + assertFalse(acl.isSet(READ_ACL)); assertEquals(ACLIdentityType.USER, acl.getType()); acl = OzoneAcl.parseAcl("user:bilbo:a"); assertEquals("bilbo", acl.getName()); - assertTrue(acl.getAclBitSet().get(ALL.ordinal())); - assertFalse(acl.getAclBitSet().get(WRITE.ordinal())); + assertTrue(acl.isSet(ALL)); + assertFalse(acl.isSet(WRITE)); assertEquals(ACLIdentityType.USER, acl.getType()); acl = OzoneAcl.parseAcl("user:bilbo:r"); assertEquals("bilbo", acl.getName()); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); + assertTrue(acl.isSet(READ)); + assertFalse(acl.isSet(ALL)); assertEquals(ACLIdentityType.USER, acl.getType()); acl = OzoneAcl.parseAcl("user:bilbo:w"); assertEquals("bilbo", acl.getName()); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); + assertTrue(acl.isSet(WRITE)); + assertFalse(acl.isSet(ALL)); assertEquals(ACLIdentityType.USER, acl.getType()); acl = OzoneAcl.parseAcl("group:hobbit:a"); assertEquals(acl.getName(), "hobbit"); - assertTrue(acl.getAclBitSet().get(ALL.ordinal())); - assertFalse(acl.getAclBitSet().get(READ.ordinal())); + assertTrue(acl.isSet(ALL)); + assertFalse(acl.isSet(READ)); assertEquals(ACLIdentityType.GROUP, acl.getType()); acl = OzoneAcl.parseAcl("world::a"); assertEquals(acl.getName(), "WORLD"); - assertTrue(acl.getAclBitSet().get(ALL.ordinal())); - assertFalse(acl.getAclBitSet().get(WRITE.ordinal())); + assertTrue(acl.isSet(ALL)); + assertFalse(acl.isSet(WRITE)); 
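The new fromOzoneObj factory copies volume, bucket, key, resource type, store type and the prefix-path viewer from an existing OzoneObj. A sketch that re-targets such a copy at a sibling key, assuming the factory sits on the Builder next to the existing fromKeyArgs and that Builder#build() is unchanged:

    import org.apache.hadoop.ozone.security.acl.OzoneObj;
    import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;

    final class ObjInfoCopySketch {
      static OzoneObjInfo siblingKey(OzoneObj source, String otherKeyName) {
        return OzoneObjInfo.Builder.fromOzoneObj(source) // copies the coordinates of source
            .setKeyName(otherKeyName)                    // then points at a different key
            .build();
      }
    }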
assertEquals(ACLIdentityType.WORLD, acl.getType()); acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy"); assertEquals(acl.getName(), "bilbo"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); - assertTrue(acl.getAclBitSet().get(LIST.ordinal())); - assertTrue(acl.getAclBitSet().get(NONE.ordinal())); - assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); - assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); + assertTrue(acl.isSet(READ)); + assertTrue(acl.isSet(WRITE)); + assertTrue(acl.isSet(DELETE)); + assertTrue(acl.isSet(LIST)); + assertTrue(acl.isSet(NONE)); + assertTrue(acl.isSet(CREATE)); + assertTrue(acl.isSet(READ_ACL)); + assertTrue(acl.isSet(WRITE_ACL)); + assertFalse(acl.isSet(ALL)); acl = OzoneAcl.parseAcl("group:hadoop:rwdlncxy"); assertEquals(acl.getName(), "hadoop"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); - assertTrue(acl.getAclBitSet().get(LIST.ordinal())); - assertTrue(acl.getAclBitSet().get(NONE.ordinal())); - assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); - assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); + assertTrue(acl.isSet(READ)); + assertTrue(acl.isSet(WRITE)); + assertTrue(acl.isSet(DELETE)); + assertTrue(acl.isSet(LIST)); + assertTrue(acl.isSet(NONE)); + assertTrue(acl.isSet(CREATE)); + assertTrue(acl.isSet(READ_ACL)); + assertTrue(acl.isSet(WRITE_ACL)); + assertFalse(acl.isSet(ALL)); assertEquals(ACLIdentityType.GROUP, acl.getType()); acl = OzoneAcl.parseAcl("world::rwdlncxy"); assertEquals(acl.getName(), "WORLD"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); - assertTrue(acl.getAclBitSet().get(LIST.ordinal())); - assertTrue(acl.getAclBitSet().get(NONE.ordinal())); - assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); - assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); + assertTrue(acl.isSet(READ)); + assertTrue(acl.isSet(WRITE)); + assertTrue(acl.isSet(DELETE)); + assertTrue(acl.isSet(LIST)); + assertTrue(acl.isSet(NONE)); + assertTrue(acl.isSet(CREATE)); + assertTrue(acl.isSet(READ_ACL)); + assertTrue(acl.isSet(WRITE_ACL)); + assertFalse(acl.isSet(ALL)); assertEquals(ACLIdentityType.WORLD, acl.getType()); // Acls with scope info. 
acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[DEFAULT]"); assertEquals(acl.getName(), "bilbo"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); - assertTrue(acl.getAclBitSet().get(LIST.ordinal())); - assertTrue(acl.getAclBitSet().get(NONE.ordinal())); - assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); - assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); + assertTrue(acl.isSet(READ)); + assertTrue(acl.isSet(WRITE)); + assertTrue(acl.isSet(DELETE)); + assertTrue(acl.isSet(LIST)); + assertTrue(acl.isSet(NONE)); + assertTrue(acl.isSet(CREATE)); + assertTrue(acl.isSet(READ_ACL)); + assertTrue(acl.isSet(WRITE_ACL)); + assertFalse(acl.isSet(ALL)); assertEquals(acl.getAclScope(), OzoneAcl.AclScope.DEFAULT); acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]"); assertEquals(acl.getName(), "bilbo"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); - assertTrue(acl.getAclBitSet().get(LIST.ordinal())); - assertTrue(acl.getAclBitSet().get(NONE.ordinal())); - assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); - assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); + assertTrue(acl.isSet(READ)); + assertTrue(acl.isSet(WRITE)); + assertTrue(acl.isSet(DELETE)); + assertTrue(acl.isSet(LIST)); + assertTrue(acl.isSet(NONE)); + assertTrue(acl.isSet(CREATE)); + assertTrue(acl.isSet(READ_ACL)); + assertTrue(acl.isSet(WRITE_ACL)); + assertFalse(acl.isSet(ALL)); assertEquals(acl.getAclScope(), OzoneAcl.AclScope.ACCESS); acl = OzoneAcl.parseAcl("group:hadoop:rwdlncxy[ACCESS]"); assertEquals(acl.getName(), "hadoop"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); - assertTrue(acl.getAclBitSet().get(LIST.ordinal())); - assertTrue(acl.getAclBitSet().get(NONE.ordinal())); - assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); - assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); + assertTrue(acl.isSet(READ)); + assertTrue(acl.isSet(WRITE)); + assertTrue(acl.isSet(DELETE)); + assertTrue(acl.isSet(LIST)); + assertTrue(acl.isSet(NONE)); + assertTrue(acl.isSet(CREATE)); + assertTrue(acl.isSet(READ_ACL)); + assertTrue(acl.isSet(WRITE_ACL)); + assertFalse(acl.isSet(ALL)); assertEquals(ACLIdentityType.GROUP, acl.getType()); assertEquals(acl.getAclScope(), OzoneAcl.AclScope.ACCESS); acl = OzoneAcl.parseAcl("world::rwdlncxy[DEFAULT]"); assertEquals(acl.getName(), "WORLD"); - assertTrue(acl.getAclBitSet().get(READ.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE.ordinal())); - assertTrue(acl.getAclBitSet().get(DELETE.ordinal())); - assertTrue(acl.getAclBitSet().get(LIST.ordinal())); - assertTrue(acl.getAclBitSet().get(NONE.ordinal())); - assertTrue(acl.getAclBitSet().get(CREATE.ordinal())); - assertTrue(acl.getAclBitSet().get(READ_ACL.ordinal())); - assertTrue(acl.getAclBitSet().get(WRITE_ACL.ordinal())); - assertFalse(acl.getAclBitSet().get(ALL.ordinal())); + assertTrue(acl.isSet(READ)); + assertTrue(acl.isSet(WRITE)); + 
assertTrue(acl.isSet(DELETE)); + assertTrue(acl.isSet(LIST)); + assertTrue(acl.isSet(NONE)); + assertTrue(acl.isSet(CREATE)); + assertTrue(acl.isSet(READ_ACL)); + assertTrue(acl.isSet(WRITE_ACL)); + assertFalse(acl.isSet(ALL)); assertEquals(ACLIdentityType.WORLD, acl.getType()); assertEquals(OzoneAcl.AclScope.DEFAULT, acl.getAclScope()); diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java index c38c09360f01..638dd3414e86 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmBucketInfo.java @@ -81,8 +81,7 @@ public void testClone() { .setAcls(Collections.singletonList(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, "defaultUser", - IAccessAuthorizer.ACLType.WRITE_ACL, - OzoneAcl.AclScope.ACCESS + OzoneAcl.AclScope.ACCESS, IAccessAuthorizer.ACLType.WRITE_ACL ))) .build(); @@ -97,8 +96,7 @@ public void testClone() { omBucketInfo.setAcls(Collections.singletonList(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, "newUser", - IAccessAuthorizer.ACLType.WRITE_ACL, - OzoneAcl.AclScope.ACCESS + OzoneAcl.AclScope.ACCESS, IAccessAuthorizer.ACLType.WRITE_ACL ))); assertNotEquals( omBucketInfo.getAcls().get(0), @@ -115,8 +113,7 @@ public void testClone() { omBucketInfo.removeAcl(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, "newUser", - IAccessAuthorizer.ACLType.WRITE_ACL, - OzoneAcl.AclScope.ACCESS + OzoneAcl.AclScope.ACCESS, IAccessAuthorizer.ACLType.WRITE_ACL )); assertEquals(0, omBucketInfo.getAcls().size()); assertEquals(1, cloneBucketInfo.getAcls().size()); @@ -131,8 +128,8 @@ public void getProtobufMessageEC() { .setStorageType(StorageType.ARCHIVE).setAcls(Collections .singletonList(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, - "defaultUser", IAccessAuthorizer.ACLType.WRITE_ACL, - OzoneAcl.AclScope.ACCESS))).build(); + "defaultUser", OzoneAcl.AclScope.ACCESS, IAccessAuthorizer.ACLType.WRITE_ACL + ))).build(); OzoneManagerProtocolProtos.BucketInfo protobuf = omBucketInfo.getProtobuf(); // No EC Config assertFalse(protobuf.hasDefaultReplicationConfig()); @@ -150,8 +147,8 @@ public void getProtobufMessageEC() { .setStorageType(StorageType.ARCHIVE) .setAcls(Collections.singletonList(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, - "defaultUser", IAccessAuthorizer.ACLType.WRITE_ACL, - OzoneAcl.AclScope.ACCESS))) + "defaultUser", OzoneAcl.AclScope.ACCESS, IAccessAuthorizer.ACLType.WRITE_ACL + ))) .setDefaultReplicationConfig( new DefaultReplicationConfig( new ECReplicationConfig(3, 2))).build(); diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java index 6396f0318dcc..4aead0cd8bcb 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmKeyInfo.java @@ -182,7 +182,7 @@ private void createdAndTest(boolean isMPU) { key.setAcls(Arrays.asList(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, "user1", - IAccessAuthorizer.ACLType.WRITE, ACCESS))); + ACCESS, IAccessAuthorizer.ACLType.WRITE))); // Change acls and check. 
assertNotEquals(key, cloneKey); diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmVolumeArgs.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmVolumeArgs.java index f5c854d9e709..41757d957a86 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmVolumeArgs.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmVolumeArgs.java @@ -46,7 +46,7 @@ public void testClone() throws Exception { .addMetadata("key1", "value1").addMetadata("key2", "value2") .addOzoneAcls( new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, "user1", - IAccessAuthorizer.ACLType.READ, ACCESS)).build(); + ACCESS, IAccessAuthorizer.ACLType.READ)).build(); OmVolumeArgs cloneVolumeArgs = omVolumeArgs.copyObject(); @@ -55,7 +55,7 @@ public void testClone() throws Exception { // add user acl to write. omVolumeArgs.addAcl(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, "user1", - IAccessAuthorizer.ACLType.WRITE, ACCESS)); + ACCESS, IAccessAuthorizer.ACLType.WRITE)); // Now check clone acl assertNotEquals(cloneVolumeArgs.getAcls().get(0), @@ -64,7 +64,7 @@ public void testClone() throws Exception { // Set user acl to Write_ACL. omVolumeArgs.setAcls(Collections.singletonList(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, "user1", - IAccessAuthorizer.ACLType.WRITE_ACL, ACCESS))); + ACCESS, IAccessAuthorizer.ACLType.WRITE_ACL))); assertNotEquals(cloneVolumeArgs.getAcls().get(0), omVolumeArgs.getAcls().get(0)); @@ -78,7 +78,7 @@ public void testClone() throws Exception { omVolumeArgs.removeAcl(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, "user1", - IAccessAuthorizer.ACLType.WRITE_ACL, ACCESS)); + ACCESS, IAccessAuthorizer.ACLType.WRITE_ACL)); // Removing acl, in original omVolumeArgs it should have no acls. 
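
The hunks above all follow the same two API shifts in OzoneAcl: the constructor now takes the ACL scope before the rights (with the rights given as one or more ACLType values), and callers query individual rights with isSet(ACLType) instead of reading the internal BitSet by ordinal. A minimal sketch of the new call shape, assuming the OzoneAcl signatures exactly as they appear in these test changes:

```java
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;

public final class OzoneAclSketch {
  private OzoneAclSketch() { }

  public static void main(String[] args) {
    // New argument order: identity type, name, scope, then the granted rights.
    OzoneAcl acl = new OzoneAcl(ACLIdentityType.USER, "bilbo",
        OzoneAcl.AclScope.ACCESS, ACLType.READ, ACLType.WRITE);

    // Rights are checked per ACLType; no BitSet/ordinal handling needed.
    System.out.println("read granted:   " + acl.isSet(ACLType.READ));
    System.out.println("delete granted: " + acl.isSet(ACLType.DELETE));
  }
}
```
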
assertEquals(0, omVolumeArgs.getAcls().size()); diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java index 7f157860e695..5781a68b58df 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOzoneAclUtil.java @@ -27,7 +27,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.BitSet; import java.util.List; import static org.apache.hadoop.hdds.conf.OzoneConfiguration.newInstanceOf; @@ -35,6 +34,7 @@ import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -49,13 +49,13 @@ public class TestOzoneAclUtil { getDefaultAcls(); private static final OzoneAcl USER1 = new OzoneAcl(USER, "user1", - ACLType.READ_ACL, ACCESS); + ACCESS, ACLType.READ_ACL); private static final OzoneAcl USER2 = new OzoneAcl(USER, "user2", - ACLType.WRITE, ACCESS); + ACCESS, ACLType.WRITE); private static final OzoneAcl GROUP1 = new OzoneAcl(GROUP, "group1", - ACLType.ALL, ACCESS); + ACCESS, ACLType.ALL); @Test public void testAddAcl() throws IOException { @@ -65,7 +65,7 @@ public void testAddAcl() throws IOException { // Add new permission to existing acl entry. OzoneAcl oldAcl = currentAcls.get(0); OzoneAcl newAcl = new OzoneAcl(oldAcl.getType(), oldAcl.getName(), - ACLType.READ_ACL, ACCESS); + ACCESS, ACLType.READ_ACL); addAndVerifyAcl(currentAcls, newAcl, true, DEFAULT_ACLS.size()); // Add same permission again and verify result @@ -97,7 +97,7 @@ public void testRemoveAcl() { // Add new permission to existing acl entru. 
OzoneAcl oldAcl = currentAcls.get(0); OzoneAcl newAcl = new OzoneAcl(oldAcl.getType(), oldAcl.getName(), - ACLType.READ_ACL, ACCESS); + ACCESS, ACLType.READ_ACL); // Remove non existing acl entry removeAndVerifyAcl(currentAcls, USER1, false, DEFAULT_ACLS.size()); @@ -143,9 +143,12 @@ private boolean verifyAclRemoved(List acls, OzoneAcl removedAcl) { if (acl.getName().equals(removedAcl.getName()) && acl.getType().equals(removedAcl.getType()) && acl.getAclScope().equals(removedAcl.getAclScope())) { - BitSet temp = (BitSet) acl.getAclBitSet().clone(); - temp.and(removedAcl.getAclBitSet()); - return !temp.equals(removedAcl.getAclBitSet()); + for (ACLType t : removedAcl.getAclList()) { + if (acl.isSet(t)) { + return false; + } + } + return true; } } return true; @@ -156,9 +159,12 @@ private boolean verifyAclAdded(List acls, OzoneAcl newAcl) { if (acl.getName().equals(newAcl.getName()) && acl.getType().equals(newAcl.getType()) && acl.getAclScope().equals(newAcl.getAclScope())) { - BitSet temp = (BitSet) acl.getAclBitSet().clone(); - temp.and(newAcl.getAclBitSet()); - return temp.equals(newAcl.getAclBitSet()); + for (ACLType t : newAcl.getAclList()) { + if (!acl.isSet(t)) { + return false; + } + } + return true; } } return false; @@ -185,11 +191,11 @@ private static List getDefaultAcls() { IAccessAuthorizer.ACLType groupRights = aclConfig.getGroupDefaultRights(); OzoneAclUtil.addAcl(ozoneAcls, new OzoneAcl(USER, - ugi.getUserName(), userRights, ACCESS)); + ugi.getUserName(), ACCESS, userRights)); //Group ACLs of the User List userGroups = Arrays.asList(ugi.getGroupNames()); userGroups.stream().forEach((group) -> OzoneAclUtil.addAcl(ozoneAcls, - new OzoneAcl(GROUP, group, groupRights, ACCESS))); + new OzoneAcl(GROUP, group, ACCESS, groupRights))); return ozoneAcls; } @@ -226,7 +232,7 @@ public void testAddDefaultAcl() { assertEquals(2, ozoneAcls.size()); assertNotEquals(ozoneAcls.get(0).getAclScope(), ozoneAcls.get(1).getAclScope()); - assertEquals(ozoneAcls.get(0).getAclBitSet(), - ozoneAcls.get(1).getAclBitSet()); + assertArrayEquals(ozoneAcls.get(0).getAclByteArray(), + ozoneAcls.get(1).getAclByteArray()); } } diff --git a/hadoop-ozone/csi/pom.xml b/hadoop-ozone/csi/pom.xml index b28db73aed6e..d40a995ab920 100644 --- a/hadoop-ozone/csi/pom.xml +++ b/hadoop-ozone/csi/pom.xml @@ -46,6 +46,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> com.google.j2objc j2objc-annotations + + com.google.code.findbugs + jsr305 + @@ -62,6 +66,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> + + com.google.code.findbugs + jsr305 + 3.0.2 + provided + com.google.guava guava @@ -109,6 +119,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> com.google.protobuf protobuf-java + + com.google.code.findbugs + jsr305 + diff --git a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh index 5139dddcd8c1..4fca7bb6aaee 100755 --- a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh +++ b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh @@ -35,7 +35,7 @@ find "." -not -path '*/iteration*' -name 'TEST*.xml' -print0 \ > "${tempfile}" if [[ "${CHECK:-unit}" == "integration" ]]; then - find "." 
-not -path '*/iteration*' -name '*-output.txt' -print0 \ + find hadoop-ozone/integration-test -not -path '*/iteration*' -name '*-output.txt' -print0 \ | xargs -n1 -0 "grep" -l -E "not closed properly|was not shutdown properly" \ | awk -F/ '{sub("-output.txt",""); print $NF}' \ >> "${tempfile}" diff --git a/hadoop-ozone/dev-support/checks/checkstyle.sh b/hadoop-ozone/dev-support/checks/checkstyle.sh index cb8b6f8f9151..18ae39059755 100755 --- a/hadoop-ozone/dev-support/checks/checkstyle.sh +++ b/hadoop-ozone/dev-support/checks/checkstyle.sh @@ -42,7 +42,7 @@ cat "${REPORT_DIR}/output.log" find "." -name checkstyle-errors.xml -print0 \ | xargs -0 sed '$!N; //d' \ + -e '//d' \ -e '//dev/null 2>&1 && pwd )" CHECK=integration -source "${DIR}/junit.sh" -pl :ozone-integration-test,:mini-chaos-tests "$@" +source "${DIR}/junit.sh" "$@" diff --git a/hadoop-ozone/dev-support/checks/junit.sh b/hadoop-ozone/dev-support/checks/junit.sh index 768a1f32a38b..9d2efd8ac641 100755 --- a/hadoop-ozone/dev-support/checks/junit.sh +++ b/hadoop-ozone/dev-support/checks/junit.sh @@ -50,10 +50,8 @@ if [[ -f hadoop-ozone/dist/src/shell/ozone/ozone-functions.sh ]]; then ozone_java_setup fi -if [[ "${CHECK}" == "integration" ]] || [[ ${ITERATIONS} -gt 1 ]]; then - if [[ ${OZONE_REPO_CACHED} == "false" ]]; then - mvn ${MAVEN_OPTIONS} -DskipTests clean install - fi +if [[ ${ITERATIONS} -gt 1 ]] && [[ ${OZONE_REPO_CACHED} == "false" ]]; then + mvn ${MAVEN_OPTIONS} -DskipTests clean install fi REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/${CHECK}"} diff --git a/hadoop-ozone/dev-support/checks/native.sh b/hadoop-ozone/dev-support/checks/native.sh index 1eeca5c0f3d9..89206b3bdf2f 100755 --- a/hadoop-ozone/dev-support/checks/native.sh +++ b/hadoop-ozone/dev-support/checks/native.sh @@ -19,20 +19,5 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" CHECK=native -zlib_version=$(mvn -N help:evaluate -Dexpression=zlib.version -q -DforceStdout) -if [[ -z "${zlib_version}" ]]; then - echo "ERROR zlib.version not defined in pom.xml" - exit 1 -fi - -bzip2_version=$(mvn -N help:evaluate -Dexpression=bzip2.version -q -DforceStdout) -if [[ -z "${bzip2_version}" ]]; then - echo "ERROR bzip2.version not defined in pom.xml" - exit 1 -fi - -source "${DIR}/junit.sh" -Pnative -Drocks_tools_native \ - -Dbzip2.url="https://github.com/libarchive/bzip2/archive/refs/tags/bzip2-${bzip2_version}.tar.gz" \ - -Dzlib.url="https://github.com/madler/zlib/releases/download/v${zlib_version}/zlib-${zlib_version}.tar.gz" \ - -DexcludedGroups="unhealthy" \ +source "${DIR}/junit.sh" -Pnative -Drocks_tools_native -DexcludedGroups="unhealthy" \ "$@" diff --git a/hadoop-ozone/dev-support/checks/rat.sh b/hadoop-ozone/dev-support/checks/rat.sh index 32a10349026c..2bdb66ba1198 100755 --- a/hadoop-ozone/dev-support/checks/rat.sh +++ b/hadoop-ozone/dev-support/checks/rat.sh @@ -24,13 +24,7 @@ mkdir -p "$REPORT_DIR" REPORT_FILE="$REPORT_DIR/summary.txt" -dirs="hadoop-hdds hadoop-ozone" - -for d in $dirs; do - pushd "$d" || exit 1 - mvn -B --no-transfer-progress -fn org.apache.rat:apache-rat-plugin:0.13:check - popd -done +mvn -B --no-transfer-progress -fn org.apache.rat:apache-rat-plugin:check "$@" grep -r --include=rat.txt "!????" 
$dirs | tee "$REPORT_FILE" diff --git a/hadoop-ozone/dev-support/checks/unit.sh b/hadoop-ozone/dev-support/checks/unit.sh index d2d50c5ff03f..8e540fa9e141 100755 --- a/hadoop-ozone/dev-support/checks/unit.sh +++ b/hadoop-ozone/dev-support/checks/unit.sh @@ -14,8 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -#checks:unit - DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" CHECK=unit source "${DIR}/junit.sh" \ diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml index 171494aa5dbe..df9c4c0ab3e6 100644 --- a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml +++ b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml @@ -18,7 +18,7 @@ - - org.junit.jupiter - junit-jupiter-engine - test - org.junit.platform junit-platform-launcher diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java index 26f896663b81..3c12bab4323b 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java @@ -261,10 +261,10 @@ protected void initializeConfiguration() throws IOException { TimeUnit.SECONDS); conf.setInt( OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, 4); conf.setInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, 2); conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2); ReplicationManagerConfiguration replicationConf = @@ -273,34 +273,21 @@ protected void initializeConfiguration() throws IOException { replicationConf.setEventTimeout(Duration.ofSeconds(20)); replicationConf.setDatanodeTimeoutOffset(0); conf.setFromObject(replicationConf); - conf.setInt(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 100); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP, 100); + conf.setInt(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 100); + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP, 100); conf.setInt(OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP, 100); conf.setInt(OMConfigKeys. OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY, 100); } - /** - * Sets the number of data volumes per datanode. - * - * @param val number of volumes per datanode. - * - * @return MiniOzoneCluster.Builder - */ - @Override - public Builder setNumDataVolumes(int val) { - numDataVolumes = val; - return this; - } - @Override public MiniOzoneChaosCluster build() throws IOException { DefaultMetricsSystem.setMiniClusterMode(true); DatanodeStoreCache.setMiniClusterMode(); initializeConfiguration(); - if (numOfOMs > 1) { + if (numberOfOzoneManagers() > 1) { initOMRatisConf(); } @@ -313,8 +300,7 @@ public MiniOzoneChaosCluster build() throws IOException { throw new IOException("Unable to build MiniOzoneCluster. 
", ex); } - final List hddsDatanodes = createHddsDatanodes( - scmService.getActiveServices(), null); + final List hddsDatanodes = createHddsDatanodes(); MiniOzoneChaosCluster cluster = new MiniOzoneChaosCluster(conf, omService, scmService, hddsDatanodes, @@ -323,6 +309,7 @@ public MiniOzoneChaosCluster build() throws IOException { if (startDataNodes) { cluster.startHddsDatanodes(); } + prepareForNextBuild(); return cluster; } } diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java index 6894aed25ab6..5be5c3ef0c5b 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java @@ -130,7 +130,9 @@ public static void init() throws Exception { .setOMServiceID(omServiceId) .setNumStorageContainerManagers(numStorageContainerManagerss) .setSCMServiceID(scmServiceId) - .setNumDataVolumes(numDataVolumes); + .setDatanodeFactory(UniformDatanodesFactory.newBuilder() + .setNumDataVolumes(numDataVolumes) + .build()); failureClasses.forEach(chaosBuilder::addFailures); cluster = chaosBuilder.build(); diff --git a/hadoop-ozone/fault-injection-test/pom.xml b/hadoop-ozone/fault-injection-test/pom.xml index 7de9bcc297da..432faab48777 100644 --- a/hadoop-ozone/fault-injection-test/pom.xml +++ b/hadoop-ozone/fault-injection-test/pom.xml @@ -33,4 +33,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> mini-chaos-tests + + + org.mockito + mockito-inline + test + + + diff --git a/hadoop-ozone/httpfsgateway/pom.xml b/hadoop-ozone/httpfsgateway/pom.xml index f56b4006d852..582e6c1034a7 100644 --- a/hadoop-ozone/httpfsgateway/pom.xml +++ b/hadoop-ozone/httpfsgateway/pom.xml @@ -201,7 +201,7 @@ org.apache.maven.plugins maven-eclipse-plugin - 2.6 + 2.10 org.apache.maven.plugins @@ -242,10 +242,6 @@ - - org.apache.rat - apache-rat-plugin - org.apache.maven.plugins maven-antrun-plugin diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index 913cd639bf7c..7de7b2f936ec 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -119,11 +119,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-jar test - - org.junit.jupiter - junit-jupiter-engine - test - org.junit.platform junit-platform-launcher diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java index 51d75c07d2d0..3bb387440736 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java @@ -22,6 +22,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -42,6 +43,7 @@ public abstract class AbstractContractAppendTest extends AbstractFSContractTestB private Path testPath; private Path target; + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java index ff5ed3b0624b..9845caad45d1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java @@ -20,6 +20,7 @@ import org.apache.hadoop.fs.Path; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,6 +45,7 @@ public abstract class AbstractContractConcatTest extends AbstractFSContractTestB private Path zeroByteFile; private Path target; + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java index 0d6c30e52c0f..8beaff7e8b2c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java @@ -25,6 +25,7 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; import org.apache.commons.io.FileUtils; @@ -44,6 +45,7 @@ public abstract class AbstractContractCopyFromLocalTest extends private static final Charset ASCII = StandardCharsets.US_ASCII; private File file; + @AfterEach @Override public void teardown() throws Exception { super.teardown(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java index 21290d1e889f..12cfba2312a7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java @@ -33,6 +33,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.RemoteIterator; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import static org.apache.hadoop.fs.contract.ContractTestUtils.createSubdirs; @@ -63,6 +64,7 @@ public abstract class AbstractContractGetFileStatusTest extends private static final int TREE_FILES = 4; private static final int TREE_FILESIZE = 512; + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java index 86363b55ccff..51474945fb81 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java @@ -30,6 +30,8 @@ import com.google.common.base.Charsets; import org.assertj.core.api.Assertions; 
+import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -83,6 +85,7 @@ public abstract class AbstractContractMultipartUploaderTest extends private UploadHandle activeUpload; private Path activeUploadPath; + @BeforeEach @Override public void setup() throws Exception { super.setup(); @@ -96,6 +99,7 @@ public void setup() throws Exception { uploader1 = fs.createMultipartUploader(testPath).build(); } + @AfterEach @Override public void teardown() throws Exception { MultipartUploader uploader = getUploader(1); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java index 166e8e301e40..51ebd4437b9f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java @@ -45,6 +45,7 @@ import static org.assertj.core.api.Assertions.fail; import org.assertj.core.api.Assertions; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; /** @@ -62,6 +63,7 @@ protected Configuration createConfiguration() { return conf; } + @AfterEach @Override public void teardown() throws Exception { IOUtils.closeStream(instream); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java index 3ff3f72cc6e6..d164a7144b0a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java @@ -21,6 +21,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.assertj.core.api.Assertions; import org.slf4j.Logger; @@ -58,6 +59,7 @@ public abstract class AbstractContractRootDirectoryTest extends AbstractFSContra LoggerFactory.getLogger(AbstractContractRootDirectoryTest.class); public static final int OBJECTSTORE_RETRY_TIMEOUT = 30000; + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java index 618025dc06f7..af259f600e27 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java @@ -26,6 +26,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.IOUtils; import org.assertj.core.api.Assertions; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,6 +55,7 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas private Path zeroByteFile; private FSDataInputStream instream; + @BeforeEach 
@Override public void setup() throws Exception { super.setup(); @@ -74,6 +77,7 @@ protected Configuration createConfiguration() { return conf; } + @AfterEach @Override public void teardown() throws Exception { IOUtils.closeStream(instream); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java index b9a86ae366cd..82efce828142 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java @@ -21,6 +21,7 @@ import java.io.FileNotFoundException; import org.apache.hadoop.fs.Path; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,6 +39,7 @@ public abstract class AbstractContractSetTimesTest extends private Path testPath; private Path target; + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java index 07c4f26543a8..3a58d2124d8e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.fs.contract; import org.assertj.core.api.Assertions; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import java.io.IOException; @@ -38,6 +39,7 @@ public abstract class AbstractContractUnbufferTest extends AbstractFSContractTes private Path file; private byte[] fileBytes; + @BeforeEach @Override public void setup() throws Exception { super.setup(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java index a9fc2710ce31..430ec4e03fd2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java @@ -110,6 +110,7 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.assertHasPathCapabilities; import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE; import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER; +import static org.apache.hadoop.fs.ozone.OzoneFileSystemTests.createKeyWithECReplicationConfiguration; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; @@ -427,6 +428,19 @@ public void testCreateDoesNotAddParentDirKeys() throws Exception { assertTrue(fs.getFileStatus(parent).isDirectory(), "Parent directory does not appear to be a directory"); } + @Test + public void testCreateKeyWithECReplicationConfig() throws Exception { + Path root = new Path("/" + volumeName + "/" + bucketName); + Path testKeyPath = new Path(root, "testKey"); + 
createKeyWithECReplicationConfiguration(cluster.getConf(), testKeyPath); + + OzoneKeyDetails key = getKey(testKeyPath, false); + assertEquals(HddsProtos.ReplicationType.EC, + key.getReplicationConfig().getReplicationType()); + assertEquals("rs-3-2-1024k", + key.getReplicationConfig().getReplication()); + } + @Test public void testDeleteCreatesFakeParentDir() throws Exception { Path grandparent = new Path("/testDeleteCreatesFakeParentDir"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java index d44342acc432..a092890ae2a4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; +import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OFSPath; import org.apache.hadoop.ozone.OzoneAcl; @@ -70,6 +71,9 @@ import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.hadoop.ozone.security.acl.OzoneAclConfig; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.tools.DistCp; +import org.apache.hadoop.tools.DistCpOptions; +import org.apache.hadoop.tools.mapred.CopyMapper; import org.apache.hadoop.util.ToolRunner; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; @@ -78,6 +82,8 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -88,9 +94,9 @@ import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; -import java.util.BitSet; import java.util.Collection; import java.util.Collections; +import java.util.EnumSet; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedList; @@ -109,6 +115,7 @@ import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertHasPathCapabilities; import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE; +import static org.apache.hadoop.fs.ozone.OzoneFileSystemTests.createKeyWithECReplicationConfiguration; import static org.apache.hadoop.hdds.client.ECReplicationConfig.EcCodec.RS; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; @@ -122,7 +129,6 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.DELETE; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.LIST; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; @@ -321,6 +327,19 @@ void 
testCreateDoesNotAddParentDirKeys() throws Exception { fs.delete(grandparent, true); } + @Test + public void testCreateKeyWithECReplicationConfig() throws Exception { + String testKeyName = "testKey"; + Path testKeyPath = new Path(bucketPath, testKeyName); + createKeyWithECReplicationConfiguration(cluster.getConf(), testKeyPath); + + OzoneKeyDetails key = getKey(testKeyPath, false); + assertEquals(HddsProtos.ReplicationType.EC, + key.getReplicationConfig().getReplicationType()); + assertEquals("rs-3-2-1024k", + key.getReplicationConfig().getReplication()); + } + @Test void testListStatusWithIntermediateDirWithECEnabled() throws Exception { @@ -1183,21 +1202,15 @@ void testSharedTmpDir() throws IOException { ACLType userRights = aclConfig.getUserDefaultRights(); // Construct ACL for world access // ACL admin owner, world read+write - BitSet aclRights = new BitSet(); - aclRights.set(READ.ordinal()); - aclRights.set(WRITE.ordinal()); - List objectAcls = new ArrayList<>(); - objectAcls.add(new OzoneAcl(ACLIdentityType.WORLD, "", - aclRights, ACCESS)); - objectAcls.add(new OzoneAcl(ACLIdentityType.USER, "admin", userRights, - ACCESS)); + EnumSet aclRights = EnumSet.of(READ, WRITE); // volume acls have all access to admin and read+write access to world // Construct VolumeArgs - VolumeArgs volumeArgs = new VolumeArgs.Builder() + VolumeArgs volumeArgs = VolumeArgs.newBuilder() .setAdmin("admin") .setOwner("admin") - .setAcls(Collections.unmodifiableList(objectAcls)) + .addAcl(new OzoneAcl(ACLIdentityType.WORLD, "", ACCESS, aclRights)) + .addAcl(new OzoneAcl(ACLIdentityType.USER, "admin", ACCESS, userRights)) .setQuotaInNamespace(1000) .setQuotaInBytes(Long.MAX_VALUE).build(); // Sanity check @@ -1228,20 +1241,11 @@ void testSharedTmpDir() throws IOException { } // set acls for shared tmp mount under the tmp volume - objectAcls.clear(); - objectAcls.add(new OzoneAcl(ACLIdentityType.USER, "admin", userRights, - ACCESS)); - aclRights.clear(DELETE.ordinal()); - aclRights.set(LIST.ordinal()); - objectAcls.add(new OzoneAcl(ACLIdentityType.WORLD, "", - aclRights, ACCESS)); - objectAcls.add(new OzoneAcl(ACLIdentityType.USER, "admin", userRights, - ACCESS)); // bucket acls have all access to admin and read+write+list access to world - BucketArgs bucketArgs = new BucketArgs.Builder() .setOwner("admin") - .setAcls(Collections.unmodifiableList(objectAcls)) + .addAcl(new OzoneAcl(ACLIdentityType.WORLD, "", ACCESS, READ, WRITE, LIST)) + .addAcl(new OzoneAcl(ACLIdentityType.USER, "admin", ACCESS, userRights)) .setQuotaInNamespace(1000) .setQuotaInBytes(Long.MAX_VALUE).build(); @@ -1301,10 +1305,10 @@ void testTempMount() throws IOException { ACLType userRights = aclConfig.getUserDefaultRights(); // Construct ACL for world access OzoneAcl aclWorldAccess = new OzoneAcl(ACLIdentityType.WORLD, "", - userRights, ACCESS); + ACCESS, userRights); // Construct VolumeArgs - VolumeArgs volumeArgs = new VolumeArgs.Builder() - .setAcls(Collections.singletonList(aclWorldAccess)) + VolumeArgs volumeArgs = VolumeArgs.newBuilder() + .addAcl(aclWorldAccess) .setQuotaInNamespace(1000).build(); // Sanity check assertNull(volumeArgs.getOwner()); @@ -2302,10 +2306,10 @@ void testNonPrivilegedUserMkdirCreateBucket() throws IOException { ACLType userRights = aclConfig.getUserDefaultRights(); // Construct ACL for world access OzoneAcl aclWorldAccess = new OzoneAcl(ACLIdentityType.WORLD, "", - userRights, ACCESS); + ACCESS, userRights); // Construct VolumeArgs, set ACL to world access - VolumeArgs volumeArgs = new VolumeArgs.Builder() - 
.setAcls(Collections.singletonList(aclWorldAccess)) + VolumeArgs volumeArgs = VolumeArgs.newBuilder() + .addAcl(aclWorldAccess) .build(); proxy.createVolume(volume, volumeArgs); @@ -2336,6 +2340,20 @@ private void createLinkBucket(String linkVolume, String linkBucket, ozoneVolume.createBucket(linkBucket, builder.build()); } + private Path createAndGetBucketPath() + throws IOException { + BucketArgs.Builder builder = BucketArgs.newBuilder(); + builder.setStorageType(StorageType.DISK); + builder.setBucketLayout(bucketLayout); + BucketArgs omBucketArgs = builder.build(); + String vol = UUID.randomUUID().toString(); + String buck = UUID.randomUUID().toString(); + final OzoneBucket bucket = + TestDataUtil.createVolumeAndBucket(client, vol, buck, omBucketArgs); + Path volume = new Path(OZONE_URI_DELIMITER, bucket.getVolumeName()); + return new Path(volume, bucket.getName()); + } + @Test void testSnapshotRead() throws Exception { if (useOnlyCache) { @@ -2477,4 +2495,113 @@ void testSetTimes() throws Exception { assertEquals(mtime, fileStatus.getModificationTime()); } + @Test + public void testSetTimesForLinkedBucketPath() throws Exception { + // Create a file + OzoneBucket sourceBucket = + TestDataUtil.createVolumeAndBucket(client, bucketLayout); + Path volumePath1 = + new Path(OZONE_URI_DELIMITER, sourceBucket.getVolumeName()); + Path sourceBucketPath = new Path(volumePath1, sourceBucket.getName()); + Path path = new Path(sourceBucketPath, "key1"); + try (FSDataOutputStream stream = fs.create(path)) { + stream.write(1); + } + OzoneVolume sourceVol = client.getObjectStore().getVolume(sourceBucket.getVolumeName()); + String linkBucketName = UUID.randomUUID().toString(); + createLinkBucket(sourceVol.getName(), linkBucketName, + sourceVol.getName(), sourceBucket.getName()); + + Path linkedBucketPath = new Path(volumePath1, linkBucketName); + Path keyInLinkedBucket = new Path(linkedBucketPath, "key1"); + + // test setTimes in linked bucket path + long mtime = 1000; + fs.setTimes(keyInLinkedBucket, mtime, 2000); + + FileStatus fileStatus = fs.getFileStatus(path); + // verify that mtime is updated as expected. + assertEquals(mtime, fileStatus.getModificationTime()); + + long mtimeDontUpdate = -1; + fs.setTimes(keyInLinkedBucket, mtimeDontUpdate, 2000); + + fileStatus = fs.getFileStatus(keyInLinkedBucket); + // verify that mtime is NOT updated as expected. 
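
In the AbstractRootedOzoneFileSystemTest changes above, hand-built BitSet rights and setAcls(List) calls give way to EnumSet-based rights plus addAcl on the VolumeArgs/BucketArgs builders. A hedged sketch of that builder style, using only calls visible in the diff (VolumeArgs.newBuilder, addAcl, and the scope-first OzoneAcl constructor); the exact parameter type of the EnumSet-accepting constructor is assumed from the test code:

```java
import java.util.EnumSet;

import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.client.VolumeArgs;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;

public final class VolumeArgsAclSketch {
  private VolumeArgsAclSketch() { }

  static VolumeArgs worldReadWriteVolume(ACLType adminRights) {
    // World gets read+write; the admin user gets its configured default rights.
    EnumSet<ACLType> worldRights = EnumSet.of(ACLType.READ, ACLType.WRITE);
    return VolumeArgs.newBuilder()
        .setAdmin("admin")
        .setOwner("admin")
        .addAcl(new OzoneAcl(ACLIdentityType.WORLD, "", OzoneAcl.AclScope.ACCESS, worldRights))
        .addAcl(new OzoneAcl(ACLIdentityType.USER, "admin", OzoneAcl.AclScope.ACCESS, adminRights))
        .setQuotaInNamespace(1000)
        .setQuotaInBytes(Long.MAX_VALUE)
        .build();
  }
}
```
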
+ assertEquals(mtime, fileStatus.getModificationTime()); + } + + @ParameterizedTest(name = "Source Replication Factor = {0}") + @ValueSource(shorts = { 1, 3 }) + public void testDistcp(short sourceRepFactor) throws Exception { + Path srcBucketPath = createAndGetBucketPath(); + Path insideSrcBucket = new Path(srcBucketPath, "*"); + Path dstBucketPath = createAndGetBucketPath(); + // create 2 files on source + List fileNames = createFiles(srcBucketPath, 2, sourceRepFactor); + // Create target directory/bucket + fs.mkdirs(dstBucketPath); + + // perform distcp + DistCpOptions options = + new DistCpOptions.Builder(Collections.singletonList(insideSrcBucket), + dstBucketPath).build(); + options.appendToConf(conf); + Job distcpJob = new DistCp(conf, options).execute(); + verifyCopy(dstBucketPath, distcpJob, 2, 2); + FileStatus sourceFileStatus = fs.listStatus(srcBucketPath)[0]; + FileStatus dstFileStatus = fs.listStatus(dstBucketPath)[0]; + assertEquals(sourceRepFactor, sourceFileStatus.getReplication()); + // without preserve distcp should create file with default replication + assertEquals(fs.getDefaultReplication(dstBucketPath), + dstFileStatus.getReplication()); + + deleteFiles(dstBucketPath, fileNames); + + // test preserve option + options = + new DistCpOptions.Builder(Collections.singletonList(insideSrcBucket), + dstBucketPath).preserve(DistCpOptions.FileAttribute.REPLICATION) + .build(); + options.appendToConf(conf); + distcpJob = new DistCp(conf, options).execute(); + verifyCopy(dstBucketPath, distcpJob, 2, 2); + dstFileStatus = fs.listStatus(dstBucketPath)[0]; + // src and dst should have same replication + assertEquals(sourceRepFactor, dstFileStatus.getReplication()); + + // test if copy is skipped due to matching checksums + assertFalse(options.shouldSkipCRC()); + distcpJob = new DistCp(conf, options).execute(); + verifyCopy(dstBucketPath, distcpJob, 0, 2); + } + + private void verifyCopy(Path dstBucketPath, Job distcpJob, + long expectedFilesToBeCopied, long expectedTotalFilesInDest) throws IOException { + long filesCopied = + distcpJob.getCounters().findCounter(CopyMapper.Counter.COPY).getValue(); + FileStatus[] destinationFileStatus = fs.listStatus(dstBucketPath); + assertEquals(expectedTotalFilesInDest, destinationFileStatus.length); + assertEquals(expectedFilesToBeCopied, filesCopied); + } + + private List createFiles(Path srcBucketPath, int fileCount, short factor) throws IOException { + List createdFiles = new ArrayList<>(); + for (int i = 1; i <= fileCount; i++) { + String keyName = "key" + RandomStringUtils.randomNumeric(5); + Path file = new Path(srcBucketPath, keyName); + try (FSDataOutputStream fsDataOutputStream = fs.create(file, factor)) { + fsDataOutputStream.writeBytes("Hello"); + } + createdFiles.add(keyName); + } + return createdFiles; + } + + private void deleteFiles(Path base, List fileNames) throws IOException { + for (String key : fileNames) { + fs.delete(new Path(base, key)); + } + } + } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java index d729251267ea..47c584e048a6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java @@ -21,6 +21,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import 
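
The new testDistcp case above drives DistCp programmatically rather than through the CLI: it builds DistCpOptions over the source glob and the target bucket, runs the copy, and reads the COPY counter from the returned MapReduce Job to see how many files were actually transferred. A condensed sketch of that flow, limited to the Hadoop tools classes the test itself imports (DistCp, DistCpOptions, CopyMapper):

```java
import java.util.Collections;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.tools.DistCp;
import org.apache.hadoop.tools.DistCpOptions;
import org.apache.hadoop.tools.mapred.CopyMapper;

public final class DistcpSketch {
  private DistcpSketch() { }

  static long copyPreservingReplication(Configuration conf, Path srcGlob, Path dst)
      throws Exception {
    // Preserve the source replication factor instead of falling back to the
    // destination filesystem's default replication.
    DistCpOptions options = new DistCpOptions.Builder(
        Collections.singletonList(srcGlob), dst)
        .preserve(DistCpOptions.FileAttribute.REPLICATION)
        .build();
    options.appendToConf(conf);

    Job job = new DistCp(conf, options).execute();
    // Number of files DistCp actually copied (files skipped as up to date are not counted).
    return job.getCounters().findCounter(CopyMapper.Counter.COPY).getValue();
  }
}
```
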
org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import java.io.IOException; @@ -30,6 +31,8 @@ import java.util.TreeSet; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_LISTING_PAGE_SIZE; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -95,4 +98,17 @@ private static void listStatusIterator(FileSystem subject, assertEquals(total, iCount); } + + static void createKeyWithECReplicationConfiguration(OzoneConfiguration inputConf, Path keyPath) + throws IOException { + OzoneConfiguration conf = new OzoneConfiguration(inputConf); + conf.set(OZONE_REPLICATION, "rs-3-2-1024k"); + conf.set(OZONE_REPLICATION_TYPE, "EC"); + URI uri = FileSystem.getDefaultUri(conf); + conf.setBoolean( + String.format("fs.%s.impl.disable.cache", uri.getScheme()), true); + try (FileSystem fileSystem = FileSystem.get(uri, conf)) { + ContractTestUtils.touch(fileSystem, keyPath); + } + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java index 87f114bd7115..382f4b72034c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java @@ -55,13 +55,14 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.LongSupplier; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; @@ -126,15 +127,13 @@ public static void teardown() { @AfterEach public void cleanup() { - try { + assertDoesNotThrow(() -> { Path root = new Path("/"); FileStatus[] fileStatuses = fs.listStatus(root); for (FileStatus fileStatus : fileStatuses) { fs.delete(fileStatus.getPath(), true); } - } catch (IOException ex) { - fail("Failed to cleanup files."); - } + }); } @Test @@ -534,16 +533,14 @@ private void assertTableRowCount(Table table, int count) private boolean assertTableRowCount(int expectedCount, Table table) { - long count = 0L; - try { - count = cluster.getOzoneManager().getMetadataManager() - .countRowsInTable(table); + AtomicLong count = new AtomicLong(0L); + assertDoesNotThrow(() -> { + count.set(cluster.getOzoneManager().getMetadataManager().countRowsInTable(table)); LOG.info("{} actual row count={}, expectedCount={}", table.getName(), - count, expectedCount); - } catch (IOException ex) { - fail("testDoubleBuffer failed with: " + ex); - } - return count == expectedCount; + 
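
One detail worth calling out in the createKeyWithECReplicationConfiguration helper above: it sets fs.<scheme>.impl.disable.cache to true before calling FileSystem.get. Hadoop caches FileSystem instances per scheme, authority, and user, so without disabling the cache the call would likely return an already-initialized instance and the EC client settings (rs-3-2-1024k) placed on the copied configuration would be ignored; disabling the cache yields a fresh instance, closed by the try-with-resources block, that applies them to the touched key.
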
count.get(), expectedCount); + }); + + return count.get() == expectedCount; } private void checkPath(Path path) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java index a8c450e3cc99..daa433f68f8a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java @@ -33,27 +33,27 @@ import java.util.stream.Stream; import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.crypto.CryptoCodec; import org.apache.hadoop.crypto.CryptoOutputStream; import org.apache.hadoop.crypto.Encryptor; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.hdds.conf.StorageUnit; +import org.apache.hadoop.hdds.scm.storage.BlockOutputStream; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.storage.BlockInputStream; -import org.apache.hadoop.hdds.scm.storage.BlockOutputStream; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -73,6 +73,9 @@ import org.apache.hadoop.ozone.client.io.KeyOutputStream; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; +import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl; +import org.apache.hadoop.ozone.container.metadata.AbstractDatanodeStore; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; @@ -81,9 +84,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequest; -import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequestWithFSO; -import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; import org.apache.ozone.test.GenericTestUtils; @@ -104,6 +104,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_LIST_INCREMENTAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static 
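
The TestDirectoryDeletingServiceWithFSO changes above replace the try/catch-then-fail idiom with JUnit 5's assertDoesNotThrow, using an AtomicLong to carry a value out of the lambda. A small generic sketch of that pattern (the ThrowingCounter interface and names are placeholders for illustration, not part of the patch):

```java
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;

import java.util.concurrent.atomic.AtomicLong;

final class RowCountCheckSketch {
  private RowCountCheckSketch() { }

  // Returns true when the (possibly throwing) count matches the expectation;
  // any exception fails the test and keeps the original stack trace.
  static boolean rowCountMatches(long expected, ThrowingCounter counter) {
    AtomicLong count = new AtomicLong();
    assertDoesNotThrow(() -> count.set(counter.count()));
    return count.get() == expected;
  }

  @FunctionalInterface
  interface ThrowingCounter {
    long count() throws Exception;
  }
}
```
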
org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_ROOT; @@ -151,9 +152,11 @@ public static void init() throws Exception { CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false); CONF.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name()); CONF.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); + CONF.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); // Reduce KeyDeletingService interval CONF.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); CONF.setBoolean("ozone.client.incremental.chunk.list", true); + CONF.setBoolean("ozone.client.stream.putblock.piggybacking", true); CONF.setBoolean(OZONE_CHUNK_LIST_INCREMENTAL, true); ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(BLOCK_SIZE) @@ -167,7 +170,6 @@ public static void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(CONF) .setNumDatanodes(5) - .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); @@ -176,11 +178,11 @@ public static void init() throws Exception { bucket = TestDataUtil.createVolumeAndBucket(client, layout); // Enable DEBUG level logging for relevant classes - GenericTestUtils.setLogLevel(OMKeyRequest.LOG, Level.DEBUG); - GenericTestUtils.setLogLevel(OMKeyCommitRequest.LOG, Level.DEBUG); - GenericTestUtils.setLogLevel(OMKeyCommitRequestWithFSO.LOG, Level.DEBUG); + GenericTestUtils.setLogLevel(BlockManagerImpl.LOG, Level.DEBUG); + GenericTestUtils.setLogLevel(AbstractDatanodeStore.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(BlockOutputStream.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(BlockInputStream.LOG, Level.DEBUG); + GenericTestUtils.setLogLevel(KeyValueHandler.LOG, Level.DEBUG); } @AfterAll @@ -548,7 +550,8 @@ static void runTestHSync(FileSystem fs, Path file, break; } for (int i = 0; i < n; i++) { - assertEquals(data[offset + i], buffer[i]); + assertEquals(data[offset + i], buffer[i], + "expected at offset " + offset + " i=" + i); } offset += n; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java index 4b45bb5fa0d1..64029b05180e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java @@ -59,8 +59,11 @@ import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeoutException; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_READ_TIMEOUT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_WAIT_BETWEEN_RETRIES_MILLIS_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.FORCE_LEASE_RECOVERY_ENV; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_ROOT; @@ -116,9 +119,11 @@ public void init() throws IOException, InterruptedException, conf.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name()); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); 
conf.set(OzoneConfigKeys.OZONE_OM_LEASE_SOFT_LIMIT, "0s"); // make sure flush will write data to DN conf.setBoolean("ozone.client.stream.buffer.flush.delay", false); + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) .setChunkSize(chunkSize) @@ -131,7 +136,6 @@ public void init() throws IOException, InterruptedException, cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) - .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); @@ -190,6 +194,42 @@ public void testRecovery(int dataSize) throws Exception { verifyData(data, dataSize * 2, file, fs); } + @Test + public void testRecoveryWithoutHsyncHflushOnLastBlock() throws Exception { + RootedOzoneFileSystem fs = (RootedOzoneFileSystem)FileSystem.get(conf); + + int blockSize = (int) cluster.getOzoneManager().getConfiguration().getStorageSize( + OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES); + + final byte[] data = getData(blockSize / 2 + 1); + + final FSDataOutputStream stream = fs.create(file, true); + try { + stream.write(data); + stream.hsync(); + assertFalse(fs.isFileClosed(file)); + + // It will write into new block as well + // Don't do hsync/flush + stream.write(data); + + int count = 0; + while (count++ < 15 && !fs.recoverLease(file)) { + Thread.sleep(1000); + } + // The lease should have been recovered. + assertTrue(fs.isFileClosed(file), "File should be closed"); + + // A second call to recoverLease should succeed too. + assertTrue(fs.recoverLease(file)); + } finally { + closeIgnoringKeyNotFound(stream); + } + + // open it again, make sure the data is correct + verifyData(data, blockSize / 2 + 1, file, fs); + } + @Test public void testOBSRecoveryShouldFail() throws Exception { // Set the fs.defaultFS diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java index c6893c57e969..78c4bf4961d9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java @@ -19,6 +19,7 @@ package org.apache.hadoop.fs.ozone; import java.io.BufferedInputStream; +import java.io.EOFException; import java.io.File; import java.io.FileInputStream; import java.io.IOException; @@ -50,14 +51,18 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.assertj.core.api.Assertions; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import static org.apache.hadoop.hdds.StringUtils.string2Bytes; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; /** * Test OzoneFSInputStream by reading through multiple interfaces. 
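
The new testRecoveryWithoutHsyncHflushOnLastBlock above covers lease recovery when the last block was written but never hsync'ed: recoverLease may need several attempts while the previous writer's lease is revoked, so the test polls it before asserting the file is closed. A trimmed sketch of that polling pattern, assuming the recoverLease/isFileClosed methods on RootedOzoneFileSystem exactly as the test uses them:

```java
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.ozone.RootedOzoneFileSystem;

final class LeaseRecoverySketch {
  private LeaseRecoverySketch() { }

  // Poll recoverLease until it reports success, giving up after roughly 15 seconds.
  static boolean recoverWithRetries(RootedOzoneFileSystem fs, Path file)
      throws Exception {
    int attempts = 0;
    while (attempts++ < 15 && !fs.recoverLease(file)) {
      Thread.sleep(1000);
    }
    return fs.isFileClosed(file);
  }
}
```
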
@@ -162,6 +167,124 @@ public void testO3FSSingleByteRead() throws IOException { } } + @Test + public void testByteBufferPositionedRead() throws IOException { + try (FSDataInputStream inputStream = fs.open(filePath)) { + int bufferCapacity = 20; + ByteBuffer buffer = ByteBuffer.allocate(bufferCapacity); + long currentPos = inputStream.getPos(); + // Read positional data from 50th index + int position = 50; + int readBytes = inputStream.read(position, buffer); + + // File position should not be changed after positional read + assertEquals(currentPos, inputStream.getPos()); + // Total read bytes should be equal to bufferCapacity + // As file has more data than bufferCapacity + assertEquals(readBytes, bufferCapacity); + byte[] value1 = new byte[readBytes]; + System.arraycopy(buffer.array(), 0, value1, 0, readBytes); + byte[] value2 = new byte[readBytes]; + System.arraycopy(data, position, value2, 0, readBytes); + // Verify input and positional read data + assertArrayEquals(value1, value2, "value mismatch"); + buffer.clear(); + + // Read positional from 8th index again using same inputStream + position = 8; + readBytes = inputStream.read(position, buffer); + assertEquals(currentPos, inputStream.getPos()); + assertEquals(readBytes, bufferCapacity); + byte[] value3 = new byte[readBytes]; + System.arraycopy(buffer.array(), 0, value3, 0, readBytes); + byte[] value4 = new byte[readBytes]; + System.arraycopy(data, position, value4, 0, readBytes); + // Verify input and positional read data + assertArrayEquals(value3, value4, "value mismatch"); + + // Buffer size more than actual data, still read should succeed + ByteBuffer buffer1 = ByteBuffer.allocate(30 * 1024 * 1024 * 2); + // Read positional from 12th index + position = 12; + readBytes = inputStream.read(position, buffer1); + assertEquals(currentPos, inputStream.getPos()); + // Total read bytes should be (total file bytes - position) as buffer is not filled completely + assertEquals(readBytes, 30 * 1024 * 1024 - position); + + byte[] value5 = new byte[readBytes]; + System.arraycopy(buffer1.array(), 0, value5, 0, readBytes); + byte[] value6 = new byte[readBytes]; + System.arraycopy(data, position, value6, 0, readBytes); + // Verify input and positional read data + assertArrayEquals(value5, value6, "value mismatch"); + } + } + + @ParameterizedTest + @ValueSource(ints = { -1, 30 * 1024 * 1024, 30 * 1024 * 1024 + 1 }) + public void testByteBufferPositionedReadWithInvalidPosition(int position) throws IOException { + try (FSDataInputStream inputStream = fs.open(filePath)) { + long currentPos = inputStream.getPos(); + ByteBuffer buffer = ByteBuffer.allocate(20); + assertEquals(-1, inputStream.read(position, buffer)); + // File position should not be changed + assertEquals(currentPos, inputStream.getPos()); + } + } + + @Test + public void testByteBufferPositionedReadFully() throws IOException { + try (FSDataInputStream inputStream = fs.open(filePath)) { + int bufferCapacity = 20; + long currentPos = inputStream.getPos(); + ByteBuffer buffer = ByteBuffer.allocate(bufferCapacity); + // Read positional data from 50th index + int position = 50; + inputStream.readFully(position, buffer); + // File position should not be changed after positional readFully + assertEquals(currentPos, inputStream.getPos()); + // Make sure buffer is full after readFully + Assertions.assertThat((!buffer.hasRemaining())); + + byte[] value1 = new byte[bufferCapacity]; + System.arraycopy(buffer.array(), 0, value1, 0, bufferCapacity); + byte[] value2 = new byte[bufferCapacity]; + 
System.arraycopy(data, position, value2, 0, bufferCapacity); + // Verify input and positional read data + assertArrayEquals(value1, value2, "value mismatch"); + buffer.clear(); + + // Read positional from 8th index again using same inputStream + position = 8; + inputStream.readFully(position, buffer); + assertEquals(currentPos, inputStream.getPos()); + Assertions.assertThat(buffer.hasRemaining()).isFalse(); + byte[] value3 = new byte[bufferCapacity]; + System.arraycopy(buffer.array(), 0, value3, 0, bufferCapacity); + byte[] value4 = new byte[bufferCapacity]; + System.arraycopy(data, position, value4, 0, bufferCapacity); + // Verify input and positional read data + assertArrayEquals(value3, value4, "value mismatch"); + + // Buffer size is more than actual data, readFully should fail in this case + ByteBuffer buffer1 = ByteBuffer.allocate(30 * 1024 * 1024 * 2); + assertThrows(EOFException.class, () -> inputStream.readFully(12, buffer1)); + assertEquals(currentPos, inputStream.getPos()); + } + } + + @ParameterizedTest + @ValueSource(ints = { -1, 30 * 1024 * 1024, 30 * 1024 * 1024 + 1 }) + public void testByteBufferPositionedReadFullyWithInvalidPosition(int position) throws IOException { + try (FSDataInputStream inputStream = fs.open(filePath)) { + long currentPos = inputStream.getPos(); + ByteBuffer buffer = ByteBuffer.allocate(20); + assertThrows(EOFException.class, () -> inputStream.readFully(position, buffer)); + // File position should not be changed + assertEquals(currentPos, inputStream.getPos()); + } + } + @Test public void testO3FSMultiByteRead() throws IOException { try (FSDataInputStream inputStream = fs.open(filePath)) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java index 6dccd604208f..a41dcd80acdc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.ozone; +import javax.xml.bind.DatatypeConverter; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileAlreadyExistsException; @@ -48,6 +49,7 @@ import java.io.FileNotFoundException; import java.net.URI; +import java.security.MessageDigest; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -57,6 +59,8 @@ import java.util.Map; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; +import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; import static org.assertj.core.api.Assertions.assertThat; @@ -293,10 +297,13 @@ public void testMPUFailDuetoDirectoryCreationBeforeComplete() // This should succeed, as we check during creation of part or during // complete MPU.
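Note: the next hunk stores an MD5-based ETag in the part stream's metadata before close and keys the parts map by getETag() instead of getPartName(). A minimal, standalone sketch of that ETag recipe; the part payload below is made up, and ETAG/MD5_HASH are the OzoneConsts constants imported in this hunk:

  import java.nio.charset.StandardCharsets;
  import java.security.MessageDigest;
  import javax.xml.bind.DatatypeConverter;

  import static org.apache.hadoop.ozone.OzoneConsts.ETAG;
  import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH;

  public final class EtagSketch {
    public static void main(String[] args) throws Exception {
      byte[] partPayload = "hypothetical part payload".getBytes(StandardCharsets.UTF_8);
      // Hex-encoded, lower-cased MD5 of the part content, as set on the key metadata.
      String eTag = DatatypeConverter
          .printHexBinary(MessageDigest.getInstance(MD5_HASH).digest(partPayload))
          .toLowerCase();
      System.out.println(ETAG + "=" + eTag);
    }
  }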
+ ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(MessageDigest.getInstance(MD5_HASH) + .digest(b)).toLowerCase()); ozoneOutputStream.close(); Map partsMap = new HashMap<>(); - partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName()); + partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getETag()); // Should fail, as we have directory with same name. OMException ex = assertThrows(OMException.class, () -> ozoneBucket.completeMultipartUpload(keyName, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java index 2a6c8c456b9c..059f7b3e03d3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java @@ -44,7 +44,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLED; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; @@ -82,11 +83,12 @@ public static void init() throws Exception { final int blockSize = 2 * maxFlushSize; final BucketLayout layout = BucketLayout.FILE_SYSTEM_OPTIMIZED; - CONF.setBoolean(DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); + CONF.setBoolean(HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); CONF.setBoolean(OZONE_FS_DATASTREAM_ENABLED, true); CONF.set(OZONE_FS_DATASTREAM_AUTO_THRESHOLD, AUTO_THRESHOLD + "B"); CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, true); CONF.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name()); + CONF.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) @@ -100,7 +102,6 @@ public static void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(CONF) .setNumDatanodes(5) - .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java index 47dc9ac0c3ba..4f14ede8fa52 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java @@ -72,7 +72,7 @@ public class TestOzoneFsHAURLs { TestOzoneFsHAURLs.class); private OzoneConfiguration conf; - private static MiniOzoneCluster cluster; + private static MiniOzoneHAClusterImpl cluster; private static String omServiceId; private static OzoneManager om; private static int numOfOMs; @@ -107,12 +107,11 @@ static void initClass(@TempDir File tempDir) throws Exception { conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); // Start the cluster - cluster = MiniOzoneCluster.newOMHABuilder(conf) - .setNumDatanodes(5) - .setTotalPipelineNumLimit(3) - .setOMServiceId(omServiceId) + 
MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) - .build(); + .setNumDatanodes(5); + cluster = builder.build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(omServiceId, conf); @@ -161,8 +160,7 @@ public static void shutdown() { * @return the leader OM's RPC address in the MiniOzoneHACluster */ private String getLeaderOMNodeAddr() { - MiniOzoneHAClusterImpl haCluster = (MiniOzoneHAClusterImpl) cluster; - OzoneManager omLeader = haCluster.getOMLeader(); + OzoneManager omLeader = cluster.getOMLeader(); assertNotNull(omLeader, "There should be a leader OM at this point."); String omNodeId = omLeader.getOMNodeId(); // omLeaderAddrKey=ozone.om.address.omServiceId.omNodeId diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java index ae6a24a910cf..8e0bd1ac7deb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java @@ -24,12 +24,14 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.concurrent.TimeUnit; import java.util.stream.Stream; +import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.om.KeyManagerImpl; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -47,9 +49,11 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -66,12 +70,13 @@ class TestOzoneFsSnapshot { private static final String OM_SERVICE_ID = "om-service-test1"; private static OzoneManager ozoneManager; private static OzoneFsShell shell; + private static AtomicInteger counter = new AtomicInteger(); private static final String VOLUME = - "vol-" + RandomStringUtils.randomNumeric(5); + "vol-" + counter.incrementAndGet(); private static final String BUCKET = - "buck-" + RandomStringUtils.randomNumeric(5); + "buck-" + counter.incrementAndGet(); private static final String KEY = - "key-" + RandomStringUtils.randomNumeric(5); + "key-" + counter.incrementAndGet(); private static final String BUCKET_PATH = OM_KEY_PREFIX + VOLUME + OM_KEY_PREFIX + BUCKET; private static final String BUCKET_WITH_SNAPSHOT_INDICATOR_PATH = @@ -84,9 +89,11 @@ static void initClass() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); // Enable filesystem snapshot feature 
for the test regardless of the default conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); + conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); + conf.setInt(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, KeyManagerImpl.DISABLE_VALUE); // Start the cluster - cluster = MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(OM_SERVICE_ID) .setNumOfOzoneManagers(1) .build(); @@ -128,7 +135,7 @@ private static void createVolBuckKey() @Test void testCreateSnapshotDuplicateName() throws Exception { - String snapshotName = "snap-" + RandomStringUtils.randomNumeric(5); + String snapshotName = "snap-" + counter.incrementAndGet(); int res = ToolRunner.run(shell, new String[]{"-createSnapshot", BUCKET_PATH, snapshotName}); @@ -152,7 +159,7 @@ void testCreateSnapshotWithSubDirInput() throws Exception { // rather than: // Created snapshot ofs://om/vol1/buck2/dir3/.snapshot/snap1 - String snapshotName = "snap-" + RandomStringUtils.randomNumeric(5); + String snapshotName = "snap-" + counter.incrementAndGet(); String dirPath = BUCKET_PATH + "/dir1/"; @@ -257,7 +264,7 @@ void testCreateSnapshotFailure(String description, */ @Test void testFsLsSnapshot(@TempDir Path tempDir) throws Exception { - String key1 = "key-" + RandomStringUtils.randomNumeric(5); + String key1 = "key-" + counter.incrementAndGet(); String newKeyPath = BUCKET_PATH + OM_KEY_PREFIX + key1; // Pause SnapshotDeletingService so that Snapshot marked deleted is not reclaimed. ozoneManager.getKeyManager().getSnapshotDeletingService().suspend(); @@ -274,7 +281,7 @@ void testFsLsSnapshot(@TempDir Path tempDir) throws Exception { String snapshotPath1 = BUCKET_WITH_SNAPSHOT_INDICATOR_PATH + OM_KEY_PREFIX + snapshotName1; - String key2 = "key-" + RandomStringUtils.randomNumeric(5); + String key2 = "key-" + counter.incrementAndGet(); String newKeyPath2 = BUCKET_PATH + OM_KEY_PREFIX + key2; execShellCommandAndGetOutput(0, new String[]{"-put", tempFile.toString(), newKeyPath2}); @@ -413,6 +420,64 @@ void testSnapshotDeleteFailure(String description, assertThat(errorMessage).contains(expectedMessage); } + @Test + public void testSnapshotReuseSnapName() throws Exception { + String key1 = "key-" + counter.incrementAndGet(); + int res = ToolRunner.run(shell, new String[]{"-touch", + BUCKET_PATH + OM_KEY_PREFIX + key1}); + assertEquals(0, res); + + String snap1 = "snap" + counter.incrementAndGet(); + res = ToolRunner.run(shell, + new String[]{"-createSnapshot", BUCKET_PATH, snap1}); + // Asserts that create request succeeded + assertEquals(0, res); + + String listSnapOut = execShellCommandAndGetOutput(0, + new String[]{"-ls", BUCKET_WITH_SNAPSHOT_INDICATOR_PATH + OM_KEY_PREFIX + snap1}); + assertThat(listSnapOut).contains(key1); + + res = ToolRunner.run(shell, + new String[]{"-deleteSnapshot", BUCKET_PATH, snap1}); + // Asserts that delete request succeeded + assertEquals(0, res); + + GenericTestUtils.waitFor(() -> { + try { + return !ozoneManager.getMetadataManager().getSnapshotInfoTable() + .isExist(SnapshotInfo.getTableKey(VOLUME, BUCKET, snap1)); + } catch (IOException e) { + throw new RuntimeException(e); + } + }, 200, 10000); + + String key2 = "key-" + counter.incrementAndGet(); + res = ToolRunner.run(shell, new String[]{"-touch", + BUCKET_PATH + OM_KEY_PREFIX + key2}); + assertEquals(0, res); + String snap2 = "snap" + counter.incrementAndGet(); + res = ToolRunner.run(shell, + new String[]{"-createSnapshot", BUCKET_PATH, snap2}); + // Asserts that 
create request succeeded + assertEquals(0, res); + + String key3 = "key-" + counter.incrementAndGet(); + res = ToolRunner.run(shell, new String[]{"-touch", + BUCKET_PATH + OM_KEY_PREFIX + key3}); + assertEquals(0, res); + + res = ToolRunner.run(shell, + new String[]{"-createSnapshot", BUCKET_PATH, snap1}); + // Asserts that create request succeeded + assertEquals(0, res); + + listSnapOut = execShellCommandAndGetOutput(0, + new String[]{"-ls", BUCKET_WITH_SNAPSHOT_INDICATOR_PATH + OM_KEY_PREFIX + snap1}); + assertThat(listSnapOut).contains(key1); + assertThat(listSnapOut).contains(key2); + assertThat(listSnapOut).contains(key3); + } + /** * Execute a shell command with provided arguments * and return a string of the output. @@ -453,7 +518,7 @@ private String execShellCommandAndGetOutput( } private String createSnapshot() throws Exception { - String snapshotName = "snap-" + RandomStringUtils.randomNumeric(5); + String snapshotName = "snap-" + counter.incrementAndGet(); // Create snapshot int res = ToolRunner.run(shell, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java index 074a8e7df4ba..de3358685ec4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java @@ -47,9 +47,9 @@ import org.slf4j.LoggerFactory; import java.io.FileNotFoundException; -import java.io.IOException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; import static org.apache.hadoop.fs.ozone.TestDirectoryDeletingServiceWithFSO.assertSubPathsCount; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; @@ -58,10 +58,10 @@ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.fail; /** * Directory deletion service test cases using rooted ozone filesystem @@ -128,15 +128,13 @@ public static void teardown() { @AfterEach public void cleanup() { - try { + assertDoesNotThrow(() -> { Path root = new Path("/"); FileStatus[] fileStatuses = fs.listStatus(root); for (FileStatus fileStatus : fileStatuses) { fs.delete(fileStatus.getPath(), false); } - } catch (IOException ex) { - fail("Failed to cleanup files."); - } + }); } @Test @@ -227,16 +225,13 @@ private void assertTableRowCount(Table table, int count) private boolean assertTableRowCount(int expectedCount, Table table) { - long count = 0L; - try { - count = cluster.getOzoneManager().getMetadataManager() - .countRowsInTable(table); + AtomicLong count = new AtomicLong(0L); + assertDoesNotThrow(() -> { + count.set(cluster.getOzoneManager().getMetadataManager().countRowsInTable(table)); LOG.info("{} actual row count={}, expectedCount={}", table.getName(), - count, expectedCount); - } catch (IOException ex) { - fail("testDoubleBuffer failed with: " + ex); - } - return count == expectedCount; + count.get(), expectedCount); + }); + return 
count.get() == expectedCount; } private static BucketLayout getFSOBucketLayout() { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestSafeMode.java index ef46ec99d717..76685169011b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestSafeMode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestSafeMode.java @@ -62,7 +62,7 @@ class TestSafeMode { static void setup() { OzoneConfiguration conf = new OzoneConfiguration(); clusterProvider = new MiniOzoneClusterProvider( - conf, MiniOzoneCluster.newBuilder(conf), 2); + MiniOzoneCluster.newBuilder(conf), 2); } @BeforeEach diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerSmallFile.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerSmallFile.java index 30c4e4cd5b4d..5dab271d9edb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerSmallFile.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerSmallFile.java @@ -31,6 +31,8 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.SCMTestUtils; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -188,6 +190,23 @@ public void testReadWriteWithBCSId() throws Exception { assertEquals("data123", readData); xceiverClientManager.releaseClient(client, false); } + + @Test + public void testEcho() throws Exception { + ContainerWithPipeline container = + storageContainerLocationClient.allocateContainer( + SCMTestUtils.getReplicationType(ozoneConfig), + HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE); + XceiverClientSpi client = xceiverClientManager + .acquireClient(container.getPipeline()); + ContainerProtocolCalls.createContainer(client, + container.getContainerInfo().getContainerID(), null); + ByteString byteString = UnsafeByteOperations.unsafeWrap(new byte[0]); + ContainerProtos.EchoResponseProto response = + ContainerProtocolCalls.echo(client, "", container.getContainerInfo().getContainerID(), byteString, 1, 0); + assertEquals(1, response.getPayload().size()); + xceiverClientManager.releaseClient(client, false); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java index 688d13ad361b..9db501edb721 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java @@ -84,7 +84,7 @@ public void init() throws Exception { conf.setLong(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD, SNAPSHOT_THRESHOLD); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId).setNumOfOzoneManagers(numOfOMs) .setNumOfStorageContainerManagers(numOfSCMs).setNumOfActiveSCMs(3) diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java index 6f0bd40dde0e..2829ba234ca0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java @@ -32,7 +32,7 @@ import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; import org.apache.ozone.test.GenericTestUtils; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -132,7 +132,7 @@ public void testLeaderIdAfterLeaderChange() throws Exception { dnToStop.get().stop(); // wait long enough based on leader election min timeout Thread.sleep(4000 * conf.getTimeDuration( - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 5, TimeUnit.SECONDS)); GenericTestUtils.waitFor(() -> { try { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java index 90f8375f829b..4ac44315556c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java @@ -85,9 +85,9 @@ public void setup() throws Exception { StaticMapping.addNodeToRack(NetUtils.normalizeHostNames( Collections.singleton(HddsUtils.getHostName(conf))).get(0), "/rack1"); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(4) - .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); metrics = getMetrics(SCMContainerPlacementMetrics.class.getSimpleName()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java index 0aa2599637a9..10492736144b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java @@ -94,7 +94,7 @@ public void init() throws Exception { conf.setLong(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD, SNAPSHOT_THRESHOLD); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId) .setNumOfOzoneManagers(numOfOMs) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java index f7a3aa9c9b7b..4cfc64cd4f50 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java @@ -118,15 +118,16 @@ public void init() throws Exception { conf.set(HDDS_SECRET_KEY_ROTATE_DURATION, ROTATE_DURATION_MS + "ms"); conf.set(HDDS_SECRET_KEY_EXPIRY_DURATION, EXPIRY_DURATION_MS + "ms"); - MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder .setSCMServiceId("TestSecretKeySnapshot") .setSCMServiceId("SCMServiceId") - .setNumDatanodes(1) .setNumOfStorageContainerManagers(3) .setNumOfActiveSCMs(2) - .setNumOfOzoneManagers(1); + .setNumOfOzoneManagers(1) + .setNumDatanodes(1); - cluster = (MiniOzoneHAClusterImpl) builder.build(); + cluster = builder.build(); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java index eb2442cd0988..6af43c3bacde 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java @@ -326,13 +326,12 @@ public void testSecretKeyWithoutAuthorization() throws Exception { private void startCluster(int numSCMs) throws IOException, TimeoutException, InterruptedException { OzoneManager.setTestSecureOmFlag(true); - MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId("TestSecretKey") - .setNumDatanodes(3) .setNumOfStorageContainerManagers(numSCMs) .setNumOfOzoneManagers(1); - cluster = (MiniOzoneHAClusterImpl) builder.build(); + cluster = builder.build(); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java index e973c842de44..474a18694854 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java @@ -20,8 +20,6 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import java.util.concurrent.ConcurrentHashMap; -import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.HddsUtils; @@ -64,20 +62,30 @@ import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.hdds.server.events.FixedThreadPoolWithAffinityExecutor; import org.apache.hadoop.hdds.utils.HddsVersionInfo; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.net.DNSToSwitchMapping; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.StaticMapping; +import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.OzoneTestUtils; +import org.apache.hadoop.ozone.TestDataUtil; import 
org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.ozone.HddsDatanodeService; +import org.apache.hadoop.ozone.container.common.helpers.BlockData; +import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; import org.apache.hadoop.ozone.container.common.states.endpoint.HeartbeatEndpointTask; import org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; +import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; +import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition; +import org.apache.hadoop.ozone.container.metadata.DatanodeStore; +import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl; +import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; @@ -94,11 +102,12 @@ import org.apache.ratis.conf.RaftProperties; import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.server.RaftServerConfigKeys; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; import org.mockito.ArgumentMatcher; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -106,11 +115,9 @@ import java.io.File; import java.io.IOException; import java.nio.file.Path; -import java.nio.file.Paths; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -119,6 +126,7 @@ import java.util.Set; import java.util.UUID; import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Semaphore; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; @@ -128,14 +136,16 @@ import java.util.stream.Stream; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils.setInternalState; import static org.apache.hadoop.hdds.scm.HddsTestUtils.mockRemoteUser; +import static org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils.setInternalState; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; +import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; 
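Note: the TestStorageContainerManager hunks further down drop manual cluster.shutdown() calls in finally blocks in favour of try-with-resources, which assumes MiniOzoneCluster is AutoCloseable (as those hunks imply). A minimal sketch of the lifecycle pattern, with the test body elided:

  import org.apache.hadoop.hdds.conf.OzoneConfiguration;
  import org.apache.hadoop.ozone.MiniOzoneCluster;

  final class ClusterLifecycleSketch {
    void runAgainstFreshCluster() throws Exception {
      OzoneConfiguration conf = new OzoneConfiguration();
      // close() runs automatically when the block exits, even on assertion failure.
      try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
          .setNumDatanodes(1)
          .build()) {
        cluster.waitForClusterToBeReady();
        // ... exercise SCM/OM through cluster here ...
      }
    }
  }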
import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; @@ -144,7 +154,6 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.any; import static org.mockito.Mockito.argThat; import static org.mockito.Mockito.doAnswer; @@ -160,7 +169,7 @@ public class TestStorageContainerManager { private static XceiverClientManager xceiverClientManager; private static final Logger LOG = LoggerFactory.getLogger( - TestStorageContainerManager.class); + TestStorageContainerManager.class); @BeforeAll public static void setup() throws IOException { @@ -199,16 +208,13 @@ private void testRpcPermissionWithConf( OzoneConfiguration ozoneConf, Predicate isAdmin, String... usernames) throws Exception { - MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(ozoneConf).build(); - try { + try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(ozoneConf).build()) { cluster.waitForClusterToBeReady(); for (String username : usernames) { testRpcPermission(cluster, username, !isAdmin.test(username)); } - } finally { - cluster.shutdown(); - } + } // The cluster is automatically closed here } private void testRpcPermission(MiniOzoneCluster cluster, @@ -227,17 +233,17 @@ private void testRpcPermission(MiniOzoneCluster cluster, assertInstanceOf(ContainerNotFoundException.class, ex); } - try { - ContainerWithPipeline container2 = mockClientServer.allocateContainer( - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE); - if (expectPermissionDenied) { - fail("Operation should fail, expecting an IOException here."); - } else { - assertEquals(1, container2.getPipeline().getNodes().size()); - } - } catch (Exception e) { - verifyPermissionDeniedException(e, fakeRemoteUsername); + if (expectPermissionDenied) { + Exception allocateException = assertThrows(Exception.class, () -> + mockClientServer.allocateContainer(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE)); + verifyPermissionDeniedException(allocateException, fakeRemoteUsername); + } else { + // If not expecting permission denied, validate the successful operation's result + ContainerWithPipeline container2 = assertDoesNotThrow(() -> + mockClientServer.allocateContainer(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE)); + assertEquals(1, container2.getPipeline().getNodes().size()); } Exception e = assertThrows(Exception.class, () -> mockClientServer.getContainer( @@ -290,20 +296,14 @@ public void testBlockDeletionTransactions() throws Exception { conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, numKeys); - MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(50) - .build(); - cluster.waitForClusterToBeReady(); - - try { + try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) + .build()) { + cluster.waitForClusterToBeReady(); DeletedBlockLog delLog = cluster.getStorageContainerManager() .getScmBlockManager().getDeletedBlockLog(); assertEquals(0, delLog.getNumOfValidTransactions()); - // Create {numKeys} random names keys. 
- TestStorageContainerManagerHelper helper = - new TestStorageContainerManagerHelper(cluster, conf); - Map keyLocations = helper.createKeys(numKeys, 4096); + Map keyLocations = TestDataUtil.createKeys(cluster, numKeys); // Wait for container report Thread.sleep(1000); for (OmKeyInfo keyInfo : keyLocations.values()) { @@ -312,7 +312,7 @@ public void testBlockDeletionTransactions() throws Exception { } Map> containerBlocks = createDeleteTXLog( cluster.getStorageContainerManager(), - delLog, keyLocations, helper); + delLog, keyLocations, cluster, conf); // Verify a few TX gets created in the TX log. assertThat(delLog.getNumOfValidTransactions()).isGreaterThan(0); @@ -333,7 +333,7 @@ public void testBlockDeletionTransactions() throws Exception { return false; } }, 1000, 22000); - assertTrue(helper.verifyBlocksWithTxnTable(containerBlocks)); + assertTrue(verifyBlocksWithTxnTable(cluster, conf, containerBlocks)); // Continue the work, add some TXs that with known container names, // but unknown block IDs. for (Long containerID : containerBlocks.keySet()) { @@ -363,21 +363,21 @@ public void testBlockDeletionTransactions() throws Exception { return false; } }, 1000, 20000); - } finally { - cluster.shutdown(); } } @Test public void testOldDNRegistersToReInitialisedSCM() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - MiniOzoneCluster cluster = - MiniOzoneCluster.newBuilder(conf).setHbInterval(1000) - .setHbProcessorInterval(3000).setNumDatanodes(1) - .build(); - cluster.waitForClusterToBeReady(); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS); + conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 3000, TimeUnit.MILLISECONDS); + + - try { + try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(1) + .build()) { + cluster.waitForClusterToBeReady(); HddsDatanodeService datanode = cluster.getHddsDatanodes().get(0); StorageContainerManager scm = cluster.getStorageContainerManager(); scm.stop(); @@ -443,8 +443,6 @@ public void testOldDNRegistersToReInitialisedSCM() throws Exception { assertThat(versionEndPointTaskLog.getOutput()).contains( "org.apache.hadoop.ozone.common" + ".InconsistentStorageStateException: Mismatched ClusterIDs"); - } finally { - cluster.shutdown(); } } @@ -462,16 +460,14 @@ public void testBlockDeletingThrottling() throws Exception { conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, numKeys); conf.setBoolean(HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS); + conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 3000, TimeUnit.MILLISECONDS); - MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(1000) - .setHbProcessorInterval(3000) + try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(1) - .build(); - cluster.waitForClusterToBeReady(); - cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000); - - try { + .build()) { + cluster.waitForClusterToBeReady(); + cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000); DeletedBlockLog delLog = cluster.getStorageContainerManager() .getScmBlockManager().getDeletedBlockLog(); assertEquals(0, delLog.getNumOfValidTransactions()); @@ -483,10 +479,7 @@ public void testBlockDeletingThrottling() throws Exception { .getScmBlockManager().getSCMBlockDeletingService(); delService.setBlockDeleteTXNum(limitSize); - // Create {numKeys} random names 
keys. - TestStorageContainerManagerHelper helper = - new TestStorageContainerManagerHelper(cluster, conf); - Map keyLocations = helper.createKeys(numKeys, 4096); + Map keyLocations = TestDataUtil.createKeys(cluster, numKeys); // Wait for container report Thread.sleep(5000); for (OmKeyInfo keyInfo : keyLocations.values()) { @@ -495,7 +488,7 @@ public void testBlockDeletingThrottling() throws Exception { } createDeleteTXLog(cluster.getStorageContainerManager(), - delLog, keyLocations, helper); + delLog, keyLocations, cluster, conf); // Verify a few TX gets created in the TX log. assertThat(delLog.getNumOfValidTransactions()).isGreaterThan(0); @@ -516,16 +509,13 @@ public void testBlockDeletingThrottling() throws Exception { } return false; }, 500, 10000); - } finally { - cluster.shutdown(); } } private Map> createDeleteTXLog( StorageContainerManager scm, DeletedBlockLog delLog, - Map keyLocations, - TestStorageContainerManagerHelper helper) + Map keyLocations, MiniOzoneCluster cluster, OzoneConfiguration conf) throws IOException, TimeoutException { // These keys will be written into a bunch of containers, // gets a set of container names, verify container containerBlocks @@ -544,7 +534,7 @@ private Map> createDeleteTXLog( } assertThat(totalCreatedBlocks).isGreaterThan(0); assertEquals(totalCreatedBlocks, - helper.getAllBlocks(containerNames).size()); + getAllBlocks(cluster, conf, containerNames).size()); // Create a deletion TX for each key. Map> containerBlocks = Maps.newHashMap(); @@ -568,11 +558,9 @@ private Map> createDeleteTXLog( } @Test - public void testSCMInitialization() throws Exception { + public void testSCMInitialization(@TempDir Path tempDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - final String path = GenericTestUtils.getTempPath( - UUID.randomUUID().toString()); - Path scmPath = Paths.get(path, "scm-meta"); + Path scmPath = tempDir.resolve("scm-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); UUID clusterId = UUID.randomUUID(); @@ -590,13 +578,11 @@ public void testSCMInitialization() throws Exception { } @Test - public void testSCMInitializationWithHAEnabled() throws Exception { + public void testSCMInitializationWithHAEnabled(@TempDir Path tempDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s"); - final String path = GenericTestUtils.getTempPath( - UUID.randomUUID().toString()); - Path scmPath = Paths.get(path, "scm-meta"); + Path scmPath = tempDir.resolve("scm-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); final UUID clusterId = UUID.randomUUID(); @@ -608,54 +594,26 @@ public void testSCMInitializationWithHAEnabled() throws Exception { } @Test - public void testSCMReinitialization() throws Exception { + public void testSCMReinitialization(@TempDir Path tempDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - final String path = GenericTestUtils.getTempPath( - UUID.randomUUID().toString()); - Path scmPath = Paths.get(path, "scm-meta"); + Path scmPath = tempDir.resolve("scm-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); //This will set the cluster id in the version file - MiniOzoneCluster cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build(); - cluster.waitForClusterToBeReady(); - cluster.getStorageContainerManager().stop(); - try { + + + try (MiniOzoneCluster cluster = + 
MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build()) { + cluster.waitForClusterToBeReady(); + cluster.getStorageContainerManager().stop(); final UUID clusterId = UUID.randomUUID(); // This will initialize SCM StorageContainerManager.scmInit(conf, clusterId.toString()); SCMStorageConfig scmStore = new SCMStorageConfig(conf); assertNotEquals(clusterId.toString(), scmStore.getClusterID()); assertTrue(scmStore.isSCMHAEnabled()); - } finally { - cluster.shutdown(); } } - // Unsupported Test case. Non Ratis SCM -> Ratis SCM not supported - //@Test - public void testSCMReinitializationWithHAUpgrade() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - final String path = GenericTestUtils.getTempPath( - UUID.randomUUID().toString()); - Path scmPath = Paths.get(path, "scm-meta"); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); - //This will set the cluster id in the version file - final UUID clusterId = UUID.randomUUID(); - // This will initialize SCM - - StorageContainerManager.scmInit(conf, clusterId.toString()); - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - assertEquals(clusterId.toString(), scmStore.getClusterID()); - assertFalse(scmStore.isSCMHAEnabled()); - - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - StorageContainerManager.scmInit(conf, clusterId.toString()); - scmStore = new SCMStorageConfig(conf); - assertTrue(scmStore.isSCMHAEnabled()); - validateRatisGroupExists(conf, clusterId.toString()); - - } - @VisibleForTesting public static void validateRatisGroupExists(OzoneConfiguration conf, String clusterId) throws IOException { @@ -696,45 +654,10 @@ public static void validateRatisGroupExists(OzoneConfiguration conf, } } - // Non Ratis SCM -> Ratis SCM is not supported {@see HDDS-6695} - // Invalid Testcase - // @Test - public void testSCMReinitializationWithHAEnabled() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); - final String path = GenericTestUtils.getTempPath( - UUID.randomUUID().toString()); - Path scmPath = Paths.get(path, "scm-meta"); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); - //This will set the cluster id in the version file - MiniOzoneCluster cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build(); - cluster.waitForClusterToBeReady(); - try { - final String clusterId = - cluster.getStorageContainerManager().getClusterId(); - // validate there is no ratis group pre existing - assertThrows(IOException.class, () -> validateRatisGroupExists(conf, clusterId)); - - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - // This will re-initialize SCM - StorageContainerManager.scmInit(conf, clusterId); - cluster.getStorageContainerManager().start(); - // Ratis group with cluster id exists now - validateRatisGroupExists(conf, clusterId); - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - assertTrue(scmStore.isSCMHAEnabled()); - } finally { - cluster.shutdown(); - } - } - @Test - void testSCMInitializationFailure() { + void testSCMInitializationFailure(@TempDir Path tempDir) { OzoneConfiguration conf = new OzoneConfiguration(); - final String path = - GenericTestUtils.getTempPath(UUID.randomUUID().toString()); - Path scmPath = Paths.get(path, "scm-meta"); + Path scmPath = tempDir.resolve("scm-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); Exception e = assertThrows(SCMException.class, () -> HddsTestUtils.getScmSimple(conf)); 
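Note: the SCM initialization tests above and below now take a JUnit 5 @TempDir instead of building paths with GenericTestUtils.getTempPath and cleaning them up by hand. A minimal sketch of the injection pattern; the test class and assertion are illustrative only:

  import java.nio.file.Path;

  import org.apache.hadoop.hdds.HddsConfigKeys;
  import org.apache.hadoop.hdds.conf.OzoneConfiguration;
  import org.junit.jupiter.api.Test;
  import org.junit.jupiter.api.io.TempDir;

  import static org.junit.jupiter.api.Assertions.assertTrue;

  class TempDirSketch {
    @Test
    void scmMetaDirIsIsolated(@TempDir Path tempDir) {
      OzoneConfiguration conf = new OzoneConfiguration();
      // JUnit creates a unique directory per test and removes it afterwards,
      // so no FileUtils.deleteQuietly() cleanup is needed.
      conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempDir.resolve("scm-meta").toString());
      assertTrue(conf.get(HddsConfigKeys.OZONE_METADATA_DIRS).endsWith("scm-meta"));
    }
  }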
@@ -742,32 +665,27 @@ void testSCMInitializationFailure() { } @Test - public void testScmInfo() throws Exception { + public void testScmInfo(@TempDir Path tempDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - final String path = - GenericTestUtils.getTempPath(UUID.randomUUID().toString()); - try { - Path scmPath = Paths.get(path, "scm-meta"); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); - scmStore.setClusterId(clusterId); - scmStore.setScmId(scmId); - // writes the version file properties - scmStore.initialize(); - StorageContainerManager scm = HddsTestUtils.getScmSimple(conf); - //Reads the SCM Info from SCM instance - ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo(); - assertEquals(clusterId, scmInfo.getClusterId()); - assertEquals(scmId, scmInfo.getScmId()); - - String expectedVersion = HddsVersionInfo.HDDS_VERSION_INFO.getVersion(); - String actualVersion = scm.getSoftwareVersion(); - assertEquals(expectedVersion, actualVersion); - } finally { - FileUtils.deleteQuietly(new File(path)); - } + Path scmPath = tempDir.resolve("scm-meta"); + + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); + SCMStorageConfig scmStore = new SCMStorageConfig(conf); + String clusterId = UUID.randomUUID().toString(); + String scmId = UUID.randomUUID().toString(); + scmStore.setClusterId(clusterId); + scmStore.setScmId(scmId); + // writes the version file properties + scmStore.initialize(); + StorageContainerManager scm = HddsTestUtils.getScmSimple(conf); + //Reads the SCM Info from SCM instance + ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo(); + assertEquals(clusterId, scmInfo.getClusterId()); + assertEquals(scmId, scmInfo.getScmId()); + + String expectedVersion = HddsVersionInfo.HDDS_VERSION_INFO.getVersion(); + String actualVersion = scm.getSoftwareVersion(); + assertEquals(expectedVersion, actualVersion); } /** @@ -778,18 +696,16 @@ public void testScmProcessDatanodeHeartbeat() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, StaticMapping.class, DNSToSwitchMapping.class); - StaticMapping.addNodeToRack(NetUtils.normalizeHostNames( - Collections.singleton(HddsUtils.getHostName(conf))).get(0), + StaticMapping.addNodeToRack(NetUtils.normalizeHostName(HddsUtils.getHostName(conf)), "/rack1"); final int datanodeNum = 3; - MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(datanodeNum) - .build(); - cluster.waitForClusterToBeReady(); - StorageContainerManager scm = cluster.getStorageContainerManager(); - try { + try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(datanodeNum) + .build()) { + cluster.waitForClusterToBeReady(); + StorageContainerManager scm = cluster.getStorageContainerManager(); // first sleep 10s Thread.sleep(10000); // verify datanode heartbeats are well processed @@ -809,8 +725,6 @@ public void testScmProcessDatanodeHeartbeat() throws Exception { datanodeInfo.getNetworkName()); assertEquals("/rack1", datanodeInfo.getNetworkLocation()); } - } finally { - cluster.shutdown(); } } @@ -826,20 +740,16 @@ public void testCloseContainerCommandOnRestart() throws Exception { conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, numKeys); conf.setBoolean(HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); + 
conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS); + conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 3000, TimeUnit.MILLISECONDS); - MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(1000) - .setHbProcessorInterval(3000) + try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(1) - .build(); - cluster.waitForClusterToBeReady(); - cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000); - - try { - TestStorageContainerManagerHelper helper = - new TestStorageContainerManagerHelper(cluster, conf); + .build()) { + cluster.waitForClusterToBeReady(); + cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000); - helper.createKeys(10, 4096); + TestDataUtil.createKeys(cluster, 10); GenericTestUtils.waitFor(() -> cluster.getStorageContainerManager().getContainerManager() .getContainers() != null, 1000, 10000); @@ -905,8 +815,6 @@ public void testCloseContainerCommandOnRestart() throws Exception { } else { verify(nodeManager).addDatanodeCommand(dnUuid, closeContainerCommand); } - } finally { - cluster.shutdown(); } } @@ -926,7 +834,7 @@ public void testContainerReportQueueWithDrop() throws Exception { ContainerReportHandler containerReportHandler = mock(ContainerReportHandler.class); doAnswer((inv) -> { - Thread.currentThread().sleep(500); + Thread.sleep(500); return null; }).when(containerReportHandler).onMessage(dndata, eventQueue); List executors = FixedThreadPoolWithAffinityExecutor @@ -948,7 +856,7 @@ public void testContainerReportQueueWithDrop() throws Exception { eventQueue.fireEvent(SCMEvents.CONTAINER_REPORT, dndata); eventQueue.fireEvent(SCMEvents.CONTAINER_REPORT, dndata); assertThat(containerReportExecutors.droppedEvents()).isGreaterThan(1); - Thread.currentThread().sleep(1000); + Thread.sleep(1000); assertEquals(containerReportExecutors.droppedEvents() + containerReportExecutors.scheduledEvents(), containerReportExecutors.queuedEvents()); @@ -968,7 +876,7 @@ public void testContainerReportQueueTakingMoreTime() throws Exception { ContainerReportHandler containerReportHandler = mock(ContainerReportHandler.class); doAnswer((inv) -> { - Thread.currentThread().sleep(1000); + Thread.sleep(1000); semaphore.release(1); return null; }).when(containerReportHandler).onMessage(any(), eq(eventQueue)); @@ -987,7 +895,7 @@ public void testContainerReportQueueTakingMoreTime() throws Exception { reportExecutorMap); containerReportExecutors.setQueueWaitThreshold(800); containerReportExecutors.setExecWaitThreshold(800); - + eventQueue.addHandler(SCMEvents.CONTAINER_REPORT, containerReportExecutors, containerReportHandler); ContainerReportsProto report = ContainerReportsProto.getDefaultInstance(); @@ -1026,7 +934,7 @@ public void testIncrementalContainerReportQueue() throws Exception { IncrementalContainerReportHandler icr = mock(IncrementalContainerReportHandler.class); doAnswer((inv) -> { - Thread.currentThread().sleep(500); + Thread.sleep(500); return null; }).when(icr).onMessage(dndata, eventQueue); List executors = FixedThreadPoolWithAffinityExecutor @@ -1048,7 +956,7 @@ public void testIncrementalContainerReportQueue() throws Exception { eventQueue.fireEvent(SCMEvents.INCREMENTAL_CONTAINER_REPORT, dndata); eventQueue.fireEvent(SCMEvents.INCREMENTAL_CONTAINER_REPORT, dndata); assertEquals(0, containerReportExecutors.droppedEvents()); - Thread.currentThread().sleep(3000); + Thread.sleep(3000); assertEquals(containerReportExecutors.scheduledEvents(), 
containerReportExecutors.queuedEvents()); containerReportExecutors.close(); @@ -1072,13 +980,14 @@ public void testNonRatisToRatis() DefaultConfigManager.clearDefaultConfigs(); conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); StorageContainerManager.scmInit(conf, cluster.getClusterId()); + conf.setInt(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY, getFreePort()); + conf.unset(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY); cluster.restartStorageContainerManager(false); final StorageContainerManager ratisSCM = cluster .getStorageContainerManager(); assertNotNull(ratisSCM.getScmHAManager().getRatisServer()); assertTrue(ratisSCM.getScmStorageConfig().isSCMHAEnabled()); - } } @@ -1115,4 +1024,86 @@ public boolean matches(CommandForDatanode cmdRight) { && left.getProto().equals(right.getProto()); } } + + public List getAllBlocks(MiniOzoneCluster cluster, OzoneConfiguration conf, Set containerIDs) + throws IOException { + List allBlocks = Lists.newArrayList(); + for (Long containerID : containerIDs) { + allBlocks.addAll(getAllBlocks(cluster, conf, containerID)); + } + return allBlocks; + } + + public List getAllBlocks(MiniOzoneCluster cluster, + OzoneConfiguration conf, Long containerID) throws IOException { + List allBlocks = Lists.newArrayList(); + KeyValueContainerData cData = getContainerMetadata(cluster, containerID); + try (DBHandle db = BlockUtils.getDB(cData, conf)) { + + List> kvs = + db.getStore().getBlockDataTable() + .getRangeKVs(cData.startKeyEmpty(), Integer.MAX_VALUE, + cData.containerPrefix(), cData.getUnprefixedKeyFilter()); + + for (Table.KeyValue entry : kvs) { + allBlocks.add(Long.valueOf(DatanodeSchemaThreeDBDefinition + .getKeyWithoutPrefix(entry.getKey()))); + } + } + return allBlocks; + } + + public boolean verifyBlocksWithTxnTable(MiniOzoneCluster cluster, OzoneConfiguration conf, + Map> containerBlocks) + throws IOException { + for (Map.Entry> entry : containerBlocks.entrySet()) { + KeyValueContainerData cData = getContainerMetadata(cluster, entry.getKey()); + try (DBHandle db = BlockUtils.getDB(cData, conf)) { + DatanodeStore ds = db.getStore(); + DatanodeStoreSchemaThreeImpl dnStoreImpl = + (DatanodeStoreSchemaThreeImpl) ds; + List> + txnsInTxnTable = dnStoreImpl.getDeleteTransactionTable() + .getRangeKVs(cData.startKeyEmpty(), Integer.MAX_VALUE, + cData.containerPrefix()); + List conID = new ArrayList<>(); + for (Table.KeyValue txn : + txnsInTxnTable) { + conID.addAll(txn.getValue().getLocalIDList()); + } + if (!conID.equals(containerBlocks.get(entry.getKey()))) { + return false; + } + } + } + return true; + } + + private KeyValueContainerData getContainerMetadata(MiniOzoneCluster cluster, Long containerID) + throws IOException { + ContainerWithPipeline containerWithPipeline = cluster + .getStorageContainerManager().getClientProtocolServer() + .getContainerWithPipeline(containerID); + + DatanodeDetails dn = + containerWithPipeline.getPipeline().getFirstNode(); + OzoneContainer containerServer = + getContainerServerByDatanodeUuid(cluster, dn.getUuidString()); + KeyValueContainerData containerData = + (KeyValueContainerData) containerServer.getContainerSet() + .getContainer(containerID).getContainerData(); + return containerData; + } + + private OzoneContainer getContainerServerByDatanodeUuid(MiniOzoneCluster cluster, String dnUUID) + throws IOException { + for (HddsDatanodeService dn : cluster.getHddsDatanodes()) { + if (dn.getDatanodeDetails().getUuidString().equals(dnUUID)) { + return dn.getDatanodeStateMachine().getContainer(); + } + } + throw new 
IOException("Unable to get the ozone container " + + "for given datanode ID " + dnUUID); + } + } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java index e62820cfb1d0..2986484d2ad0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java @@ -95,7 +95,7 @@ public void init() throws Exception { conf.set(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_GAP, "1"); omServiceId = "om-service-test1"; scmServiceId = "scm-service-test1"; - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId) .setNumOfStorageContainerManagers(numOfSCMs) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHelper.java deleted file mode 100644 index 322b1e65bc68..000000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHelper.java +++ /dev/null @@ -1,198 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package org.apache.hadoop.hdds.scm; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.HddsDatanodeService; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.TestDataUtil; -import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition; -import org.apache.hadoop.ozone.container.metadata.DatanodeStore; -import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; - - -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; -import org.apache.commons.lang3.RandomStringUtils; - -/** - * A helper class used by {@link TestStorageContainerManager} to generate - * some keys and helps to verify containers and blocks locations. - */ -public class TestStorageContainerManagerHelper { - - private final MiniOzoneCluster cluster; - private final OzoneConfiguration conf; - - public TestStorageContainerManagerHelper(MiniOzoneCluster cluster, - OzoneConfiguration conf) throws IOException { - this.cluster = cluster; - this.conf = conf; - } - - public Map createKeys(int numOfKeys, int keySize) - throws Exception { - Map keyLocationMap = Maps.newHashMap(); - - try (OzoneClient client = cluster.newClient()) { - OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client); - // Write 20 keys in bucketName. 
- Set keyNames = Sets.newHashSet(); - for (int i = 0; i < numOfKeys; i++) { - String keyName = RandomStringUtils.randomAlphabetic(5) + i; - keyNames.add(keyName); - - TestDataUtil - .createKey(bucket, keyName, RandomStringUtils.randomAlphabetic(5)); - } - - for (String key : keyNames) { - OmKeyArgs arg = new OmKeyArgs.Builder() - .setVolumeName(bucket.getVolumeName()) - .setBucketName(bucket.getName()) - .setKeyName(key) - .build(); - OmKeyInfo location = cluster.getOzoneManager() - .lookupKey(arg); - keyLocationMap.put(key, location); - } - } - - return keyLocationMap; - } - - public List getPendingDeletionBlocks(Long containerID) - throws IOException { - List pendingDeletionBlocks = Lists.newArrayList(); - KeyValueContainerData cData = getContainerMetadata(containerID); - try (DBHandle db = BlockUtils.getDB(cData, conf)) { - KeyPrefixFilter filter = cData.getDeletingBlockKeyFilter(); - - List> kvs = - db.getStore().getBlockDataTable() - .getRangeKVs(cData.startKeyEmpty(), Integer.MAX_VALUE, - cData.containerPrefix(), filter); - - for (Table.KeyValue entry : kvs) { - pendingDeletionBlocks - .add(entry.getKey().replace(cData.getDeletingBlockKeyPrefix(), "")); - } - } - return pendingDeletionBlocks; - } - - public List getAllBlocks(Set containerIDs) - throws IOException { - List allBlocks = Lists.newArrayList(); - for (Long containerID : containerIDs) { - allBlocks.addAll(getAllBlocks(containerID)); - } - return allBlocks; - } - - public List getAllBlocks(Long containeID) throws IOException { - List allBlocks = Lists.newArrayList(); - KeyValueContainerData cData = getContainerMetadata(containeID); - try (DBHandle db = BlockUtils.getDB(cData, conf)) { - - List> kvs = - db.getStore().getBlockDataTable() - .getRangeKVs(cData.startKeyEmpty(), Integer.MAX_VALUE, - cData.containerPrefix(), cData.getUnprefixedKeyFilter()); - - for (Table.KeyValue entry : kvs) { - allBlocks.add(Long.valueOf(DatanodeSchemaThreeDBDefinition - .getKeyWithoutPrefix(entry.getKey()))); - } - } - return allBlocks; - } - - public boolean verifyBlocksWithTxnTable(Map> containerBlocks) - throws IOException { - for (Map.Entry> entry : containerBlocks.entrySet()) { - KeyValueContainerData cData = getContainerMetadata(entry.getKey()); - try (DBHandle db = BlockUtils.getDB(cData, conf)) { - DatanodeStore ds = db.getStore(); - DatanodeStoreSchemaThreeImpl dnStoreImpl = - (DatanodeStoreSchemaThreeImpl) ds; - List> - txnsInTxnTable = dnStoreImpl.getDeleteTransactionTable() - .getRangeKVs(cData.startKeyEmpty(), Integer.MAX_VALUE, - cData.containerPrefix()); - List conID = new ArrayList<>(); - for (Table.KeyValue txn : - txnsInTxnTable) { - conID.addAll(txn.getValue().getLocalIDList()); - } - if (!conID.equals(containerBlocks.get(entry.getKey()))) { - return false; - } - } - } - return true; - } - - private KeyValueContainerData getContainerMetadata(Long containerID) - throws IOException { - ContainerWithPipeline containerWithPipeline = cluster - .getStorageContainerManager().getClientProtocolServer() - .getContainerWithPipeline(containerID); - - DatanodeDetails dn = - containerWithPipeline.getPipeline().getFirstNode(); - OzoneContainer containerServer = - getContainerServerByDatanodeUuid(dn.getUuidString()); - KeyValueContainerData containerData = - (KeyValueContainerData) containerServer.getContainerSet() - .getContainer(containerID).getContainerData(); - return containerData; - } - - private OzoneContainer getContainerServerByDatanodeUuid(String dnUUID) - throws IOException { - for (HddsDatanodeService dn : 
cluster.getHddsDatanodes()) { - if (dn.getDatanodeDetails().getUuidString().equals(dnUUID)) { - return dn.getDatanodeStateMachine().getContainer(); - } - } - throw new IOException("Unable to get the ozone container " - + "for given datanode ID " + dnUUID); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java index fb312dfb5096..99095f55b008 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java @@ -20,13 +20,17 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; + +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; @@ -71,8 +75,8 @@ public void setup() { RatisReplicationConfig.getInstance(ReplicationFactor.THREE)) .setState(Pipeline.PipelineState.CLOSED) .setNodes(dns) + .setNodesInOrder(dnsInOrder) .build(); - pipeline.setNodesInOrder(dnsInOrder); } @Test @@ -174,6 +178,39 @@ public XceiverClientReply sendCommandAsync( assertEquals(1, seenDNs.size()); } + @Test + public void testPrimaryReadFromNormalDatanode() + throws IOException { + final List seenDNs = new ArrayList<>(); + for (int i = 0; i < 100; i++) { + Pipeline randomPipeline = MockPipeline.createRatisPipeline(); + int nodeCount = randomPipeline.getNodes().size(); + assertThat(nodeCount).isGreaterThan(1); + randomPipeline.getNodes().forEach( + node -> assertEquals(NodeOperationalState.IN_SERVICE, node.getPersistedOpState())); + + randomPipeline.getNodes().get( + RandomUtils.nextInt(0, nodeCount)).setPersistedOpState(NodeOperationalState.IN_MAINTENANCE); + randomPipeline.getNodes().get( + RandomUtils.nextInt(0, nodeCount)).setPersistedOpState(NodeOperationalState.IN_MAINTENANCE); + try (XceiverClientGrpc client = new XceiverClientGrpc(randomPipeline, conf) { + @Override + public XceiverClientReply sendCommandAsync( + ContainerProtos.ContainerCommandRequestProto request, + DatanodeDetails dn) { + seenDNs.add(dn); + return buildValidResponse(); + } + }) { + invokeXceiverClientGetBlock(client); + } catch (IOException e) { + e.printStackTrace(); + } + // Always the IN_SERVICE datanode will be read first + assertEquals(NodeOperationalState.IN_SERVICE, seenDNs.get(0).getPersistedOpState()); + } + } + @Test public void testConnectionReusedAfterGetBlock() throws IOException { // With a new Client, make 100 calls. 
On each call, ensure that only one diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java index 5ebf9b56a8ec..fb4cb3ba4cdd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java @@ -152,7 +152,7 @@ public static void init() { MiniOzoneCluster.Builder builder = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(DATANODE_COUNT); - clusterProvider = new MiniOzoneClusterProvider(conf, builder, 7); + clusterProvider = new MiniOzoneClusterProvider(builder, 7); } @AfterAll @@ -211,7 +211,7 @@ public void testNodeWithOpenPipelineCanBeDecommissionedAndRecommissioned() final DatanodeDetails toDecommission = nm.getNodeByUuid(dnID.toString()); scmClient.decommissionNodes(Arrays.asList( - getDNHostAndPort(toDecommission))); + getDNHostAndPort(toDecommission)), false); waitForDnToReachOpState(nm, toDecommission, DECOMMISSIONED); // Ensure one node transitioned to DECOMMISSIONING @@ -265,7 +265,7 @@ public void testDecommissioningNodesCompleteDecommissionOnSCMRestart() waitForAndReturnContainer(ratisRepConfig, 3); final DatanodeDetails dn = getOneDNHostingReplica(getContainerReplicas(container)); - scmClient.decommissionNodes(Arrays.asList(getDNHostAndPort(dn))); + scmClient.decommissionNodes(Arrays.asList(getDNHostAndPort(dn)), false); // Wait for the state to be persisted on the DN so it can report it on // restart of SCM. diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestQueryNode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestQueryNode.java index e8dc7455a11c..683a0c176eb9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestQueryNode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestQueryNode.java @@ -32,11 +32,9 @@ import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; import static java.util.concurrent.TimeUnit.SECONDS; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; @@ -50,7 +48,6 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_MAINTENANCE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -65,11 +62,7 @@ public class TestQueryNode { @BeforeEach public void setUp() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - final int interval = 1000; - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - interval, TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1, SECONDS); 
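The hunks in these integration tests all follow one migration pattern: heartbeat tuning and the Ratis pipeline limit are no longer set through MiniOzoneCluster.Builder methods but directly on the OzoneConfiguration passed to the builder. A minimal sketch of the resulting setup, using the configuration keys and builder calls visible in the surrounding hunks; the concrete values and the wrapper class name are illustrative only, not part of this patch:

import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT;

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;

/** Illustrative sketch of the post-cleanup test setup; not part of the patch. */
public final class MiniClusterSetupSketch {
  private MiniClusterSetupSketch() {
  }

  static MiniOzoneCluster startCluster() throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Was Builder#setHbInterval / Builder#setHbProcessorInterval.
    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS);
    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, TimeUnit.MILLISECONDS);
    // Was Builder#setTotalPipelineNumLimit.
    conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 9);
    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(6)
        .build();
    cluster.waitForClusterToBeReady();
    return cluster;
  }
}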
conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 1, SECONDS); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, SECONDS); @@ -77,10 +70,10 @@ public void setUp() throws Exception { conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS); conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 3); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, numOfDatanodes + numOfDatanodes / 2); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numOfDatanodes) - .setTotalPipelineNumLimit(numOfDatanodes + numOfDatanodes / 2) .build(); cluster.waitForClusterToBeReady(); scmClient = new ContainerOperationClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java index 439b563d6330..51b5d84a13e9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java @@ -37,9 +37,12 @@ import java.util.Map; import java.util.UUID; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_LEADER_CHOOSING_POLICY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -56,12 +59,12 @@ public class TestLeaderChoosePolicy { public void init(int numDatanodes, int datanodePipelineLimit) throws Exception { conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, datanodePipelineLimit); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, numDatanodes + numDatanodes / 3); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 2000, TimeUnit.MILLISECONDS); + conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, TimeUnit.MILLISECONDS); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numDatanodes) - .setTotalPipelineNumLimit(numDatanodes + numDatanodes / 3) - .setHbInterval(2000) - .setHbProcessorInterval(1000) .build(); cluster.waitForClusterToBeReady(); StorageContainerManager scm = cluster.getStorageContainerManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java index c73ffb982cf6..d8840436ee0b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.pipeline; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.junit.jupiter.api.Assertions.fail; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -61,11 +62,11 @@ public static void init() throws Exception { 
conf.setFromObject(ratisServerConfig); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); conf.set(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL, "2s"); + conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, 1000, MILLISECONDS); + conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, MILLISECONDS); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(6) - .setHbInterval(1000) - .setHbProcessorInterval(1000) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java index 443105b6ccb6..829a9581f663 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java @@ -38,8 +38,10 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -55,14 +57,14 @@ public class TestRatisPipelineCreateAndDestroy { public void init(int numDatanodes) throws Exception { conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, numDatanodes + numDatanodes / 3); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 2000, TimeUnit.MILLISECONDS); + conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, TimeUnit.MILLISECONDS); conf.setTimeDuration( ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, 500, TimeUnit.MILLISECONDS); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numDatanodes) - .setTotalPipelineNumLimit(numDatanodes + numDatanodes / 3) - .setHbInterval(2000) - .setHbProcessorInterval(1000) .build(); cluster.waitForClusterToBeReady(); StorageContainerManager scm = cluster.getStorageContainerManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java index 988f163adab5..6ce05ad3be74 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java @@ -35,6 +35,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotSame; @@ -60,17 +61,17 @@ public class TestSCMRestart { */ @BeforeAll public static void init() throws Exception { + final int numOfNodes = 4; conf = new 
OzoneConfiguration(); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1000, TimeUnit.MILLISECONDS); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS); + conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, TimeUnit.MILLISECONDS); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); - int numOfNodes = 4; + // allow only one FACTOR THREE pipeline. + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, numOfNodes + 1); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numOfNodes) - // allow only one FACTOR THREE pipeline. - .setTotalPipelineNumLimit(numOfNodes + 1) - .setHbInterval(1000) - .setHbProcessorInterval(1000) .build(); cluster.waitForClusterToBeReady(); StorageContainerManager scm = cluster.getStorageContainerManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java index 563e0162acc6..c3ea911f1935 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java @@ -144,7 +144,6 @@ public void init() throws Exception { StorageUnit.MB); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key @@ -210,7 +209,7 @@ public void testReleaseBuffers() throws Exception { return v; }); futures.add(future); - watcher.getFutureMap().put(length, future); + watcher.putFlushFuture(length, future); replies.add(reply); } @@ -238,6 +237,8 @@ public void testReleaseBuffers() throws Exception { assertThat(watcher.getFutureMap()).isEmpty(); assertThat(watcher.getCommitIndexMap()).isEmpty(); } + } finally { + bufferPool.clearBufferPool(); } } @@ -281,7 +282,7 @@ public void testReleaseBuffersOnException() throws Exception { return v; }); futures.add(future); - watcher.getFutureMap().put(length, future); + watcher.putFlushFuture(length, future); replies.add(reply); } @@ -331,6 +332,8 @@ public void testReleaseBuffersOnException() throws Exception { assertThat(watcher.getCommitIndexMap()).isEmpty(); } } + } finally { + bufferPool.clearBufferPool(); } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java index 57e807b7c751..675570164dd9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java @@ -20,12 +20,17 @@ import static java.lang.Thread.sleep; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.CLOSED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY_READONLY; +import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION; import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.OPEN; +import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.ozone.upgrade.InjectedUpgradeFinalizationExecutor.UpgradeTestInjectionPoints.AFTER_COMPLETE_FINALIZATION; import static org.apache.hadoop.ozone.upgrade.InjectedUpgradeFinalizationExecutor.UpgradeTestInjectionPoints.AFTER_POST_FINALIZE_UPGRADE; import static org.apache.hadoop.ozone.upgrade.InjectedUpgradeFinalizationExecutor.UpgradeTestInjectionPoints.AFTER_PRE_FINALIZE_UPGRADE; @@ -71,6 +76,7 @@ import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneClusterProvider; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; +import org.apache.hadoop.ozone.UniformDatanodesFactory; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; @@ -148,30 +154,31 @@ public static void initClass() { OzoneConfiguration conf = new OzoneConfiguration(); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1000, TimeUnit.MILLISECONDS); - conf.set(OZONE_DATANODE_PIPELINE_LIMIT, "1"); + conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 1); + // allow only one FACTOR THREE pipeline. + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, NUM_DATA_NODES + 1); + conf.setInt(HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION, HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); + conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, OMLayoutFeature.INITIAL_VERSION.layoutVersion()); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 500, TimeUnit.MILLISECONDS); + conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 500, TimeUnit.MILLISECONDS); scmFinalizationExecutor = new InjectedUpgradeFinalizationExecutor<>(); SCMConfigurator scmConfigurator = new SCMConfigurator(); scmConfigurator.setUpgradeFinalizationExecutor(scmFinalizationExecutor); - MiniOzoneCluster.Builder builder = - new MiniOzoneHAClusterImpl.Builder(conf) - .setNumDatanodes(NUM_DATA_NODES) - .setNumOfStorageContainerManagers(NUM_SCMS) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setNumOfStorageContainerManagers(NUM_SCMS) .setSCMConfigurator(scmConfigurator) - // allow only one FACTOR THREE pipeline. - .setTotalPipelineNumLimit(NUM_DATA_NODES + 1) - .setHbInterval(500) - .setHbProcessorInterval(500) - .setOmLayoutVersion(OMLayoutFeature.INITIAL_VERSION.layoutVersion()) - .setScmLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()) - .setDnLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); + .setNumDatanodes(NUM_DATA_NODES) + .setDatanodeFactory(UniformDatanodesFactory.newBuilder() + .setLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()) + .build()); // Setting the provider to a max of 100 clusters. Some of the tests here // use multiple clusters, so its hard to know exactly how many will be // needed. This means the provider will create 1 extra cluster than needed // but that will not greatly affect runtimes. 
- clusterProvider = new MiniOzoneClusterProvider(conf, builder, 100); + clusterProvider = new MiniOzoneClusterProvider(builder, 100); } @AfterAll diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java index d2ae30efcebc..d5802aab6e02 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hdds.scm.server.upgrade.SCMUpgradeFinalizationContext; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; +import org.apache.hadoop.ozone.UniformDatanodesFactory; import org.apache.hadoop.ozone.upgrade.DefaultUpgradeFinalizationExecutor; import org.apache.hadoop.ozone.upgrade.InjectedUpgradeFinalizationExecutor.UpgradeTestInjectionPoints; import org.apache.hadoop.ozone.upgrade.UpgradeFinalizationExecutor; @@ -55,6 +56,7 @@ import java.util.stream.Stream; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.CLOSED; +import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -87,17 +89,19 @@ public void init(OzoneConfiguration conf, SCMConfigurator configurator = new SCMConfigurator(); configurator.setUpgradeFinalizationExecutor(executor); - MiniOzoneCluster.Builder clusterBuilder = - new MiniOzoneHAClusterImpl.Builder(conf) - .setNumOfStorageContainerManagers(NUM_SCMS) + conf.setInt(HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION, HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); + + MiniOzoneHAClusterImpl.Builder clusterBuilder = MiniOzoneCluster.newHABuilder(conf); + clusterBuilder.setNumOfStorageContainerManagers(NUM_SCMS) .setNumOfActiveSCMs(NUM_SCMS - numInactiveSCMs) - .setScmLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()) .setSCMServiceId("scmservice") - .setSCMConfigurator(configurator) .setNumOfOzoneManagers(1) + .setSCMConfigurator(configurator) .setNumDatanodes(NUM_DATANODES) - .setDnLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); - this.cluster = (MiniOzoneHAClusterImpl) clusterBuilder.build(); + .setDatanodeFactory(UniformDatanodesFactory.newBuilder() + .setLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()) + .build()); + this.cluster = clusterBuilder.build(); scmClient = cluster.getStorageContainerLocationClient(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/ClusterContainersUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/ClusterContainersUtil.java new file mode 100644 index 000000000000..e7e0337b5f9f --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/ClusterContainersUtil.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.utils; + +import com.google.common.base.Preconditions; +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.ozone.HddsDatanodeService; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.client.OzoneKey; +import org.apache.hadoop.ozone.client.OzoneKeyDetails; +import org.apache.hadoop.ozone.container.common.helpers.BlockData; +import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; +import org.apache.hadoop.ozone.container.common.interfaces.Container; +import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; +import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; +import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil; + +import java.io.File; +import java.io.IOException; +import java.nio.charset.Charset; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.junit.jupiter.api.Assertions.assertNotNull; + +/** + * Utility methods to manipulate/inspect container data on disk in a mini cluster. + */ +public final class ClusterContainersUtil { + private ClusterContainersUtil() { + } + + /** + * Return the directory holding the chunk files of the container that backs the given key. + * + * @param cluster a mini ozone cluster object. + * @param container a container object. + * @param key an OzoneKey object. + * @return the location of the chunk file. + * @throws IOException + */ + public static File getChunksLocationPath(MiniOzoneCluster cluster, Container container, OzoneKey key) + throws IOException { + Preconditions.checkArgument(key instanceof OzoneKeyDetails); + long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0) + .getContainerID(); + long localID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0) + .getLocalID(); + // From the containerData, get the block iterator for all the blocks in + // the container. + KeyValueContainerData containerData = + (KeyValueContainerData) container.getContainerData(); + try (DBHandle db = BlockUtils.getDB(containerData, cluster.getConf()); + BlockIterator keyValueBlockIterator = + db.getStore().getBlockIterator(containerID)) { + // Find the block corresponding to the key we put. We use the localID of + // the BlockData to identify our key. + BlockData blockData = null; + while (keyValueBlockIterator.hasNext()) { + blockData = keyValueBlockIterator.nextBlock(); + if (blockData.getBlockID().getLocalID() == localID) { + break; + } + } + assertNotNull(blockData, "Block not found"); + + // Get the location of the chunk file + String containerBaseDir = + container.getContainerData().getVolume().getHddsRootDir().getPath(); + File chunksLocationPath = KeyValueContainerLocationUtil + .getChunksLocationPath(containerBaseDir, cluster.getClusterId(), containerID); + return chunksLocationPath; + } + } + + /** + * Corrupt the chunk backing the key in a mini cluster.
+ * @param cluster a mini ozone cluster object. + * @param container a container object. + * @param key an OzoneKey object. + * @throws IOException + */ + public static void corruptData(MiniOzoneCluster cluster, Container container, OzoneKey key) + throws IOException { + File chunksLocationPath = getChunksLocationPath(cluster, container, key); + byte[] corruptData = "corrupted data".getBytes(UTF_8); + // Corrupt the contents of chunk files + for (File file : FileUtils.listFiles(chunksLocationPath, null, false)) { + FileUtils.writeByteArrayToFile(file, corruptData); + } + } + + /** + * Verify that the chunk backing the key in a mini cluster matches the given string. + * @param cluster a mini ozone cluster object. + * @param container a container object. + * @param key an OzoneKey object. + * @param data the expected chunk content. + * @return true if the on-disk data matches; false otherwise. + * @throws IOException + */ + public static boolean verifyOnDiskData(MiniOzoneCluster cluster, Container container, OzoneKey key, String data) + throws IOException { + File chunksLocationPath = getChunksLocationPath(cluster, container, key); + for (File file : FileUtils.listFiles(chunksLocationPath, null, false)) { + String chunkOnDisk = FileUtils.readFileToString(file, Charset.defaultCharset()); + if (!data.equals(chunkOnDisk)) { + return false; + } + } + return true; + } + + /** + * Return the first container object in a mini cluster specified by its ID. + * @param cluster a mini ozone cluster object. + * @param containerID a long value representing the container ID. + * @return the container object; null if not found. + */ + public static Container getContainerByID(MiniOzoneCluster cluster, long containerID) { + // Get the container by traversing the datanodes. At least one of the + // datanodes must have this container.
+ Container container = null; + for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) { + container = hddsDatanode.getDatanodeStateMachine().getContainer() + .getContainerSet().getContainer(containerID); + if (container != null) { + break; + } + } + return container; + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java index 4197ac8a8165..3239dfc1a47b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java @@ -50,10 +50,7 @@ static void setUp() throws IOException, InterruptedException, TimeoutException { OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OZONE_METADATA_STORE_ROCKSDB_STATISTICS, "ALL"); - String omServiceId = "omServiceId1"; cluster = MiniOzoneCluster.newBuilder(conf) - .setOMServiceId(omServiceId) - .setNumOfOzoneManagers(1) .build(); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index e864cae00b37..9c76c0ec0c79 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -19,10 +19,10 @@ import java.io.IOException; import java.util.List; -import java.util.Optional; import java.util.UUID; import java.util.concurrent.TimeoutException; +import org.apache.hadoop.hdds.DatanodeVersion; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -38,6 +38,7 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.util.ExitUtils; +import org.apache.ratis.util.function.CheckedFunction; /** * Interface used for MiniOzoneClusters. @@ -62,11 +63,7 @@ static Builder newBuilder(OzoneConfiguration conf) { * * @return MiniOzoneCluster builder */ - static Builder newOMHABuilder(OzoneConfiguration conf) { - return new MiniOzoneHAClusterImpl.Builder(conf); - } - - static Builder newHABuilder(OzoneConfiguration conf) { + static MiniOzoneHAClusterImpl.Builder newHABuilder(OzoneConfiguration conf) { return new MiniOzoneHAClusterImpl.Builder(conf); } @@ -77,11 +74,6 @@ static Builder newHABuilder(OzoneConfiguration conf) { */ OzoneConfiguration getConf(); - /** - * Set the configuration for the MiniOzoneCluster. - */ - void setConf(OzoneConfiguration newConf); - /** * Waits for the cluster to be ready, this call blocks till all the * configured {@link HddsDatanodeService} registers with @@ -93,7 +85,7 @@ static Builder newHABuilder(OzoneConfiguration conf) { void waitForClusterToBeReady() throws TimeoutException, InterruptedException; /** - * Waits for atleast one RATIS pipeline of given factor to be reported in open + * Waits for at least one RATIS pipeline of given factor to be reported in open * state. 
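Taken together, the ClusterContainersUtil helpers added above let an integration test locate the container behind a key, corrupt its chunk files, and check the on-disk content. A sketch of that flow under the signatures shown in the new file; the wrapper class, the originalValue parameter, and the raw Container type follow the file above but are illustrative, not part of this patch:

import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;

import java.io.IOException;

import org.apache.hadoop.hdds.utils.ClusterContainersUtil;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.container.common.interfaces.Container;

/** Illustrative sketch of using ClusterContainersUtil; not part of the patch. */
final class ChunkCorruptionSketch {
  private ChunkCorruptionSketch() {
  }

  static void corruptAndCheck(MiniOzoneCluster cluster, OzoneKeyDetails key, String originalValue)
      throws IOException {
    long containerID = key.getOzoneKeyLocations().get(0).getContainerID();
    Container container = ClusterContainersUtil.getContainerByID(cluster, containerID);
    assertNotNull(container);
    // Overwrite the chunk files backing the key, then confirm the on-disk data no longer matches.
    ClusterContainersUtil.corruptData(cluster, container, key);
    assertFalse(ClusterContainersUtil.verifyOnDiskData(cluster, container, key, originalValue));
  }
}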
* * @param factor replication factor @@ -121,21 +113,6 @@ void waitForPipelineTobeReady(HddsProtos.ReplicationFactor factor, */ void waitTobeOutOfSafeMode() throws TimeoutException, InterruptedException; - /** - * Returns OzoneManager Service ID. - * - * @return Service ID String - */ - String getOMServiceId(); - - - /** - * Returns StorageContainerManager Service ID. - * - * @return Service ID String - */ - String getSCMServiceId(); - /** * Returns {@link StorageContainerManager} associated with this * {@link MiniOzoneCluster} instance. @@ -180,20 +157,12 @@ void waitForPipelineTobeReady(HddsProtos.ReplicationFactor factor, /** * Returns StorageContainerLocationClient to communicate with * {@link StorageContainerManager} associated with the MiniOzoneCluster. - * - * @return StorageContainerLocation Client - * @throws IOException */ StorageContainerLocationProtocolClientSideTranslatorPB getStorageContainerLocationClient() throws IOException; /** * Restarts StorageContainerManager instance. - * - * @param waitForDatanode - * @throws IOException - * @throws TimeoutException - * @throws InterruptedException */ void restartStorageContainerManager(boolean waitForDatanode) throws InterruptedException, TimeoutException, IOException, @@ -201,8 +170,6 @@ void restartStorageContainerManager(boolean waitForDatanode) /** * Restarts OzoneManager instance. - * - * @throws IOException */ void restartOzoneManager() throws IOException; @@ -266,11 +233,6 @@ default void close() { */ void stop(); - /** - * Start Scm. - */ - void startScm() throws IOException; - /** * Start DataNodes. */ @@ -297,57 +259,43 @@ default String getBaseDir() { @SuppressWarnings("visibilitymodifier") abstract class Builder { - protected static final int DEFAULT_HB_INTERVAL_MS = 1000; - protected static final int DEFAULT_HB_PROCESSOR_INTERVAL_MS = 100; protected static final int ACTIVE_OMS_NOT_SET = -1; protected static final int ACTIVE_SCMS_NOT_SET = -1; - protected static final int DEFAULT_PIPELINE_LIMIT = 3; protected static final int DEFAULT_RATIS_RPC_TIMEOUT_SEC = 1; protected OzoneConfiguration conf; protected String path; protected String clusterId; - protected String omServiceId; - protected int numOfOMs; - protected int numOfActiveOMs = ACTIVE_OMS_NOT_SET; - - protected String scmServiceId; - protected int numOfSCMs; - protected int numOfActiveSCMs = ACTIVE_SCMS_NOT_SET; protected SCMConfigurator scmConfigurator; - protected Optional hbInterval = Optional.empty(); - protected Optional hbProcessorInterval = Optional.empty(); protected String scmId = UUID.randomUUID().toString(); protected String omId = UUID.randomUUID().toString(); - - protected Optional datanodeReservedSpace = Optional.empty(); - protected boolean includeRecon = false; + protected boolean includeRecon = false; - protected Optional omLayoutVersion = Optional.empty(); - protected Optional scmLayoutVersion = Optional.empty(); - protected Optional dnLayoutVersion = Optional.empty(); + protected int dnInitialVersion = DatanodeVersion.FUTURE_VERSION.toProtoValue(); + protected int dnCurrentVersion = DatanodeVersion.COMBINED_PUTBLOCK_WRITECHUNK_RPC.toProtoValue(); protected int numOfDatanodes = 3; - protected int numDataVolumes = 1; protected boolean startDataNodes = true; protected CertificateClient certClient; protected SecretKeyClient secretKeyClient; - protected int pipelineNumLimit = DEFAULT_PIPELINE_LIMIT; + protected DatanodeFactory dnFactory = UniformDatanodesFactory.newBuilder().build(); protected Builder(OzoneConfiguration conf) { this.conf = conf; - 
setClusterId(UUID.randomUUID().toString()); + setClusterId(); // Use default SCM configurations if no override is provided. setSCMConfigurator(new SCMConfigurator()); ExitUtils.disableSystemExit(); } - public Builder setConf(OzoneConfiguration config) { - this.conf = config; - return this; + /** Prepare the builder for another call to {@link #build()}, avoiding conflict + * between the clusters created. */ + protected void prepareForNextBuild() { + conf = new OzoneConfiguration(conf); + setClusterId(); } public Builder setSCMConfigurator(SCMConfigurator configurator) { @@ -355,13 +303,8 @@ public Builder setSCMConfigurator(SCMConfigurator configurator) { return this; } - /** - * Sets the cluster Id. - * - * @param id cluster Id - */ - void setClusterId(String id) { - clusterId = id; + private void setClusterId() { + clusterId = UUID.randomUUID().toString(); path = GenericTestUtils.getTempPath( MiniOzoneClusterImpl.class.getSimpleName() + "-" + clusterId); } @@ -382,13 +325,6 @@ public Builder setStartDataNodes(boolean nodes) { return this; } - /** - * Sets the certificate client. - * - * @param client - * - * @return MiniOzoneCluster.Builder - */ public Builder setCertificateClient(CertificateClient client) { this.certClient = client; return this; @@ -413,83 +349,31 @@ public Builder setNumDatanodes(int val) { } /** - * Sets the number of data volumes per datanode. - * - * @param val number of volumes per datanode. - * - * @return MiniOzoneCluster.Builder - */ - public Builder setNumDataVolumes(int val) { - numDataVolumes = val; - return this; - } - - /** - * Sets the total number of pipelines to create. - * @param val number of pipelines - * @return MiniOzoneCluster.Builder - */ - public Builder setTotalPipelineNumLimit(int val) { - pipelineNumLimit = val; - return this; - } - - /** - * Sets the number of HeartBeat Interval of Datanodes, the value should be - * in MilliSeconds. + * Set the initialVersion for all datanodes. * - * @param val HeartBeat interval in milliseconds + * @param val initialVersion value to be set for all datanodes. * * @return MiniOzoneCluster.Builder */ - public Builder setHbInterval(int val) { - hbInterval = Optional.of(val); + public Builder setDatanodeInitialVersion(int val) { + dnInitialVersion = val; return this; } /** - * Sets the number of HeartBeat Processor Interval of Datanodes, - * the value should be in MilliSeconds. + * Set the currentVersion for all datanodes. * - * @param val HeartBeat Processor interval in milliseconds + * @param val currentVersion value to be set for all datanodes. * * @return MiniOzoneCluster.Builder */ - public Builder setHbProcessorInterval(int val) { - hbProcessorInterval = Optional.of(val); + public Builder setDatanodeCurrentVersion(int val) { + dnCurrentVersion = val; return this; } - /** - * Sets the reserved space - * {@link org.apache.hadoop.hdds.scm.ScmConfigKeys} - * HDDS_DATANODE_DIR_DU_RESERVED - * for each volume in each datanode. - * @param reservedSpace String that contains the numeric size value and - * ends with a - * {@link org.apache.hadoop.hdds.conf.StorageUnit} - * suffix. For example, "50GB". 
- * @see org.apache.hadoop.ozone.container.common.volume.VolumeInfo - * - * @return {@link MiniOzoneCluster} Builder - */ - public Builder setDatanodeReservedSpace(String reservedSpace) { - datanodeReservedSpace = Optional.of(reservedSpace); - return this; - } - - public Builder setNumOfOzoneManagers(int numOMs) { - this.numOfOMs = numOMs; - return this; - } - - public Builder setNumOfActiveOMs(int numActiveOMs) { - this.numOfActiveOMs = numActiveOMs; - return this; - } - - public Builder setOMServiceId(String serviceId) { - this.omServiceId = serviceId; + public Builder setDatanodeFactory(DatanodeFactory factory) { + this.dnFactory = factory; return this; } @@ -498,43 +382,18 @@ public Builder includeRecon(boolean include) { return this; } - public Builder setNumOfStorageContainerManagers(int numSCMs) { - this.numOfSCMs = numSCMs; - return this; - } - - public Builder setNumOfActiveSCMs(int numActiveSCMs) { - this.numOfActiveSCMs = numActiveSCMs; - return this; - } - - public Builder setSCMServiceId(String serviceId) { - this.scmServiceId = serviceId; - return this; - } - - public Builder setScmLayoutVersion(int layoutVersion) { - scmLayoutVersion = Optional.of(layoutVersion); - return this; - } - - public Builder setOmLayoutVersion(int layoutVersion) { - omLayoutVersion = Optional.of(layoutVersion); - return this; - } - - public Builder setDnLayoutVersion(int layoutVersion) { - dnLayoutVersion = Optional.of(layoutVersion); - return this; - } - /** * Constructs and returns MiniOzoneCluster. * * @return {@link MiniOzoneCluster} - * - * @throws IOException */ public abstract MiniOzoneCluster build() throws IOException; } + + /** + * Factory to customize configuration of each datanode. + */ + interface DatanodeFactory extends CheckedFunction { + // marker + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index 400ae3ee2cc8..50013b57f4c3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -27,18 +27,16 @@ import java.util.Collections; import java.util.List; import java.util.Set; -import java.util.UUID; import java.util.Iterator; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hdds.DFSConfigKeysLegacy; +import org.apache.hadoop.hdds.DatanodeVersion; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.client.RatisReplicationConfig; -import org.apache.hadoop.hdds.conf.ConfigurationTarget; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -68,10 +66,8 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.common.Storage.StorageState; -import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage; import org.apache.hadoop.ozone.container.common.utils.ContainerCache; import org.apache.hadoop.ozone.container.common.utils.DatanodeStoreCache; -import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig; import 
org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMStorage; import org.apache.hadoop.ozone.om.OzoneManager; @@ -81,19 +77,13 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.commons.io.FileUtils; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; + +import static java.util.Collections.singletonList; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_ADDRESS_KEY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_DATANODE_ADDRESS_KEY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_HTTP_ADDRESS_KEY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_TASK_SAFEMODE_WAIT_THRESHOLD; -import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_IPC_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT; -import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR; @@ -102,6 +92,8 @@ import static org.apache.ozone.test.GenericTestUtils.PortAllocator.localhostWithFreePort; import org.hadoop.ozone.recon.codegen.ReconSqlDbConfig; +import org.mockito.MockedStatic; +import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -116,6 +108,8 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster { private static final Logger LOG = LoggerFactory.getLogger(MiniOzoneClusterImpl.class); + private static final String[] NO_ARGS = new String[0]; + static { CodecBuffer.enableLeakDetection(); } @@ -132,18 +126,17 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster { private CertificateClient caClient; private final Set clients = ConcurrentHashMap.newKeySet(); private SecretKeyClient secretKeyClient; + private static MockedStatic mockDNStatic = Mockito.mockStatic(HddsDatanodeService.class); /** * Creates a new MiniOzoneCluster with Recon. - * - * @throws IOException if there is an I/O error */ - MiniOzoneClusterImpl(OzoneConfiguration conf, - SCMConfigurator scmConfigurator, - OzoneManager ozoneManager, - StorageContainerManager scm, - List hddsDatanodes, - ReconServer reconServer) { + private MiniOzoneClusterImpl(OzoneConfiguration conf, + SCMConfigurator scmConfigurator, + OzoneManager ozoneManager, + StorageContainerManager scm, + List hddsDatanodes, + ReconServer reconServer) { this.conf = conf; this.ozoneManager = ozoneManager; this.scm = scm; @@ -157,9 +150,6 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster { * StorageContainerManager. This is used by * {@link MiniOzoneHAClusterImpl} for starting multiple * OzoneManagers and StorageContainerManagers. 
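The Builder changes above replace the per-datanode knobs (layout version, reserved space, data volumes) with a single DatanodeFactory hook, and the tests in this patch use UniformDatanodesFactory for the layout-version case. A sketch of that usage, following the calls shown in the TestScmHAFinalization and TestHDDSUpgrade hunks; the HDDSLayoutFeature import path is assumed from those tests, and the class name and datanode count are illustrative:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.UniformDatanodesFactory;

/** Illustrative sketch of the DatanodeFactory hook; not part of the patch. */
public final class DatanodeFactorySketch {
  private DatanodeFactorySketch() {
  }

  static MiniOzoneCluster buildWithInitialLayout(OzoneConfiguration conf) throws Exception {
    // Was Builder#setDnLayoutVersion; now each datanode's configuration comes from the factory.
    return MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(3)
        .setDatanodeFactory(UniformDatanodesFactory.newBuilder()
            .setLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion())
            .build())
        .build();
  }
}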
- * - * @param conf - * @param hddsDatanodes */ MiniOzoneClusterImpl(OzoneConfiguration conf, SCMConfigurator scmConfigurator, List hddsDatanodes, ReconServer reconServer) { @@ -178,23 +168,10 @@ public OzoneConfiguration getConf() { return conf; } - @Override - public void setConf(OzoneConfiguration newConf) { + protected void setConf(OzoneConfiguration newConf) { this.conf = newConf; } - @Override - public String getOMServiceId() { - // Non-HA cluster doesn't have OM Service Id. - return null; - } - - @Override - public String getSCMServiceId() { - // Non-HA cluster doesn't have OM Service Id. - return null; - } - public void waitForSCMToBeReady() throws TimeoutException, InterruptedException { if (SCMHAUtils.isSCMHAEnabled(conf)) { @@ -207,9 +184,6 @@ public StorageContainerManager getActiveSCM() { return scm; } - /** - * Waits for the Ozone cluster to be ready for processing requests. - */ @Override public void waitForClusterToBeReady() throws TimeoutException, InterruptedException { @@ -233,10 +207,6 @@ public void waitForClusterToBeReady() }, 1000, waitForClusterToBeReadyTimeout); } - /** - * Waits for atleast one RATIS pipeline of given factor to be reported in open - * state. - */ @Override public void waitForPipelineTobeReady(HddsProtos.ReplicationFactor factor, int timeoutInMs) throws @@ -249,24 +219,11 @@ public void waitForPipelineTobeReady(HddsProtos.ReplicationFactor factor, }, 1000, timeoutInMs); } - /** - * Sets the timeout value after which - * {@link MiniOzoneClusterImpl#waitForClusterToBeReady} times out. - * - * @param timeoutInMs timeout value in milliseconds - */ @Override public void setWaitForClusterToBeReadyTimeout(int timeoutInMs) { waitForClusterToBeReadyTimeout = timeoutInMs; } - /** - * Waits for SCM to be out of Safe Mode. Many tests can be run iff we are out - * of Safe mode. - * - * @throws TimeoutException - * @throws InterruptedException - */ @Override public void waitTobeOutOfSafeMode() throws TimeoutException, InterruptedException { @@ -396,6 +353,16 @@ private void waitForHddsDatanodeToStop(DatanodeDetails dn) }, 1000, waitForClusterToBeReadyTimeout); } + private static void overrideDatanodeVersions(int dnInitialVersion, int dnCurrentVersion) { + // FUTURE_VERSION (-1) is not a valid version for a datanode, using it as a marker when version is not overridden + if (dnInitialVersion != DatanodeVersion.FUTURE_VERSION.toProtoValue()) { + mockDNStatic.when(HddsDatanodeService::getDefaultInitialVersion).thenReturn(dnInitialVersion); + } + if (dnCurrentVersion != DatanodeVersion.FUTURE_VERSION.toProtoValue()) { + mockDNStatic.when(HddsDatanodeService::getDefaultCurrentVersion).thenReturn(dnCurrentVersion); + } + } + @Override public void restartHddsDatanode(int i, boolean waitForDatanode) throws InterruptedException, TimeoutException { @@ -407,8 +374,7 @@ public void restartHddsDatanode(int i, boolean waitForDatanode) // wait for node to be removed from SCM healthy node list. waitForHddsDatanodeToStop(datanodeService.getDatanodeDetails()); } - String[] args = new String[] {}; - HddsDatanodeService service = new HddsDatanodeService(args); + HddsDatanodeService service = new HddsDatanodeService(NO_ARGS); service.setConfiguration(config); hddsDatanodes.add(i, service); startHddsDatanode(service); @@ -464,15 +430,7 @@ public void stop() { stopRecon(reconServer); } - /** - * Start Scm. 
- */ - @Override - public void startScm() throws IOException { - scm.start(); - } - - public void startHddsDatanode(HddsDatanodeService datanode) { + private void startHddsDatanode(HddsDatanodeService datanode) { try { datanode.setCertificateClient(getCAClient()); } catch (IOException e) { @@ -482,9 +440,6 @@ public void startHddsDatanode(HddsDatanodeService datanode) { datanode.start(); } - /** - * Start DataNodes. - */ @Override public void startHddsDatanodes() { hddsDatanodes.forEach(this::startHddsDatanode); @@ -504,7 +459,7 @@ public void shutdownHddsDatanodes() { @Override public void startRecon() { reconServer = new ReconServer(); - reconServer.execute(new String[]{}); + reconServer.execute(NO_ARGS); } @Override @@ -590,25 +545,10 @@ public MiniOzoneCluster build() throws IOException { ReconServer reconServer = null; List hddsDatanodes = Collections.emptyList(); try { - scm = createSCM(); - scm.start(); - om = createOM(); - if (certClient != null) { - om.setCertClient(certClient); - } - if (secretKeyClient != null) { - om.setSecretKeyClient(secretKeyClient); - } - om.start(); - - if (includeRecon) { - configureRecon(); - reconServer = new ReconServer(); - reconServer.execute(new String[] {}); - } - - hddsDatanodes = createHddsDatanodes( - Collections.singletonList(scm), reconServer); + scm = createAndStartSingleSCM(); + om = createAndStartSingleOM(); + reconServer = createRecon(); + hddsDatanodes = createHddsDatanodes(); MiniOzoneClusterImpl cluster = new MiniOzoneClusterImpl(conf, scmConfigurator, om, scm, @@ -619,6 +559,8 @@ public MiniOzoneCluster build() throws IOException { if (startDataNodes) { cluster.startHddsDatanodes(); } + + prepareForNextBuild(); return cluster; } catch (Exception ex) { stopOM(om); @@ -641,10 +583,17 @@ public MiniOzoneCluster build() throws IOException { } } + protected void setClients(OzoneManager om) throws IOException { + if (certClient != null) { + om.setCertClient(certClient); + } + if (secretKeyClient != null) { + om.setSecretKeyClient(secretKeyClient); + } + } + /** * Initializes the configuration required for starting MiniOzoneCluster. - * - * @throws IOException */ protected void initializeConfiguration() throws IOException { Path metaDir = Paths.get(path, "ozone-meta"); @@ -652,10 +601,6 @@ protected void initializeConfiguration() throws IOException { conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString()); // conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - // MiniOzoneCluster should have global pipeline upper limit. - conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, - pipelineNumLimit >= DEFAULT_PIPELINE_LIMIT ? - pipelineNumLimit : DEFAULT_PIPELINE_LIMIT); conf.setTimeDuration(OMConfigKeys.OZONE_OM_RATIS_MINIMUM_TIMEOUT_KEY, DEFAULT_RATIS_RPC_TIMEOUT_SEC, TimeUnit.SECONDS); SCMClientConfig scmClientConfig = conf.getObject(SCMClientConfig.class); @@ -672,23 +617,24 @@ void removeConfiguration() { FileUtils.deleteQuietly(new File(path)); } + protected StorageContainerManager createAndStartSingleSCM() + throws AuthenticationException, IOException { + StorageContainerManager scm = createSCM(); + scm.start(); + configureScmDatanodeAddress(singletonList(scm)); + return scm; + } + /** * Creates a new StorageContainerManager instance. * * @return {@link StorageContainerManager} - * @throws IOException */ protected StorageContainerManager createSCM() throws IOException, AuthenticationException { configureSCM(); - SCMStorageConfig scmStore; - - // Set non standard layout version if needed. 
- scmLayoutVersion.ifPresent(integer -> - conf.set(HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION, - String.valueOf(integer))); - scmStore = new SCMStorageConfig(conf); + SCMStorageConfig scmStore = new SCMStorageConfig(conf); initializeScmStorage(scmStore); StorageContainerManager scm = HddsTestUtils.getScmSimple(conf, scmConfigurator); @@ -701,6 +647,7 @@ protected StorageContainerManager createSCM() } return scm; } + protected void initializeScmStorage(SCMStorageConfig scmStore) throws IOException { if (scmStore.getState() == StorageState.INITIALIZED) { @@ -736,31 +683,35 @@ void initializeOmStorage(OMStorage omStorage) throws IOException { omStorage.initialize(); } + protected OzoneManager createAndStartSingleOM() throws AuthenticationException, IOException { + OzoneManager om = createOM(); + setClients(om); + om.start(); + return om; + } + /** * Creates a new OzoneManager instance. * * @return {@link OzoneManager} - * @throws IOException */ protected OzoneManager createOM() throws IOException, AuthenticationException { configureOM(); - omLayoutVersion.ifPresent(integer -> - conf.set(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, - String.valueOf(integer))); OMStorage omStore = new OMStorage(conf); initializeOmStorage(omStore); return OzoneManager.createOm(conf); } - protected String getSCMAddresses(List scms) { + private String getSCMAddresses(List scms) { StringBuilder stringBuilder = new StringBuilder(); Iterator iter = scms.iterator(); while (iter.hasNext()) { StorageContainerManager scm = iter.next(); - stringBuilder.append(scm.getDatanodeRpcAddress().getHostString() + - ":" + scm.getDatanodeRpcAddress().getPort()); + stringBuilder.append(scm.getDatanodeRpcAddress().getHostString()) + .append(":") + .append(scm.getDatanodeRpcAddress().getPort()); if (iter.hasNext()) { stringBuilder.append(","); } @@ -769,76 +720,48 @@ protected String getSCMAddresses(List scms) { return stringBuilder.toString(); } + protected void configureScmDatanodeAddress(List scms) { + conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, getSCMAddresses(scms)); + } + + protected ReconServer createRecon() { + ReconServer reconServer = null; + if (includeRecon) { + configureRecon(); + reconServer = new ReconServer(); + reconServer.execute(NO_ARGS); + + OzoneStorageContainerManager reconScm = + reconServer.getReconStorageContainerManager(); + conf.set(OZONE_RECON_ADDRESS_KEY, + reconScm.getDatanodeRpcAddress().getHostString() + ":" + + reconScm.getDatanodeRpcAddress().getPort()); + } + return reconServer; + } + /** * Creates HddsDatanodeService(s) instance. 
* * @return List of HddsDatanodeService - * @throws IOException */ - protected List createHddsDatanodes( - List scms, ReconServer reconServer) + protected List createHddsDatanodes() throws IOException { - String scmAddress = getSCMAddresses(scms); - String[] args = new String[] {}; - conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, scmAddress); List hddsDatanodes = new ArrayList<>(); + + // Override default datanode initial and current version if necessary + overrideDatanodeVersions(dnInitialVersion, dnCurrentVersion); + for (int i = 0; i < numOfDatanodes; i++) { - OzoneConfiguration dnConf = new OzoneConfiguration(conf); - configureDatanodePorts(dnConf); - String datanodeBaseDir = path + "/datanode-" + Integer.toString(i); - Path metaDir = Paths.get(datanodeBaseDir, "meta"); - List dataDirs = new ArrayList<>(); - List reservedSpaceList = new ArrayList<>(); - for (int j = 0; j < numDataVolumes; j++) { - Path dir = Paths.get(datanodeBaseDir, "data-" + j, "containers"); - Files.createDirectories(dir); - dataDirs.add(dir.toString()); - datanodeReservedSpace.ifPresent( - s -> reservedSpaceList.add(dir + ":" + s)); - } - String reservedSpaceString = String.join(",", reservedSpaceList); - String listOfDirs = String.join(",", dataDirs); - Path ratisDir = Paths.get(datanodeBaseDir, "data", "ratis"); - Path workDir = Paths.get(datanodeBaseDir, "data", "replication", - "work"); - Files.createDirectories(metaDir); - Files.createDirectories(ratisDir); - Files.createDirectories(workDir); - dnConf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString()); - dnConf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, listOfDirs); - dnConf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, listOfDirs); - dnConf.set(ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED, - reservedSpaceString); - dnConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, - ratisDir.toString()); - if (reconServer != null) { - OzoneStorageContainerManager reconScm = - reconServer.getReconStorageContainerManager(); - dnConf.set(OZONE_RECON_ADDRESS_KEY, - reconScm.getDatanodeRpcAddress().getHostString() + ":" + - reconScm.getDatanodeRpcAddress().getPort()); - } + OzoneConfiguration dnConf = dnFactory.apply(conf); - HddsDatanodeService datanode = new HddsDatanodeService(args); + HddsDatanodeService datanode = new HddsDatanodeService(NO_ARGS); datanode.setConfiguration(dnConf); hddsDatanodes.add(datanode); } - if (dnLayoutVersion.isPresent()) { - configureLayoutVersionInDatanodes(hddsDatanodes, dnLayoutVersion.get()); - } return hddsDatanodes; } - private void configureLayoutVersionInDatanodes( - List dns, int layoutVersion) throws IOException { - for (HddsDatanodeService dn : dns) { - DatanodeLayoutStorage layoutStorage; - layoutStorage = new DatanodeLayoutStorage(dn.getConf(), - UUID.randomUUID().toString(), layoutVersion); - layoutStorage.initialize(); - } - } - protected void configureSCM() { conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, localhostWithFreePort()); @@ -850,30 +773,6 @@ protected void configureSCM() { localhostWithFreePort()); conf.set(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, "3s"); - configureSCMheartbeat(); - } - - private void configureSCMheartbeat() { - if (hbInterval.isPresent()) { - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, - hbInterval.get(), TimeUnit.MILLISECONDS); - } else { - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, - DEFAULT_HB_INTERVAL_MS, - TimeUnit.MILLISECONDS); - } - - if (hbProcessorInterval.isPresent()) { - conf.setTimeDuration( - 
ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - hbProcessorInterval.get(), - TimeUnit.MILLISECONDS); - } else { - conf.setTimeDuration( - ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - DEFAULT_HB_PROCESSOR_INTERVAL_MS, - TimeUnit.MILLISECONDS); - } } private void configureOM() { @@ -884,22 +783,7 @@ private void configureOM() { conf.setInt(OMConfigKeys.OZONE_OM_RATIS_PORT_KEY, getFreePort()); } - protected void configureDatanodePorts(ConfigurationTarget conf) { - conf.set(ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY, - anyHostWithFreePort()); - conf.set(HddsConfigKeys.HDDS_DATANODE_HTTP_ADDRESS_KEY, - anyHostWithFreePort()); - conf.set(HddsConfigKeys.HDDS_DATANODE_CLIENT_ADDRESS_KEY, - anyHostWithFreePort()); - conf.setInt(DFS_CONTAINER_IPC_PORT, getFreePort()); - conf.setInt(DFS_CONTAINER_RATIS_IPC_PORT, getFreePort()); - conf.setInt(DFS_CONTAINER_RATIS_ADMIN_PORT, getFreePort()); - conf.setInt(DFS_CONTAINER_RATIS_SERVER_PORT, getFreePort()); - conf.setInt(DFS_CONTAINER_RATIS_DATASTREAM_PORT, getFreePort()); - conf.setFromObject(new ReplicationConfig().setPort(getFreePort())); - } - - protected void configureRecon() throws IOException { + protected void configureRecon() { ConfigurationProvider.resetConfiguration(); File tempNewFolder = new File(path, "recon"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java index cdd12ac841e3..618e2dd42107 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java @@ -17,14 +17,12 @@ */ package org.apache.hadoop.ozone; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.HashSet; import java.util.Set; -import java.util.UUID; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; import java.util.concurrent.TimeUnit; @@ -49,10 +47,10 @@ * however shutting down the cluster in the background while the new cluster is * getting created will likely save about 10 seconds per test. * - * To use this class, setup the Cluster Provider in a static method annotated - * with @BeforeClass, eg: - * - * @BeforeClass + * To use this class, set up the Cluster Provider in a static method annotated + * with {@code @BeforeAll}, eg: + *
    + *   @BeforeAll
      *   public static void init() {
      *     OzoneConfiguration conf = new OzoneConfiguration();
      *     final int interval = 100;
    @@ -71,29 +69,34 @@
      *
- *     clusterProvider = new MiniOzoneClusterProvider(conf, builder, 5);
+ *     clusterProvider = new MiniOzoneClusterProvider(builder, 5);
      *   }
    + * 
    * - * Ensure you shutdown the provider in a @AfterClass annotated method: + * Ensure you shut down the provider in an {@code @AfterAll} annotated method: * - * @AfterClass + *
    + *   @AfterAll
      *   public static void shutdown() throws InterruptedException {
      *     if (clusterProvider != null) {
      *       clusterProvider.shutdown();
      *     }
      *   }
    + * 
    * - * Then in the @Before method, or in the test itself, obtain a cluster: + * Then in the {@code @BeforeEach} method, or in the test itself, obtain a cluster: * - * @Before + *
    + *   @BeforeEach
      *   public void setUp() throws Exception {
      *     cluster = clusterProvider.provide();
      *   }
      *
    - *   @After
    + *   @AfterEach
      *   public void tearDown() throws InterruptedException, IOException {
      *     if (cluster != null) {
      *       clusterProvider.destroy(cluster);
      *     }
      *   }
    + * 
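Putting the lifecycle snippets above together, a test class using the provider might look like the following minimal sketch. This is illustrative only and not part of the patch: the class name and the datanode/cluster counts are assumptions, and it uses the builder-only constructor since the configuration now travels with the builder rather than the provider.

import java.io.IOException;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneClusterProvider;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;

// Minimal sketch: combines the @BeforeAll/@AfterAll/@BeforeEach/@AfterEach snippets
// above into one skeleton. The class name and the counts (3 datanodes, 5 clusters)
// are illustrative assumptions, not part of the patch.
class ExampleClusterProviderTest {

  private static MiniOzoneClusterProvider clusterProvider;
  private MiniOzoneCluster cluster;

  @BeforeAll
  static void init() {
    OzoneConfiguration conf = new OzoneConfiguration();
    // The configuration is carried by the builder; the provider itself
    // no longer takes an OzoneConfiguration argument.
    MiniOzoneCluster.Builder builder = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(3);
    clusterProvider = new MiniOzoneClusterProvider(builder, 5);
  }

  @AfterAll
  static void shutdown() throws InterruptedException {
    if (clusterProvider != null) {
      clusterProvider.shutdown();
    }
  }

  @BeforeEach
  void setUp() throws Exception {
    cluster = clusterProvider.provide();
  }

  @AfterEach
  void tearDown() throws IOException, InterruptedException {
    if (cluster != null) {
      clusterProvider.destroy(cluster);
    }
  }
}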
    * * This only works if the same config / builder object can be passed to each * cluster in the test suite. @@ -118,7 +121,6 @@ public class MiniOzoneClusterProvider { private final int clusterLimit; private int consumedClusterCount = 0; - private final OzoneConfiguration conf; private final MiniOzoneCluster.Builder builder; private final Thread createThread; private final Thread reapThread; @@ -130,16 +132,13 @@ public class MiniOzoneClusterProvider { = new ArrayBlockingQueue<>(EXPIRED_LIMIT); /** - * - * @param conf The configuration to use when creating the cluster * @param builder A builder object with all cluster options set * @param clusterLimit The total number of clusters this provider should * create. If another is requested after this limit has * been reached, an exception will be thrown. */ - public MiniOzoneClusterProvider(OzoneConfiguration conf, + public MiniOzoneClusterProvider( MiniOzoneCluster.Builder builder, int clusterLimit) { - this.conf = conf; this.builder = builder; this.clusterLimit = clusterLimit; createThread = createClusters(); @@ -214,9 +213,6 @@ private Thread createClusters() { while (!Thread.interrupted() && createdCount < clusterLimit) { MiniOzoneCluster cluster = null; try { - builder.setClusterId(UUID.randomUUID().toString()); - builder.setConf(new OzoneConfiguration(conf)); - cluster = builder.build(); cluster.waitForClusterToBeReady(); createdCount++; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java index 797a7515f206..16ef88177f3b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java @@ -56,8 +56,6 @@ import static java.util.Collections.singletonList; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION; -import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort; import static org.apache.ozone.test.GenericTestUtils.PortAllocator.localhostWithFreePort; @@ -82,11 +80,6 @@ public class MiniOzoneHAClusterImpl extends MiniOzoneClusterImpl { private static final int RATIS_RPC_TIMEOUT = 1000; // 1 second public static final int NODE_FAILURE_TIMEOUT = 2000; // 2 seconds - /** - * Creates a new MiniOzoneCluster. - * - * @throws IOException if there is an I/O error - */ public MiniOzoneHAClusterImpl( OzoneConfiguration conf, SCMConfigurator scmConfigurator, @@ -101,19 +94,8 @@ public MiniOzoneHAClusterImpl( this.clusterMetaPath = clusterPath; } - @Override - public String getOMServiceId() { - return omhaService.getServiceId(); - } - - @Override - public String getSCMServiceId() { - return scmhaService.getServiceId(); - } - /** * Returns the first OzoneManager from the list. 
- * @return */ @Override public OzoneManager getOzoneManager() { @@ -355,12 +337,20 @@ private static void configureOMPorts(ConfigurationTarget conf, public static class Builder extends MiniOzoneClusterImpl.Builder { private static final String OM_NODE_ID_PREFIX = "omNode-"; - private List activeOMs = new ArrayList<>(); - private List inactiveOMs = new ArrayList<>(); + private final List activeOMs = new ArrayList<>(); + private final List inactiveOMs = new ArrayList<>(); private static final String SCM_NODE_ID_PREFIX = "scmNode-"; - private List activeSCMs = new ArrayList<>(); - private List inactiveSCMs = new ArrayList<>(); + private final List activeSCMs = new ArrayList<>(); + private final List inactiveSCMs = new ArrayList<>(); + + private String omServiceId; + private int numOfOMs; + private int numOfActiveOMs = ACTIVE_OMS_NOT_SET; + + private String scmServiceId; + private int numOfSCMs; + private int numOfActiveSCMs = ACTIVE_SCMS_NOT_SET; /** * Creates a new Builder. @@ -371,8 +361,38 @@ public Builder(OzoneConfiguration conf) { super(conf); } + public Builder setNumOfOzoneManagers(int numOMs) { + this.numOfOMs = numOMs; + return this; + } + + public Builder setNumOfActiveOMs(int numActiveOMs) { + this.numOfActiveOMs = numActiveOMs; + return this; + } + + public Builder setOMServiceId(String serviceId) { + this.omServiceId = serviceId; + return this; + } + + public Builder setNumOfStorageContainerManagers(int numSCMs) { + this.numOfSCMs = numSCMs; + return this; + } + + public Builder setNumOfActiveSCMs(int numActiveSCMs) { + this.numOfActiveSCMs = numActiveSCMs; + return this; + } + + public Builder setSCMServiceId(String serviceId) { + this.scmServiceId = serviceId; + return this; + } + @Override - public MiniOzoneCluster build() throws IOException { + public MiniOzoneHAClusterImpl build() throws IOException { if (numOfActiveOMs > numOfOMs) { throw new IllegalArgumentException("Number of active OMs cannot be " + "more than the total number of OMs"); @@ -399,21 +419,16 @@ public MiniOzoneCluster build() throws IOException { initOMRatisConf(); SCMHAService scmService; OMHAService omService; - ReconServer reconServer = null; + ReconServer reconServer; try { scmService = createSCMService(); omService = createOMService(); - if (includeRecon) { - configureRecon(); - reconServer = new ReconServer(); - reconServer.execute(new String[] {}); - } + reconServer = createRecon(); } catch (AuthenticationException ex) { throw new IOException("Unable to build MiniOzoneCluster. 
", ex); } - final List hddsDatanodes = createHddsDatanodes( - scmService.getActiveServices(), reconServer); + final List hddsDatanodes = createHddsDatanodes(); MiniOzoneHAClusterImpl cluster = new MiniOzoneHAClusterImpl(conf, scmConfigurator, omService, scmService, hddsDatanodes, path, @@ -422,9 +437,14 @@ public MiniOzoneCluster build() throws IOException { if (startDataNodes) { cluster.startHddsDatanodes(); } + prepareForNextBuild(); return cluster; } + protected int numberOfOzoneManagers() { + return numOfOMs; + } + protected void initOMRatisConf() { conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); @@ -460,8 +480,7 @@ protected void initOMRatisConf() { protected OMHAService createOMService() throws IOException, AuthenticationException { if (omServiceId == null) { - OzoneManager om = createOM(); - om.start(); + OzoneManager om = createAndStartSingleOM(); return new OMHAService(singletonList(om), null, null); } @@ -487,16 +506,9 @@ protected OMHAService createOMService() throws IOException, String metaDirPath = path + "/" + nodeId; config.set(OZONE_METADATA_DIRS, metaDirPath); - // Set non standard layout version if needed. - omLayoutVersion.ifPresent(integer -> - config.set(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, - String.valueOf(integer))); - OzoneManager.omInit(config); OzoneManager om = OzoneManager.createOm(config); - if (certClient != null) { - om.setCertClient(certClient); - } + setClients(om); omList.add(om); if (i <= numOfActiveOMs) { @@ -533,8 +545,7 @@ protected OMHAService createOMService() throws IOException, protected SCMHAService createSCMService() throws IOException, AuthenticationException { if (scmServiceId == null) { - StorageContainerManager scm = createSCM(); - scm.start(); + StorageContainerManager scm = createAndStartSingleSCM(); return new SCMHAService(singletonList(scm), null, null); } @@ -555,10 +566,6 @@ protected SCMHAService createSCMService() scmConfig.set(ScmConfigKeys.OZONE_SCM_NODE_ID_KEY, nodeId); scmConfig.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - scmLayoutVersion.ifPresent(integer -> - scmConfig.set(HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION, - String.valueOf(integer))); - configureSCM(); if (i == 1) { StorageContainerManager.scmInit(scmConfig, clusterId); @@ -603,6 +610,8 @@ protected SCMHAService createSCMService() } } + configureScmDatanodeAddress(activeSCMs); + return new SCMHAService(activeSCMs, inactiveSCMs, scmServiceId); } @@ -731,7 +740,7 @@ public void bootstrapOzoneManager(String omNodeId, while (true) { try { - OzoneConfiguration newConf = addNewOMToConfig(getOMServiceId(), + OzoneConfiguration newConf = addNewOMToConfig(omhaService.getServiceId(), omNodeId); if (updateConfigs) { @@ -788,7 +797,7 @@ private OzoneConfiguration addNewOMToConfig(String omServiceId, /** * Update the configurations of the given list of OMs. 
*/ - public void updateOMConfigs(OzoneConfiguration newConf) { + private void updateOMConfigs(OzoneConfiguration newConf) { for (OzoneManager om : omhaService.getActiveServices()) { om.setConfiguration(newConf); } @@ -881,17 +890,17 @@ public void setupExitManagerForTesting() { * @param */ static class MiniOzoneHAService { - private Map serviceMap; - private List services; - private String serviceId; - private String serviceName; + private final Map serviceMap; + private final List services; + private final String serviceId; + private final String serviceName; // Active services s denote OM/SCM services which are up and running - private List activeServices; - private List inactiveServices; + private final List activeServices; + private final List inactiveServices; // Function to extract the Id from service - private Function serviceIdProvider; + private final Function serviceIdProvider; MiniOzoneHAService(String name, List activeList, List inactiveList, String serviceId, @@ -1017,8 +1026,8 @@ public StorageContainerManager getStorageContainerManager() { private static final class ExitManagerForOM extends ExitManager { - private MiniOzoneHAClusterImpl cluster; - private String omNodeId; + private final MiniOzoneHAClusterImpl cluster; + private final String omNodeId; private ExitManagerForOM(MiniOzoneHAClusterImpl cluster, String nodeId) { this.cluster = cluster; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java index 5338cb8a0cc1..c084a72a3c79 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java @@ -44,11 +44,11 @@ public interface RatisTestHelper { Logger LOG = LoggerFactory.getLogger(RatisTestHelper.class); static void initRatisConf(RpcType rpc, OzoneConfiguration conf) { - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, rpc.name()); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, true); + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, rpc.name()); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); - LOG.info("{} = {}", OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, + LOG.info("{} = {}", OzoneConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, rpc.name()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java index a04c1236186c..26c1868084fc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.conf.DefaultConfigManager; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; @@ -295,9 +296,11 @@ private void readData(OmKeyInfo keyInfo, Function retryFunc) throws IOException { 
XceiverClientFactory xceiverClientManager = ((RpcClient) client.getProxy()).getXceiverClientManager(); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(false); try (InputStream is = KeyInputStream.getFromOmKeyInfo(keyInfo, - xceiverClientManager, - false, retryFunc, blockInputStreamFactory)) { + xceiverClientManager, retryFunc, blockInputStreamFactory, + clientConfig)) { byte[] buf = new byte[100]; int readBytes = is.read(buf, 0, 100); assertEquals(100, readBytes); @@ -378,13 +381,12 @@ private static void setSecureConfig() throws IOException { private static void startCluster() throws IOException, TimeoutException, InterruptedException { OzoneManager.setTestSecureOmFlag(true); - MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId("TestSecretKey") - .setNumDatanodes(3) .setNumOfStorageContainerManagers(3) .setNumOfOzoneManagers(1); - cluster = (MiniOzoneHAClusterImpl) builder.build(); + cluster = builder.build(); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java index a181a6f45e95..87242cb2790e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java @@ -318,14 +318,13 @@ private String[] createArgsForCommand(String[] additionalArgs) { private static void startCluster() throws IOException, TimeoutException, InterruptedException { OzoneManager.setTestSecureOmFlag(true); - MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId(scmServiceId) .setOMServiceId(omServiceId) - .setNumDatanodes(3) .setNumOfStorageContainerManagers(3) .setNumOfOzoneManagers(3); - cluster = (MiniOzoneHAClusterImpl) builder.build(); + cluster = builder.build(); cluster.waitForClusterToBeReady(); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java index 79ea4c593c40..75264d2e7a67 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.container.balancer.ContainerBalancerConfiguration; import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity; import org.apache.ozone.test.tag.Unhealthy; @@ -31,16 +32,16 @@ import org.junit.jupiter.api.Timeout; import java.util.Optional; -import java.util.concurrent.TimeUnit; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * This class tests container balancer operations * from cblock clients. 
*/ -@Timeout(value = 300, unit = TimeUnit.MILLISECONDS) +@Timeout(value = 300) public class TestContainerBalancerOperations { private static ScmClient containerBalancerClient; @@ -83,11 +84,18 @@ public void testContainerBalancerCLIOperations() throws Exception { Optional maxSizeToMovePerIterationInGB = Optional.of(1L); Optional maxSizeEnteringTargetInGB = Optional.of(1L); Optional maxSizeLeavingSourceInGB = Optional.of(1L); - + Optional balancingInterval = Optional.of(1); + Optional moveTimeout = Optional.of(1); + Optional moveReplicationTimeout = Optional.of(1); + Optional networkTopologyEnable = Optional.of(false); + Optional includeNodes = Optional.of(""); + Optional excludeNodes = Optional.of(""); containerBalancerClient.startContainerBalancer(threshold, iterations, maxDatanodesPercentageToInvolvePerIteration, maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, - maxSizeLeavingSourceInGB); + maxSizeLeavingSourceInGB, balancingInterval, moveTimeout, + moveReplicationTimeout, networkTopologyEnable, includeNodes, + excludeNodes); running = containerBalancerClient.getContainerBalancerStatus(); assertTrue(running); @@ -105,7 +113,9 @@ public void testContainerBalancerCLIOperations() throws Exception { containerBalancerClient.startContainerBalancer(threshold, iterations, maxDatanodesPercentageToInvolvePerIteration, maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, - maxSizeLeavingSourceInGB); + maxSizeLeavingSourceInGB, balancingInterval, moveTimeout, + moveReplicationTimeout, networkTopologyEnable, includeNodes, + excludeNodes); running = containerBalancerClient.getContainerBalancerStatus(); assertTrue(running); @@ -115,4 +125,61 @@ public void testContainerBalancerCLIOperations() throws Exception { } //TODO: add more acceptance after container balancer is fully completed + + /** + * Test if Container Balancer CLI overrides default configs and + * options specified in the configs. 
+ */ + @Test + public void testIfCBCLIOverridesConfigs() throws Exception { + //Configurations added in ozone-site.xml + ozoneConf.setInt("hdds.container.balancer.iterations", 40); + ozoneConf.setInt("hdds.container.balancer.datanodes.involved.max.percentage.per.iteration", 30); + + boolean running = containerBalancerClient.getContainerBalancerStatus(); + assertFalse(running); + + //CLI option for iterations and balancing interval is not passed + Optional iterations = Optional.empty(); + Optional balancingInterval = Optional.empty(); + + //CLI options are passed + Optional threshold = Optional.of(0.1); + Optional maxDatanodesPercentageToInvolvePerIteration = + Optional.of(100); + Optional maxSizeToMovePerIterationInGB = Optional.of(1L); + Optional maxSizeEnteringTargetInGB = Optional.of(6L); + Optional maxSizeLeavingSourceInGB = Optional.of(6L); + Optional moveTimeout = Optional.of(65); + Optional moveReplicationTimeout = Optional.of(55); + Optional networkTopologyEnable = Optional.of(true); + Optional includeNodes = Optional.of(""); + Optional excludeNodes = Optional.of(""); + containerBalancerClient.startContainerBalancer(threshold, iterations, + maxDatanodesPercentageToInvolvePerIteration, + maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, + maxSizeLeavingSourceInGB, balancingInterval, moveTimeout, + moveReplicationTimeout, networkTopologyEnable, includeNodes, + excludeNodes); + running = containerBalancerClient.getContainerBalancerStatus(); + assertTrue(running); + + ContainerBalancerConfiguration config = cluster.getStorageContainerManager().getContainerBalancer().getConfig(); + + //If config value is not added in ozone-site.xml and CLI option is not passed + //then it takes the default configuration + assertEquals(70, config.getBalancingInterval().toMinutes()); + + //If config value is added in ozone-site.xml and CLI option is not passed + //then it takes the value from ozone-site.xml + assertEquals(40, config.getIterations()); + + //If config value is added in ozone-site.xml and CLI option is passed + //then it takes the CLI option. 
+ assertEquals(100, config.getMaxDatanodesPercentageToInvolvePerIteration()); + + containerBalancerClient.stopContainerBalancer(); + running = containerBalancerClient.getContainerBalancerStatus(); + assertFalse(running); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java index bb04c73ffe27..11be7be716b1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java @@ -22,8 +22,10 @@ import java.io.OutputStream; import java.nio.ByteBuffer; import java.util.HashMap; +import java.util.Map; import java.util.Scanner; +import com.google.common.collect.Maps; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; @@ -38,6 +40,8 @@ import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT; @@ -180,4 +184,29 @@ public static OzoneBucket createVolumeAndBucket(OzoneClient client, "Could not create unique volume/bucket " + "in " + attempts + " attempts"); } + + public static Map createKeys(MiniOzoneCluster cluster, int numOfKeys) + throws Exception { + Map keyLocationMap = Maps.newHashMap(); + + try (OzoneClient client = cluster.newClient()) { + OzoneBucket bucket = createVolumeAndBucket(client); + for (int i = 0; i < numOfKeys; i++) { + String keyName = RandomStringUtils.randomAlphabetic(5) + i; + createKey(bucket, keyName, RandomStringUtils.randomAlphabetic(5)); + keyLocationMap.put(keyName, lookupOmKeyInfo(cluster, bucket, keyName)); + } + } + return keyLocationMap; + } + + private static OmKeyInfo lookupOmKeyInfo(MiniOzoneCluster cluster, + OzoneBucket bucket, String key) throws IOException { + OmKeyArgs arg = new OmKeyArgs.Builder() + .setVolumeName(bucket.getVolumeName()) + .setBucketName(bucket.getName()) + .setKeyName(key) + .build(); + return cluster.getOzoneManager().lookupKey(arg); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java index a82a1a8be70a..77970ad4470b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ScmConfig; +import org.apache.hadoop.hdds.scm.client.ScmTopologyClient; import org.apache.hadoop.hdds.scm.ha.HASecurityUtils; import org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; @@ -46,6 +47,7 @@ import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.ozone.om.OMStorage; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.ScmBlockLocationTestingClient; import 
org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory; import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; @@ -312,6 +314,8 @@ public void testDelegationToken(boolean useIp) throws Exception { try { // Start OM om.setCertClient(new CertificateClientTestImpl(conf)); + om.setScmTopologyClient(new ScmTopologyClient( + new ScmBlockLocationTestingClient(null, null, 0))); om.start(); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); String username = ugi.getUserName(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java new file mode 100644 index 000000000000..9becc8b2591c --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java @@ -0,0 +1,87 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone; + +import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdds.scm.proxy.SCMBlockLocationFailoverProxyProvider; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Test; + +import org.junit.jupiter.api.Timeout; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; + +import java.io.IOException; +import java.util.concurrent.TimeoutException; + +import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * + * This class is to test the serialization/deserialization of cluster tree + * information from SCM. 
+ */ +@Timeout(300) +public class TestGetClusterTreeInformation { + + public static final Logger LOG = + LoggerFactory.getLogger(TestGetClusterTreeInformation.class); + private static int numOfDatanodes = 3; + private static MiniOzoneCluster cluster; + private static OzoneConfiguration conf; + private static StorageContainerManager scm; + + @BeforeAll + public static void init() throws IOException, TimeoutException, + InterruptedException { + conf = new OzoneConfiguration(); + cluster = MiniOzoneCluster.newHABuilder(conf) + .setNumOfOzoneManagers(3) + .setNumOfStorageContainerManagers(3) + .setNumDatanodes(numOfDatanodes) + .build(); + cluster.waitForClusterToBeReady(); + scm = cluster.getStorageContainerManager(); + } + + @AfterAll + public static void shutdown() { + if (cluster != null) { + cluster.shutdown(); + } + } + + @Test + public void testGetClusterTreeInformation() throws IOException { + SCMBlockLocationFailoverProxyProvider failoverProxyProvider = + new SCMBlockLocationFailoverProxyProvider(conf); + failoverProxyProvider.changeCurrentProxy(scm.getSCMNodeId()); + ScmBlockLocationProtocolClientSideTranslatorPB scmBlockLocationClient = + new ScmBlockLocationProtocolClientSideTranslatorPB( + failoverProxyProvider); + + InnerNode expectedInnerNode = (InnerNode) scm.getClusterMap().getNode(ROOT); + InnerNode actualInnerNode = scmBlockLocationClient.getNetworkTopology(); + assertEquals(expectedInnerNode, actualInnerNode); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java index 74d52c4a9457..275061ef7843 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java @@ -48,7 +48,7 @@ import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -68,7 +68,7 @@ static void setup(@TempDir File testDir) { conf = new OzoneConfiguration(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); - conf.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); + conf.setBoolean(HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); conf.set(ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL, "1s"); } @@ -114,13 +114,13 @@ void testContainerRandomPort(@TempDir File tempDir) throws IOException { // Each instance of SM will create an ozone container // that bounds to a random port. 
- ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true); - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, true); + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); ozoneConf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); List stateMachines = new ArrayList<>(); try { @@ -168,7 +168,7 @@ void testContainerRandomPort(@TempDir File tempDir) throws IOException { } // Turn off the random port flag and test again - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false); + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, false); try ( DatanodeStateMachine sm1 = new DatanodeStateMachine( randomDatanodeDetails(), ozoneConf); @@ -182,8 +182,8 @@ void testContainerRandomPort(@TempDir File tempDir) throws IOException { assertFalse(ports.add(sm2.getContainer().getReadChannel().getIPCPort())); assertFalse(ports.add(sm3.getContainer().getReadChannel().getIPCPort())); assertEquals(ports.iterator().next().intValue(), - conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT)); + conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT)); } } @@ -258,8 +258,10 @@ public void testMultipleDataDirs() throws Exception { String reservedSpace = "1B"; cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(1) - .setNumDataVolumes(3) - .setDatanodeReservedSpace(reservedSpace) + .setDatanodeFactory(UniformDatanodesFactory.newBuilder() + .setNumDataVolumes(3) + .setReservedSpace(reservedSpace) + .build()) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java deleted file mode 100644 index 0c51ba41311c..000000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java +++ /dev/null @@ -1,92 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.ozone.test.GenericTestUtils; - -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicReference; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertTrue; - -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Timeout; - -/** - * This class tests MiniOzoneHAClusterImpl. - */ -@Timeout(value = 300, unit = TimeUnit.SECONDS) -public class TestMiniOzoneOMHACluster { - - private MiniOzoneHAClusterImpl cluster = null; - private OzoneConfiguration conf; - private String omServiceId; - private int numOfOMs = 3; - - /** - * Create a MiniOzoneHAClusterImpl for testing. - * - * @throws Exception - */ - @BeforeEach - public void init() throws Exception { - conf = new OzoneConfiguration(); - omServiceId = "omServiceId1"; - conf.setBoolean(OZONE_ACL_ENABLED, true); - conf.set(OzoneConfigKeys.OZONE_ADMINISTRATORS, - OZONE_ADMINISTRATORS_WILDCARD); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) - .setOMServiceId(omServiceId) - .setNumOfOzoneManagers(numOfOMs) - .build(); - cluster.waitForClusterToBeReady(); - } - - /** - * Shutdown MiniOzoneHAClusterImpl. 
- */ - @AfterEach - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testGetOMLeader() throws InterruptedException, TimeoutException { - AtomicReference ozoneManager = new AtomicReference<>(); - // Wait for OM leader election to finish - GenericTestUtils.waitFor(() -> { - OzoneManager om = cluster.getOMLeader(); - ozoneManager.set(om); - return om != null; - }, 100, 120000); - assertNotNull(ozoneManager, "Timed out waiting OM leader election to finish: " - + "no leader or more than one leader."); - assertTrue(ozoneManager.get().isLeaderReady(), "Should have gotten the leader!"); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java index 852f351ee25a..daeb3a7b2d74 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java @@ -103,13 +103,12 @@ public static void init() throws Exception { private static void startCluster() throws IOException, TimeoutException, InterruptedException { OzoneManager.setTestSecureOmFlag(true); - MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId(scmServiceId) .setOMServiceId(omServiceId) - .setNumDatanodes(3) .setNumOfStorageContainerManagers(3) .setNumOfOzoneManagers(3); - cluster = (MiniOzoneHAClusterImpl) builder.build(); + cluster = builder.build(); cluster.waitForClusterToBeReady(); } @@ -141,11 +140,11 @@ private CompleteMultipartUploadRequest.Part uploadPart(String uploadID, Response response = REST.put(BUCKET, KEY, content.length(), partNumber, uploadID, body); assertEquals(200, response.getStatus()); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); CompleteMultipartUploadRequest.Part part = new CompleteMultipartUploadRequest.Part(); - part.seteTag(response.getHeaderString("ETag")); + part.setETag(response.getHeaderString(OzoneConsts.ETAG)); part.setPartNumber(partNumber); return part; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java new file mode 100644 index 000000000000..cef872597e43 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java @@ -0,0 +1,187 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.ozone; + +import com.google.common.collect.ImmutableMap; +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.hdds.DFSConfigKeysLegacy; +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.HddsTestUtils; +import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; +import org.apache.hadoop.hdds.scm.ha.SCMContext; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; +import org.apache.hadoop.hdds.scm.server.SCMConfigurator; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.net.StaticMapping; + +import org.apache.hadoop.ozone.om.KeyManagerImpl; +import org.apache.hadoop.ozone.om.OmTestManagers; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.ozone.test.GenericTestUtils; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import java.io.File; +import java.util.List; +import java.util.Map; +import java.util.ArrayList; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY; +import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; +import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_LEVEL; +import static org.mockito.Mockito.mock; + +/** + * {@link org.apache.hadoop.hdds.scm.server.TestSCMBlockProtocolServer} + * sortDatanodes tests for + * {@link org.apache.hadoop.ozone.om.KeyManagerImpl#sortDatanodes(List, String)}. 
+ */ +@Timeout(300) +public class TestOMSortDatanodes { + + private static OzoneConfiguration config; + private static StorageContainerManager scm; + private static NodeManager nodeManager; + private static KeyManagerImpl keyManager; + private static StorageContainerLocationProtocol mockScmContainerClient; + private static OzoneManager om; + private static File dir; + private static final int NODE_COUNT = 10; + private static final Map EDGE_NODES = ImmutableMap.of( + "edge0", "/rack0", + "edge1", "/rack1" + ); + + @BeforeAll + public static void setup() throws Exception { + config = new OzoneConfiguration(); + dir = GenericTestUtils.getRandomizedTestDir(); + config.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString()); + config.set(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, + StaticMapping.class.getName()); + config.set(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, "true"); + List datanodes = new ArrayList<>(NODE_COUNT); + List nodeMapping = new ArrayList<>(NODE_COUNT); + for (int i = 0; i < NODE_COUNT; i++) { + DatanodeDetails dn = randomDatanodeDetails(); + final String rack = "/rack" + (i % 2); + nodeMapping.add(dn.getHostName() + "=" + rack); + nodeMapping.add(dn.getIpAddress() + "=" + rack); + datanodes.add(dn); + } + EDGE_NODES.forEach((n, r) -> nodeMapping.add(n + "=" + r)); + config.set(StaticMapping.KEY_HADOOP_CONFIGURED_NODE_MAPPING, + String.join(",", nodeMapping)); + + SCMConfigurator configurator = new SCMConfigurator(); + configurator.setSCMHAManager(SCMHAManagerStub.getInstance(true)); + configurator.setScmContext(SCMContext.emptyContext()); + scm = HddsTestUtils.getScm(config, configurator); + scm.start(); + scm.exitSafeMode(); + nodeManager = scm.getScmNodeManager(); + datanodes.forEach(dn -> nodeManager.register(dn, null, null)); + mockScmContainerClient = + mock(StorageContainerLocationProtocol.class); + OmTestManagers omTestManagers + = new OmTestManagers(config, scm.getBlockProtocolServer(), + mockScmContainerClient); + om = omTestManagers.getOzoneManager(); + keyManager = (KeyManagerImpl)omTestManagers.getKeyManager(); + } + + @AfterAll + public static void cleanup() throws Exception { + if (scm != null) { + scm.stop(); + scm.join(); + } + if (om != null) { + om.stop(); + } + FileUtils.deleteDirectory(dir); + } + + @Test + public void sortDatanodesRelativeToDatanode() { + for (DatanodeDetails dn : nodeManager.getAllNodes()) { + assertEquals(ROOT_LEVEL + 2, dn.getLevel()); + List sorted = + keyManager.sortDatanodes(nodeManager.getAllNodes(), nodeAddress(dn)); + assertEquals(dn, sorted.get(0), + "Source node should be sorted very first"); + assertRackOrder(dn.getNetworkLocation(), sorted); + } + } + + @Test + public void sortDatanodesRelativeToNonDatanode() { + for (Map.Entry entry : EDGE_NODES.entrySet()) { + assertRackOrder(entry.getValue(), + keyManager.sortDatanodes(nodeManager.getAllNodes(), entry.getKey())); + } + } + + @Test + public void testSortDatanodes() { + List nodes = nodeManager.getAllNodes(); + + // sort normal datanodes + String client; + client = nodeManager.getAllNodes().get(0).getIpAddress(); + List datanodeDetails = + keyManager.sortDatanodes(nodes, client); + assertEquals(NODE_COUNT, datanodeDetails.size()); + + // illegal client 1 + client += "X"; + datanodeDetails = keyManager.sortDatanodes(nodes, client); + assertEquals(NODE_COUNT, datanodeDetails.size()); + + // illegal client 2 + client = "/default-rack"; + datanodeDetails = keyManager.sortDatanodes(nodes, client); + assertEquals(NODE_COUNT, datanodeDetails.size()); + } + + private 
static void assertRackOrder(String rack, List list) { + int size = list.size(); + for (int i = 0; i < size / 2; i++) { + assertEquals(rack, list.get(i).getNetworkLocation(), + "Nodes in the same rack should be sorted first"); + } + for (int i = size / 2; i < size; i++) { + assertNotEquals(rack, list.get(i).getNetworkLocation(), + "Nodes in the other rack should be sorted last"); + } + } + + private String nodeAddress(DatanodeDetails dn) { + boolean useHostname = config.getBoolean( + DFSConfigKeysLegacy.DFS_DATANODE_USE_DN_HOSTNAME, + DFSConfigKeysLegacy.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT); + return useHostname ? dn.getHostName() : dn.getIpAddress(); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java index 1be5b64ac87d..26ecb34c8866 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java @@ -50,10 +50,12 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.HddsTestUtils; +import org.apache.hadoop.hdds.scm.client.ScmTopologyClient; import org.apache.hadoop.hdds.scm.ha.HASecurityUtils; import org.apache.hadoop.hdds.scm.ha.SCMHANodeDetails; import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl; +import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; @@ -87,6 +89,7 @@ import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMStorage; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.ScmBlockLocationTestingClient; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; @@ -121,21 +124,16 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY; import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_GRPC_PORT_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PORT_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY; import static 
org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY; import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY; import static org.apache.hadoop.hdds.utils.HddsServerUtil.getScmSecurityClient; -import static org.apache.hadoop.net.ServerSocketUtil.getPort; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; @@ -170,6 +168,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -179,7 +178,6 @@ import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; @@ -209,6 +207,7 @@ final class TestSecureOzoneCluster { private File testUserKeytab; private String testUserPrincipal; private StorageContainerManager scm; + private ScmBlockLocationProtocol scmBlockClient; private OzoneManager om; private HddsProtos.OzoneManagerDetailsProto omInfo; private String host; @@ -227,19 +226,14 @@ void init() { conf = new OzoneConfiguration(); conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost"); - conf.setInt(OZONE_SCM_CLIENT_PORT_KEY, - getPort(OZONE_SCM_CLIENT_PORT_DEFAULT, 100)); - conf.setInt(OZONE_SCM_DATANODE_PORT_KEY, - getPort(OZONE_SCM_DATANODE_PORT_DEFAULT, 100)); - conf.setInt(OZONE_SCM_BLOCK_CLIENT_PORT_KEY, - getPort(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, 100)); - conf.setInt(OZONE_SCM_SECURITY_SERVICE_PORT_KEY, - getPort(OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT, 100)); - // use the same base ports as MiniOzoneHACluster - conf.setInt(OZONE_SCM_RATIS_PORT_KEY, getPort(1200, 100)); - conf.setInt(OZONE_SCM_GRPC_PORT_KEY, getPort(1201, 100)); + conf.setInt(OZONE_SCM_CLIENT_PORT_KEY, getFreePort()); + conf.setInt(OZONE_SCM_DATANODE_PORT_KEY, getFreePort()); + conf.setInt(OZONE_SCM_BLOCK_CLIENT_PORT_KEY, getFreePort()); + conf.setInt(OZONE_SCM_SECURITY_SERVICE_PORT_KEY, getFreePort()); + conf.setInt(OZONE_SCM_RATIS_PORT_KEY, getFreePort()); + conf.setInt(OZONE_SCM_GRPC_PORT_KEY, getFreePort()); conf.set(OZONE_OM_ADDRESS_KEY, - InetAddress.getLocalHost().getCanonicalHostName() + ":1202"); + InetAddress.getLocalHost().getCanonicalHostName() + ":" + getFreePort()); conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); DefaultMetricsSystem.setMiniClusterMode(true); @@ -265,6 +259,7 @@ void init() { clusterId = UUID.randomUUID().toString(); scmId = UUID.randomUUID().toString(); omId = UUID.randomUUID().toString(); + scmBlockClient = new ScmBlockLocationTestingClient(null, null, 0); startMiniKdc(); setSecureConfig(); @@ -277,7 +272,7 @@ void init() { } @AfterEach - void stop() { + void stop() throws Exception { try { stopMiniKdc(); if (scm != null) { @@ -610,6 +605,7 @@ void testAccessControlExceptionOnClient() throws Exception { setupOm(conf); om.setCertClient(new CertificateClientTestImpl(conf)); + om.setScmTopologyClient(new 
ScmTopologyClient(scmBlockClient)); om.start(); } catch (Exception ex) { // Expects timeout failure from scmClient in om but om user login via @@ -677,6 +673,7 @@ void testDelegationTokenRenewal() throws Exception { setupOm(conf); OzoneManager.setTestSecureOmFlag(true); om.setCertClient(new CertificateClientTestImpl(conf)); + om.setScmTopologyClient(new ScmTopologyClient(scmBlockClient)); om.start(); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); @@ -764,6 +761,7 @@ void testGetSetRevokeS3Secret() throws Exception { setupOm(conf); // Start OM om.setCertClient(new CertificateClientTestImpl(conf)); + om.setScmTopologyClient(new ScmTopologyClient(scmBlockClient)); om.start(); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); String username = ugi.getUserName(); @@ -875,11 +873,16 @@ void testSecureOmReInit() throws Exception { assertThat(logOutput) .doesNotContain("Successfully stored SCM signed certificate"); + if (om.stop()) { + om.join(); + } + conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); conf.setBoolean(OZONE_OM_S3_GPRC_SERVER_ENABLED, true); + conf.set(OZONE_OM_ADDRESS_KEY, + InetAddress.getLocalHost().getCanonicalHostName() + ":" + getFreePort()); OzoneManager.omInit(conf); - om.stop(); om = OzoneManager.createOm(conf); assertNotNull(om.getCertificateClient()); @@ -1000,6 +1003,7 @@ void testCertificateRotation() throws Exception { // create Ozone Manager instance, it will start the monitor task conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost"); om = OzoneManager.createOm(conf); + om.setScmTopologyClient(new ScmTopologyClient(scmBlockClient)); om.setCertClient(client); // check after renew, client will have the new cert ID @@ -1165,6 +1169,7 @@ void testCertificateRotationUnRecoverableFailure() throws Exception { // create Ozone Manager instance, it will start the monitor task conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost"); om = OzoneManager.createOm(conf); + om.setScmTopologyClient(new ScmTopologyClient(scmBlockClient)); om.setCertClient(mockClient); // check error message during renew @@ -1203,6 +1208,7 @@ void testDelegationTokenRenewCrossCertificateRenew() throws Exception { String omCertId1 = omCert.getSerialNumber().toString(); // Start OM om.setCertClient(certClient); + om.setScmTopologyClient(new ScmTopologyClient(scmBlockClient)); om.start(); GenericTestUtils.waitFor(() -> om.isLeaderReady(), 100, 10000); @@ -1346,27 +1352,16 @@ void testOMGrpcServerCertificateRenew() throws Exception { } // get new client, it should succeed. - try { - OzoneClient client1 = OzoneClientFactory.getRpcClient(conf); - client1.close(); - } catch (Exception e) { - System.out.println("OzoneClientFactory.getRpcClient failed for " + - e.getMessage()); - fail("Create client should succeed for certificate is renewed"); - } + OzoneClient client1 = OzoneClientFactory.getRpcClient(conf); + client1.close(); + // Wait for old OM certificate to expire GenericTestUtils.waitFor(() -> omCert.getNotAfter().before(new Date()), 500, certLifetime * 1000); // get new client, it should succeed too. 
- try { - OzoneClient client1 = OzoneClientFactory.getRpcClient(conf); - client1.close(); - } catch (Exception e) { - System.out.println("OzoneClientFactory.getRpcClient failed for " + - e.getMessage()); - fail("Create client should succeed for certificate is renewed"); - } + OzoneClient client2 = OzoneClientFactory.getRpcClient(conf); + client2.close(); } finally { OzoneManager.setUgi(null); GrpcOmTransport.setCaCerts(null); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java new file mode 100644 index 000000000000..8f79605ab051 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java @@ -0,0 +1,167 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone; + +import org.apache.hadoop.hdds.conf.ConfigurationTarget; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage; +import org.apache.hadoop.ozone.container.replication.ReplicationServer; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.hadoop.hdds.DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_CLIENT_ADDRESS_KEY; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT; +import static org.apache.ozone.test.GenericTestUtils.PortAllocator.anyHostWithFreePort; +import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort; + +/** + * Creates datanodes with similar configuration (same number of volumes, same layout version, etc.). 
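+ * Each datanode gets its own metadata, data and Ratis directories under the cluster metadata directory, and its ports are allocated via PortAllocator, so concurrently created datanodes do not collide.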
+ */ +public class UniformDatanodesFactory implements MiniOzoneCluster.DatanodeFactory { + + private final AtomicInteger nodesCreated = new AtomicInteger(); + + private final int numDataVolumes; + private final String reservedSpace; + private final Integer layoutVersion; + + protected UniformDatanodesFactory(Builder builder) { + numDataVolumes = builder.numDataVolumes; + layoutVersion = builder.layoutVersion; + reservedSpace = builder.reservedSpace; + } + + @Override + public OzoneConfiguration apply(OzoneConfiguration conf) throws IOException { + final int i = nodesCreated.incrementAndGet(); + final OzoneConfiguration dnConf = new OzoneConfiguration(conf); + + configureDatanodePorts(dnConf); + + Path baseDir = Paths.get(Objects.requireNonNull(conf.get(OZONE_METADATA_DIRS)), "datanode-" + i); + + Path metaDir = baseDir.resolve("meta"); + Files.createDirectories(metaDir); + dnConf.set(OZONE_METADATA_DIRS, metaDir.toString()); + + List dataDirs = new ArrayList<>(); + List reservedSpaceList = new ArrayList<>(); + for (int j = 0; j < numDataVolumes; j++) { + Path dir = baseDir.resolve("data-" + j); + Files.createDirectories(dir); + dataDirs.add(dir.toString()); + if (reservedSpace != null) { + reservedSpaceList.add(dir + ":" + reservedSpace); + } + } + String reservedSpaceString = String.join(",", reservedSpaceList); + String listOfDirs = String.join(",", dataDirs); + dnConf.set(DFS_DATANODE_DATA_DIR_KEY, listOfDirs); + dnConf.set(HDDS_DATANODE_DIR_KEY, listOfDirs); + dnConf.set(HDDS_DATANODE_DIR_DU_RESERVED, reservedSpaceString); + + Path ratisDir = baseDir.resolve("ratis"); + Files.createDirectories(ratisDir); + dnConf.set(HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, ratisDir.toString()); + + if (layoutVersion != null) { + DatanodeLayoutStorage layoutStorage = new DatanodeLayoutStorage( + dnConf, UUID.randomUUID().toString(), layoutVersion); + layoutStorage.initialize(); + } + + return dnConf; + } + + private void configureDatanodePorts(ConfigurationTarget conf) { + conf.set(HDDS_REST_HTTP_ADDRESS_KEY, anyHostWithFreePort()); + conf.set(HDDS_DATANODE_HTTP_ADDRESS_KEY, anyHostWithFreePort()); + conf.set(HDDS_DATANODE_CLIENT_ADDRESS_KEY, anyHostWithFreePort()); + conf.setInt(HDDS_CONTAINER_IPC_PORT, getFreePort()); + conf.setInt(HDDS_CONTAINER_RATIS_IPC_PORT, getFreePort()); + conf.setInt(HDDS_CONTAINER_RATIS_ADMIN_PORT, getFreePort()); + conf.setInt(HDDS_CONTAINER_RATIS_SERVER_PORT, getFreePort()); + conf.setInt(HDDS_CONTAINER_RATIS_DATASTREAM_PORT, getFreePort()); + conf.setFromObject(new ReplicationServer.ReplicationConfig().setPort(getFreePort())); + } + + public static Builder newBuilder() { + return new Builder(); + } + + /** + * Builder for UniformDatanodesFactory. + */ + public static class Builder { + + private int numDataVolumes = 1; + private String reservedSpace; + private Integer layoutVersion; + + /** + * Sets the number of data volumes per datanode. + */ + public Builder setNumDataVolumes(int n) { + numDataVolumes = n; + return this; + } + + /** + * Sets the reserved space + * {@link org.apache.hadoop.hdds.scm.ScmConfigKeys#HDDS_DATANODE_DIR_DU_RESERVED} + * for each volume in each datanode. + * @param reservedSpace String that contains the numeric size value and ends with a + * {@link org.apache.hadoop.hdds.conf.StorageUnit} suffix. For example, "50GB". 
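+ * If the reserved space is not set (the default), no per-volume reservation is configured.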
+ * @see org.apache.hadoop.ozone.container.common.volume.VolumeInfo + */ + public Builder setReservedSpace(String reservedSpace) { + this.reservedSpace = reservedSpace; + return this; + } + + public Builder setLayoutVersion(int layoutVersion) { + this.layoutVersion = layoutVersion; + return this; + } + + public UniformDatanodesFactory build() { + return new UniformDatanodesFactory(this); + } + + } + +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java index f2a079ca80ca..e2a15595b553 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.ozone.client; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertThrows; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -36,10 +37,10 @@ public class TestOzoneClientFactory { public void testRemoteException() { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); Exception e = assertThrows(Exception.class, () -> { MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) - .setTotalPipelineNumLimit(10) .build(); String omPort = cluster.getOzoneManager().getRpcPort(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java index 0b0b2586c9e2..b40b0bbcc626 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; @@ -121,6 +122,7 @@ protected static void init(boolean zeroCopyEnabled) throws Exception { TimeUnit.SECONDS); conf.setBoolean(OzoneConfigKeys.OZONE_EC_GRPC_ZERO_COPY_ENABLED, zeroCopyEnabled); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) @@ -129,8 +131,9 @@ protected static void init(boolean zeroCopyEnabled) throws Exception { .setStreamBufferMaxSize(maxFlushSize) .applyTo(conf); - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10) - .setTotalPipelineNumLimit(10).build(); + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(10) + .build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(conf); objectStore = client.getObjectStore(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java index e7c8be170ca1..d7ce08338db8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.client.rpc; -import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; @@ -85,8 +84,6 @@ private void startCluster(OzoneConfiguration conf) throws Exception { blockSize = 2 * maxFlushSize; // Make sure the pipeline does not get destroyed quickly - conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, - 60, TimeUnit.SECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 60000, TimeUnit.SECONDS); DatanodeRatisServerConfig ratisServerConfig = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java index c0ae49f3bf41..d668bb4b6522 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java @@ -49,6 +49,7 @@ HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys. HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys .HDDS_SCM_SAFEMODE_PIPELINE_CREATION; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; @@ -84,10 +85,11 @@ public static void init() throws Exception { conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setQuietMode(false); conf.setBoolean(HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).setHbInterval(200) + MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java index 8bb791bb103e..90a3f1d68933 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java @@ -17,9 +17,11 @@ package org.apache.hadoop.ozone.client.rpc; +import org.apache.hadoop.hdds.DatanodeVersion; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientManager; @@ -28,6 +30,7 @@ import 
org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.ClientConfigForTesting; +import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.ObjectStore; @@ -45,6 +48,7 @@ import java.io.IOException; import java.nio.ByteBuffer; +import java.util.List; import java.util.UUID; import java.util.concurrent.TimeUnit; @@ -70,6 +74,7 @@ public class TestBlockDataStreamOutput { private static String volumeName; private static String bucketName; private static String keyString; + private static final int DN_OLD_VERSION = DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE.toProtoValue(); /** * Create a MiniDFSCluster for testing. @@ -105,7 +110,7 @@ public static void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setTotalPipelineNumLimit(3) + .setDatanodeCurrentVersion(DN_OLD_VERSION) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key @@ -270,4 +275,25 @@ public void testTotalAckDataLength() throws Exception { assertEquals(dataLength, stream.getTotalAckDataLength()); } + @Test + public void testDatanodeVersion() throws Exception { + // Verify all DNs internally have versions set correctly + List dns = cluster.getHddsDatanodes(); + for (HddsDatanodeService dn : dns) { + DatanodeDetails details = dn.getDatanodeDetails(); + assertEquals(DN_OLD_VERSION, details.getCurrentVersion()); + } + + String keyName = getKeyName(); + OzoneDataStreamOutput key = createKey(keyName, ReplicationType.RATIS, 0); + KeyDataStreamOutput keyDataStreamOutput = (KeyDataStreamOutput) key.getByteBufStreamOutput(); + BlockDataStreamOutputEntry stream = keyDataStreamOutput.getStreamEntries().get(0); + + // Now check 3 DNs in a random pipeline returns the correct DN versions + List streamDnDetails = stream.getPipeline().getNodes(); + for (DatanodeDetails details : streamDnDetails) { + assertEquals(DN_OLD_VERSION, details.getCurrentVersion()); + } + } + } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java index 1e9cefbaa481..ce5432739cbd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java @@ -80,8 +80,13 @@ class TestBlockOutputStream { static MiniOzoneCluster createCluster() throws IOException, InterruptedException, TimeoutException { - OzoneConfiguration conf = new OzoneConfiguration(); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumType(ChecksumType.NONE); + clientConfig.setStreamBufferFlushDelay(false); + clientConfig.setEnablePutblockPiggybacking(true); + conf.setFromObject(clientConfig); + conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, TimeUnit.SECONDS); conf.setQuietMode(false); @@ -115,7 +120,6 @@ static MiniOzoneCluster createCluster() throws IOException, MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); @@ -398,7 +402,7 @@ void 
testWriteMoreThanChunkSize(boolean flushDelay) throws Exception { key.flush(); assertEquals(writeChunkCount + 2, metrics.getContainerOpCountMetrics(WriteChunk)); - assertEquals(putBlockCount + 1, + assertEquals(putBlockCount, metrics.getContainerOpCountMetrics(PutBlock)); assertEquals(pendingWriteChunkCount, metrics.getPendingContainerOpCountMetrics(WriteChunk)); @@ -427,9 +431,9 @@ void testWriteMoreThanChunkSize(boolean flushDelay) throws Exception { metrics.getPendingContainerOpCountMetrics(PutBlock)); assertEquals(writeChunkCount + 2, metrics.getContainerOpCountMetrics(WriteChunk)); - assertEquals(putBlockCount + 2, + assertEquals(putBlockCount + 1, metrics.getContainerOpCountMetrics(PutBlock)); - assertEquals(totalOpCount + 4, metrics.getTotalOpCount()); + assertEquals(totalOpCount + 3, metrics.getTotalOpCount()); assertEquals(0, keyOutputStream.getStreamEntries().size()); validateData(keyName, data1, client.getObjectStore(), VOLUME, BUCKET); @@ -494,9 +498,9 @@ void testWriteMoreThanFlushSize(boolean flushDelay) throws Exception { metrics.getPendingContainerOpCountMetrics(PutBlock)); assertEquals(writeChunkCount + 3, metrics.getContainerOpCountMetrics(WriteChunk)); - assertEquals(putBlockCount + 2, + assertEquals(putBlockCount + 1, metrics.getContainerOpCountMetrics(PutBlock)); - assertEquals(totalOpCount + 5, metrics.getTotalOpCount()); + assertEquals(totalOpCount + 4, metrics.getTotalOpCount()); assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); // make sure the bufferPool is empty assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); @@ -687,9 +691,9 @@ void testWriteMoreThanMaxFlushSize(boolean flushDelay) throws Exception { assertEquals(writeChunkCount + 5, metrics.getContainerOpCountMetrics(WriteChunk)); // The previous flush did not trigger any action with flushDelay enabled - assertEquals(putBlockCount + (flushDelay ? 3 : 4), + assertEquals(putBlockCount + (flushDelay ? 2 : 3), metrics.getContainerOpCountMetrics(PutBlock)); - assertEquals(totalOpCount + (flushDelay ? 8 : 9), + assertEquals(totalOpCount + (flushDelay ? 
7 : 8), metrics.getTotalOpCount()); assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); assertEquals(0, blockOutputStream.getCommitIndex2flushedDataMap().size()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java index fe08b9e0f4ba..78a4e78647eb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java @@ -59,6 +59,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; @@ -115,11 +116,12 @@ public static void init() throws Exception { replicationConf.setInterval(Duration.ofMillis(containerReportInterval)); conf.setFromObject(replicationConf); conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 6); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(4) - .setTotalPipelineNumLimit(6).setHbInterval(200) .build(); cluster.waitForClusterToBeReady(); cluster.getStorageContainerManager().getReplicationManager().start(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java index 84b7579cd01d..3f1c31edfe70 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java @@ -53,6 +53,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -92,6 +93,7 @@ public void setup() throws Exception { baseDir.mkdirs(); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true); // conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, @@ -101,7 +103,7 @@ public void setup() throws Exception { conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); conf.setQuietMode(false); OzoneManager.setTestSecureOmFlag(true); - conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); + 
conf.setLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); conf.set(OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION, "2s"); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s"); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, "5s"); @@ -113,7 +115,6 @@ public void setup() throws Exception { // conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.toString()); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1) - .setHbInterval(200) .setCertificateClient(new CertificateClientTestImpl(conf)) .setSecretKeyClient(new SecretKeyTestClient()) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java index 1d0f25b3a041..eea068a8742f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java @@ -51,6 +51,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; @@ -94,6 +95,8 @@ public void setup() throws Exception { conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1000, TimeUnit.SECONDS); conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 1); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); + DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setFollowerSlownessTimeout(Duration.ofSeconds(1000)); @@ -118,7 +121,6 @@ public void setup() throws Exception { conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) - .setHbInterval(200) .build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java index 2c11177e5eaf..b6eaca8e80d0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java @@ -83,6 +83,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.QUASI_CLOSED; import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.UNHEALTHY; @@ -143,6 +144,7 @@ public static void init() throws Exception { TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); conf.set(OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION, "2s"); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s"); @@ -166,10 +168,10 @@ public static void init() throws Exception { raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(20)); conf.setFromObject(raftClientConfig); - conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); + conf.setLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); conf.setQuietMode(false); cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10).setHbInterval(200) + MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10) .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 60000); @@ -307,9 +309,9 @@ public void testContainerStateMachineFailures() throws Exception { // restart the hdds datanode, container should not in the regular set OzoneConfiguration config = dn.getConf(); final String dir = config.get(OzoneConfigKeys. - DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR) + HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR) + UUID.randomUUID(); - config.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + config.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails()); cluster.restartHddsDatanode(dn.getDatanodeDetails(), false); ozoneContainer = cluster.getHddsDatanodes().get(index) @@ -371,9 +373,9 @@ public void testUnhealthyContainer() throws Exception { OzoneConfiguration config = dn.getConf(); final String dir = config.get(OzoneConfigKeys. 
- DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR) + HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR) + UUID.randomUUID(); - config.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + config.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails()); // restart the hdds datanode and see if the container is listed in the // in the missing container set and not in the regular set diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java index 23ab89b80c65..229059d84ad1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java @@ -56,6 +56,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertSame; @@ -101,10 +102,11 @@ public void setup() throws Exception { TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); conf.setQuietMode(false); OzoneManager.setTestSecureOmFlag(true); - conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); + conf.setLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); // conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.toString()); conf.set(OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION, "2s"); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s"); @@ -119,7 +121,6 @@ public void setup() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1) - .setHbInterval(200) .setCertificateClient(new CertificateClientTestImpl(conf)) .setSecretKeyClient(new SecretKeyTestClient()) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java index 97a3047bfdb0..d48df574a94e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java @@ -50,6 +50,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; @@ -89,6 +90,7 @@ public void setup() throws Exception { TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); @@ -121,13 +123,11 @@ public void setup() throws Exception { .setStreamBufferMaxSize(MAX_FLUSH_SIZE) .applyTo(conf); - conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); + conf.setLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); conf.setQuietMode(false); - cluster = - MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) - .setHbInterval(200) - .build(); + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(3) + .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 60000); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDatanodeVersion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDatanodeVersion.java new file mode 100644 index 000000000000..5e7d8a4b0525 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDatanodeVersion.java @@ -0,0 +1,143 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.client.rpc; + +import org.apache.hadoop.hdds.DatanodeVersion; +import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; +import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.ozone.ClientConfigForTesting; +import org.apache.hadoop.ozone.HddsDatanodeService; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientFactory; +import org.apache.hadoop.ozone.client.io.BlockDataStreamOutputEntry; +import org.apache.hadoop.ozone.client.io.KeyDataStreamOutput; +import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; +import org.apache.hadoop.ozone.container.TestHelper; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import java.util.List; +import java.util.UUID; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * Tests DatanodeVersion in client stream. + */ +@Timeout(120) +public class TestDatanodeVersion { + private static MiniOzoneCluster cluster; + private static OzoneConfiguration conf = new OzoneConfiguration(); + private static OzoneClient client; + private static ObjectStore objectStore; + private static int chunkSize; + private static int flushSize; + private static int maxFlushSize; + private static int blockSize; + private static String volumeName; + private static String bucketName; + private static final int DN_OLD_VERSION = DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE.toProtoValue(); + + /** + * Create a MiniDFSCluster for testing. + *
    + * Ozone is made active by setting OZONE_ENABLED = true + */ + @BeforeAll + public static void init() throws Exception { + chunkSize = 100; + flushSize = 2 * chunkSize; + maxFlushSize = 2 * flushSize; + blockSize = 2 * maxFlushSize; + + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + conf.setFromObject(clientConfig); + + conf.setQuietMode(false); + conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4, StorageUnit.MB); + + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) + .setBlockSize(blockSize) + .setChunkSize(chunkSize) + .setStreamBufferFlushSize(flushSize) + .setStreamBufferMaxSize(maxFlushSize) + .setDataStreamBufferFlushSize(maxFlushSize) + .setDataStreamMinPacketSize(chunkSize) + .setDataStreamWindowSize(5 * chunkSize) + .applyTo(conf); + + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(3) + .setDatanodeCurrentVersion(DN_OLD_VERSION) + .build(); + cluster.waitForClusterToBeReady(); + //the easiest way to create an open container is creating a key + client = OzoneClientFactory.getRpcClient(conf); + objectStore = client.getObjectStore(); + volumeName = "testblockoutputstream"; + bucketName = volumeName; + objectStore.createVolume(volumeName); + objectStore.getVolume(volumeName).createBucket(bucketName); + } + + /** + * Shutdown MiniDFSCluster. + */ + @AfterAll + public static void shutdown() { + IOUtils.closeQuietly(client); + if (cluster != null) { + cluster.shutdown(); + } + } + + static OzoneDataStreamOutput createKey(String keyName, ReplicationType type, long size) throws Exception { + return TestHelper.createStreamKey(keyName, type, size, objectStore, volumeName, bucketName); + } + + @Test + public void testStreamDatanodeVersion() throws Exception { + // Verify all DNs internally have versions set correctly + List dns = cluster.getHddsDatanodes(); + for (HddsDatanodeService dn : dns) { + DatanodeDetails details = dn.getDatanodeDetails(); + assertEquals(DN_OLD_VERSION, details.getCurrentVersion()); + } + + String keyName = UUID.randomUUID().toString(); + OzoneDataStreamOutput key = createKey(keyName, ReplicationType.RATIS, 0); + KeyDataStreamOutput keyDataStreamOutput = (KeyDataStreamOutput) key.getByteBufStreamOutput(); + BlockDataStreamOutputEntry stream = keyDataStreamOutput.getStreamEntries().get(0); + + // Now check 3 DNs in a random pipeline returns the correct DN versions + List streamDnDetails = stream.getPipeline().getNodes(); + for (DatanodeDetails details : streamDnDetails) { + assertEquals(DN_OLD_VERSION, details.getCurrentVersion()); + } + } + +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java index fa50dac64f7e..d4ff85736273 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java @@ -65,6 +65,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; @@ -101,15 +102,20 @@ public class TestDeleteWithInAdequateDN { */ @BeforeAll public static void init() throws Exception { + final int numOfDatanodes = 3; + conf = new OzoneConfiguration(); path = GenericTestUtils .getTempPath(TestContainerStateMachineFailures.class.getSimpleName()); File baseDir = new File(path); baseDir.mkdirs(); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 100, + TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, numOfDatanodes + FACTOR_THREE_PIPELINE_COUNT); // Make the stale, dead and server failure timeout higher so that a dead // node is not detecte at SCM as well as the pipeline close action // never gets initiated early at Datanode in the test. @@ -156,12 +162,8 @@ public static void init() throws Exception { conf.setFromObject(ratisClientConfig); conf.setQuietMode(false); - int numOfDatanodes = 3; cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numOfDatanodes) - .setTotalPipelineNumLimit( - numOfDatanodes + FACTOR_THREE_PIPELINE_COUNT) - .setHbInterval(100) .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(THREE, 60000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java index d1b20de88a86..5c0910ecdc2d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java @@ -111,11 +111,12 @@ private void init() throws Exception { conf.setFromObject(ratisClientConfig); conf.setTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 1, TimeUnit.SECONDS); conf.setBoolean( OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 2); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 15); DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); @@ -139,7 +140,7 @@ private void init() throws Exception { Collections.singleton(HddsUtils.getHostName(conf))).get(0), "/rack1"); cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(10).setTotalPipelineNumLimit(15).build(); + .setNumDatanodes(10).build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key client = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java index fadc06bd57bd..b4ad49a3ed5a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java @@ -107,11 +107,12 @@ private void init() throws Exception { conf.setFromObject(ratisClientConfig); conf.setTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 1, TimeUnit.SECONDS); conf.setBoolean( OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 2); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 15); DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); @@ -140,7 +141,6 @@ private void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(10) - .setTotalPipelineNumLimit(15) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java index 4ccdd0e2d4b3..51ebf3fa0ccd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java @@ -44,6 +44,7 @@ import java.io.IOException; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -73,8 +74,9 @@ public class TestHybridPipelineOnDatanode { @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 5); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3) - .setTotalPipelineNumLimit(5).build(); + .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key client = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java index b7b52d389bc3..34f85d8e9922 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java @@ -105,14 +105,13 @@ private void startCluster(int datanodes) throws Exception { conf.setFromObject(ratisClientConfig); conf.setTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 1, TimeUnit.SECONDS); conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2); conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(datanodes) - .setTotalPipelineNumLimit(0) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java index 919654d82a9b..5288bcb3cf21 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java @@ -22,6 +22,7 @@ import java.net.URI; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.time.Instant; import java.util.ArrayList; @@ -34,10 +35,14 @@ import java.util.UUID; import com.google.common.cache.Cache; +import javax.xml.bind.DatatypeConverter; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.kms.KMSClientProvider; import org.apache.hadoop.crypto.key.kms.server.MiniKMS; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; @@ -64,8 +69,10 @@ import org.apache.hadoop.ozone.client.SecretKeyTestClient; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -79,6 +86,14 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_ROOT; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.hdds.utils.ClusterContainersUtil.getContainerByID; +import static org.apache.hadoop.hdds.utils.ClusterContainersUtil.verifyOnDiskData; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; import static org.apache.ozone.test.GenericTestUtils.getTestStartTime; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; @@ -88,6 +103,7 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; @@ -120,6 +136,7 @@ class TestOzoneAtRestEncryption { private static final int DEFAULT_CRYPTO_BUFFER_SIZE = 8 * 1024; // 8KB 
// (this is the default Crypto Buffer size as determined by the config // hadoop.security.crypto.buffer.size) + private static MessageDigest eTagProvider; @BeforeAll static void init() throws Exception { @@ -169,6 +186,12 @@ static void init() throws Exception { // create test key createKey(TEST_KEY, cluster.getOzoneManager().getKmsProvider(), conf); + eTagProvider = MessageDigest.getInstance(OzoneConsts.MD5_HASH); + + final String rootPath = String.format("%s://%s/", + OZONE_OFS_URI_SCHEME, conf.get(OZONE_OM_ADDRESS_KEY)); + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + conf.setInt(OZONE_REPLICATION, 1); } @AfterAll @@ -190,6 +213,14 @@ static void shutdown() throws IOException { } } + static void reInitClient() throws IOException { + ozClient = OzoneClientFactory.getRpcClient(conf); + store = ozClient.getObjectStore(); + TestOzoneRpcClient.setOzClient(ozClient); + TestOzoneRpcClient.setStore(store); + } + + @ParameterizedTest @EnumSource void testPutKeyWithEncryption(BucketLayout bucketLayout) throws Exception { @@ -206,6 +237,7 @@ void testPutKeyWithEncryption(BucketLayout bucketLayout) throws Exception { createAndVerifyKeyData(bucket); createAndVerifyStreamKeyData(bucket); + createAndVerifyFileSystemData(bucket); } @ParameterizedTest @@ -252,6 +284,38 @@ static void createAndVerifyKeyData(OzoneBucket bucket) throws Exception { out.write(value.getBytes(StandardCharsets.UTF_8)); } verifyKeyData(bucket, keyName, value, testStartTime); + OzoneKeyDetails key1 = bucket.getKey(keyName); + + // Overwrite the key + try (OzoneOutputStream out = bucket.createKey(keyName, + value.getBytes(StandardCharsets.UTF_8).length, + ReplicationConfig.fromTypeAndFactor(RATIS, ONE), + new HashMap<>())) { + out.write(value.getBytes(StandardCharsets.UTF_8)); + } + OzoneKeyDetails key2 = bucket.getKey(keyName); + assertNotEquals(key1.getFileEncryptionInfo().toString(), key2.getFileEncryptionInfo().toString()); + } + + static void createAndVerifyFileSystemData( + OzoneBucket bucket) throws Exception { + // OBS does not support file system semantics. + if (bucket.getBucketLayout() == BucketLayout.OBJECT_STORE) { + return; + } + Instant testStartTime = getTestStartTime(); + String keyName = UUID.randomUUID().toString(); + String value = "sample value"; + + final String dir = OZONE_ROOT + bucket.getVolumeName() + + OZONE_URI_DELIMITER + bucket.getName(); + final Path file = new Path(dir, keyName); + try (FileSystem fs = FileSystem.get(conf)) { + try (FSDataOutputStream out = fs.create(file, true)) { + out.write(value.getBytes(StandardCharsets.UTF_8)); + } + } + verifyKeyData(bucket, keyName, value, testStartTime); } static void verifyKeyData(OzoneBucket bucket, String keyName, String value, @@ -272,7 +336,6 @@ static void verifyKeyData(OzoneBucket bucket, String keyName, String value, len = is.read(fileContent); } - assertEquals(len, value.length()); assertTrue(verifyRatisReplication(bucket.getVolumeName(), bucket.getName(), keyName, RATIS, @@ -280,6 +343,13 @@ static void verifyKeyData(OzoneBucket bucket, String keyName, String value, assertEquals(value, new String(fileContent, StandardCharsets.UTF_8)); assertFalse(key.getCreationTime().isBefore(testStartTime)); assertFalse(key.getModificationTime().isBefore(testStartTime)); + + long containerID = key.getOzoneKeyLocations().get(0) + .getContainerID(); + Container container = getContainerByID(cluster, containerID); + // the data stored on disk should not be the same as the input. 
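+ // since the bucket uses encryption at rest, the block bytes persisted by the datanode must differ from the plaintext value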
+ assertFalse(verifyOnDiskData(cluster, container, key, value), + "On disk block is written in clear text!"); } private OzoneBucket createVolumeAndBucket(String volumeName, @@ -439,6 +509,18 @@ void mpuOnePart(BucketLayout bucketLayout) throws Exception { createVolumeAndBucket(volumeName, bucketName, bucketLayout), 1); } + @ParameterizedTest + @EnumSource + void mpuOnePartInvalidUploadID(BucketLayout bucketLayout) throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + OMException e = assertThrows(OMException.class, () -> + testMultipartUploadWithEncryption( + createVolumeAndBucket(volumeName, bucketName, bucketLayout), 1, false, true) + ); + assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, e.getResult()); + } + @ParameterizedTest @EnumSource void mpuTwoParts(BucketLayout bucketLayout) throws Exception { @@ -516,12 +598,21 @@ private void testMultipartUploadWithEncryption(OzoneBucket bucket, private void testMultipartUploadWithEncryption(OzoneBucket bucket, int numParts, boolean isStream) throws Exception { + testMultipartUploadWithEncryption(bucket, numParts, isStream, false); + } + + private void testMultipartUploadWithEncryption(OzoneBucket bucket, + int numParts, boolean isStream, boolean invalidUploadID) throws Exception { String keyName = "mpu_test_key_" + numParts; // Initiate multipart upload String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationConfig.fromTypeAndFactor(RATIS, ONE)); + if (invalidUploadID) { + uploadID += "random1234"; + } + // Upload Parts Map partsMap = new TreeMap<>(); List partsData = new ArrayList<>(); @@ -631,14 +722,17 @@ private String uploadStreamPart(OzoneBucket bucket, String keyName, ByteBuffer dataBuffer = ByteBuffer.wrap(data); multipartStreamKey.write(dataBuffer, 0, length); + multipartStreamKey.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); multipartStreamKey.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = multipartStreamKey.getCommitUploadPartInfo(); assertNotNull(omMultipartCommitUploadPartInfo); - assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); - return omMultipartCommitUploadPartInfo.getPartName(); + assertNotNull(omMultipartCommitUploadPartInfo.getETag()); + return omMultipartCommitUploadPartInfo.getETag(); } private String uploadPart(OzoneBucket bucket, String keyName, @@ -646,14 +740,17 @@ private String uploadPart(OzoneBucket bucket, String keyName, OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, partNumber, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo(); assertNotNull(omMultipartCommitUploadPartInfo); - assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); - return omMultipartCommitUploadPartInfo.getPartName(); + assertNotNull(omMultipartCommitUploadPartInfo.getETag()); + return omMultipartCommitUploadPartInfo.getETag(); } private void completeMultipartUpload(OzoneBucket bucket, String keyName, @@ -691,9 +788,7 @@ void testGetKeyProvider() throws Exception { KeyProvider kp3 = ozClient.getObjectStore().getKeyProvider(); assertNotEquals(kp3, kpSpy); - // Restore ozClient and store - 
TestOzoneRpcClient.setOzClient(OzoneClientFactory.getRpcClient(conf)); - TestOzoneRpcClient.setStore(ozClient.getObjectStore()); + reInitClient(); } private static RepeatedOmKeyInfo getMatchedKeyInfo( diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java index a89e61769966..1e75a4d10a86 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java @@ -17,8 +17,14 @@ package org.apache.hadoop.ozone.client.rpc; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.HashMap; + +import javax.xml.bind.DatatypeConverter; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -78,6 +84,7 @@ import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -97,6 +104,7 @@ public class TestOzoneClientMultipartUploadWithFSO { private static ObjectStore store = null; private static MiniOzoneCluster cluster = null; private static OzoneClient ozClient = null; + private static MessageDigest eTagProvider; private String volumeName; private String bucketName; @@ -114,8 +122,10 @@ public class TestOzoneClientMultipartUploadWithFSO { @BeforeAll public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); OMRequestTestUtils.configureFSOptimizedPaths(conf, true); startCluster(conf); + eTagProvider = MessageDigest.getInstance(OzoneConsts.MD5_HASH); } /** @@ -135,7 +145,6 @@ public static void shutdown() throws IOException { static void startCluster(OzoneConfiguration conf) throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); ozClient = OzoneClientFactory.getRpcClient(conf); @@ -187,6 +196,9 @@ public void testInitiateMultipartUploadWithDefaultReplication() throws assertNotNull(multipartInfo); String uploadID = multipartInfo.getUploadID(); + assertEquals(volumeName, multipartInfo.getVolumeName()); + assertEquals(bucketName, multipartInfo.getBucketName()); + assertEquals(keyName, multipartInfo.getKeyName()); assertNotNull(multipartInfo.getUploadID()); // Call initiate multipart upload for the same key again, this should @@ -194,6 +206,9 @@ public void testInitiateMultipartUploadWithDefaultReplication() throws multipartInfo = bucket.initiateMultipartUpload(keyName); assertNotNull(multipartInfo); + assertEquals(volumeName, multipartInfo.getVolumeName()); + assertEquals(bucketName, multipartInfo.getBucketName()); + 
assertEquals(keyName, multipartInfo.getKeyName()); assertNotEquals(multipartInfo.getUploadID(), uploadID); assertNotNull(multipartInfo.getUploadID()); } @@ -207,13 +222,14 @@ public void testUploadPartWithNoOverride() throws IOException { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), 1, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream .getCommitUploadPartInfo(); assertNotNull(commitUploadPartInfo); - assertNotNull(commitUploadPartInfo.getPartName()); + assertNotNull(commitUploadPartInfo.getETag()); } @Test @@ -223,12 +239,12 @@ public void testUploadPartOverrideWithRatis() throws Exception { ReplicationType.RATIS, THREE); int partNumber = 1; - String partName = uploadPart(bucket, keyName, uploadID, partNumber, - sampleData.getBytes(UTF_8)); + Pair partNameAndETag = uploadPart(bucket, keyName, uploadID, + partNumber, sampleData.getBytes(UTF_8)); //Overwrite the part by creating part key with same part number. - String partNameNew = uploadPart(bucket, keyName, uploadID, partNumber, - "name".getBytes(UTF_8)); + Pair partNameAndETagNew = uploadPart(bucket, keyName, + uploadID, partNumber, "name".getBytes(UTF_8)); // PartName should be same from old part Name. // AWS S3 for same content generates same partName during upload part. @@ -238,7 +254,10 @@ public void testUploadPartOverrideWithRatis() throws Exception { // So, when a part is override partNames will still be same irrespective // of content in ozone s3. This will make S3 Mpu completeMPU pass when // comparing part names and large file uploads work using aws cp. 
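The change here, repeated throughout this patch, is that the multipart tests now collect both the part name and the part's ETag: the ETag is the lower-case hex MD5 of the part bytes, written into the output stream's metadata under OzoneConsts.ETAG before close(), and completeMultipartUpload is keyed by ETags rather than part names. A minimal sketch of that upload-part pattern, assuming the same client classes the tests already import (the uploadPartWithETag helper name is illustrative, not part of the patch):

    import java.io.IOException;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;
    import javax.xml.bind.DatatypeConverter;
    import org.apache.commons.lang3.tuple.Pair;
    import org.apache.hadoop.ozone.OzoneConsts;
    import org.apache.hadoop.ozone.client.OzoneBucket;
    import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
    import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;

    // Upload one part and return (partName, eTag). The caller feeds the eTag
    // values, not the part names, to bucket.completeMultipartUpload(...).
    static Pair<String, String> uploadPartWithETag(OzoneBucket bucket, String keyName,
        String uploadID, int partNumber, byte[] data)
        throws IOException, NoSuchAlgorithmException {
      OzoneOutputStream out = bucket.createMultipartKey(
          keyName, data.length, partNumber, uploadID);
      out.write(data, 0, data.length);
      // The ETag has to be in the stream metadata before close() commits the part.
      out.getMetadata().put(OzoneConsts.ETAG,
          DatatypeConverter.printHexBinary(
              MessageDigest.getInstance(OzoneConsts.MD5_HASH).digest(data)).toLowerCase());
      out.close();
      OmMultipartCommitUploadPartInfo info = out.getCommitUploadPartInfo();
      return Pair.of(info.getPartName(), info.getETag());
    }

Because the ETag is an MD5 of the content, two parts with identical bytes get the same ETag while an overwrite with different bytes gets a different one, which is exactly what the assertions below check.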
- assertEquals(partName, partNameNew, "Part names should be same"); + assertEquals(partNameAndETag.getKey(), partNameAndETagNew.getKey()); + + // ETags are not equal due to content differences + assertNotEquals(partNameAndETag.getValue(), partNameAndETagNew.getValue()); // old part bytes written needs discard and have only // new part bytes in quota for this bucket @@ -248,7 +267,8 @@ public void testUploadPartOverrideWithRatis() throws Exception { } @Test - public void testUploadTwiceWithEC() throws IOException { + public void testUploadTwiceWithEC() + throws IOException, NoSuchAlgorithmException { bucketName = UUID.randomUUID().toString(); bucket = getOzoneECBucket(bucketName); @@ -259,12 +279,12 @@ public void testUploadTwiceWithEC() throws IOException { String uploadID = multipartInfo.getUploadID(); int partNumber = 1; - String partName = uploadPart(bucket, keyName, uploadID, partNumber, - data); - - Map partsMap = new HashMap<>(); - partsMap.put(partNumber, partName); - bucket.completeMultipartUpload(keyName, uploadID, partsMap); + Pair partNameAndETag = uploadPart(bucket, keyName, uploadID, + partNumber, data); + + Map eTagsMap = new HashMap<>(); + eTagsMap.put(partNumber, partNameAndETag.getValue()); + bucket.completeMultipartUpload(keyName, uploadID, eTagsMap); long replicatedSize = QuotaUtil.getReplicatedSize(data.length, bucket.getReplicationConfig()); @@ -275,12 +295,12 @@ public void testUploadTwiceWithEC() throws IOException { multipartInfo = bucket.initiateMultipartUpload(keyName); uploadID = multipartInfo.getUploadID(); - partName = uploadPart(bucket, keyName, uploadID, partNumber, + partNameAndETag = uploadPart(bucket, keyName, uploadID, partNumber, data); - partsMap = new HashMap<>(); - partsMap.put(partNumber, partName); - bucket.completeMultipartUpload(keyName, uploadID, partsMap); + eTagsMap = new HashMap<>(); + eTagsMap.put(partNumber, partNameAndETag.getValue()); + bucket.completeMultipartUpload(keyName, uploadID, eTagsMap); // used sized should remain same, overwrite previous upload assertEquals(volume.getBucket(bucketName).getUsedBytes(), @@ -288,7 +308,8 @@ public void testUploadTwiceWithEC() throws IOException { } @Test - public void testUploadAbortWithEC() throws IOException { + public void testUploadAbortWithEC() + throws IOException, NoSuchAlgorithmException { byte[] data = generateData(81920, (byte) 97); bucketName = UUID.randomUUID().toString(); @@ -331,19 +352,19 @@ public void testMultipartUploadWithPartsLessThanMinSize() throws Exception { ONE); // Upload Parts - Map partsMap = new TreeMap<>(); + Map eTagsMap = new TreeMap<>(); // Uploading part 1 with less than min size - String partName = uploadPart(bucket, keyName, uploadID, 1, - "data".getBytes(UTF_8)); - partsMap.put(1, partName); + Pair partNameAndETag = uploadPart(bucket, keyName, uploadID, + 1, "data".getBytes(UTF_8)); + eTagsMap.put(1, partNameAndETag.getValue()); - partName = uploadPart(bucket, keyName, uploadID, 2, - "data".getBytes(UTF_8)); - partsMap.put(2, partName); + partNameAndETag = uploadPart(bucket, keyName, uploadID, 2, + "data".getBytes(UTF_8)); + eTagsMap.put(2, partNameAndETag.getValue()); // Complete multipart upload OzoneTestUtils.expectOmException(OMException.ResultCodes.ENTITY_TOO_SMALL, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); + () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMap)); } @Test @@ -354,22 +375,24 @@ public void testMultipartUploadWithDiscardedUnusedPartSize() byte[] data = generateData(10000000, (byte) 97); // Upload 
Parts - Map partsMap = new TreeMap<>(); + Map eTagsMap = new TreeMap<>(); - // Upload part 1 and add it to the partsMap for completing the upload. - String partName1 = uploadPart(bucket, keyName, uploadID, 1, data); - partsMap.put(1, partName1); + // Upload part 1 and add it to the eTagsMap for completing the upload. + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, data); + eTagsMap.put(1, partNameAndETag1.getValue()); - // Upload part 2 and add it to the partsMap for completing the upload. - String partName2 = uploadPart(bucket, keyName, uploadID, 2, data); - partsMap.put(2, partName2); + // Upload part 2 and add it to the eTagsMap for completing the upload. + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, data); + eTagsMap.put(2, partNameAndETag2.getValue()); - // Upload part 3 but do not add it to the partsMap. + // Upload part 3 but do not add it to the eTagsMap. uploadPart(bucket, keyName, uploadID, 3, data); - completeMultipartUpload(bucket, keyName, uploadID, partsMap); + completeMultipartUpload(bucket, keyName, uploadID, eTagsMap); - // Check the bucket size. Since part number 3 was not added to the partsMap, + // Check the bucket size. Since part number 3 was not added to the eTagsMap, // the unused part size should be discarded from the bucket size, // 30000000 - 10000000 = 20000000 long bucketSize = volume.getBucket(bucketName).getUsedBytes(); @@ -456,6 +479,9 @@ public void testCommitPartAfterCompleteUpload() throws Exception { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = @@ -464,10 +490,13 @@ public void testCommitPartAfterCompleteUpload() throws Exception { // Do not close output stream for part 2. 
ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 2, uploadID); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.write(data, 0, data.length); Map partsMap = new LinkedHashMap<>(); - partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName()); + partsMap.put(1, omMultipartCommitUploadPartInfo.getETag()); OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap); @@ -540,12 +569,13 @@ public void testAbortUploadSuccessWithParts() throws Exception { String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); - String partName = uploadPart(bucket, keyName, uploadID, 1, - "data".getBytes(UTF_8)); + Pair partNameAndETag = uploadPart(bucket, keyName, uploadID, + 1, "data".getBytes(UTF_8)); OMMetadataManager metadataMgr = cluster.getOzoneManager().getMetadataManager(); - String multipartKey = verifyUploadedPart(uploadID, partName, metadataMgr); + String multipartKey = verifyUploadedPart(uploadID, partNameAndETag.getKey(), + metadataMgr); bucket.abortMultipartUpload(keyName, uploadID); @@ -571,17 +601,17 @@ public void testListMultipartUploadParts() throws Exception { Map partsMap = new TreeMap<>(); String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); - String partName1 = uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(1, partName1); + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(1, partNameAndETag1.getKey()); - String partName2 = uploadPart(bucket, keyName, uploadID, 2, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(2, partName2); + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(2, partNameAndETag2.getKey()); - String partName3 = uploadPart(bucket, keyName, uploadID, 3, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(3, partName3); + Pair partNameAndETag3 = uploadPart(bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(3, partNameAndETag3.getKey()); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 3); @@ -639,7 +669,6 @@ private void verifyPartNamesInDB(Map partsMap, listPartNames.remove(partKeyName); } - assertThat(listPartNames).withFailMessage("Wrong partKeyName format in DB!").isEmpty(); } @@ -661,17 +690,17 @@ public void testListMultipartUploadPartsWithContinuation() Map partsMap = new TreeMap<>(); String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); - String partName1 = uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(1, partName1); + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(1, partNameAndETag1.getKey()); - String partName2 = uploadPart(bucket, keyName, uploadID, 2, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(2, partName2); + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(2, partNameAndETag2.getKey()); - String partName3 = 
uploadPart(bucket, keyName, uploadID, 3, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(3, partName3); + Pair partNameAndETag3 = uploadPart(bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(3, partNameAndETag3.getKey()); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 2); @@ -733,8 +762,8 @@ public void testListPartsWithPartMarkerGreaterThanPartCount() bucket.listParts(keyName, uploadID, 100, 2); // Should return empty - assertEquals(0, ozoneMultipartUploadPartListParts.getPartInfoList().size()); + assertEquals( RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), ozoneMultipartUploadPartListParts.getReplicationConfig()); @@ -869,27 +898,37 @@ private String initiateMultipartUpload(OzoneBucket oBucket, String kName, assertNotNull(multipartInfo); String uploadID = multipartInfo.getUploadID(); + assertEquals(volumeName, multipartInfo.getVolumeName()); + assertEquals(bucketName, multipartInfo.getBucketName()); + assertEquals(kName, multipartInfo.getKeyName()); assertNotNull(multipartInfo.getUploadID()); return uploadID; } - private String uploadPart(OzoneBucket oBucket, String kName, String - uploadID, int partNumber, byte[] data) throws IOException { + private Pair uploadPart(OzoneBucket oBucket, String kName, + String uploadID, int partNumber, + byte[] data) + throws IOException, NoSuchAlgorithmException { OzoneOutputStream ozoneOutputStream = oBucket.createMultipartKey(kName, data.length, partNumber, uploadID); - ozoneOutputStream.write(data, 0, - data.length); + ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo(); assertNotNull(omMultipartCommitUploadPartInfo); + assertNotNull(omMultipartCommitUploadPartInfo.getETag()); + assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); - return omMultipartCommitUploadPartInfo.getPartName(); + return Pair.of(omMultipartCommitUploadPartInfo.getPartName(), + omMultipartCommitUploadPartInfo.getETag()); } private void completeMultipartUpload(OzoneBucket oBucket, String kName, @@ -898,6 +937,11 @@ private void completeMultipartUpload(OzoneBucket oBucket, String kName, .completeMultipartUpload(kName, uploadID, partsMap); assertNotNull(omMultipartUploadCompleteInfo); + assertEquals(omMultipartUploadCompleteInfo.getBucket(), oBucket + .getName()); + assertEquals(omMultipartUploadCompleteInfo.getVolume(), oBucket + .getVolumeName()); + assertEquals(omMultipartUploadCompleteInfo.getKey(), kName); assertNotNull(omMultipartUploadCompleteInfo.getHash()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java index c3e8a8d461b8..ad59621e0c75 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java @@ -109,7 +109,6 @@ public void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) 
.setNumDatanodes(5) - .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java index cd99382f300b..3e1667a38a68 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java @@ -118,7 +118,6 @@ public void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index a87d05321e27..a77edd3abc59 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -17,14 +17,14 @@ package org.apache.hadoop.ozone.client.rpc; -import java.io.File; import java.io.IOException; import java.io.InputStream; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.security.PrivilegedExceptionAction; import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; -import java.util.BitSet; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; @@ -38,6 +38,9 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Stream; +import javax.xml.bind.DatatypeConverter; +import org.apache.commons.codec.digest.DigestUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig.EcCodec; @@ -91,7 +94,6 @@ import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmFailoverProxyUtil; @@ -122,13 +124,14 @@ import org.apache.ozone.test.tag.Flaky; import static java.nio.charset.StandardCharsets.UTF_8; -import org.apache.commons.io.FileUtils; + import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.RandomUtils; import static org.apache.hadoop.hdds.StringUtils.string2Bytes; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE; import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.ozone.OmUtils.LOG; import static 
org.apache.hadoop.ozone.OmUtils.MAX_TRXN_ID; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; @@ -136,7 +139,11 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.DEFAULT_OM_UPDATE_ID; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.OzoneConsts.GB; +import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH; +import static org.apache.hadoop.hdds.utils.ClusterContainersUtil.corruptData; +import static org.apache.hadoop.hdds.utils.ClusterContainersUtil.getContainerByID; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PARTIAL_RENAME; @@ -160,6 +167,7 @@ import static org.slf4j.event.Level.DEBUG; import org.apache.ozone.test.tag.Unhealthy; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.MethodOrderer; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestMethodOrder; @@ -186,13 +194,19 @@ public abstract class TestOzoneRpcClientAbstract { private static String remoteUserName = "remoteUser"; private static String remoteGroupName = "remoteGroup"; private static OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); + DEFAULT, READ); private static OzoneAcl defaultGroupAcl = new OzoneAcl(GROUP, remoteGroupName, - READ, DEFAULT); + DEFAULT, READ); private static OzoneAcl inheritedUserAcl = new OzoneAcl(USER, remoteUserName, - READ, ACCESS); + ACCESS, READ); private static OzoneAcl inheritedGroupAcl = new OzoneAcl(GROUP, - remoteGroupName, READ, ACCESS); + remoteGroupName, ACCESS, READ); + private static MessageDigest eTagProvider; + + @BeforeAll + public static void initialize() throws NoSuchAlgorithmException { + eTagProvider = MessageDigest.getInstance(MD5_HASH); + } /** * Create a MiniOzoneCluster for testing. @@ -203,6 +217,7 @@ static void startCluster(OzoneConfiguration conf) throws Exception { // Reduce long wait time in MiniOzoneClusterImpl#waitForHddsDatanodesStop // for testZReadKeyWithUnhealthyContainerReplica. 
conf.set("ozone.scm.stale.node.interval", "10s"); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); ClientConfigForTesting.newBuilder(StorageUnit.MB) .setDataStreamMinPacketSize(1) @@ -210,7 +225,6 @@ static void startCluster(OzoneConfiguration conf) throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(14) - .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); ozClient = OzoneClientFactory.getRpcClient(conf); @@ -659,13 +673,11 @@ public void testCreateBucketWithAcls() String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", - READ, ACCESS); - List acls = new ArrayList<>(); - acls.add(userAcl); + ACCESS, READ); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); - BucketArgs.Builder builder = BucketArgs.newBuilder(); - builder.setAcls(acls); + BucketArgs.Builder builder = BucketArgs.newBuilder() + .addAcl(userAcl); volume.createBucket(bucketName, builder.build()); OzoneBucket bucket = volume.getBucket(bucketName); assertEquals(bucketName, bucket.getName()); @@ -695,16 +707,14 @@ public void testCreateBucketWithAllArgument() String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", - ACLType.ALL, ACCESS); - List acls = new ArrayList<>(); - acls.add(userAcl); + ACCESS, ACLType.ALL); ReplicationConfig repConfig = new ECReplicationConfig(3, 2); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); BucketArgs.Builder builder = BucketArgs.newBuilder(); builder.setVersioning(true) .setStorageType(StorageType.SSD) - .setAcls(acls) + .addAcl(userAcl) .setDefaultReplicationConfig(new DefaultReplicationConfig(repConfig)); volume.createBucket(bucketName, builder.build()); OzoneBucket bucket = volume.getBucket(bucketName); @@ -736,7 +746,7 @@ public void testAddBucketAcl() OzoneVolume volume = store.getVolume(volumeName); volume.createBucket(bucketName); List acls = new ArrayList<>(); - acls.add(new OzoneAcl(USER, "test", ACLType.ALL, ACCESS)); + acls.add(new OzoneAcl(USER, "test", ACCESS, ACLType.ALL)); OzoneBucket bucket = volume.getBucket(bucketName); for (OzoneAcl acl : acls) { assertTrue(bucket.addAcl(acl)); @@ -752,21 +762,17 @@ public void testRemoveBucketAcl() String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", - ACLType.ALL, ACCESS); - List acls = new ArrayList<>(); - acls.add(userAcl); + ACCESS, ACLType.ALL); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); - BucketArgs.Builder builder = BucketArgs.newBuilder(); - builder.setAcls(acls); + BucketArgs.Builder builder = BucketArgs.newBuilder() + .addAcl(userAcl); volume.createBucket(bucketName, builder.build()); OzoneBucket bucket = volume.getBucket(bucketName); - for (OzoneAcl acl : acls) { - assertTrue(bucket.removeAcl(acl)); - } + assertTrue(bucket.removeAcl(userAcl)); OzoneBucket newBucket = volume.getBucket(bucketName); assertEquals(bucketName, newBucket.getName()); - assertThat(bucket.getAcls()).doesNotContain(acls.get(0)); + assertThat(newBucket.getAcls()).doesNotContain(userAcl); } @Test @@ -775,15 +781,14 @@ public void testRemoveBucketAclUsingRpcClientRemoveAcl() String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", - ACLType.ALL, ACCESS); - 
List acls = new ArrayList<>(); - acls.add(userAcl); - acls.add(new OzoneAcl(USER, "test1", - ACLType.ALL, ACCESS)); + ACCESS, ACLType.ALL); + OzoneAcl acl2 = new OzoneAcl(USER, "test1", + ACCESS, ACLType.ALL); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); - BucketArgs.Builder builder = BucketArgs.newBuilder(); - builder.setAcls(acls); + BucketArgs.Builder builder = BucketArgs.newBuilder() + .addAcl(userAcl) + .addAcl(acl2); volume.createBucket(bucketName, builder.build()); OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder() .setBucketName(bucketName) @@ -792,13 +797,11 @@ public void testRemoveBucketAclUsingRpcClientRemoveAcl() .setResType(OzoneObj.ResourceType.BUCKET).build(); // Remove the 2nd acl added to the list. - boolean remove = store.removeAcl(ozoneObj, acls.get(1)); - assertTrue(remove); - assertThat(store.getAcl(ozoneObj)).doesNotContain(acls.get(1)); + assertTrue(store.removeAcl(ozoneObj, acl2)); + assertThat(store.getAcl(ozoneObj)).doesNotContain(acl2); - remove = store.removeAcl(ozoneObj, acls.get(0)); - assertTrue(remove); - assertThat(store.getAcl(ozoneObj)).doesNotContain(acls.get(0)); + assertTrue(store.removeAcl(ozoneObj, userAcl)); + assertThat(store.getAcl(ozoneObj)).doesNotContain(userAcl); } @Test @@ -1329,7 +1332,6 @@ public void testMissingParentBucketUsedNamespace(BucketLayout layout) if (layout.equals(BucketLayout.LEGACY)) { OzoneConfiguration conf = cluster.getConf(); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); - cluster.setConf(conf); } // the directory "/dir1", ""/dir1/dir2/", "/dir1/dir2/dir3/" @@ -1491,6 +1493,7 @@ public void testUsedBytesWithUploadPart() throws IOException { sampleData.length(), 1, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); + ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); assertEquals(valueLength, store.getVolume(volumeName) @@ -1646,6 +1649,7 @@ public void testPutKeyRatisThreeNodesParallel() throws IOException, } latch.countDown(); } catch (IOException ex) { + LOG.error("Execution failed: ", ex); latch.countDown(); failCount.incrementAndGet(); } @@ -1717,16 +1721,9 @@ private void createAndCorruptKey(String volumeName, String bucketName, // Get the container by traversing the datanodes. Atleast one of the // datanode must have this container. - Container container = null; - for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) { - container = hddsDatanode.getDatanodeStateMachine().getContainer() - .getContainerSet().getContainer(containerID); - if (container != null) { - break; - } - } + Container container = getContainerByID(cluster, containerID); assertNotNull(container, "Container not found"); - corruptData(container, key); + corruptData(cluster, container, key); } @@ -1889,7 +1886,7 @@ public void testReadKeyWithCorruptedData() throws IOException { } } assertNotNull(container, "Container not found"); - corruptData(container, key); + corruptData(cluster, container, key); // Try reading the key. Since the chunk file is corrupted, it should // throw a checksum mismatch exception. @@ -2044,7 +2041,7 @@ void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException { } } assertThat(containerList).withFailMessage("Container not found").isNotEmpty(); - corruptData(containerList.get(0), key); + corruptData(cluster, containerList.get(0), key); // Try reading the key. 
Read will fail on the first node and will eventually // failover to next replica try (OzoneInputStream is = bucket.readKey(keyName)) { @@ -2052,7 +2049,7 @@ void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException { is.read(b); assertArrayEquals(b, data); } - corruptData(containerList.get(1), key); + corruptData(cluster, containerList.get(1), key); // Try reading the key. Read will fail on the first node and will eventually // failover to next replica try (OzoneInputStream is = bucket.readKey(keyName)) { @@ -2060,7 +2057,7 @@ void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException { is.read(b); assertArrayEquals(b, data); } - corruptData(containerList.get(2), key); + corruptData(cluster, containerList.get(2), key); // Try reading the key. Read will fail here as all the replicas are corrupt IOException ioException = assertThrows(IOException.class, () -> { @@ -2072,43 +2069,6 @@ void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException { assertThat(ioException).hasMessageContaining("Checksum mismatch"); } - private void corruptData(Container container, OzoneKey key) - throws IOException { - long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0) - .getContainerID(); - long localID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0) - .getLocalID(); - // From the containerData, get the block iterator for all the blocks in - // the container. - KeyValueContainerData containerData = - (KeyValueContainerData) container.getContainerData(); - try (DBHandle db = BlockUtils.getDB(containerData, cluster.getConf()); - BlockIterator keyValueBlockIterator = - db.getStore().getBlockIterator(containerID)) { - // Find the block corresponding to the key we put. We use the localID of - // the BlockData to identify out key. 
- BlockData blockData = null; - while (keyValueBlockIterator.hasNext()) { - blockData = keyValueBlockIterator.nextBlock(); - if (blockData.getBlockID().getLocalID() == localID) { - break; - } - } - assertNotNull(blockData, "Block not found"); - - // Get the location of the chunk file - String containreBaseDir = - container.getContainerData().getVolume().getHddsRootDir().getPath(); - File chunksLocationPath = KeyValueContainerLocationUtil - .getChunksLocationPath(containreBaseDir, cluster.getClusterId(), containerID); - byte[] corruptData = "corrupted data".getBytes(UTF_8); - // Corrupt the contents of chunk files - for (File file : FileUtils.listFiles(chunksLocationPath, null, false)) { - FileUtils.writeByteArrayToFile(file, corruptData); - } - } - } - @Test public void testDeleteKey() throws Exception { @@ -2637,13 +2597,14 @@ void testUploadPartWithNoOverride(ReplicationConfig replication) OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), 1, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); + ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream .getCommitUploadPartInfo(); assertNotNull(commitUploadPartInfo); - assertNotNull(commitUploadPartInfo.getPartName()); + assertNotNull(commitUploadPartInfo.getETag()); } @ParameterizedTest @@ -2671,6 +2632,7 @@ void testUploadPartOverride(ReplicationConfig replication) OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), partNumber, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); + ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream @@ -2678,7 +2640,7 @@ void testUploadPartOverride(ReplicationConfig replication) assertNotNull(commitUploadPartInfo); String partName = commitUploadPartInfo.getPartName(); - assertNotNull(commitUploadPartInfo.getPartName()); + assertNotNull(commitUploadPartInfo.getETag()); // Overwrite the part by creating part key with same part number // and different content. @@ -2686,13 +2648,14 @@ void testUploadPartOverride(ReplicationConfig replication) ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), partNumber, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, "name".length()); + ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); commitUploadPartInfo = ozoneOutputStream .getCommitUploadPartInfo(); assertNotNull(commitUploadPartInfo); - assertNotNull(commitUploadPartInfo.getPartName()); + assertNotNull(commitUploadPartInfo.getETag()); // AWS S3 for same content generates same partName during upload part. // In AWS S3 ETag is generated from md5sum. 
In Ozone right now we @@ -2739,10 +2702,10 @@ public void testMultipartUploadWithACL() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); // Add ACL on Bucket - OzoneAcl acl1 = new OzoneAcl(USER, "Monday", ACLType.ALL, DEFAULT); - OzoneAcl acl2 = new OzoneAcl(USER, "Friday", ACLType.ALL, DEFAULT); - OzoneAcl acl3 = new OzoneAcl(USER, "Jan", ACLType.ALL, ACCESS); - OzoneAcl acl4 = new OzoneAcl(USER, "Feb", ACLType.ALL, ACCESS); + OzoneAcl acl1 = new OzoneAcl(USER, "Monday", DEFAULT, ACLType.ALL); + OzoneAcl acl2 = new OzoneAcl(USER, "Friday", DEFAULT, ACLType.ALL); + OzoneAcl acl3 = new OzoneAcl(USER, "Jan", ACCESS, ACLType.ALL); + OzoneAcl acl4 = new OzoneAcl(USER, "Feb", ACCESS, ACLType.ALL); bucket.addAcl(acl1); bucket.addAcl(acl2); bucket.addAcl(acl3); @@ -2776,8 +2739,8 @@ public void testMultipartUploadWithACL() throws Exception { try (OzoneClient client = remoteUser.doAs((PrivilegedExceptionAction) () -> OzoneClientFactory.getRpcClient(cluster.getConf()))) { - OzoneAcl acl5 = new OzoneAcl(USER, userName, ACLType.READ, DEFAULT); - OzoneAcl acl6 = new OzoneAcl(USER, userName, ACLType.READ, ACCESS); + OzoneAcl acl5 = new OzoneAcl(USER, userName, DEFAULT, ACLType.READ); + OzoneAcl acl6 = new OzoneAcl(USER, userName, ACCESS, ACLType.READ); OzoneObj volumeObj = OzoneObjInfo.Builder.newBuilder() .setVolumeName(volumeName).setStoreType(OzoneObj.StoreType.OZONE) .setResType(OzoneObj.ResourceType.VOLUME).build(); @@ -2800,10 +2763,10 @@ public void testMultipartUploadWithACL() throws Exception { assertEquals(ResultCodes.PERMISSION_DENIED, ome.getResult()); // Add create permission for user, and try multi-upload init again - OzoneAcl acl7 = new OzoneAcl(USER, userName, ACLType.CREATE, DEFAULT); - OzoneAcl acl8 = new OzoneAcl(USER, userName, ACLType.CREATE, ACCESS); - OzoneAcl acl9 = new OzoneAcl(USER, userName, WRITE, DEFAULT); - OzoneAcl acl10 = new OzoneAcl(USER, userName, WRITE, ACCESS); + OzoneAcl acl7 = new OzoneAcl(USER, userName, DEFAULT, ACLType.CREATE); + OzoneAcl acl8 = new OzoneAcl(USER, userName, ACCESS, ACLType.CREATE); + OzoneAcl acl9 = new OzoneAcl(USER, userName, DEFAULT, WRITE); + OzoneAcl acl10 = new OzoneAcl(USER, userName, ACCESS, WRITE); store.addAcl(volumeObj, acl7); store.addAcl(volumeObj, acl8); store.addAcl(volumeObj, acl9); @@ -2818,12 +2781,13 @@ public void testMultipartUploadWithACL() throws Exception { // Upload part byte[] data = generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 1); - String partName = uploadPart(bucket, keyName2, uploadId, 1, data); - Map partsMap = new TreeMap<>(); - partsMap.put(1, partName); + Pair partNameAndETag = uploadPart(bucket, keyName2, + uploadId, 1, data); + Map eTagsMaps = new TreeMap<>(); + eTagsMaps.put(1, partNameAndETag.getValue()); // Complete multipart upload request - completeMultipartUpload(bucket2, keyName2, uploadId, partsMap); + completeMultipartUpload(bucket2, keyName2, uploadId, eTagsMaps); // User without permission cannot read multi-uploaded object OMException ex = assertThrows(OMException.class, () -> { @@ -2873,21 +2837,21 @@ public void testMultipartUploadWithPartsLessThanMinSize() throws Exception { anyReplication()); // Upload Parts - Map partsMap = new TreeMap<>(); + Map eTagsMaps = new TreeMap<>(); // Uploading part 1 with less than min size - String partName = uploadPart(bucket, keyName, uploadID, 1, - "data".getBytes(UTF_8)); - partsMap.put(1, partName); + Pair partNameAndETag = uploadPart(bucket, keyName, + uploadID, 1, "data".getBytes(UTF_8)); + eTagsMaps.put(1, 
partNameAndETag.getValue()); - partName = uploadPart(bucket, keyName, uploadID, 2, + partNameAndETag = uploadPart(bucket, keyName, uploadID, 2, "data".getBytes(UTF_8)); - partsMap.put(2, partName); + eTagsMaps.put(2, partNameAndETag.getValue()); // Complete multipart upload OzoneTestUtils.expectOmException(ResultCodes.ENTITY_TOO_SMALL, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); + () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMaps)); } @Test @@ -2934,11 +2898,11 @@ public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName() uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); // We have not uploaded any parts, but passing some list it should throw // error. - TreeMap partsMap = new TreeMap<>(); - partsMap.put(1, UUID.randomUUID().toString()); + TreeMap eTagsMaps = new TreeMap<>(); + eTagsMaps.put(1, DigestUtils.md5Hex(UUID.randomUUID().toString())); OzoneTestUtils.expectOmException(ResultCodes.INVALID_PART, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); + () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMaps)); } @@ -2960,11 +2924,11 @@ public void testMultipartUploadWithMissingParts() throws Exception { uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); // We have not uploaded any parts, but passing some list it should throw // error. - TreeMap partsMap = new TreeMap<>(); - partsMap.put(3, "random"); + TreeMap eTagsMap = new TreeMap<>(); + eTagsMap.put(3, DigestUtils.md5Hex("random")); OzoneTestUtils.expectOmException(ResultCodes.INVALID_PART, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); + () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMap)); } @Test @@ -3063,6 +3027,9 @@ void testCommitPartAfterCompleteUpload() throws Exception { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = @@ -3071,10 +3038,13 @@ void testCommitPartAfterCompleteUpload() throws Exception { // Do not close output stream for part 2. 
ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 2, omMultipartInfo.getUploadID()); + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.write(data, 0, data.length); Map partsMap = new LinkedHashMap<>(); - partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName()); + partsMap.put(1, omMultipartCommitUploadPartInfo.getETag()); OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap); @@ -3144,17 +3114,17 @@ void testListMultipartUploadParts(ReplicationConfig replication) Map partsMap = new TreeMap<>(); String uploadID = initiateMultipartUpload(bucket, keyName, replication); - String partName1 = uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(1, partName1); + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(1, partNameAndETag1.getKey()); - String partName2 = uploadPart(bucket, keyName, uploadID, 2, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(2, partName2); + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(2, partNameAndETag2.getKey()); - String partName3 = uploadPart(bucket, keyName, uploadID, 3, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(3, partName3); + Pair partNameAndETag3 = uploadPart(bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(3, partNameAndETag3.getKey()); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 3); @@ -3195,17 +3165,17 @@ void testListMultipartUploadPartsWithContinuation( Map partsMap = new TreeMap<>(); String uploadID = initiateMultipartUpload(bucket, keyName, replication); - String partName1 = uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(1, partName1); + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(1, partNameAndETag1.getKey()); - String partName2 = uploadPart(bucket, keyName, uploadID, 2, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(2, partName2); + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(2, partNameAndETag2.getKey()); - String partName3 = uploadPart(bucket, keyName, uploadID, 3, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(3, partName3); + Pair partNameAndETag3 = uploadPart(bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(3, partNameAndETag3.getKey()); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 2); @@ -3473,11 +3443,7 @@ public void testNativeAclsForPrefix() throws Exception { .setStoreType(OzoneObj.StoreType.OZONE) .build(); - // add acl - BitSet aclRights1 = new BitSet(); - aclRights1.set(READ.ordinal()); - OzoneAcl user1Acl = new OzoneAcl(USER, - "user1", aclRights1, ACCESS); + OzoneAcl user1Acl = new OzoneAcl(USER, "user1", ACCESS, READ); assertTrue(store.addAcl(prefixObj, user1Acl)); // get acl @@ 
-3490,11 +3456,7 @@ public void testNativeAclsForPrefix() throws Exception { aclsGet = store.getAcl(prefixObj); assertEquals(0, aclsGet.size()); - // set acl - BitSet aclRights2 = new BitSet(); - aclRights2.set(ACLType.ALL.ordinal()); - OzoneAcl group1Acl = new OzoneAcl(GROUP, - "group1", aclRights2, ACCESS); + OzoneAcl group1Acl = new OzoneAcl(GROUP, "group1", ACCESS, ACLType.ALL); List acls = new ArrayList<>(); acls.add(user1Acl); acls.add(group1Acl); @@ -3534,12 +3496,11 @@ private List getAclList(OzoneConfiguration conf) ACLType userRights = aclConfig.getUserDefaultRights(); ACLType groupRights = aclConfig.getGroupDefaultRights(); - listOfAcls.add(new OzoneAcl(USER, - ugi.getUserName(), userRights, ACCESS)); + listOfAcls.add(new OzoneAcl(USER, ugi.getUserName(), ACCESS, userRights)); //Group ACLs of the User List userGroups = Arrays.asList(ugi.getGroupNames()); userGroups.stream().forEach((group) -> listOfAcls.add( - new OzoneAcl(GROUP, group, groupRights, ACCESS))); + new OzoneAcl(GROUP, group, ACCESS, groupRights))); return listOfAcls; } @@ -3555,7 +3516,7 @@ private void validateOzoneAccessAcl(OzoneObj ozObj) throws IOException { if (expectedAcls.size() > 0) { OzoneAcl oldAcl = expectedAcls.get(0); OzoneAcl newAcl = new OzoneAcl(oldAcl.getType(), oldAcl.getName(), - ACLType.READ_ACL, oldAcl.getAclScope()); + oldAcl.getAclScope(), ACLType.READ_ACL); // Verify that operation successful. assertTrue(store.addAcl(ozObj, newAcl)); @@ -3606,9 +3567,9 @@ private void validateOzoneAccessAcl(OzoneObj ozObj) throws IOException { // Reset acl's. OzoneAcl ua = new OzoneAcl(USER, "userx", - ACLType.READ_ACL, ACCESS); + ACCESS, ACLType.READ_ACL); OzoneAcl ug = new OzoneAcl(GROUP, "userx", - ACLType.ALL, ACCESS); + ACCESS, ACLType.ALL); store.setAcl(ozObj, Arrays.asList(ua, ug)); newAcls = store.getAcl(ozObj); assertEquals(2, newAcls.size()); @@ -3642,19 +3603,20 @@ private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val, // than 5mb int length = 0; byte[] data = generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, val); - String partName = uploadPart(bucket, keyName, uploadID, 1, data); - partsMap.put(1, partName); + Pair partNameAndEtag = uploadPart(bucket, keyName, uploadID, + 1, data); + partsMap.put(1, partNameAndEtag.getValue()); length += data.length; - partName = uploadPart(bucket, keyName, uploadID, 2, data); - partsMap.put(2, partName); + partNameAndEtag = uploadPart(bucket, keyName, uploadID, 2, data); + partsMap.put(2, partNameAndEtag.getValue()); length += data.length; String part3 = UUID.randomUUID().toString(); - partName = uploadPart(bucket, keyName, uploadID, 3, part3.getBytes( + partNameAndEtag = uploadPart(bucket, keyName, uploadID, 3, part3.getBytes( UTF_8)); - partsMap.put(3, partName); + partsMap.put(3, partNameAndEtag.getValue()); length += part3.getBytes(UTF_8).length; // Complete multipart upload request @@ -3711,20 +3673,26 @@ private String initiateMultipartUpload(OzoneBucket bucket, String keyName, return uploadID; } - private String uploadPart(OzoneBucket bucket, String keyName, String - uploadID, int partNumber, byte[] data) throws Exception { + private Pair uploadPart(OzoneBucket bucket, String keyName, + String uploadID, int partNumber, + byte[] data) throws Exception { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, partNumber, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); 
ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo(); assertNotNull(omMultipartCommitUploadPartInfo); + assertNotNull(omMultipartCommitUploadPartInfo.getETag()); assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); - return omMultipartCommitUploadPartInfo.getPartName(); + return Pair.of(omMultipartCommitUploadPartInfo.getPartName(), + omMultipartCommitUploadPartInfo.getETag()); } @@ -3929,7 +3897,7 @@ public void testSetS3VolumeAcl() throws Exception { .setStoreType(OzoneObj.StoreType.OZONE) .build(); - OzoneAcl ozoneAcl = new OzoneAcl(USER, remoteUserName, WRITE, DEFAULT); + OzoneAcl ozoneAcl = new OzoneAcl(USER, remoteUserName, DEFAULT, WRITE); boolean result = store.addAcl(s3vVolume, ozoneAcl); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java index e373b06d950c..8f3c82620402 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java @@ -83,10 +83,10 @@ public class TestOzoneRpcClientForAclAuditLog { private static UserGroupInformation ugi; private static final OzoneAcl USER_ACL = new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, - "johndoe", IAccessAuthorizer.ACLType.ALL, ACCESS); + "johndoe", ACCESS, IAccessAuthorizer.ACLType.ALL); private static final OzoneAcl USER_ACL_2 = new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, - "jane", IAccessAuthorizer.ACLType.ALL, ACCESS); + "jane", ACCESS, IAccessAuthorizer.ACLType.ALL); private static List aclListToAdd = new ArrayList<>(); private static MiniOzoneCluster cluster = null; private static OzoneClient ozClient = null; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java index ffd80f359ff6..4ecbd08a41b0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java @@ -24,12 +24,15 @@ import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.HashMap; import java.util.UUID; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeoutException; +import javax.xml.bind.DatatypeConverter; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationType; @@ -37,6 +40,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.utils.FaultInjector; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -52,6 +56,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import 
org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; import org.apache.hadoop.ozone.om.ratis.OzoneManagerStateMachine; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; @@ -166,7 +171,8 @@ void testGetKeyAndFileWithNetworkTopology() throws IOException { } @Test - public void testMultiPartUploadWithStream() throws IOException { + public void testMultiPartUploadWithStream() + throws IOException, NoSuchAlgorithmException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); String keyName = UUID.randomUUID().toString(); @@ -196,6 +202,9 @@ public void testMultiPartUploadWithStream() throws IOException { keyName, valueLength, 1, uploadID); ozoneStreamOutput.write(ByteBuffer.wrap(sampleData), 0, valueLength); + ozoneStreamOutput.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(MessageDigest.getInstance(OzoneConsts.MD5_HASH) + .digest(sampleData)).toLowerCase()); ozoneStreamOutput.close(); OzoneMultipartUploadPartListParts parts = @@ -315,7 +324,11 @@ public void testParallelDeleteBucketAndCreateKey() throws IOException, omSM.getHandler().setInjector(injector); thread1.start(); thread2.start(); - Thread.sleep(2000); + // Wait long enough for createKey's preExecute to finish executing + GenericTestUtils.waitFor(() -> { + return getCluster().getOzoneManager().getOmServerProtocol().getLastRequestToSubmit().getCmdType().equals( + Type.CreateKey); + }, 100, 10000); injector.resume(); try { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java index a8029987fedd..1e22613f929b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java @@ -57,6 +57,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; @@ -100,6 +101,7 @@ public static void init() throws Exception { TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 10, TimeUnit.SECONDS); @@ -125,8 +127,7 @@ public static void init() throws Exception { conf.setFromObject(raftClientConfig); cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(2). 
- setHbInterval(200) + MiniOzoneCluster.newBuilder(conf).setNumDatanodes(2) .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 60000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java index 9f5d04c56f94..256148dfb8de 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java @@ -53,6 +53,7 @@ protected static MiniOzoneCluster newCluster( conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, TimeUnit.SECONDS); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 5); conf.setQuietMode(false); conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 64, StorageUnit.MB); @@ -73,7 +74,6 @@ protected static MiniOzoneCluster newCluster( return MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setTotalPipelineNumLimit(5) .build(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java index e045b48bda96..24064ae5c883 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java @@ -140,6 +140,7 @@ public static void init() throws Exception { TimeUnit.MILLISECONDS); conf.setTimeDuration(HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) @@ -148,8 +149,8 @@ public static void init() throws Exception { .setStreamBufferMaxSize(maxFlushSize) .applyTo(conf); - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10) - .setTotalPipelineNumLimit(10) + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(10) .build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index d5564ac2315e..5ff8d713649e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -180,7 +180,6 @@ public void init() throws Exception { conf.setFromObject(replicationConf); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) - .setHbInterval(50) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java index cd25ee25c8f4..8d22eddadc59 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java @@ -85,10 +85,10 @@ public static void init() throws Exception { conf = new OzoneConfiguration(); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, "1"); conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 15); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(10) - .setTotalPipelineNumLimit(15) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java index 8c35d5011a5d..0fd31bb4b728 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java @@ -183,10 +183,10 @@ static void runContainerStateMachineMetrics( static XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); final String dir = TEST_DIR + dn.getUuid(); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final ContainerDispatcher dispatcher = new TestContainerDispatcher(); return XceiverServerRatis.newXceiverServerRatis(dn, conf, dispatcher, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java index d4900bb48783..a4a5701f5491 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.container.metrics; import java.io.File; +import java.nio.file.Path; import java.util.List; import java.util.Map; import java.util.UUID; @@ -59,12 +60,15 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; /** * Test for metrics published by storage containers. 
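A recurring change in this stretch is that MiniOzoneCluster tuning moves off the builder (setHbInterval, setTotalPipelineNumLimit) and onto plain configuration keys set before the cluster is built. A condensed sketch of the resulting setup, limited to calls that appear in the hunks; it illustrates the pattern and is not a drop-in fixture:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ozone.MiniOzoneCluster;

public final class MiniClusterConfigSketch {
  public static MiniOzoneCluster startCluster() throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Formerly builder.setHbInterval(200):
    conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS);
    // Formerly builder.setTotalPipelineNumLimit(15):
    conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 15);

    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(3)
        .build();
    cluster.waitForClusterToBeReady();
    return cluster;
  }

  private MiniClusterConfigSketch() { }
}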
*/ @Timeout(300) public class TestContainerMetrics { + @TempDir + private Path tempDir; @Test public void testContainerMetrics() throws Exception { @@ -78,7 +82,7 @@ public void testContainerMetrics() throws Exception { Pipeline pipeline = MockPipeline .createSingleNodePipeline(); OzoneConfiguration conf = new OzoneConfiguration(); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); conf.setInt(DFSConfigKeysLegacy.DFS_METRICS_PERCENTILES_INTERVALS_KEY, @@ -105,6 +109,8 @@ public void testContainerMetrics() throws Exception { } HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); dispatcher.setClusterId(UUID.randomUUID().toString()); server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher, null); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java index a1d436b3360a..2f18326f7b1b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java @@ -67,13 +67,13 @@ public void init() throws Exception { conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s"); omServiceId = "om-service-test1"; scmServiceId = "scm-service-test1"; - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) - .setOMServiceId(omServiceId) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId) .setNumOfStorageContainerManagers(numOfSCMs) .setNumOfOzoneManagers(numOfOMs) - .setNumDatanodes(1) - .build(); + .setNumDatanodes(1); + cluster = builder.build(); cluster.waitForClusterToBeReady(); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 7a64ddc5d5e7..1c5da04c0a3e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -30,11 +30,13 @@ import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; import java.io.File; +import java.nio.file.Path; import java.util.HashMap; import java.util.LinkedList; import java.util.List; @@ -55,6 +57,8 @@ */ @Timeout(300) public class TestOzoneContainer { + @TempDir + private Path tempDir; @Test public void testCreateOzoneContainer( @@ -68,13 +72,15 @@ public void testCreateOzoneContainer( Pipeline pipeline = MockPipeline.createSingleNodePipeline(); 
conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath()); conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath()); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); DatanodeDetails datanodeDetails = randomDatanodeDetails(); container = ContainerTestUtils .getOzoneContainer(datanodeDetails, conf); + StorageVolumeUtil.getHddsVolumesList(container.getVolumeSet().getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); //Set clusterId and manually start ozone container. container.start(UUID.randomUUID().toString()); @@ -99,7 +105,7 @@ void testOzoneContainerStart( Pipeline pipeline = MockPipeline.createSingleNodePipeline(); conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath()); conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath()); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java index 73910ef00ff1..b05c547b625d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java @@ -38,6 +38,8 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; +import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -303,6 +305,9 @@ private OzoneContainer createAndStartOzoneContainerInstance() { StateContext stateContext = ContainerTestUtils.getMockContext(dn, conf); container = new OzoneContainer( dn, conf, stateContext, caClient, keyClient); + MutableVolumeSet volumeSet = container.getVolumeSet(); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempFolder.toFile())); container.start(clusterID); } catch (Throwable e) { if (container != null) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java index 715b0678a173..5585696dfc31 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java @@ -38,6 +38,8 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.SecretKeyTestClient; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; +import 
org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.ozone.test.GenericTestUtils; @@ -129,14 +131,17 @@ void testCreateOzoneContainer(boolean requireToken, boolean hasToken, try { Pipeline pipeline = MockPipeline.createSingleNodePipeline(); conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.toString()); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline .getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE) .getValue()); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, false); DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); container = new OzoneContainer(dn, conf, ContainerTestUtils .getMockContext(dn, conf), caClient, secretKeyClient); + MutableVolumeSet volumeSet = container.getVolumeSet(); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempFolder.toFile())); //Set scmId and manually start ozone container. container.start(UUID.randomUUID().toString()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java index 3c89bb12ee7a..630c4d314959 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java @@ -20,6 +20,7 @@ import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; @@ -57,6 +58,7 @@ import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; @@ -69,6 +71,7 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; @@ -84,12 +87,14 @@ public class TestContainerServer { .getAbsolutePath() + File.separator; private static final OzoneConfiguration CONF = new OzoneConfiguration(); private static CertificateClient caClient; + @TempDir + private Path tempDir; @BeforeAll public static void setup() { DefaultMetricsSystem.setMiniClusterMode(true); CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, TEST_DIR); - CONF.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, false); + CONF.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, false); DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); caClient = new DNCertificateClient(new SecurityConfig(CONF), null, dn, null, null, null); @@ 
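The container tests above all gain the same wiring: a JUnit 5 @TempDir field plus a loop that points each HddsVolume's DB parent directory at it before the container or dispatcher starts. Isolated, the pattern looks like the sketch below (the helper method name is illustrative; the classes are the ones imported in the hunks):

import java.nio.file.Path;
import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil;
import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
import org.junit.jupiter.api.io.TempDir;

class VolumeDbParentDirSketch {
  @TempDir
  private Path tempDir; // injected by JUnit 5 for each test instance

  // Give every HddsVolume in the set a parent directory for its per-volume DB,
  // so the test writes that metadata under the JUnit-managed temp dir.
  void pointVolumeDbsAtTempDir(MutableVolumeSet volumeSet) {
    StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList())
        .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile()));
  }
}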
-104,7 +109,7 @@ public static void tearDown() throws Exception { public void testClientServer() throws Exception { DatanodeDetails datanodeDetails = randomDatanodeDetails(); runTestClientServer(1, (pipeline, conf) -> conf - .setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), XceiverClientGrpc::new, @@ -121,10 +126,10 @@ public void testClientServerRatisGrpc() throws Exception { static XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); final String dir = TEST_DIR + dn.getUuid(); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final ContainerDispatcher dispatcher = new TestContainerDispatcher(); return XceiverServerRatis.newXceiverServerRatis(dn, conf, dispatcher, @@ -182,7 +187,7 @@ static void runTestClientServer( } } - private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, + private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException { ContainerSet containerSet = new ContainerSet(1000); @@ -192,6 +197,8 @@ private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, conf.set(OZONE_METADATA_DIRS, TEST_DIR); VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); StateContext context = ContainerTestUtils.getMockContext(dd, conf); ContainerMetrics metrics = ContainerMetrics.create(conf); Map handlers = Maps.newHashMap(); @@ -216,7 +223,7 @@ public void testClientServerWithContainerDispatcher() throws Exception { HddsDispatcher hddsDispatcher = createDispatcher(dd, UUID.randomUUID(), CONF); runTestClientServer(1, (pipeline, conf) -> conf - .setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), XceiverClientGrpc::new, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java index 53420c0e2209..8044685bb747 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java @@ -20,6 +20,7 @@ import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.EnumSet; @@ -65,6 +66,7 @@ import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import 
org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; @@ -103,6 +105,7 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import static org.apache.ratis.rpc.SupportedRpcType.GRPC; import static org.assertj.core.api.Assertions.assertThat; @@ -115,6 +118,8 @@ * Test Container servers when security is enabled. */ public class TestSecureContainerServer { + @TempDir + private Path tempDir; private static final String TEST_DIR = GenericTestUtils.getTestDir("dfs").getAbsolutePath() + File.separator; private static final OzoneConfiguration CONF = new OzoneConfiguration(); @@ -158,7 +163,7 @@ public void testClientServer() throws Exception { HddsDispatcher hddsDispatcher = createDispatcher(dd, UUID.randomUUID(), CONF); runTestClientServer(1, (pipeline, conf) -> conf - .setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), XceiverClientGrpc::new, @@ -166,7 +171,7 @@ public void testClientServer() throws Exception { hddsDispatcher, caClient), (dn, p) -> { }, (p) -> { }); } - private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, + private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException { ContainerSet containerSet = new ContainerSet(1000); conf.set(HDDS_DATANODE_DIR_KEY, @@ -175,6 +180,8 @@ private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, conf.set(OZONE_METADATA_DIRS, TEST_DIR); VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); StateContext context = ContainerTestUtils.getMockContext(dd, conf); ContainerMetrics metrics = ContainerMetrics.create(conf); Map handlers = Maps.newHashMap(); @@ -199,16 +206,16 @@ public void testClientServerRatisGrpc() throws Exception { runTestClientServerRatis(GRPC, 3); } - static XceiverServerRatis newXceiverServerRatis( + XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); conf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); final String dir = TEST_DIR + dn.getUuid(); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final ContainerDispatcher dispatcher = createDispatcher(dn, UUID.randomUUID(), conf); return XceiverServerRatis.newXceiverServerRatis(dn, conf, dispatcher, @@ -216,12 +223,12 @@ static XceiverServerRatis newXceiverServerRatis( caClient, null); } - private static void runTestClientServerRatis(RpcType rpc, int numNodes) + private void runTestClientServerRatis(RpcType rpc, int 
numNodes) throws Exception { runTestClientServer(numNodes, (pipeline, conf) -> RatisTestHelper.initRatisConf(rpc, conf), XceiverClientRatis::newXceiverClientRatis, - TestSecureContainerServer::newXceiverServerRatis, + this::newXceiverServerRatis, (dn, p) -> RatisTestHelper.initXceiverServerRatis(rpc, dn, p), (p) -> { }); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java index ec7eb81db33d..e94f46a398b3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java @@ -22,6 +22,7 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.DBStore; @@ -61,6 +62,7 @@ import java.util.stream.Stream; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.STAND_ALONE; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -295,8 +297,7 @@ private void prepareTable(String tableName, boolean schemaV3) for (int i = 1; i <= 5; i++) { String key = "key" + i; OmKeyInfo value = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", - key, HddsProtos.ReplicationType.STAND_ALONE, - HddsProtos.ReplicationFactor.ONE); + key, ReplicationConfig.fromProtoTypeAndFactor(STAND_ALONE, HddsProtos.ReplicationFactor.ONE)).build(); keyTable.put(key.getBytes(UTF_8), value.getProtobuf(ClientVersion.CURRENT_VERSION).toByteArray()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java index cca47e17e407..7c82633f1136 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java @@ -35,6 +35,7 @@ import org.junit.jupiter.api.Timeout; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.assertj.core.api.Assertions.assertThat; /** @@ -50,8 +51,9 @@ public class TestDnRatisLogParser { @BeforeEach public void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 2); cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(1).setTotalPipelineNumLimit(2).build(); + .setNumDatanodes(1).build(); cluster.waitForClusterToBeReady(); System.setOut(new PrintStream(out, false, UTF_8.name())); System.setErr(new PrintStream(err, false, UTF_8.name())); @@ -71,7 +73,7 @@ public void destroy() throws Exception { public void testRatisLogParsing() throws Exception { OzoneConfiguration conf = cluster.getHddsDatanodes().get(0).getConf(); String path = - conf.get(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); + conf.get(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); UUID pid = 
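TestLDBCli now expresses replication as a single ReplicationConfig instead of a proto type plus factor, and later hunks use RatisReplicationConfig.getInstance directly. A small sketch of both factory calls; the println is only there to make the class runnable on its own:

import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

public final class ReplicationConfigSketch {
  public static void main(String[] args) {
    // As in the TestLDBCli hunk: build from the proto enums.
    ReplicationConfig standalone = ReplicationConfig.fromProtoTypeAndFactor(
        HddsProtos.ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor.ONE);
    // As in the TestKeyManagerImpl hunks further down: Ratis-specific factory.
    ReplicationConfig ratisThree =
        RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE);
    System.out.println(standalone + " / " + ratisThree);
  }

  private ReplicationConfigSketch() { }
}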
cluster.getStorageContainerManager().getPipelineManager() .getPipelines().get(0).getId().getId(); File pipelineDir = new File(path, pid.toString()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java index 3e22c1db90de..8d77b6cc58b5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java @@ -274,7 +274,6 @@ private static MiniOzoneCluster newCluster(boolean schemaV3) ozoneConfig.setFromObject(dnConf); MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(ozoneConfig) .setNumDatanodes(1) - .setNumDataVolumes(1) .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(ReplicationFactor.ONE, 30000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java index 0273deb50e61..98ab87b871de 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.UniformDatanodesFactory; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; @@ -90,7 +91,9 @@ public void init() throws Exception { ozoneConfig.setFromObject(dnConf); cluster = MiniOzoneCluster.newBuilder(ozoneConfig) .setNumDatanodes(1) - .setNumDataVolumes(3) + .setDatanodeFactory(UniformDatanodesFactory.newBuilder() + .setNumDataVolumes(3) + .build()) .build(); cluster.waitForClusterToBeReady(); datanodes = cluster.getHddsDatanodes(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDNRPCLoadGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDNRPCLoadGenerator.java new file mode 100644 index 000000000000..d049a7e320cf --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDNRPCLoadGenerator.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.freon; + +import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.container.common.SCMTestUtils; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import picocli.CommandLine; + +import java.time.Duration; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * Tests Freon, with MiniOzoneCluster and validate data. + */ +public class TestDNRPCLoadGenerator { + + private static MiniOzoneCluster cluster = null; + private static ContainerWithPipeline container; + + private static void startCluster(OzoneConfiguration conf) throws Exception { + DatanodeRatisServerConfig ratisServerConfig = + conf.getObject(DatanodeRatisServerConfig.class); + ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); + ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10)); + conf.setFromObject(ratisServerConfig); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); + conf.setFromObject(raftClientConfig); + + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(5).build(); + cluster.waitForClusterToBeReady(); + cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.THREE, + 180000); + + StorageContainerLocationProtocolClientSideTranslatorPB + storageContainerLocationClient = cluster + .getStorageContainerLocationClient(); + container = + storageContainerLocationClient.allocateContainer( + SCMTestUtils.getReplicationType(conf), + HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE); + XceiverClientManager xceiverClientManager = new XceiverClientManager(conf); + XceiverClientSpi client = xceiverClientManager + .acquireClient(container.getPipeline()); + ContainerProtocolCalls.createContainer(client, + container.getContainerInfo().getContainerID(), null); + } + + static void shutdownCluster() { + if (cluster != null) { + cluster.shutdown(); + } + } + + @BeforeAll + public static void init() throws Exception { + OzoneConfiguration conf = new OzoneConfiguration(); + startCluster(conf); + } + + @AfterAll + public static void shutdown() { + shutdownCluster(); + } + + @Test + public void test() { + DNRPCLoadGenerator randomKeyGenerator = + new DNRPCLoadGenerator(cluster.getConf()); + CommandLine cmd = new CommandLine(randomKeyGenerator); + int exitCode = cmd.execute( + "--container-id", 
Long.toString(container.getContainerInfo().getContainerID()), + "--clients", "5", + "-t", "10"); + assertEquals(0, exitCode); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java index 28cc863c26d5..e1f2061c7d46 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.junit.jupiter.api.Test; import picocli.CommandLine; @@ -52,8 +53,10 @@ static void startCluster(OzoneConfiguration conf) throws Exception { raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); conf.setFromObject(raftClientConfig); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 8); + cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(5).setTotalPipelineNumLimit(8).build(); + .setNumDatanodes(5).build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.THREE, 180000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java index 0798731a839d..862b52c8e9e1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java @@ -34,6 +34,7 @@ import picocli.CommandLine; import java.util.concurrent.TimeUnit; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -54,9 +55,8 @@ public class TestFreonWithDatanodeFastRestart { @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); + conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, TimeUnit.MILLISECONDS); cluster = MiniOzoneCluster.newBuilder(conf) - .setHbProcessorInterval(1000) - .setHbInterval(1000) .setNumDatanodes(3) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java index d78beff7e78b..08c1b3bd3b35 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java @@ -61,6 +61,8 @@ public static void init() throws Exception { 1, TimeUnit.SECONDS); conf.setTimeDuration(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL, 1, TimeUnit.SECONDS); + conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, TimeUnit.MILLISECONDS); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 8); DatanodeRatisServerConfig ratisServerConfig = 
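The new TestDNRPCLoadGenerator drives the Freon subcommand through picocli: wrap the command object in a CommandLine, call execute with CLI arguments, and assert on the returned exit code. A self-contained sketch of that invocation pattern using a hypothetical command (DemoLoadCommand is not part of Ozone):

import java.util.concurrent.Callable;
import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;

@Command(name = "demo-load")
class DemoLoadCommand implements Callable<Integer> {
  @Option(names = {"-t", "--threads"}, description = "number of worker threads")
  private int threads = 1;

  @Option(names = "--clients", description = "number of RPC clients")
  private int clients = 1;

  @Override
  public Integer call() {
    System.out.printf("threads=%d clients=%d%n", threads, clients);
    return 0; // picocli reports this value as the exit code
  }

  public static void main(String[] args) {
    int exitCode = new CommandLine(new DemoLoadCommand())
        .execute("--clients", "5", "-t", "10");
    if (exitCode != 0) {
      throw new AssertionError("unexpected exit code: " + exitCode);
    }
  }
}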
conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); @@ -74,10 +76,7 @@ public static void init() throws Exception { conf.setFromObject(raftClientConfig); cluster = MiniOzoneCluster.newBuilder(conf) - .setHbProcessorInterval(1000) - .setHbInterval(1000) .setNumDatanodes(3) - .setTotalPipelineNumLimit(8) .build(); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java index bca21aebd1ac..c566cae414fd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java @@ -29,7 +29,6 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -39,7 +38,6 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.ozone.rocksdiff.DifferSnapshotInfo; import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; import org.apache.ozone.test.GenericTestUtils; @@ -215,20 +213,16 @@ public void testDAGReconstruction() OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); RDBStore rdbStore = (RDBStore) omMetadataManager.getStore(); RocksDBCheckpointDiffer differ = rdbStore.getRocksDBCheckpointDiffer(); - ReferenceCounted - snapDB1 = ozoneManager.getOmSnapshotManager() - .getSnapshotCache().get( - SnapshotInfo.getTableKey(volumeName, bucketName, "snap1")); - ReferenceCounted - snapDB2 = ozoneManager.getOmSnapshotManager() - .getSnapshotCache().get( - SnapshotInfo.getTableKey(volumeName, bucketName, "snap2")); + ReferenceCounted snapDB1 = ozoneManager.getOmSnapshotManager() + .getActiveSnapshot(volumeName, bucketName, "snap1"); + ReferenceCounted snapDB2 = ozoneManager.getOmSnapshotManager() + .getActiveSnapshot(volumeName, bucketName, "snap2"); DifferSnapshotInfo snap1 = getDifferSnapshotInfo(omMetadataManager, volumeName, bucketName, "snap1", - ((RDBStore)((OmSnapshot)snapDB1.get()) + ((RDBStore) snapDB1.get() .getMetadataManager().getStore()).getDb().getManagedRocksDb()); DifferSnapshotInfo snap2 = getDifferSnapshotInfo(omMetadataManager, - volumeName, bucketName, "snap2", ((RDBStore)((OmSnapshot)snapDB2.get()) + volumeName, bucketName, "snap2", ((RDBStore) snapDB2.get() .getMetadataManager().getStore()).getDb().getManagedRocksDb()); // RocksDB does checkpointing in a separate thread, wait for it @@ -247,13 +241,11 @@ public void testDAGReconstruction() resp = store.createSnapshot(volumeName, bucketName, "snap3"); LOG.debug("Snapshot created: {}", resp); - ReferenceCounted - snapDB3 = ozoneManager.getOmSnapshotManager() - .getSnapshotCache().get( - SnapshotInfo.getTableKey(volumeName, bucketName, "snap3")); + ReferenceCounted snapDB3 = ozoneManager.getOmSnapshotManager() + .getActiveSnapshot(volumeName, bucketName, "snap3"); DifferSnapshotInfo snap3 = getDifferSnapshotInfo(omMetadataManager, volumeName, bucketName, 
"snap3", - ((RDBStore)((OmSnapshot)snapDB3.get()) + ((RDBStore) snapDB3.get() .getMetadataManager().getStore()).getDb().getManagedRocksDb()); final File checkpointSnap3 = new File(snap3.getDbPath()); GenericTestUtils.waitFor(checkpointSnap3::exists, 2000, 20000); @@ -274,24 +266,21 @@ public void testDAGReconstruction() ozoneManager = cluster.getOzoneManager(); omMetadataManager = ozoneManager.getMetadataManager(); snapDB1 = ozoneManager.getOmSnapshotManager() - .getSnapshotCache().get( - SnapshotInfo.getTableKey(volumeName, bucketName, "snap1")); + .getActiveSnapshot(volumeName, bucketName, "snap1"); snapDB2 = ozoneManager.getOmSnapshotManager() - .getSnapshotCache().get( - SnapshotInfo.getTableKey(volumeName, bucketName, "snap2")); + .getActiveSnapshot(volumeName, bucketName, "snap2"); snap1 = getDifferSnapshotInfo(omMetadataManager, volumeName, bucketName, "snap1", - ((RDBStore)((OmSnapshot)snapDB1.get()) + ((RDBStore) snapDB1.get() .getMetadataManager().getStore()).getDb().getManagedRocksDb()); snap2 = getDifferSnapshotInfo(omMetadataManager, - volumeName, bucketName, "snap2", ((RDBStore)((OmSnapshot)snapDB2.get()) + volumeName, bucketName, "snap2", ((RDBStore) snapDB2.get() .getMetadataManager().getStore()).getDb().getManagedRocksDb()); snapDB3 = ozoneManager.getOmSnapshotManager() - .getSnapshotCache().get( - SnapshotInfo.getTableKey(volumeName, bucketName, "snap3")); + .getActiveSnapshot(volumeName, bucketName, "snap3"); snap3 = getDifferSnapshotInfo(omMetadataManager, volumeName, bucketName, "snap3", - ((RDBStore)((OmSnapshot)snapDB3.get()) + ((RDBStore) snapDB3.get() .getMetadataManager().getStore()).getDb().getManagedRocksDb()); List sstDiffList21Run2 = differ.getSSTDiffList(snap2, snap1); assertEquals(sstDiffList21, sstDiffList21Run2); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java index b74022b83e5d..3c7a04071b3c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java @@ -34,7 +34,8 @@ import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.raftlog.RaftLog; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -88,8 +89,9 @@ private void shutdown() { * * @throws IOException */ - private void startCluster() throws Exception { + private void startCluster(boolean fsPathsEnabled) throws Exception { conf = getOzoneConfiguration(); + conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, fsPathsEnabled); conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, BucketLayout.OBJECT_STORE.name()); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build(); @@ -104,10 +106,11 @@ private OzoneConfiguration getOzoneConfiguration() { return new OzoneConfiguration(); } - @Test - public void testOmBucketReadWriteKeyOps() throws Exception { + @ParameterizedTest(name = "Filesystem Paths Enabled: {0}") + @ValueSource(booleans = {false, true}) + public void testOmBucketReadWriteKeyOps(boolean fsPathsEnabled) throws Exception { try { - startCluster(); + startCluster(fsPathsEnabled); 
FileOutputStream out = FileUtils.openOutputStream(new File(path, "conf")); cluster.getConf().writeXml(out); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java index 9d4d489586b0..da0f82d4707b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java @@ -95,7 +95,7 @@ public class TestAddRemoveOzoneManager { private void setupCluster(int numInitialOMs) throws Exception { conf = new OzoneConfiguration(); conf.setInt(OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY, 5); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId(SCM_DUMMY_SERVICE_ID) .setOMServiceId(OM_SERVICE_ID) .setNumOfOzoneManagers(numInitialOMs) @@ -303,7 +303,6 @@ public void testForceBootstrap() throws Exception { config.setInt(OMConfigKeys.OZONE_OM_ADMIN_PROTOCOL_MAX_RETRIES_KEY, 2); config.setInt( OMConfigKeys.OZONE_OM_ADMIN_PROTOCOL_WAIT_BETWEEN_RETRIES_KEY, 100); - cluster.setConf(config); GenericTestUtils.LogCapturer omLog = GenericTestUtils.LogCapturer.captureLogs(OzoneManager.LOG); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java index 37fec8dcda72..97512fec40c0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java @@ -123,7 +123,7 @@ public void testBucketOwner() throws Exception { ozoneBucket.getAcls(); //Add Acls OzoneAcl acl = new OzoneAcl(USER, "testuser", - IAccessAuthorizer.ACLType.ALL, DEFAULT); + DEFAULT, IAccessAuthorizer.ACLType.ALL); ozoneBucket.addAcl(acl); } } @@ -179,7 +179,7 @@ public void testNonBucketNonVolumeOwner() throws Exception { OzoneVolume volume = client.getObjectStore().getVolume("volume1"); OzoneBucket ozoneBucket = volume.getBucket("bucket1"); OzoneAcl acl = new OzoneAcl(USER, "testuser1", - IAccessAuthorizer.ACLType.ALL, DEFAULT); + DEFAULT, IAccessAuthorizer.ACLType.ALL); ozoneBucket.addAcl(acl); }, "Add Acls as non-volume and non-bucket owner should fail"); } @@ -202,7 +202,7 @@ public void testVolumeOwner() throws Exception { ozoneBucket.getAcls(); //Add Acls OzoneAcl acl = new OzoneAcl(USER, "testuser2", - IAccessAuthorizer.ACLType.ALL, DEFAULT); + DEFAULT, IAccessAuthorizer.ACLType.ALL); ozoneBucket.addAcl(acl); //Bucket Delete volume.deleteBucket("bucket2"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index 67ab3169b69c..f443104871f8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -24,7 +24,6 @@ import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; -import java.util.BitSet; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -37,6 +36,8 @@ import org.apache.commons.lang3.tuple.Pair; import 
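Both TestOmBucketReadWriteKeyOps above and TestKeyManagerImpl below turn single @Test methods into @ParameterizedTest methods fed by @ValueSource(booleans = {...}), so each case runs once with filesystem paths enabled and once without. A minimal, self-contained JUnit 5 sketch of that mechanism:

import static org.junit.jupiter.api.Assertions.assertNotNull;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;

class BooleanValueSourceSketch {
  @ParameterizedTest(name = "Filesystem Paths Enabled: {0}")
  @ValueSource(booleans = {false, true})
  void runsOncePerValue(boolean fsPathsEnabled) {
    // JUnit invokes this method twice, binding false and then true;
    // the real tests forward the flag into their cluster configuration.
    String mode = fsPathsEnabled ? "fs-paths-enabled" : "fs-paths-disabled";
    assertNotNull(mode);
  }
}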
org.apache.hadoop.conf.StorageUnit; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -44,9 +45,7 @@ import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -87,7 +86,6 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.hadoop.ozone.security.acl.OzoneObj; @@ -110,7 +108,9 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.SCM_GET_PIPELINE_EXCEPTION; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; @@ -118,13 +118,17 @@ import jakarta.annotation.Nonnull; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -164,7 +168,7 @@ public class TestKeyManagerImpl { private static final String KEY_NAME = "key1"; private static final String BUCKET_NAME = "bucket1"; private static final String BUCKET2_NAME = "bucket2"; - private static final String VERSIONED_BUCKET_NAME = "versionedBucket1"; + private static final String VERSIONED_BUCKET_NAME = "versionedbucket1"; private static final String VOLUME_NAME = "vol1"; private static OzoneManagerProtocol writeClient; private static OzoneManager om; @@ -176,6 +180,9 @@ public static void setUp() throws Exception { dir = GenericTestUtils.getRandomizedTestDir(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString()); 
conf.set(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, "true"); + final String rootPath = String.format("%s://%s/", OZONE_OFS_URI_SCHEME, + conf.get(OZONE_OM_ADDRESS_KEY)); + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); mockScmBlockLocationProtocol = mock(ScmBlockLocationProtocol.class); nodeManager = new MockNodeManager(true, 10); NodeSchema[] schemas = new NodeSchema[] @@ -224,9 +231,6 @@ public static void setUp() throws Exception { new SCMException("SafeModePrecheck failed for allocateBlock", ResultCodes.SAFE_MODE_EXCEPTION)); createVolume(VOLUME_NAME); - createBucket(VOLUME_NAME, BUCKET_NAME, false); - createBucket(VOLUME_NAME, BUCKET2_NAME, false); - createBucket(VOLUME_NAME, VERSIONED_BUCKET_NAME, true); } @AfterAll @@ -237,21 +241,21 @@ public static void cleanup() throws Exception { FileUtils.deleteDirectory(dir); } + @BeforeEach + public void init() throws Exception { + createBucket(VOLUME_NAME, BUCKET_NAME, false); + createBucket(VOLUME_NAME, BUCKET2_NAME, false); + createBucket(VOLUME_NAME, VERSIONED_BUCKET_NAME, true); + } + @AfterEach public void cleanupTest() throws IOException { mockContainerClient(); - List fileStatuses = keyManager - .listStatus(createBuilder().setKeyName("").build(), true, "", 100000); - for (OzoneFileStatus fileStatus : fileStatuses) { - if (fileStatus.isFile()) { - writeClient.deleteKey( - createKeyArgs(fileStatus.getKeyInfo().getKeyName())); - } else { - writeClient.deleteKey(createKeyArgs(OzoneFSUtils - .addTrailingSlashIfNeeded( - fileStatus.getKeyInfo().getKeyName()))); - } - } + org.apache.hadoop.fs.Path volumePath = new org.apache.hadoop.fs.Path(OZONE_URI_DELIMITER, VOLUME_NAME); + FileSystem fs = FileSystem.get(conf); + fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET_NAME), true); + fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET2_NAME), true); + fs.delete(new org.apache.hadoop.fs.Path(volumePath, VERSIONED_BUCKET_NAME), true); } private static void mockContainerClient() { @@ -527,7 +531,7 @@ public void testPrefixAclOps() throws IOException { .build(); OzoneAcl ozAcl1 = new OzoneAcl(ACLIdentityType.USER, "user1", - ACLType.READ, ACCESS); + ACCESS, ACLType.READ); writeClient.addAcl(ozPrefix1, ozAcl1); List ozAclGet = writeClient.getAcl(ozPrefix1); @@ -535,24 +539,13 @@ public void testPrefixAclOps() throws IOException { assertEquals(ozAcl1, ozAclGet.get(0)); List acls = new ArrayList<>(); - OzoneAcl ozAcl2 = new OzoneAcl(ACLIdentityType.USER, "admin", - ACLType.ALL, ACCESS); + OzoneAcl ozAcl2 = new OzoneAcl(ACLIdentityType.USER, "admin", ACCESS, ACLType.ALL); - BitSet rwRights = new BitSet(); - rwRights.set(IAccessAuthorizer.ACLType.WRITE.ordinal()); - rwRights.set(IAccessAuthorizer.ACLType.READ.ordinal()); - OzoneAcl ozAcl3 = new OzoneAcl(ACLIdentityType.GROUP, "dev", - rwRights, ACCESS); + OzoneAcl ozAcl3 = new OzoneAcl(ACLIdentityType.GROUP, "dev", ACCESS, READ, WRITE); - BitSet wRights = new BitSet(); - wRights.set(IAccessAuthorizer.ACLType.WRITE.ordinal()); - OzoneAcl ozAcl4 = new OzoneAcl(ACLIdentityType.GROUP, "dev", - wRights, ACCESS); + OzoneAcl ozAcl4 = new OzoneAcl(ACLIdentityType.GROUP, "dev", ACCESS, WRITE); - BitSet rRights = new BitSet(); - rRights.set(IAccessAuthorizer.ACLType.READ.ordinal()); - OzoneAcl ozAcl5 = new OzoneAcl(ACLIdentityType.GROUP, "dev", - rRights, ACCESS); + OzoneAcl ozAcl5 = new OzoneAcl(ACLIdentityType.GROUP, "dev", ACCESS, READ); acls.add(ozAcl2); acls.add(ozAcl3); @@ -624,7 +617,7 @@ public void testInvalidPrefixAcl() throws IOException { // Invalid 
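The OzoneAcl edits in these hunks all apply one signature change: the ACL scope moves ahead of the rights, and multiple rights are passed as ACLType varargs instead of a hand-built BitSet. A short sketch of old versus new, assuming the same static imports the tests use (ACCESS and DEFAULT for the scope; READ, WRITE and ALL from IAccessAuthorizer.ACLType):

// Old style, removed by this patch: rights collected in a BitSet, scope last.
//   BitSet rwRights = new BitSet();
//   rwRights.set(ACLType.READ.ordinal());
//   rwRights.set(ACLType.WRITE.ordinal());
//   OzoneAcl acl = new OzoneAcl(ACLIdentityType.GROUP, "dev", rwRights, ACCESS);

// New style used throughout the hunks: scope first, rights as varargs.
OzoneAcl readOnly  = new OzoneAcl(ACLIdentityType.USER,  "user1", ACCESS, READ);
OzoneAcl readWrite = new OzoneAcl(ACLIdentityType.GROUP, "dev",   ACCESS, READ, WRITE);
OzoneAcl allRights = new OzoneAcl(ACLIdentityType.USER,  "admin", DEFAULT, ACLType.ALL);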
prefix not ending with "/" String invalidPrefix = "invalid/pf"; OzoneAcl ozAcl1 = new OzoneAcl(ACLIdentityType.USER, "user1", - ACLType.READ, ACCESS); + ACCESS, ACLType.READ); OzoneObj ozInvalidPrefix = new OzoneObjInfo.Builder() .setVolumeName(volumeName) @@ -637,7 +630,7 @@ public void testInvalidPrefixAcl() throws IOException { // add acl with invalid prefix name Exception ex = assertThrows(OMException.class, () -> writeClient.addAcl(ozInvalidPrefix, ozAcl1)); - assertTrue(ex.getMessage().startsWith("Invalid prefix name")); + assertTrue(ex.getMessage().startsWith("Missing trailing slash")); OzoneObj ozPrefix1 = new OzoneObjInfo.Builder() .setVolumeName(volumeName) @@ -655,7 +648,7 @@ public void testInvalidPrefixAcl() throws IOException { // get acl with invalid prefix name ex = assertThrows(OMException.class, () -> writeClient.getAcl(ozInvalidPrefix)); - assertTrue(ex.getMessage().startsWith("Invalid prefix name")); + assertTrue(ex.getMessage().startsWith("Missing trailing slash")); // set acl with invalid prefix name List ozoneAcls = new ArrayList(); @@ -663,12 +656,12 @@ public void testInvalidPrefixAcl() throws IOException { ex = assertThrows(OMException.class, () -> writeClient.setAcl(ozInvalidPrefix, ozoneAcls)); - assertTrue(ex.getMessage().startsWith("Invalid prefix name")); + assertTrue(ex.getMessage().startsWith("Missing trailing slash")); // remove acl with invalid prefix name ex = assertThrows(OMException.class, () -> writeClient.removeAcl(ozInvalidPrefix, ozAcl1)); - assertTrue(ex.getMessage().startsWith("Invalid prefix name")); + assertTrue(ex.getMessage().startsWith("Missing trailing slash")); } @Test @@ -688,7 +681,7 @@ public void testLongestPrefixPath() throws IOException { .build(); OzoneAcl ozAcl1 = new OzoneAcl(ACLIdentityType.USER, "user1", - ACLType.READ, ACCESS); + ACCESS, ACLType.READ); writeClient.addAcl(ozPrefix1, ozAcl1); OzoneObj ozFile1 = new OzoneObjInfo.Builder() @@ -976,12 +969,11 @@ public void testListStatusWithTableCache() throws Exception { if (i % 2 == 0) { // Add to DB OMRequestTestUtils.addKeyToTable(false, VOLUME_NAME, BUCKET_NAME, prefixKeyInDB + i, - 1000L, HddsProtos.ReplicationType.RATIS, - ONE, metadataManager); + 1000L, RatisReplicationConfig.getInstance(ONE), metadataManager); } else { // Add to TableCache OMRequestTestUtils.addKeyToTableCache( VOLUME_NAME, BUCKET_NAME, prefixKeyInCache + i, - HddsProtos.ReplicationType.RATIS, ONE, + RatisReplicationConfig.getInstance(ONE), metadataManager); } } @@ -1012,8 +1004,10 @@ public void testListStatusWithTableCache() throws Exception { } } - @Test - public void testListStatusWithTableCacheRecursive() throws Exception { + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testListStatusWithTableCacheRecursive(boolean enablePath) throws Exception { + conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, enablePath); String keyNameDir1 = "dir1"; OmKeyArgs keyArgsDir1 = createBuilder().setKeyName(keyNameDir1).build(); @@ -1048,13 +1042,12 @@ public void testListStatusWithTableCacheRecursive() throws Exception { OMRequestTestUtils.addKeyToTable(false, VOLUME_NAME, BUCKET_NAME, keyNameDir1Subdir1 + OZONE_URI_DELIMITER + prefixKeyInDB + i, - 1000L, HddsProtos.ReplicationType.RATIS, - ONE, metadataManager); + 1000L, RatisReplicationConfig.getInstance(ONE), metadataManager); } else { // Add to TableCache OMRequestTestUtils.addKeyToTableCache( VOLUME_NAME, BUCKET_NAME, keyNameDir1Subdir1 + OZONE_URI_DELIMITER + prefixKeyInCache + i, - HddsProtos.ReplicationType.RATIS, ONE, + 
RatisReplicationConfig.getInstance(ONE), metadataManager); } } @@ -1092,13 +1085,12 @@ public void testListStatusWithDeletedEntriesInCache() throws Exception { if (i % 2 == 0) { OMRequestTestUtils.addKeyToTable(false, VOLUME_NAME, BUCKET_NAME, prefixKey + i, - 1000L, HddsProtos.ReplicationType.RATIS, - ONE, metadataManager); + 1000L, RatisReplicationConfig.getInstance(ONE), metadataManager); existKeySet.add(prefixKey + i); } else { OMRequestTestUtils.addKeyToTableCache( VOLUME_NAME, BUCKET_NAME, prefixKey + i, - HddsProtos.ReplicationType.RATIS, ONE, + RatisReplicationConfig.getInstance(ONE), metadataManager); String key = metadataManager.getOzoneKey( @@ -1199,8 +1191,10 @@ public void testListStatusWithDeletedEntriesInCache() throws Exception { assertTrue(existKeySet.isEmpty()); } - @Test - public void testListStatus() throws IOException { + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testListStatus(boolean enablePath) throws IOException { + conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, enablePath); String superDir = RandomStringUtils.randomAlphabetic(5); int numDirectories = 5; @@ -1446,8 +1440,7 @@ public void testRefreshPipeline() throws Exception { when(scmClientMock.getContainerClient()).thenReturn(sclProtocolMock); OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo("v1", - "b1", "k1", ReplicationType.RATIS, - ReplicationFactor.THREE); + "b1", "k1", RatisReplicationConfig.getInstance(THREE)).build(); // Add block to key. List omKeyLocationInfoList = new ArrayList<>(); @@ -1501,8 +1494,7 @@ public void testRefreshPipelineException() throws Exception { OMPerformanceMetrics metrics = mock(OMPerformanceMetrics.class); OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo("v1", - "b1", "k1", ReplicationType.RATIS, - ReplicationFactor.THREE); + "b1", "k1", RatisReplicationConfig.getInstance(THREE)).build(); // Add block to key. 
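
The hunks above consistently replace the (ReplicationType, ReplicationFactor) argument pair with a single ReplicationConfig object. For context, a minimal sketch of that style, using only types that already appear in this patch; the wrapper class and helper method below are illustrative, not part of the change:

import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

// Hypothetical helper: pick a ReplicationConfig once and pass the object through
// test utilities, instead of threading a separate type/factor pair everywhere.
final class ReplicationConfigSketch {
  private ReplicationConfigSketch() { }

  static ReplicationConfig forTest(boolean useErasureCoding) {
    return useErasureCoding
        ? new ECReplicationConfig("rs-3-2-1024K")
        : RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE);
  }
}
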
List omKeyLocationInfoList = new ArrayList<>(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java index 83eac0ab288b..e3bb5b5bccb8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java @@ -45,6 +45,7 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; @@ -67,6 +68,7 @@ public class TestKeyPurging { @BeforeEach public void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, @@ -77,7 +79,6 @@ public void setup() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) - .setHbInterval(200) .build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java index be972557f4a4..204c0ee66818 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java @@ -63,6 +63,8 @@ public class TestListKeys { private static OzoneConfiguration conf; private static OzoneBucket legacyOzoneBucket; + + private static OzoneBucket obsOzoneBucket; private static OzoneClient client; /** @@ -86,6 +88,10 @@ public static void init() throws Exception { legacyOzoneBucket = TestDataUtil .createVolumeAndBucket(client, BucketLayout.LEGACY); + // create a volume and a OBJECT_STORE bucket + obsOzoneBucket = TestDataUtil + .createVolumeAndBucket(client, BucketLayout.OBJECT_STORE); + initFSNameSpace(); } @@ -99,6 +105,7 @@ public static void teardownClass() { private static void initFSNameSpace() throws Exception { buildNameSpaceTree(legacyOzoneBucket); + buildNameSpaceTree(obsOzoneBucket); } /** @@ -108,9 +115,9 @@ private static void initFSNameSpace() throws Exception { * | * a1 * | - * ----------------------------------- - * | | | - * b1 b2 b3 + * -------------------------------------------------------- + * | | | | + * b1 b2 b3 b4 * ------- --------- ----------- * | | | | | | | | * c1 c2 d1 d2 d3 e1 e2 e3 @@ -125,25 +132,27 @@ private static void initFSNameSpace() throws Exception { private static void buildNameSpaceTree(OzoneBucket ozoneBucket) throws Exception { LinkedList keys = new LinkedList<>(); - keys.add("/a1/b1/c1111.tx"); - keys.add("/a1/b1/c1222.tx"); - keys.add("/a1/b1/c1333.tx"); - keys.add("/a1/b1/c1444.tx"); - keys.add("/a1/b1/c1555.tx"); - keys.add("/a1/b1/c1/c1.tx"); - keys.add("/a1/b1/c12/c2.tx"); - keys.add("/a1/b1/c12/c3.tx"); - - keys.add("/a1/b2/d1/d11.tx"); - keys.add("/a1/b2/d2/d21.tx"); - keys.add("/a1/b2/d2/d22.tx"); - keys.add("/a1/b2/d3/d31.tx"); - - keys.add("/a1/b3/e1/e11.tx"); - 
keys.add("/a1/b3/e2/e21.tx"); - keys.add("/a1/b3/e3/e31.tx"); + keys.add("a1/b1/c1111.tx"); + keys.add("a1/b1/c1222.tx"); + keys.add("a1/b1/c1333.tx"); + keys.add("a1/b1/c1444.tx"); + keys.add("a1/b1/c1555.tx"); + keys.add("a1/b1/c1/c1.tx"); + keys.add("a1/b1/c12/c2.tx"); + keys.add("a1/b1/c12/c3.tx"); + + keys.add("a1/b2/d1/d11.tx"); + keys.add("a1/b2/d2/d21.tx"); + keys.add("a1/b2/d2/d22.tx"); + keys.add("a1/b2/d3/d31.tx"); + + keys.add("a1/b3/e1/e11.tx"); + keys.add("a1/b3/e2/e21.tx"); + keys.add("a1/b3/e3/e31.tx"); createKeys(ozoneBucket, keys); + + ozoneBucket.createDirectory("a1/b4/"); } private static Stream shallowListDataWithTrailingSlash() { @@ -186,6 +195,58 @@ private static Stream shallowListDataWithTrailingSlash() { "a1/b1/c1333.tx", "a1/b1/c1444.tx", "a1/b1/c1555.tx" + ))), + + // Case-7: StartKey is empty, return key that is same as keyPrefix. + of("a1/b4/", "", newLinkedList(Arrays.asList( + "a1/b4/" + ))) + ); + } + + private static Stream shallowListObsDataWithTrailingSlash() { + return Stream.of( + + // Case-1: StartKey is less than prefixKey, return emptyList. + of("a1/b2/", "a1", newLinkedList(Collections.emptyList())), + + // Case-2: StartKey is empty, return all immediate node. + of("a1/b2/", "", newLinkedList(Arrays.asList( + "a1/b2/d1/", + "a1/b2/d2/", + "a1/b2/d3/" + ))), + + // Case-3: StartKey is same as prefixKey, return all immediate nodes. + of("a1/b2/", "a1/b2", newLinkedList(Arrays.asList( + "a1/b2/d1/", + "a1/b2/d2/", + "a1/b2/d3/" + ))), + + // Case-4: StartKey is greater than prefixKey + of("a1/b2/", "a1/b2/d2/d21.tx", newLinkedList(Arrays.asList( + "a1/b2/d2/", + "a1/b2/d3/" + ))), + + // Case-5: StartKey reaches last element, return emptyList + of("a1/b2/", "a1/b2/d3/d31.tx", newLinkedList( + Collections.emptyList() + )), + + // Case-6: Mix result + of("a1/b1/", "a1/b1/c12", newLinkedList(Arrays.asList( + "a1/b1/c12/", + "a1/b1/c1222.tx", + "a1/b1/c1333.tx", + "a1/b1/c1444.tx", + "a1/b1/c1555.tx" + ))), + + // Case-7: StartKey is empty, return key that is same as keyPrefix. 
+ of("a1/b4/", "", newLinkedList(Arrays.asList( + "a1/b4/" ))) ); } @@ -252,6 +313,11 @@ private static Stream shallowListDataWithoutTrailingSlash() { of("a1/b1/c12", "", newLinkedList(Arrays.asList( "a1/b1/c12/", "a1/b1/c1222.tx" + ))), + + // Case-10: + of("a1/b4", "", newLinkedList(Arrays.asList( + "a1/b4/" ))) ); @@ -264,11 +330,19 @@ public void testShallowListKeysWithPrefixTrailingSlash(String keyPrefix, checkKeyShallowList(keyPrefix, startKey, expectedKeys, legacyOzoneBucket); } + @ParameterizedTest + @MethodSource("shallowListObsDataWithTrailingSlash") + public void testShallowListObsKeysWithPrefixTrailingSlash(String keyPrefix, + String startKey, List expectedKeys) throws Exception { + checkKeyShallowList(keyPrefix, startKey, expectedKeys, obsOzoneBucket); + } + @ParameterizedTest @MethodSource("shallowListDataWithoutTrailingSlash") public void testShallowListKeysWithoutPrefixTrailingSlash(String keyPrefix, String startKey, List expectedKeys) throws Exception { checkKeyShallowList(keyPrefix, startKey, expectedKeys, legacyOzoneBucket); + checkKeyShallowList(keyPrefix, startKey, expectedKeys, obsOzoneBucket); } private void checkKeyShallowList(String keyPrefix, String startKey, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java index f499e3569c8b..11594f3ef11c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java @@ -63,6 +63,8 @@ public class TestListKeysWithFSO { private static OzoneBucket fsoOzoneBucket; private static OzoneBucket legacyOzoneBucket2; private static OzoneBucket fsoOzoneBucket2; + private static OzoneBucket emptyLegacyOzoneBucket; + private static OzoneBucket emptyFsoOzoneBucket; private static OzoneClient client; /** @@ -105,6 +107,10 @@ public static void init() throws Exception { ozoneVolume.createBucket(fsoBucketName, omBucketArgs); fsoOzoneBucket2 = ozoneVolume.getBucket(fsoBucketName); + fsoBucketName = "bucket" + RandomStringUtils.randomNumeric(5); + ozoneVolume.createBucket(fsoBucketName, omBucketArgs); + emptyFsoOzoneBucket = ozoneVolume.getBucket(fsoBucketName); + builder = BucketArgs.newBuilder(); builder.setStorageType(StorageType.DISK); builder.setBucketLayout(BucketLayout.LEGACY); @@ -113,6 +119,10 @@ public static void init() throws Exception { ozoneVolume.createBucket(legacyBucketName, omBucketArgs); legacyOzoneBucket2 = ozoneVolume.getBucket(legacyBucketName); + legacyBucketName = "bucket" + RandomStringUtils.randomNumeric(5); + ozoneVolume.createBucket(legacyBucketName, omBucketArgs); + emptyLegacyOzoneBucket = ozoneVolume.getBucket(legacyBucketName); + initFSNameSpace(); } @@ -479,6 +489,23 @@ public void testShallowListKeys() throws Exception { expectedKeys = getExpectedKeyShallowList(keyPrefix, startKey, legacyOzoneBucket); checkKeyShallowList(keyPrefix, startKey, expectedKeys, fsoOzoneBucket); + + // case-7: keyPrefix corresponds to multiple existing keys and + // startKey is null in empty bucket + keyPrefix = "a1/b1/c12"; + startKey = null; + // a1/b1/c1222.tx + expectedKeys = + getExpectedKeyShallowList(keyPrefix, startKey, emptyLegacyOzoneBucket); + checkKeyShallowList(keyPrefix, startKey, expectedKeys, emptyFsoOzoneBucket); + + // case-8: keyPrefix corresponds to multiple existing keys and + // startKey is null + keyPrefix = 
"a1/b1/c12"; + // a1/b1/c1222.tx + expectedKeys = + getExpectedKeyShallowList(keyPrefix, startKey, legacyOzoneBucket); + checkKeyShallowList(keyPrefix, startKey, expectedKeys, fsoOzoneBucket); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java index 52cb9287cc02..20977f9d4834 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java @@ -16,10 +16,10 @@ */ package org.apache.hadoop.ozone.om; -import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -29,24 +29,30 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.List; +import java.util.stream.Stream; + +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.apache.hadoop.ozone.OzoneConfigKeys. - OZONE_FS_ITERATE_BATCH_SIZE; +import static org.junit.jupiter.params.provider.Arguments.arguments; /** * A simple test that asserts that list status output is sorted. */ @Timeout(1200) public class TestListStatus { + private static final Logger LOG = LoggerFactory.getLogger(TestListStatus.class); private static MiniOzoneCluster cluster = null; - private static OzoneConfiguration conf; private static OzoneBucket fsoOzoneBucket; private static OzoneClient client; @@ -54,11 +60,11 @@ public class TestListStatus { * Create a MiniDFSCluster for testing. *

    * - * @throws IOException + * @throws IOException in case of I/O error */ @BeforeAll public static void init() throws Exception { - conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); cluster = MiniOzoneCluster.newBuilder(conf).build(); @@ -69,7 +75,7 @@ public static void init() throws Exception { fsoOzoneBucket = TestDataUtil .createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED); - // Set the number of keys to be processed during batch operate. + // Set the number of keys to be processed during batch operated. conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5); buildNameSpaceTree(fsoOzoneBucket); @@ -83,44 +89,30 @@ public static void teardownClass() { } } - @Test - public void testSortedListStatus() throws Exception { - // a) test if output is sorted - checkKeyList("", "", 1000, 10, false); - - // b) number of keys returns is expected - checkKeyList("", "", 2, 2, false); - - // c) check if full prefix works - checkKeyList("a1", "", 100, 3, false); - - // d) check if full prefix with numEntries work - checkKeyList("a1", "", 2, 2, false); - - // e) check if existing start key >>> - checkKeyList("a1", "a1/a12", 100, 2, false); - - // f) check with non-existing start key - checkKeyList("", "a7", 100, 6, false); - - // g) check if half prefix works - checkKeyList("b", "", 100, 4, true); - - // h) check half prefix with non-existing start key - checkKeyList("b", "b5", 100, 2, true); - - // i) check half prefix with non-existing parent in start key - checkKeyList("b", "c", 100, 0, true); - - // i) check half prefix with non-existing parent in start key - checkKeyList("b", "b/g5", 100, 4, true); - - // i) check half prefix with non-existing parent in start key - checkKeyList("b", "c/g5", 100, 0, true); + @MethodSource("sortedListStatusParametersSource") + @ParameterizedTest(name = "{index} {5}") + public void testSortedListStatus(String keyPrefix, String startKey, int numEntries, int expectedNumKeys, + boolean isPartialPrefix, String testName) throws Exception { + checkKeyList(keyPrefix, startKey, numEntries, expectedNumKeys, isPartialPrefix); + } - // j) check prefix with non-existing prefix key - // and non-existing parent in start key - checkKeyList("a1/a111", "a1/a111/a100", 100, 0, true); + private static Stream sortedListStatusParametersSource() { + return Stream.of( + arguments("", "", 1000, 10, false, "Test if output is sorted"), + arguments("", "", 2, 2, false, "Number of keys returns is expected"), + arguments("a1", "", 100, 3, false, "Check if the full prefix works"), + arguments("a1", "", 2, 2, false, "Check if full prefix with numEntries work"), + arguments("a1", "a1/a12", 100, 2, false, "Check if existing start key >>>"), + arguments("", "a7", 100, 6, false, "Check with a non-existing start key"), + arguments("b", "", 100, 4, true, "Check if half-prefix works"), + arguments("b", "b5", 100, 2, true, "Check half prefix with non-existing start key"), + arguments("b", "c", 100, 0, true, "Check half prefix with non-existing parent in a start key"), + arguments("b", "b/g5", 100, 4, true, "Check half prefix with non-existing parent in a start key"), + arguments("b", "c/g5", 100, 0, true, "Check half prefix with non-existing parent in a start key"), + arguments("a1/a111", "a1/a111/a100", 100, 0, true, "Check prefix with a non-existing prefix key\n" + + " and non-existing parent in a start key"), + arguments("a1/a111", null, 100, 0, true, "Check start key is null") + ); } 
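
The conversion above is the standard JUnit 5 @MethodSource pattern: each former inline checkKeyList(...) call becomes one Arguments row, and the trailing description string doubles as the display name via "{index} {5}". A self-contained sketch of the same pattern (class, method, and case names here are illustrative, not part of this patch):

import java.util.stream.Stream;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;

import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.params.provider.Arguments.arguments;

class ParameterizedListingSketchTest {

  // The last argument ({2}, zero-based) is only used to label each test run.
  @ParameterizedTest(name = "{index} {2}")
  @MethodSource("listingCases")
  void sortedListing(int numEntries, int expectedNumKeys, String description) {
    // The real test would call checkKeyList(...); this sketch only checks the
    // invariant that a case never expects more keys than it asks for.
    assertTrue(expectedNumKeys <= numEntries);
  }

  private static Stream<Arguments> listingCases() {
    return Stream.of(
        arguments(1000, 10, "full listing is sorted"),
        arguments(2, 2, "numEntries limits the result"));
  }
}
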
private static void createFile(OzoneBucket bucket, String keyName) @@ -131,6 +123,7 @@ private static void createFile(OzoneBucket bucket, String keyName) oos.flush(); } } + private static void buildNameSpaceTree(OzoneBucket ozoneBucket) throws Exception { /* @@ -172,33 +165,29 @@ private static void buildNameSpaceTree(OzoneBucket ozoneBucket) createFile(ozoneBucket, "/b8"); } - private void checkKeyList(String keyPrefix, String startKey, - long numEntries, int expectedNumKeys, - boolean isPartialPrefix) - throws Exception { + private void checkKeyList(String keyPrefix, String startKey, long numEntries, int expectedNumKeys, + boolean isPartialPrefix) throws Exception { List statuses = fsoOzoneBucket.listStatus(keyPrefix, false, startKey, numEntries, isPartialPrefix); assertEquals(expectedNumKeys, statuses.size()); - System.out.println("BEGIN:::keyPrefix---> " + keyPrefix + ":::---> " + - startKey); + LOG.info("BEGIN:::keyPrefix---> {} :::---> {}", keyPrefix, startKey); for (int i = 0; i < statuses.size() - 1; i++) { OzoneFileStatus stCurr = statuses.get(i); OzoneFileStatus stNext = statuses.get(i + 1); - System.out.println("status:" + stCurr); + LOG.info("status: {}", stCurr); assertThat(stCurr.getPath().compareTo(stNext.getPath())).isLessThan(0); } if (!statuses.isEmpty()) { OzoneFileStatus stNext = statuses.get(statuses.size() - 1); - System.out.println("status:" + stNext); + LOG.info("status: {}", stNext); } - System.out.println("END:::keyPrefix---> " + keyPrefix + ":::---> " + - startKey); + LOG.info("END:::keyPrefix---> {}:::---> {}", keyPrefix, startKey); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java index b8e115864727..ae97b3f7b907 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java @@ -50,6 +50,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.LAYOUT_VERSION_KEY; import static org.apache.hadoop.ozone.om.OMUpgradeTestUtils.waitForFinalization; +import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.INITIAL_VERSION; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager.maxLayoutVersion; @@ -91,13 +92,13 @@ class TestOMBucketLayoutUpgrade { @BeforeAll void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, fromLayoutVersion); String omServiceId = UUID.randomUUID().toString(); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) - .setOMServiceId(omServiceId) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setOMServiceId(omServiceId) .setNumOfOzoneManagers(3) - .setNumDatanodes(1) - .setOmLayoutVersion(fromLayoutVersion) - .build(); + .setNumDatanodes(1); + cluster = builder.build(); cluster.waitForClusterToBeReady(); ozoneManager = cluster.getOzoneManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java index 991b3a66fb03..01ba4db399fb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java @@ -24,6 +24,8 @@ import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.client.ObjectStore; @@ -42,11 +44,13 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; import static org.junit.jupiter.api.Assertions.assertEquals; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OmUtils.EPOCH_ID_SHIFT; import static org.apache.hadoop.ozone.OmUtils.EPOCH_WHEN_RATIS_NOT_ENABLED; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests OM epoch generation for when Ratis is not enabled. @@ -145,6 +149,48 @@ public void testUniqueTrxnIndexOnOMRestart() throws Exception { assertEquals(4, om.getLastTrxnIndexForNonRatis()); } + @Test + public void testIncreaseTrxnIndexBasedOnExistingDB() throws Exception { + // Set transactionInfo.getTerm() not -1 to mock the DB migrated from ratis cluster. + // When OM is first started from the existing ratis DB, the transaction index for + // requests should not start from 0. It should incrementally increase from the last + // transaction index which was stored in DB transactionInfoTable before started. + + String volumeName = "volume" + RandomStringUtils.randomNumeric(5); + String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); + String keyName = "key" + RandomStringUtils.randomNumeric(5); + + OzoneManager om = cluster.getOzoneManager(); + ObjectStore objectStore = client.getObjectStore(); + + objectStore.createVolume(volumeName); + OzoneVolume ozoneVolume = objectStore.getVolume(volumeName); + ozoneVolume.createBucket(bucketName); + + Table transactionInfoTable = om.getMetadataManager().getTransactionInfoTable(); + long initIndex = transactionInfoTable.get(TRANSACTION_INFO_KEY).getTransactionIndex(); + // Set transactionInfo.getTerm() = 1 to mock the DB migrated from ratis cluster + transactionInfoTable.put(TRANSACTION_INFO_KEY, TransactionInfo.valueOf(1, initIndex)); + TransactionInfo transactionInfo = transactionInfoTable.get(TRANSACTION_INFO_KEY); + // Verify transaction term != -1 and index > 1 + assertEquals(1, transactionInfo.getTerm()); + assertTrue(initIndex > 1); + + // Restart the OM and create new object + cluster.restartOzoneManager(); + + String data = "random data"; + OzoneOutputStream ozoneOutputStream = ozoneVolume.getBucket(bucketName).createKey(keyName, data.length()); + ozoneOutputStream.write(data.getBytes(UTF_8), 0, data.length()); + ozoneOutputStream.close(); + + // Transaction index after OM restart is incremented by 2 (create and commit op) from the last + // transaction index before OM restart rather than from 0. 
+ // So, the transactionIndex should be (initIndex + 2) rather than (0 + 2) + assertEquals(initIndex + 2, + om.getMetadataManager().getTransactionInfoTable().get(TRANSACTION_INFO_KEY).getTransactionIndex()); + } + @Test public void testEpochIntegrationInObjectID() throws Exception { // Create a volume and check the objectID has the epoch as diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java index f3f0c7d69b9c..bd5046bfc0bf 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java @@ -155,7 +155,7 @@ public void init(TestInfo testInfo) throws Exception { omRatisConf.setLogAppenderWaitTimeMin(10); conf.setFromObject(omRatisConf); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId("om-service-test1") .setNumOfOzoneManagers(numOfOMs) .setNumOfActiveOMs(2) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java index fa84130c9d6f..ccf94bef3c80 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.LAYOUT_VERSION_KEY; import static org.apache.hadoop.ozone.om.OMUpgradeTestUtils.assertClusterPrepared; import static org.apache.hadoop.ozone.om.OMUpgradeTestUtils.waitForFinalization; +import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.INITIAL_VERSION; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager.maxLayoutVersion; import static org.apache.ozone.test.GenericTestUtils.waitFor; @@ -103,12 +104,12 @@ void testOMUpgradeFinalizationWithOneOMDown() throws Exception { private static MiniOzoneHAClusterImpl newCluster(OzoneConfiguration conf) throws IOException { - return (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) - .setOMServiceId(UUID.randomUUID().toString()) + conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, INITIAL_VERSION.layoutVersion()); + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setOMServiceId(UUID.randomUUID().toString()) .setNumOfOzoneManagers(3) - .setNumDatanodes(1) - .setOmLayoutVersion(INITIAL_VERSION.layoutVersion()) - .build(); + .setNumDatanodes(1); + return builder.build(); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java index be2e0a96526e..9c7a0a7032bc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om; +import javax.xml.bind.DatatypeConverter; import org.apache.hadoop.hdds.utils.IOUtils; 
import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.RandomUtils; @@ -52,6 +53,8 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.Arrays; import java.util.LinkedHashMap; import java.util.Map; @@ -213,7 +216,8 @@ public void testMultiPartCompleteUpload() throws Exception { } private OmMultipartUploadCompleteInfo uploadMPUWithDirectoryExists( - OzoneBucket bucket, String keyName) throws IOException { + OzoneBucket bucket, String keyName) + throws IOException, NoSuchAlgorithmException { OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)); @@ -226,6 +230,9 @@ private OmMultipartUploadCompleteInfo uploadMPUWithDirectoryExists( OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(MessageDigest.getInstance(OzoneConsts.MD5_HASH) + .digest(data)).toLowerCase()); ozoneOutputStream.close(); if (bucket.getBucketLayout() == BucketLayout.OBJECT_STORE) { @@ -245,7 +252,7 @@ private OmMultipartUploadCompleteInfo uploadMPUWithDirectoryExists( ozoneOutputStream.getCommitUploadPartInfo(); Map partsMap = new LinkedHashMap<>(); - partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName()); + partsMap.put(1, omMultipartCommitUploadPartInfo.getETag()); OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java index 50ff9c36a0a3..e773bf7ed7f2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java @@ -50,6 +50,9 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.net.InnerNodeImpl; +import org.apache.hadoop.hdds.scm.net.NetConstants; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; @@ -162,6 +165,9 @@ public static void setUp() throws Exception { mockScmBlockLocationProtocol = mock(ScmBlockLocationProtocol.class); mockScmContainerClient = mock(StorageContainerLocationProtocol.class); + InnerNode.Factory factory = InnerNodeImpl.FACTORY; + when(mockScmBlockLocationProtocol.getNetworkTopology()).thenReturn( + factory.newInnerNode("", "", null, NetConstants.ROOT_LEVEL, 1)); OmTestManagers omTestManagers = new OmTestManagers(conf, mockScmBlockLocationProtocol, mockScmContainerClient); @@ -247,10 +253,13 @@ private static void createVolume(String volumeName) throws IOException { } @BeforeEach - public void beforeEach() { + public void beforeEach() throws IOException { CONTAINER_ID.getAndIncrement(); reset(mockScmBlockLocationProtocol, mockScmContainerClient, 
mockDn1Protocol, mockDn2Protocol); + InnerNode.Factory factory = InnerNodeImpl.FACTORY; + when(mockScmBlockLocationProtocol.getNetworkTopology()).thenReturn( + factory.newInnerNode("", "", null, NetConstants.ROOT_LEVEL, 1)); when(mockDn1Protocol.getPipeline()).thenReturn(createPipeline(DN1)); when(mockDn2Protocol.getPipeline()).thenReturn(createPipeline(DN2)); } @@ -598,18 +607,40 @@ private ContainerProtos.DatanodeBlockID createBlockId(long containerId, private void mockWriteChunkResponse(XceiverClientSpi mockDnProtocol) throws IOException, ExecutionException, InterruptedException { - ContainerCommandResponseProto writeResponse = - ContainerCommandResponseProto.newBuilder() - .setWriteChunk(WriteChunkResponseProto.newBuilder().build()) - .setResult(Result.SUCCESS) - .setCmdType(Type.WriteChunk) - .build(); doAnswer(invocation -> - new XceiverClientReply(completedFuture(writeResponse))) + new XceiverClientReply( + completedFuture( + createWriteChunkResponse( + (ContainerCommandRequestProto)invocation.getArgument(0))))) .when(mockDnProtocol) .sendCommandAsync(argThat(matchCmd(Type.WriteChunk))); } + ContainerCommandResponseProto createWriteChunkResponse( + ContainerCommandRequestProto request) { + ContainerProtos.WriteChunkRequestProto writeChunk = request.getWriteChunk(); + + WriteChunkResponseProto.Builder builder = + WriteChunkResponseProto.newBuilder(); + if (writeChunk.hasBlock()) { + ContainerProtos.BlockData + blockData = writeChunk.getBlock().getBlockData(); + + GetCommittedBlockLengthResponseProto response = + GetCommittedBlockLengthResponseProto.newBuilder() + .setBlockID(blockData.getBlockID()) + .setBlockLength(blockData.getSize()) + .build(); + + builder.setCommittedBlockLength(response); + } + return ContainerCommandResponseProto.newBuilder() + .setWriteChunk(builder.build()) + .setResult(Result.SUCCESS) + .setCmdType(Type.WriteChunk) + .build(); + } + private ArgumentMatcher matchCmd(Type type) { return argument -> argument != null && argument.getCmdType() == type; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java index 85e7c2a76e5c..29ee6c6e04a4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java @@ -16,53 +16,25 @@ */ package org.apache.hadoop.ozone.om; -import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_ROOT; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; -import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.BUCKET; -import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.VOLUME; -import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; -import static org.apache.ozone.test.MetricsAsserts.assertCounter; -import static org.apache.ozone.test.MetricsAsserts.getMetrics; -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.anyInt; -import static org.mockito.Mockito.doReturn; -import static 
org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.eq; -import static org.mockito.Mockito.spy; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.contract.ContractTestUtils; -import org.apache.hadoop.hdds.utils.IOUtils; -import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ContainerBlockID; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils; import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.ObjectStore; @@ -80,19 +52,46 @@ import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; -import org.apache.ozone.test.MetricsAsserts; import org.apache.ozone.test.GenericTestUtils; import org.assertj.core.util.Lists; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_ROOT; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.BUCKET; +import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.VOLUME; +import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; +import static org.apache.ozone.test.MetricsAsserts.getLongCounter; +import static org.apache.ozone.test.MetricsAsserts.getMetrics; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.eq; 
+import static org.mockito.Mockito.spy; + /** * Test for OM metrics. */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(300) public class TestOmMetrics { private MiniOzoneCluster cluster; @@ -106,16 +105,21 @@ public class TestOmMetrics { private final OMException exception = new OMException("dummyException", OMException.ResultCodes.TIMEOUT); private OzoneClient client; - /** * Create a MiniDFSCluster for testing. */ - @BeforeEach + + @BeforeAll public void setup() throws Exception { conf = new OzoneConfiguration(); conf.setTimeDuration(OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL, 1000, TimeUnit.MILLISECONDS); - clusterBuilder = MiniOzoneCluster.newBuilder(conf).withoutDatanodes(); + // Speed up background directory deletion for this test. + conf.setTimeDuration(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 1000, TimeUnit.MILLISECONDS); + // For testing fs operations with legacy buckets. + conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); + clusterBuilder = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5); + startCluster(); } private void startCluster() throws Exception { @@ -130,7 +134,7 @@ private void startCluster() throws Exception { /** * Shutdown MiniDFSCluster. */ - @AfterEach + @AfterAll public void shutdown() { IOUtils.closeQuietly(client); if (cluster != null) { @@ -140,22 +144,38 @@ public void shutdown() { @Test public void testVolumeOps() throws Exception { - startCluster(); VolumeManager volumeManager = (VolumeManager) HddsWhiteboxTestUtils.getInternalState( ozoneManager, "volumeManager"); VolumeManager mockVm = spy(volumeManager); + // get initial values for metrics + MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); + long initialNumVolumeOps = getLongCounter("NumVolumeOps", omMetrics); + long initialNumVolumeCreates = getLongCounter("NumVolumeCreates", omMetrics); + long initialNumVolumeUpdates = getLongCounter("NumVolumeUpdates", omMetrics); + long initialNumVolumeInfos = getLongCounter("NumVolumeInfos", omMetrics); + long initialNumVolumeDeletes = getLongCounter("NumVolumeDeletes", omMetrics); + long initialNumVolumeLists = getLongCounter("NumVolumeLists", omMetrics); + long initialNumVolumes = getLongCounter("NumVolumes", omMetrics); + + long initialNumVolumeCreateFails = getLongCounter("NumVolumeCreateFails", omMetrics); + long initialNumVolumeUpdateFails = getLongCounter("NumVolumeUpdateFails", omMetrics); + long initialNumVolumeInfoFails = getLongCounter("NumVolumeInfoFails", omMetrics); + long initialNumVolumeDeleteFails = getLongCounter("NumVolumeDeleteFails", omMetrics); + long initialNumVolumeListFails = getLongCounter("NumVolumeListFails", omMetrics); + OmVolumeArgs volumeArgs = createVolumeArgs(); doVolumeOps(volumeArgs); - MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); - assertCounter("NumVolumeOps", 5L, omMetrics); - assertCounter("NumVolumeCreates", 1L, omMetrics); - assertCounter("NumVolumeUpdates", 1L, omMetrics); - assertCounter("NumVolumeInfos", 1L, omMetrics); - assertCounter("NumVolumeDeletes", 1L, omMetrics); - assertCounter("NumVolumeLists", 1L, omMetrics); - assertCounter("NumVolumes", 1L, omMetrics); + + omMetrics = getMetrics("OMMetrics"); + assertEquals(initialNumVolumeOps + 5, getLongCounter("NumVolumeOps", omMetrics)); + assertEquals(initialNumVolumeCreates + 1, getLongCounter("NumVolumeCreates", omMetrics)); + assertEquals(initialNumVolumeUpdates + 1, getLongCounter("NumVolumeUpdates", omMetrics)); + assertEquals(initialNumVolumeInfos + 1, getLongCounter("NumVolumeInfos", omMetrics)); + 
assertEquals(initialNumVolumeDeletes + 1, getLongCounter("NumVolumeDeletes", omMetrics)); + assertEquals(initialNumVolumeLists + 1, getLongCounter("NumVolumeLists", omMetrics)); + assertEquals(initialNumVolumes, getLongCounter("NumVolumes", omMetrics)); volumeArgs = createVolumeArgs(); writeClient.createVolume(volumeArgs); @@ -166,10 +186,8 @@ public void testVolumeOps() throws Exception { writeClient.deleteVolume(volumeArgs.getVolume()); omMetrics = getMetrics("OMMetrics"); - // Accounting 's3v' volume which is created by default. - assertCounter("NumVolumes", 3L, omMetrics); - + assertEquals(initialNumVolumes + 2, getLongCounter("NumVolumes", omMetrics)); // inject exception to test for Failure Metrics on the read path doThrow(exception).when(mockVm).getVolumeInfo(any()); @@ -178,61 +196,78 @@ public void testVolumeOps() throws Exception { HddsWhiteboxTestUtils.setInternalState(ozoneManager, "volumeManager", mockVm); + // inject exception to test for Failure Metrics on the write path - mockWritePathExceptions(OmVolumeArgs.class); + OMMetadataManager metadataManager = mockWritePathExceptions(OmVolumeArgs.class); volumeArgs = createVolumeArgs(); doVolumeOps(volumeArgs); omMetrics = getMetrics("OMMetrics"); - assertCounter("NumVolumeOps", 14L, omMetrics); - assertCounter("NumVolumeCreates", 5L, omMetrics); - assertCounter("NumVolumeUpdates", 2L, omMetrics); - assertCounter("NumVolumeInfos", 2L, omMetrics); - assertCounter("NumVolumeDeletes", 3L, omMetrics); - assertCounter("NumVolumeLists", 2L, omMetrics); - - assertCounter("NumVolumeCreateFails", 1L, omMetrics); - assertCounter("NumVolumeUpdateFails", 1L, omMetrics); - assertCounter("NumVolumeInfoFails", 1L, omMetrics); - assertCounter("NumVolumeDeleteFails", 1L, omMetrics); - assertCounter("NumVolumeListFails", 1L, omMetrics); - - // As last call for volumesOps does not increment numVolumes as those are - // failed. 
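
The pattern running through this file: instead of asserting absolute counter values on a freshly started cluster, each test now records the relevant OMMetrics counters first and asserts the delta afterwards, which is what allows the move from per-test setup to a single shared cluster with the PER_CLASS lifecycle. A reduced sketch of that delta style; the helper class and the Runnable parameter are illustrative:

import org.apache.hadoop.metrics2.MetricsRecordBuilder;

import static org.apache.ozone.test.MetricsAsserts.getLongCounter;
import static org.apache.ozone.test.MetricsAsserts.getMetrics;
import static org.junit.jupiter.api.Assertions.assertEquals;

final class MetricsDeltaSketch {
  private MetricsDeltaSketch() { }

  // Capture the baseline, run the operation, then compare a fresh snapshot against
  // baseline + expected increment, so earlier tests on the shared cluster cannot
  // skew the assertion.
  static void assertCounterIncrementsBy(String counter, long expectedDelta, Runnable op) {
    MetricsRecordBuilder before = getMetrics("OMMetrics");
    long baseline = getLongCounter(counter, before);
    op.run();
    MetricsRecordBuilder after = getMetrics("OMMetrics");
    assertEquals(baseline + expectedDelta, getLongCounter(counter, after));
  }
}
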
- assertCounter("NumVolumes", 3L, omMetrics); - - cluster.restartOzoneManager(); - assertCounter("NumVolumes", 3L, omMetrics); - - + assertEquals(initialNumVolumeOps + 14, getLongCounter("NumVolumeOps", omMetrics)); + assertEquals(initialNumVolumeCreates + 5, getLongCounter("NumVolumeCreates", omMetrics)); + assertEquals(initialNumVolumeUpdates + 2, getLongCounter("NumVolumeUpdates", omMetrics)); + assertEquals(initialNumVolumeInfos + 2, getLongCounter("NumVolumeInfos", omMetrics)); + assertEquals(initialNumVolumeDeletes + 3, getLongCounter("NumVolumeDeletes", omMetrics)); + assertEquals(initialNumVolumeLists + 2, getLongCounter("NumVolumeLists", omMetrics)); + + assertEquals(initialNumVolumeCreateFails + 1, getLongCounter("NumVolumeCreateFails", omMetrics)); + assertEquals(initialNumVolumeUpdateFails + 1, getLongCounter("NumVolumeUpdateFails", omMetrics)); + assertEquals(initialNumVolumeInfoFails + 1, getLongCounter("NumVolumeInfoFails", omMetrics)); + assertEquals(initialNumVolumeDeleteFails + 1, getLongCounter("NumVolumeDeleteFails", omMetrics)); + assertEquals(initialNumVolumeListFails + 1, getLongCounter("NumVolumeListFails", omMetrics)); + assertEquals(initialNumVolumes + 2, getLongCounter("NumVolumes", omMetrics)); + + // restore state + HddsWhiteboxTestUtils.setInternalState(ozoneManager, + "volumeManager", volumeManager); + HddsWhiteboxTestUtils.setInternalState(ozoneManager, + "metadataManager", metadataManager); } @Test public void testBucketOps() throws Exception { - startCluster(); BucketManager bucketManager = (BucketManager) HddsWhiteboxTestUtils.getInternalState( ozoneManager, "bucketManager"); BucketManager mockBm = spy(bucketManager); + // get initial values for metrics + MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); + long initialNumBucketOps = getLongCounter("NumBucketOps", omMetrics); + long initialNumBucketCreates = getLongCounter("NumBucketCreates", omMetrics); + long initialNumBucketUpdates = getLongCounter("NumBucketUpdates", omMetrics); + long initialNumBucketInfos = getLongCounter("NumBucketInfos", omMetrics); + long initialNumBucketDeletes = getLongCounter("NumBucketDeletes", omMetrics); + long initialNumBucketLists = getLongCounter("NumBucketLists", omMetrics); + long initialNumBuckets = getLongCounter("NumBuckets", omMetrics); + long initialEcBucketCreateTotal = getLongCounter("EcBucketCreateTotal", omMetrics); + long initialEcBucketCreateFailsTotal = getLongCounter("EcBucketCreateFailsTotal", omMetrics); + + long initialNumBucketCreateFails = getLongCounter("NumBucketCreateFails", omMetrics); + long initialNumBucketUpdateFails = getLongCounter("NumBucketUpdateFails", omMetrics); + long initialNumBucketInfoFails = getLongCounter("NumBucketInfoFails", omMetrics); + long initialNumBucketDeleteFails = getLongCounter("NumBucketDeleteFails", omMetrics); + long initialNumBucketListFails = getLongCounter("NumBucketListFails", omMetrics); + OmBucketInfo bucketInfo = createBucketInfo(false); doBucketOps(bucketInfo); - MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); - assertCounter("NumBucketOps", 5L, omMetrics); - assertCounter("NumBucketCreates", 1L, omMetrics); - assertCounter("NumBucketUpdates", 1L, omMetrics); - assertCounter("NumBucketInfos", 1L, omMetrics); - assertCounter("NumBucketDeletes", 1L, omMetrics); - assertCounter("NumBucketLists", 1L, omMetrics); - assertCounter("NumBuckets", 0L, omMetrics); + omMetrics = getMetrics("OMMetrics"); + assertEquals(initialNumBucketOps + 5, getLongCounter("NumBucketOps", omMetrics)); + 
assertEquals(initialNumBucketCreates + 1, getLongCounter("NumBucketCreates", omMetrics)); + assertEquals(initialNumBucketUpdates + 1, getLongCounter("NumBucketUpdates", omMetrics)); + assertEquals(initialNumBucketInfos + 1, getLongCounter("NumBucketInfos", omMetrics)); + assertEquals(initialNumBucketDeletes + 1, getLongCounter("NumBucketDeletes", omMetrics)); + assertEquals(initialNumBucketLists + 1, getLongCounter("NumBucketLists", omMetrics)); + assertEquals(initialNumBuckets, getLongCounter("NumBuckets", omMetrics)); OmBucketInfo ecBucketInfo = createBucketInfo(true); writeClient.createBucket(ecBucketInfo); writeClient.deleteBucket(ecBucketInfo.getVolumeName(), ecBucketInfo.getBucketName()); + omMetrics = getMetrics("OMMetrics"); - assertCounter("EcBucketCreateTotal", 1L, omMetrics); + assertEquals(initialEcBucketCreateTotal + 1, getLongCounter("EcBucketCreateTotal", omMetrics)); bucketInfo = createBucketInfo(false); writeClient.createBucket(bucketInfo); @@ -244,7 +279,7 @@ public void testBucketOps() throws Exception { bucketInfo.getBucketName()); omMetrics = getMetrics("OMMetrics"); - assertCounter("NumBuckets", 2L, omMetrics); + assertEquals(initialNumBuckets + 2, getLongCounter("NumBuckets", omMetrics)); // inject exception to test for Failure Metrics on the read path doThrow(exception).when(mockBm).getBucketInfo(any(), any()); @@ -255,7 +290,7 @@ public void testBucketOps() throws Exception { ozoneManager, "bucketManager", mockBm); // inject exception to test for Failure Metrics on the write path - mockWritePathExceptions(OmBucketInfo.class); + OMMetadataManager metadataManager = mockWritePathExceptions(OmBucketInfo.class); doBucketOps(bucketInfo); ecBucketInfo = createBucketInfo(true); @@ -265,62 +300,81 @@ public void testBucketOps() throws Exception { //Expected failure } omMetrics = getMetrics("OMMetrics"); - assertCounter("EcBucketCreateFailsTotal", 1L, omMetrics); - - omMetrics = getMetrics("OMMetrics"); - assertCounter("NumBucketOps", 17L, omMetrics); - assertCounter("NumBucketCreates", 7L, omMetrics); - assertCounter("NumBucketUpdates", 2L, omMetrics); - assertCounter("NumBucketInfos", 2L, omMetrics); - assertCounter("NumBucketDeletes", 4L, omMetrics); - assertCounter("NumBucketLists", 2L, omMetrics); - - assertCounter("NumBucketCreateFails", 2L, omMetrics); - assertCounter("NumBucketUpdateFails", 1L, omMetrics); - assertCounter("NumBucketInfoFails", 1L, omMetrics); - assertCounter("NumBucketDeleteFails", 1L, omMetrics); - assertCounter("NumBucketListFails", 1L, omMetrics); - - assertCounter("NumBuckets", 2L, omMetrics); - - cluster.restartOzoneManager(); - assertCounter("NumBuckets", 2L, omMetrics); + assertEquals(initialEcBucketCreateFailsTotal + 1, getLongCounter("EcBucketCreateFailsTotal", omMetrics)); + assertEquals(initialNumBucketOps + 17, getLongCounter("NumBucketOps", omMetrics)); + assertEquals(initialNumBucketCreates + 7, getLongCounter("NumBucketCreates", omMetrics)); + assertEquals(initialNumBucketUpdates + 2, getLongCounter("NumBucketUpdates", omMetrics)); + assertEquals(initialNumBucketInfos + 2, getLongCounter("NumBucketInfos", omMetrics)); + assertEquals(initialNumBucketDeletes + 4, getLongCounter("NumBucketDeletes", omMetrics)); + assertEquals(initialNumBucketLists + 2, getLongCounter("NumBucketLists", omMetrics)); + + assertEquals(initialNumBucketCreateFails + 2, getLongCounter("NumBucketCreateFails", omMetrics)); + assertEquals(initialNumBucketUpdateFails + 1, getLongCounter("NumBucketUpdateFails", omMetrics)); + assertEquals(initialNumBucketInfoFails + 
1, getLongCounter("NumBucketInfoFails", omMetrics)); + assertEquals(initialNumBucketDeleteFails + 1, getLongCounter("NumBucketDeleteFails", omMetrics)); + assertEquals(initialNumBucketListFails + 1, getLongCounter("NumBucketListFails", omMetrics)); + assertEquals(initialNumBuckets + 2, getLongCounter("NumBuckets", omMetrics)); + + // restore state + HddsWhiteboxTestUtils.setInternalState(ozoneManager, + "bucketManager", bucketManager); + HddsWhiteboxTestUtils.setInternalState(ozoneManager, + "metadataManager", metadataManager); } @Test public void testKeyOps() throws Exception { - // This test needs a cluster with DNs and SCM to wait on safemode - clusterBuilder.setNumDatanodes(5); - conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, true); - startCluster(); String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); KeyManager keyManager = (KeyManager) HddsWhiteboxTestUtils .getInternalState(ozoneManager, "keyManager"); KeyManager mockKm = spy(keyManager); + + // get initial values for metrics + MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); + long initialNumKeyOps = getLongCounter("NumKeyOps", omMetrics); + long initialNumKeyAllocate = getLongCounter("NumKeyAllocate", omMetrics); + long initialNumKeyLookup = getLongCounter("NumKeyLookup", omMetrics); + long initialNumKeyDeletes = getLongCounter("NumKeyDeletes", omMetrics); + long initialNumKeyLists = getLongCounter("NumKeyLists", omMetrics); + long initialNumTrashKeyLists = getLongCounter("NumTrashKeyLists", omMetrics); + long initialNumKeys = getLongCounter("NumKeys", omMetrics); + long initialNumInitiateMultipartUploads = getLongCounter("NumInitiateMultipartUploads", omMetrics); + + long initialEcKeyCreateTotal = getLongCounter("EcKeyCreateTotal", omMetrics); + long initialNumKeyAllocateFails = getLongCounter("NumKeyAllocateFails", omMetrics); + long initialNumKeyLookupFails = getLongCounter("NumKeyLookupFails", omMetrics); + long initialNumKeyDeleteFails = getLongCounter("NumKeyDeleteFails", omMetrics); + long initialNumTrashKeyListFails = getLongCounter("NumTrashKeyListFails", omMetrics); + long initialNumInitiateMultipartUploadFails = getLongCounter("NumInitiateMultipartUploadFails", omMetrics); + long initialNumBlockAllocationFails = getLongCounter("NumBlockAllocationFails", omMetrics); + long initialNumKeyListFails = getLongCounter("NumKeyListFails", omMetrics); + long initialEcKeyCreateFailsTotal = getLongCounter("EcKeyCreateFailsTotal", omMetrics); + // see HDDS-10078 for making this work with FILE_SYSTEM_OPTIMIZED layout TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, BucketLayout.LEGACY); OmKeyArgs keyArgs = createKeyArgs(volumeName, bucketName, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); doKeyOps(keyArgs); - MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeyOps", 8L, omMetrics); - assertCounter("NumKeyAllocate", 1L, omMetrics); - assertCounter("NumKeyLookup", 1L, omMetrics); - assertCounter("NumKeyDeletes", 1L, omMetrics); - assertCounter("NumKeyLists", 1L, omMetrics); - assertCounter("NumTrashKeyLists", 1L, omMetrics); - assertCounter("NumKeys", 0L, omMetrics); - assertCounter("NumInitiateMultipartUploads", 1L, omMetrics); - assertCounter("NumListOpenFiles", 1L, omMetrics); + omMetrics = getMetrics("OMMetrics"); + + assertEquals(initialNumKeyOps + 8, getLongCounter("NumKeyOps", omMetrics)); + assertEquals(initialNumKeyAllocate + 1, getLongCounter("NumKeyAllocate", omMetrics)); + 
assertEquals(initialNumKeyLookup + 1, getLongCounter("NumKeyLookup", omMetrics)); + assertEquals(initialNumKeyDeletes + 1, getLongCounter("NumKeyDeletes", omMetrics)); + assertEquals(initialNumKeyLists + 1, getLongCounter("NumKeyLists", omMetrics)); + assertEquals(initialNumTrashKeyLists + 1, getLongCounter("NumTrashKeyLists", omMetrics)); + assertEquals(initialNumKeys, getLongCounter("NumKeys", omMetrics)); + assertEquals(initialNumInitiateMultipartUploads + 1, getLongCounter("NumInitiateMultipartUploads", omMetrics)); keyArgs = createKeyArgs(volumeName, bucketName, new ECReplicationConfig("rs-3-2-1024K")); doKeyOps(keyArgs); + omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeyOps", 16L, omMetrics); - assertCounter("EcKeyCreateTotal", 1L, omMetrics); + assertEquals(initialNumKeys, getLongCounter("NumKeys", omMetrics)); + assertEquals(initialEcKeyCreateTotal + 1, getLongCounter("EcKeyCreateTotal", omMetrics)); keyArgs = createKeyArgs(volumeName, bucketName, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); @@ -347,8 +401,8 @@ public void testKeyOps() throws Exception { } omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeys", 2L, omMetrics); - assertCounter("NumBlockAllocationFails", 1L, omMetrics); + assertEquals(initialNumKeys + 2, getLongCounter("NumKeys", omMetrics)); + assertEquals(initialNumBlockAllocationFails + 1, getLongCounter("NumBlockAllocationFails", omMetrics)); // inject exception to test for Failure Metrics on the read path doThrow(exception).when(mockKm).lookupKey(any(), any(), any()); @@ -365,27 +419,28 @@ public void testKeyOps() throws Exception { omMetadataReader, "keyManager", mockKm); // inject exception to test for Failure Metrics on the write path - mockWritePathExceptions(OmBucketInfo.class); + OMMetadataManager metadataManager = mockWritePathExceptions(OmBucketInfo.class); keyArgs = createKeyArgs(volumeName, bucketName, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); doKeyOps(keyArgs); omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeyOps", 31L, omMetrics); - assertCounter("NumKeyAllocate", 6L, omMetrics); - assertCounter("NumKeyLookup", 3L, omMetrics); - assertCounter("NumKeyDeletes", 4L, omMetrics); - assertCounter("NumKeyLists", 3L, omMetrics); - assertCounter("NumTrashKeyLists", 3L, omMetrics); - assertCounter("NumInitiateMultipartUploads", 3L, omMetrics); - - assertCounter("NumKeyAllocateFails", 1L, omMetrics); - assertCounter("NumKeyLookupFails", 1L, omMetrics); - assertCounter("NumKeyDeleteFails", 1L, omMetrics); - assertCounter("NumKeyListFails", 1L, omMetrics); - assertCounter("NumTrashKeyListFails", 1L, omMetrics); - assertCounter("NumInitiateMultipartUploadFails", 1L, omMetrics); - assertCounter("NumKeys", 2L, omMetrics); + assertEquals(initialNumKeyOps + 31, getLongCounter("NumKeyOps", omMetrics)); + assertEquals(initialNumKeyAllocate + 6, getLongCounter("NumKeyAllocate", omMetrics)); + assertEquals(initialNumKeyLookup + 3, getLongCounter("NumKeyLookup", omMetrics)); + assertEquals(initialNumKeyDeletes + 4, getLongCounter("NumKeyDeletes", omMetrics)); + assertEquals(initialNumKeyLists + 3, getLongCounter("NumKeyLists", omMetrics)); + assertEquals(initialNumTrashKeyLists + 3, getLongCounter("NumTrashKeyLists", omMetrics)); + assertEquals(initialNumInitiateMultipartUploads + 3, getLongCounter("NumInitiateMultipartUploads", omMetrics)); + + assertEquals(initialNumKeyAllocateFails + 1, getLongCounter("NumKeyAllocateFails", omMetrics)); + assertEquals(initialNumKeyLookupFails + 1, 
getLongCounter("NumKeyLookupFails", omMetrics)); + assertEquals(initialNumKeyDeleteFails + 1, getLongCounter("NumKeyDeleteFails", omMetrics)); + assertEquals(initialNumKeyListFails + 1, getLongCounter("NumKeyListFails", omMetrics)); + assertEquals(initialNumTrashKeyListFails + 1, getLongCounter("NumTrashKeyListFails", omMetrics)); + assertEquals(initialNumInitiateMultipartUploadFails + 1, getLongCounter( + "NumInitiateMultipartUploadFails", omMetrics)); + assertEquals(initialNumKeys + 2, getLongCounter("NumKeys", omMetrics)); keyArgs = createKeyArgs(volumeName, bucketName, new ECReplicationConfig("rs-3-2-1024K")); @@ -396,24 +451,24 @@ public void testKeyOps() throws Exception { //Expected Failure } omMetrics = getMetrics("OMMetrics"); - assertCounter("EcKeyCreateFailsTotal", 1L, omMetrics); - - cluster.restartOzoneManager(); - assertCounter("NumKeys", 2L, omMetrics); + assertEquals(initialEcKeyCreateFailsTotal + 1, getLongCounter("EcKeyCreateFailsTotal", omMetrics)); + // restore state + HddsWhiteboxTestUtils.setInternalState(ozoneManager, + "keyManager", keyManager); + HddsWhiteboxTestUtils.setInternalState(ozoneManager, + "metadataManager", metadataManager); } @ParameterizedTest @EnumSource(value = BucketLayout.class, names = {"FILE_SYSTEM_OPTIMIZED", "LEGACY"}) public void testDirectoryOps(BucketLayout bucketLayout) throws Exception { - clusterBuilder.setNumDatanodes(3); - conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, true); - // Speed up background directory deletion for this test. - conf.setTimeDuration(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); - conf.set(OzoneConfigKeys.OZONE_CLIENT_FS_DEFAULT_BUCKET_LAYOUT, bucketLayout.name()); - // For testing fs operations with legacy buckets. - conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); - startCluster(); + // get initial values for metrics + MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); + long initialNumKeys = getLongCounter("NumKeys", omMetrics); + long initialNumCreateDirectory = getLongCounter("NumCreateDirectory", omMetrics); + long initialNumKeyDeletes = getLongCounter("NumKeyDeletes", omMetrics); + long initialNumKeyRenames = getLongCounter("NumKeyRenames", omMetrics); // How long to wait for directory deleting service to clean up the files before aborting the test. final int timeoutMillis = @@ -424,13 +479,8 @@ public void testDirectoryOps(BucketLayout bucketLayout) throws Exception { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - // Cluster should be empty. - MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeys", 0L, omMetrics); - assertCounter("NumCreateDirectory", 0L, omMetrics); - // These key operations include directory operations. - assertCounter("NumKeyDeletes", 0L, omMetrics); - assertCounter("NumKeyRenames", 0L, omMetrics); + // create bucket with different layout in each ParameterizedTest + TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName, bucketLayout); // Create bucket with 2 nested directories. 
String rootPath = String.format("%s://%s/", @@ -443,72 +493,81 @@ public void testDirectoryOps(BucketLayout bucketLayout) throws Exception { assertEquals(bucketLayout, client.getObjectStore().getVolume(volumeName).getBucket(bucketName).getBucketLayout()); omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeys", 2L, omMetrics); + assertEquals(initialNumKeys + 2, getLongCounter("NumKeys", omMetrics)); // Only one directory create command is given, even though it created two directories. - assertCounter("NumCreateDirectory", 1L, omMetrics); - assertCounter("NumKeyDeletes", 0L, omMetrics); - assertCounter("NumKeyRenames", 0L, omMetrics); + assertEquals(initialNumCreateDirectory + 1, getLongCounter("NumCreateDirectory", omMetrics)); + assertEquals(initialNumKeyDeletes, getLongCounter("NumKeyDeletes", omMetrics)); + assertEquals(initialNumKeyRenames, getLongCounter("NumKeyRenames", omMetrics)); + // Add 2 files at different parts of the tree. ContractTestUtils.touch(fs, new Path(dirPath, "file1")); ContractTestUtils.touch(fs, new Path(dirPath.getParent(), "file2")); omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeys", 4L, omMetrics); - assertCounter("NumCreateDirectory", 1L, omMetrics); - assertCounter("NumKeyDeletes", 0L, omMetrics); - assertCounter("NumKeyRenames", 0L, omMetrics); + assertEquals(initialNumKeys + 4, getLongCounter("NumKeys", omMetrics)); + assertEquals(initialNumCreateDirectory + 1, getLongCounter("NumCreateDirectory", omMetrics)); + assertEquals(initialNumKeyDeletes, getLongCounter("NumKeyDeletes", omMetrics)); + assertEquals(initialNumKeyRenames, getLongCounter("NumKeyRenames", omMetrics)); // Rename the child directory. fs.rename(dirPath, new Path(dirPath.getParent(), "new-name")); omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeys", 4L, omMetrics); - assertCounter("NumCreateDirectory", 1L, omMetrics); - assertCounter("NumKeyDeletes", 0L, omMetrics); + assertEquals(initialNumKeys + 4, getLongCounter("NumKeys", omMetrics)); + assertEquals(initialNumCreateDirectory + 1, getLongCounter("NumCreateDirectory", omMetrics)); + assertEquals(initialNumKeyDeletes, getLongCounter("NumKeyDeletes", omMetrics)); long expectedRenames = 1; if (bucketLayout == BucketLayout.LEGACY) { // Legacy bucket must rename keys individually. expectedRenames = 2; } - assertCounter("NumKeyRenames", expectedRenames, omMetrics); + assertEquals(initialNumKeyRenames + expectedRenames, getLongCounter("NumKeyRenames", omMetrics)); // Delete metric should be decremented by directory deleting service in the background. fs.delete(dirPath.getParent(), true); GenericTestUtils.waitFor(() -> { - long keyCount = MetricsAsserts.getLongCounter("NumKeys", getMetrics("OMMetrics")); + long keyCount = getLongCounter("NumKeys", getMetrics("OMMetrics")); return keyCount == 0; }, timeoutMillis / 5, timeoutMillis); omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeys", 0L, omMetrics); + assertEquals(initialNumKeys, getLongCounter("NumKeys", omMetrics)); // This is the number of times the create directory command was given, not the current number of directories. - assertCounter("NumCreateDirectory", 1L, omMetrics); + assertEquals(initialNumCreateDirectory + 1, getLongCounter("NumCreateDirectory", omMetrics)); // Directory delete counts as key delete. One command was given so the metric is incremented once. 
- assertCounter("NumKeyDeletes", 1L, omMetrics); - assertCounter("NumKeyRenames", expectedRenames, omMetrics); + assertEquals(initialNumKeyDeletes + 1, getLongCounter("NumKeyDeletes", omMetrics)); + assertEquals(initialNumKeyRenames + expectedRenames, getLongCounter("NumKeyRenames", omMetrics)); // Re-create the same tree as before, but this time delete the bucket recursively. // All metrics should still be properly updated. fs.mkdirs(dirPath); ContractTestUtils.touch(fs, new Path(dirPath, "file1")); ContractTestUtils.touch(fs, new Path(dirPath.getParent(), "file2")); - assertCounter("NumKeys", 4L, getMetrics("OMMetrics")); + assertEquals(initialNumKeys, getLongCounter("NumKeys", omMetrics)); fs.delete(bucketPath, true); GenericTestUtils.waitFor(() -> { - long keyCount = MetricsAsserts.getLongCounter("NumKeys", getMetrics("OMMetrics")); + long keyCount = getLongCounter("NumKeys", getMetrics("OMMetrics")); return keyCount == 0; }, timeoutMillis / 5, timeoutMillis); omMetrics = getMetrics("OMMetrics"); - assertCounter("NumKeys", 0L, omMetrics); - assertCounter("NumCreateDirectory", 2L, omMetrics); + assertEquals(initialNumKeys, getLongCounter("NumKeys", omMetrics)); + assertEquals(initialNumCreateDirectory + 2, getLongCounter("NumCreateDirectory", omMetrics)); // One more keys delete request is given as part of the bucket delete to do a batch delete of its keys. - assertCounter("NumKeyDeletes", 2L, omMetrics); - assertCounter("NumKeyRenames", expectedRenames, omMetrics); + assertEquals(initialNumKeyDeletes + 2, getLongCounter("NumKeyDeletes", omMetrics)); + assertEquals(initialNumKeyRenames + expectedRenames, getLongCounter("NumKeyRenames", omMetrics)); } @Test public void testSnapshotOps() throws Exception { // This tests needs enough dataNodes to allocate the blocks for the keys. 
- clusterBuilder.setNumDatanodes(3); - startCluster(); + // get initial values for metrics + MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); + long initialNumSnapshotCreateFails = getLongCounter("NumSnapshotCreateFails", omMetrics); + long initialNumSnapshotCreates = getLongCounter("NumSnapshotCreates", omMetrics); + long initialNumSnapshotListFails = getLongCounter("NumSnapshotListFails", omMetrics); + long initialNumSnapshotLists = getLongCounter("NumSnapshotLists", omMetrics); + long initialNumSnapshotActive = getLongCounter("NumSnapshotActive", omMetrics); + long initialNumSnapshotDeleted = getLongCounter("NumSnapshotDeleted", omMetrics); + long initialNumSnapshotDiffJobs = getLongCounter("NumSnapshotDiffJobs", omMetrics); + long initialNumSnapshotDiffJobFails = getLongCounter("NumSnapshotDiffJobFails", omMetrics); OmBucketInfo omBucketInfo = createBucketInfo(false); @@ -528,16 +587,15 @@ public void testSnapshotOps() throws Exception { // Create first snapshot writeClient.createSnapshot(volumeName, bucketName, snapshot1); - MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); - - assertCounter("NumSnapshotCreateFails", 0L, omMetrics); - assertCounter("NumSnapshotCreates", 1L, omMetrics); - assertCounter("NumSnapshotListFails", 0L, omMetrics); - assertCounter("NumSnapshotLists", 0L, omMetrics); - assertCounter("NumSnapshotActive", 1L, omMetrics); - assertCounter("NumSnapshotDeleted", 0L, omMetrics); - assertCounter("NumSnapshotDiffJobs", 0L, omMetrics); - assertCounter("NumSnapshotDiffJobFails", 0L, omMetrics); + omMetrics = getMetrics("OMMetrics"); + assertEquals(initialNumSnapshotCreateFails, getLongCounter("NumSnapshotCreateFails", omMetrics)); + assertEquals(initialNumSnapshotCreates + 1, getLongCounter("NumSnapshotCreates", omMetrics)); + assertEquals(initialNumSnapshotListFails, getLongCounter("NumSnapshotListFails", omMetrics)); + assertEquals(initialNumSnapshotLists, getLongCounter("NumSnapshotLists", omMetrics)); + assertEquals(initialNumSnapshotActive + 1, getLongCounter("NumSnapshotActive", omMetrics)); + assertEquals(initialNumSnapshotDeleted, getLongCounter("NumSnapshotDeleted", omMetrics)); + assertEquals(initialNumSnapshotDiffJobs, getLongCounter("NumSnapshotDiffJobs", omMetrics)); + assertEquals(initialNumSnapshotDiffJobFails, getLongCounter("NumSnapshotDiffJobFails", omMetrics)); // Create second key OmKeyArgs keyArgs2 = createKeyArgs(volumeName, bucketName, @@ -560,35 +618,28 @@ public void testSnapshotOps() throws Exception { } } omMetrics = getMetrics("OMMetrics"); - assertCounter("NumSnapshotDiffJobs", 1L, omMetrics); - assertCounter("NumSnapshotDiffJobFails", 0L, omMetrics); + assertEquals(initialNumSnapshotDiffJobs + 1, getLongCounter("NumSnapshotDiffJobs", omMetrics)); + assertEquals(initialNumSnapshotDiffJobFails, getLongCounter("NumSnapshotDiffJobFails", omMetrics)); // List snapshots writeClient.listSnapshot( volumeName, bucketName, null, null, Integer.MAX_VALUE); omMetrics = getMetrics("OMMetrics"); - assertCounter("NumSnapshotActive", 2L, omMetrics); - assertCounter("NumSnapshotCreates", 2L, omMetrics); - assertCounter("NumSnapshotLists", 1L, omMetrics); - assertCounter("NumSnapshotListFails", 0L, omMetrics); + assertEquals(initialNumSnapshotActive + 2, getLongCounter("NumSnapshotActive", omMetrics)); + assertEquals(initialNumSnapshotCreates + 2, getLongCounter("NumSnapshotCreates", omMetrics)); + assertEquals(initialNumSnapshotListFails, getLongCounter("NumSnapshotListFails", omMetrics)); + assertEquals(initialNumSnapshotLists + 1, 
getLongCounter("NumSnapshotLists", omMetrics)); // List snapshot: invalid bucket case. assertThrows(OMException.class, () -> writeClient.listSnapshot(volumeName, "invalidBucket", null, null, Integer.MAX_VALUE)); omMetrics = getMetrics("OMMetrics"); - assertCounter("NumSnapshotLists", 2L, omMetrics); - assertCounter("NumSnapshotListFails", 1L, omMetrics); - - // restart OM - cluster.restartOzoneManager(); - - // Check number of active snapshots in the snapshot table - // is the same after OM restart - assertCounter("NumSnapshotActive", 2L, omMetrics); + assertEquals(initialNumSnapshotLists + 2, getLongCounter("NumSnapshotLists", omMetrics)); + assertEquals(initialNumSnapshotListFails + 1, getLongCounter("NumSnapshotListFails", omMetrics)); } - private void mockWritePathExceptions(Classklass) throws Exception { + private OMMetadataManager mockWritePathExceptions(Classklass) throws Exception { String tableName; if (klass == OmBucketInfo.class) { tableName = "bucketTable"; @@ -610,71 +661,63 @@ private void mockWritePathExceptions(Classklass) throws Exception { } HddsWhiteboxTestUtils.setInternalState( ozoneManager, "metadataManager", mockMm); + + // Return the original metadataManager so it can be restored later + return metadataManager; } @Test public void testAclOperations() throws Exception { - startCluster(); - try { - // Create a volume. - client.getObjectStore().createVolume("volumeacl"); - - OzoneObj volObj = new OzoneObjInfo.Builder().setVolumeName("volumeacl") - .setResType(VOLUME).setStoreType(OZONE).build(); - - // Test getAcl - List acls = ozoneManager.getAcl(volObj); - MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); - assertCounter("NumGetAcl", 1L, omMetrics); - - // Test addAcl - writeClient.addAcl(volObj, - new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, "ozoneuser", - IAccessAuthorizer.ACLType.ALL, ACCESS)); - omMetrics = getMetrics("OMMetrics"); - assertCounter("NumAddAcl", 1L, omMetrics); - - // Test setAcl - writeClient.setAcl(volObj, acls); - omMetrics = getMetrics("OMMetrics"); - assertCounter("NumSetAcl", 1L, omMetrics); - - // Test removeAcl - writeClient.removeAcl(volObj, acls.get(0)); - omMetrics = getMetrics("OMMetrics"); - assertCounter("NumRemoveAcl", 1L, omMetrics); - - } finally { - client.getObjectStore().deleteVolume("volumeacl"); - } + // get initial values for metrics + MetricsRecordBuilder omMetrics = getMetrics("OMMetrics"); + long initialNumGetAcl = getLongCounter("NumGetAcl", omMetrics); + long initialNumAddAcl = getLongCounter("NumAddAcl", omMetrics); + long initialNumSetAcl = getLongCounter("NumSetAcl", omMetrics); + long initialNumRemoveAcl = getLongCounter("NumRemoveAcl", omMetrics); + // Create a volume. 
+ client.getObjectStore().createVolume("volumeacl"); + + OzoneObj volObj = new OzoneObjInfo.Builder().setVolumeName("volumeacl") + .setResType(VOLUME).setStoreType(OZONE).build(); + + // Test getAcl, addAcl, setAcl, removeAcl + List acls = ozoneManager.getAcl(volObj); + writeClient.addAcl(volObj, + new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, "ozoneuser", + ACCESS, IAccessAuthorizer.ACLType.ALL)); + writeClient.setAcl(volObj, acls); + writeClient.removeAcl(volObj, acls.get(0)); + + omMetrics = getMetrics("OMMetrics"); + assertEquals(initialNumGetAcl + 1, getLongCounter("NumGetAcl", omMetrics)); + assertEquals(initialNumAddAcl + 1, getLongCounter("NumAddAcl", omMetrics)); + assertEquals(initialNumSetAcl + 1, getLongCounter("NumSetAcl", omMetrics)); + assertEquals(initialNumRemoveAcl + 1, getLongCounter("NumRemoveAcl", omMetrics)); + + client.getObjectStore().deleteVolume("volumeacl"); } @Test public void testAclOperationsHA() throws Exception { - // This test needs a cluster with DNs and SCM to wait on safemode - clusterBuilder.setNumDatanodes(3); - conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, true); - startCluster(); - ObjectStore objectStore = client.getObjectStore(); // Create a volume. - objectStore.createVolume("volumeacl"); + objectStore.createVolume("volumeaclha"); // Create a bucket. - objectStore.getVolume("volumeacl").createBucket("bucketacl"); + objectStore.getVolume("volumeaclha").createBucket("bucketaclha"); // Create a key. - objectStore.getVolume("volumeacl").getBucket("bucketacl") - .createKey("keyacl", 0).close(); + objectStore.getVolume("volumeaclha").getBucket("bucketaclha") + .createKey("keyaclha", 0).close(); OzoneObj volObj = - new OzoneObjInfo.Builder().setVolumeName("volumeacl").setResType(VOLUME) + new OzoneObjInfo.Builder().setVolumeName("volumeaclha").setResType(VOLUME) .setStoreType(OZONE).build(); - OzoneObj buckObj = new OzoneObjInfo.Builder().setVolumeName("volumeacl") - .setBucketName("bucketacl").setResType(BUCKET).setStoreType(OZONE) + OzoneObj buckObj = new OzoneObjInfo.Builder().setVolumeName("volumeaclha") + .setBucketName("bucketaclha").setResType(BUCKET).setStoreType(OZONE) .build(); - OzoneObj keyObj = new OzoneObjInfo.Builder().setVolumeName("volumeacl") - .setBucketName("bucketacl").setResType(BUCKET).setKeyName("keyacl") + OzoneObj keyObj = new OzoneObjInfo.Builder().setVolumeName("volumeaclha") + .setBucketName("bucketaclha").setResType(BUCKET).setKeyName("keyaclha") .setStoreType(OZONE).build(); List acls = ozoneManager.getAcl(volObj); @@ -696,7 +739,7 @@ private void testAclMetricsInternal(ObjectStore objectStore, OzoneObj volObj, long initialValue = metrics.getNumAddAcl(); objectStore.addAcl(volObj, new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, "ozoneuser", - IAccessAuthorizer.ACLType.ALL, ACCESS)); + ACCESS, IAccessAuthorizer.ACLType.ALL)); assertEquals(initialValue + 1, metrics.getNumAddAcl()); @@ -795,13 +838,13 @@ private void doKeyOps(OmKeyArgs keyArgs) { try { ozoneManager.listKeys(keyArgs.getVolumeName(), - keyArgs.getBucketName(), null, null, 0); + keyArgs.getBucketName(), null, null, 0); } catch (IOException ignored) { } try { ozoneManager.listTrash(keyArgs.getVolumeName(), - keyArgs.getBucketName(), null, null, 0); + keyArgs.getBucketName(), null, null, 0); } catch (IOException ignored) { } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java index 
454019b4a8a4..ba0dabf47dd5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java @@ -40,6 +40,7 @@ import org.apache.hadoop.ozone.client.rpc.RpcClient; import org.apache.hadoop.ozone.om.ha.HadoopRpcOMFailoverProxyProvider; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServerConfig; +import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Timeout; @@ -47,6 +48,7 @@ import java.io.IOException; import java.net.ConnectException; import java.time.Duration; +import java.util.Collections; import java.util.Iterator; import java.util.UUID; import java.util.HashMap; @@ -61,6 +63,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK; import static org.assertj.core.api.Assertions.assertThat; @@ -76,7 +79,6 @@ public abstract class TestOzoneManagerHA { private static MiniOzoneHAClusterImpl cluster = null; - private static MiniOzoneCluster.Builder clusterBuilder = null; private static ObjectStore objectStore; private static OzoneConfiguration conf; private static String omServiceId; @@ -106,10 +108,6 @@ public OzoneConfiguration getConf() { return conf; } - public MiniOzoneCluster.Builder getClusterBuilder() { - return clusterBuilder; - } - public String getOmServiceId() { return omServiceId; } @@ -177,11 +175,11 @@ public static void init() throws Exception { conf.set(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, "10s"); conf.set(OZONE_KEY_DELETING_LIMIT_PER_TASK, "2"); - clusterBuilder = MiniOzoneCluster.newOMHABuilder(conf) + MiniOzoneHAClusterImpl.Builder clusterBuilder = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs); - cluster = (MiniOzoneHAClusterImpl) clusterBuilder.build(); + cluster = clusterBuilder.build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(omServiceId, conf); objectStore = client.getObjectStore(); @@ -217,6 +215,14 @@ public static void createKey(OzoneBucket ozoneBucket, String keyName) throws IOE ozoneOutputStream.close(); } + public static String createPrefixName() { + return "prefix" + RandomStringUtils.randomNumeric(5) + OZONE_URI_DELIMITER; + } + + public static void createPrefix(OzoneObj prefixObj) throws IOException { + assertTrue(objectStore.setAcl(prefixObj, Collections.emptyList())); + } + protected OzoneBucket setupBucket() throws Exception { String userName = "user" + RandomStringUtils.randomNumeric(5); String adminName = "admin" + RandomStringUtils.randomNumeric(5); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java index fbf80a8a879f..716c1003d264 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java @@ -59,7 +59,6 @@ 
import java.net.InetSocketAddress; import java.time.Instant; import java.util.ArrayList; -import java.util.BitSet; import java.util.Collections; import java.util.List; @@ -70,6 +69,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PARTIAL_DELETE; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; @@ -561,7 +561,7 @@ void testAddBucketAcl() throws Exception { OzoneBucket ozoneBucket = setupBucket(); String remoteUserName = "remoteUser"; OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); + DEFAULT, READ); OzoneObj ozoneObj = buildBucketObj(ozoneBucket); @@ -573,7 +573,7 @@ void testRemoveBucketAcl() throws Exception { OzoneBucket ozoneBucket = setupBucket(); String remoteUserName = "remoteUser"; OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); + DEFAULT, READ); OzoneObj ozoneObj = buildBucketObj(ozoneBucket); @@ -586,7 +586,7 @@ void testSetBucketAcl() throws Exception { OzoneBucket ozoneBucket = setupBucket(); String remoteUserName = "remoteUser"; OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); + DEFAULT, READ); OzoneObj ozoneObj = buildBucketObj(ozoneBucket); @@ -608,9 +608,7 @@ private boolean compareAcls(OzoneAcl givenAcl, OzoneAcl existingAcl) { if (givenAcl.getType().equals(existingAcl.getType()) && givenAcl.getName().equals(existingAcl.getName()) && givenAcl.getAclScope().equals(existingAcl.getAclScope())) { - BitSet bitSet = (BitSet) givenAcl.getAclBitSet().clone(); - bitSet.and(existingAcl.getAclBitSet()); - return bitSet.equals(existingAcl.getAclBitSet()); + return givenAcl.equals(existingAcl); } return false; } @@ -620,7 +618,7 @@ void testAddKeyAcl() throws Exception { OzoneBucket ozoneBucket = setupBucket(); String remoteUserName = "remoteUser"; OzoneAcl userAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); + DEFAULT, READ); String key = createKey(ozoneBucket); @@ -634,7 +632,7 @@ void testRemoveKeyAcl() throws Exception { OzoneBucket ozoneBucket = setupBucket(); String remoteUserName = "remoteUser"; OzoneAcl userAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); + DEFAULT, READ); String key = createKey(ozoneBucket); @@ -649,7 +647,7 @@ void testSetKeyAcl() throws Exception { OzoneBucket ozoneBucket = setupBucket(); String remoteUserName = "remoteUser"; OzoneAcl userAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); + DEFAULT, READ); String key = createKey(ozoneBucket); @@ -665,7 +663,7 @@ void testAddPrefixAcl() throws Exception { String remoteUserName = "remoteUser"; String prefixName = RandomStringUtils.randomAlphabetic(5) + "/"; OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); + DEFAULT, READ); OzoneObj ozoneObj = buildPrefixObj(ozoneBucket, prefixName); @@ -678,9 +676,9 @@ void testRemovePrefixAcl() throws Exception { String remoteUserName = "remoteUser"; String prefixName = RandomStringUtils.randomAlphabetic(5) + "/"; OzoneAcl userAcl = new OzoneAcl(USER, remoteUserName, - READ, ACCESS); + ACCESS, READ); OzoneAcl userAcl1 = 
new OzoneAcl(USER, "remote", - READ, ACCESS); + ACCESS, READ); OzoneObj ozoneObj = buildPrefixObj(ozoneBucket, prefixName); @@ -710,7 +708,7 @@ void testSetPrefixAcl() throws Exception { String remoteUserName = "remoteUser"; String prefixName = RandomStringUtils.randomAlphabetic(5) + "/"; OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName, - READ, DEFAULT); + DEFAULT, READ); OzoneObj ozoneObj = buildPrefixObj(ozoneBucket, prefixName); @@ -726,13 +724,13 @@ void testLinkBucketAddBucketAcl() throws Exception { OzoneObj srcObj = buildBucketObj(srcBucket); // Add ACL to the LINK and verify that it is added to the source bucket - OzoneAcl acl1 = new OzoneAcl(USER, "remoteUser1", READ, DEFAULT); + OzoneAcl acl1 = new OzoneAcl(USER, "remoteUser1", DEFAULT, READ); boolean addAcl = getObjectStore().addAcl(linkObj, acl1); assertTrue(addAcl); assertEqualsAcls(srcObj, linkObj); // Add ACL to the SOURCE and verify that it from link - OzoneAcl acl2 = new OzoneAcl(USER, "remoteUser2", WRITE, DEFAULT); + OzoneAcl acl2 = new OzoneAcl(USER, "remoteUser2", DEFAULT, WRITE); boolean addAcl2 = getObjectStore().addAcl(srcObj, acl2); assertTrue(addAcl2); assertEqualsAcls(srcObj, linkObj); @@ -779,14 +777,14 @@ void testLinkBucketSetBucketAcl() throws Exception { // Set ACL to the LINK and verify that it is set to the source bucket List acl1 = Collections.singletonList( - new OzoneAcl(USER, "remoteUser1", READ, DEFAULT)); + new OzoneAcl(USER, "remoteUser1", DEFAULT, READ)); boolean setAcl1 = getObjectStore().setAcl(linkObj, acl1); assertTrue(setAcl1); assertEqualsAcls(srcObj, linkObj); // Set ACL to the SOURCE and verify that it from link List acl2 = Collections.singletonList( - new OzoneAcl(USER, "remoteUser2", WRITE, DEFAULT)); + new OzoneAcl(USER, "remoteUser2", DEFAULT, WRITE)); boolean setAcl2 = getObjectStore().setAcl(srcObj, acl2); assertTrue(setAcl2); assertEqualsAcls(srcObj, linkObj); @@ -802,12 +800,12 @@ void testLinkBucketAddKeyAcl() throws Exception { OzoneObj srcObj = buildKeyObj(srcBucket, key); String user1 = "remoteUser1"; - OzoneAcl acl1 = new OzoneAcl(USER, user1, READ, DEFAULT); + OzoneAcl acl1 = new OzoneAcl(USER, user1, DEFAULT, READ); testAddAcl(user1, linkObj, acl1); // case1: set link acl assertEqualsAcls(srcObj, linkObj); String user2 = "remoteUser2"; - OzoneAcl acl2 = new OzoneAcl(USER, user2, READ, DEFAULT); + OzoneAcl acl2 = new OzoneAcl(USER, user2, DEFAULT, READ); testAddAcl(user2, srcObj, acl2); // case2: set src acl assertEqualsAcls(srcObj, linkObj); @@ -823,7 +821,7 @@ void testLinkBucketRemoveKeyAcl() throws Exception { OzoneObj linkObj = buildKeyObj(linkedBucket, key); OzoneObj srcObj = buildKeyObj(srcBucket, key); String user = "remoteUser1"; - OzoneAcl acl = new OzoneAcl(USER, user, READ, DEFAULT); + OzoneAcl acl = new OzoneAcl(USER, user, DEFAULT, READ); testRemoveAcl(user, linkObj, acl); assertEqualsAcls(srcObj, linkObj); @@ -834,7 +832,7 @@ void testLinkBucketRemoveKeyAcl() throws Exception { OzoneObj linkObj2 = buildKeyObj(linkedBucket2, key2); OzoneObj srcObj2 = buildKeyObj(srcBucket2, key2); String user2 = "remoteUser2"; - OzoneAcl acl2 = new OzoneAcl(USER, user2, READ, DEFAULT); + OzoneAcl acl2 = new OzoneAcl(USER, user2, DEFAULT, READ); testRemoveAcl(user2, srcObj2, acl2); assertEqualsAcls(srcObj2, linkObj2); @@ -849,12 +847,85 @@ void testLinkBucketSetKeyAcl() throws Exception { OzoneObj srcObj = buildKeyObj(srcBucket, key); String user1 = "remoteUser1"; - OzoneAcl acl1 = new OzoneAcl(USER, user1, READ, DEFAULT); + OzoneAcl acl1 = new OzoneAcl(USER, user1, 
DEFAULT, READ); testSetAcl(user1, linkObj, acl1); // case1: set link acl assertEqualsAcls(srcObj, linkObj); String user2 = "remoteUser2"; - OzoneAcl acl2 = new OzoneAcl(USER, user2, READ, DEFAULT); + OzoneAcl acl2 = new OzoneAcl(USER, user2, DEFAULT, READ); + testSetAcl(user2, srcObj, acl2); // case2: set src acl + assertEqualsAcls(srcObj, linkObj); + + } + + @Test + void testLinkBucketAddPrefixAcl() throws Exception { + OzoneBucket srcBucket = setupBucket(); + OzoneBucket linkedBucket = linkBucket(srcBucket); + String prefix = createPrefixName(); + OzoneObj linkObj = buildPrefixObj(linkedBucket, prefix); + OzoneObj srcObj = buildPrefixObj(srcBucket, prefix); + createPrefix(linkObj); + + String user1 = "remoteUser1"; + OzoneAcl acl1 = new OzoneAcl(USER, user1, DEFAULT, READ); + testAddAcl(user1, linkObj, acl1); // case1: set link acl + assertEqualsAcls(srcObj, linkObj); + + String user2 = "remoteUser2"; + OzoneAcl acl2 = new OzoneAcl(USER, user2, DEFAULT, READ); + testAddAcl(user2, srcObj, acl2); // case2: set src acl + assertEqualsAcls(srcObj, linkObj); + + } + + @Test + void testLinkBucketRemovePrefixAcl() throws Exception { + + // CASE 1: from link bucket + OzoneBucket srcBucket = setupBucket(); + OzoneBucket linkedBucket = linkBucket(srcBucket); + String prefix = createPrefixName(); + OzoneObj linkObj = buildPrefixObj(linkedBucket, prefix); + OzoneObj srcObj = buildPrefixObj(srcBucket, prefix); + createPrefix(linkObj); + + String user = "remoteUser1"; + OzoneAcl acl = new OzoneAcl(USER, user, DEFAULT, READ); + testRemoveAcl(user, linkObj, acl); + assertEqualsAcls(srcObj, linkObj); + + // CASE 2: from src bucket + OzoneBucket srcBucket2 = setupBucket(); + OzoneBucket linkedBucket2 = linkBucket(srcBucket2); + String prefix2 = createPrefixName(); + OzoneObj linkObj2 = buildPrefixObj(linkedBucket2, prefix2); + OzoneObj srcObj2 = buildPrefixObj(srcBucket2, prefix2); + createPrefix(srcObj2); + + String user2 = "remoteUser2"; + OzoneAcl acl2 = new OzoneAcl(USER, user2, DEFAULT, READ); + testRemoveAcl(user2, srcObj2, acl2); + assertEqualsAcls(srcObj2, linkObj2); + + } + + @Test + void testLinkBucketSetPrefixAcl() throws Exception { + OzoneBucket srcBucket = setupBucket(); + OzoneBucket linkedBucket = linkBucket(srcBucket); + String prefix = createPrefixName(); + OzoneObj linkObj = buildPrefixObj(linkedBucket, prefix); + OzoneObj srcObj = buildPrefixObj(srcBucket, prefix); + createPrefix(linkObj); + + String user1 = "remoteUser1"; + OzoneAcl acl1 = new OzoneAcl(USER, user1, DEFAULT, READ); + testSetAcl(user1, linkObj, acl1); // case1: set link acl + assertEqualsAcls(srcObj, linkObj); + + String user2 = "remoteUser2"; + OzoneAcl acl2 = new OzoneAcl(USER, user2, DEFAULT, READ); testSetAcl(user2, srcObj, acl2); // case2: set src acl assertEqualsAcls(srcObj, linkObj); @@ -927,7 +998,7 @@ private void testSetAcl(String remoteUserName, OzoneObj ozoneObj, } OzoneAcl modifiedUserAcl = new OzoneAcl(USER, remoteUserName, - WRITE, DEFAULT); + DEFAULT, WRITE); List newAcls = Collections.singletonList(modifiedUserAcl); boolean setAcl = objectStore.setAcl(ozoneObj, newAcls); @@ -960,7 +1031,7 @@ private void testAddAcl(String remoteUserName, OzoneObj ozoneObj, // Add an acl by changing acl type with same type, name and scope. 
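Most of the changes to these ACL tests are the same mechanical flip: in this branch the OzoneAcl constructor takes the ACL scope before the rights, so READ, DEFAULT becomes DEFAULT, READ at every call site, as in the call immediately below this comment. A minimal before/after sketch; the class name is illustrative and the static import paths are taken from other files touched by this patch:

import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT;
import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER;
import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ;

import org.apache.hadoop.ozone.OzoneAcl;

final class OzoneAclOrderSketch {
  static OzoneAcl defaultReadAclFor(String remoteUserName) {
    // Old argument order (removed): new OzoneAcl(USER, remoteUserName, READ, DEFAULT)
    // New argument order (added):   scope first, then the rights being granted.
    return new OzoneAcl(USER, remoteUserName, DEFAULT, READ);
  }
}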
userAcl = new OzoneAcl(USER, remoteUserName, - WRITE, DEFAULT); + DEFAULT, WRITE); addAcl = objectStore.addAcl(ozoneObj, userAcl); assertTrue(addAcl); } @@ -981,7 +1052,7 @@ private void testAddLinkAcl(String remoteUserName, OzoneObj ozoneObj, // Add an acl by changing acl type with same type, name and scope. userAcl = new OzoneAcl(USER, remoteUserName, - WRITE, DEFAULT); + DEFAULT, WRITE); addAcl = objectStore.addAcl(ozoneObj, userAcl); assertTrue(addAcl); } @@ -990,8 +1061,16 @@ private void testRemoveAcl(String remoteUserName, OzoneObj ozoneObj, OzoneAcl userAcl) throws Exception { ObjectStore objectStore = getObjectStore(); - // As by default create will add some default acls in RpcClient. - List acls = objectStore.getAcl(ozoneObj); + // Other than prefix, by default create will add some default acls in RpcClient. + List acls; + if (ozoneObj.getResourceType().equals(OzoneObj.ResourceType.PREFIX)) { + objectStore.addAcl(ozoneObj, userAcl); + // Add another arbitrary group ACL since the prefix will be removed when removing + // the last ACL for the prefix and PREFIX_NOT_FOUND will be thrown + OzoneAcl groupAcl = new OzoneAcl(GROUP, "arbitrary-group", ACCESS, READ); + objectStore.addAcl(ozoneObj, groupAcl); + } + acls = objectStore.getAcl(ozoneObj); assertTrue(acls.size() > 0); @@ -1008,7 +1087,7 @@ private void testRemoveAcl(String remoteUserName, OzoneObj ozoneObj, // Just changed acl type here to write, rest all is same as defaultUserAcl. OzoneAcl modifiedUserAcl = new OzoneAcl(USER, remoteUserName, - WRITE, DEFAULT); + DEFAULT, WRITE); addAcl = objectStore.addAcl(ozoneObj, modifiedUserAcl); assertTrue(addAcl); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java index ab9f6382f0e1..63202805ec57 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.ozone.om; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; @@ -24,6 +25,7 @@ import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; @@ -187,11 +189,12 @@ private void createMultipartKeyAndReadKey(OzoneBucket ozoneBucket, OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey( keyName, value.length(), 1, uploadID); ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length()); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, DigestUtils.md5Hex(value)); ozoneOutputStream.close(); Map partsMap = new HashMap<>(); - partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName()); + partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getETag()); OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = ozoneBucket.completeMultipartUpload(keyName, uploadID, partsMap); @@ -362,7 +365,7 @@ private void 
validateListParts(OzoneBucket ozoneBucket, String keyName, for (int i = 0; i < partsMap.size(); i++) { assertEquals(partsMap.get(partInfoList.get(i).getPartNumber()), - partInfoList.get(i).getPartName()); + partInfoList.get(i).getETag()); } @@ -379,9 +382,10 @@ private String createMultipartUploadPartKey(OzoneBucket ozoneBucket, OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey( keyName, value.length(), partNumber, uploadID); ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length()); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, DigestUtils.md5Hex(value)); ozoneOutputStream.close(); - return ozoneOutputStream.getCommitUploadPartInfo().getPartName(); + return ozoneOutputStream.getCommitUploadPartInfo().getETag(); } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java index 41f1c14f3727..72f1c3374b28 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java @@ -46,6 +46,7 @@ import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.client.ScmTopologyClient; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl; import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.ozone.OzoneAcl; @@ -197,6 +198,8 @@ private void setupEnvironment(boolean aclEnabled, OzoneManager.setTestSecureOmFlag(true); om = OzoneManager.createOm(conf); + om.setScmTopologyClient(new ScmTopologyClient( + new ScmBlockLocationTestingClient(null, null, 0))); om.setCertClient(new CertificateClientTestImpl(conf)); om.start(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java index 14b1a30b44f1..cc0e1feaa548 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java @@ -17,6 +17,12 @@ */ package org.apache.hadoop.ozone.om; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.SafeMode; +import org.apache.hadoop.fs.SafeModeAction; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.HddsConfigKeys; @@ -35,13 +41,12 @@ import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdds.scm.TestStorageContainerManagerHelper; +import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.ozone.test.GenericTestUtils; import 
org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.AfterEach; @@ -54,12 +59,17 @@ import java.io.IOException; import java.util.HashMap; import java.util.List; -import java.util.Map; +import java.util.concurrent.TimeoutException; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -97,9 +107,9 @@ public void init() throws Exception { conf = new OzoneConfiguration(); conf.set(OZONE_SCM_STALENODE_INTERVAL, "10s"); conf.set(OZONE_SCM_DEADNODE_INTERVAL, "25s"); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, MILLISECONDS); + conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 500, MILLISECONDS); builder = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(1000) - .setHbProcessorInterval(500) .setStartDataNodes(false); cluster = builder.build(); cluster.startHddsDatanodes(); @@ -127,10 +137,7 @@ public void shutdown() { @Test void testSafeModeOperations() throws Exception { - // Create {numKeys} random names keys. - TestStorageContainerManagerHelper helper = - new TestStorageContainerManagerHelper(cluster, conf); - Map keyLocations = helper.createKeys(100, 4096); + TestDataUtil.createKeys(cluster, 100); final List containers = cluster .getStorageContainerManager().getContainerManager().getContainers(); GenericTestUtils.waitFor(() -> containers.size() >= 3, 100, 1000); @@ -216,10 +223,7 @@ void testSCMSafeMode() throws Exception { assertFalse(cluster.getStorageContainerManager().isInSafeMode()); // Test2: Test safe mode when containers are there in system. - // Create {numKeys} random names keys. 
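The cluster setup in this test now pushes the heartbeat tuning into the OzoneConfiguration instead of the removed builder setters, and seeds data through TestDataUtil rather than the old TestStorageContainerManagerHelper. A condensed sketch of the new setup shape, with values copied from the hunk above; the wrapper class name is illustrative and error handling is omitted:

import static java.util.concurrent.TimeUnit.MILLISECONDS;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.TestDataUtil;

final class SafeModeClusterSetupSketch {
  static MiniOzoneCluster startCluster() throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Heartbeat tuning now lives in the conf, not in builder setters.
    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, MILLISECONDS);
    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 500, MILLISECONDS);

    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
        .setStartDataNodes(false)
        .build();
    cluster.startHddsDatanodes();
    cluster.waitForClusterToBeReady();

    // Seed keys with the shared test utility instead of the removed helper.
    TestDataUtil.createKeys(cluster, 100);
    return cluster;
  }
}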
- TestStorageContainerManagerHelper helper = - new TestStorageContainerManagerHelper(cluster, conf); - Map keyLocations = helper.createKeys(100 * 2, 4096); + TestDataUtil.createKeys(cluster, 100 * 2); final List containers = cluster .getStorageContainerManager().getContainerManager().getContainers(); GenericTestUtils.waitFor(() -> containers.size() >= 3, 100, 1000 * 30); @@ -291,9 +295,7 @@ public void testSCMSafeModeRestrictedOp() throws Exception { cluster.waitTobeOutOfSafeMode(); assertFalse(scm.isInSafeMode()); - TestStorageContainerManagerHelper helper = - new TestStorageContainerManagerHelper(cluster, conf); - helper.createKeys(10, 4096); + TestDataUtil.createKeys(cluster, 10); SCMClientProtocolServer clientProtocolServer = cluster .getStorageContainerManager().getClientProtocolServer(); assertFalse((scm.getClientProtocolServer()).getSafeModeStatus()); @@ -323,8 +325,6 @@ public void testSCMSafeModeDisabled() throws Exception { conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, false); conf.setInt(HddsConfigKeys.HDDS_SCM_SAFEMODE_MIN_DATANODE, 3); builder = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(1000) - .setHbProcessorInterval(500) .setNumDatanodes(3); cluster = builder.build(); StorageContainerManager scm = cluster.getStorageContainerManager(); @@ -334,4 +334,44 @@ public void testSCMSafeModeDisabled() throws Exception { cluster.restartStorageContainerManager(true); assertFalse(scm.isInSafeMode()); } + + @Test + public void testCreateRetryWhileSCMSafeMode() throws Exception { + // Test1: Test safe mode when there are no containers in system. + cluster.stop(); + cluster = builder.build(); + + final String rootPath = String.format("%s://%s/", + OZONE_OFS_URI_SCHEME, conf.get(OZONE_OM_ADDRESS_KEY)); + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + OMMetrics omMetrics = cluster.getOzoneManager().getMetrics(); + long allocateBlockReqCount = omMetrics.getNumBlockAllocateFails(); + + try (FileSystem fs = FileSystem.get(conf)) { + assertTrue(((SafeMode)fs).setSafeMode(SafeModeAction.GET)); + + Thread t = new Thread(() -> { + try { + LOG.info("Wait for allocate block fails at least once"); + GenericTestUtils.waitFor(() -> omMetrics.getNumBlockAllocateFails() > allocateBlockReqCount, + 100, 10000); + + cluster.startHddsDatanodes(); + cluster.waitForClusterToBeReady(); + cluster.waitTobeOutOfSafeMode(); + } catch (InterruptedException | TimeoutException e) { + throw new RuntimeException(e); + } + }); + t.start(); + + final Path file = new Path("file"); + try (FSDataOutputStream outputStream = fs.create(file, true)) { + LOG.info("Successfully created a file"); + } + t.join(); + } + + assertFalse(cluster.getStorageContainerManager().isInSafeMode()); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/RangerUserRequest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/RangerUserRequest.java index 07bb3cf96270..cde89599fbea 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/RangerUserRequest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/RangerUserRequest.java @@ -17,10 +17,9 @@ */ package org.apache.hadoop.ozone.om.multitenant; -import com.google.gson.JsonArray; -import com.google.gson.JsonObject; -import com.google.gson.JsonParseException; -import com.google.gson.JsonParser; +import com.fasterxml.jackson.core.JsonParseException; +import 
com.fasterxml.jackson.databind.JsonNode; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.kerby.util.Base64; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -191,7 +190,7 @@ private String getResponseData(HttpURLConnection urlConnection) } private HttpURLConnection makeHttpGetCall(String urlString, - String method, boolean isSpnego) throws IOException { + String method, boolean isSpnego) throws IOException { URL url = new URL(urlString); final HttpURLConnection urlConnection = openURLConnection(url); @@ -215,14 +214,16 @@ public String getUserId(String userPrincipal) throws IOException { String response = getResponseData(conn); String userIDCreated = null; try { - JsonObject jResonse = JsonParser.parseString(response).getAsJsonObject(); - JsonArray userinfo = jResonse.get("vXUsers").getAsJsonArray(); + JsonNode jResponse = + JsonUtils.readTree(response); + JsonNode userinfo = jResponse.path("vXUsers"); int numIndex = userinfo.size(); + for (int i = 0; i < numIndex; ++i) { - if (userinfo.get(i).getAsJsonObject().get("name").getAsString() - .equals(userPrincipal)) { - userIDCreated = - userinfo.get(i).getAsJsonObject().get("id").getAsString(); + JsonNode userNode = userinfo.get(i); + String name = userNode.path("name").asText(); + if (name.equals(userPrincipal)) { + userIDCreated = userNode.path("id").asText(); break; } } @@ -231,6 +232,7 @@ public String getUserId(String userPrincipal) throws IOException { e.printStackTrace(); throw e; } + return userIDCreated; } @@ -253,8 +255,8 @@ public String createUser(String userName, String password) String userId; try { assert userInfo != null; - JsonObject jObject = JsonParser.parseString(userInfo).getAsJsonObject(); - userId = jObject.get("id").getAsString(); + JsonNode jNode = JsonUtils.readTree(userInfo); + userId = jNode.get("id").asText(); LOG.debug("Ranger returned userId: {}", userId); } catch (JsonParseException e) { e.printStackTrace(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java index 1cb436dcb38d..078266581cbc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java @@ -49,6 +49,7 @@ import static org.apache.hadoop.ozone.admin.scm.FinalizeUpgradeCommandUtil.isDone; import static org.apache.hadoop.ozone.admin.scm.FinalizeUpgradeCommandUtil.isStarting; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MULTITENANCY_ENABLED; +import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -76,9 +77,9 @@ public static void initClusterProvider() throws Exception { conf.setBoolean( OMMultiTenantManagerImpl.OZONE_OM_TENANT_DEV_SKIP_RANGER, true); conf.setBoolean(OZONE_OM_MULTITENANCY_ENABLED, true); + conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, OMLayoutFeature.INITIAL_VERSION.layoutVersion()); MiniOzoneCluster.Builder builder = MiniOzoneCluster.newBuilder(conf) - .withoutDatanodes() - .setOmLayoutVersion(OMLayoutFeature.INITIAL_VERSION.layoutVersion()); + .withoutDatanodes(); cluster = builder.build(); client 
= cluster.newClient(); s3VolumeName = HddsClientUtils.getDefaultS3VolumeName(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java index 029b0813bb55..ed399b370429 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java @@ -102,6 +102,7 @@ import java.util.Iterator; import java.util.Set; import java.util.UUID; +import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; import java.util.regex.Matcher; @@ -111,6 +112,7 @@ import static org.apache.commons.lang3.StringUtils.leftPad; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.admin.scm.FinalizeUpgradeCommandUtil.isDone; import static org.apache.hadoop.ozone.admin.scm.FinalizeUpgradeCommandUtil.isStarting; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; @@ -118,7 +120,9 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OmSnapshotManager.DELIMITER; +import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.CONTAINS_SNAPSHOT; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; @@ -171,6 +175,7 @@ public abstract class TestOmSnapshot { private ObjectStore store; private OzoneManager ozoneManager; private OzoneBucket ozoneBucket; + private OzoneConfiguration conf; private final BucketLayout bucketLayout; private final boolean enabledFileSystemPaths; @@ -195,7 +200,7 @@ public TestOmSnapshot(BucketLayout newBucketLayout, * Create a MiniDFSCluster for testing. 
*/ private void init() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); + conf = new OzoneConfiguration(); conf.setBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, enabledFileSystemPaths); conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, bucketLayout.name()); conf.setBoolean(OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF, forceFullSnapshotDiff); @@ -207,10 +212,11 @@ private void init() throws Exception { conf.setEnum(HDDS_DB_PROFILE, DBProfile.TEST); // Enable filesystem snapshot feature for the test regardless of the default conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); + conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, OMLayoutFeature.BUCKET_LAYOUT_SUPPORT.layoutVersion()); + conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); + conf.setInt(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, KeyManagerImpl.DISABLE_VALUE); cluster = MiniOzoneCluster.newBuilder(conf) - .setNumOfOzoneManagers(3) - .setOmLayoutVersion(OMLayoutFeature.BUCKET_LAYOUT_SUPPORT.layoutVersion()) .build(); cluster.waitForClusterToBeReady(); @@ -236,6 +242,12 @@ private void stopKeyManager() throws IOException { keyManager.stop(); } + private void startKeyManager() throws IOException { + KeyManagerImpl keyManager = (KeyManagerImpl) HddsWhiteboxTestUtils + .getInternalState(ozoneManager, "keyManager"); + keyManager.start(conf); + } + private RDBStore getRdbStore() { return (RDBStore) ozoneManager.getMetadataManager().getStore(); } @@ -1078,7 +1090,7 @@ public void testSnapdiffWithObjectMetaModification() throws Exception { createSnapshot(testVolumeName, testBucketName, snap1); OzoneObj keyObj = buildKeyObj(bucket, key1); OzoneAcl userAcl = new OzoneAcl(USER, "user", - WRITE, DEFAULT); + DEFAULT, WRITE); store.addAcl(keyObj, userAcl); String snap2 = "snap2"; @@ -2030,7 +2042,7 @@ public void testSnapshotOpensWithDisabledAutoCompaction() throws Exception { String snapPrefix = createSnapshot(volumeName, bucketName); try (RDBStore snapshotDBStore = (RDBStore) ((OmSnapshot) cluster.getOzoneManager().getOmSnapshotManager() - .checkForSnapshot(volumeName, bucketName, snapPrefix, false).get()) + .getActiveFsMetadataOrSnapshot(volumeName, bucketName, snapPrefix).get()) .getMetadataManager().getStore()) { for (String table : snapshotDBStore.getTableNames().values()) { assertTrue(snapshotDBStore.getDb().getColumnFamily(table) @@ -2160,7 +2172,7 @@ public void testCompactionDagDisableForSnapshotMetadata() throws Exception { OmSnapshot omSnapshot = (OmSnapshot) cluster.getOzoneManager() .getOmSnapshotManager() - .checkForSnapshot(volumeName, bucketName, snapshotName, false).get(); + .getActiveFsMetadataOrSnapshot(volumeName, bucketName, snapshotName).get(); RDBStore snapshotDbStore = (RDBStore) omSnapshot.getMetadataManager().getStore(); @@ -2481,4 +2493,49 @@ public void testSnapshotCompactionDag() throws Exception { fetchReportPage(volume1, bucket3, "bucket3-snap1", "bucket3-snap3", null, 0).getDiffList().size()); } + + @Test + public void testSnapshotReuseSnapName() throws Exception { + // start KeyManager for this test + startKeyManager(); + String volume = "vol-" + counter.incrementAndGet(); + String bucket = "buck-" + counter.incrementAndGet(); + store.createVolume(volume); + OzoneVolume volume1 = store.getVolume(volume); + volume1.createBucket(bucket); + OzoneBucket bucket1 = volume1.getBucket(bucket); + // Create Key1 and take snapshot + String key1 = "key-1-"; + createFileKeyWithPrefix(bucket1, key1); + String snap1 = "snap" + counter.incrementAndGet(); + String 
snapshotKeyPrefix = createSnapshot(volume, bucket, snap1); + + int keyCount1 = keyCount(bucket1, snapshotKeyPrefix + "key-"); + assertEquals(1, keyCount1); + + store.deleteSnapshot(volume, bucket, snap1); + + GenericTestUtils.waitFor(() -> { + try { + return !ozoneManager.getMetadataManager().getSnapshotInfoTable() + .isExist(SnapshotInfo.getTableKey(volume, bucket, snap1)); + } catch (IOException e) { + throw new RuntimeException(e); + } + }, 200, 10000); + + createFileKeyWithPrefix(bucket1, key1); + String snap2 = "snap" + counter.incrementAndGet(); + createSnapshot(volume, bucket, snap2); + + String key2 = "key-2-"; + createFileKeyWithPrefix(bucket1, key2); + createSnapshot(volume, bucket, snap1); + + int keyCount2 = keyCount(bucket1, snapshotKeyPrefix + "key-"); + assertEquals(3, keyCount2); + + // Stop key manager after testcase executed + stopKeyManager(); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotDisabled.java similarity index 91% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotDisabled.java index fd1a60128de1..95a24b8ca99c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotDisabled.java @@ -15,7 +15,7 @@ * the License. */ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.junit.jupiter.api.AfterAll; @@ -43,7 +44,7 @@ */ public class TestOmSnapshotDisabled { - private static MiniOzoneCluster cluster = null; + private static MiniOzoneHAClusterImpl cluster = null; private static OzoneClient client; private static ObjectStore store; @@ -57,17 +58,13 @@ public static void init() throws Exception { // Disable filesystem snapshot feature for this test conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, false); - cluster = MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId("om-service-test1") .setNumOfOzoneManagers(3) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); - OzoneManager leaderOzoneManager = - ((MiniOzoneHAClusterImpl) cluster).getOMLeader(); - OzoneConfiguration leaderConfig = leaderOzoneManager.getConfiguration(); - cluster.setConf(leaderConfig); store = client.getObjectStore(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotDisabledRestart.java similarity index 93% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java rename to 
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotDisabledRestart.java index babc643ffa01..91ad9eb8fe55 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotDisabledRestart.java @@ -15,7 +15,7 @@ * the License. */ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -25,6 +25,7 @@ import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; @@ -54,16 +55,13 @@ public static void init() throws Exception { // Enable filesystem snapshot feature at the beginning conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId("om-service-test2") .setNumOfOzoneManagers(3) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); - OzoneManager leaderOzoneManager = cluster.getOMLeader(); - OzoneConfiguration leaderConfig = leaderOzoneManager.getConfiguration(); - cluster.setConf(leaderConfig); store = client.getObjectStore(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java similarity index 99% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java index 055ddeb20c9a..42b43fa03a3b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java @@ -15,7 +15,7 @@ * the License. 
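
For reference, a sketch of the cluster bootstrap these renamed snapshot tests now share. MiniOzoneCluster.newHABuilder(conf) hands back the HA builder directly, so the old cast to MiniOzoneHAClusterImpl and the cluster.setConf(leaderConfig) step are gone. Service id and OM count are placeholders taken from the hunks above.

    // Sketch mirroring the simplified init() in the hunks above.
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, false);
    MiniOzoneHAClusterImpl cluster = MiniOzoneCluster.newHABuilder(conf)
        .setOMServiceId("om-service-test1")
        .setNumOfOzoneManagers(3)
        .build();
    cluster.waitForClusterToBeReady();
    OzoneClient client = cluster.newClient();
    ObjectStore store = client.getObjectStore();
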
*/ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; @@ -41,12 +41,15 @@ import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.om.KeyManagerImpl; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.ozone.om.snapshot.TestOmSnapshot; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystemFso.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFso.java similarity index 95% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystemFso.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFso.java index 66d395160201..47bdd8f3bd52 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystemFso.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFso.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.junit.jupiter.api.Timeout; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystemLegacy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacy.java similarity index 95% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystemLegacy.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacy.java index 86682b2cbc19..b8d81c31cf5f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystemLegacy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacy.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.junit.jupiter.api.Timeout; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java index 5ed2f848aed8..06fbebb2efa2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.om.snapshot; import org.apache.ozone.test.tag.Native; -import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.Timeout; import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; @@ -30,7 +29,6 @@ */ @Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) @Timeout(300) -@Unhealthy("HDDS-10149") class TestOmSnapshotFsoWithNativeLib extends TestOmSnapshot { TestOmSnapshotFsoWithNativeLib() throws Exception { super(FILE_SYSTEM_OPTIMIZED, false, false, false); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHASnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java similarity index 97% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHASnapshot.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java index 1d3ddb08a684..341b5b78c603 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHASnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -30,6 +30,9 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; import org.junit.jupiter.api.AfterAll; @@ -70,7 +73,7 @@ public static void staticInit() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId("om-service-test") .setNumOfOzoneManagers(3) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java index 8c0b375c3ca9..5694edd773ea 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java @@ -110,16 +110,12 @@ public static void init() throws Exception { final String omServiceId = "om-service-test-1" + RandomStringUtils.randomNumeric(32); - cluster = MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(1) .build(); cluster.waitForClusterToBeReady(); - ozoneManager = cluster.getOzoneManager(); - final OzoneConfiguration ozoneManagerConf = ozoneManager.getConfiguration(); - cluster.setConf(ozoneManagerConf); - final String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + omServiceId; final OzoneConfiguration clientConf = new OzoneConfiguration(cluster.getConf()); @@ -128,12 +124,13 @@ public static void init() throws Exception { client = cluster.newClient(); objectStore = client.getObjectStore(); + ozoneManager = cluster.getOzoneManager(); final KeyManagerImpl keyManager = (KeyManagerImpl) HddsWhiteboxTestUtils .getInternalState(ozoneManager, "keyManager"); // stop the deletion services so that keys can still be read keyManager.stop(); - OMStorage.getOmDbDir(ozoneManagerConf); + OMStorage.getOmDbDir(cluster.getConf()); } @AfterAll @@ -630,7 +627,7 @@ private void createBucket(BucketLayout bucketLayout, private void createVolume() throws IOException { final String volumePrefix = "volume-"; volumeName = volumePrefix + RandomStringUtils.randomNumeric(32); - final VolumeArgs volumeArgs = new VolumeArgs.Builder() + final VolumeArgs volumeArgs = VolumeArgs.newBuilder() .setAdmin(ADMIN) .setOwner(ADMIN) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java index 643191b36d41..d28f25a28fac 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java +++ 
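
The TestOzoneManagerSnapshotAcl hunk above also swaps the nested builder constructor for the VolumeArgs.newBuilder() factory. A minimal sketch of the new call shape; the admin/owner values and volume name are placeholders, and the createVolume(name, args) overload on ObjectStore is an assumption rather than something shown in this patch.

    // Sketch of volume creation with the factory-style builder.
    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
        .setAdmin("admin")
        .setOwner("admin")
        .build();
    objectStore.createVolume("volume-example", volumeArgs);
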
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java @@ -66,7 +66,7 @@ public void init() throws Exception { omServiceId = "om-service-test1"; conf.setBoolean(OMConfigKeys.OZONE_OM_HTTP_ENABLED_KEY, true); conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java index e0d01c148d6b..4cd2f98c2b8b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java @@ -72,7 +72,7 @@ @Timeout(value = 300) public class TestOzoneSnapshotRestore { private static final String OM_SERVICE_ID = "om-service-test-1"; - private MiniOzoneCluster cluster; + private MiniOzoneHAClusterImpl cluster; private ObjectStore store; private OzoneManager leaderOzoneManager; private OzoneConfiguration clientConf; @@ -105,18 +105,17 @@ public void init() throws Exception { String serviceID = OM_SERVICE_ID + RandomStringUtils.randomNumeric(5); - cluster = MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(serviceID) .setNumOfOzoneManagers(3) .build(); cluster.waitForClusterToBeReady(); - leaderOzoneManager = ((MiniOzoneHAClusterImpl) cluster).getOMLeader(); + leaderOzoneManager = cluster.getOMLeader(); OzoneConfiguration leaderConfig = leaderOzoneManager.getConfiguration(); - cluster.setConf(leaderConfig); String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + serviceID; - clientConf = new OzoneConfiguration(cluster.getConf()); + clientConf = new OzoneConfiguration(leaderConfig); clientConf.set(FS_DEFAULT_NAME_KEY, hostPrefix); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java similarity index 97% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java index a7bc55446413..2f7e1bd5a9d6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java @@ -14,7 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. 
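
In the TestOzoneSnapshotRestore hunk above, the client configuration is now derived from the leader OM's configuration instead of pushing it back into the cluster via setConf. A short sketch of just that step; serviceID is a placeholder and the static imports match the ones already used by the test.

    // Sketch: build an ofs:// client config from the leader OM's settings.
    OzoneManager leaderOM = cluster.getOMLeader();
    OzoneConfiguration clientConf =
        new OzoneConfiguration(leaderOM.getConfiguration());
    clientConf.set(FS_DEFAULT_NAME_KEY, OZONE_OFS_URI_SCHEME + "://" + serviceID);
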
*/ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; @@ -34,14 +34,16 @@ import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.client.io.OzoneInputStream; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OmFailoverProxyUtil; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMLeaderNotReadyException; import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServerConfig; -import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; import org.apache.ozone.compaction.log.CompactionLogEntry; @@ -76,7 +78,6 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.apache.hadoop.ozone.om.TestOzoneManagerHAWithStoppedNodes.createKey; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -153,7 +154,7 @@ public void init(TestInfo testInfo) throws Exception { OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY, SNAPSHOT_THRESHOLD); int numOfOMs = 3; - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId("om-service-test1") .setNumOfOzoneManagers(numOfOMs) .setNumOfActiveOMs(2) @@ -259,12 +260,11 @@ public void testSnapshotAndKeyDeletionBackgroundServices() // get snapshot c OmSnapshot snapC; - try (ReferenceCounted rcC = newLeaderOM + try (ReferenceCounted rcC = newLeaderOM .getOmSnapshotManager() - .checkForSnapshot(volumeName, bucketName, - getSnapshotPrefix(snapshotInfoC.getName()), true)) { + .getSnapshot(volumeName, bucketName, snapshotInfoC.getName())) { assertNotNull(rcC); - snapC = (OmSnapshot) rcC.get(); + snapC = rcC.get(); } // assert that key a is in snapshot c's deleted table @@ -284,12 +284,11 @@ public void testSnapshotAndKeyDeletionBackgroundServices() // get snapshot d OmSnapshot snapD; - try (ReferenceCounted rcD = newLeaderOM + try (ReferenceCounted rcD = newLeaderOM .getOmSnapshotManager() - .checkForSnapshot(volumeName, bucketName, - getSnapshotPrefix(snapshotInfoD.getName()), true)) { + .getSnapshot(volumeName, bucketName, snapshotInfoD.getName())) { assertNotNull(rcD); - snapD = (OmSnapshot) rcD.get(); + snapD = rcD.get(); } // wait until key a appears in deleted table of snapshot d diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java similarity index 93% rename from 
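
The snapshot lookups above move from checkForSnapshot(volume, bucket, getSnapshotPrefix(name), true) to getSnapshot(volume, bucket, name), which hides the prefix handling and removes the cast on get(). A sketch of the new accessor; the ReferenceCounted type parameter is an assumption (the rendered diff drops generics) and the names are placeholders.

    // Sketch: acquire a snapshot handle via the new OmSnapshotManager API.
    OmSnapshot snap;
    try (ReferenceCounted<OmSnapshot> rc = ozoneManager.getOmSnapshotManager()
        .getSnapshot(volumeName, bucketName, snapshotName)) {
      assertNotNull(rc);
      snap = rc.get();   // no cast and no getSnapshotPrefix() wrapping needed
    }
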
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java index 6e3e4fd7f404..1c98ce89af59 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java @@ -17,7 +17,7 @@ * */ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; @@ -32,14 +32,17 @@ import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; -import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -54,15 +57,15 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; /** * Test Snapshot Deleting Service. @@ -133,9 +136,8 @@ public void testSnapshotSplitAndMove() throws Exception { GenericTestUtils.waitFor(() -> snapshotDeletingService .getSuccessfulRunCount() >= 1, 1000, 10000); - OmSnapshot bucket1snap3 = (OmSnapshot) om.getOmSnapshotManager() - .checkForSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, - getSnapshotPrefix("bucket1snap3"), true).get(); + OmSnapshot bucket1snap3 = om.getOmSnapshotManager() + .getSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, "bucket1snap3").get(); // Check bucket1key1 added to next non deleted snapshot db. 
List> omKeyInfos = @@ -190,8 +192,7 @@ public void testMultipleSnapshotKeyReclaim() throws Exception { // verify the cache of purged snapshot // /vol1/bucket2/bucket2snap1 has been cleaned up from cache map - SnapshotCache snapshotCache = om.getOmSnapshotManager().getSnapshotCache(); - assertEquals(2, snapshotCache.size()); + assertEquals(2, om.getOmSnapshotManager().getSnapshotCacheSize()); } @SuppressWarnings("checkstyle:MethodLength") @@ -359,9 +360,8 @@ public void testSnapshotWithFSO() throws Exception { assertTableRowCount(om.getMetadataManager().getSnapshotInfoTable(), 2); verifySnapshotChain(deletedSnap, "/vol1/bucket2/snap3"); - OmSnapshot snap3 = (OmSnapshot) om.getOmSnapshotManager() - .checkForSnapshot(VOLUME_NAME, BUCKET_NAME_TWO, - getSnapshotPrefix("snap3"), true).get(); + OmSnapshot snap3 = om.getOmSnapshotManager() + .getSnapshot(VOLUME_NAME, BUCKET_NAME_TWO, "snap3").get(); Table snapDeletedDirTable = snap3.getMetadataManager().getDeletedDirTable(); @@ -388,10 +388,10 @@ public void testSnapshotWithFSO() throws Exception { assertTableRowCount(renamedTable, 4); assertTableRowCount(deletedDirTable, 3); - ReferenceCounted rcSnap1 = - om.getOmSnapshotManager().checkForSnapshot( - VOLUME_NAME, BUCKET_NAME_TWO, getSnapshotPrefix("snap1"), true); - OmSnapshot snap1 = (OmSnapshot) rcSnap1.get(); + ReferenceCounted rcSnap1 = + om.getOmSnapshotManager().getSnapshot( + VOLUME_NAME, BUCKET_NAME_TWO, "snap1"); + OmSnapshot snap1 = rcSnap1.get(); Table snap1KeyTable = snap1.getMetadataManager().getFileTable(); try (TableIterator table, int count) private boolean assertTableRowCount(int expectedCount, Table table) { - long count = 0L; - try { - count = cluster.getOzoneManager().getMetadataManager() - .countRowsInTable(table); + AtomicLong count = new AtomicLong(0L); + assertDoesNotThrow(() -> { + count.set(cluster.getOzoneManager().getMetadataManager().countRowsInTable(table)); LOG.info("{} actual row count={}, expectedCount={}", table.getName(), - count, expectedCount); - } catch (IOException ex) { - fail("testDoubleBuffer failed with: " + ex); - } - return count == expectedCount; + count.get(), expectedCount); + }); + return count.get() == expectedCount; } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java similarity index 95% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java index 6b39b76c5466..fac6764767f6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
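
The assertTableRowCount helper above is rewritten to route the possible IOException through assertDoesNotThrow instead of calling fail(), capturing the row count in an AtomicLong so the lambda can update it. A condensed sketch of the resulting polling predicate; LOG and cluster are the surrounding test-class fields, and the Table type parameters are left raw here as they appear in the rendered diff.

    // Sketch of the rewritten row-count predicate used by the waitFor loops.
    private boolean assertTableRowCount(int expectedCount, Table table) {
      AtomicLong count = new AtomicLong(0L);
      assertDoesNotThrow(() -> {
        count.set(cluster.getOzoneManager().getMetadataManager()
            .countRowsInTable(table));
        LOG.info("{} actual row count={}, expectedCount={}",
            table.getName(), count.get(), expectedCount);
      });
      return count.get() == expectedCount;
    }
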
*/ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataOutputStream; @@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -46,17 +47,17 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; import java.util.HashMap; import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; /** * Test Snapshot Directory Service. @@ -114,15 +115,13 @@ public static void teardown() { @AfterEach public void cleanup() { - try { + assertDoesNotThrow(() -> { Path root = new Path("/"); FileStatus[] fileStatuses = fs.listStatus(root); for (FileStatus fileStatus : fileStatuses) { fs.delete(fileStatus.getPath(), true); } - } catch (IOException ex) { - fail("Failed to cleanup files."); - } + }); } @SuppressWarnings("checkstyle:LineLength") @@ -258,15 +257,12 @@ private void assertTableRowCount(Table table, int count) private boolean assertTableRowCount(int expectedCount, Table table) { - long count = 0L; - try { - count = cluster.getOzoneManager().getMetadataManager() - .countRowsInTable(table); + AtomicLong count = new AtomicLong(0L); + assertDoesNotThrow(() -> { + count.set(cluster.getOzoneManager().getMetadataManager().countRowsInTable(table)); LOG.info("{} actual row count={}, expectedCount={}", table.getName(), - count, expectedCount); - } catch (IOException ex) { - fail("testDoubleBuffer failed with: " + ex); - } - return count == expectedCount; + count.get(), expectedCount); + }); + return count.get() == expectedCount; } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java index 8f11941fcbf6..dff4cd046c9b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java @@ -66,7 +66,7 @@ void setup() throws Exception { String omServiceId = "omServiceId1"; OzoneConfiguration conf = new OzoneConfiguration(); String scmServiceId = "scmServiceId"; - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId) .setNumOfOzoneManagers(3) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java 
index 7691704d924c..9fcb82fd4b6a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java @@ -246,7 +246,7 @@ void testNodesInDecommissionOrMaintenance( TestNodeUtil.getDNHostAndPort(nodeToGoOffline1)), 0); } else { scmClient.decommissionNodes(Collections.singletonList( - TestNodeUtil.getDNHostAndPort(nodeToGoOffline1))); + TestNodeUtil.getDNHostAndPort(nodeToGoOffline1)), false); } TestNodeUtil.waitForDnToReachOpState(scmNodeManager, @@ -273,7 +273,7 @@ void testNodesInDecommissionOrMaintenance( TestNodeUtil.getDNHostAndPort(nodeToGoOffline2)), 0); } else { scmClient.decommissionNodes(Collections.singletonList( - TestNodeUtil.getDNHostAndPort(nodeToGoOffline2))); + TestNodeUtil.getDNHostAndPort(nodeToGoOffline2)), false); } TestNodeUtil.waitForDnToReachOpState(scmNodeManager, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java index 4c059be1b542..ca8fcae6643b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java @@ -54,6 +54,7 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; @@ -61,6 +62,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.fail; @@ -127,15 +129,13 @@ public static void teardown() { @AfterEach public void cleanup() { - try { + assertDoesNotThrow(() -> { Path root = new Path("/"); FileStatus[] fileStatuses = fs.listStatus(root); for (FileStatus fileStatus : fileStatuses) { fs.delete(fileStatus.getPath(), true); } - } catch (IOException ex) { - fail("Failed to cleanup files."); - } + }); } /** @@ -461,21 +461,19 @@ private void assertTableRowCount(Table table, int expectedCount, private boolean assertTableRowCount(int expectedCount, Table table, boolean isRecon) { - long count = 0L; - try { + AtomicLong count = new AtomicLong(0L); + assertDoesNotThrow(() -> { if (isRecon) { - count = cluster.getReconServer().getOzoneManagerServiceProvider() - .getOMMetadataManagerInstance().countRowsInTable(table); + count.set(cluster.getReconServer().getOzoneManagerServiceProvider() + .getOMMetadataManagerInstance().countRowsInTable(table)); } else { - count = cluster.getOzoneManager().getMetadataManager() - .countRowsInTable(table); + count.set(cluster.getOzoneManager().getMetadataManager() + .countRowsInTable(table)); } LOG.info("{} actual row count={}, expectedCount={}", table.getName(), - count, 
expectedCount); - } catch (IOException ex) { - fail("Test failed with: " + ex); - } - return count == expectedCount; + count.get(), expectedCount); + }); + return count.get() == expectedCount; } private void syncDataFromOM() { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java index 44385698c5c3..cba7311b3b4f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java @@ -69,11 +69,11 @@ public void init() throws Exception { conf.set(HDDS_PIPELINE_REPORT_INTERVAL, "5s"); ReconTaskConfig taskConfig = conf.getObject(ReconTaskConfig.class); - taskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(15)); + taskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(10)); conf.setFromObject(taskConfig); conf.set("ozone.scm.stale.node.interval", "6s"); - conf.set("ozone.scm.dead.node.interval", "10s"); + conf.set("ozone.scm.dead.node.interval", "8s"); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1) .includeRecon(true).build(); cluster.waitForClusterToBeReady(); @@ -246,6 +246,7 @@ public void testEmptyMissingContainerDownNode() throws Exception { return (allEmptyMissingContainers.size() == 1); }); + // Now add a container to key mapping count as 3. This data is used to // identify if container is empty in terms of keys mapped to container. try (RDBBatchOperation rdbBatchOperation = new RDBBatchOperation()) { @@ -274,6 +275,26 @@ public void testEmptyMissingContainerDownNode() throws Exception { return (allEmptyMissingContainers.isEmpty()); }); + // Now remove keys from container. This data is used to + // identify if container is empty in terms of keys mapped to container. + try (RDBBatchOperation rdbBatchOperation = new RDBBatchOperation()) { + reconContainerMetadataManager + .batchStoreContainerKeyCounts(rdbBatchOperation, containerID, 0L); + reconContainerMetadataManager.commitBatchOperation(rdbBatchOperation); + } + + // Check existing container state in UNHEALTHY_CONTAINER table + // will be updated as EMPTY_MISSING + LambdaTestUtils.await(25000, 1000, () -> { + List allEmptyMissingContainers = + reconContainerManager.getContainerSchemaManager() + .getUnhealthyContainers( + ContainerSchemaDefinition.UnHealthyContainerStates. + EMPTY_MISSING, + 0, 1000); + return (allEmptyMissingContainers.size() == 1); + }); + // Now restart the cluster and verify the container is no longer missing. 
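
The new step added to testEmptyMissingContainerDownNode above zeroes the container-to-key count through a batched Recon write and then waits for the container to be reclassified as EMPTY_MISSING. A sketch of that sequence; reconContainerMetadataManager, reconContainerManager and containerID are the test's own fields, and the list element type is omitted as it is in the rendered diff.

    // Sketch: drop the key count for the container via a batched write...
    try (RDBBatchOperation batch = new RDBBatchOperation()) {
      reconContainerMetadataManager
          .batchStoreContainerKeyCounts(batch, containerID, 0L);
      reconContainerMetadataManager.commitBatchOperation(batch);
    }

    // ...then wait for Recon to flag the container as EMPTY_MISSING again.
    LambdaTestUtils.await(25000, 1000, () -> {
      List<?> emptyMissing = reconContainerManager.getContainerSchemaManager()
          .getUnhealthyContainers(
              ContainerSchemaDefinition.UnHealthyContainerStates.EMPTY_MISSING,
              0, 1000);
      return emptyMissing.size() == 1;
    });
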
cluster.restartHddsDatanode(pipeline.getFirstNode(), true); LambdaTestUtils.await(25000, 1000, () -> { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java index 9589b1c40056..d52b0e99b2fb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java @@ -38,6 +38,7 @@ import java.util.Collections; import java.util.List; import java.util.Optional; +import java.util.HashMap; import java.util.concurrent.TimeUnit; import org.apache.hadoop.hdds.client.BlockID; @@ -46,6 +47,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; @@ -65,8 +67,6 @@ import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.util.EntityUtils; -import com.google.gson.Gson; -import com.google.gson.internal.LinkedTreeMap; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; @@ -202,7 +202,7 @@ public void testOmDBSyncing() throws Exception { // verify sequence number after full snapshot assertEquals(omLatestSeqNumber, reconLatestSeqNumber); - assertEquals(0, metrics.getSequenceNumberLag().value()); + assertEquals(0, metrics.getSequenceNumberLag()); //add 4 keys to check for delta updates addKeys(1, 5); @@ -220,7 +220,7 @@ public void testOmDBSyncing() throws Exception { //verify sequence number after Delta Updates assertEquals(omLatestSeqNumber, reconLatestSeqNumber); - assertEquals(0, metrics.getSequenceNumberLag().value()); + assertEquals(0, metrics.getSequenceNumberLag()); long beforeRestartSnapShotTimeStamp = getReconTaskAttributeFromJson( taskStatusResponse, @@ -260,7 +260,7 @@ public void testOmDBSyncing() throws Exception { //verify sequence number after Delta Updates assertEquals(omLatestSeqNumber, reconLatestSeqNumber); - assertEquals(0, metrics.getSequenceNumberLag().value()); + assertEquals(0, metrics.getSequenceNumberLag()); } // This test simulates the mis-match in sequence number between Recon OM @@ -314,7 +314,7 @@ public void testOmDBSyncWithSeqNumberMismatch() throws Exception { // verify sequence number after incremental delta snapshot assertEquals(omLatestSeqNumber, reconLatestSeqNumber); - assertEquals(0, metrics.getSequenceNumberLag().value()); + assertEquals(0, metrics.getSequenceNumberLag()); String volume = "vol15"; String bucket = "bucket15"; @@ -356,7 +356,7 @@ public void testOmDBSyncWithSeqNumberMismatch() throws Exception { reconLatestSeqNumber = ((RDBStore) reconMetadataManagerInstance.getStore()).getDb() .getLatestSequenceNumber(); - assertEquals(0, metrics.getSequenceNumberLag().value()); + assertEquals(0, metrics.getSequenceNumberLag()); assertEquals(omLatestSeqNumber, reconLatestSeqNumber); reconLatestSeqNumber = ((RDBStore) reconMetadataManagerInstance.getStore()).getDb() @@ -381,16 +381,23 @@ private static OmKeyLocationInfoGroup getOmKeyLocationInfoGroup() { private long getReconTaskAttributeFromJson(String taskStatusResponse, String taskName, - String 
entityAttribute) { - ArrayList taskStatusList = new Gson() - .fromJson(taskStatusResponse, ArrayList.class); - Optional taskEntity = - taskStatusList - .stream() - .filter(task -> task.get("taskName").equals(taskName)) - .findFirst(); - assertTrue(taskEntity.isPresent()); - return (long) (double) taskEntity.get().get(entityAttribute); + String entityAttribute) + throws IOException { + List> taskStatusList = + JsonUtils.readTreeAsListOfMaps(taskStatusResponse); + + // Stream through the list to find the task entity matching the taskName + Optional> taskEntity = taskStatusList.stream() + .filter(task -> taskName.equals(task.get("taskName"))) + .findFirst(); + + if (taskEntity.isPresent()) { + Number number = (Number) taskEntity.get().get(entityAttribute); + return number.longValue(); + } else { + throw new IOException( + "Task entity for task name " + taskName + " not found"); + } } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java index 8baad9cb97b4..0d7cb5fbf075 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java @@ -73,12 +73,12 @@ public void setup() throws Exception { dbConf.setSyncOption(true); conf.setFromObject(dbConf); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) - .setOMServiceId(OM_SERVICE_ID) - .setNumDatanodes(1) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setOMServiceId(OM_SERVICE_ID) .setNumOfOzoneManagers(3) - .includeRecon(true) - .build(); + .setNumDatanodes(1) + .includeRecon(true); + cluster = builder.build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(OM_SERVICE_ID, conf); objectStore = client.getObjectStore(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java index 9d0552a169fe..6f6c5439d8c1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java @@ -96,7 +96,7 @@ public void init() throws Exception { conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId(scmServiceId) .setNumOfStorageContainerManagers(numOfSCMs) .setNumOfActiveSCMs(numOfSCMs) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java index a79e2de245da..3a9f7e322b9e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java @@ -80,7 +80,6 @@ public class TestOzoneContainerUpgradeShell { private static final Logger LOG = 
LoggerFactory.getLogger(TestOzoneContainerUpgradeShell.class); - private static String omServiceId; private static MiniOzoneCluster cluster = null; private static OzoneClient client; private static OzoneConfiguration conf = null; @@ -88,12 +87,7 @@ public class TestOzoneContainerUpgradeShell { private static final String BUCKET_NAME = UUID.randomUUID().toString(); protected static void startCluster() throws Exception { - // Init HA cluster - omServiceId = "om-service-test-upgrade-container1"; - final int numDNs = 3; cluster = MiniOzoneCluster.newBuilder(conf) - .setOMServiceId(omServiceId) - .setNumDatanodes(numDNs) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java index b50cea759ea4..15d9746fcb6a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java @@ -88,7 +88,6 @@ protected static void startCluster() throws Exception { omServiceId = "om-service-test1"; final int numDNs = 3; cluster = MiniOzoneCluster.newBuilder(conf) - .setOMServiceId(omServiceId) .setNumDatanodes(numDNs) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index 6eb892659820..085858f71179 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.io.PrintStream; import java.io.UnsupportedEncodingException; +import java.net.URI; import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; @@ -63,6 +64,7 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.security.UserGroupInformation; import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.util.ToolRunner; @@ -77,14 +79,17 @@ import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY; import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_LISTING_PAGE_SIZE; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; - import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_EMPTY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_EMPTY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; +import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED; +import static org.apache.hadoop.ozone.om.helpers.BucketLayout.LEGACY; +import 
static org.apache.hadoop.ozone.om.helpers.BucketLayout.OBJECT_STORE; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -104,6 +109,8 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestMethodOrder; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine; @@ -130,7 +137,7 @@ public class TestOzoneShellHA { private static File baseDir; private static File testFile; private static String testFilePathString; - private static MiniOzoneCluster cluster = null; + private static MiniOzoneHAClusterImpl cluster = null; private static File testDir; private static MiniKMS miniKMS; private static OzoneClient client; @@ -186,11 +193,12 @@ protected static void startCluster(OzoneConfiguration conf) throws Exception { final int numDNs = 5; conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, getKeyProviderURI(miniKMS)); - cluster = MiniOzoneCluster.newOMHABuilder(conf) - .setOMServiceId(omServiceId) + conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) - .setNumDatanodes(numDNs) - .build(); + .setNumDatanodes(numDNs); + cluster = builder.build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); } @@ -287,8 +295,7 @@ private void executeWithError(OzoneShell shell, String[] args, * @return the leader OM's Node ID in the MiniOzoneHACluster. */ private String getLeaderOMNodeId() { - MiniOzoneHAClusterImpl haCluster = (MiniOzoneHAClusterImpl) cluster; - OzoneManager omLeader = haCluster.getOMLeader(); + OzoneManager omLeader = cluster.getOMLeader(); assertNotNull(omLeader, "There should be a leader OM at this point."); return omLeader.getOMNodeId(); } @@ -884,6 +891,34 @@ public void testLinkBucketOrphan() throws Exception { } } + @Test + @Timeout(10) + public void testListBucket() throws Exception { + final String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + omServiceId; + OzoneConfiguration clientConf = + getClientConfForOFS(hostPrefix, cluster.getConf()); + int pageSize = 20; + clientConf.setInt(OZONE_FS_LISTING_PAGE_SIZE, pageSize); + URI uri = FileSystem.getDefaultUri(clientConf); + clientConf.setBoolean(String.format("fs.%s.impl.disable.cache", uri.getScheme()), true); + OzoneFsShell shell = new OzoneFsShell(clientConf); + + String volName = "testlistbucket"; + int numBuckets = pageSize; + + try { + generateBuckets("/" + volName, numBuckets); + out.reset(); + int res = ToolRunner.run(shell, new String[]{"-ls", "/" + volName}); + assertEquals(0, res); + String r = out.toString(DEFAULT_ENCODING); + assertThat(r).matches("(?s)^Found " + numBuckets + " items.*"); + + } finally { + shell.close(); + } + } + @Test public void testDeleteTrashNoSkipTrash() throws Exception { @@ -1975,9 +2010,10 @@ public void testVolumeListKeys() OMException exception = (OMException) execution.getCause(); assertEquals(VOLUME_NOT_FOUND, exception.getResult()); } - - @Test - public void testRecursiveVolumeDelete() + + @ParameterizedTest + @ValueSource(ints = {1, 5}) + public void testRecursiveVolumeDelete(int threadCount) throws Exception { String volume1 = "volume10"; String volume2 = "volume20"; @@ -1986,47 
+2022,19 @@ public void testRecursiveVolumeDelete() // Create bucket bucket1 with layout FILE_SYSTEM_OPTIMIZED // Insert some keys into it generateKeys(OZONE_URI_DELIMITER + volume1, - "/bucketfso", + "/fsobucket1", BucketLayout.FILE_SYSTEM_OPTIMIZED.toString()); - // Create another volume volume2 with bucket and some keys into it. + // Create another volume volume2 with bucket and some keys into it. generateKeys(OZONE_URI_DELIMITER + volume2, "/bucket2", BucketLayout.FILE_SYSTEM_OPTIMIZED.toString()); - // Create OBS bucket in volume1 - String[] args = new String[] {"bucket", "create", "--layout", - BucketLayout.OBJECT_STORE.toString(), volume1 + "/bucketobs"}; - execute(ozoneShell, args); - out.reset(); - - // Insert few keys into OBS bucket - String keyName = OZONE_URI_DELIMITER + volume1 + "/bucketobs" + - OZONE_URI_DELIMITER + "key"; - for (int i = 0; i < 5; i++) { - args = new String[] { - "key", "put", "o3://" + omServiceId + keyName + i, - testFile.getPath()}; - execute(ozoneShell, args); - } - out.reset(); - - // Create Legacy bucket in volume1 - args = new String[] {"bucket", "create", "--layout", - BucketLayout.LEGACY.toString(), volume1 + "/bucketlegacy"}; - execute(ozoneShell, args); - out.reset(); - - // Insert few keys into legacy bucket - keyName = OZONE_URI_DELIMITER + volume1 + "/bucketlegacy" + - OZONE_URI_DELIMITER + "key"; - for (int i = 0; i < 5; i++) { - args = new String[] { - "key", "put", "o3://" + omServiceId + keyName + i, - testFile.getPath()}; - execute(ozoneShell, args); - } - out.reset(); + createBucketAndGenerateKeys(volume1, FILE_SYSTEM_OPTIMIZED, "fsobucket2"); + createBucketAndGenerateKeys(volume1, OBJECT_STORE, "obsbucket1"); + createBucketAndGenerateKeys(volume1, OBJECT_STORE, "obsbucket2"); + createBucketAndGenerateKeys(volume1, LEGACY, "legacybucket1"); + createBucketAndGenerateKeys(volume1, LEGACY, "legacybucket2"); // Try volume delete without recursive // It should fail as volume is not empty @@ -2041,22 +2049,50 @@ public void testRecursiveVolumeDelete() assertEquals(client.getObjectStore().getVolume(volume1) .getName(), volume1); - // Delete volume1(containing OBS, FSO and Legacy buckets) recursively - args = - new String[] {"volume", "delete", volume1, "-r", "--yes"}; + // Delete volume1(containing OBS, FSO and Legacy buckets) recursively with thread count + String[] args = new String[] {"volume", "delete", volume1, "-r", "--yes", "-t", String.valueOf(threadCount)}; execute(ozoneShell, args); out.reset(); + // volume1 should not exist + omExecution = assertThrows(OMException.class, + () -> client.getObjectStore().getVolume(volume1)); + assertEquals(VOLUME_NOT_FOUND, omExecution.getResult()); + // volume2 should still exist assertEquals(client.getObjectStore().getVolume(volume2) .getName(), volume2); - // volume1 should not exist + // Delete volume2 recursively + args = new String[] {"volume", "delete", volume2, "-r", "--yes"}; + execute(ozoneShell, args); + out.reset(); + + // volume2 should not exist omExecution = assertThrows(OMException.class, - () -> client.getObjectStore().getVolume(volume1)); + () -> client.getObjectStore().getVolume(volume2)); assertEquals(VOLUME_NOT_FOUND, omExecution.getResult()); } + private void createBucketAndGenerateKeys(String volume, BucketLayout layout, String bucketName) { + // Create bucket + String[] args = new String[] {"bucket", "create", volume + "/" + bucketName, + "--layout", layout.toString()}; + execute(ozoneShell, args); + out.reset(); + + // Insert keys + String keyName = OZONE_URI_DELIMITER + volume 
+ "/" + bucketName + + OZONE_URI_DELIMITER + "key"; + for (int i = 0; i < 5; i++) { + args = new String[] { + "key", "put", "o3://" + omServiceId + keyName + i, + testFile.getPath()}; + execute(ozoneShell, args); + } + out.reset(); + } + @Test public void testLinkedAndNonLinkedBucketMetaData() throws Exception { @@ -2117,6 +2153,43 @@ public void testLinkedAndNonLinkedBucketMetaData() out.reset(); } + @Test + public void testKeyDeleteLegacyWithEnableFileSystemPath() throws IOException { + String volumeName = "vol5"; + String bucketName = "legacybucket"; + String[] args = new String[] {"volume", "create", "o3://" + omServiceId + OZONE_URI_DELIMITER + volumeName}; + execute(ozoneShell, args); + + args = new String[] {"bucket", "create", "o3://" + omServiceId + OZONE_URI_DELIMITER + + volumeName + OZONE_URI_DELIMITER + bucketName, "--layout", BucketLayout.LEGACY.toString()}; + execute(ozoneShell, args); + + String dirPath = OZONE_URI_DELIMITER + volumeName + OZONE_URI_DELIMITER + + bucketName + OZONE_URI_DELIMITER + "dir/"; + String keyPath = dirPath + "key1"; + + // Create key, it will generate two keys, one with dirPath other with keyPath + args = new String[] {"key", "put", "o3://" + omServiceId + keyPath, testFile.getPath()}; + execute(ozoneShell, args); + + // Enable fileSystem path for client config + String fileSystemEnable = generateSetConfString(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true"); + // Delete dirPath key, it should fail + args = new String[] {fileSystemEnable, "key", "delete", dirPath}; + execute(ozoneShell, args); + + // Check number of keys + OzoneVolume volume = client.getObjectStore().getVolume(volumeName); + OzoneBucket bucket = volume.getBucket(bucketName); + List files = bucket.listStatus("", true, "", 5); + // Two keys should still exist, dirPath and keyPath + assertEquals(2, files.size()); + + // cleanup + args = new String[] {"volume", "delete", volumeName, "-r", "--yes"}; + execute(ozoneShell, args); + } + private static String getKeyProviderURI(MiniKMS kms) { return KMSClientProvider.SCHEME_NAME + "://" + kms.getKMSUrl().toExternalForm().replace("://", "@"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java index 6abfbed2bd38..5d6475071419 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java @@ -96,8 +96,7 @@ public class TestOzoneTenantShell { private static final File AUDIT_LOG_FILE = new File("audit.log"); private static OzoneConfiguration conf = null; - private static MiniOzoneCluster cluster = null; - private static MiniOzoneHAClusterImpl haCluster = null; + private static MiniOzoneHAClusterImpl cluster = null; private static OzoneShell ozoneSh = null; private static TenantShell tenantShell = null; @@ -153,12 +152,11 @@ public static void init() throws Exception { // Init cluster omServiceId = "om-service-test1"; numOfOMs = 3; - cluster = MiniOzoneCluster.newOMHABuilder(conf) - .setOMServiceId(omServiceId) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) - .withoutDatanodes() // Remove this once we are actually writing data - .build(); - haCluster = (MiniOzoneHAClusterImpl) cluster; + .withoutDatanodes(); // Remove this 
once we are actually writing data + cluster = builder.build(); cluster.waitForClusterToBeReady(); } @@ -377,17 +375,17 @@ public void testAssignAdmin() throws IOException { executeHA(tenantShell, new String[] {"--verbose", "user", "assign-admin", tenantName + "$" + userName, "--tenant=" + tenantName, "--delegated=true"}); - checkOutput(out, "{\n" + " \"accessId\": \"devaa$alice\",\n" - + " \"tenantId\": \"devaa\",\n" + " \"isAdmin\": true,\n" - + " \"isDelegatedAdmin\": true\n" + "}\n", true, true); + checkOutput(out, "{\n" + " \"accessId\" : \"devaa$alice\",\n" + + " \"tenantId\" : \"devaa\",\n" + " \"isAdmin\" : true,\n" + + " \"isDelegatedAdmin\" : true\n" + "}\n", true, true); checkOutput(err, "", true); // Clean up executeHA(tenantShell, new String[] {"--verbose", "user", "revoke-admin", tenantName + "$" + userName, "--tenant=" + tenantName}); - checkOutput(out, "{\n" + " \"accessId\": \"devaa$alice\",\n" - + " \"tenantId\": \"devaa\",\n" + " \"isAdmin\": false,\n" - + " \"isDelegatedAdmin\": false\n" + "}\n", true, true); + checkOutput(out, "{\n" + " \"accessId\" : \"devaa$alice\",\n" + + " \"tenantId\" : \"devaa\",\n" + " \"isAdmin\" : false,\n" + + " \"isDelegatedAdmin\" : false\n" + "}\n", true, true); checkOutput(err, "", true); executeHA(tenantShell, new String[] { @@ -460,7 +458,7 @@ public void testOzoneTenantBasicOperations() throws IOException { executeHA(tenantShell, new String[] {"list", "--json"}); // Not checking the full output here - checkOutput(out, "\"tenantId\": \"dev\",", false); + checkOutput(out, "\"tenantId\" : \"dev\",", false); checkOutput(err, "", true); // Attempt user getsecret before assignment, should fail @@ -529,16 +527,26 @@ public void testOzoneTenantBasicOperations() throws IOException { executeHA(tenantShell, new String[] { "user", "info", "--json", "bob"}); - checkOutput(out, "{\n" + " \"user\": \"bob\",\n" + " \"tenants\": [\n" - + " {\n" + " \"accessId\": \"research$bob\",\n" - + " \"tenantId\": \"research\",\n" + " \"isAdmin\": false,\n" - + " \"isDelegatedAdmin\": false\n" + " },\n" + " {\n" - + " \"accessId\": \"finance$bob\",\n" - + " \"tenantId\": \"finance\",\n" + " \"isAdmin\": false,\n" - + " \"isDelegatedAdmin\": false\n" + " },\n" + " {\n" - + " \"accessId\": \"dev$bob\",\n" - + " \"tenantId\": \"dev\",\n" + " \"isAdmin\": true,\n" - + " \"isDelegatedAdmin\": true\n" + " }\n" + " ]\n" + "}\n", + checkOutput(out, + "{\n" + + " \"user\" : \"bob\",\n" + + " \"tenants\" : [ {\n" + + " \"accessId\" : \"research$bob\",\n" + + " \"tenantId\" : \"research\",\n" + + " \"isAdmin\" : false,\n" + + " \"isDelegatedAdmin\" : false\n" + + " }, {\n" + + " \"accessId\" : \"finance$bob\",\n" + + " \"tenantId\" : \"finance\",\n" + + " \"isAdmin\" : false,\n" + + " \"isDelegatedAdmin\" : false\n" + + " }, {\n" + + " \"accessId\" : \"dev$bob\",\n" + + " \"tenantId\" : \"dev\",\n" + + " \"isAdmin\" : true,\n" + + " \"isDelegatedAdmin\" : true\n" + + " } ]\n" + + "}\n", true, true); checkOutput(err, "", true); @@ -641,7 +649,7 @@ public void testOzoneTenantBasicOperations() throws IOException { // Because InMemoryMultiTenantAccessController is used in OMs for this // integration test, we need to trigger BG sync on all OMs just // in case a leader changed right after the last operation. 
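
The expected strings in the TestOzoneTenantShell hunks change from Gson-style "key": value output to "key" : value with "[ { ... } ]" array brackets. A throwaway illustration, under the assumption that the new layout comes from Jackson's default pretty printer rather than from any Ozone-specific API; values are placeholders.

    import com.fasterxml.jackson.databind.ObjectMapper;
    import java.util.Collections;
    import java.util.LinkedHashMap;
    import java.util.Map;

    public class PrettyPrinterShapeDemo {
      public static void main(String[] args) throws Exception {
        Map<String, Object> record = new LinkedHashMap<>();
        record.put("accessId", "tenant1$bob");
        record.put("tenantId", "tenant1");
        // Jackson's default pretty printer emits "key" : value pairs and
        // "[ { ... } ]" arrays, matching the updated checkOutput expectations.
        System.out.println(new ObjectMapper().writerWithDefaultPrettyPrinter()
            .writeValueAsString(Collections.singletonList(record)));
      }
    }
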
- haCluster.getOzoneManagersList().forEach(om -> om.getMultiTenantManager() + cluster.getOzoneManagersList().forEach(om -> om.getMultiTenantManager() .getOMRangerBGSyncService().triggerRangerSyncOnce()); // Delete dev volume should fail because the volume reference count > 0L @@ -664,8 +672,8 @@ public void testOzoneTenantBasicOperations() throws IOException { // Then delete tenant, should succeed executeHA(tenantShell, new String[] {"--verbose", "delete", "dev"}); - checkOutput(out, "{\n" + " \"tenantId\": \"dev\",\n" - + " \"volumeName\": \"dev\",\n" + " \"volumeRefCount\": 0\n" + "}\n", + checkOutput(out, "{\n" + " \"tenantId\" : \"dev\",\n" + + " \"volumeName\" : \"dev\",\n" + " \"volumeRefCount\" : 0\n" + "}\n", true, true); checkOutput(err, "Deleted tenant 'dev'.\n", false); deleteVolume("dev"); @@ -680,7 +688,7 @@ public void testOzoneTenantBasicOperations() throws IOException { public void testListTenantUsers() throws IOException { executeHA(tenantShell, new String[] {"--verbose", "create", "tenant1"}); checkOutput(out, "{\n" + - " \"tenantId\": \"tenant1\"\n" + "}\n", true, true); + " \"tenantId\" : \"tenant1\"\n" + "}\n", true, true); checkOutput(err, "", true); executeHA(tenantShell, new String[] { @@ -704,10 +712,14 @@ public void testListTenantUsers() throws IOException { executeHA(tenantShell, new String[] { "user", "list", "tenant1", "--json"}); - checkOutput(out, "[\n" + " {\n" + " \"user\": \"bob\",\n" - + " \"accessId\": \"tenant1$bob\"\n" + " },\n" + " {\n" - + " \"user\": \"alice\",\n" + " \"accessId\": \"tenant1$alice\"\n" - + " }\n" + "]\n", true); + checkOutput(out, + "[ {\n" + + " \"user\" : \"bob\",\n" + + " \"accessId\" : \"tenant1$bob\"\n" + + "}, {\n" + + " \"user\" : \"alice\",\n" + + " \"accessId\" : \"tenant1$alice\"\n" + + "} ]\n", true); checkOutput(err, "", true); executeHA(tenantShell, new String[] { @@ -718,8 +730,10 @@ public void testListTenantUsers() throws IOException { executeHA(tenantShell, new String[] { "user", "list", "tenant1", "--prefix=b", "--json"}); - checkOutput(out, "[\n" + " {\n" + " \"user\": \"bob\",\n" - + " \"accessId\": \"tenant1$bob\"\n" + " }\n" + "]\n", true); + checkOutput(out, "[ {\n" + + " \"user\" : \"bob\",\n" + + " \"accessId\" : \"tenant1$bob\"\n" + + "} ]\n", true); checkOutput(err, "", true); int exitCode = executeHA(tenantShell, new String[] { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java index 427b36d9a952..97a43c248a14 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java @@ -67,7 +67,7 @@ public class TestReconfigShell { public static void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); String omServiceId = UUID.randomUUID().toString(); - cluster = MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(1) .setNumOfStorageContainerManagers(1) @@ -95,7 +95,7 @@ public void testDataNodeGetReconfigurableProperties() throws Exception { HddsDatanodeClientProtocolServer server = datanodeService.getClientProtocolServer(); InetSocketAddress socket = server.getClientRpcAddress(); - executeAndAssertProperties(datanodeService.getReconfigurationHandler(), + 
executeAndAssertProperties(datanodeService.getReconfigurationHandler(), "--service=DATANODE", socket, capture); } } @@ -105,7 +105,7 @@ public void testDataNodeGetReconfigurableProperties() throws Exception { public void testOzoneManagerGetReconfigurationProperties() throws Exception { try (SystemOutCapturer capture = new SystemOutCapturer()) { InetSocketAddress socket = ozoneManager.getOmRpcServerAddr(); - executeAndAssertProperties(ozoneManager.getReconfigurationHandler(), + executeAndAssertProperties(ozoneManager.getReconfigurationHandler(), "--service=OM", socket, capture); } } @@ -116,17 +116,17 @@ public void testStorageContainerManagerGetReconfigurationProperties() try (SystemOutCapturer capture = new SystemOutCapturer()) { InetSocketAddress socket = storageContainerManager.getClientRpcAddress(); executeAndAssertProperties( - storageContainerManager.getReconfigurationHandler(), socket, capture); + storageContainerManager.getReconfigurationHandler(), "--service=SCM", socket, capture); } } private void executeAndAssertProperties( - ReconfigurableBase reconfigurableBase, + ReconfigurableBase reconfigurableBase, String service, InetSocketAddress socket, SystemOutCapturer capture) throws UnsupportedEncodingException { String address = socket.getHostString() + ":" + socket.getPort(); ozoneAdmin.execute( - new String[] {"reconfig", "--address", address, "properties"}); + new String[] {"reconfig", service, "--address", address, "properties"}); assertReconfigurablePropertiesOutput( reconfigurableBase.getReconfigurableProperties(), capture.getOutput()); } @@ -171,7 +171,7 @@ private void executeAndAssertBulkReconfigCount(int except) throws Exception { try (SystemOutCapturer capture = new SystemOutCapturer()) { ozoneAdmin.execute(new String[] { - "reconfig", "--in-service-datanodes", "properties"}); + "reconfig", "--service=DATANODE", "--in-service-datanodes", "properties"}); String output = capture.getOutput(); assertThat(capture.getOutput()).contains(String.format("successfully %d", except)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java index 2e1b7a78736f..c1d55accfd70 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java @@ -45,13 +45,13 @@ public static void init() throws Exception { // Init HA cluster omServiceId = "om-service-test1"; numOfOMs = 3; - cluster = MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) .build(); conf.setQuietMode(false); // enable ratis for Scm. 
- conf.setBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true); + conf.setBoolean(ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, true); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java index 62d50708c83a..d3d7c7766e7b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java @@ -66,7 +66,7 @@ public void init() throws Exception { conf.setLong(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD, SNAPSHOT_THRESHOLD); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId).setNumOfOzoneManagers(numOfOMs) .setNumOfStorageContainerManagers(numOfSCMs) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java index 8985af2ac56a..71f1b682d0f4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/tools/contract/AbstractContractDistCpTest.java @@ -55,6 +55,7 @@ import org.apache.hadoop.tools.util.DistCpTestUtils; import org.apache.hadoop.util.functional.RemoteIterators; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; @@ -184,6 +185,7 @@ public void setup() throws Exception { remoteFS.delete(remoteDir, true); } + @AfterEach @Override public void teardown() throws Exception { // if remote FS supports IOStatistics log it. 
diff --git a/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml b/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml index 4e79ae97fc24..779ed2b785cb 100644 --- a/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml +++ b/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml @@ -37,7 +37,7 @@ - dfs.container.ratis.num.write.chunk.threads.per.volume + hdds.container.ratis.num.write.chunk.threads.per.volume 4 @@ -52,10 +52,25 @@ - dfs.container.ratis.datastream.enabled + hdds.container.ratis.datastream.enabled true + + + hdds.heartbeat.interval + 1s + + + ozone.scm.heartbeat.thread.interval + 100ms + + + + ozone.scm.ratis.pipeline.limit + 3 + + ozone.scm.close.container.wait.duration 1s @@ -67,7 +82,8 @@ - dfs.container.ratis.log.appender.queue.byte-limit + hdds.container.ratis.log.appender.queue.byte-limit + 8MB diff --git a/hadoop-ozone/interface-client/pom.xml b/hadoop-ozone/interface-client/pom.xml index 2c1e03ce3f86..b92de2f5bc1e 100644 --- a/hadoop-ozone/interface-client/pom.xml +++ b/hadoop-ozone/interface-client/pom.xml @@ -44,6 +44,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> io.grpc grpc-protobuf + + + com.google.code.findbugs + jsr305 + + io.grpc diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 9cafd9b31b85..2106628a568c 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -146,6 +146,7 @@ enum Type { SetSnapshotProperty = 128; ListStatusLight = 129; GetSnapshotInfo = 130; + RenameSnapshot = 131; ListOpenFiles = 132; } @@ -283,8 +284,9 @@ message OMRequest { optional MultipartUploadsExpiredAbortRequest multipartUploadsExpiredAbortRequest = 126; optional SetSnapshotPropertyRequest SetSnapshotPropertyRequest = 127; optional SnapshotInfoRequest SnapshotInfoRequest = 128; + optional RenameSnapshotRequest RenameSnapshotRequest = 129; - optional ListOpenFilesRequest ListOpenFilesRequest = 132; + optional ListOpenFilesRequest ListOpenFilesRequest = 130; } message OMResponse { @@ -407,8 +409,9 @@ message OMResponse { optional ListStatusLightResponse listStatusLightResponse = 129; optional SnapshotInfoResponse SnapshotInfoResponse = 130; optional OMLockDetailsProto omLockDetails = 131; + optional RenameSnapshotResponse RenameSnapshotResponse = 132; - optional ListOpenFilesResponse ListOpenFilesResponse = 132; + optional ListOpenFilesResponse ListOpenFilesResponse = 133; } enum Status { @@ -1614,8 +1617,9 @@ message MultipartCommitUploadPartRequest { } message MultipartCommitUploadPartResponse { - // This one is returned as Etag for S3. optional string partName = 1; + // This one is returned as Etag for S3. 
+ optional string eTag = 2; } message MultipartUploadCompleteRequest { @@ -1633,6 +1637,7 @@ message MultipartUploadCompleteResponse { message Part { required uint32 partNumber = 1; required string partName = 2; + optional string eTag = 3; } message MultipartUploadAbortRequest { @@ -1705,6 +1710,7 @@ message PartInfo { required string partName = 2; required uint64 modificationTime = 3; required uint64 size = 4; + optional string eTag = 5; } /** @@ -1857,6 +1863,14 @@ message CreateSnapshotRequest { optional uint64 creationTime = 5; } +message RenameSnapshotRequest { + optional string volumeName = 1; + optional string bucketName = 2; + optional string snapshotOldName = 3; + optional string snapshotNewName = 4; + optional uint64 renameTime = 5; +} + message ListSnapshotRequest { optional string volumeName = 1; optional string bucketName = 2; @@ -2019,6 +2033,10 @@ message DeleteSnapshotResponse { } +message RenameSnapshotResponse { + optional SnapshotInfo snapshotInfo = 1; +} + message SnapshotInfoResponse { optional SnapshotInfo snapshotInfo = 1; } @@ -2117,6 +2135,7 @@ message RecoverLeaseRequest { message RecoverLeaseResponse { optional bool response = 1 [deprecated=true]; optional KeyInfo keyInfo = 2; + optional bool isKeyInfo = 3 [default = true]; } message SetTimesRequest { diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java index 4cc76868f745..604d550f7b84 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java @@ -25,13 +25,11 @@ import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.PersistedPrefixInfo; -import java.util.BitSet; -import java.util.HashMap; +import java.util.ArrayList; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.stream.Collectors; /** * Wrapper class for Ozone prefix path info, currently mainly target for ACL but @@ -48,16 +46,13 @@ public static Codec getCodec() { return CODEC; } - private String name; - private List acls; + private final String name; + private final List acls; - public OmPrefixInfo(String name, List acls, - Map metadata, long objectId, long updateId) { - this.name = name; - this.acls = acls; - this.metadata = metadata; - this.objectID = objectId; - this.updateID = updateId; + private OmPrefixInfo(Builder b) { + super(b); + name = b.name; + acls = new ArrayList<>(b.acls); } /** @@ -100,17 +95,19 @@ public static OmPrefixInfo.Builder newBuilder() { /** * Builder for OmPrefixInfo. 
*/ - public static class Builder { + public static class Builder extends WithObjectID.Builder { private String name; - private List acls; - private Map metadata; - private long objectID; - private long updateID; + private final List acls; public Builder() { //Default values this.acls = new LinkedList<>(); - this.metadata = new HashMap<>(); + } + + public Builder(OmPrefixInfo obj) { + super(obj); + setName(obj.name); + acls = new ArrayList<>(obj.getAcls()); } public Builder setAcls(List listOfAcls) { @@ -125,26 +122,28 @@ public Builder setName(String n) { return this; } + @Override public OmPrefixInfo.Builder addMetadata(String key, String value) { - metadata.put(key, value); + super.addMetadata(key, value); return this; } + @Override public OmPrefixInfo.Builder addAllMetadata( Map additionalMetadata) { - if (additionalMetadata != null) { - metadata.putAll(additionalMetadata); - } + super.addAllMetadata(additionalMetadata); return this; } + @Override public Builder setObjectID(long obId) { - this.objectID = obId; + super.setObjectID(obId); return this; } + @Override public Builder setUpdateID(long id) { - this.updateID = id; + super.setUpdateID(id); return this; } @@ -154,7 +153,7 @@ public Builder setUpdateID(long id) { */ public OmPrefixInfo build() { Preconditions.checkNotNull(name); - return new OmPrefixInfo(name, acls, metadata, objectID, updateID); + return new OmPrefixInfo(this); } } @@ -164,9 +163,9 @@ public OmPrefixInfo build() { public PersistedPrefixInfo getProtobuf() { PersistedPrefixInfo.Builder pib = PersistedPrefixInfo.newBuilder().setName(name) - .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) - .setObjectID(objectID) - .setUpdateID(updateID); + .addAllMetadata(KeyValueUtil.toProtobuf(getMetadata())) + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()); if (acls != null) { pib.addAllAcls(OzoneAclStorageUtil.toProtobuf(acls)); } @@ -210,14 +209,14 @@ public boolean equals(Object o) { OmPrefixInfo that = (OmPrefixInfo) o; return name.equals(that.name) && Objects.equals(acls, that.acls) && - Objects.equals(metadata, that.metadata) && - objectID == that.objectID && - updateID == that.updateID; + Objects.equals(getMetadata(), that.getMetadata()) && + getObjectID() == that.getObjectID() && + getUpdateID() == that.getUpdateID(); } @Override public int hashCode() { - return Objects.hash(name, acls, metadata, objectID, updateID); + return Objects.hash(name, acls, getMetadata(), getObjectID(), getUpdateID()); } @Override @@ -225,9 +224,9 @@ public String toString() { return "OmPrefixInfo{" + "name='" + name + '\'' + ", acls=" + acls + - ", metadata=" + metadata + - ", objectID=" + objectID + - ", updateID=" + updateID + + ", metadata=" + getMetadata() + + ", objectID=" + getObjectID() + + ", updateID=" + getUpdateID() + '}'; } @@ -235,16 +234,11 @@ public String toString() { * Return a new copy of the object. 
*/ public OmPrefixInfo copyObject() { - List aclList = acls.stream().map(acl -> - new OzoneAcl(acl.getType(), acl.getName(), - (BitSet) acl.getAclBitSet().clone(), acl.getAclScope())) - .collect(Collectors.toList()); + return toBuilder().build(); + } - Map metadataList = new HashMap<>(); - if (metadata != null) { - metadata.forEach((k, v) -> metadataList.put(k, v)); - } - return new OmPrefixInfo(name, aclList, metadataList, objectID, updateID); + public Builder toBuilder() { + return new Builder(this); } } diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclStorage.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclStorage.java index abc6359efcaf..cb9bdc2b4be4 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclStorage.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclStorage.java @@ -18,9 +18,15 @@ package org.apache.hadoop.ozone.om.helpers; import com.google.protobuf.ByteString; + import java.util.BitSet; +import java.util.EnumSet; +import java.util.List; +import java.util.stream.Collectors; + import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneAcl.AclScope; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.OzoneAclInfo; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.OzoneAclInfo.OzoneAclScope; @@ -49,15 +55,18 @@ public static OzoneAclInfo toProtobuf(OzoneAcl acl) { .setName(acl.getName()) .setType(OzoneAclType.valueOf(acl.getType().name())) .setAclScope(OzoneAclScope.valueOf(acl.getAclScope().name())) - .setRights(ByteString.copyFrom(acl.getAclBitSet().toByteArray())); + .setRights(ByteString.copyFrom(acl.getAclByteArray())); return builder.build(); } public static OzoneAcl fromProtobuf(OzoneAclInfo protoAcl) { BitSet aclRights = BitSet.valueOf(protoAcl.getRights().toByteArray()); + List aclTypeList = aclRights.stream() + .mapToObj(a -> IAccessAuthorizer.ACLType.values()[a]) + .collect(Collectors.toList()); + EnumSet aclSet = EnumSet.copyOf(aclTypeList); return new OzoneAcl(ACLIdentityType.valueOf(protoAcl.getType().name()), - protoAcl.getName(), aclRights, - AclScope.valueOf(protoAcl.getAclScope().name())); + protoAcl.getName(), AclScope.valueOf(protoAcl.getAclScope().name()), aclSet); } } diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java index 5226f315c8be..07eed9a53997 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java @@ -25,8 +25,8 @@ import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.junit.jupiter.api.Test; +import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -40,7 +40,7 @@ public class TestOmPrefixInfo { private static OzoneManagerStorageProtos.OzoneAclInfo buildTestOzoneAclInfo( String aclString) { OzoneAcl oacl = OzoneAcl.parseAcl(aclString); - ByteString rights = 
ByteString.copyFrom(oacl.getAclBitSet().toByteArray()); + ByteString rights = ByteString.copyFrom(oacl.getAclByteArray()); return OzoneManagerStorageProtos.OzoneAclInfo.newBuilder() .setType(OzoneManagerStorageProtos.OzoneAclInfo.OzoneAclType.USER) .setName(oacl.getName()) @@ -73,10 +73,14 @@ private OmPrefixInfo getOmPrefixInfoForTest(String path, String identityString, IAccessAuthorizer.ACLType aclType, OzoneAcl.AclScope scope) { - return new OmPrefixInfo(path, - Collections.singletonList(new OzoneAcl( + return OmPrefixInfo.newBuilder() + .setName(path) + .setAcls(new ArrayList<>(Collections.singletonList(new OzoneAcl( identityType, identityString, - aclType, scope)), new HashMap<>(), 10, 100); + scope, aclType)))) + .setObjectID(10) + .setUpdateID(100) + .build(); } @@ -97,7 +101,7 @@ public void testCopyObject() { // Change acls and check. omPrefixInfo.addAcl(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, username, - IAccessAuthorizer.ACLType.READ, ACCESS)); + ACCESS, IAccessAuthorizer.ACLType.READ)); assertNotEquals(omPrefixInfo, clonePrefixInfo); diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfoCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfoCodec.java index f3ad1d8c7628..4820b37e1ba7 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfoCodec.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfoCodec.java @@ -45,7 +45,7 @@ public void testToAndFromPersistedFormat() throws IOException { List acls = new LinkedList<>(); OzoneAcl ozoneAcl = new OzoneAcl(ACLIdentityType.USER, - "hive", ACLType.ALL, ACCESS); + "hive", ACCESS, ACLType.ALL); acls.add(ozoneAcl); OmPrefixInfo opiSave = OmPrefixInfo.newBuilder() .setName("/user/hive/warehouse") diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index d076e12932d0..5af36fb2291d 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -231,11 +231,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-test-utils test - - org.jmockit - jmockit - test - diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java index e641d132702d..54de09f3328b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java @@ -98,6 +98,7 @@ public enum OMAction implements AuditAction { CREATE_SNAPSHOT, LIST_SNAPSHOT, DELETE_SNAPSHOT, + RENAME_SNAPSHOT, SNAPSHOT_MOVE_DELETED_KEYS, SNAPSHOT_INFO, SET_TIMES, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java index 5bc894b2b922..68429c36d084 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java @@ -154,7 +154,8 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) && context.getAclRights() != ACLType.READ); if (bucketNeedResolved || - ozObject.getResourceType() == OzoneObj.ResourceType.KEY) { + ozObject.getResourceType() == OzoneObj.ResourceType.KEY || + 
ozObject.getResourceType() == OzoneObj.ResourceType.PREFIX) { try { ResolvedBucket resolvedBucket = ozoneManager.resolveBucketLink( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketUtilizationMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketUtilizationMetrics.java new file mode 100644 index 000000000000..d5916c6adc5d --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketUtilizationMetrics.java @@ -0,0 +1,118 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om; + +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.MetricsInfo; +import org.apache.hadoop.metrics2.MetricsSource; +import org.apache.hadoop.metrics2.MetricsSystem; +import org.apache.hadoop.metrics2.annotation.Metrics; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; + +import java.util.Iterator; +import java.util.Map.Entry; + +/** + * A class for collecting and reporting bucket utilization metrics. + *

+ * Available metrics:
+ *
+ *   • Bytes used in bucket.
+ *   • Bucket quota in bytes.
+ *   • Bucket quota in namespace.
+ *   • Bucket available space. Calculated as the difference between the bucket quota and the bytes used in the bucket.
+ *     If the bucket quota is not set, this metric shows -1 as its value.
+ *
    + */ +@InterfaceAudience.Private +@Metrics(about = "Ozone Bucket Utilization Metrics", context = OzoneConsts.OZONE) +public class BucketUtilizationMetrics implements MetricsSource { + + private static final String SOURCE = BucketUtilizationMetrics.class.getSimpleName(); + + private final OMMetadataManager metadataManager; + + public BucketUtilizationMetrics(OMMetadataManager metadataManager) { + this.metadataManager = metadataManager; + } + + public static BucketUtilizationMetrics create(OMMetadataManager metadataManager) { + MetricsSystem ms = DefaultMetricsSystem.instance(); + return ms.register(SOURCE, "Bucket Utilization Metrics", new BucketUtilizationMetrics(metadataManager)); + } + + @Override + public void getMetrics(MetricsCollector collector, boolean all) { + Iterator, CacheValue>> bucketIterator = metadataManager.getBucketIterator(); + + while (bucketIterator.hasNext()) { + Entry, CacheValue> entry = bucketIterator.next(); + OmBucketInfo bucketInfo = entry.getValue().getCacheValue(); + if (bucketInfo == null) { + continue; + } + + long availableSpace; + long quotaInBytes = bucketInfo.getQuotaInBytes(); + if (quotaInBytes == -1) { + availableSpace = quotaInBytes; + } else { + availableSpace = Math.max(bucketInfo.getQuotaInBytes() - bucketInfo.getUsedBytes(), 0); + } + + collector.addRecord(SOURCE) + .setContext("Bucket metrics") + .tag(BucketMetricsInfo.VolumeName, bucketInfo.getVolumeName()) + .tag(BucketMetricsInfo.BucketName, bucketInfo.getBucketName()) + .addGauge(BucketMetricsInfo.BucketUsedBytes, bucketInfo.getUsedBytes()) + .addGauge(BucketMetricsInfo.BucketQuotaBytes, bucketInfo.getQuotaInBytes()) + .addGauge(BucketMetricsInfo.BucketQuotaNamespace, bucketInfo.getQuotaInNamespace()) + .addGauge(BucketMetricsInfo.BucketAvailableBytes, availableSpace); + } + } + + public void unRegister() { + MetricsSystem ms = DefaultMetricsSystem.instance(); + ms.unregisterSource(SOURCE); + } + + enum BucketMetricsInfo implements MetricsInfo { + VolumeName("Volume Metrics."), + BucketName("Bucket Metrics."), + BucketUsedBytes("Bytes used by bucket."), + BucketQuotaBytes("Bucket quote in bytes."), + BucketQuotaNamespace("Bucket quota in namespace."), + BucketAvailableBytes("Bucket available space."); + + private final String desc; + + BucketMetricsInfo(String desc) { + this.desc = desc; + } + + @Override + public String description() { + return desc; + } + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 590fe9ef2725..2527f1ec9a6c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -32,6 +32,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.Stack; import java.util.TreeMap; @@ -44,10 +45,15 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion; import org.apache.hadoop.fs.FileEncryptionInfo; +import org.apache.hadoop.hdds.DFSConfigKeysLegacy; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; +import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.net.Node; +import org.apache.hadoop.hdds.scm.net.NodeImpl; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; import org.apache.hadoop.hdds.utils.BackgroundService; @@ -56,6 +62,9 @@ import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.net.CachedDNSToSwitchMapping; +import org.apache.hadoop.net.DNSToSwitchMapping; +import org.apache.hadoop.net.TableMapping; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -94,6 +103,7 @@ import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.RequestContext; import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Time; import com.google.common.annotations.VisibleForTesting; @@ -106,6 +116,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.READ; +import static org.apache.hadoop.hdds.scm.net.NetConstants.NODE_COST_DEFAULT; import static org.apache.hadoop.hdds.utils.HddsServerUtil.getRemoteUser; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT; @@ -121,6 +132,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_TIMEOUT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_TIMEOUT_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL_DEFAULT; @@ -187,6 +199,7 @@ public class KeyManagerImpl implements KeyManager { private BackgroundService openKeyCleanupService; private BackgroundService multipartUploadCleanupService; private SnapshotDirectoryCleaningService snapshotDirectoryCleaningService; + private DNSToSwitchMapping dnsToSwitchMapping; public KeyManagerImpl(OzoneManager om, ScmClient scmClient, OzoneConfiguration conf, OMPerformanceMetrics metrics) { @@ -336,6 +349,16 @@ public void start(OzoneConfiguration configuration) { ozoneManager, configuration); multipartUploadCleanupService.start(); } + + Class dnsToSwitchMappingClass = + configuration.getClass( + DFSConfigKeysLegacy.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, + TableMapping.class, DNSToSwitchMapping.class); + DNSToSwitchMapping newInstance = ReflectionUtils.newInstance( + dnsToSwitchMappingClass, configuration); + dnsToSwitchMapping = + ((newInstance instanceof CachedDNSToSwitchMapping) ? 
newInstance + : new CachedDNSToSwitchMapping(newInstance)); } KeyProviderCryptoExtension getKMSProvider() { @@ -609,13 +632,17 @@ public ListKeysResult listKeys(String volumeName, String bucketName, int maxKeys) throws IOException { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); - + OmBucketInfo omBucketInfo = getBucketInfo(volumeName, bucketName); + if (omBucketInfo == null) { + throw new OMException("Bucket " + bucketName + " not found.", + ResultCodes.BUCKET_NOT_FOUND); + } + BucketLayout bucketLayout = omBucketInfo.getBucketLayout(); // We don't take a lock in this path, since we walk the // underlying table using an iterator. That automatically creates a // snapshot of the data, so we don't need these locks at a higher level // when we iterate. - - if (enableFileSystemPaths) { + if (bucketLayout.shouldNormalizePaths(enableFileSystemPaths)) { startKey = OmUtils.normalizeKey(startKey, true); keyPrefix = OmUtils.normalizeKey(keyPrefix, true); } @@ -817,10 +844,19 @@ public OmMultipartUploadListParts listParts(String volumeName, if (nextPartNumberMarker > partNumberMarker) { String partName = getPartName(partKeyInfo, volumeName, bucketName, keyName); + // Before HDDS-9680, MPU part does not have eTag metadata, for + // this case, we return null. The S3G will handle this case by + // using the MPU part name as the eTag field instead. + Optional eTag = partKeyInfo.getPartKeyInfo() + .getMetadataList() + .stream() + .filter(keyValue -> keyValue.getKey().equals(ETAG)) + .findFirst(); OmPartInfo omPartInfo = new OmPartInfo(partKeyInfo.getPartNumber(), partName, partKeyInfo.getPartKeyInfo().getModificationTime(), - partKeyInfo.getPartKeyInfo().getDataSize()); + partKeyInfo.getPartKeyInfo().getDataSize(), + eTag.map(HddsProtos.KeyValue::getValue).orElse(null)); omPartInfoList.add(omPartInfo); //if there are parts, use replication type from one of the parts @@ -907,12 +943,6 @@ private String getPartName(PartKeyInfo partKeyInfo, String volName, return partName; } - /** - * Returns list of ACLs for given Ozone object. - * - * @param obj Ozone object. - * @throws IOException if there is error. - */ @Override public List getAcl(OzoneObj obj) throws IOException { validateOzoneObj(obj); @@ -1834,8 +1864,7 @@ private FileEncryptionInfo getFileEncryptionInfo(OmBucketInfo bucketInfo) return encInfo; } - @VisibleForTesting - void sortDatanodes(String clientMachine, OmKeyInfo... keyInfos) { + private void sortDatanodes(String clientMachine, OmKeyInfo... keyInfos) { if (keyInfos != null && clientMachine != null) { Map, List> sortedPipelines = new HashMap<>(); for (OmKeyInfo keyInfo : keyInfos) { @@ -1855,8 +1884,7 @@ void sortDatanodes(String clientMachine, OmKeyInfo... keyInfos) { LOG.warn("No datanodes in pipeline {}", pipeline.getId()); continue; } - sortedNodes = sortDatanodes(clientMachine, nodes, keyInfo, - uuidList); + sortedNodes = sortDatanodes(nodes, clientMachine); if (sortedNodes != null) { sortedPipelines.put(uuidSet, sortedNodes); } @@ -1864,30 +1892,67 @@ void sortDatanodes(String clientMachine, OmKeyInfo... 
keyInfos) { LOG.debug("Found sorted datanodes for pipeline {} and client {} " + "in cache", pipeline.getId(), clientMachine); } - pipeline.setNodesInOrder(sortedNodes); + if (!Objects.equals(pipeline.getNodesInOrder(), sortedNodes)) { + k.setPipeline(pipeline.copyWithNodesInOrder(sortedNodes)); + } } } } } - private List sortDatanodes(String clientMachine, - List nodes, OmKeyInfo keyInfo, List nodeList) { - List sortedNodes = null; + @VisibleForTesting + public List sortDatanodes(List nodes, + String clientMachine) { + final Node client = getClientNode(clientMachine, nodes); + return ozoneManager.getClusterMap() + .sortByDistanceCost(client, nodes, nodes.size()); + } + + private Node getClientNode(String clientMachine, + List nodes) { + List matchingNodes = new ArrayList<>(); + boolean useHostname = ozoneManager.getConfiguration().getBoolean( + DFSConfigKeysLegacy.DFS_DATANODE_USE_DN_HOSTNAME, + DFSConfigKeysLegacy.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT); + for (DatanodeDetails node : nodes) { + if ((useHostname ? node.getHostName() : node.getIpAddress()).equals( + clientMachine)) { + matchingNodes.add(node); + } + } + return !matchingNodes.isEmpty() ? matchingNodes.get(0) : + getOtherNode(clientMachine); + } + + private Node getOtherNode(String clientMachine) { try { - sortedNodes = scmClient.getBlockClient() - .sortDatanodes(nodeList, clientMachine); - if (LOG.isDebugEnabled()) { - LOG.debug("Sorted datanodes {} for client {}, result: {}", nodes, - clientMachine, sortedNodes); + String clientLocation = resolveNodeLocation(clientMachine); + if (clientLocation != null) { + Node rack = ozoneManager.getClusterMap().getNode(clientLocation); + if (rack instanceof InnerNode) { + return new NodeImpl(clientMachine, clientLocation, + (InnerNode) rack, rack.getLevel() + 1, + NODE_COST_DEFAULT); + } } - } catch (IOException e) { - LOG.warn("Unable to sort datanodes based on distance to client, " - + " volume={}, bucket={}, key={}, client={}, datanodes={}, " - + " exception={}", - keyInfo.getVolumeName(), keyInfo.getBucketName(), - keyInfo.getKeyName(), clientMachine, nodeList, e.getMessage()); + } catch (Exception e) { + LOG.info("Could not resolve client {}: {}", + clientMachine, e.getMessage()); + } + return null; + } + + private String resolveNodeLocation(String hostname) { + List hosts = Collections.singletonList(hostname); + List resolvedHosts = dnsToSwitchMapping.resolve(hosts); + if (resolvedHosts != null && !resolvedHosts.isEmpty()) { + String location = resolvedHosts.get(0); + LOG.debug("Node {} resolved to location {}", hostname, location); + return location; + } else { + LOG.debug("Node resolution did not yield any result for {}", hostname); + return null; } - return sortedNodes; } private static List toNodeUuid(Collection nodes) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java index 7981222c4c6a..86d8352697ae 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java @@ -342,7 +342,7 @@ public HeapEntry next() { } public void close() throws IOException { - iterators.forEach(IOUtils::closeQuietly); + IOUtils.closeQuietly(iterators); } } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java index 
2fbbbe153040..1c0ec78cfb22 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java @@ -26,6 +26,7 @@ import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MutableCounterLong; +import org.apache.hadoop.metrics2.lib.MutableGaugeInt; /** * This class is for maintaining Ozone Manager statistics. @@ -74,7 +75,10 @@ public class OMMetrics implements OmMetadataReaderMetrics { private @Metric MutableCounterLong numSnapshotLists; private @Metric MutableCounterLong numSnapshotDiffJobs; private @Metric MutableCounterLong numSnapshotInfos; + private @Metric MutableCounterLong numSnapshotPurges; + private @Metric MutableCounterLong numSnapshotSetProperties; + private @Metric MutableGaugeInt numSnapshotCacheSize; private @Metric MutableCounterLong numGetFileStatus; private @Metric MutableCounterLong numCreateDirectory; private @Metric MutableCounterLong numCreateFile; @@ -137,6 +141,8 @@ public class OMMetrics implements OmMetadataReaderMetrics { private @Metric MutableCounterLong numSnapshotListFails; private @Metric MutableCounterLong numSnapshotDiffJobFails; private @Metric MutableCounterLong numSnapshotInfoFails; + private @Metric MutableCounterLong numSnapshotPurgeFails; + private @Metric MutableCounterLong numSnapshotSetPropertyFails; private @Metric MutableCounterLong numSnapshotActive; private @Metric MutableCounterLong numSnapshotDeleted; @@ -489,6 +495,14 @@ public void incNumSnapshotInfos() { numSnapshotInfos.incr(); } + public void incNumSnapshotPurges() { + numSnapshotPurges.incr(); + } + + public void incNumSnapshotSetProperties() { + numSnapshotSetProperties.incr(); + } + public void incNumSnapshotDiffJobs() { numSnapshotDiffJobs.incr(); } @@ -504,6 +518,15 @@ public void incNumSnapshotDiffJobFails() { public void incNumSnapshotInfoFails() { numSnapshotInfoFails.incr(); } + + public void incNumSnapshotPurgeFails() { + numSnapshotPurgeFails.incr(); + } + + public void incNumSnapshotSetPropertyFails() { + numSnapshotSetPropertyFails.incr(); + } + public void setNumSnapshotActive(long num) { long currVal = numSnapshotActive.value(); numSnapshotActive.incr(num - currVal); @@ -530,6 +553,17 @@ public void decNumSnapshotDeleted() { numSnapshotDeleted.incr(-1); } + public int getNumSnapshotCacheSize() { + return numSnapshotCacheSize.value(); + } + public void incNumSnapshotCacheSize() { + numSnapshotCacheSize.incr(); + } + + public void decNumSnapshotCacheSize() { + numSnapshotCacheSize.decr(); + } + public void incNumCompleteMultipartUploadFails() { numCompleteMultipartUploadFails.incr(); } @@ -1305,6 +1339,14 @@ public long getNumSnapshotDiffJobs() { return numSnapshotDiffJobs.value(); } + public long getNumSnapshotPurges() { + return numSnapshotPurges.value(); + } + + public long getNumSnapshotSetProperties() { + return numSnapshotSetProperties.value(); + } + public long getNumSnapshotCreateFails() { return numSnapshotCreateFails.value(); } @@ -1329,6 +1371,13 @@ public long getNumSnapshotDeleted() { return numSnapshotDeleted.value(); } + public long getNumSnapshotPurgeFails() { + return numSnapshotPurgeFails.value(); + } + + public long getNumSnapshotSetPropertyFails() { + return numSnapshotSetPropertyFails.value(); + } public void incNumTrashRenames() { numTrashRenames.incr(); diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java index d118e2f4ecc9..f2f11025158d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java @@ -123,20 +123,20 @@ public void addLookupLatency(long latencyInNs) { lookupLatencyNs.add(latencyInNs); } - public MutableRate getLookupRefreshLocationLatencyNs() { + MutableRate getLookupRefreshLocationLatencyNs() { return lookupRefreshLocationLatencyNs; } - public MutableRate getLookupGenerateBlockTokenLatencyNs() { + MutableRate getLookupGenerateBlockTokenLatencyNs() { return lookupGenerateBlockTokenLatencyNs; } - public MutableRate getLookupReadKeyInfoLatencyNs() { + MutableRate getLookupReadKeyInfoLatencyNs() { return lookupReadKeyInfoLatencyNs; } - public MutableRate getLookupAclCheckLatencyNs() { + MutableRate getLookupAclCheckLatencyNs() { return lookupAclCheckLatencyNs; } @@ -144,7 +144,7 @@ public void addS3VolumeContextLatencyNs(long latencyInNs) { s3VolumeContextLatencyNs.add(latencyInNs); } - public MutableRate getLookupResolveBucketLatencyNs() { + MutableRate getLookupResolveBucketLatencyNs() { return lookupResolveBucketLatencyNs; } @@ -152,27 +152,27 @@ public void addGetKeyInfoLatencyNs(long value) { getKeyInfoLatencyNs.add(value); } - public MutableRate getGetKeyInfoAclCheckLatencyNs() { + MutableRate getGetKeyInfoAclCheckLatencyNs() { return getKeyInfoAclCheckLatencyNs; } - public MutableRate getGetKeyInfoGenerateBlockTokenLatencyNs() { + MutableRate getGetKeyInfoGenerateBlockTokenLatencyNs() { return getKeyInfoGenerateBlockTokenLatencyNs; } - public MutableRate getGetKeyInfoReadKeyInfoLatencyNs() { + MutableRate getGetKeyInfoReadKeyInfoLatencyNs() { return getKeyInfoReadKeyInfoLatencyNs; } - public MutableRate getGetKeyInfoRefreshLocationLatencyNs() { + MutableRate getGetKeyInfoRefreshLocationLatencyNs() { return getKeyInfoRefreshLocationLatencyNs; } - public MutableRate getGetKeyInfoResolveBucketLatencyNs() { + MutableRate getGetKeyInfoResolveBucketLatencyNs() { return getKeyInfoResolveBucketLatencyNs; } - public MutableRate getGetKeyInfoSortDatanodesLatencyNs() { + MutableRate getGetKeyInfoSortDatanodesLatencyNs() { return getKeyInfoSortDatanodesLatencyNs; } @@ -216,11 +216,11 @@ public MutableRate getValidateAndUpdateCacheLatencyNs() { return validateAndUpdateCacheLatencyNs; } - public MutableRate getListKeysAclCheckLatencyNs() { + MutableRate getListKeysAclCheckLatencyNs() { return listKeysAclCheckLatencyNs; } - public MutableRate getListKeysResolveBucketLatencyNs() { + MutableRate getListKeysResolveBucketLatencyNs() { return listKeysResolveBucketLatencyNs; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java index 970cd8b95f16..66c525f0712a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience.Private; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.annotation.InterfaceStability.Unstable; +import 
org.apache.hadoop.hdds.protocol.ReconfigureProtocol; import org.apache.hadoop.ozone.om.protocol.OMInterServiceProtocol; import org.apache.hadoop.ozone.om.protocol.OMAdminProtocol; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; @@ -31,6 +32,7 @@ import java.util.List; import java.util.function.Supplier; +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SECURITY_ADMIN_PROTOCOL_ACL; import static org.apache.hadoop.ozone.om.OMConfigKeys .OZONE_OM_SECURITY_CLIENT_PROTOCOL_ACL; @@ -61,7 +63,9 @@ public static OMPolicyProvider getInstance() { new Service(OZONE_OM_SECURITY_ADMIN_PROTOCOL_ACL, OMInterServiceProtocol.class), new Service(OZONE_OM_SECURITY_ADMIN_PROTOCOL_ACL, - OMAdminProtocol.class) + OMAdminProtocol.class), + new Service(OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL, + ReconfigureProtocol.class) ); @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 6bcefc47cb71..afc9eae859a5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -91,7 +91,6 @@ import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OMMultipartUploadUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; @@ -111,7 +110,6 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_FS_SNAPSHOT_MAX_LIMIT_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_CHECKPOINT_DIR_CREATION_POLL_TIMEOUT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_CHECKPOINT_DIR_CREATION_POLL_TIMEOUT_DEFAULT; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; @@ -908,40 +906,6 @@ public long getOmEpoch() { return omEpoch; } - /** - * Returns true if the firstArray startsWith the bytes of secondArray. - * - * @param firstArray - Byte array - * @param secondArray - Byte array - * @return true if the first array bytes match the bytes in the second array. - */ - private boolean startsWith(byte[] firstArray, byte[] secondArray) { - - if (firstArray == null) { - // if both are null, then the arrays match, else if first is null and - // second is not, then this function returns false. - return secondArray == null; - } - - - if (secondArray != null) { - // If the second array is longer then first array cannot be starting with - // the bytes of second array. 
- if (secondArray.length > firstArray.length) { - return false; - } - - for (int ndx = 0; ndx < secondArray.length; ndx++) { - if (firstArray[ndx] != secondArray[ndx]) { - return false; - } - } - return true; //match, return true. - } - return false; // if first is not null and second is null, we define that - // array does not start with same chars. - } - /** * Given a volume, check if it is empty, i.e there are no buckets inside it. * We iterate in the bucket table and see if there is any key that starts with @@ -1632,7 +1596,7 @@ public PendingKeysDeletion getPendingDeletionKeys(final int keyCount, OmBucketInfo bucketInfo = getBucketTable().get(bucketKey); // Get the latest snapshot in snapshot path. - try (ReferenceCounted + try (ReferenceCounted rcLatestSnapshot = getLatestActiveSnapshot( keySplit[1], keySplit[2], omSnapshotManager)) { @@ -1650,13 +1614,12 @@ public PendingKeysDeletion getPendingDeletionKeys(final int keyCount, if (rcLatestSnapshot != null) { Table prevKeyTable = - ((OmSnapshot) rcLatestSnapshot.get()) + rcLatestSnapshot.get() .getMetadataManager() .getKeyTable(bucketInfo.getBucketLayout()); Table prevDeletedTable = - ((OmSnapshot) rcLatestSnapshot.get()) - .getMetadataManager().getDeletedTable(); + rcLatestSnapshot.get().getMetadataManager().getDeletedTable(); String prevKeyTableDBKey = getSnapshotRenamedTable() .get(dbRenameKey); String prevDelTableDBKey = getOzoneKey(info.getVolumeName(), @@ -1742,8 +1705,7 @@ private boolean versionExistsInPreviousSnapshot(OmKeyInfo omKeyInfo, /** * Get the latest OmSnapshot for a snapshot path. */ - public ReferenceCounted< - IOmMetadataReader, SnapshotCache> getLatestActiveSnapshot( + public ReferenceCounted getLatestActiveSnapshot( String volumeName, String bucketName, OmSnapshotManager snapshotManager) throws IOException { @@ -1777,13 +1739,12 @@ IOmMetadataReader, SnapshotCache> getLatestActiveSnapshot( } } - Optional> rcOmSnapshot = + Optional> rcOmSnapshot = snapshotInfo.isPresent() ? 
Optional.ofNullable( - snapshotManager.checkForSnapshot(volumeName, + snapshotManager.getSnapshot(volumeName, bucketName, - getSnapshotPrefix(snapshotInfo.get().getName()), - true) + snapshotInfo.get().getName()) ) : Optional.empty(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java index 7c332788d28a..84a5148720b0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java @@ -387,7 +387,7 @@ public List getAcl(OzoneObj obj) throws IOException { String volumeName = obj.getVolumeName(); String bucketName = obj.getBucketName(); String keyName = obj.getKeyName(); - if (obj.getResourceType() == ResourceType.KEY) { + if (obj.getResourceType() == ResourceType.KEY || obj.getResourceType() == ResourceType.PREFIX) { ResolvedBucket resolvedBucket = ozoneManager.resolveBucketLink( Pair.of(volumeName, bucketName)); volumeName = resolvedBucket.realVolume(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 2dab56ede67b..a3799b389c51 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -35,6 +35,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import java.util.UUID; import com.google.common.cache.RemovalListener; import org.apache.hadoop.hdds.StringUtils; @@ -244,7 +245,7 @@ public OmSnapshotManager(OzoneManager ozoneManager) { OZONE_OM_SNAPSHOT_CACHE_MAX_SIZE, OZONE_OM_SNAPSHOT_CACHE_MAX_SIZE_DEFAULT); - CacheLoader loader = createCacheLoader(); + CacheLoader loader = createCacheLoader(); // TODO: [SNAPSHOT] Remove this if not going to make SnapshotCache impl // pluggable. @@ -271,10 +272,10 @@ public OmSnapshotManager(OzoneManager ozoneManager) { }; // Init snapshot cache - this.snapshotCache = new SnapshotCache(this, loader, softCacheSize); + this.snapshotCache = new SnapshotCache(loader, softCacheSize, ozoneManager.getMetrics()); this.snapshotDiffManager = new SnapshotDiffManager(snapshotDiffDb, differ, - ozoneManager, snapshotCache, snapDiffJobCf, snapDiffReportCf, + ozoneManager, snapDiffJobCf, snapDiffReportCf, columnFamilyOptions, codecRegistry); diffCleanupServiceInterval = ozoneManager.getConfiguration() @@ -325,19 +326,25 @@ public boolean canDisableFsSnapshot(OMMetadataManager ommm) { return isSnapshotInfoTableEmpty; } - private CacheLoader createCacheLoader() { - return new CacheLoader() { + private CacheLoader createCacheLoader() { + return new CacheLoader() { @Nonnull @Override - public OmSnapshot load(@Nonnull String snapshotTableKey) - throws IOException { - // Check if the snapshot exists - final SnapshotInfo snapshotInfo = getSnapshotInfo(snapshotTableKey); + public OmSnapshot load(@Nonnull UUID snapshotId) throws IOException { + String snapshotTableKey = ((OmMetadataManagerImpl) ozoneManager.getMetadataManager()) + .getSnapshotChainManager() + .getTableKey(snapshotId); + + // SnapshotChain maintains in-memory reverse mapping of snapshotId to snapshotName based on snapshotInfoTable. + // So it should not happen ideally. 
+ // If it happens, then either snapshot has been purged in between or SnapshotChain is corrupted + // and missing some entries which needs investigation. + if (snapshotTableKey == null) { + throw new IOException("No snapshot exist with snapshotId: " + snapshotId); + } - // Block snapshot from loading when it is no longer active e.g. DELETED, - // unless this is called from SnapshotDeletingService. - checkSnapshotActive(snapshotInfo, true); + final SnapshotInfo snapshotInfo = getSnapshotInfo(snapshotTableKey); CacheValue cacheValue = ozoneManager.getMetadataManager() .getSnapshotInfoTable() @@ -365,7 +372,7 @@ public OmSnapshot load(@Nonnull String snapshotTableKey) try { // create the other manager instances based on snapshot // metadataManager - PrefixManagerImpl pm = new PrefixManagerImpl(snapshotMetadataManager, + PrefixManagerImpl pm = new PrefixManagerImpl(ozoneManager, snapshotMetadataManager, false); KeyManagerImpl km = new KeyManagerImpl(ozoneManager, ozoneManager.getScmClient(), snapshotMetadataManager, conf, @@ -397,11 +404,32 @@ private static CodecRegistry createCodecRegistryForSnapDiff() { } /** - * Get snapshot instance LRU cache. - * @return LoadingCache + * Get snapshot instance LRU cache size. + * @return cache size. */ - public SnapshotCache getSnapshotCache() { - return snapshotCache; + @VisibleForTesting + public int getSnapshotCacheSize() { + return snapshotCache == null ? 0 : snapshotCache.size(); + } + + /** + * Immediately invalidate all entries and close their DB instances in cache. + */ + public void invalidateCache() { + if (snapshotCache != null) { + snapshotCache.invalidateAll(); + } + } + + /** + * Immediately invalidate an entry. + * + * @param key SnapshotId. + */ + public void invalidateCacheEntry(UUID key) throws IOException { + if (snapshotCache != null) { + snapshotCache.invalidate(key); + } } /** @@ -590,11 +618,11 @@ private static void deleteKeysFromDelKeyTableInSnapshotScope( } // Get OmSnapshot if the keyName has ".snapshot" key indicator - public ReferenceCounted checkForSnapshot( + @SuppressWarnings("unchecked") + public ReferenceCounted getActiveFsMetadataOrSnapshot( String volumeName, String bucketName, - String keyName, - boolean skipActiveCheck) throws IOException { + String keyName) throws IOException { if (keyName == null || !ozoneManager.isFilesystemSnapshotEnabled()) { return ozoneManager.getOmMetadataReader(); } @@ -603,31 +631,57 @@ public ReferenceCounted checkForSnapshot( String[] keyParts = keyName.split(OM_KEY_PREFIX); if (isSnapshotKey(keyParts)) { String snapshotName = keyParts[1]; - if (snapshotName == null || snapshotName.isEmpty()) { - // don't allow snapshot indicator without snapshot name - throw new OMException(INVALID_KEY_NAME); - } - String snapshotTableKey = SnapshotInfo.getTableKey(volumeName, - bucketName, snapshotName); - // Block FS API reads when snapshot is not active. - if (!skipActiveCheck) { - checkSnapshotActive(ozoneManager, snapshotTableKey); - } - - // Warn if actual cache size exceeds the soft limit already. 
- if (snapshotCache.size() > softCacheSize) { - LOG.warn("Snapshot cache size ({}) exceeds configured soft-limit ({}).", - snapshotCache.size(), softCacheSize); - } - - // retrieve the snapshot from the cache - return snapshotCache.get(snapshotTableKey, skipActiveCheck); + return (ReferenceCounted) (ReferenceCounted) + getActiveSnapshot(volumeName, bucketName, snapshotName); } else { return ozoneManager.getOmMetadataReader(); } } + public ReferenceCounted getActiveSnapshot( + String volumeName, + String bucketName, + String snapshotName) throws IOException { + return getSnapshot(volumeName, bucketName, snapshotName, false); + } + + public ReferenceCounted getSnapshot( + String volumeName, + String bucketName, + String snapshotName) throws IOException { + return getSnapshot(volumeName, bucketName, snapshotName, true); + } + + private ReferenceCounted getSnapshot( + String volumeName, + String bucketName, + String snapshotName, + boolean skipActiveCheck) throws IOException { + + if (snapshotName == null || snapshotName.isEmpty()) { + // don't allow snapshot indicator without snapshot name + throw new OMException(INVALID_KEY_NAME); + } + + String snapshotTableKey = SnapshotInfo.getTableKey(volumeName, + bucketName, snapshotName); + + return getSnapshot(snapshotTableKey, skipActiveCheck); + } + + private ReferenceCounted getSnapshot(String snapshotTableKey, boolean skipActiveCheck) + throws IOException { + SnapshotInfo snapshotInfo = SnapshotUtils.getSnapshotInfo(ozoneManager, snapshotTableKey); + // Block FS API reads when snapshot is not active. + if (!skipActiveCheck) { + checkSnapshotActive(snapshotInfo, false); + } + + // retrieve the snapshot from the cache + return snapshotCache.get(snapshotInfo.getSnapshotId()); + } + /** * Returns true if the snapshot is in given status. 
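Reviewer note, not part of the patch: checkForSnapshot(volume, bucket, key, skipActiveCheck) is replaced by the accessor family above. A sketch of the expected call pattern, mirroring the SstFilteringService hunk further down (names such as omSnapshotManager, volumeName, bucketName and snapshotName are assumed to be in scope):

    try (ReferenceCounted<OmSnapshot> rcSnapshot =
             omSnapshotManager.getActiveSnapshot(volumeName, bucketName, snapshotName)) {
      // The reference held by the try-with-resources block keeps the cached snapshot's
      // DB instance from being evicted and closed while it is being read.
      OmSnapshot omSnapshot = rcSnapshot.get();
      // ... read-only access, e.g. omSnapshot.getMetadataManager() ...
    }
    // getSnapshot(volumeName, bucketName, snapshotName) is the same lookup without the
    // "snapshot is active" check, matching the call sites that previously passed
    // skipActiveCheck = true.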
* @param key DB snapshot table key @@ -894,9 +948,9 @@ public void close() { if (snapshotDiffManager != null) { snapshotDiffManager.close(); } - if (snapshotCache != null) { - snapshotCache.invalidateAll(); - } + + invalidateCache(); + if (snapshotDiffCleanupService != null) { snapshotDiffCleanupService.shutdown(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 4b654e3d195d..5966d969de70 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -76,19 +76,22 @@ import org.apache.hadoop.hdds.protocol.SecretKeyProtocol; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos.ReconfigureProtocolService; -import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolPB; +import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolOmPB; import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolServerSideTranslatorPB; import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; +import org.apache.hadoop.hdds.scm.client.ScmTopologyClient; import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.server.OzoneAdmins; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.Table.KeyValue; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.OzoneManagerVersion; +import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.ListOpenFilesResult; import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; import org.apache.hadoop.ozone.om.lock.OMLockDetails; @@ -102,7 +105,6 @@ import org.apache.hadoop.ozone.om.service.OMRangerBGSyncService; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature; import org.apache.hadoop.ozone.security.acl.OzoneAuthorizerFactory; import org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse; @@ -360,6 +362,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl private OzoneBlockTokenSecretManager blockTokenMgr; private CertificateClient certClient; private SecretKeySignerClient secretKeyClient; + private ScmTopologyClient scmTopologyClient; private final Text omRpcAddressTxt; private OzoneConfiguration configuration; private RPC.Server omRpcServer; @@ -458,6 +461,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl private final OzoneLockProvider ozoneLockProvider; private final OMPerformanceMetrics perfMetrics; + private final BucketUtilizationMetrics bucketUtilizationMetrics; private boolean fsSnapshotEnabled; @@ -490,7 +494,7 @@ private enum State { private OmMetadataReader omMetadataReader; // Wrap active DB metadata reader in ReferenceCounted once to avoid // instance creation every single time. 
- private ReferenceCounted rcOmMetadataReader; + private ReferenceCounted rcOmMetadataReader; private OmSnapshotManager omSnapshotManager; @SuppressWarnings("methodlength") @@ -609,6 +613,7 @@ private OzoneManager(OzoneConfiguration conf, StartupOption startupOption) final StorageContainerLocationProtocol scmContainerClient = getScmContainerClient(configuration); // verifies that the SCM info in the OM Version file is correct. final ScmBlockLocationProtocol scmBlockClient = getScmBlockClient(configuration); + scmTopologyClient = new ScmTopologyClient(scmBlockClient); this.scmClient = new ScmClient(scmBlockClient, scmContainerClient, configuration); this.ozoneLockProvider = new OzoneLockProvider(getKeyPathLockEnabled(), @@ -720,6 +725,8 @@ private OzoneManager(OzoneConfiguration conf, StartupOption startupOption) } else { omState = State.INITIALIZED; } + + bucketUtilizationMetrics = BucketUtilizationMetrics.create(metadataManager); } public boolean isStopped() { @@ -845,7 +852,7 @@ private void instantiateServices(boolean withNewSnapshot) throws IOException { delegationTokenMgr = createDelegationTokenSecretManager(configuration); } - prefixManager = new PrefixManagerImpl(metadataManager, isRatisEnabled); + prefixManager = new PrefixManagerImpl(this, metadataManager, isRatisEnabled); keyManager = new KeyManagerImpl(this, scmClient, configuration, perfMetrics); accessAuthorizer = OzoneAuthorizerFactory.forOM(this); @@ -1141,6 +1148,20 @@ public void setCertClient(CertificateClient newClient) throws IOException { serviceInfo = new ServiceInfoProvider(secConfig, this, certClient); } + /** + * For testing purpose only. This allows setting up ScmBlockLocationClient + * without having to fully setup a working cluster. + */ + @VisibleForTesting + public void setScmTopologyClient( + ScmTopologyClient scmTopologyClient) { + this.scmTopologyClient = scmTopologyClient; + } + + public NetworkTopology getClusterMap() { + return scmTopologyClient.getClusterMap(); + } + /** * For testing purpose only. This allows testing token in integration test * without fully setting up a working secure cluster. @@ -1289,7 +1310,7 @@ private RPC.Server startRpcServer(OzoneConfiguration conf, interOMProtocolService, rpcServer); HddsServerUtil.addPBProtocol(conf, OMAdminProtocolPB.class, omAdminProtocolService, rpcServer); - HddsServerUtil.addPBProtocol(conf, ReconfigureProtocolPB.class, + HddsServerUtil.addPBProtocol(conf, ReconfigureProtocolOmPB.class, reconfigureProtocolService, rpcServer); if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, @@ -1636,13 +1657,12 @@ public void start() throws IOException { metadataManager.start(configuration); + startSecretManagerIfNecessary(); // Start Ratis services if (omRatisServer != null) { omRatisServer.start(); } - startSecretManagerIfNecessary(); - upgradeFinalizer.runPrefinalizeStateActions(omStorage, this); Integer layoutVersionInDB = getLayoutVersionInDB(); if (layoutVersionInDB == null || @@ -1677,6 +1697,13 @@ public void start() throws IOException { metricsTimer = new Timer(); metricsTimer.schedule(scheduleOMMetricsWriteTask, 0, period); + try { + scmTopologyClient.start(configuration); + } catch (IOException ex) { + LOG.error("Unable to initialize network topology schema file. 
", ex); + throw new UncheckedIOException(ex); + } + keyManager.start(configuration); try { @@ -2159,15 +2186,16 @@ public long getObjectIdFromTxId(long trxnId) { long getLastTrxnIndexForNonRatis() throws IOException { TransactionInfo transactionInfo = TransactionInfo.readTransactionInfo(metadataManager); - // If the OMTransactionInfo does not exist in DB or if the term is not -1 - // (corresponding to non-Ratis cluster), return 0 so that new incoming + // If the OMTransactionInfo does not exist in DB, return 0 so that new incoming // requests can have transaction index starting from 1. - if (transactionInfo == null || transactionInfo.getTerm() != -1) { + if (transactionInfo == null) { return 0; } - // If there exists a last transaction index in DB, the new incoming - // requests in non-Ratis cluster must have transaction index - // incrementally increasing from the stored transaction index onwards. + // If there exists a last transaction index in DB, including two cases: + // 1. transactionInfo.getTerm() == -1 corresponds to a non-Ratis cluster + // 2. transactionInfo.getTerm() != -1 indicates that the DB may be migrated from Ratis cluster + // For both cases above, the new incoming requests in non-Ratis cluster must have + // transaction index incrementally increasing from the stored transaction index onwards. return transactionInfo.getTransactionIndex(); } @@ -2237,6 +2265,11 @@ public boolean stop() { } keyManager.stop(); stopSecretManager(); + + if (scmTopologyClient != null) { + scmTopologyClient.stop(); + } + if (httpServer != null) { httpServer.stop(); } @@ -2266,6 +2299,10 @@ public boolean stop() { OMHAMetrics.unRegister(); } omRatisServer = null; + + if (bucketUtilizationMetrics != null) { + bucketUtilizationMetrics.unRegister(); + } return true; } catch (Exception e) { LOG.error("OzoneManager stop failed.", e); @@ -2585,8 +2622,7 @@ public boolean getAllowListAllVolumes() { return allowListAllVolumes; } - public ReferenceCounted< - IOmMetadataReader, SnapshotCache> getOmMetadataReader() { + public ReferenceCounted getOmMetadataReader() { return rcOmMetadataReader; } @@ -2856,8 +2892,7 @@ public OmBucketInfo getBucketInfo(String volume, String bucket) */ @Override public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { - try (ReferenceCounted - rcReader = getReader(args)) { + try (ReferenceCounted rcReader = getReader(args)) { return rcReader.get().lookupKey(args); } } @@ -2869,8 +2904,7 @@ public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { public KeyInfoWithVolumeContext getKeyInfo(final OmKeyArgs args, boolean assumeS3Context) throws IOException { - try (ReferenceCounted rcReader = - getReader(args)) { + try (ReferenceCounted rcReader = getReader(args)) { return rcReader.get().getKeyInfo(args, assumeS3Context); } } @@ -2882,7 +2916,7 @@ public KeyInfoWithVolumeContext getKeyInfo(final OmKeyArgs args, public ListKeysResult listKeys(String volumeName, String bucketName, String startKey, String keyPrefix, int maxKeys) throws IOException { - try (ReferenceCounted rcReader = + try (ReferenceCounted rcReader = getReader(volumeName, bucketName, keyPrefix)) { return rcReader.get().listKeys( volumeName, bucketName, startKey, keyPrefix, maxKeys); @@ -3720,7 +3754,7 @@ public OmMultipartUploadList listMultipartUploads(String volumeName, */ @Override public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { - try (ReferenceCounted rcReader = + try (ReferenceCounted rcReader = getReader(args)) { return rcReader.get().getFileStatus(args); } @@ -3731,7 
+3765,7 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { */ @Override public OmKeyInfo lookupFile(OmKeyArgs args) throws IOException { - try (ReferenceCounted rcReader = + try (ReferenceCounted rcReader = getReader(args)) { return rcReader.get().lookupFile(args); } @@ -3750,7 +3784,7 @@ public List listStatus(OmKeyArgs args, boolean recursive, public List listStatus(OmKeyArgs args, boolean recursive, String startKey, long numEntries, boolean allowPartialPrefixes) throws IOException { - try (ReferenceCounted rcReader = + try (ReferenceCounted rcReader = getReader(args)) { return rcReader.get().listStatus( args, recursive, startKey, numEntries, allowPartialPrefixes); @@ -3774,7 +3808,7 @@ public List listStatusLight(OmKeyArgs args, */ @Override public List getAcl(OzoneObj obj) throws IOException { - try (ReferenceCounted rcReader = + try (ReferenceCounted rcReader = getReader(obj)) { return rcReader.get().getAcl(obj); } @@ -3842,7 +3876,7 @@ TermIndex installCheckpoint(String leaderId, Path checkpointLocation, keyManager.stop(); stopSecretManager(); stopTrashEmptier(); - omSnapshotManager.getSnapshotCache().invalidateAll(); + omSnapshotManager.invalidateCache(); // Pause the State Machine so that no new transactions can be applied. // This action also clears the OM Double Buffer so that if there are any // pending transactions in the buffer, they are discarded. @@ -4089,7 +4123,7 @@ private void reloadOMState() throws IOException { startSecretManagerIfNecessary(); startTrashEmptier(configuration); - // Set metrics and start metrics back ground thread + // Set metrics and start metrics background thread metrics.setNumVolumes(metadataManager.countRowsInTable(metadataManager .getVolumeTable())); metrics.setNumBuckets(metadataManager.countRowsInTable(metadataManager @@ -4103,7 +4137,7 @@ private void reloadOMState() throws IOException { metrics.setNumFiles(metadataManager .countEstimatedRowsInTable(metadataManager.getFileTable())); - // Delete the omMetrics file if it exists and save the a new metrics file + // Delete the omMetrics file if it exists and save a new metrics file // with new data Files.deleteIfExists(getMetricsStorageFile().toPath()); saveOmMetrics(); @@ -4546,13 +4580,13 @@ private OmVolumeArgs createS3VolumeContext(String s3Volume, List listOfAcls = new ArrayList<>(); //User ACL listOfAcls.add(new OzoneAcl(ACLIdentityType.USER, - userName, ACLType.ALL, ACCESS)); + userName, ACCESS, ACLType.ALL)); //Group ACLs of the User List userGroups = Arrays.asList(UserGroupInformation .createRemoteUser(userName).getGroupNames()); userGroups.forEach((group) -> listOfAcls.add( - new OzoneAcl(ACLIdentityType.GROUP, group, ACLType.ALL, ACCESS))); + new OzoneAcl(ACLIdentityType.GROUP, group, ACCESS, ACLType.ALL))); // Add ACLs for (OzoneAcl ozoneAcl : listOfAcls) { @@ -4683,7 +4717,7 @@ public EchoRPCResponse echoRPCReq(byte[] payloadReq, int payloadSizeResp, } @Override - public OmKeyInfo recoverLease(String volumeName, String bucketName, String keyName, boolean force) { + public LeaseKeyInfo recoverLease(String volumeName, String bucketName, String keyName, boolean force) { return null; } @@ -4795,12 +4829,10 @@ public static HddsProtos.OzoneManagerDetailsProto getOmDetailsProto( * @param keyArgs OmKeyArgs * @return ReferenceCounted */ - private ReferenceCounted< - IOmMetadataReader, SnapshotCache> getReader(OmKeyArgs keyArgs) + private ReferenceCounted getReader(OmKeyArgs keyArgs) throws IOException { - return omSnapshotManager.checkForSnapshot( - 
keyArgs.getVolumeName(), keyArgs.getBucketName(), keyArgs.getKeyName(), - false); + return omSnapshotManager.getActiveFsMetadataOrSnapshot( + keyArgs.getVolumeName(), keyArgs.getBucketName(), keyArgs.getKeyName()); } /** @@ -4812,11 +4844,10 @@ IOmMetadataReader, SnapshotCache> getReader(OmKeyArgs keyArgs) * @param key key path * @return ReferenceCounted */ - private ReferenceCounted< - IOmMetadataReader, SnapshotCache> getReader( + private ReferenceCounted getReader( String volumeName, String bucketName, String key) throws IOException { - return omSnapshotManager.checkForSnapshot( - volumeName, bucketName, key, false); + return omSnapshotManager.getActiveFsMetadataOrSnapshot( + volumeName, bucketName, key); } /** @@ -4826,14 +4857,12 @@ IOmMetadataReader, SnapshotCache> getReader( * @param ozoneObj OzoneObj * @return ReferenceCounted */ - private ReferenceCounted< - IOmMetadataReader, SnapshotCache> getReader(OzoneObj ozoneObj) + private ReferenceCounted getReader(OzoneObj ozoneObj) throws IOException { - return omSnapshotManager.checkForSnapshot( + return omSnapshotManager.getActiveFsMetadataOrSnapshot( ozoneObj.getVolumeName(), ozoneObj.getBucketName(), - ozoneObj.getKeyName(), - false); + ozoneObj.getKeyName()); } @SuppressWarnings("parameternumber") diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java index d801d1dbf331..e8e930891df6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java @@ -18,6 +18,8 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -39,9 +41,12 @@ import java.util.Objects; import java.util.stream.Collectors; +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INTERNAL_ERROR; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PREFIX_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_PATH_IN_ACL_REQUEST; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.PREFIX_LOCK; import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.PREFIX; @@ -53,6 +58,7 @@ public class PrefixManagerImpl implements PrefixManager { LoggerFactory.getLogger(PrefixManagerImpl.class); private static final List EMPTY_ACL_LIST = new ArrayList<>(); + private final OzoneManager ozoneManager; private final OMMetadataManager metadataManager; // In-memory prefix tree to optimize ACL evaluation @@ -62,9 +68,10 @@ public class PrefixManagerImpl implements PrefixManager { // where we integrate both HA and Non-HA code. 
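Reviewer note, not part of the patch: the OzoneManager handle added to PrefixManagerImpl is what lets prefix ACL reads under link buckets be resolved before the prefix-tree lookup, via the getResolvedPrefixObj() and ResolvedBucket.update(OzoneObj) hunks below. A sketch, assuming prefixObj is a PREFIX-type OzoneObj under a bucket "linkVol/linkBuck" that links to "realVol/realBuck":

    ResolvedBucket resolved = ozoneManager.resolveBucketLink(
        Pair.of(prefixObj.getVolumeName(), prefixObj.getBucketName()));
    OzoneObj resolvedObj = resolved.update(prefixObj);  // returns prefixObj unchanged if not a link
    String prefixPath = resolvedObj.getPath();          // now rooted at realVol/realBuck, so the
                                                        // PREFIX_LOCK and prefix-tree lookup use
                                                        // the real bucket's path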
private boolean isRatisEnabled; - public PrefixManagerImpl(OMMetadataManager metadataManager, + public PrefixManagerImpl(OzoneManager ozoneManager, OMMetadataManager metadataManager, boolean isRatisEnabled) { this.isRatisEnabled = isRatisEnabled; + this.ozoneManager = ozoneManager; this.metadataManager = metadataManager; loadPrefixTree(); } @@ -90,16 +97,11 @@ public OMMetadataManager getMetadataManager() { return metadataManager; } - /** - * Returns list of ACLs for given Ozone object. - * - * @param obj Ozone object. - * @throws IOException if there is error. - */ @Override public List getAcl(OzoneObj obj) throws IOException { validateOzoneObj(obj); - String prefixPath = obj.getPath(); + OzoneObj resolvedObj = getResolvedPrefixObj(obj); + String prefixPath = resolvedObj.getPath(); metadataManager.getLock().acquireReadLock(PREFIX_LOCK, prefixPath); try { String longestPrefix = prefixTree.getLongestPrefix(prefixPath); @@ -149,7 +151,14 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) Objects.requireNonNull(ozObject); Objects.requireNonNull(context); - String prefixPath = ozObject.getPath(); + OzoneObj resolvedObj; + try { + resolvedObj = getResolvedPrefixObj(ozObject); + } catch (IOException e) { + throw new OMException("Failed to resolveBucketLink:", e, INTERNAL_ERROR); + } + + String prefixPath = resolvedObj.getPath(); metadataManager.getLock().acquireReadLock(PREFIX_LOCK, prefixPath); try { String longestPrefix = prefixTree.getLongestPrefix(prefixPath); @@ -218,8 +227,8 @@ public void validateOzoneObj(OzoneObj obj) throws OMException { throw new OMException("Prefix name is required.", PREFIX_NOT_FOUND); } if (!prefixName.endsWith("/")) { - throw new OMException("Invalid prefix name: " + prefixName, - PREFIX_NOT_FOUND); + throw new OMException("Missing trailing slash '/' in prefix name: " + prefixName, + INVALID_PATH_IN_ACL_REQUEST); } } @@ -294,7 +303,7 @@ private void inheritParentAcl(OzoneObj ozoneObj, OmPrefixInfo prefixInfo) OmPrefixInfo parentPrefixInfo = prefixList.get(prefixList.size() - 1); if (parentPrefixInfo != null) { prefixParentFound = OzoneAclUtil.inheritDefaultAcls( - aclsToBeSet, parentPrefixInfo.getAcls()); + aclsToBeSet, parentPrefixInfo.getAcls(), ACCESS); } } @@ -305,13 +314,14 @@ private void inheritParentAcl(OzoneObj ozoneObj, OmPrefixInfo prefixInfo) OmBucketInfo bucketInfo = metadataManager.getBucketTable(). 
get(bucketKey); if (bucketInfo != null) { - OzoneAclUtil.inheritDefaultAcls(aclsToBeSet, bucketInfo.getAcls()); + OzoneAclUtil.inheritDefaultAcls(aclsToBeSet, bucketInfo.getAcls(), ACCESS); } } } public OMPrefixAclOpResult setAcl(OzoneObj ozoneObj, List ozoneAcls, OmPrefixInfo prefixInfo, long transactionLogIndex) throws IOException { + boolean newPrefix = false; if (prefixInfo == null) { OmPrefixInfo.Builder prefixInfoBuilder = new OmPrefixInfo.Builder() @@ -322,10 +332,13 @@ public OMPrefixAclOpResult setAcl(OzoneObj ozoneObj, List ozoneAcls, prefixInfoBuilder.setUpdateID(transactionLogIndex); } prefixInfo = prefixInfoBuilder.build(); + newPrefix = true; } boolean changed = prefixInfo.setAcls(ozoneAcls); - inheritParentAcl(ozoneObj, prefixInfo); + if (newPrefix) { + inheritParentAcl(ozoneObj, prefixInfo); + } prefixTree.insert(ozoneObj.getPath(), prefixInfo); if (!isRatisEnabled) { metadataManager.getPrefixTable().put(ozoneObj.getPath(), prefixInfo); @@ -333,12 +346,31 @@ public OMPrefixAclOpResult setAcl(OzoneObj ozoneObj, List ozoneAcls, return new OMPrefixAclOpResult(prefixInfo, changed); } + /** + * Get the resolved prefix object to handle prefix that is under a link bucket. + * @param obj prefix object + * @return the resolved prefix object if the object belongs under a link bucket. + * Otherwise, return the same prefix object. + * @throws IOException Exception thrown when resolving the bucket link. + */ + public OzoneObj getResolvedPrefixObj(OzoneObj obj) throws IOException { + if (StringUtils.isEmpty(obj.getVolumeName()) || StringUtils.isEmpty(obj.getBucketName())) { + return obj; + } + + ResolvedBucket resolvedBucket = ozoneManager.resolveBucketLink( + Pair.of(obj.getVolumeName(), obj.getBucketName())); + return resolvedBucket.update(obj); + } + /** * Result of the prefix acl operation. */ public static class OMPrefixAclOpResult { - private OmPrefixInfo omPrefixInfo; - private boolean operationsResult; + /** The updated prefix info after applying the prefix acl operation. */ + private final OmPrefixInfo omPrefixInfo; + /** Operation result, success if the underlying ACL is changed, false otherwise. */ + private final boolean operationsResult; public OMPrefixAclOpResult(OmPrefixInfo omPrefixInfo, boolean operationsResult) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java index 9c304ac2f1cc..af1db8bad368 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java @@ -23,6 +23,8 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; import java.util.LinkedHashMap; import java.util.Map; @@ -120,6 +122,15 @@ public KeyArgs update(KeyArgs args) { : args; } + public OzoneObj update(OzoneObj ozoneObj) { + return isLink() + ? 
OzoneObjInfo.Builder.fromOzoneObj(ozoneObj) + .setVolumeName(realVolume()) + .setBucketName(realBucket()) + .build() + : ozoneObj; + } + public boolean isLink() { return !Objects.equals(requestedVolume, realVolume) || !Objects.equals(requestedBucket, realBucket); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java index 18deca1a4ff0..60353590e75c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java @@ -348,6 +348,14 @@ public synchronized void addSnapshot(SnapshotInfo snapshotInfo) snapshotInfo.getTableKey()); } + /** + * Update snapshot chain when snapshot changes (e.g. renamed). + */ + public synchronized void updateSnapshot(SnapshotInfo snapshotInfo) { + snapshotIdToTableKey.computeIfPresent(snapshotInfo.getSnapshotId(), + (snapshotId, dbTableKey) -> snapshotInfo.getTableKey()); + } + /** * Delete snapshot from snapshot chain. */ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java index cae9bc4b3fca..20d0ab0e53eb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java @@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.lock.OMLockDetails; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.rocksdb.RocksDBException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -147,10 +146,9 @@ private void markSSTFilteredFlagForSnapshot(String volume, String bucket, @Override public BackgroundTaskResult call() throws Exception { - Optional snapshotCache = Optional.ofNullable(ozoneManager) - .map(OzoneManager::getOmSnapshotManager) - .map(OmSnapshotManager::getSnapshotCache); - if (!snapshotCache.isPresent()) { + Optional snapshotManager = Optional.ofNullable(ozoneManager) + .map(OzoneManager::getOmSnapshotManager); + if (!snapshotManager.isPresent()) { return BackgroundTaskResult.EmptyTaskResult.newResult(); } Table snapshotInfoTable = @@ -183,10 +181,12 @@ public BackgroundTaskResult call() throws Exception { snapshotInfo.getBucketName()); try ( - ReferenceCounted - snapshotMetadataReader = snapshotCache.get().get( - snapshotInfo.getTableKey())) { - OmSnapshot omSnapshot = (OmSnapshot) snapshotMetadataReader.get(); + ReferenceCounted snapshotMetadataReader = + snapshotManager.get().getActiveSnapshot( + snapshotInfo.getVolumeName(), + snapshotInfo.getBucketName(), + snapshotInfo.getName())) { + OmSnapshot omSnapshot = snapshotMetadataReader.get(); RDBStore rdbStore = (RDBStore) omSnapshot.getMetadataManager() .getStore(); RocksDatabase db = rdbStore.getDb(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/RangerRestMultiTenantAccessController.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/RangerRestMultiTenantAccessController.java deleted file mode 100644 index de2987090906..000000000000 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/multitenant/RangerRestMultiTenantAccessController.java +++ /dev/null @@ -1,681 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om.multitenant; - -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonArray; -import com.google.gson.JsonDeserializationContext; -import com.google.gson.JsonDeserializer; -import com.google.gson.JsonElement; -import com.google.gson.JsonObject; -import com.google.gson.JsonParseException; -import com.google.gson.JsonParser; -import com.google.gson.JsonPrimitive; -import com.google.gson.JsonSerializationContext; -import com.google.gson.JsonSerializer; -import org.apache.commons.lang3.NotImplementedException; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; -import org.apache.http.auth.BasicUserPrincipal; -import org.apache.kerby.util.Base64; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.net.ssl.HttpsURLConnection; -import javax.net.ssl.SSLContext; -import javax.net.ssl.TrustManager; -import javax.net.ssl.X509TrustManager; -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.lang.reflect.Type; -import java.net.URL; -import java.nio.charset.StandardCharsets; -import java.util.Collection; -import java.util.Collections; -import java.util.EnumMap; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RANGER_HTTPS_ADMIN_API_PASSWD; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RANGER_HTTPS_ADMIN_API_USER; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_RANGER_HTTPS_ADDRESS_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_RANGER_OM_CONNECTION_REQUEST_TIMEOUT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_RANGER_OM_CONNECTION_REQUEST_TIMEOUT_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_RANGER_OM_CONNECTION_TIMEOUT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_RANGER_OM_CONNECTION_TIMEOUT_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_RANGER_OM_IGNORE_SERVER_CERT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_RANGER_OM_IGNORE_SERVER_CERT_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_RANGER_SERVICE; - -/** - * Access controller for multi-tenancy implemented using Ranger's REST API. 
- * This class is for testing and is not intended for production use. - * - * TODO: REMOVE. - */ -public class RangerRestMultiTenantAccessController - implements MultiTenantAccessController { - - public static final String OZONE_RANGER_POLICY_HTTP_ENDPOINT = - "/service/public/v2/api/policy/"; - - public static final String OZONE_RANGER_ROLE_HTTP_ENDPOINT = - "/service/public/v2/api/roles/"; - - private String getPolicyByNameEndpoint(String policyName) { - // /service/public/v2/api/service/{servicename}/policy/{policyname} - return rangerHttpsAddress + "/service/public/v2/api/service/" + - rangerService + "/policy/" + policyName; - } - - private String getRoleByNameEndpoint(String roleName) { - // /service/public/v2/api/roles/name/ - return rangerHttpsAddress + "/service/public/v2/api/roles/name/" + roleName; - } - - private static final Logger LOG = LoggerFactory - .getLogger(RangerRestMultiTenantAccessController.class); - - private final OzoneConfiguration conf; - private boolean ignoreServerCert = false; - private int connectionTimeout; - private int connectionRequestTimeout; - private String authHeaderValue; - private final String rangerHttpsAddress; - private final Gson jsonConverter; - private final String rangerService; - private final Map aclToString; - private final Map stringToAcl; - private long lastPolicyUpdateTimeEpochMillis = -1; - - public RangerRestMultiTenantAccessController(Configuration configuration) - throws IOException { - conf = new OzoneConfiguration(configuration); - rangerHttpsAddress = conf.get(OZONE_RANGER_HTTPS_ADDRESS_KEY); - rangerService = conf.get(OZONE_RANGER_SERVICE); - - GsonBuilder gsonBuilder = new GsonBuilder(); - gsonBuilder.registerTypeAdapter(Policy.class, policySerializer); - gsonBuilder.registerTypeAdapter(Policy.class, policyDeserializer); - gsonBuilder.registerTypeAdapter(Role.class, roleSerializer); - gsonBuilder.registerTypeAdapter(Role.class, roleDeserializer); - gsonBuilder.registerTypeAdapter(BasicUserPrincipal.class, userSerializer); - jsonConverter = gsonBuilder.create(); - - aclToString = new EnumMap<>(IAccessAuthorizer.ACLType.class); - stringToAcl = new HashMap<>(); - fillRangerAclStrings(); - initializeRangerConnection(); - } - - private void fillRangerAclStrings() { - aclToString.put(IAccessAuthorizer.ACLType.ALL, "all"); - aclToString.put(IAccessAuthorizer.ACLType.LIST, "list"); - aclToString.put(IAccessAuthorizer.ACLType.READ, "read"); - aclToString.put(IAccessAuthorizer.ACLType.WRITE, "write"); - aclToString.put(IAccessAuthorizer.ACLType.CREATE, "create"); - aclToString.put(IAccessAuthorizer.ACLType.DELETE, "delete"); - aclToString.put(IAccessAuthorizer.ACLType.READ_ACL, "read_acl"); - aclToString.put(IAccessAuthorizer.ACLType.WRITE_ACL, "write_acl"); - aclToString.put(IAccessAuthorizer.ACLType.NONE, ""); - - stringToAcl.put("all", IAccessAuthorizer.ACLType.ALL); - stringToAcl.put("list", IAccessAuthorizer.ACLType.LIST); - stringToAcl.put("read", IAccessAuthorizer.ACLType.READ); - stringToAcl.put("write", IAccessAuthorizer.ACLType.WRITE); - stringToAcl.put("create", IAccessAuthorizer.ACLType.CREATE); - stringToAcl.put("delete", IAccessAuthorizer.ACLType.DELETE); - stringToAcl.put("read_acl", IAccessAuthorizer.ACLType.READ_ACL); - stringToAcl.put("write_acl", IAccessAuthorizer.ACLType.WRITE_ACL); - stringToAcl.put("", IAccessAuthorizer.ACLType.NONE); - } - - private void initializeRangerConnection() { - setupRangerConnectionConfig(); - if (ignoreServerCert) { - setupRangerIgnoreServerCertificate(); - } - 
setupRangerConnectionAuthHeader(); - } - - private void setupRangerConnectionConfig() { - connectionTimeout = (int) conf.getTimeDuration( - OZONE_RANGER_OM_CONNECTION_TIMEOUT, - conf.get( - OZONE_RANGER_OM_CONNECTION_TIMEOUT, - OZONE_RANGER_OM_CONNECTION_TIMEOUT_DEFAULT), - TimeUnit.MILLISECONDS); - connectionRequestTimeout = (int)conf.getTimeDuration( - OZONE_RANGER_OM_CONNECTION_REQUEST_TIMEOUT, - conf.get( - OZONE_RANGER_OM_CONNECTION_REQUEST_TIMEOUT, - OZONE_RANGER_OM_CONNECTION_REQUEST_TIMEOUT_DEFAULT), - TimeUnit.MILLISECONDS - ); - ignoreServerCert = conf.getBoolean( - OZONE_RANGER_OM_IGNORE_SERVER_CERT, - OZONE_RANGER_OM_IGNORE_SERVER_CERT_DEFAULT); - } - - private void setupRangerIgnoreServerCertificate() { - // Create a trust manager that does not validate certificate chains - TrustManager[] trustAllCerts = new TrustManager[]{ - new X509TrustManager() { - public java.security.cert.X509Certificate[] getAcceptedIssuers() { - return null; - } - public void checkClientTrusted( - java.security.cert.X509Certificate[] certs, String authType) { - } - public void checkServerTrusted( - java.security.cert.X509Certificate[] certs, String authType) { - } - } - }; - - try { - SSLContext sc = SSLContext.getInstance("SSL"); - sc.init(null, trustAllCerts, new java.security.SecureRandom()); - HttpsURLConnection.setDefaultSSLSocketFactory(sc.getSocketFactory()); - } catch (Exception e) { - LOG.info("Setting DefaultSSLSocketFactory failed."); - } - } - - private void setupRangerConnectionAuthHeader() { - String userName = conf.get(OZONE_OM_RANGER_HTTPS_ADMIN_API_USER); - String passwd = conf.get(OZONE_OM_RANGER_HTTPS_ADMIN_API_PASSWD); - String auth = userName + ":" + passwd; - byte[] encodedAuth = - Base64.encodeBase64(auth.getBytes(StandardCharsets.UTF_8)); - authHeaderValue = "Basic " + - new String(encodedAuth, StandardCharsets.UTF_8); - } - - - @Override - public Policy createPolicy(Policy policy) throws IOException { - String rangerAdminUrl = - rangerHttpsAddress + OZONE_RANGER_POLICY_HTTP_ENDPOINT; - HttpsURLConnection conn = makeHttpsPostCall(rangerAdminUrl, - jsonConverter.toJsonTree(policy).getAsJsonObject()); - if (!successfulResponseCode(conn.getResponseCode())) { - throw new IOException(String.format("Failed to create policy %s. " + - "Http response code: %d", policy.getName(), conn.getResponseCode())); - } - getResponseData(conn); - - // TODO: Should reconstruct from response data. - return policy; - } - - @Override - public void deletePolicy(String policyName) throws IOException { - String rangerAdminUrl = getPolicyByNameEndpoint(policyName); - HttpsURLConnection conn = makeHttpsDeleteCall(rangerAdminUrl); - if (!successfulResponseCode(conn.getResponseCode())) { - throw new IOException(String.format("Failed to delete policy '%s'. " + - "Http response code: %d", policyName, conn.getResponseCode())); - } - } - - public Map getPolicies() throws Exception { - // This API gets all policies for all services. The - // /public/v2/api/policies/{serviceDefName}/for-resource endpoint is - // supposed to get policies for only a specified service, but it does not - // seem to work. This implementation should be ok for testing purposes as - // this class is intended. - String rangerAdminUrl = - rangerHttpsAddress + OZONE_RANGER_POLICY_HTTP_ENDPOINT; - HttpsURLConnection conn = makeHttpsGetCall(rangerAdminUrl); - if (!successfulResponseCode(conn.getResponseCode())) { - throw new IOException(String.format("Failed to get all policies. 
" + - "Http response code: %d", conn.getResponseCode())); - } - String allPoliciesString = getResponseData(conn); - // Filter out policies not for Ozone service. - JsonArray jsonPoliciesArray = new JsonParser().parse(allPoliciesString) - .getAsJsonArray(); - Map policies = new HashMap<>(); - for (JsonElement jsonPolicy: jsonPoliciesArray) { - JsonObject jsonPolicyObject = jsonPolicy.getAsJsonObject(); - String service = jsonPolicyObject.get("service").getAsString(); - if (service.equals(rangerService)) { - long id = jsonPolicyObject.get("id").getAsLong(); - policies.put(id, jsonConverter.fromJson(jsonPolicyObject, - Policy.class)); - } - } - - return policies; - } - - @Override - public Policy getPolicy(String policyName) throws IOException { - String rangerAdminUrl = getPolicyByNameEndpoint(policyName); - - HttpsURLConnection conn = makeHttpsGetCall(rangerAdminUrl); - if (!successfulResponseCode(conn.getResponseCode())) { - throw new IOException(String.format("Failed to get policy '%s'. " + - "Http response code: %d", policyName, conn.getResponseCode())); - } - String policyInfo = getResponseData(conn); - return jsonConverter.fromJson(policyInfo, Policy.class); - } - - @Override - public List getLabeledPolicies(String label) throws IOException { - throw new NotImplementedException("Not Implemented"); - } - - @Override - public Policy updatePolicy(Policy policy) throws IOException { - throw new NotImplementedException("Not Implemented"); - } - - public void updatePolicy(long policyID, Policy policy) throws IOException { - String rangerAdminUrl = - rangerHttpsAddress + OZONE_RANGER_POLICY_HTTP_ENDPOINT + policyID; - - HttpsURLConnection conn = makeHttpsPutCall(rangerAdminUrl, - jsonConverter.toJsonTree(policy)); - if (!successfulResponseCode(conn.getResponseCode())) { - throw new IOException(String.format("Failed to update policy %d. " + - "Http response code: %d", policyID, conn.getResponseCode())); - } - } - - @Override - public Role createRole(Role role) throws IOException { - String rangerAdminUrl = - rangerHttpsAddress + OZONE_RANGER_ROLE_HTTP_ENDPOINT; - - HttpsURLConnection conn = makeHttpsPostCall(rangerAdminUrl, - jsonConverter.toJsonTree(role).getAsJsonObject()); - if (!successfulResponseCode(conn.getResponseCode())) { - throw new IOException(String.format("Failed to create role %s. " + - "Http response code: %d", role.getName(), conn.getResponseCode())); - } - String responseString = getResponseData(conn); - JsonObject jObject = new JsonParser().parse(responseString) - .getAsJsonObject(); -// return jObject.get("id").getAsLong(); - - // TODO: Should reconstruct from response data. - return role; - } - - @Override - public void deleteRole(String roleName) throws IOException { - String rangerAdminUrl = getRoleByNameEndpoint(roleName); - HttpsURLConnection conn = makeHttpsDeleteCall(rangerAdminUrl); - if (!successfulResponseCode(conn.getResponseCode())) { - throw new IOException(String.format("Failed to delete role '%s'. " + - "Http response code: %d", roleName, conn.getResponseCode())); - } - } - - @Override - public long getRangerServicePolicyVersion() throws IOException { - throw new NotImplementedException("Not Implemented"); - } - - public Map getRoles() throws Exception { - String rangerAdminUrl = - rangerHttpsAddress + OZONE_RANGER_ROLE_HTTP_ENDPOINT; - HttpsURLConnection conn = makeHttpsGetCall(rangerAdminUrl); - if (!successfulResponseCode(conn.getResponseCode())) { - throw new IOException(String.format("Failed to get all roles. 
" + - "Http response code: %d", conn.getResponseCode())); - } - - String allRolesString = getResponseData(conn); - JsonArray rolesArrayJson = - new JsonParser().parse(allRolesString).getAsJsonArray(); - Map roles = new HashMap<>(); - for (JsonElement roleJson: rolesArrayJson) { - long id = roleJson.getAsJsonObject().get("id").getAsLong(); - roles.put(id, jsonConverter.fromJson(roleJson, Role.class)); - } - - return roles; - } - - @Override - public Role getRole(String roleName) throws IOException { - String rangerAdminUrl = getRoleByNameEndpoint(roleName); - - HttpsURLConnection conn = makeHttpsGetCall(rangerAdminUrl); - if (!successfulResponseCode(conn.getResponseCode())) { - throw new IOException(String.format("Failed to get role '%s'. " + - "Http response code: %d", roleName, conn.getResponseCode())); - } - String roleInfo = getResponseData(conn); - return jsonConverter.fromJson(roleInfo, Role.class); - } - - @Override - public Role updateRole(long roleId, Role role) throws IOException { - String rangerAdminUrl = - rangerHttpsAddress + OZONE_RANGER_ROLE_HTTP_ENDPOINT + roleId; - - HttpsURLConnection conn = makeHttpsPutCall(rangerAdminUrl, - jsonConverter.toJsonTree(role)); - if (!successfulResponseCode(conn.getResponseCode())) { - throw new IOException(String.format("Failed to update role %d. " + - "Http response code: %d", roleId, conn.getResponseCode())); - } - - // TODO: Should reconstruct from response data. - return role; - } - - private HttpsURLConnection makeHttpsPutCall(String url, JsonElement content) - throws IOException { - HttpsURLConnection connection = makeBaseHttpsURLConnection(url); - connection.setRequestMethod("PUT"); - return addJsonContentToConnection(connection, content); - } - - private HttpsURLConnection makeHttpsPostCall(String url, JsonElement content) - throws IOException { - HttpsURLConnection connection = makeBaseHttpsURLConnection(url); - connection.setRequestMethod("POST"); - return addJsonContentToConnection(connection, content); - } - - private HttpsURLConnection addJsonContentToConnection( - HttpsURLConnection connection, JsonElement content) throws IOException { - connection.setDoOutput(true); - connection.setRequestProperty("Content-Type", "application/json;"); - try (OutputStream os = connection.getOutputStream()) { - byte[] input = content.toString().getBytes(StandardCharsets.UTF_8); - os.write(input, 0, input.length); - os.flush(); - } - - return connection; - } - - private HttpsURLConnection makeHttpsGetCall(String urlString) - throws IOException { - HttpsURLConnection connection = makeBaseHttpsURLConnection(urlString); - connection.setRequestMethod("GET"); - return connection; - } - - private HttpsURLConnection makeHttpsDeleteCall(String urlString) - throws IOException { - HttpsURLConnection connection = makeBaseHttpsURLConnection(urlString); - connection.setRequestMethod("DELETE"); - return connection; - } - - private HttpsURLConnection makeBaseHttpsURLConnection(String urlString) - throws IOException { - URL url = new URL(urlString); - HttpsURLConnection urlConnection = (HttpsURLConnection)url.openConnection(); - urlConnection.setConnectTimeout(connectionTimeout); - urlConnection.setReadTimeout(connectionRequestTimeout); - urlConnection.setRequestProperty("Accept", "application/json"); - urlConnection.setRequestProperty("Authorization", authHeaderValue); - - return urlConnection; - } - - private String getResponseData(HttpsURLConnection urlConnection) - throws IOException { - StringBuilder response = new StringBuilder(); - try (BufferedReader br 
= new BufferedReader( - new InputStreamReader( - urlConnection.getInputStream(), StandardCharsets.UTF_8))) { - String responseLine; - while ((responseLine = br.readLine()) != null) { - response.append(responseLine.trim()); - } - } - return response.toString(); - } - - private boolean successfulResponseCode(long responseCode) { - return responseCode >= 200 && responseCode < 300; - } - - /// SERIALIZATION /// - - private final JsonDeserializer policyDeserializer = - new JsonDeserializer() { - @Override public Policy deserialize(JsonElement jsonElement, Type type, - JsonDeserializationContext jsonDeserializationContext) - throws JsonParseException { - JsonObject policyJson = jsonElement.getAsJsonObject(); - String name = policyJson.get("name").getAsString(); - Policy.Builder policyB = new Policy.Builder(); - policyB.setName(name); - if (policyJson.has("description")) { - policyB.setDescription(policyJson.get("description").getAsString()); - } - policyB.setEnabled(policyJson.get("isEnabled").getAsBoolean()); - - // Read volume, bucket, keys from json. - JsonObject resourcesJson = - policyJson.get("resources").getAsJsonObject(); - // All Ozone Ranger policies specify at least a volume. - JsonObject jsonVolumeResource = - resourcesJson.get("volume").getAsJsonObject(); - JsonArray volumes = jsonVolumeResource.get("values").getAsJsonArray(); - volumes.forEach(vol -> policyB.addVolume(vol.getAsString())); - - if (resourcesJson.has("bucket")) { - JsonObject jsonBucketResource = - resourcesJson.get("bucket").getAsJsonObject(); - JsonArray buckets = - jsonBucketResource.get("values").getAsJsonArray(); - buckets.forEach(bucket -> policyB.addBucket(bucket.getAsString())); - } - - if (resourcesJson.has("key")) { - JsonObject jsonKeysResource = - resourcesJson.get("key").getAsJsonObject(); - JsonArray keys = jsonKeysResource.get("values").getAsJsonArray(); - keys.forEach(key -> policyB.addKey(key.getAsString())); - } - - // Read Roles and their ACLs. 
- JsonArray policyItemsJson = policyJson.getAsJsonArray("policyItems"); - for (JsonElement policyItemElement : policyItemsJson) { - JsonObject policyItemJson = policyItemElement.getAsJsonObject(); - JsonArray jsonRoles = policyItemJson.getAsJsonArray("roles"); - JsonArray jsonAclArray = policyItemJson.getAsJsonArray("accesses"); - - for (JsonElement jsonAclElem : jsonAclArray) { - JsonObject jsonAcl = jsonAclElem.getAsJsonObject(); - String aclType = jsonAcl.get("type").getAsString(); - Acl acl; - if (jsonAcl.get("isAllowed").getAsBoolean()) { - acl = Acl.allow(stringToAcl.get(aclType)); - } else { - acl = Acl.deny(stringToAcl.get(aclType)); - } - - for (JsonElement roleNameJson : jsonRoles) { - policyB.addRoleAcl(roleNameJson.getAsString(), - Collections.singleton(acl)); - } - } - } - - return policyB.build(); - } - }; - - private final JsonDeserializer roleDeserializer = - new JsonDeserializer() { - @Override public Role deserialize(JsonElement jsonElement, Type type, - JsonDeserializationContext jsonDeserializationContext) - throws JsonParseException { - JsonObject roleJson = jsonElement.getAsJsonObject(); - String name = roleJson.get("name").getAsString(); - Role.Builder role = new Role.Builder(); - role.setName(name); - if (roleJson.has("description")) { - role.setDescription(roleJson.get("description").getAsString()); - } - for (JsonElement jsonUser : roleJson.get("users").getAsJsonArray()) { - String userName = - jsonUser.getAsJsonObject().get("name").getAsString(); - role.addUser(userName, false); - } - - return role.build(); - } - }; - - private final JsonSerializer policySerializer = - new JsonSerializer() { - @Override public JsonElement serialize(Policy javaPolicy, - Type typeOfSrc, JsonSerializationContext context) { - JsonObject jsonPolicy = new JsonObject(); - jsonPolicy.addProperty("name", javaPolicy.getName()); - jsonPolicy.addProperty("service", rangerService); - jsonPolicy.addProperty("isEnabled", javaPolicy.isEnabled()); - if (javaPolicy.getDescription().isPresent()) { - jsonPolicy.addProperty("description", - javaPolicy.getDescription().get()); - } - - // All resources under this policy are added to this object. - JsonObject jsonResources = new JsonObject(); - - // Add volumes. Ranger requires at least one volume to be specified. - JsonArray jsonVolumeNameArray = new JsonArray(); - for (String volumeName : javaPolicy.getVolumes()) { - jsonVolumeNameArray.add(new JsonPrimitive(volumeName)); - } - JsonObject jsonVolumeResource = new JsonObject(); - jsonVolumeResource.add("values", jsonVolumeNameArray); - jsonVolumeResource.addProperty("isRecursive", false); - jsonVolumeResource.addProperty("isExcludes", false); - jsonResources.add("volume", jsonVolumeResource); - - // Add buckets. - JsonArray jsonBucketNameArray = new JsonArray(); - for (String bucketName : javaPolicy.getBuckets()) { - jsonBucketNameArray.add(new JsonPrimitive(bucketName)); - } - - if (jsonBucketNameArray.size() > 0) { - JsonObject jsonBucketResource = new JsonObject(); - jsonBucketResource.add("values", jsonBucketNameArray); - jsonBucketResource.addProperty("isRecursive", false); - jsonBucketResource.addProperty("isExcludes", false); - jsonResources.add("bucket", jsonBucketResource); - } - - // Add keys. 
- JsonArray jsonKeyNameArray = new JsonArray(); - for (String keyName : javaPolicy.getKeys()) { - jsonKeyNameArray.add(new JsonPrimitive(keyName)); - } - if (jsonKeyNameArray.size() > 0) { - JsonObject jsonKeyResource = new JsonObject(); - jsonKeyResource.add("values", jsonKeyNameArray); - jsonKeyResource.addProperty("isRecursive", false); - jsonKeyResource.addProperty("isExcludes", false); - jsonResources.add("key", jsonKeyResource); - } - - jsonPolicy.add("resources", jsonResources); - - // Add roles and their acls to the policy. - JsonArray jsonPolicyItemArray = new JsonArray(); - - // Make a new policy item for each role in the map. - Map> roleAcls = javaPolicy.getRoleAcls(); - for (Map.Entry> entry : roleAcls.entrySet()) { - // Add role to the policy item. - String roleName = entry.getKey(); - JsonObject jsonPolicyItem = new JsonObject(); - JsonArray jsonRoles = new JsonArray(); - jsonRoles.add(new JsonPrimitive(roleName)); - jsonPolicyItem.add("roles", jsonRoles); - - // Add acls to the policy item. - JsonArray jsonAclArray = new JsonArray(); - for (Acl acl : entry.getValue()) { - JsonObject jsonAcl = new JsonObject(); - jsonAcl.addProperty("type", aclToString.get(acl.getAclType())); - jsonAcl.addProperty("isAllowed", acl.isAllowed()); - jsonAclArray.add(jsonAcl); - jsonPolicyItem.add("accesses", jsonAclArray); - } - jsonPolicyItemArray.add(jsonPolicyItem); - } - jsonPolicy.add("policyItems", jsonPolicyItemArray); - - return jsonPolicy; - } - }; - - private final JsonSerializer roleSerializer = - new JsonSerializer() { - @Override public JsonElement serialize(Role javaRole, Type typeOfSrc, - JsonSerializationContext context) { - JsonObject jsonRole = new JsonObject(); - jsonRole.addProperty("name", javaRole.getName()); - - JsonArray jsonUserArray = new JsonArray(); - for (String javaUser : javaRole.getUsersMap().keySet()) { - jsonUserArray.add(jsonConverter.toJsonTree(javaUser)); - } - - jsonRole.add("users", jsonUserArray); - return jsonRole; - } - }; - - private final JsonSerializer userSerializer = - new JsonSerializer() { - @Override public JsonElement serialize(BasicUserPrincipal user, - Type typeOfSrc, JsonSerializationContext context) { - JsonObject jsonMember = new JsonObject(); - jsonMember.addProperty("name", user.getName()); - jsonMember.addProperty("isAdmin", false); - return jsonMember; - } - }; - - public void setPolicyLastUpdateTime(long mtime) { - lastPolicyUpdateTimeEpochMillis = mtime; - } - - public long getPolicyLastUpdateTime() { - return lastPolicyUpdateTimeEpochMillis; - } - - public HashSet getRoleList() { - return null; - } -} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java index 2c1276c43e73..857005bd9292 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ -44,7 +44,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.S3SecretManager; import org.apache.hadoop.ozone.om.codec.OMDBDefinition; -import org.apache.hadoop.ozone.om.ratis.metrics.OzoneManagerDoubleBufferMetrics; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; 
@@ -211,11 +210,15 @@ private OzoneManagerDoubleBuffer(Builder b) { this.isTracingEnabled = b.isTracingEnabled; - isRunning.set(true); // Daemon thread which runs in background and flushes transactions to DB. daemon = new Daemon(this::flushTransactions); daemon.setName(b.threadPrefix + "OMDoubleBufferFlushThread"); + } + + public OzoneManagerDoubleBuffer start() { daemon.start(); + isRunning.set(true); + return this; } private boolean isRatisEnabled() { @@ -591,29 +594,24 @@ private synchronized void swapCurrentAndReadyBuffer() { readyBuffer = temp; } - @VisibleForTesting OzoneManagerDoubleBufferMetrics getMetrics() { return metrics; } /** @return the flushed transaction count to OM DB. */ - @VisibleForTesting long getFlushedTransactionCountForTesting() { return flushedTransactionCount.get(); } /** @return total number of flush iterations run by sync thread. */ - @VisibleForTesting long getFlushIterationsForTesting() { return flushIterations.get(); } - @VisibleForTesting int getCurrentBufferSize() { return currentBuffer.size(); } - @VisibleForTesting int getReadyBufferSize() { return readyBuffer.size(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBufferMetrics.java similarity index 95% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBufferMetrics.java index 351f18528931..afa162cc3ad8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBufferMetrics.java @@ -16,9 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.om.ratis.metrics; - -import com.google.common.annotations.VisibleForTesting; +package org.apache.hadoop.ozone.om.ratis; import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.annotation.Metric; @@ -114,8 +112,7 @@ public void updateFlushTime(long time) { flushTime.add(time); } - @VisibleForTesting - public MutableRate getFlushTime() { + MutableRate getFlushTime() { return flushTime; } @@ -142,8 +139,7 @@ public void updateFlush(int flushedTransactionsInOneIteration) { updateQueueSize(flushedTransactionsInOneIteration); } - @VisibleForTesting - public MutableStat getQueueSize() { + MutableStat getQueueSize() { return queueSize; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java index 90fcba40f5d0..54f65ff8870a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java @@ -88,7 +88,7 @@ public class OzoneManagerStateMachine extends BaseStateMachine { private final OzoneManager ozoneManager; private RequestHandler handler; private RaftGroupId raftGroupId; - private OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer; + private volatile OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer; private final ExecutorService executorService; private final ExecutorService installSnapshotExecutor; private final boolean isTracingEnabled; @@ -109,9 +109,7 @@ public OzoneManagerStateMachine(OzoneManagerRatisServer ratisServer, this.threadPrefix = ozoneManager.getThreadNamePrefix(); this.ozoneManagerDoubleBuffer = buildDoubleBufferForRatis(); - - this.handler = new OzoneManagerRequestHandler(ozoneManager, - ozoneManagerDoubleBuffer); + this.handler = new OzoneManagerRequestHandler(ozoneManager); ThreadFactory build = new ThreadFactoryBuilder().setDaemon(true) .setNameFormat(threadPrefix + @@ -163,12 +161,18 @@ public void notifyLeaderChanged(RaftGroupMemberId groupMemberId, /** Notified by Ratis for non-StateMachine term-index update. */ @Override public synchronized void notifyTermIndexUpdated(long currentTerm, long newIndex) { + // lastSkippedIndex is start of sequence (one less) of continuous notification from ratis + // if there is any applyTransaction (double buffer index), then this gap is handled during double buffer + // notification and lastSkippedIndex will be the start of last continuous sequence. 
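      // Reviewer gloss (assumption, not part of the patch): lastSkippedIndex ends up holding the
      // last index that went through applyTransaction right before a run of notification-only
      // entries. Once the double buffer has flushed up to that index, updateLastAppliedTermIndex()
      // below can fast-forward lastApplied to lastNotifiedTermIndex, because every index in
      // between belongs to that notification-only run.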
final long oldIndex = lastNotifiedTermIndex.getIndex(); if (newIndex - oldIndex > 1) { lastSkippedIndex = newIndex - 1; } final TermIndex newTermIndex = TermIndex.valueOf(currentTerm, newIndex); lastNotifiedTermIndex = assertUpdateIncreasingly("lastNotified", lastNotifiedTermIndex, newTermIndex); + if (lastNotifiedTermIndex.getIndex() - getLastAppliedTermIndex().getIndex() == 1) { + updateLastAppliedTermIndex(lastNotifiedTermIndex); + } } public TermIndex getLastNotifiedTermIndex() { @@ -177,7 +181,15 @@ public TermIndex getLastNotifiedTermIndex() { @Override protected synchronized boolean updateLastAppliedTermIndex(TermIndex newTermIndex) { - assertUpdateIncreasingly("lastApplied", getLastAppliedTermIndex(), newTermIndex); + TermIndex lastApplied = getLastAppliedTermIndex(); + assertUpdateIncreasingly("lastApplied", lastApplied, newTermIndex); + // if newTermIndex getting updated is within sequence of notifiedTermIndex (i.e. from lastSkippedIndex and + // notifiedTermIndex), then can update directly to lastNotifiedTermIndex as it ensure previous double buffer's + // Index is notified or getting notified matching lastSkippedIndex + if (newTermIndex.getIndex() < getLastNotifiedTermIndex().getIndex() + && lastApplied.getIndex() >= lastSkippedIndex) { + newTermIndex = getLastNotifiedTermIndex(); + } return super.updateLastAppliedTermIndex(newTermIndex); } @@ -415,7 +427,6 @@ public synchronized void unpause(long newLastAppliedSnaphsotIndex, if (statePausedCount.decrementAndGet() == 0) { getLifeCycle().startAndTransition(() -> { this.ozoneManagerDoubleBuffer = buildDoubleBufferForRatis(); - handler.updateDoubleBuffer(ozoneManagerDoubleBuffer); this.setLastAppliedTermIndex(TermIndex.valueOf( newLastAppliedSnapShotTermIndex, newLastAppliedSnaphsotIndex)); }); @@ -434,7 +445,8 @@ public OzoneManagerDoubleBuffer buildDoubleBufferForRatis() { .setS3SecretManager(ozoneManager.getS3SecretManager()) .enableRatis(true) .enableTracing(isTracingEnabled) - .build(); + .build() + .start(); } /** @@ -524,7 +536,8 @@ public void close() { */ private OMResponse runCommand(OMRequest request, TermIndex termIndex) { try { - OMClientResponse omClientResponse = handler.handleWriteRequest(request, termIndex); + final OMClientResponse omClientResponse = handler.handleWriteRequest( + request, termIndex, ozoneManagerDoubleBuffer); OMLockDetails omLockDetails = omClientResponse.getOmLockDetails(); OMResponse omResponse = omClientResponse.getOMResponse(); if (omLockDetails != null) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/package-info.java deleted file mode 100644 index e41c645b581a..000000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * package which contains metrics classes. - */ -package org.apache.hadoop.ozone.om.ratis.metrics; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java index 3ab65346e7eb..b055a1f92f82 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java @@ -79,6 +79,7 @@ import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotDeleteRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotMoveDeletedKeysRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotPurgeRequest; +import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotRenameRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotSetPropertyRequest; import org.apache.hadoop.ozone.om.request.upgrade.OMCancelPrepareRequest; import org.apache.hadoop.ozone.om.request.upgrade.OMFinalizeUpgradeRequest; @@ -224,6 +225,8 @@ public static OMClientRequest createClientRequest(OMRequest omRequest, return new OMSnapshotCreateRequest(omRequest); case DeleteSnapshot: return new OMSnapshotDeleteRequest(omRequest); + case RenameSnapshot: + return new OMSnapshotRenameRequest(omRequest); case SnapshotMoveDeletedKeys: return new OMSnapshotMoveDeletedKeysRequest(omRequest); case SnapshotPurge: diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java index 2698d12f9f89..6c8a66ee7ea7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java @@ -42,7 +42,6 @@ import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LayoutVersion; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -79,8 +78,7 @@ public abstract class OMClientRequest implements RequestAuditor { private UserGroupInformation userGroupInformation; private InetAddress inetAddress; - private final ThreadLocal omLockDetails = - ThreadLocal.withInitial(OMLockDetails::new); + private final OMLockDetails omLockDetails = new OMLockDetails(); /** * Stores the result of request execution in @@ -95,7 +93,7 @@ public enum Result { public OMClientRequest(OMRequest omRequest) { Preconditions.checkNotNull(omRequest); this.omRequest = omRequest; - this.omLockDetails.get().clear(); + this.omLockDetails.clear(); } /** * Perform pre-execute steps on a OMRequest. 
@@ -296,7 +294,7 @@ protected void checkACLsWithFSO(OzoneManager ozoneManager, String volumeName, contextBuilder.setOwnerName(bucketOwner); } - try (ReferenceCounted rcMetadataReader = + try (ReferenceCounted rcMetadataReader = ozoneManager.getOmMetadataReader()) { OmMetadataReader omMetadataReader = (OmMetadataReader) rcMetadataReader.get(); @@ -362,7 +360,7 @@ public void checkAcls(OzoneManager ozoneManager, String bucketOwner) throws IOException { - try (ReferenceCounted rcMetadataReader = + try (ReferenceCounted rcMetadataReader = ozoneManager.getOmMetadataReader()) { OzoneAclUtils.checkAllAcls((OmMetadataReader) rcMetadataReader.get(), resType, storeType, aclType, @@ -576,10 +574,10 @@ public static String isValidKeyPath(String path) throws OMException { } public OMLockDetails getOmLockDetails() { - return omLockDetails.get(); + return omLockDetails; } public void mergeOmLockDetails(OMLockDetails details) { - omLockDetails.get().merge(details); + omLockDetails.merge(details); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java index 93b7c92902b6..c0872db0fd61 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java @@ -22,6 +22,8 @@ import java.util.LinkedHashMap; import java.util.Map; +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditAction; import org.apache.hadoop.ozone.audit.AuditMessage; @@ -68,10 +70,16 @@ default Map buildKeyArgsAuditMap(KeyArgs keyArgs) { auditMap.put(OzoneConsts.KEY, keyArgs.getKeyName()); auditMap.put(OzoneConsts.DATA_SIZE, String.valueOf(keyArgs.getDataSize())); - auditMap.put(OzoneConsts.REPLICATION_TYPE, - (keyArgs.getType() != null) ? keyArgs.getType().name() : null); - auditMap.put(OzoneConsts.REPLICATION_FACTOR, - (keyArgs.getFactor() != null) ? 
keyArgs.getFactor().name() : null); + if (keyArgs.hasType()) { + auditMap.put(OzoneConsts.REPLICATION_TYPE, keyArgs.getType().name()); + } + if (keyArgs.hasFactor() && keyArgs.getFactor() != HddsProtos.ReplicationFactor.ZERO) { + auditMap.put(OzoneConsts.REPLICATION_FACTOR, keyArgs.getFactor().name()); + } + if (keyArgs.hasEcReplicationConfig()) { + auditMap.put(OzoneConsts.REPLICATION_CONFIG, + ECReplicationConfig.toString(keyArgs.getEcReplicationConfig())); + } return auditMap; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java index 7cce3ac456f9..f2c343e0d161 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -67,6 +67,7 @@ import java.util.ArrayList; import java.util.List; +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; @@ -330,7 +331,7 @@ private void addDefaultAcls(OmBucketInfo omBucketInfo, // Add default acls from volume. List defaultVolumeAcls = omVolumeArgs.getDefaultAcls(); - OzoneAclUtil.inheritDefaultAcls(acls, defaultVolumeAcls); + OzoneAclUtil.inheritDefaultAcls(acls, defaultVolumeAcls, ACCESS); omBucketInfo.setAcls(acls); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index d4bc91dbfdf6..9b9fb4e7cc5c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -154,10 +154,10 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { .map(info -> info.getProtobuf(getOmRequest().getVersion())) .collect(Collectors.toList())); + generateRequiredEncryptionInfo(keyArgs, newKeyArgs, ozoneManager); + KeyArgs resolvedArgs = resolveBucketAndCheckKeyAcls(newKeyArgs.build(), ozoneManager, IAccessAuthorizer.ACLType.CREATE); - - generateRequiredEncryptionInfo(keyArgs, newKeyArgs, ozoneManager); CreateFileRequest.Builder newCreateFileRequest = createFileRequest.toBuilder().setKeyArgs(resolvedArgs) .setClientID(UniqueId.next()); @@ -255,6 +255,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn ozoneManager.getPrefixManager(), omBucketInfo, pathInfo, trxnLogIndex, ozoneManager.getObjectIdFromTxId(trxnLogIndex), ozoneManager.isRatisEnabled(), repConfig); + validateEncryptionKeyInfo(omBucketInfo, keyArgs); long openVersion = omKeyInfo.getLatestVersionLocations().getVersion(); long clientID = createFileRequest.getClientID(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java index 393be170a5b4..6910061c771c 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java @@ -172,6 +172,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn bucketInfo, pathInfoFSO, trxnLogIndex, pathInfoFSO.getLeafNodeObjectId(), ozoneManager.isRatisEnabled(), repConfig); + validateEncryptionKeyInfo(bucketInfo, keyArgs); long openVersion = omFileInfo.getLatestVersionLocations().getVersion(); long clientID = createFileRequest.getClientID(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java index 798fed7dccf2..6116ed81e879 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java @@ -248,34 +248,44 @@ private RecoverLeaseResponse doWork(OzoneManager ozoneManager, List openKeyLocationInfoList = openKeyLatestVersionLocations.getLocationList(); OmKeyLocationInfo finalBlock = null; + OmKeyLocationInfo penultimateBlock = null; boolean returnKeyInfo = true; if (openKeyLocationInfoList.size() > keyLocationInfoList.size() && openKeyModificationTime > keyInfo.getModificationTime() && openKeyLocationInfoList.size() > 0) { finalBlock = openKeyLocationInfoList.get(openKeyLocationInfoList.size() - 1); + if (openKeyLocationInfoList.size() > 1) { + penultimateBlock = openKeyLocationInfoList.get(openKeyLocationInfoList.size() - 2); + } returnKeyInfo = false; } else if (keyLocationInfoList.size() > 0) { finalBlock = keyLocationInfoList.get(keyLocationInfoList.size() - 1); } - if (finalBlock != null) { + updateBlockInfo(ozoneManager, finalBlock); + updateBlockInfo(ozoneManager, penultimateBlock); + + RecoverLeaseResponse.Builder rb = RecoverLeaseResponse.newBuilder(); + rb.setKeyInfo(returnKeyInfo ? keyInfo.getNetworkProtobuf(getOmRequest().getVersion(), true) : + openKeyInfo.getNetworkProtobuf(getOmRequest().getVersion(), true)); + rb.setIsKeyInfo(returnKeyInfo); + + return rb.build(); + } + + private void updateBlockInfo(OzoneManager ozoneManager, OmKeyLocationInfo blockInfo) throws IOException { + if (blockInfo != null) { // set token to last block if enabled if (ozoneManager.isGrpcBlockTokenEnabled()) { String remoteUser = getRemoteUser().getShortUserName(); OzoneBlockTokenSecretManager secretManager = ozoneManager.getBlockTokenSecretManager(); - finalBlock.setToken(secretManager.generateToken(remoteUser, finalBlock.getBlockID(), - EnumSet.of(READ, WRITE), finalBlock.getLength())); + blockInfo.setToken(secretManager.generateToken(remoteUser, blockInfo.getBlockID(), + EnumSet.of(READ, WRITE), blockInfo.getLength())); } // refresh last block pipeline ContainerWithPipeline containerWithPipeline = - ozoneManager.getScmClient().getContainerClient().getContainerWithPipeline(finalBlock.getContainerID()); - finalBlock.setPipeline(containerWithPipeline.getPipeline()); + ozoneManager.getScmClient().getContainerClient().getContainerWithPipeline(blockInfo.getContainerID()); + blockInfo.setPipeline(containerWithPipeline.getPipeline()); } - - RecoverLeaseResponse.Builder rb = RecoverLeaseResponse.newBuilder(); - rb.setKeyInfo(returnKeyInfo ? 
keyInfo.getNetworkProtobuf(getOmRequest().getVersion(), true) : - openKeyInfo.getNetworkProtobuf(getOmRequest().getVersion(), true)); - - return rb.build(); } private OmKeyInfo getKey(String dbOzoneKey) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index d182e4f6c3dc..830ab70827cb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -300,8 +300,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn keyToDelete, trxnLogIndex, ozoneManager.isRatisEnabled()); checkBucketQuotaInBytes(omMetadataManager, omBucketInfo, correctedSpace); + // using pseudoObjId as objectId can be same in case of overwrite key + long pseudoObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex); String delKeyName = omMetadataManager.getOzoneDeletePathKey( - keyToDelete.getObjectID(), dbOzoneKey); + pseudoObjId, dbOzoneKey); if (null == oldKeyVersionsToDeleteMap) { oldKeyVersionsToDeleteMap = new HashMap<>(); } @@ -333,8 +335,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn if (null == oldKeyVersionsToDeleteMap) { oldKeyVersionsToDeleteMap = new HashMap<>(); } - oldKeyVersionsToDeleteMap.put(delKeyName, - new RepeatedOmKeyInfo(pseudoKeyInfo)); + oldKeyVersionsToDeleteMap.computeIfAbsent(delKeyName, + key -> new RepeatedOmKeyInfo()).addOmKeyInfo(pseudoKeyInfo); } // Add to cache of open key table and key table. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java index f6f8f8b9cb3b..0362f068e8e1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java @@ -227,8 +227,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn correctedSpace); String delKeyName = omMetadataManager .getOzoneKey(volumeName, bucketName, fileName); + // using pseudoObjId as objectId can be same in case of overwrite key + long pseudoObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex); delKeyName = omMetadataManager.getOzoneDeletePathKey( - keyToDelete.getObjectID(), delKeyName); + pseudoObjId, delKeyName); if (null == oldKeyVersionsToDeleteMap) { oldKeyVersionsToDeleteMap = new HashMap<>(); } @@ -262,8 +264,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn if (null == oldKeyVersionsToDeleteMap) { oldKeyVersionsToDeleteMap = new HashMap<>(); } - oldKeyVersionsToDeleteMap.put(delKeyName, - new RepeatedOmKeyInfo(pseudoKeyInfo)); + oldKeyVersionsToDeleteMap.computeIfAbsent(delKeyName, + key -> new RepeatedOmKeyInfo()).addOmKeyInfo(pseudoKeyInfo); } // Add to cache of open key table and key table. 
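Both commit-request classes above switch from put(...) to computeIfAbsent(...) when recording old key versions for deletion. A minimal sketch of why that matters, using plain string lists in place of RepeatedOmKeyInfo and a made-up delete-table key:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Illustrative only: several key versions that map to the same delete-table key
// must be appended to one entry, not overwrite each other.
public final class DeleteMapSketch {
  public static void main(String[] args) {
    Map<String, List<String>> oldKeyVersionsToDelete = new HashMap<>();

    // A plain put(...) would replace the value each time, silently dropping "v1".
    for (String version : new String[] {"v1", "v2"}) {
      oldKeyVersionsToDelete
          .computeIfAbsent("/vol/bucket/key", k -> new ArrayList<>())
          .add(version);
    }
    System.out.println(oldKeyVersionsToDelete); // {/vol/bucket/key=[v1, v2]}
  }
}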
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index 48805d6e4e5a..e9a9f007197a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -281,6 +281,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn ozoneManager.getObjectIdFromTxId(trxnLogIndex), ozoneManager.isRatisEnabled(), replicationConfig); + validateEncryptionKeyInfo(bucketInfo, keyArgs); + long openVersion = omKeyInfo.getLatestVersionLocations().getVersion(); long clientID = createKeyRequest.getClientID(); String dbOpenKeyName = omMetadataManager.getOpenKey(volumeName, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java index 0dec9fa459f6..6fe8c1208586 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java @@ -157,6 +157,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn pathInfoFSO.getLeafNodeObjectId(), ozoneManager.isRatisEnabled(), repConfig); + validateEncryptionKeyInfo(bucketInfo, keyArgs); + long openVersion = omFileInfo.getLatestVersionLocations().getVersion(); long clientID = createKeyRequest.getClientID(); String dbOpenFileName = omMetadataManager diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index d7cdd3632005..ae3715be7bfd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -92,10 +92,13 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.READ; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.WRITE; +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.OBJECT_ID_RECLAIM_BLOCKS; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes .BUCKET_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes .VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; @@ -343,7 +346,7 @@ protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, // Add all acls from direct parent to key. 
OmPrefixInfo prefixInfo = prefixList.get(prefixList.size() - 1); if (prefixInfo != null) { - if (OzoneAclUtil.inheritDefaultAcls(acls, prefixInfo.getAcls())) { + if (OzoneAclUtil.inheritDefaultAcls(acls, prefixInfo.getAcls(), ACCESS)) { return acls; } } @@ -353,7 +356,7 @@ protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, // Inherit DEFAULT acls from parent-dir only if DEFAULT acls for // prefix are not set if (omPathInfo != null) { - if (OzoneAclUtil.inheritDefaultAcls(acls, omPathInfo.getAcls())) { + if (OzoneAclUtil.inheritDefaultAcls(acls, omPathInfo.getAcls(), ACCESS)) { return acls; } } @@ -361,7 +364,7 @@ protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, // Inherit DEFAULT acls from bucket only if DEFAULT acls for // parent-dir are not set. if (bucketInfo != null) { - if (OzoneAclUtil.inheritDefaultAcls(acls, bucketInfo.getAcls())) { + if (OzoneAclUtil.inheritDefaultAcls(acls, bucketInfo.getAcls(), ACCESS)) { return acls; } } @@ -383,17 +386,13 @@ protected static List getAclsForDir(KeyArgs keyArgs, // Inherit DEFAULT acls from parent-dir if (omPathInfo != null) { - if (OzoneAclUtil.inheritDefaultAcls(acls, omPathInfo.getAcls())) { - OzoneAclUtil.toDefaultScope(acls); - } + OzoneAclUtil.inheritDefaultAcls(acls, omPathInfo.getAcls(), DEFAULT); } // Inherit DEFAULT acls from bucket only if DEFAULT acls for // parent-dir are not set. if (acls.isEmpty() && bucketInfo != null) { - if (OzoneAclUtil.inheritDefaultAcls(acls, bucketInfo.getAcls())) { - OzoneAclUtil.toDefaultScope(acls); - } + OzoneAclUtil.inheritDefaultAcls(acls, bucketInfo.getAcls(), DEFAULT); } // add itself acls @@ -591,9 +590,14 @@ protected void getFileEncryptionInfoForMpuKey(KeyArgs keyArgs, omMetadataManager.getOpenKeyTable(getBucketLayout()) .get(dbMultipartOpenKey); - if (omKeyInfo != null && omKeyInfo.getFileEncryptionInfo() != null) { - newKeyArgs.setFileEncryptionInfo( - OMPBHelper.convert(omKeyInfo.getFileEncryptionInfo())); + if (omKeyInfo != null) { + if (omKeyInfo.getFileEncryptionInfo() != null) { + newKeyArgs.setFileEncryptionInfo( + OMPBHelper.convert(omKeyInfo.getFileEncryptionInfo())); + } + } else { + LOG.warn("omKeyInfo not found. Key: " + dbMultipartOpenKey + + ". The upload id " + keyArgs.getMultipartUploadID() + " may be invalid."); } } finally { if (acquireLock) { @@ -769,6 +773,14 @@ protected OmKeyInfo prepareFileInfo( dbKeyInfo.setModificationTime(keyArgs.getModificationTime()); dbKeyInfo.setUpdateID(transactionLogIndex, isRatisEnabled); dbKeyInfo.setReplicationConfig(replicationConfig); + + // Construct a new metadata map from KeyArgs. + // Clear the old one when the key is overwritten. 
+ dbKeyInfo.getMetadata().clear(); + dbKeyInfo.getMetadata().putAll(KeyValueUtil.getFromProtobuf( + keyArgs.getMetadataList())); + + dbKeyInfo.setFileEncryptionInfo(encInfo); return dbKeyInfo; } @@ -1051,4 +1063,11 @@ protected void filterOutBlocksStillInUse(OmKeyInfo referenceKey, LOG.debug("After block filtering, keysToBeFiltered = {}", keysToBeFiltered); } + + protected void validateEncryptionKeyInfo(OmBucketInfo bucketInfo, KeyArgs keyArgs) throws OMException { + if (bucketInfo.getEncryptionKeyInfo() != null && !keyArgs.hasFileEncryptionInfo()) { + throw new OMException("Attempting to create unencrypted file " + + keyArgs.getKeyName() + " in encrypted bucket " + keyArgs.getBucketName(), INVALID_REQUEST); + } + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java index 1f5e623da0d2..e14cfaaad281 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java @@ -74,10 +74,12 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { .setKeyName(normalizedKeyPath) .build(); + OzoneManagerProtocolProtos.KeyArgs newKeyArgs = resolveBucketLink(ozoneManager, keyArgs); + return request.toBuilder() .setSetTimesRequest( setTimesRequest.toBuilder() - .setKeyArgs(keyArgs) + .setKeyArgs(newKeyArgs) .setMtime(getModificationTime())) .build(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java index 345886c050b5..a8490b111524 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java @@ -33,9 +33,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.request.util.ObjectParser; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; @@ -66,9 +64,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); boolean lockAcquired = false; - String volume = null; - String bucket = null; - String key = null; + String prefixPath = null; + OzoneObj resolvedPrefixObj = null; OMPrefixAclOpResult operationResult = null; boolean opResult = false; Result result = null; @@ -76,20 +73,17 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn PrefixManagerImpl prefixManager = (PrefixManagerImpl) ozoneManager.getPrefixManager(); try { + resolvedPrefixObj = prefixManager.getResolvedPrefixObj(getOzoneObj()); prefixManager.validateOzoneObj(getOzoneObj()); - String prefixPath = 
getOzoneObj().getPath(); - validatePrefixPath(prefixPath); - ObjectParser objectParser = new ObjectParser(prefixPath, - OzoneManagerProtocolProtos.OzoneObj.ObjectType.PREFIX); - volume = objectParser.getVolume(); - bucket = objectParser.getBucket(); - key = objectParser.getKey(); + validatePrefixPath(resolvedPrefixObj.getPath()); + prefixPath = resolvedPrefixObj.getPath(); // check Acl if (ozoneManager.getAclsEnabled()) { checkAcls(ozoneManager, OzoneObj.ResourceType.PREFIX, OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE_ACL, - volume, bucket, key); + resolvedPrefixObj.getVolumeName(), resolvedPrefixObj.getBucketName(), + resolvedPrefixObj.getPrefixName()); } mergeOmLockDetails(omMetadataManager.getLock() @@ -102,7 +96,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn } try { - operationResult = apply(prefixManager, omPrefixInfo, trxnLogIndex); + operationResult = apply(resolvedPrefixObj, prefixManager, omPrefixInfo, trxnLogIndex); } catch (IOException ex) { // In HA case this will never happen. // As in add/remove/setAcl method we have logic to update database, @@ -145,16 +139,21 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn } finally { if (lockAcquired) { mergeOmLockDetails(omMetadataManager.getLock() - .releaseWriteLock(PREFIX_LOCK, getOzoneObj().getPath())); + .releaseWriteLock(PREFIX_LOCK, prefixPath)); } if (omClientResponse != null) { omClientResponse.setOmLockDetails(getOmLockDetails()); } } - OzoneObj obj = getOzoneObj(); + OzoneObj obj = resolvedPrefixObj; + if (obj == null) { + // Fall back to the prefix under link bucket + obj = getOzoneObj(); + } + Map auditMap = obj.toAuditMap(); - onComplete(opResult, exception, ozoneManager.getMetrics(), result, + onComplete(obj, opResult, exception, ozoneManager.getMetrics(), result, trxnLogIndex, ozoneManager.getAuditLogger(), auditMap); return omClientResponse; @@ -168,24 +167,26 @@ private void validatePrefixPath(String prefixPath) throws OMException { } /** - * Get the path name from the request. - * @return path name + * Get the prefix ozone object passed in the request. + * Note: The ozone object might still refer to a prefix under a link bucket which + * might require to be resolved. + * @return Prefix ozone object. */ abstract OzoneObj getOzoneObj(); // TODO: Finer grain metrics can be moved to these callbacks. They can also // be abstracted into separate interfaces in future. /** - * Get the initial om response builder with lock. - * @return om response builder. + * Get the initial OM response builder with lock. + * @return OM response builder. */ abstract OMResponse.Builder onInit(); /** - * Get the om client response on success case with lock. - * @param omResponse - * @param omPrefixInfo - * @param operationResult + * Get the OM client response on success case with lock. + * @param omResponse OM response builder. + * @param omPrefixInfo The updated prefix info. + * @param operationResult The operation result. See {@link OMPrefixAclOpResult}. * @return OMClientResponse */ abstract OMClientResponse onSuccess( @@ -194,8 +195,8 @@ abstract OMClientResponse onSuccess( /** * Get the om client response on failure case with lock. - * @param omResponse - * @param exception + * @param omResponse OM response builder. + * @param exception Exception thrown while processing the request. 
* @return OMClientResponse */ abstract OMClientResponse onFailure(OMResponse.Builder omResponse, @@ -204,23 +205,28 @@ abstract OMClientResponse onFailure(OMResponse.Builder omResponse, /** * Completion hook for final processing before return without lock. * Usually used for logging without lock and metric update. - * @param operationResult - * @param exception - * @param omMetrics + * @param resolvedOzoneObj Resolved prefix object in case the prefix is under a link bucket. + * The original ozone object if the prefix is not under a link bucket. + * @param operationResult The operation result. See {@link OMPrefixAclOpResult}. + * @param exception Exception thrown while processing the request. + * @param omMetrics OM metrics used to update the relevant metrics. */ - abstract void onComplete(boolean operationResult, Exception exception, - OMMetrics omMetrics, Result result, long trxnLogIndex, - AuditLogger auditLogger, Map auditMap); + @SuppressWarnings("checkstyle:ParameterNumber") + abstract void onComplete(OzoneObj resolvedOzoneObj, boolean operationResult, + Exception exception, OMMetrics omMetrics, Result result, long trxnLogIndex, + AuditLogger auditLogger, Map auditMap); /** - * Apply the acl operation, if successfully completed returns true, - * else false. - * @param prefixManager - * @param omPrefixInfo - * @param trxnLogIndex - * @throws IOException + * Apply the acl operation to underlying storage (prefix tree and table cache). + * @param resolvedOzoneObj Resolved prefix object in case the prefix is under a link bucket. + * The original ozone object if the prefix is not under a link bucket. + * @param prefixManager Prefix manager used to update the underlying prefix storage. + * @param omPrefixInfo Previous prefix info, null if there is no existing prefix info. + * @param trxnLogIndex Transaction log index. + * @return result of the prefix operation, see {@link OMPrefixAclOpResult}. + * @throws IOException Exception thrown when updating the underlying prefix storage. 
*/ - abstract OMPrefixAclOpResult apply(PrefixManagerImpl prefixManager, + abstract OMPrefixAclOpResult apply(OzoneObj resolvedOzoneObj, PrefixManagerImpl prefixManager, OmPrefixInfo omPrefixInfo, long trxnLogIndex) throws IOException; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java index fe75928795b6..c290b08939c6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java @@ -19,10 +19,8 @@ package org.apache.hadoop.ozone.om.request.key.acl.prefix; import java.io.IOException; -import java.util.List; import java.util.Map; -import com.google.common.collect.Lists; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; @@ -55,8 +53,8 @@ public class OMPrefixAddAclRequest extends OMPrefixAclRequest { private static final Logger LOG = LoggerFactory.getLogger(OMPrefixAddAclRequest.class); - private OzoneObj ozoneObj; - private List ozoneAcls; + private final OzoneObj ozoneObj; + private final OzoneAcl ozoneAcl; public OMPrefixAddAclRequest(OMRequest omRequest) { super(omRequest); @@ -65,8 +63,7 @@ public OMPrefixAddAclRequest(OMRequest omRequest) { // TODO: conversion of OzoneObj to protobuf can be avoided when we have // single code path for HA and Non-HA ozoneObj = OzoneObjInfo.fromProtobuf(addAclRequest.getObj()); - ozoneAcls = Lists.newArrayList( - OzoneAcl.fromProtobuf(addAclRequest.getAcl())); + ozoneAcl = OzoneAcl.fromProtobuf(addAclRequest.getAcl()); } @Override @@ -96,41 +93,41 @@ OMClientResponse onFailure(OMResponse.Builder omResponse, } @Override - void onComplete(boolean operationResult, Exception exception, + void onComplete(OzoneObj resolvedOzoneObj, boolean operationResult, Exception exception, OMMetrics omMetrics, Result result, long trxnLogIndex, AuditLogger auditLogger, Map auditMap) { switch (result) { case SUCCESS: if (LOG.isDebugEnabled()) { if (operationResult) { - LOG.debug("Add acl: {} to path: {} success!", ozoneAcls, - ozoneObj.getPath()); + LOG.debug("Add acl: {} to path: {} success!", ozoneAcl, + resolvedOzoneObj.getPath()); } else { LOG.debug("Acl {} already exists in path {}", - ozoneAcls, ozoneObj.getPath()); + ozoneAcl, resolvedOzoneObj.getPath()); } } break; case FAILURE: - LOG.error("Add acl {} to path {} failed!", ozoneAcls, - ozoneObj.getPath(), exception); + LOG.error("Add acl {} to path {} failed!", ozoneAcl, + resolvedOzoneObj.getPath(), exception); break; default: LOG.error("Unrecognized Result for OMPrefixAddAclRequest: {}", getOmRequest()); } - if (ozoneAcls != null) { - auditMap.put(OzoneConsts.ACL, ozoneAcls.toString()); + if (ozoneAcl != null) { + auditMap.put(OzoneConsts.ACL, ozoneAcl.toString()); } auditLog(auditLogger, buildAuditMessage(OMAction.ADD_ACL, auditMap, exception, getOmRequest().getUserInfo())); } @Override - OMPrefixAclOpResult apply(PrefixManagerImpl prefixManager, + OMPrefixAclOpResult apply(OzoneObj resolvedOzoneObj, PrefixManagerImpl prefixManager, OmPrefixInfo omPrefixInfo, long trxnLogIndex) throws IOException { - return prefixManager.addAcl(ozoneObj, ozoneAcls.get(0), omPrefixInfo, + return prefixManager.addAcl(resolvedOzoneObj, ozoneAcl, 
omPrefixInfo, trxnLogIndex); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java index 67b704121676..7c2666944c57 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java @@ -45,15 +45,15 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclResponse; /** - * Handle add Acl request for prefix. + * Handle remove Acl request for prefix. */ public class OMPrefixRemoveAclRequest extends OMPrefixAclRequest { private static final Logger LOG = LoggerFactory.getLogger(OMPrefixRemoveAclRequest.class); - private OzoneObj ozoneObj; - private List ozoneAcls; + private final OzoneObj ozoneObj; + private final List ozoneAcls; public OMPrefixRemoveAclRequest(OMRequest omRequest) { super(omRequest); @@ -93,25 +93,24 @@ OMClientResponse onFailure(OMResponse.Builder omResponse, } @Override - void onComplete(boolean operationResult, Exception exception, - OMMetrics omMetrics, Result result, long trxnLogIndex, + void onComplete(OzoneObj resolvedOzoneObj, boolean operationResult, + Exception exception, OMMetrics omMetrics, Result result, long trxnLogIndex, AuditLogger auditLogger, Map auditMap) { switch (result) { case SUCCESS: if (LOG.isDebugEnabled()) { if (operationResult) { LOG.debug("Remove acl: {} to path: {} success!", ozoneAcls, - ozoneObj.getPath()); + resolvedOzoneObj.getPath()); } else { LOG.debug("Acl {} not removed from path {} as it does not exist", - ozoneAcls, ozoneObj.getPath()); + ozoneAcls, resolvedOzoneObj.getPath()); } } break; case FAILURE: - omMetrics.incNumBucketUpdateFails(); LOG.error("Remove acl {} to path {} failed!", ozoneAcls, - ozoneObj.getPath(), exception); + resolvedOzoneObj.getPath(), exception); break; default: LOG.error("Unrecognized Result for OMPrefixRemoveAclRequest: {}", @@ -126,9 +125,9 @@ void onComplete(boolean operationResult, Exception exception, } @Override - OMPrefixAclOpResult apply(PrefixManagerImpl prefixManager, + OMPrefixAclOpResult apply(OzoneObj resolvedOzoneObj, PrefixManagerImpl prefixManager, OmPrefixInfo omPrefixInfo, long trxnLogIndex) throws IOException { - return prefixManager.removeAcl(ozoneObj, ozoneAcls.get(0), omPrefixInfo); + return prefixManager.removeAcl(resolvedOzoneObj, ozoneAcls.get(0), omPrefixInfo); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java index 6e93e8ffe5e0..11fc0d150eea 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java @@ -45,15 +45,15 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclResponse; /** - * Handle add Acl request for prefix. + * Handle set Acl request for prefix. 
*/ public class OMPrefixSetAclRequest extends OMPrefixAclRequest { private static final Logger LOG = LoggerFactory.getLogger(OMPrefixSetAclRequest.class); - private OzoneObj ozoneObj; - private List ozoneAcls; + private final OzoneObj ozoneObj; + private final List ozoneAcls; public OMPrefixSetAclRequest(OMRequest omRequest) { super(omRequest); @@ -94,20 +94,19 @@ OMClientResponse onFailure(OMResponse.Builder omResponse, } @Override - void onComplete(boolean operationResult, Exception exception, - OMMetrics omMetrics, Result result, long trxnLogIndex, - AuditLogger auditLogger, Map auditMap) { + void onComplete(OzoneObj resolvedOzoneObj, boolean operationResult, + Exception exception, OMMetrics omMetrics, Result result, + long trxnLogIndex, AuditLogger auditLogger, Map auditMap) { switch (result) { case SUCCESS: if (LOG.isDebugEnabled()) { LOG.debug("Set acl: {} to path: {} success!", ozoneAcls, - ozoneObj.getPath()); + resolvedOzoneObj.getPath()); } break; case FAILURE: - omMetrics.incNumBucketUpdateFails(); LOG.error("Set acl {} to path {} failed!", ozoneAcls, - ozoneObj.getPath(), exception); + resolvedOzoneObj.getPath(), exception); break; default: LOG.error("Unrecognized Result for OMPrefixSetAclRequest: {}", @@ -122,9 +121,9 @@ void onComplete(boolean operationResult, Exception exception, } @Override - OMPrefixAclOpResult apply(PrefixManagerImpl prefixManager, + OMPrefixAclOpResult apply(OzoneObj resolvedOzoneObj, PrefixManagerImpl prefixManager, OmPrefixInfo omPrefixInfo, long trxnLogIndex) throws IOException { - return prefixManager.setAcl(ozoneObj, ozoneAcls, omPrefixInfo, + return prefixManager.setAcl(resolvedOzoneObj, ozoneAcls, omPrefixInfo, trxnLogIndex); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java index f461bbd1719a..a3e7840ccce5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java @@ -238,9 +238,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn correctedSpace); omBucketInfo.incrUsedBytes(correctedSpace); - omResponse.setCommitMultiPartUploadResponse( - MultipartCommitUploadPartResponse.newBuilder() - .setPartName(partName)); + MultipartCommitUploadPartResponse.Builder commitResponseBuilder = MultipartCommitUploadPartResponse.newBuilder() + .setPartName(partName); + String eTag = omKeyInfo.getMetadata().get(OzoneConsts.ETAG); + if (eTag != null) { + commitResponseBuilder.setETag(eTag); + } + omResponse.setCommitMultiPartUploadResponse(commitResponseBuilder); omClientResponse = getOmClientResponse(ozoneManager, oldPartKeyInfo, openKey, omKeyInfo, multipartKey, multipartKeyInfo, omResponse.build(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index 99c98e3b48b2..83b46de7fd1b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -27,6 +27,8 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiFunction; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.ratis.server.protocol.TermIndex; @@ -80,6 +82,32 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest { private static final Logger LOG = LoggerFactory.getLogger(S3MultipartUploadCompleteRequest.class); + private BiFunction eTagBasedValidator = + (part, partKeyInfo) -> { + String eTag = part.getETag(); + AtomicReference dbPartETag = new AtomicReference<>(); + String dbPartName = null; + if (partKeyInfo != null) { + partKeyInfo.getPartKeyInfo().getMetadataList() + .stream() + .filter(keyValue -> keyValue.getKey().equals(OzoneConsts.ETAG)) + .findFirst().ifPresent(kv -> dbPartETag.set(kv.getValue())); + dbPartName = partKeyInfo.getPartName(); + } + return new MultipartCommitRequestPart(eTag, partKeyInfo == null ? null : + dbPartETag.get(), StringUtils.equals(eTag, dbPartETag.get()) || StringUtils.equals(eTag, dbPartName)); + }; + private BiFunction partNameBasedValidator = + (part, partKeyInfo) -> { + String partName = part.getPartName(); + String dbPartName = null; + if (partKeyInfo != null) { + dbPartName = partKeyInfo.getPartName(); + } + return new MultipartCommitRequestPart(partName, partKeyInfo == null ? null : + dbPartName, StringUtils.equals(partName, dbPartName)); + }; + public S3MultipartUploadCompleteRequest(OMRequest omRequest, BucketLayout bucketLayout) { super(omRequest, bucketLayout); @@ -249,7 +277,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .setVolume(requestedVolume) .setBucket(requestedBucket) .setKey(keyName) - .setHash(omKeyInfo.getMetadata().get("ETag"))); + .setHash(omKeyInfo.getMetadata().get(OzoneConsts.ETAG))); long volumeId = omMetadataManager.getVolumeId(volumeName); long bucketId = omMetadataManager.getBucketId(volumeName, bucketName); @@ -389,7 +417,7 @@ protected OmKeyInfo getOmKeyInfo(OzoneManager ozoneManager, long trxnLogIndex, .setOmKeyLocationInfos( Collections.singletonList(keyLocationInfoGroup)) .setAcls(dbOpenKeyInfo.getAcls()) - .addMetadata("ETag", + .addMetadata(OzoneConsts.ETAG, multipartUploadedKeyHash(partKeyInfoMap)); // Check if db entry has ObjectID. This check is required because // it is possible that between multipart key uploads and complete, @@ -419,7 +447,7 @@ protected OmKeyInfo getOmKeyInfo(OzoneManager ozoneManager, long trxnLogIndex, omKeyInfo.setModificationTime(keyArgs.getModificationTime()); omKeyInfo.setDataSize(dataSize); omKeyInfo.setReplicationConfig(dbOpenKeyInfo.getReplicationConfig()); - omKeyInfo.getMetadata().put("ETag", + omKeyInfo.getMetadata().put(OzoneConsts.ETAG, multipartUploadedKeyHash(partKeyInfoMap)); } omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); @@ -491,24 +519,19 @@ private long getMultipartDataSize(String requestedVolume, OzoneManager ozoneManager) throws OMException { long dataSize = 0; int currentPartCount = 0; + boolean eTagBasedValidationAvailable = partsList.stream().allMatch(OzoneManagerProtocolProtos.Part::hasETag); // Now do actual logic, and check for any Invalid part during this. 
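The two BiFunction fields above let the complete-multipart-upload path validate parts by ETag when every requested part carries one, and fall back to the older part-name comparison otherwise. A simplified, standalone sketch of that selection (Part below is a hypothetical class, not the Ozone protobuf Part message):

import java.util.Objects;
import java.util.function.BiFunction;

// Standalone sketch of the validator-selection idea; types and values are invented.
public final class PartValidationSketch {

  static final class Part {
    final String name;
    final String eTag;
    Part(String name, String eTag) {
      this.name = name;
      this.eTag = eTag;
    }
  }

  // True when the requested part matches what is stored: ETag match, or ETag equal to
  // the stored part name for older entries.
  static final BiFunction<Part, Part, Boolean> BY_ETAG =
      (req, stored) -> stored != null
          && (Objects.equals(req.eTag, stored.eTag) || Objects.equals(req.eTag, stored.name));

  // Legacy comparison by part name only.
  static final BiFunction<Part, Part, Boolean> BY_NAME =
      (req, stored) -> stored != null && Objects.equals(req.name, stored.name);

  public static void main(String[] args) {
    Part requested = new Part("part-1", "9a0364b9e99bb480dd25e1f0284c8555");
    Part stored = new Part("part-1", "9a0364b9e99bb480dd25e1f0284c8555");
    boolean eTagAvailable = requested.eTag != null; // allMatch(...) over every part in the real code
    BiFunction<Part, Part, Boolean> validator = eTagAvailable ? BY_ETAG : BY_NAME;
    System.out.println(validator.apply(requested, stored)); // true
  }
}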
for (OzoneManagerProtocolProtos.Part part : partsList) { currentPartCount++; int partNumber = part.getPartNumber(); - String partName = part.getPartName(); - PartKeyInfo partKeyInfo = partKeyInfoMap.get(partNumber); - - String dbPartName = null; - if (partKeyInfo != null) { - dbPartName = partKeyInfo.getPartName(); - } - if (!StringUtils.equals(partName, dbPartName)) { - String omPartName = partKeyInfo == null ? null : dbPartName; + MultipartCommitRequestPart requestPart = eTagBasedValidationAvailable ? + eTagBasedValidator.apply(part, partKeyInfo) : partNameBasedValidator.apply(part, partKeyInfo); + if (!requestPart.isValid()) { throw new OMException( failureMessage(requestedVolume, requestedBucket, keyName) + - ". Provided Part info is { " + partName + ", " + partNumber + - "}, whereas OM has partName " + omPartName, + ". Provided Part info is { " + requestPart.getRequestPartId() + ", " + partNumber + + "}, whereas OM has eTag " + requestPart.getOmPartId(), OMException.ResultCodes.INVALID_PART); } @@ -641,11 +664,41 @@ private String multipartUploadedKeyHash( OmMultipartKeyInfo.PartKeyInfoMap partsList) { StringBuffer keysConcatenated = new StringBuffer(); for (PartKeyInfo partKeyInfo: partsList) { - keysConcatenated.append(KeyValueUtil.getFromProtobuf(partKeyInfo - .getPartKeyInfo().getMetadataList()).get("ETag")); + String partPropertyToComputeHash = KeyValueUtil.getFromProtobuf(partKeyInfo.getPartKeyInfo().getMetadataList()) + .get(OzoneConsts.ETAG); + if (partPropertyToComputeHash == null) { + partPropertyToComputeHash = partKeyInfo.getPartName(); + } + keysConcatenated.append(partPropertyToComputeHash); } return DigestUtils.md5Hex(keysConcatenated.toString()) + "-" + partsList.size(); } + private static class MultipartCommitRequestPart { + private String requestPartId; + + private String omPartId; + + private boolean isValid; + + MultipartCommitRequestPart(String requestPartId, String omPartId, boolean isValid) { + this.requestPartId = requestPartId; + this.omPartId = omPartId; + this.isValid = isValid; + } + + public String getRequestPartId() { + return requestPartId; + } + + public String getOmPartId() { + return omPartId; + } + + public boolean isValid() { + return isValid; + } + } + } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java index b7dba8260269..29c7628e3cca 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java @@ -19,6 +19,10 @@ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.commons.lang3.tuple.Triple; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; @@ -35,27 +39,38 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotPurgeRequest; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; 
import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.NoSuchElementException; +import java.util.Set; import java.util.UUID; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.SNAPSHOT_LOCK; + /** * Handles OMSnapshotPurge Request. * This is an OM internal request. Does not need @RequireSnapshotFeatureState. */ public class OMSnapshotPurgeRequest extends OMClientRequest { + private static final Logger LOG = LoggerFactory.getLogger(OMSnapshotPurgeRequest.class); + public OMSnapshotPurgeRequest(OMRequest omRequest) { super(omRequest); } @Override public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + OMMetrics omMetrics = ozoneManager.getMetrics(); + final long trxnLogIndex = termIndex.getIndex(); + OmSnapshotManager omSnapshotManager = ozoneManager.getOmSnapshotManager(); OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); @@ -76,50 +91,118 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn Map updatedPathPreviousAndGlobalSnapshots = new HashMap<>(); - // Snapshots that are purged by the SnapshotDeletingService - // will update the next snapshot so that is can be deep cleaned - // by the KeyDeletingService in the next run. + // Each snapshot purge operation does three things: + // 1. Update the snapshot chain, + // 2. Update the deep clean flag for the next active snapshot (So that it can be + // deep cleaned by the KeyDeletingService in the next run), + // 3. Finally, purge the snapshot. + // All of these steps have to be performed only when it acquires all the necessary + // locks (lock on the snapshot to be purged, lock on the next active snapshot, and + // lock on the next path and global previous snapshots). Ideally, there is no need + // for locks for snapshot purge and can rely on OMStateMachine because OMStateMachine + // is going to process each request sequentially. + // + // But there is a problem with that. After filtering unnecessary SST files for a snapshot, + // SstFilteringService updates that snapshot's SstFilter flag. SstFilteringService cannot + // use SetSnapshotProperty API because it runs on each OM independently and One OM does + // not know if the snapshot has been filtered on the other OM in HA environment. + // + // If locks are not taken snapshot purge and SstFilteringService will cause a race condition + // and override one's update with another. for (String snapTableKey : snapshotDbKeys) { - SnapshotInfo fromSnapshot = omMetadataManager.getSnapshotInfoTable() - .get(snapTableKey); - - SnapshotInfo nextSnapshot = SnapshotUtils - .getNextActiveSnapshot(fromSnapshot, - snapshotChainManager, omSnapshotManager); - - updateSnapshotInfoAndCache(nextSnapshot, omMetadataManager, - trxnLogIndex, updatedSnapInfos); - updateSnapshotChainAndCache(omMetadataManager, fromSnapshot, - trxnLogIndex, updatedPathPreviousAndGlobalSnapshots); - ozoneManager.getOmSnapshotManager().getSnapshotCache() - .invalidate(snapTableKey); + // To acquire all the locks, a set is maintained which is keyed by snapshotTableKey. + // snapshotTableKey is nothing but /volumeName/bucketName/snapshotName. + // Once all the locks are acquired, it performs the three steps mentioned above and + // release all the locks after that. 
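The comment block above explains why each purge iteration collects its snapshot locks in a set and releases them together. A self-contained sketch of that acquire-in-a-set, release-in-finally pattern (LockTable and the string keys are stand-ins, not the actual OzoneManagerLock/SNAPSHOT_LOCK API):

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantLock;

// Illustrative only: remember every lock taken for one iteration and always release
// them in finally, so a failed update cannot leak a write lock.
public final class SnapshotPurgeLockSketch {

  static final class LockTable {
    private final ConcurrentHashMap<String, ReentrantLock> locks = new ConcurrentHashMap<>();

    void acquireWrite(String key) {
      locks.computeIfAbsent(key, k -> new ReentrantLock()).lock();
    }

    void releaseWrite(String key) {
      locks.get(key).unlock();
    }
  }

  public static void main(String[] args) {
    LockTable lockTable = new LockTable();
    Set<String> lockedKeys = new HashSet<>();
    try {
      for (String snapshotKey : new String[] {"/vol/bucket/snap1", "/vol/bucket/snap2"}) {
        if (lockedKeys.add(snapshotKey)) { // skip keys already locked in this iteration
          lockTable.acquireWrite(snapshotKey);
        }
      }
      // ... update the snapshot chain, the deep-clean flag, and purge the snapshot here ...
      System.out.println("holding locks: " + lockedKeys);
    } finally {
      for (String key : lockedKeys) {
        lockTable.releaseWrite(key); // release everything, even if an update above failed
      }
    }
  }
}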
+ Set> lockSet = new HashSet<>(4, 1); + try { + if (omMetadataManager.getSnapshotInfoTable().get(snapTableKey) == null) { + // Snapshot may have been purged in the previous iteration of SnapshotDeletingService. + LOG.warn("The snapshot {} is no longer in the snapshot table. It may have been removed by a previous " + "snapshot purge request.", snapTableKey); + continue; + } + + acquireLock(lockSet, snapTableKey, omMetadataManager); + SnapshotInfo fromSnapshot = omMetadataManager.getSnapshotInfoTable().get(snapTableKey); + + SnapshotInfo nextSnapshot = + SnapshotUtils.getNextActiveSnapshot(fromSnapshot, snapshotChainManager, omSnapshotManager); + + if (nextSnapshot != null) { + acquireLock(lockSet, nextSnapshot.getTableKey(), omMetadataManager); + } + + // Update the chain first so that it has all the necessary locks before updating deep clean. + updateSnapshotChainAndCache(lockSet, omMetadataManager, fromSnapshot, trxnLogIndex, + updatedPathPreviousAndGlobalSnapshots); + updateSnapshotInfoAndCache(nextSnapshot, omMetadataManager, trxnLogIndex, updatedSnapInfos); + // Remove and close snapshot's RocksDB instance from SnapshotCache. + omSnapshotManager.invalidateCacheEntry(fromSnapshot.getSnapshotId()); + // Update SnapshotInfoTable cache. + omMetadataManager.getSnapshotInfoTable() + .addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()), CacheValue.get(trxnLogIndex)); + } finally { + for (Triple lockKey: lockSet) { + omMetadataManager.getLock() + .releaseWriteLock(SNAPSHOT_LOCK, lockKey.getLeft(), lockKey.getMiddle(), lockKey.getRight()); + } + } } omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(), snapshotDbKeys, updatedSnapInfos, updatedPathPreviousAndGlobalSnapshots); + + omMetrics.incNumSnapshotPurges(); + LOG.info("Successfully executed snapshotPurgeRequest: {{}} along with updating deep clean flags for " + "snapshots: {} and global and previous for snapshots: {}.", + snapshotPurgeRequest, updatedSnapInfos.keySet(), updatedPathPreviousAndGlobalSnapshots.keySet()); } catch (IOException ex) { omClientResponse = new OMSnapshotPurgeResponse( createErrorOMResponse(omResponse, ex)); + omMetrics.incNumSnapshotPurgeFails(); + LOG.error("Failed to execute snapshotPurgeRequest: {{}}.", snapshotPurgeRequest, ex); } return omClientResponse; } + private void acquireLock(Set> lockSet, String snapshotTableKey, + OMMetadataManager omMetadataManager) throws IOException { + SnapshotInfo snapshotInfo = omMetadataManager.getSnapshotInfoTable().get(snapshotTableKey); + + // A lock should never be requested for a non-existent snapshot. + if (snapshotInfo == null) { + LOG.error("Snapshot: '{}' does not exist in the snapshot table.", snapshotTableKey); + throw new OMException("Snapshot: '{" + snapshotTableKey + "}' does not exist in the snapshot table.", + OMException.ResultCodes.FILE_NOT_FOUND); + } + Triple lockKey = Triple.of(snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), + snapshotInfo.getName()); + if (!lockSet.contains(lockKey)) { + mergeOmLockDetails(omMetadataManager.getLock() + .acquireWriteLock(SNAPSHOT_LOCK, lockKey.getLeft(), lockKey.getMiddle(), lockKey.getRight())); + lockSet.add(lockKey); + } + } + private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, OmMetadataManagerImpl omMetadataManager, long trxnLogIndex, - Map updatedSnapInfos) { + Map updatedSnapInfos) throws IOException { if (snapInfo != null) { + // Fetch the latest value again after acquiring lock.
+ SnapshotInfo updatedSnapshotInfo = omMetadataManager.getSnapshotInfoTable().get(snapInfo.getTableKey()); + // Setting next snapshot deep clean to false, Since the // current snapshot is deleted. We can potentially // reclaim more keys in the next snapshot. - snapInfo.setDeepClean(false); + updatedSnapshotInfo.setDeepClean(false); // Update table cache first - omMetadataManager.getSnapshotInfoTable().addCacheEntry( - new CacheKey<>(snapInfo.getTableKey()), - CacheValue.get(trxnLogIndex, snapInfo)); - updatedSnapInfos.put(snapInfo.getTableKey(), snapInfo); + omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(updatedSnapshotInfo.getTableKey()), + CacheValue.get(trxnLogIndex, updatedSnapshotInfo)); + updatedSnapInfos.put(updatedSnapshotInfo.getTableKey(), updatedSnapshotInfo); } } @@ -130,6 +213,7 @@ private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, * update in DB. */ private void updateSnapshotChainAndCache( + Set> lockSet, OmMetadataManagerImpl metadataManager, SnapshotInfo snapInfo, long trxnLogIndex, @@ -141,7 +225,6 @@ private void updateSnapshotChainAndCache( SnapshotChainManager snapshotChainManager = metadataManager .getSnapshotChainManager(); - SnapshotInfo nextPathSnapInfo = null; // If the snapshot is deleted in the previous run, then the in-memory // SnapshotChainManager might throw NoSuchElementException as the snapshot @@ -157,58 +240,63 @@ private void updateSnapshotChainAndCache( return; } - // Updates next path snapshot's previous snapshot ID + String nextPathSnapshotKey = null; + if (hasNextPathSnapshot) { UUID nextPathSnapshotId = snapshotChainManager.nextPathSnapshot( snapInfo.getSnapshotPath(), snapInfo.getSnapshotId()); - - String snapshotTableKey = snapshotChainManager + nextPathSnapshotKey = snapshotChainManager .getTableKey(nextPathSnapshotId); - nextPathSnapInfo = metadataManager.getSnapshotInfoTable() - .get(snapshotTableKey); - if (nextPathSnapInfo != null) { - nextPathSnapInfo.setPathPreviousSnapshotId( - snapInfo.getPathPreviousSnapshotId()); - metadataManager.getSnapshotInfoTable().addCacheEntry( - new CacheKey<>(nextPathSnapInfo.getTableKey()), - CacheValue.get(trxnLogIndex, nextPathSnapInfo)); - updatedPathPreviousAndGlobalSnapshots - .put(nextPathSnapInfo.getTableKey(), nextPathSnapInfo); - } + + // Acquire lock from the snapshot + acquireLock(lockSet, nextPathSnapshotKey, metadataManager); } - // Updates next global snapshot's previous snapshot ID + String nextGlobalSnapshotKey = null; if (hasNextGlobalSnapshot) { - UUID nextGlobalSnapshotId = - snapshotChainManager.nextGlobalSnapshot(snapInfo.getSnapshotId()); - - String snapshotTableKey = snapshotChainManager - .getTableKey(nextGlobalSnapshotId); - - SnapshotInfo nextGlobalSnapInfo = metadataManager.getSnapshotInfoTable() - .get(snapshotTableKey); - // If both next global and path snapshot are same, it may overwrite - // nextPathSnapInfo.setPathPreviousSnapshotID(), adding this check - // will prevent it. 
- if (nextGlobalSnapInfo != null && nextPathSnapInfo != null && - nextGlobalSnapInfo.getSnapshotId().equals( - nextPathSnapInfo.getSnapshotId())) { - nextPathSnapInfo.setGlobalPreviousSnapshotId( - snapInfo.getGlobalPreviousSnapshotId()); - metadataManager.getSnapshotInfoTable().addCacheEntry( - new CacheKey<>(nextPathSnapInfo.getTableKey()), - CacheValue.get(trxnLogIndex, nextPathSnapInfo)); - updatedPathPreviousAndGlobalSnapshots - .put(nextPathSnapInfo.getTableKey(), nextPathSnapInfo); - } else if (nextGlobalSnapInfo != null) { - nextGlobalSnapInfo.setGlobalPreviousSnapshotId( - snapInfo.getGlobalPreviousSnapshotId()); - metadataManager.getSnapshotInfoTable().addCacheEntry( - new CacheKey<>(nextGlobalSnapInfo.getTableKey()), - CacheValue.get(trxnLogIndex, nextGlobalSnapInfo)); - updatedPathPreviousAndGlobalSnapshots - .put(nextGlobalSnapInfo.getTableKey(), nextGlobalSnapInfo); - } + UUID nextGlobalSnapshotId = snapshotChainManager.nextGlobalSnapshot(snapInfo.getSnapshotId()); + nextGlobalSnapshotKey = snapshotChainManager.getTableKey(nextGlobalSnapshotId); + + // Acquire lock from the snapshot + acquireLock(lockSet, nextGlobalSnapshotKey, metadataManager); + } + + SnapshotInfo nextPathSnapInfo = + nextPathSnapshotKey != null ? metadataManager.getSnapshotInfoTable().get(nextPathSnapshotKey) : null; + + SnapshotInfo nextGlobalSnapInfo = + nextGlobalSnapshotKey != null ? metadataManager.getSnapshotInfoTable().get(nextGlobalSnapshotKey) : null; + + // Updates next path snapshot's previous snapshot ID + if (nextPathSnapInfo != null) { + nextPathSnapInfo.setPathPreviousSnapshotId(snapInfo.getPathPreviousSnapshotId()); + metadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(nextPathSnapInfo.getTableKey()), + CacheValue.get(trxnLogIndex, nextPathSnapInfo)); + updatedPathPreviousAndGlobalSnapshots + .put(nextPathSnapInfo.getTableKey(), nextPathSnapInfo); + } + + // Updates next global snapshot's previous snapshot ID + // If both next global and path snapshot are same, it may overwrite + // nextPathSnapInfo.setPathPreviousSnapshotID(), adding this check + // will prevent it. 
+ if (nextGlobalSnapInfo != null && nextPathSnapInfo != null && + nextGlobalSnapInfo.getSnapshotId().equals(nextPathSnapInfo.getSnapshotId())) { + nextPathSnapInfo.setGlobalPreviousSnapshotId(snapInfo.getGlobalPreviousSnapshotId()); + metadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(nextPathSnapInfo.getTableKey()), + CacheValue.get(trxnLogIndex, nextPathSnapInfo)); + updatedPathPreviousAndGlobalSnapshots + .put(nextPathSnapInfo.getTableKey(), nextPathSnapInfo); + } else if (nextGlobalSnapInfo != null) { + nextGlobalSnapInfo.setGlobalPreviousSnapshotId( + snapInfo.getGlobalPreviousSnapshotId()); + metadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(nextGlobalSnapInfo.getTableKey()), + CacheValue.get(trxnLogIndex, nextGlobalSnapInfo)); + updatedPathPreviousAndGlobalSnapshots + .put(nextGlobalSnapInfo.getTableKey(), nextGlobalSnapInfo); } snapshotChainManager.deleteSnapshot(snapInfo); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java new file mode 100644 index 000000000000..9f1875f65d89 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java @@ -0,0 +1,230 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.snapshot; + +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.SNAPSHOT_LOCK; +import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT; + +import java.io.IOException; +import java.nio.file.InvalidPathException; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotRenameResponse; +import org.apache.hadoop.ozone.om.snapshot.RequireSnapshotFeatureState; +import org.apache.hadoop.ozone.om.upgrade.DisallowedUntilLayoutVersion; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameSnapshotRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserInfo; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Time; +import org.apache.ratis.server.protocol.TermIndex; + +/** + * Changes snapshot name. + */ +public class OMSnapshotRenameRequest extends OMClientRequest { + + public OMSnapshotRenameRequest(OMRequest omRequest) { + super(omRequest); + } + + @Override + @DisallowedUntilLayoutVersion(FILESYSTEM_SNAPSHOT) + @RequireSnapshotFeatureState(true) + public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { + final OMRequest omRequest = super.preExecute(ozoneManager); + + final RenameSnapshotRequest renameSnapshotRequest = + omRequest.getRenameSnapshotRequest(); + + final String snapshotNewName = renameSnapshotRequest.getSnapshotNewName(); + + OmUtils.validateSnapshotName(snapshotNewName); + + String volumeName = renameSnapshotRequest.getVolumeName(); + String bucketName = renameSnapshotRequest.getBucketName(); + + // Permission check + UserGroupInformation ugi = createUGIForApi(); + String bucketOwner = ozoneManager.getBucketOwner(volumeName, bucketName, + IAccessAuthorizer.ACLType.READ, OzoneObj.ResourceType.BUCKET); + if (!ozoneManager.isAdmin(ugi) && + !ozoneManager.isOwner(ugi, bucketOwner)) { + throw new OMException( + "Only bucket owners and Ozone admins can rename snapshots", + OMException.ResultCodes.PERMISSION_DENIED); + } + + // Set rename time here so OM leader and follower would have the + // exact same timestamp. 
+ OMRequest.Builder omRequestBuilder = omRequest.toBuilder() + .setRenameSnapshotRequest( + RenameSnapshotRequest.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setSnapshotNewName(snapshotNewName) + .setSnapshotOldName(renameSnapshotRequest.getSnapshotOldName()) + .setRenameTime(Time.now())); + + return omRequestBuilder.build(); + } + + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + TermIndex termIndex) { + boolean acquiredBucketLock = false; + boolean acquiredSnapshotOldLock = false; + boolean acquiredSnapshotNewLock = false; + Exception exception = null; + OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) + ozoneManager.getMetadataManager(); + + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + OMClientResponse omClientResponse = null; + AuditLogger auditLogger = ozoneManager.getAuditLogger(); + + UserInfo userInfo = getOmRequest().getUserInfo(); + + final RenameSnapshotRequest request = + getOmRequest().getRenameSnapshotRequest(); + + final String volumeName = request.getVolumeName(); + final String bucketName = request.getBucketName(); + final String snapshotNewName = request.getSnapshotNewName(); + final String snapshotOldName = request.getSnapshotOldName(); + + SnapshotInfo snapshotOldInfo = null; + + try { + // Acquire bucket lock + mergeOmLockDetails( + omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, + volumeName, bucketName)); + acquiredBucketLock = getOmLockDetails().isLockAcquired(); + + mergeOmLockDetails(omMetadataManager.getLock().acquireWriteLock(SNAPSHOT_LOCK, + volumeName, bucketName, snapshotOldName)); + acquiredSnapshotOldLock = getOmLockDetails().isLockAcquired(); + + mergeOmLockDetails(omMetadataManager.getLock().acquireWriteLock(SNAPSHOT_LOCK, + volumeName, bucketName, snapshotNewName)); + acquiredSnapshotNewLock = getOmLockDetails().isLockAcquired(); + + // Retrieve SnapshotInfo from the table + String snapshotNewTableKey = SnapshotInfo.getTableKey(volumeName, bucketName, snapshotNewName); + + if (omMetadataManager.getSnapshotInfoTable().isExist(snapshotNewTableKey)) { + throw new OMException("Snapshot with name " + snapshotNewName + "already exist", + FILE_ALREADY_EXISTS); + } + + // Retrieve SnapshotInfo from the table + String snapshotOldTableKey = SnapshotInfo.getTableKey(volumeName, bucketName, + snapshotOldName); + snapshotOldInfo = + omMetadataManager.getSnapshotInfoTable().get(snapshotOldTableKey); + + if (snapshotOldInfo == null) { + // Snapshot does not exist + throw new OMException("Snapshot with name " + snapshotOldName + "does not exist", + FILE_NOT_FOUND); + } + + switch (snapshotOldInfo.getSnapshotStatus()) { + case SNAPSHOT_DELETED: + throw new OMException("Snapshot is already deleted. 
" + + "Pending reclamation.", FILE_NOT_FOUND); + case SNAPSHOT_ACTIVE: + break; + default: + // Unknown snapshot non-active state + throw new OMException("Snapshot exists but no longer in active state", + FILE_NOT_FOUND); + } + + snapshotOldInfo.setName(snapshotNewName); + + omMetadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(snapshotOldTableKey), + CacheValue.get(termIndex.getIndex())); + + omMetadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(snapshotNewTableKey), + CacheValue.get(termIndex.getIndex(), snapshotOldInfo)); + + omMetadataManager.getSnapshotChainManager().updateSnapshot(snapshotOldInfo); + + omResponse.setRenameSnapshotResponse( + OzoneManagerProtocolProtos.RenameSnapshotResponse.newBuilder() + .setSnapshotInfo(snapshotOldInfo.getProtobuf())); + omClientResponse = new OMSnapshotRenameResponse( + omResponse.build(), snapshotOldTableKey, snapshotNewTableKey, snapshotOldInfo); + + } catch (IOException | InvalidPathException ex) { + exception = ex; + omClientResponse = new OMSnapshotRenameResponse( + createErrorOMResponse(omResponse, exception)); + } finally { + if (acquiredSnapshotNewLock) { + mergeOmLockDetails(omMetadataManager.getLock().releaseWriteLock(SNAPSHOT_LOCK, volumeName, + bucketName, snapshotNewName)); + } + if (acquiredSnapshotOldLock) { + mergeOmLockDetails(omMetadataManager.getLock().releaseWriteLock(SNAPSHOT_LOCK, volumeName, + bucketName, snapshotOldName)); + } + if (acquiredBucketLock) { + mergeOmLockDetails(omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, + bucketName)); + } + if (omClientResponse != null) { + omClientResponse.setOmLockDetails(getOmLockDetails()); + } + } + + if (snapshotOldInfo == null) { + // Dummy SnapshotInfo for logging and audit logging when erred + snapshotOldInfo = SnapshotInfo.newInstance(volumeName, bucketName, + snapshotOldName, null, Time.now()); + } + + // Perform audit logging outside the lock + auditLog(auditLogger, buildAuditMessage(OMAction.RENAME_SNAPSHOT, + snapshotOldInfo.toAuditMap(), exception, userInfo)); + return omClientResponse; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java index b3dd5206c993..c4ca3dc99e3c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; @@ -36,7 +37,8 @@ import java.io.IOException; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_SNAPSHOT_ERROR; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.SNAPSHOT_LOCK; /** * Updates the exclusive size of the snapshot. 
@@ -51,6 +53,7 @@ public OMSnapshotSetPropertyRequest(OMRequest omRequest) { @Override public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + OMMetrics omMetrics = ozoneManager.getMetrics(); OMClientResponse omClientResponse = null; OMMetadataManager metadataManager = ozoneManager.getMetadataManager(); @@ -62,16 +65,31 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .getSetSnapshotPropertyRequest(); SnapshotInfo updatedSnapInfo = null; + String snapshotKey = setSnapshotPropertyRequest.getSnapshotKey(); + boolean acquiredSnapshotLock = false; + String volumeName = null; + String bucketName = null; + String snapshotName = null; + try { - String snapshotKey = setSnapshotPropertyRequest.getSnapshotKey(); + SnapshotInfo snapshotInfo = metadataManager.getSnapshotInfoTable().get(snapshotKey); + if (snapshotInfo == null) { + LOG.error("Snapshot: '{}' does not exist in the snapshot table.", snapshotKey); + throw new OMException("Snapshot: '{" + snapshotKey + "}' does not exist in the snapshot table.", FILE_NOT_FOUND); + } + + volumeName = snapshotInfo.getVolumeName(); + bucketName = snapshotInfo.getBucketName(); + snapshotName = snapshotInfo.getName(); + + mergeOmLockDetails(metadataManager.getLock() + .acquireWriteLock(SNAPSHOT_LOCK, volumeName, bucketName, snapshotName)); + + acquiredSnapshotLock = getOmLockDetails().isLockAcquired(); + updatedSnapInfo = metadataManager.getSnapshotInfoTable() .get(snapshotKey); - if (updatedSnapInfo == null) { - LOG.error("SnapshotInfo for Snapshot: {} is not found", snapshotKey); - throw new OMException("SnapshotInfo for Snapshot: " + snapshotKey + - " is not found", INVALID_SNAPSHOT_ERROR); - } if (setSnapshotPropertyRequest.hasDeepCleanedDeletedDir()) { updatedSnapInfo.setDeepCleanedDeletedDir(setSnapshotPropertyRequest @@ -101,9 +119,21 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn CacheValue.get(termIndex.getIndex(), updatedSnapInfo)); omClientResponse = new OMSnapshotSetPropertyResponse( omResponse.build(), updatedSnapInfo); + omMetrics.incNumSnapshotSetProperties(); + LOG.info("Successfully executed snapshotSetPropertyRequest: {{}}.", setSnapshotPropertyRequest); } catch (IOException ex) { omClientResponse = new OMSnapshotSetPropertyResponse( createErrorOMResponse(omResponse, ex)); + omMetrics.incNumSnapshotSetPropertyFails(); + LOG.error("Failed to execute snapshotSetPropertyRequest: {{}}.", setSnapshotPropertyRequest, ex); + } finally { + if (acquiredSnapshotLock) { + mergeOmLockDetails(metadataManager.getLock() + .releaseWriteLock(SNAPSHOT_LOCK, volumeName, bucketName, snapshotName)); + } + if (omClientResponse != null) { + omClientResponse.setOmLockDetails(getOmLockDetails()); + } } return omClientResponse; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/ValidatorRegistry.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/ValidatorRegistry.java index 8eeb7bf0e4aa..610949e0f8a4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/ValidatorRegistry.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/ValidatorRegistry.java @@ -71,7 +71,7 @@ public class ValidatorRegistry { Reflections reflections = new Reflections(new ConfigurationBuilder() .setUrls(searchUrls) .setScanners(new MethodAnnotationsScanner()) - .useParallelExecutor() + .setParallel(true) ); Set
describedValidators = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java index bb9562dff21a..848c5c308906 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -36,7 +35,6 @@ import org.apache.hadoop.ozone.om.request.key.OMDirectoriesPurgeRequestWithFSO; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.slf4j.Logger; @@ -50,7 +48,6 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; /** * Response for {@link OMDirectoriesPurgeRequestWithFSO} request. @@ -86,13 +83,12 @@ public void addToDBBatch(OMMetadataManager metadataManager, ((OmMetadataManagerImpl) metadataManager) .getOzoneManager().getOmSnapshotManager(); - try (ReferenceCounted - rcFromSnapshotInfo = omSnapshotManager.checkForSnapshot( + try (ReferenceCounted + rcFromSnapshotInfo = omSnapshotManager.getSnapshot( fromSnapshotInfo.getVolumeName(), fromSnapshotInfo.getBucketName(), - getSnapshotPrefix(fromSnapshotInfo.getName()), - true)) { - OmSnapshot fromSnapshot = (OmSnapshot) rcFromSnapshotInfo.get(); + fromSnapshotInfo.getName())) { + OmSnapshot fromSnapshot = rcFromSnapshotInfo.get(); DBStore fromSnapshotStore = fromSnapshot.getMetadataManager() .getStore(); // Init Batch Operation for snapshot db. 
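The hunk above swaps checkForSnapshot(volume, bucket, getSnapshotPrefix(name), true) plus an explicit (OmSnapshot) cast for a getSnapshot(...) call whose ReferenceCounted handle is typed over OmSnapshot and used in try-with-resources. To show why that removes both the cast and the manual release, here is a minimal, self-contained sketch; CountedHandle and HandleDemo are invented names for illustration, not the Ozone ReferenceCounted class:

```java
import java.util.concurrent.atomic.AtomicLong;

// Sketch of a reference-counted, AutoCloseable handle whose type parameter lets
// callers use the wrapped object directly, with no cast and no explicit release call.
final class CountedHandle<T> implements AutoCloseable {
  private final T value;
  private final AtomicLong refCount = new AtomicLong(1); // handed out already referenced

  CountedHandle(T value) {
    this.value = value;
  }

  T get() {
    return value; // typed access, no (OmSnapshot)-style cast needed
  }

  @Override
  public void close() {
    if (refCount.decrementAndGet() == 0) {
      // A real implementation would close the underlying snapshot DB handle here.
      System.out.println("released " + value);
    }
  }
}

public class HandleDemo {
  public static void main(String[] args) {
    // try-with-resources drops the reference automatically, mirroring the
    // try (ReferenceCounted<OmSnapshot> rc = ...getSnapshot(...)) call sites in this patch.
    try (CountedHandle<String> handle = new CountedHandle<>("snapshot-db")) {
      System.out.println("using " + handle.get());
    }
  }
}
```

This sketch collapses release and close for brevity; in the patch, releasing only decrements the count, and a separate cleanup pass in SnapshotCache closes snapshot DBs that are no longer referenced.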
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java index 4e9ee7563310..b16ba95d78f6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.om.response.key; import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -29,7 +28,6 @@ import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos; @@ -41,7 +39,6 @@ import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotMoveDeletedKeysResponse.createRepeatedOmKeyInfo; /** @@ -81,14 +78,13 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, ((OmMetadataManagerImpl) omMetadataManager) .getOzoneManager().getOmSnapshotManager(); - try (ReferenceCounted rcOmFromSnapshot = - omSnapshotManager.checkForSnapshot( + try (ReferenceCounted rcOmFromSnapshot = + omSnapshotManager.getSnapshot( fromSnapshot.getVolumeName(), fromSnapshot.getBucketName(), - getSnapshotPrefix(fromSnapshot.getName()), - true)) { + fromSnapshot.getName())) { - OmSnapshot fromOmSnapshot = (OmSnapshot) rcOmFromSnapshot.get(); + OmSnapshot fromOmSnapshot = rcOmFromSnapshot.get(); DBStore fromSnapshotStore = fromOmSnapshot.getMetadataManager().getStore(); // Init Batch Operation for snapshot db. 
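A note on the API shape visible in the last two files: the old checkForSnapshot call hid the caller's intent behind a trailing boolean (apparently the same skipActiveCheck flag that also disappears from SnapshotCache.get further below), while the replacement splits it into differently named accessors — getSnapshot at these call sites and getActiveSnapshot in the background services later in this patch — so the call site itself states whether an inactive snapshot is acceptable, and translating the name through getSnapshotPrefix is no longer the caller's job.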
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java index 3e390b0288ec..9fb843dcbe14 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java @@ -32,7 +32,6 @@ .OMResponse; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import javax.annotation.CheckForNull; import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; @@ -58,7 +57,6 @@ public class S3MultipartUploadCompleteResponse extends OmKeyResponse { private List allKeyInfoToRemove; private OmBucketInfo omBucketInfo; - @SuppressWarnings("checkstyle:ParameterNumber") public S3MultipartUploadCompleteResponse( @Nonnull OMResponse omResponse, @Nonnull String multipartKey, @@ -66,7 +64,7 @@ public S3MultipartUploadCompleteResponse( @Nonnull OmKeyInfo omKeyInfo, @Nonnull List allKeyInfoToRemove, @Nonnull BucketLayout bucketLayout, - @CheckForNull OmBucketInfo omBucketInfo) { + OmBucketInfo omBucketInfo) { super(omResponse, bucketLayout); this.allKeyInfoToRemove = allKeyInfoToRemove; this.multipartKey = multipartKey; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java index 29edfe382533..8774627ee66b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java @@ -27,7 +27,6 @@ import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.CheckForNull; import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.List; @@ -61,7 +60,7 @@ public S3MultipartUploadCompleteResponseWithFSO( @Nonnull OmKeyInfo omKeyInfo, @Nonnull List allKeyInfoToRemove, @Nonnull BucketLayout bucketLayout, - @CheckForNull OmBucketInfo omBucketInfo, + OmBucketInfo omBucketInfo, @Nonnull long volumeId, @Nonnull long bucketId) { super(omResponse, multipartKey, multipartOpenKey, omKeyInfo, allKeyInfoToRemove, bucketLayout, omBucketInfo); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java index 1255e4ae7f41..3726faacfd70 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.RDBStore; -import 
org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -32,7 +31,6 @@ import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -42,7 +40,6 @@ import java.util.List; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; /** * Response for OMSnapshotMoveDeletedKeysRequest. @@ -93,24 +90,22 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, ((OmMetadataManagerImpl) omMetadataManager) .getOzoneManager().getOmSnapshotManager(); - try (ReferenceCounted rcOmFromSnapshot = - omSnapshotManager.checkForSnapshot( + try (ReferenceCounted rcOmFromSnapshot = + omSnapshotManager.getSnapshot( fromSnapshot.getVolumeName(), fromSnapshot.getBucketName(), - getSnapshotPrefix(fromSnapshot.getName()), - true)) { + fromSnapshot.getName())) { - OmSnapshot fromOmSnapshot = (OmSnapshot) rcOmFromSnapshot.get(); + OmSnapshot fromOmSnapshot = rcOmFromSnapshot.get(); if (nextSnapshot != null) { - try (ReferenceCounted - rcOmNextSnapshot = omSnapshotManager.checkForSnapshot( + try (ReferenceCounted + rcOmNextSnapshot = omSnapshotManager.getSnapshot( nextSnapshot.getVolumeName(), nextSnapshot.getBucketName(), - getSnapshotPrefix(nextSnapshot.getName()), - true)) { + nextSnapshot.getName())) { - OmSnapshot nextOmSnapshot = (OmSnapshot) rcOmNextSnapshot.get(); + OmSnapshot nextOmSnapshot = rcOmNextSnapshot.get(); RDBStore nextSnapshotStore = (RDBStore) nextOmSnapshot.getMetadataManager().getStore(); // Init Batch Operation for snapshot db. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java index b8db58d7fd9e..d300601b3858 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java @@ -80,12 +80,13 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) omMetadataManager; - updateSnapInfo(metadataManager, batchOperation, updatedSnapInfos); updateSnapInfo(metadataManager, batchOperation, updatedPreviousAndGlobalSnapInfos); + updateSnapInfo(metadataManager, batchOperation, updatedSnapInfos); for (String dbKey: snapshotDbKeys) { + // Skip the cache here because snapshot is purged from cache in OMSnapshotPurgeRequest. SnapshotInfo snapshotInfo = omMetadataManager - .getSnapshotInfoTable().get(dbKey); + .getSnapshotInfoTable().getSkipCache(dbKey); // Even though snapshot existed when SnapshotDeletingService // was running. It might be deleted in the previous run and // the DB might not have been updated yet. 
So snapshotInfo @@ -96,8 +97,7 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, // Delete Snapshot checkpoint directory. deleteCheckpointDirectory(omMetadataManager, snapshotInfo); - omMetadataManager.getSnapshotInfoTable().deleteWithBatch(batchOperation, - dbKey); + omMetadataManager.getSnapshotInfoTable().deleteWithBatch(batchOperation, dbKey); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotRenameResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotRenameResponse.java new file mode 100644 index 000000000000..05bb16a8f514 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotRenameResponse.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.snapshot; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE; + +import jakarta.annotation.Nonnull; +import java.io.IOException; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; + +/** + * Response for OMSnapshotRenameRequest. + */ +@CleanupTableInfo(cleanupTables = {SNAPSHOT_INFO_TABLE}) +public class OMSnapshotRenameResponse extends OMClientResponse { + + private String snapshotOldName; + private String snapshotNewName; + private SnapshotInfo renamedInfo; + + public OMSnapshotRenameResponse(OzoneManagerProtocolProtos.OMResponse omResponse, + String snapshotOldName, String snapshotNewName, + @Nonnull SnapshotInfo renamedInfo) { + super(omResponse); + this.snapshotOldName = snapshotOldName; + this.snapshotNewName = snapshotNewName; + this.renamedInfo = renamedInfo; + } + + /** + * For when the request is not successful. + * For a successful request, the other constructor should be used. 
+ */ + public OMSnapshotRenameResponse(@Nonnull OzoneManagerProtocolProtos.OMResponse omResponse) { + super(omResponse); + checkStatusNotOK(); + } + + @Override + protected void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) + throws IOException { + omMetadataManager.getSnapshotInfoTable() + .putWithBatch(batchOperation, snapshotNewName, renamedInfo); + omMetadataManager.getSnapshotInfoTable() + .deleteWithBatch(batchOperation, snapshotOldName); + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java index 9643fa82969c..d7205b2c1bbf 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.Table.KeyValue; import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -35,7 +34,6 @@ import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PurgePathRequest; import org.apache.hadoop.util.Time; import org.apache.ratis.protocol.ClientId; @@ -238,7 +236,7 @@ private boolean previousSnapshotHasDir( OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) getOzoneManager().getMetadataManager(); - try (ReferenceCounted rcLatestSnapshot = + try (ReferenceCounted rcLatestSnapshot = metadataManager.getLatestActiveSnapshot( deletedDirInfo.getVolumeName(), deletedDirInfo.getBucketName(), @@ -249,11 +247,9 @@ private boolean previousSnapshotHasDir( .getRenameKey(deletedDirInfo.getVolumeName(), deletedDirInfo.getBucketName(), deletedDirInfo.getObjectID()); Table prevDirTable = - ((OmSnapshot) rcLatestSnapshot.get()) - .getMetadataManager().getDirectoryTable(); + rcLatestSnapshot.get().getMetadataManager().getDirectoryTable(); Table prevDeletedDirTable = - ((OmSnapshot) rcLatestSnapshot.get()) - .getMetadataManager().getDeletedDirTable(); + rcLatestSnapshot.get().getMetadataManager().getDeletedDirTable(); OmKeyInfo prevDeletedDirInfo = prevDeletedDirTable.get(key); if (prevDeletedDirInfo != null) { return true; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java index e89608e82db2..83991668c9f3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.KeyManager; import 
org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -46,7 +45,6 @@ import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest; @@ -58,7 +56,6 @@ import com.google.common.annotations.VisibleForTesting; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT; @@ -264,13 +261,12 @@ private void processSnapshotDeepClean(int delCount) continue; } - try (ReferenceCounted - rcCurrOmSnapshot = omSnapshotManager.checkForSnapshot( + try (ReferenceCounted + rcCurrOmSnapshot = omSnapshotManager.getSnapshot( currSnapInfo.getVolumeName(), currSnapInfo.getBucketName(), - getSnapshotPrefix(currSnapInfo.getName()), - true)) { - OmSnapshot currOmSnapshot = (OmSnapshot) rcCurrOmSnapshot.get(); + currSnapInfo.getName())) { + OmSnapshot currOmSnapshot = rcCurrOmSnapshot.get(); Table snapDeletedTable = currOmSnapshot.getMetadataManager().getDeletedTable(); @@ -304,18 +300,16 @@ private void processSnapshotDeepClean(int delCount) Table previousKeyTable = null; Table prevRenamedTable = null; - ReferenceCounted - rcPrevOmSnapshot = null; + ReferenceCounted rcPrevOmSnapshot = null; // Split RepeatedOmKeyInfo and update current snapshot // deletedKeyTable and next snapshot deletedKeyTable. 
if (previousSnapshot != null) { - rcPrevOmSnapshot = omSnapshotManager.checkForSnapshot( + rcPrevOmSnapshot = omSnapshotManager.getSnapshot( previousSnapshot.getVolumeName(), previousSnapshot.getBucketName(), - getSnapshotPrefix(previousSnapshot.getName()), true); - OmSnapshot omPreviousSnapshot = (OmSnapshot) - rcPrevOmSnapshot.get(); + previousSnapshot.getName()); + OmSnapshot omPreviousSnapshot = rcPrevOmSnapshot.get(); previousKeyTable = omPreviousSnapshot.getMetadataManager() .getKeyTable(bucketInfo.getBucketLayout()); @@ -324,15 +318,13 @@ private void processSnapshotDeepClean(int delCount) } Table previousToPrevKeyTable = null; - ReferenceCounted - rcPrevToPrevOmSnapshot = null; + ReferenceCounted rcPrevToPrevOmSnapshot = null; if (previousToPrevSnapshot != null) { - rcPrevToPrevOmSnapshot = omSnapshotManager.checkForSnapshot( + rcPrevToPrevOmSnapshot = omSnapshotManager.getSnapshot( previousToPrevSnapshot.getVolumeName(), previousToPrevSnapshot.getBucketName(), - getSnapshotPrefix(previousToPrevSnapshot.getName()), true); - OmSnapshot omPreviousToPrevSnapshot = (OmSnapshot) - rcPrevToPrevOmSnapshot.get(); + previousToPrevSnapshot.getName()); + OmSnapshot omPreviousToPrevSnapshot = rcPrevToPrevOmSnapshot.get(); previousToPrevKeyTable = omPreviousToPrevSnapshot .getMetadataManager() diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java index cc275b4e8e6a..29b2b319532b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java @@ -34,7 +34,6 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.lock.BootstrapStateHandler; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.KeyManagerImpl; @@ -52,7 +51,6 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PurgePathRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveDeletedKeysRequest; @@ -78,7 +76,6 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_KEY_DELETING_LIMIT_PER_TASK_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_DELETING_LIMIT_PER_TASK_DEFAULT; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; /** * Background Service to clean-up deleted snapshot and reclaim space. 
@@ -143,10 +140,8 @@ public BackgroundTaskResult call() throws InterruptedException { getRunCount().incrementAndGet(); - ReferenceCounted rcOmSnapshot = - null; - ReferenceCounted rcOmPreviousSnapshot = - null; + ReferenceCounted rcOmSnapshot = null; + ReferenceCounted rcOmPreviousSnapshot = null; Table snapshotInfoTable = ozoneManager.getMetadataManager().getSnapshotInfoTable(); @@ -169,12 +164,11 @@ public BackgroundTaskResult call() throws InterruptedException { // Note: Can refactor this to use try-with-resources. // Handling RC decrements manually for now to minimize conflicts. - rcOmSnapshot = omSnapshotManager.checkForSnapshot( + rcOmSnapshot = omSnapshotManager.getSnapshot( snapInfo.getVolumeName(), snapInfo.getBucketName(), - getSnapshotPrefix(snapInfo.getName()), - true); - OmSnapshot omSnapshot = (OmSnapshot) rcOmSnapshot.get(); + snapInfo.getName()); + OmSnapshot omSnapshot = rcOmSnapshot.get(); Table snapshotDeletedTable = omSnapshot.getMetadataManager().getDeletedTable(); @@ -226,12 +220,11 @@ public BackgroundTaskResult call() throws InterruptedException { // Split RepeatedOmKeyInfo and update current snapshot deletedKeyTable // and next snapshot deletedKeyTable. if (previousSnapshot != null) { - rcOmPreviousSnapshot = omSnapshotManager.checkForSnapshot( + rcOmPreviousSnapshot = omSnapshotManager.getSnapshot( previousSnapshot.getVolumeName(), previousSnapshot.getBucketName(), - getSnapshotPrefix(previousSnapshot.getName()), - true); - omPreviousSnapshot = (OmSnapshot) rcOmPreviousSnapshot.get(); + previousSnapshot.getName()); + omPreviousSnapshot = rcOmPreviousSnapshot.get(); previousKeyTable = omPreviousSnapshot .getMetadataManager().getKeyTable(bucketInfo.getBucketLayout()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java index 9a60f6303861..fe0f6e111ed3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -44,7 +43,6 @@ import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; @@ -63,7 +61,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getDirectoryInfo; import static 
org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getOzonePathKeyForFso; @@ -158,10 +155,8 @@ public BackgroundTaskResult call() { continue; } - ReferenceCounted - rcPrevOmSnapshot = null; - ReferenceCounted - rcPrevToPrevOmSnapshot = null; + ReferenceCounted rcPrevOmSnapshot = null; + ReferenceCounted rcPrevToPrevOmSnapshot = null; try { long volumeId = metadataManager .getVolumeId(currSnapInfo.getVolumeName()); @@ -189,12 +184,11 @@ public BackgroundTaskResult call() { Table prevRenamedTable = null; if (previousSnapshot != null) { - rcPrevOmSnapshot = omSnapshotManager.checkForSnapshot( + rcPrevOmSnapshot = omSnapshotManager.getActiveSnapshot( previousSnapshot.getVolumeName(), previousSnapshot.getBucketName(), - getSnapshotPrefix(previousSnapshot.getName()), false); - OmSnapshot omPreviousSnapshot = (OmSnapshot) - rcPrevOmSnapshot.get(); + previousSnapshot.getName()); + OmSnapshot omPreviousSnapshot = rcPrevOmSnapshot.get(); previousKeyTable = omPreviousSnapshot.getMetadataManager() .getKeyTable(bucketInfo.getBucketLayout()); @@ -206,12 +200,11 @@ public BackgroundTaskResult call() { Table previousToPrevKeyTable = null; if (previousToPrevSnapshot != null) { - rcPrevToPrevOmSnapshot = omSnapshotManager.checkForSnapshot( + rcPrevToPrevOmSnapshot = omSnapshotManager.getActiveSnapshot( previousToPrevSnapshot.getVolumeName(), previousToPrevSnapshot.getBucketName(), - getSnapshotPrefix(previousToPrevSnapshot.getName()), false); - OmSnapshot omPreviousToPrevSnapshot = (OmSnapshot) - rcPrevToPrevOmSnapshot.get(); + previousToPrevSnapshot.getName()); + OmSnapshot omPreviousToPrevSnapshot = rcPrevToPrevOmSnapshot.get(); previousToPrevKeyTable = omPreviousToPrevSnapshot .getMetadataManager() @@ -220,14 +213,13 @@ public BackgroundTaskResult call() { String dbBucketKeyForDir = getOzonePathKeyForFso(metadataManager, currSnapInfo.getVolumeName(), currSnapInfo.getBucketName()); - try (ReferenceCounted - rcCurrOmSnapshot = omSnapshotManager.checkForSnapshot( + try (ReferenceCounted + rcCurrOmSnapshot = omSnapshotManager.getActiveSnapshot( currSnapInfo.getVolumeName(), currSnapInfo.getBucketName(), - getSnapshotPrefix(currSnapInfo.getName()), - false)) { + currSnapInfo.getName())) { - OmSnapshot currOmSnapshot = (OmSnapshot) rcCurrOmSnapshot.get(); + OmSnapshot currOmSnapshot = rcCurrOmSnapshot.get(); Table snapDeletedDirTable = currOmSnapshot.getMetadataManager().getDeletedDirTable(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/ReferenceCounted.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/ReferenceCounted.java index 808a5ed4c192..0a9d47fc861c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/ReferenceCounted.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/ReferenceCounted.java @@ -25,7 +25,7 @@ /** * Add reference counter to an object instance. */ -public class ReferenceCounted +public class ReferenceCounted implements AutoCloseable { /** @@ -51,10 +51,10 @@ public class ReferenceCounted /** * Parent instance whose callback will be triggered upon this RC closure. */ - private final U parentWithCallback; + private final Object parentWithCallback; public ReferenceCounted(T obj, boolean disableCounter, - U parentWithCallback) { + Object parentWithCallback) { // A param to allow disabling ref counting to reduce active DB // access penalties due to AtomicLong operations. 
this.obj = obj; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java index 226acbb7dd1b..f14837462b0a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java @@ -19,9 +19,8 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.cache.CacheLoader; -import org.apache.hadoop.ozone.om.IOmMetadataReader; +import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmSnapshot; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -29,10 +28,10 @@ import java.io.IOException; import java.util.Iterator; import java.util.Map; +import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; -import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; /** * Thread-safe custom unbounded LRU cache to manage open snapshot DB instances. @@ -42,29 +41,28 @@ public class SnapshotCache { static final Logger LOG = LoggerFactory.getLogger(SnapshotCache.class); // Snapshot cache internal hash map. - // Key: DB snapshot table key + // Key: SnapshotId // Value: OmSnapshot instance, each holds a DB instance handle inside // TODO: [SNAPSHOT] Consider wrapping SoftReference<> around IOmMetadataReader - private final ConcurrentHashMap> dbMap; + private final ConcurrentHashMap> dbMap; + + private final CacheLoader cacheLoader; - private final OmSnapshotManager omSnapshotManager; - private final CacheLoader cacheLoader; // Soft-limit of the total number of snapshot DB instances allowed to be // opened on the OM. private final int cacheSizeLimit; - public SnapshotCache( - OmSnapshotManager omSnapshotManager, - CacheLoader cacheLoader, - int cacheSizeLimit) { + private final OMMetrics omMetrics; + + public SnapshotCache(CacheLoader cacheLoader, int cacheSizeLimit, OMMetrics omMetrics) { this.dbMap = new ConcurrentHashMap<>(); - this.omSnapshotManager = omSnapshotManager; this.cacheLoader = cacheLoader; this.cacheSizeLimit = cacheSizeLimit; + this.omMetrics = omMetrics; } @VisibleForTesting - ConcurrentHashMap> getDbMap() { + ConcurrentHashMap> getDbMap() { return dbMap; } @@ -77,18 +75,19 @@ public int size() { /** * Immediately invalidate an entry. - * @param key DB snapshot table key + * @param key SnapshotId */ - public void invalidate(String key) throws IOException { + public void invalidate(UUID key) throws IOException { dbMap.compute(key, (k, v) -> { if (v == null) { - LOG.warn("Key: '{}' does not exist in cache.", k); + LOG.warn("SnapshotId: '{}' does not exist in snapshot cache.", k); } else { try { - ((OmSnapshot) v.get()).close(); + v.get().close(); } catch (IOException e) { - throw new IllegalStateException("Failed to close snapshot: " + key, e); + throw new IllegalStateException("Failed to close snapshotId: " + key, e); } + omMetrics.decNumSnapshotCacheSize(); } return null; }); @@ -98,12 +97,11 @@ public void invalidate(String key) throws IOException { * Immediately invalidate all entries and close their DB instances in cache. 
*/ public void invalidateAll() { - Iterator>> - it = dbMap.entrySet().iterator(); + Iterator>> it = dbMap.entrySet().iterator(); while (it.hasNext()) { - Map.Entry> entry = it.next(); - OmSnapshot omSnapshot = (OmSnapshot) entry.getValue().get(); + Map.Entry> entry = it.next(); + OmSnapshot omSnapshot = entry.getValue().get(); try { // TODO: If wrapped with SoftReference<>, omSnapshot could be null? omSnapshot.close(); @@ -111,6 +109,7 @@ public void invalidateAll() { throw new IllegalStateException("Failed to close snapshot", e); } it.remove(); + omMetrics.decNumSnapshotCacheSize(); } } @@ -120,31 +119,31 @@ public void invalidateAll() { */ public enum Reason { FS_API_READ, - SNAPDIFF_READ, + SNAP_DIFF_READ, DEEP_CLEAN_WRITE, GARBAGE_COLLECTION_WRITE } - public ReferenceCounted get(String key) throws IOException { - return get(key, false); - } - /** * Get or load OmSnapshot. Shall be close()d after use. * TODO: [SNAPSHOT] Can add reason enum to param list later. - * @param key snapshot table key + * @param key SnapshotId * @return an OmSnapshot instance, or null on error */ - public ReferenceCounted get(String key, boolean skipActiveCheck) - throws IOException { + public ReferenceCounted get(UUID key) throws IOException { + // Warn if actual cache size exceeds the soft limit already. + if (size() > cacheSizeLimit) { + LOG.warn("Snapshot cache size ({}) exceeds configured soft-limit ({}).", + size(), cacheSizeLimit); + } // Atomic operation to initialize the OmSnapshot instance (once) if the key // does not exist, and increment the reference count on the instance. - ReferenceCounted rcOmSnapshot = + ReferenceCounted rcOmSnapshot = dbMap.compute(key, (k, v) -> { if (v == null) { - LOG.info("Loading snapshot. Table key: {}", k); + LOG.info("Loading SnapshotId: '{}'", k); try { - v = new ReferenceCounted<>(cacheLoader.load(k), false, this); + v = new ReferenceCounted<>(cacheLoader.load(key), false, this); } catch (OMException omEx) { // Return null if the snapshot is no longer active if (!omEx.getResult().equals(FILE_NOT_FOUND)) { @@ -157,6 +156,7 @@ public ReferenceCounted get(String key, boolea // Unexpected and unknown exception thrown from CacheLoader#load throw new IllegalStateException(ex); } + omMetrics.incNumSnapshotCacheSize(); } if (v != null) { // When RC OmSnapshot is successfully loaded @@ -164,26 +164,13 @@ public ReferenceCounted get(String key, boolea } return v; }); - if (rcOmSnapshot == null) { // The only exception that would fall through the loader logic above // is OMException with FILE_NOT_FOUND. - throw new OMException("Snapshot table key '" + key + "' not found, " - + "or the snapshot is no longer active", + throw new OMException("SnapshotId: '" + key + "' not found, or the snapshot is no longer active.", OMException.ResultCodes.FILE_NOT_FOUND); } - // If the snapshot is already loaded in cache, the check inside the loader - // above is ignored. But we would still want to reject all get()s except - // when called from SDT (and some) if the snapshot is not active anymore. - if (!skipActiveCheck && !omSnapshotManager.isSnapshotStatus(key, SNAPSHOT_ACTIVE)) { - // Ref count was incremented. Need to decrement on exception here. - rcOmSnapshot.decrementRefCount(); - throw new OMException("Unable to load snapshot. " + - "Snapshot with table key '" + key + "' is no longer active", - FILE_NOT_FOUND); - } - // Check if any entries can be cleaned up. 
// At this point, cache size might temporarily exceed cacheSizeLimit // even if there are entries that can be evicted, which is fine since it @@ -195,12 +182,12 @@ public ReferenceCounted get(String key, boolea /** * Release the reference count on the OmSnapshot instance. - * @param key snapshot table key + * @param key SnapshotId */ - public void release(String key) { + public void release(UUID key) { dbMap.compute(key, (k, v) -> { if (v == null) { - throw new IllegalArgumentException("Key '" + key + "' does not exist in cache."); + throw new IllegalArgumentException("SnapshotId '" + key + "' does not exist in cache."); } else { v.decrementRefCount(); } @@ -212,15 +199,6 @@ public void release(String key) { cleanup(); } - /** - * Alternatively, can release with OmSnapshot instance directly. - * @param omSnapshot OmSnapshot - */ - public void release(OmSnapshot omSnapshot) { - final String snapshotTableKey = omSnapshot.getSnapshotTableKey(); - release(snapshotTableKey); - } - /** * Wrapper for cleanupInternal() that is synchronized to prevent multiple * threads from interleaving into the cleanup method. @@ -237,25 +215,25 @@ private synchronized void cleanup() { * TODO: [SNAPSHOT] Add new ozone debug CLI command to trigger this directly. */ private void cleanupInternal() { - for (Map.Entry> entry : dbMap.entrySet()) { + for (Map.Entry> entry : dbMap.entrySet()) { dbMap.compute(entry.getKey(), (k, v) -> { if (v == null) { - throw new IllegalStateException("Key '" + k + "' does not exist in cache. The RocksDB " + + throw new IllegalStateException("SnapshotId '" + k + "' does not exist in cache. The RocksDB " + "instance of the Snapshot may not be closed properly."); } if (v.getTotalRefCount() > 0) { - LOG.debug("Snapshot {} is still being referenced ({}), skipping its clean up", - k, v.getTotalRefCount()); + LOG.debug("SnapshotId {} is still being referenced ({}), skipping its clean up.", k, v.getTotalRefCount()); return v; } else { - LOG.debug("Closing Snapshot {}. It is not being referenced anymore.", k); + LOG.debug("Closing SnapshotId {}. It is not being referenced anymore.", k); // Close the instance, which also closes its DB handle. 
try { - ((OmSnapshot) v.get()).close(); + v.get().close(); } catch (IOException ex) { - throw new IllegalStateException("Error while closing snapshot DB", ex); + throw new IllegalStateException("Error while closing snapshot DB.", ex); } + omMetrics.decNumSnapshotCacheSize(); return null; } }); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java index 41e990097ecd..a200a36cb25d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java @@ -25,19 +25,16 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; import org.apache.hadoop.hdds.utils.db.CodecRegistry; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry; import org.apache.hadoop.ozone.OFSPath; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.om.IOmMetadataReader; -import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -89,7 +86,6 @@ import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.function.BiFunction; @@ -112,9 +108,10 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.OmSnapshotManager.DELIMITER; -import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.getTableKey; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.checkSnapshotActive; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.dropColumnFamilyHandle; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getColumnFamilyToKeyPrefixMap; @@ -150,7 +147,6 @@ public class SnapshotDiffManager implements AutoCloseable { private final ManagedRocksDB db; private final RocksDBCheckpointDiffer differ; private final OzoneManager ozoneManager; - private final SnapshotCache snapshotCache; private final CodecRegistry codecRegistry; private final 
ManagedColumnFamilyOptions familyOptions; // TODO: [SNAPSHOT] Use different wait time based of job status. @@ -185,9 +181,7 @@ public class SnapshotDiffManager implements AutoCloseable { private final boolean diffDisableNativeLibs; - private final Optional sstDumpTool; - - private Optional sstDumpToolExecService; + private final boolean isNativeLibsLoaded; private final BiFunction generateSnapDiffJobKey = @@ -199,7 +193,6 @@ public class SnapshotDiffManager implements AutoCloseable { public SnapshotDiffManager(ManagedRocksDB db, RocksDBCheckpointDiffer differ, OzoneManager ozoneManager, - SnapshotCache snapshotCache, ColumnFamilyHandle snapDiffJobCfh, ColumnFamilyHandle snapDiffReportCfh, ManagedColumnFamilyOptions familyOptions, @@ -207,7 +200,6 @@ public SnapshotDiffManager(ManagedRocksDB db, this.db = db; this.differ = differ; this.ozoneManager = ozoneManager; - this.snapshotCache = snapshotCache; this.familyOptions = familyOptions; this.codecRegistry = codecRegistry; this.defaultWaitTime = ozoneManager.getConfiguration().getTimeDuration( @@ -264,7 +256,7 @@ public SnapshotDiffManager(ManagedRocksDB db, createEmptySnapDiffDir(path); this.sstBackupDirForSnapDiffJobs = path.toString(); - this.sstDumpTool = initSSTDumpTool(ozoneManager.getConfiguration()); + this.isNativeLibsLoaded = initNativeLibraryForEfficientDiff(ozoneManager.getConfiguration()); // Ideally, loadJobsOnStartUp should run only on OM node, since SnapDiff // is not HA currently and running this on all the nodes would be @@ -287,35 +279,16 @@ public PersistentMap getSnapDiffJobTable() { return snapDiffJobTable; } - private Optional initSSTDumpTool( - final OzoneConfiguration conf) { - if (conf.getBoolean(OMConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB, - OMConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT)) { + private boolean initNativeLibraryForEfficientDiff(final OzoneConfiguration conf) { + if (conf.getBoolean(OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB, OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT)) { try { - int threadPoolSize = conf.getInt( - OMConfigKeys.OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE, - OMConfigKeys - .OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE_DEFAULT); - int bufferSize = (int) conf.getStorageSize( - OMConfigKeys.OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE, - OMConfigKeys - .OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE_DEFAULT, - StorageUnit.BYTES); - this.sstDumpToolExecService = Optional.of(new ThreadPoolExecutor(0, - threadPoolSize, 60, TimeUnit.SECONDS, - new SynchronousQueue<>(), new ThreadFactoryBuilder() - .setNameFormat(ozoneManager.getThreadNamePrefix() + - "snapshot-diff-manager-sst-dump-tool-TID-%d") - .build(), - new ThreadPoolExecutor.DiscardPolicy())); - return Optional.of(new ManagedSSTDumpTool(sstDumpToolExecService.get(), - bufferSize)); + return ManagedRawSSTFileReader.loadLibrary(); } catch (NativeLibraryNotLoadedException e) { - this.sstDumpToolExecService.ifPresent(exec -> - closeExecutorService(exec, "SstDumpToolExecutor")); + LOG.error("Native Library for raw sst file reading loading failed.", e); + return false; } } - return Optional.empty(); + return false; } /** @@ -832,8 +805,8 @@ void generateSnapshotDiffReport(final String jobKey, // job by RocksDBCheckpointDiffer#pruneOlderSnapshotsWithCompactionHistory. 
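initNativeLibraryForEfficientDiff() above replaces the SST dump tool plumbing with a single boolean: if the native raw-SST reader cannot be loaded, the diff silently falls back to the slower Java path instead of failing. A rough sketch of that guard-and-fall-back idiom, using System.loadLibrary and a made-up library name rather than the real ManagedRawSSTFileReader.loadLibrary():

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Illustrative only: load an optional native helper, fall back to Java otherwise. */
final class NativeSupport {
  private static final Logger LOG = LoggerFactory.getLogger(NativeSupport.class);

  private NativeSupport() { }

  /** Returns true only if the (hypothetical) native library could be loaded. */
  static boolean tryLoad(String libraryName) {
    try {
      System.loadLibrary(libraryName);
      return true;
    } catch (UnsatisfiedLinkError | SecurityException e) {
      LOG.error("Native library {} could not be loaded; falling back to the Java path.",
          libraryName, e);
      return false;
    }
  }
}

// Usage: pick the key-stream implementation once, based on the flag, the way
// isNativeLibsLoaded is consulted in addToObjectIdMap() further below:
// boolean nativeLoaded = NativeSupport.tryLoad("rawsstfilereader");  // name is hypothetical
// Stream<String> keys = nativeLoaded
//     ? reader.getKeyStreamWithTombstone(lower, upper)
//     : reader.getKeyStream(lower, upper);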
Path path = Paths.get(sstBackupDirForSnapDiffJobs + "/" + jobId); - ReferenceCounted rcFromSnapshot = null; - ReferenceCounted rcToSnapshot = null; + ReferenceCounted rcFromSnapshot = null; + ReferenceCounted rcToSnapshot = null; try { if (!areDiffJobAndSnapshotsActive(volumeName, bucketName, @@ -841,14 +814,15 @@ void generateSnapshotDiffReport(final String jobKey, return; } - String fsKey = getTableKey(volumeName, bucketName, fromSnapshotName); - String tsKey = getTableKey(volumeName, bucketName, toSnapshotName); - - rcFromSnapshot = snapshotCache.get(fsKey); - rcToSnapshot = snapshotCache.get(tsKey); + rcFromSnapshot = + ozoneManager.getOmSnapshotManager() + .getActiveSnapshot(volumeName, bucketName, fromSnapshotName); + rcToSnapshot = + ozoneManager.getOmSnapshotManager() + .getActiveSnapshot(volumeName, bucketName, toSnapshotName); - OmSnapshot fromSnapshot = (OmSnapshot) rcFromSnapshot.get(); - OmSnapshot toSnapshot = (OmSnapshot) rcToSnapshot.get(); + OmSnapshot fromSnapshot = rcFromSnapshot.get(); + OmSnapshot toSnapshot = rcToSnapshot.get(); SnapshotInfo fsInfo = getSnapshotInfo(ozoneManager, volumeName, bucketName, fromSnapshotName); SnapshotInfo tsInfo = getSnapshotInfo(ozoneManager, @@ -1056,12 +1030,12 @@ private void getDeltaFilesAndDiffKeysToObjectIdToKeyMap( // Workaround to handle deletes if native rocksDb tool for reading // tombstone is not loaded. // TODO: [SNAPSHOT] Update Rocksdb SSTFileIterator to read tombstone - if (skipNativeDiff || !sstDumpTool.isPresent()) { + if (skipNativeDiff || !isNativeLibsLoaded) { deltaFiles.addAll(getSSTFileListForSnapshot(fromSnapshot, tablesToLookUp)); } addToObjectIdMap(fsTable, tsTable, deltaFiles, - !skipNativeDiff && sstDumpTool.isPresent(), + !skipNativeDiff && isNativeLibsLoaded, oldObjIdToKeyMap, newObjIdToKeyMap, objectIdToIsDirMap, oldParentIds, newParentIds, tablePrefixes); } @@ -1092,12 +1066,9 @@ void addToObjectIdMap(Table fsTable, upperBoundCharArray[upperBoundCharArray.length - 1] += 1; sstFileReaderUpperBound = String.valueOf(upperBoundCharArray); } - try (Stream keysToCheck = - nativeRocksToolsLoaded && sstDumpTool.isPresent() - ? sstFileReader.getKeyStreamWithTombstone(sstDumpTool.get(), - sstFileReaderLowerBound, sstFileReaderUpperBound) - : sstFileReader.getKeyStream(sstFileReaderLowerBound, - sstFileReaderUpperBound)) { + try (Stream keysToCheck = nativeRocksToolsLoaded ? 
+ sstFileReader.getKeyStreamWithTombstone(sstFileReaderLowerBound, sstFileReaderUpperBound) + : sstFileReader.getKeyStream(sstFileReaderLowerBound, sstFileReaderUpperBound)) { keysToCheck.forEach(key -> { try { final WithParentObjectId fromObjectId = fsTable.get(key); @@ -1678,8 +1649,6 @@ public void close() { if (snapDiffExecutor != null) { closeExecutorService(snapDiffExecutor, "SnapDiffExecutor"); } - this.sstDumpToolExecService.ifPresent(exec -> - closeExecutorService(exec, "SstDumpToolExecutor")); } private void closeExecutorService(ExecutorService executorService, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java index 89823995d0cd..2041fa791a76 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java @@ -40,6 +40,7 @@ import java.util.UUID; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_SNAPSHOT_ERROR; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; @@ -148,6 +149,10 @@ public static SnapshotInfo getNextActiveSnapshot(SnapshotInfo snapInfo, // If the snapshot is deleted in the previous run, then the in-memory // SnapshotChainManager might throw NoSuchElementException as the snapshot // is removed in-memory but OMDoubleBuffer has not flushed yet. + if (snapInfo == null) { + throw new OMException("Snapshot Info is null. 
Cannot get the next snapshot", INVALID_SNAPSHOT_ERROR); + } + try { while (chainManager.hasNextPathSnapshot(snapInfo.getSnapshotPath(), snapInfo.getSnapshotId())) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java index d08a0009e36e..f1e9c819e709 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java @@ -96,7 +96,7 @@ protected void registerUpgradeActions(String packageName) { .forPackages(packageName) .setScanners(new TypeAnnotationsScanner(), new SubTypesScanner()) .setExpandSuperTypes(false) - .useParallelExecutor()); + .setParallel(true)); Set> typesAnnotatedWith = reflections.getTypesAnnotatedWith(UpgradeActionOm.class); typesAnnotatedWith.forEach(actionClass -> { @@ -132,7 +132,7 @@ public static Set> getRequestClasses( .setUrls(ClasspathHelper.forPackage(packageName)) .setScanners(new SubTypesScanner()) .setExpandSuperTypes(false) - .useParallelExecutor()); + .setParallel(true)); Set> validRequests = new HashSet<>(); Set> subTypes = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java index cf9bb4f0bbce..03729aebb509 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java @@ -57,6 +57,7 @@ import com.google.protobuf.ServiceException; import org.apache.hadoop.ozone.security.S3SecurityUtil; import org.apache.ratis.protocol.RaftPeerId; +import org.apache.ratis.server.protocol.TermIndex; import org.apache.ratis.util.ExitUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -86,6 +87,9 @@ public class OzoneManagerProtocolServerSideTranslatorPB implements OzoneManagerP // always true, only used in tests private boolean shouldFlushCache = true; + private OMRequest lastRequestToSubmit; + + /** * Constructs an instance of the server handler. 
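In OMLayoutVersionManager the only change is swapping the removed useParallelExecutor() for setParallel(true); the scan itself is untouched. Condensed, the annotation scan looks roughly like the sketch below (the class name and the generic annotation parameter are illustrative, the builder calls are the ones used in the hunk):

import java.lang.annotation.Annotation;
import java.util.Set;
import org.reflections.Reflections;
import org.reflections.scanners.SubTypesScanner;
import org.reflections.scanners.TypeAnnotationsScanner;
import org.reflections.util.ConfigurationBuilder;

final class AnnotationScanExample {
  private AnnotationScanExample() { }

  /** Find every class in the package that carries the given annotation. */
  static Set<Class<?>> findAnnotated(String packageName, Class<? extends Annotation> annotation) {
    Reflections reflections = new Reflections(new ConfigurationBuilder()
        .forPackages(packageName)
        .setScanners(new TypeAnnotationsScanner(), new SubTypesScanner())
        .setExpandSuperTypes(false)
        .setParallel(true));   // replaces the removed useParallelExecutor()
    return reflections.getTypesAnnotatedWith(annotation);
  }
}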
* @@ -109,8 +113,9 @@ public OzoneManagerProtocolServerSideTranslatorPB( : OzoneManagerDoubleBuffer.newBuilder() .setOmMetadataManager(ozoneManager.getMetadataManager()) .enableTracing(TracingUtil.isTracingEnabled(ozoneManager.getConfiguration())) - .build(); - this.handler = new OzoneManagerRequestHandler(impl, ozoneManagerDoubleBuffer); + .build() + .start(); + this.handler = new OzoneManagerRequestHandler(impl); this.omRatisServer = ratisServer; dispatcher = new OzoneProtocolMessageDispatcher<>("OzoneProtocol", metrics, LOG, OMPBHelper::processForDebug, OMPBHelper::processForDebug); @@ -210,6 +215,7 @@ private OMResponse internalProcessRequest(OMRequest request) throws assert (omClientRequest != null); OMClientRequest finalOmClientRequest = omClientRequest; requestToSubmit = preExecute(finalOmClientRequest); + this.lastRequestToSubmit = requestToSubmit; } catch (IOException ex) { if (omClientRequest != null) { omClientRequest.handleRequestFailure(ozoneManager); @@ -233,6 +239,11 @@ private OMRequest preExecute(OMClientRequest finalOmClientRequest) () -> finalOmClientRequest.preExecute(ozoneManager)); } + @VisibleForTesting + public OMRequest getLastRequestToSubmit() { + return lastRequestToSubmit; + } + /** * Submits request to OM's Ratis server. */ @@ -278,7 +289,7 @@ private ServiceException createLeaderNotReadyException() { * Submits request directly to OM. */ private OMResponse submitRequestDirectlyToOM(OMRequest request) { - OMClientResponse omClientResponse; + final OMClientResponse omClientResponse; try { if (OmUtils.isReadOnly(request)) { return handler.handleReadRequest(request); @@ -286,8 +297,8 @@ private OMResponse submitRequestDirectlyToOM(OMRequest request) { OMClientRequest omClientRequest = createClientRequest(request, ozoneManager); request = omClientRequest.preExecute(ozoneManager); - long index = transactionIndex.incrementAndGet(); - omClientResponse = handler.handleWriteRequest(request, TransactionInfo.getTermIndex(index)); + final TermIndex termIndex = TransactionInfo.getTermIndex(transactionIndex.incrementAndGet()); + omClientResponse = handler.handleWriteRequest(request, termIndex, ozoneManagerDoubleBuffer); } } catch (IOException ex) { // As some preExecute returns error. So handle here. 
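The constructor change from .build() to .build().start() here (and again in the double-buffer tests further below) implies that OzoneManagerDoubleBuffer.start() now returns the instance itself, so construction and startup chain in one expression. A tiny illustration of that fluent-start convention, with placeholder class and thread names:

/** Illustrative fluent start: start() returns this so build().start() chains. */
final class DoubleBuffer {
  private Thread flusher;

  DoubleBuffer start() {
    flusher = new Thread(this::flushLoop, "buffer-flusher");
    flusher.setDaemon(true);
    flusher.start();
    return this;   // enables the build().start() chaining seen in this patch
  }

  private void flushLoop() {
    // drain queued responses and write them to the DB in batches (omitted)
  }

  static final class Builder {
    DoubleBuffer build() {
      return new DoubleBuffer();
    }
  }
}

// Usage: DoubleBuffer buffer = new DoubleBuffer.Builder().build().start();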
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index b97e2160f956..5339c7400d30 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -21,6 +21,7 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; +import java.util.Objects; import java.util.UUID; import java.util.stream.Collectors; @@ -69,7 +70,6 @@ import org.apache.hadoop.ozone.om.helpers.TenantStateList; import org.apache.hadoop.ozone.om.helpers.TenantUserInfoValue; import org.apache.hadoop.ozone.om.helpers.TenantUserList; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -163,7 +163,6 @@ import static org.apache.hadoop.util.MetricUtil.captureLatencyNs; import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.StatusAndMessages; -import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.ProtobufUtils; import org.apache.ratis.server.protocol.TermIndex; import org.slf4j.Logger; @@ -177,13 +176,10 @@ public class OzoneManagerRequestHandler implements RequestHandler { static final Logger LOG = LoggerFactory.getLogger(OzoneManagerRequestHandler.class); private final OzoneManager impl; - private OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer; private FaultInjector injector; - public OzoneManagerRequestHandler(OzoneManager om, - OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer) { + public OzoneManagerRequestHandler(OzoneManager om) { this.impl = om; - this.ozoneManagerDoubleBuffer = ozoneManagerDoubleBuffer; } //TODO simplify it to make it shorter @@ -401,27 +397,14 @@ public OMResponse handleReadRequest(OMRequest request) { } @Override - public OMClientResponse handleWriteRequest(OMRequest omRequest, TermIndex termIndex) throws IOException { + public OMClientResponse handleWriteRequestImpl(OMRequest omRequest, TermIndex termIndex) throws IOException { injectPause(); OMClientRequest omClientRequest = OzoneManagerRatisUtils.createClientRequest(omRequest, impl); return captureLatencyNs( impl.getPerfMetrics().getValidateAndUpdateCacheLatencyNs(), - () -> { - OMClientResponse omClientResponse = - omClientRequest.validateAndUpdateCache(getOzoneManager(), termIndex); - Preconditions.checkNotNull(omClientResponse, - "omClientResponse returned by validateAndUpdateCache cannot be null"); - if (omRequest.getCmdType() != Type.Prepare) { - ozoneManagerDoubleBuffer.add(omClientResponse, termIndex); - } - return omClientResponse; - }); - } - - @Override - public void updateDoubleBuffer(OzoneManagerDoubleBuffer omDoubleBuffer) { - this.ozoneManagerDoubleBuffer = omDoubleBuffer; + () -> Objects.requireNonNull(omClientRequest.validateAndUpdateCache(getOzoneManager(), termIndex), + "omClientResponse returned by validateAndUpdateCache cannot be null")); } @VisibleForTesting diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java index 17e9f0a7d656..e60362a1ebb3 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java @@ -20,10 +20,9 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos. - OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos. - OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.ratis.server.protocol.TermIndex; import java.io.IOException; @@ -50,22 +49,30 @@ public interface RequestHandler { void validateRequest(OMRequest omRequest) throws OMException; /** - * Handle write requests. In HA this will be called from - * OzoneManagerStateMachine applyTransaction method. In non-HA this will be - * called from {@link OzoneManagerProtocolServerSideTranslatorPB} for write - * requests. + * Handle write requests. + * In HA this will be called from OzoneManagerStateMachine applyTransaction method. + * In non-HA this will be called from {@link OzoneManagerProtocolServerSideTranslatorPB}. * - * @param omRequest - * @param termIndex - ratis transaction log (term, index) + * @param omRequest the write request + * @param termIndex - ratis transaction term and index + * @param ozoneManagerDoubleBuffer for adding response * @return OMClientResponse */ - OMClientResponse handleWriteRequest(OMRequest omRequest, TermIndex termIndex) throws IOException; + default OMClientResponse handleWriteRequest(OMRequest omRequest, TermIndex termIndex, + OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer) throws IOException { + final OMClientResponse response = handleWriteRequestImpl(omRequest, termIndex); + if (omRequest.getCmdType() != Type.Prepare) { + ozoneManagerDoubleBuffer.add(response, termIndex); + } + return response; + } /** - * Update the OzoneManagerDoubleBuffer. This will be called when - * stateMachine is unpaused and set with new doublebuffer object. - * @param ozoneManagerDoubleBuffer + * Implementation of {@link #handleWriteRequest(OMRequest, TermIndex, OzoneManagerDoubleBuffer)}. 
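The RequestHandler change above is the core of this refactor: handleWriteRequest becomes a default method that wraps handleWriteRequestImpl and appends the response to the double buffer passed in by the caller, which is why updateDoubleBuffer could be deleted. Stripped of Ozone types, the shape is a template method expressed as a default interface method, roughly:

/** Illustrative template-method-via-default-method; types are simplified placeholders. */
interface WriteHandler<Q, R> {

  /** Shared wrapper: run the real handler, then publish the result. */
  default R handle(Q request, long index, Buffer<R> buffer) {
    R response = handleImpl(request, index);
    buffer.add(response, index);   // the real code skips this for Prepare requests
    return response;
  }

  /** Each implementation only supplies the core request processing. */
  R handleImpl(Q request, long index);

  interface Buffer<R> {
    void add(R response, long index);
  }
}

OzoneManagerRequestHandler then only overrides the Impl method, as shown in the previous hunk.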
+ * + * @param omRequest the write request + * @param termIndex - ratis transaction term and index + * @return OMClientResponse */ - void updateDoubleBuffer(OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer); - + OMClientResponse handleWriteRequestImpl(OMRequest omRequest, TermIndex termIndex) throws IOException; } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java index 43d29c1608a8..edffd5ed74eb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.client.ScmTopologyClient; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; @@ -105,12 +106,16 @@ public OmTestManagers(OzoneConfiguration conf, keyManager = (KeyManagerImpl) HddsWhiteboxTestUtils .getInternalState(om, "keyManager"); ScmClient scmClient = new ScmClient(scmBlockClient, containerClient, conf); + ScmTopologyClient scmTopologyClient = + new ScmTopologyClient(scmBlockClient); HddsWhiteboxTestUtils.setInternalState(om, "scmClient", scmClient); HddsWhiteboxTestUtils.setInternalState(keyManager, "scmClient", scmClient); HddsWhiteboxTestUtils.setInternalState(keyManager, "secretManager", mock(OzoneBlockTokenSecretManager.class)); + HddsWhiteboxTestUtils.setInternalState(om, + "scmTopologyClient", scmTopologyClient); om.start(); waitFor(() -> om.getOmRatisServer().checkLeaderStatus() == RaftServerStatus.LEADER_AND_READY, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java index 8847a2d51e3f..8ba5ca779c1e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java @@ -31,6 +31,9 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.net.InnerNodeImpl; +import org.apache.hadoop.hdds.scm.net.NetConstants; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; @@ -200,6 +203,14 @@ public List sortDatanodes(List nodes, return null; } + @Override + public InnerNode getNetworkTopology() { + InnerNode.Factory factory = InnerNodeImpl.FACTORY; + InnerNode clusterTree = factory.newInnerNode("", "", null, + NetConstants.ROOT_LEVEL, 1); + return clusterTree; + } + /** * Return the number of blocks puesdo deleted by this testing client. 
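Both OmTestManagers and the testing SCM client now have to supply a network topology, and TestKeyManagerUnit further below stubs the same call on its mocked block client. A hedged sketch of that stubbing, reusing only the factory calls that appear in this diff (the helper class itself is hypothetical):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.hdds.scm.net.InnerNode;
import org.apache.hadoop.hdds.scm.net.InnerNodeImpl;
import org.apache.hadoop.hdds.scm.net.NetConstants;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;

final class TopologyStubExample {
  private TopologyStubExample() { }

  static ScmBlockLocationProtocol stubbedBlockClient() throws Exception {
    // Single-level cluster tree, as returned by ScmBlockLocationTestingClient above.
    InnerNode clusterTree = InnerNodeImpl.FACTORY
        .newInnerNode("", "", null, NetConstants.ROOT_LEVEL, 1);
    ScmBlockLocationProtocol blockClient = mock(ScmBlockLocationProtocol.class);
    when(blockClient.getNetworkTopology()).thenReturn(clusterTree);
    return blockClient;
  }
}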
*/ diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestAuthorizerLockImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestAuthorizerLockImpl.java index b78864e30105..f600158007b9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestAuthorizerLockImpl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestAuthorizerLockImpl.java @@ -18,11 +18,11 @@ package org.apache.hadoop.ozone.om; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import org.apache.hadoop.ozone.om.multitenant.AuthorizerLock; import org.apache.hadoop.ozone.om.multitenant.AuthorizerLockImpl; import org.apache.ozone.test.GenericTestUtils; @@ -103,17 +103,14 @@ public void testStampedLockBehavior() throws InterruptedException { public void testLockInOneThreadUnlockInAnother() { final AuthorizerLock authorizerLock = new AuthorizerLockImpl(); - - try { + assertDoesNotThrow(() -> { authorizerLock.tryWriteLockInOMRequest(); // Spawn another thread to release the lock. // Works as long as they share the same AuthorizerLockImpl instance. final Thread thread1 = new Thread(authorizerLock::unlockWriteInOMRequest); thread1.start(); - } catch (IOException e) { - fail("Should not have thrown: " + e.getMessage()); - } + }); } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketUtilizationMetrics.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketUtilizationMetrics.java new file mode 100644 index 000000000000..1be85d204903 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketUtilizationMetrics.java @@ -0,0 +1,126 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

    + * http://www.apache.org/licenses/LICENSE-2.0 + *

    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om; + +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.MetricsInfo; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; +import org.apache.hadoop.ozone.om.BucketUtilizationMetrics.BucketMetricsInfo; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.junit.jupiter.api.Test; + +import java.util.Iterator; +import java.util.Map; + +import static org.apache.hadoop.ozone.OzoneConsts.QUOTA_RESET; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * Test class for BucketUtilizationMetrics. + */ +public class TestBucketUtilizationMetrics { + + private static final String VOLUME_NAME_1 = "volume1"; + private static final String VOLUME_NAME_2 = "volume2"; + private static final String BUCKET_NAME_1 = "bucket1"; + private static final String BUCKET_NAME_2 = "bucket2"; + private static final long USED_BYTES_1 = 100; + private static final long USED_BYTES_2 = 200; + private static final long QUOTA_IN_BYTES_1 = 200; + private static final long QUOTA_IN_BYTES_2 = QUOTA_RESET; + private static final long QUOTA_IN_NAMESPACE_1 = 1; + private static final long QUOTA_IN_NAMESPACE_2 = 2; + + @Test + void testBucketUtilizationMetrics() { + OMMetadataManager omMetadataManager = mock(OMMetadataManager.class); + + Map.Entry, CacheValue> entry1 = createMockEntry(VOLUME_NAME_1, BUCKET_NAME_1, + USED_BYTES_1, QUOTA_IN_BYTES_1, QUOTA_IN_NAMESPACE_1); + Map.Entry, CacheValue> entry2 = createMockEntry(VOLUME_NAME_2, BUCKET_NAME_2, + USED_BYTES_2, QUOTA_IN_BYTES_2, QUOTA_IN_NAMESPACE_2); + + Iterator, CacheValue>> bucketIterator = mock(Iterator.class); + when(bucketIterator.hasNext()) + .thenReturn(true) + .thenReturn(true) + .thenReturn(false); + + when(bucketIterator.next()) + .thenReturn(entry1) + .thenReturn(entry2); + + when(omMetadataManager.getBucketIterator()).thenReturn(bucketIterator); + + MetricsRecordBuilder mb = mock(MetricsRecordBuilder.class); + when(mb.setContext(anyString())).thenReturn(mb); + when(mb.tag(any(MetricsInfo.class), anyString())).thenReturn(mb); + when(mb.addGauge(any(MetricsInfo.class), anyInt())).thenReturn(mb); + when(mb.addGauge(any(MetricsInfo.class), anyLong())).thenReturn(mb); + + MetricsCollector metricsCollector = mock(MetricsCollector.class); + when(metricsCollector.addRecord(anyString())).thenReturn(mb); + + BucketUtilizationMetrics containerMetrics = new BucketUtilizationMetrics(omMetadataManager); + + containerMetrics.getMetrics(metricsCollector, true); + + verify(mb, times(1)).tag(BucketMetricsInfo.VolumeName, VOLUME_NAME_1); + verify(mb, times(1)).tag(BucketMetricsInfo.BucketName, BUCKET_NAME_1); + verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketUsedBytes, USED_BYTES_1); + verify(mb, 
times(1)).addGauge(BucketMetricsInfo.BucketQuotaBytes, QUOTA_IN_BYTES_1); + verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketQuotaNamespace, QUOTA_IN_NAMESPACE_1); + verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketAvailableBytes, + QUOTA_IN_BYTES_1 - USED_BYTES_1); + + verify(mb, times(1)).tag(BucketMetricsInfo.VolumeName, VOLUME_NAME_2); + verify(mb, times(1)).tag(BucketMetricsInfo.BucketName, BUCKET_NAME_2); + verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketUsedBytes, USED_BYTES_2); + verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketQuotaBytes, QUOTA_IN_BYTES_2); + verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketQuotaNamespace, QUOTA_IN_NAMESPACE_2); + verify(mb, times(1)).addGauge(BucketMetricsInfo.BucketAvailableBytes, QUOTA_RESET); + } + + private static Map.Entry, CacheValue> createMockEntry(String volumeName, + String bucketName, long usedBytes, long quotaInBytes, long quotaInNamespace) { + Map.Entry, CacheValue> entry = mock(Map.Entry.class); + CacheValue cacheValue = mock(CacheValue.class); + OmBucketInfo bucketInfo = mock(OmBucketInfo.class); + + when(bucketInfo.getVolumeName()).thenReturn(volumeName); + when(bucketInfo.getBucketName()).thenReturn(bucketName); + when(bucketInfo.getUsedBytes()).thenReturn(usedBytes); + when(bucketInfo.getQuotaInBytes()).thenReturn(quotaInBytes); + when(bucketInfo.getQuotaInNamespace()).thenReturn(quotaInNamespace); + + when(cacheValue.getCacheValue()).thenReturn(bucketInfo); + + when(entry.getValue()).thenReturn(cacheValue); + + return entry; + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java index df7f5b67b4e9..33a33ad807d4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestChunkStreams.java @@ -17,6 +17,8 @@ package org.apache.hadoop.ozone.om; import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.storage.BlockInputStream; import org.apache.hadoop.ozone.client.io.KeyInputStream; import jakarta.annotation.Nonnull; @@ -34,6 +36,8 @@ */ public class TestChunkStreams { + private OzoneConfiguration conf = new OzoneConfiguration(); + @Test public void testReadGroupInputStream() throws Exception { String dataString = RandomStringUtils.randomAscii(500); @@ -90,7 +94,10 @@ private List createInputStreams(String dataString) { } private BlockInputStream createStream(byte[] buf, int offset) { - return new BlockInputStream(null, 100L, null, null, true, null) { + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumVerify(true); + return new BlockInputStream(null, 100L, null, null, null, + clientConfig) { private long pos; private final ByteArrayInputStream in = new ByteArrayInputStream(buf, offset, 100); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java index 6454a77d66f3..5e2e27e0c1f4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java @@ -23,12 +23,10 @@ import 
java.nio.file.Path; import java.time.Instant; import java.util.ArrayList; -import java.util.HashMap; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.concurrent.atomic.AtomicLong; @@ -44,6 +42,9 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.net.InnerNodeImpl; +import org.apache.hadoop.hdds.scm.net.NetConstants; import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; @@ -65,6 +66,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -78,14 +80,9 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.api.TestInstance; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; import static com.google.common.collect.Sets.newHashSet; -import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; -import static java.util.Comparator.comparing; -import static java.util.stream.Collectors.toList; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -123,6 +120,9 @@ void setup(@TempDir Path testDir) throws Exception { configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.toString()); containerClient = mock(StorageContainerLocationProtocol.class); blockClient = mock(ScmBlockLocationProtocol.class); + InnerNode.Factory factory = InnerNodeImpl.FACTORY; + when(blockClient.getNetworkTopology()).thenReturn( + factory.newInnerNode("", "", null, NetConstants.ROOT_LEVEL, 1)); OmTestManagers omTestManagers = new OmTestManagers(configuration, blockClient, containerClient); @@ -161,6 +161,60 @@ public void listMultipartUploadPartsWithZeroUpload() throws IOException { omMultipartUploadListParts.getPartInfoList().size()); } + @Test + public void listMultipartUploadPartsWithoutEtagField() throws IOException { + // For backward compatibility reasons + final String volume = volumeName(); + final String bucket = "bucketForEtag"; + final String key = "dir/key1"; + createBucket(metadataManager, volume, bucket); + OmMultipartInfo omMultipartInfo = + initMultipartUpload(writeClient, volume, bucket, key); + + + // Commit some MPU parts without eTag field + for (int i = 1; i <= 5; i++) { + OmKeyArgs partKeyArgs = + new OmKeyArgs.Builder() + .setVolumeName(volume) + .setBucketName(bucket) + .setKeyName(key) + .setIsMultipartKey(true) + .setMultipartUploadID(omMultipartInfo.getUploadID()) + .setMultipartUploadPartNumber(i) + .setAcls(Collections.emptyList()) + .setReplicationConfig( + RatisReplicationConfig.getInstance(ReplicationFactor.THREE)) + .build(); + + OpenKeySession 
openKey = writeClient.openKey(partKeyArgs); + + OmKeyArgs commitPartKeyArgs = + new OmKeyArgs.Builder() + .setVolumeName(volume) + .setBucketName(bucket) + .setKeyName(key) + .setIsMultipartKey(true) + .setMultipartUploadID(omMultipartInfo.getUploadID()) + .setMultipartUploadPartNumber(i) + .setAcls(Collections.emptyList()) + .setReplicationConfig( + RatisReplicationConfig.getInstance(ReplicationFactor.THREE)) + .setLocationInfoList(Collections.emptyList()) + .build(); + + writeClient.commitMultipartUploadPart(commitPartKeyArgs, openKey.getId()); + } + + + OmMultipartUploadListParts omMultipartUploadListParts = keyManager + .listParts(volume, bucket, key, omMultipartInfo.getUploadID(), + 0, 10); + assertEquals(5, + omMultipartUploadListParts.getPartInfoList().size()); + + } + private String volumeName() { return getTestName(); } @@ -589,9 +643,6 @@ public void listStatus() throws Exception { OMRequestTestUtils.addBucketToDB(volume, bucket, metadataManager); final Pipeline pipeline = MockPipeline.createPipeline(3); - final List nodes = pipeline.getNodes().stream() - .map(DatanodeDetails::getUuidString) - .collect(toList()); Set containerIDs = new HashSet<>(); List containersWithPipeline = new ArrayList<>(); @@ -641,7 +692,6 @@ public void listStatus() throws Exception { assertEquals(10, fileStatusList.size()); verify(containerClient).getContainerWithPipelineBatch(containerIDs); - verify(blockClient).sortDatanodes(nodes, client); // call list status the second time, and verify no more calls to // SCM. @@ -649,67 +699,4 @@ public void listStatus() throws Exception { null, Long.MAX_VALUE, client); verify(containerClient, times(1)).getContainerWithPipelineBatch(anySet()); } - - @ParameterizedTest - @ValueSource(strings = {"anyhost", ""}) - public void sortDatanodes(String client) throws Exception { - // GIVEN - int pipelineCount = 3; - int keysPerPipeline = 5; - OmKeyInfo[] keyInfos = new OmKeyInfo[pipelineCount * keysPerPipeline]; - List> expectedSortDatanodesInvocations = new ArrayList<>(); - Map> expectedSortedNodes = new HashMap<>(); - int ki = 0; - for (int p = 0; p < pipelineCount; p++) { - final Pipeline pipeline = MockPipeline.createPipeline(3); - final List nodes = pipeline.getNodes().stream() - .map(DatanodeDetails::getUuidString) - .collect(toList()); - expectedSortDatanodesInvocations.add(nodes); - final List sortedNodes = pipeline.getNodes().stream() - .sorted(comparing(DatanodeDetails::getUuidString)) - .collect(toList()); - expectedSortedNodes.put(pipeline, sortedNodes); - - when(blockClient.sortDatanodes(nodes, client)) - .thenReturn(sortedNodes); - - for (int i = 1; i <= keysPerPipeline; i++) { - OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder() - .setBlockID(new BlockID(i, 1L)) - .setPipeline(pipeline) - .setOffset(0) - .setLength(256000) - .build(); - - OmKeyInfo keyInfo = new OmKeyInfo.Builder() - .setOmKeyLocationInfos(Arrays.asList( - new OmKeyLocationInfoGroup(0, emptyList()), - new OmKeyLocationInfoGroup(1, singletonList(keyLocationInfo)))) - .build(); - keyInfos[ki++] = keyInfo; - } - } - - // WHEN - keyManager.sortDatanodes(client, keyInfos); - - // THEN - // verify all key info locations got updated - for (OmKeyInfo keyInfo : keyInfos) { - OmKeyLocationInfoGroup locations = keyInfo.getLatestVersionLocations(); - assertNotNull(locations); - for (OmKeyLocationInfo locationInfo : locations.getLocationList()) { - Pipeline pipeline = locationInfo.getPipeline(); - List expectedOrder = expectedSortedNodes.get(pipeline); - assertEquals(expectedOrder, 
pipeline.getNodesInOrder()); - } - } - - // expect one invocation per pipeline - for (List nodes : expectedSortDatanodesInvocations) { - verify(blockClient).sortDatanodes(nodes, client); - } - } - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java index 0079585a85b6..a4ced424522b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java @@ -156,7 +156,7 @@ public void testMultiTenancyRequestsWhenDisabled() throws IOException { // Check that Multi-Tenancy read requests are blocked when not enabled final OzoneManagerRequestHandler ozoneManagerRequestHandler = - new OzoneManagerRequestHandler(ozoneManager, null); + new OzoneManagerRequestHandler(ozoneManager); expectReadRequestToFail(ozoneManagerRequestHandler, OMRequestTestUtils.listUsersInTenantRequest(tenantId)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java index 7d66ba66578b..c4913879ae90 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.ozone.om; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -27,6 +28,7 @@ import org.apache.hadoop.ozone.om.helpers.ListOpenFilesResult; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.hdds.utils.TransactionInfo; @@ -65,6 +67,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MPU_EXPIRE_THRESHOLD; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MPU_EXPIRE_THRESHOLD_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_EXPIRE_THRESHOLD; @@ -589,9 +592,9 @@ public void testListOpenFiles(BucketLayout bucketLayout) throws Exception { int numOpenKeys = 3; List openKeys = new ArrayList<>(); for (int i = 0; i < numOpenKeys; i++) { - final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyPrefix + i, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, 0L, Time.now()); + final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyPrefix + i, + RatisReplicationConfig.getInstance(ONE)) + .build(); final String dbOpenKeyName; if (bucketLayout.isFileSystemOptimized()) { @@ -707,9 +710,10 @@ public void testGetExpiredOpenKeys(BucketLayout bucketLayout) for (int i = 0; i < numExpiredOpenKeys + numUnexpiredOpenKeys; i++) { final long creationTime = i < numExpiredOpenKeys ? 
expiredOpenKeyCreationTime : Time.now(); - final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, "expired" + i, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, 0L, creationTime); + final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo( + volumeName, bucketName, "expired" + i, RatisReplicationConfig.getInstance(ONE)) + .setCreationTime(creationTime) + .build(); final String dbOpenKeyName; if (bucketLayout.isFileSystemOptimized()) { @@ -779,10 +783,11 @@ public void testGetExpiredOpenKeysExcludeMPUKeys( // Ensure that "expired" MPU-related open keys are not fetched. // MPU-related open keys, identified by isMultipartKey = false for (int i = 0; i < numExpiredMPUOpenKeys; i++) { - final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, "expired" + i, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - 0L, expiredOpenKeyCreationTime, true); + final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, "expired" + i, + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setCreationTime(expiredOpenKeyCreationTime) + .build(); + assertThat(keyInfo.getModificationTime()).isPositive(); final String uploadId = OMMultipartUploadUtils.getMultipartUploadId(); final OmMultipartKeyInfo multipartKeyInfo = OMRequestTestUtils. @@ -812,10 +817,10 @@ public void testGetExpiredOpenKeysExcludeMPUKeys( // HDDS-9017. Although these open keys are MPU-related, // the isMultipartKey flags are set to false for (int i = numExpiredMPUOpenKeys; i < 2 * numExpiredMPUOpenKeys; i++) { - final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, "expired" + i, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - 0L, expiredOpenKeyCreationTime, false); + final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo( + volumeName, bucketName, "expired" + i, RatisReplicationConfig.getInstance(ONE)) + .setCreationTime(expiredOpenKeyCreationTime) + .build(); final String uploadId = OMMultipartUploadUtils.getMultipartUploadId(); final OmMultipartKeyInfo multipartKeyInfo = OMRequestTestUtils. 
@@ -879,8 +884,9 @@ public void testGetExpiredMPUs() throws Exception { String keyName = "expired" + i; // Key info to construct the MPU DB key final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, 0L, creationTime); + bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setCreationTime(creationTime) + .build(); for (int j = 1; j <= numPartsPerMPU; j++) { @@ -952,11 +958,10 @@ private void addKeysToOM(String volumeName, String bucketName, if (i % 2 == 0) { OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, - 1000L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + 1000L, RatisReplicationConfig.getInstance(ONE), omMetadataManager); } else { OMRequestTestUtils.addKeyToTableCache(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, + RatisReplicationConfig.getInstance(ONE), omMetadataManager); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index e1ae8f57d15e..c865cb7814de 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -66,7 +66,6 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; import static org.apache.hadoop.ozone.om.OmSnapshotManager.OM_HARDLINK_FILE; import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.getINode; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.truncateFileName; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -167,18 +166,25 @@ public void testCloseOnEviction() throws IOException { SnapshotInfo first = createSnapshotInfo(volumeName, bucketName); SnapshotInfo second = createSnapshotInfo(volumeName, bucketName); + first.setGlobalPreviousSnapshotId(null); + first.setPathPreviousSnapshotId(null); + second.setGlobalPreviousSnapshotId(first.getSnapshotId()); + second.setPathPreviousSnapshotId(first.getSnapshotId()); + when(snapshotInfoTable.get(first.getTableKey())).thenReturn(first); when(snapshotInfoTable.get(second.getTableKey())).thenReturn(second); + ((OmMetadataManagerImpl) om.getMetadataManager()).getSnapshotChainManager().addSnapshot(first); + ((OmMetadataManagerImpl) om.getMetadataManager()).getSnapshotChainManager().addSnapshot(second); // create the first snapshot checkpoint OmSnapshotManager.createOmSnapshotCheckpoint(om.getMetadataManager(), first); // retrieve it and setup store mock OmSnapshotManager omSnapshotManager = om.getOmSnapshotManager(); - OmSnapshot firstSnapshot = (OmSnapshot) omSnapshotManager - .checkForSnapshot(first.getVolumeName(), - first.getBucketName(), getSnapshotPrefix(first.getName()), false).get(); + OmSnapshot firstSnapshot = omSnapshotManager + .getActiveSnapshot(first.getVolumeName(), first.getBucketName(), first.getName()) + .get(); DBStore firstSnapshotStore = mock(DBStore.class); HddsWhiteboxTestUtils.setInternalState( firstSnapshot.getMetadataManager(), "store", firstSnapshotStore); @@ -192,13 +198,12 @@ public void testCloseOnEviction() throws IOException { // read in 
second snapshot to evict first omSnapshotManager - .checkForSnapshot(second.getVolumeName(), - second.getBucketName(), getSnapshotPrefix(second.getName()), false); + .getActiveSnapshot(second.getVolumeName(), second.getBucketName(), second.getName()); // As a workaround, invalidate all cache entries in order to trigger // instances close in this test case, since JVM GC most likely would not // have triggered and closed the instances yet at this point. - omSnapshotManager.getSnapshotCache().invalidateAll(); + omSnapshotManager.invalidateCache(); // confirm store was closed verify(firstSnapshotStore, timeout(3000).times(1)).close(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java index 1890958cbaad..125c9efcaf2d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java @@ -39,7 +39,6 @@ import org.apache.hadoop.ozone.om.S3SecretManagerImpl; import org.apache.hadoop.ozone.om.S3SecretCache; import org.apache.hadoop.ozone.om.S3SecretLockedManager; -import org.apache.hadoop.ozone.om.ratis.metrics.OzoneManagerDoubleBufferMetrics; import org.apache.hadoop.ozone.om.request.s3.security.S3GetSecretRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse; @@ -137,7 +136,8 @@ public void setup() throws IOException { .setMaxUnFlushedTransactionCount(1000) .enableRatis(true) .setFlushNotifier(spyFlushNotifier) - .build(); + .build() + .start(); doNothing().when(omKeyCreateResponse).checkAndUpdateDB(any(), any()); doNothing().when(omBucketCreateResponse).checkAndUpdateDB(any(), any()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java index dd8e642721e6..22272182997e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java @@ -37,7 +37,6 @@ .CreateBucketResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMResponse; -import org.apache.hadoop.ozone.om.ratis.metrics.OzoneManagerDoubleBufferMetrics; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; @@ -79,7 +78,8 @@ public void setup() throws IOException { .setOmMetadataManager(omMetadataManager) .setMaxUnFlushedTransactionCount(10000) .enableRatis(true) - .build(); + .build() + .start(); } @AfterEach diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java index a97b24289cd7..54b04260d556 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java @@ -64,6 +64,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.newBucketInfoBuilder; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; @@ -111,7 +112,8 @@ public void setup() throws IOException { .setOmMetadataManager(omMetadataManager) .setMaxUnFlushedTransactionCount(100000) .enableRatis(true) - .build(); + .build() + .start(); } @AfterEach @@ -416,13 +418,9 @@ private void testDoubleBuffer(int volumeCount, int bucketsPerVolume) } private boolean assertRowCount(int expected, Table table) { - long count = 0L; - try { - count = omMetadataManager.countRowsInTable(table); - } catch (IOException ex) { - fail("testDoubleBuffer failed with: " + ex); - } - return count == expected; + AtomicLong count = new AtomicLong(0L); + assertDoesNotThrow(() -> count.set(omMetadataManager.countRowsInTable(table))); + return count.get() == expected; } /** diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java index 93997826bf33..3cd7b10910b9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java @@ -37,7 +37,6 @@ import org.apache.ratis.proto.RaftProtos; import org.apache.ratis.protocol.exceptions.StateMachineException; import org.apache.ratis.server.protocol.TermIndex; -import org.apache.ratis.server.raftlog.RaftLog; import org.apache.ratis.statemachine.TransactionContext; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -101,12 +100,12 @@ static void assertTermIndex(long expectedTerm, long expectedIndex, TermIndex com @Test public void testLastAppliedIndex() { ozoneManagerStateMachine.notifyTermIndexUpdated(0, 0); - assertTermIndex(0, RaftLog.INVALID_LOG_INDEX, ozoneManagerStateMachine.getLastAppliedTermIndex()); + assertTermIndex(0, 0, ozoneManagerStateMachine.getLastAppliedTermIndex()); assertTermIndex(0, 0, ozoneManagerStateMachine.getLastNotifiedTermIndex()); // Conf/metadata transaction. ozoneManagerStateMachine.notifyTermIndexUpdated(0, 1); - assertTermIndex(0, RaftLog.INVALID_LOG_INDEX, ozoneManagerStateMachine.getLastAppliedTermIndex()); + assertTermIndex(0, 1, ozoneManagerStateMachine.getLastAppliedTermIndex()); assertTermIndex(0, 1, ozoneManagerStateMachine.getLastNotifiedTermIndex()); // call update last applied index @@ -119,7 +118,7 @@ public void testLastAppliedIndex() { // Conf/metadata transaction. ozoneManagerStateMachine.notifyTermIndexUpdated(1L, 4L); - assertTermIndex(0, 3, ozoneManagerStateMachine.getLastAppliedTermIndex()); + assertTermIndex(1, 4, ozoneManagerStateMachine.getLastAppliedTermIndex()); assertTermIndex(1, 4, ozoneManagerStateMachine.getLastNotifiedTermIndex()); // Add some apply transactions. 
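The assertRowCount() rewrite a few hunks up swaps the try/catch-plus-fail() for JUnit 5's assertDoesNotThrow; because a lambda can only capture effectively final locals, the row count is carried out through an AtomicLong. The same idiom in isolation, with the table-count call faked:

import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

final class RowCountAssertionExample {
  private RowCountAssertionExample() { }

  /** Stand-in for omMetadataManager.countRowsInTable(table). */
  static long countRows() throws IOException {
    return 42L;
  }

  static boolean hasRowCount(long expected) {
    AtomicLong count = new AtomicLong(0L);
    // The lambda may throw IOException; assertDoesNotThrow fails the test if it does.
    assertDoesNotThrow(() -> count.set(countRows()));
    return count.get() == expected;
  }
}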
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java index 163aefc7d3a2..86263683682a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java @@ -19,12 +19,19 @@ package org.apache.hadoop.ozone.om.request; +import java.io.ByteArrayInputStream; import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.security.DigestInputStream; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.UUID; +import javax.xml.bind.DatatypeConverter; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.client.BlockID; @@ -157,23 +164,22 @@ public static void addVolumeAndBucketToDB( @SuppressWarnings("parameterNumber") public static void addKeyToTableAndCache(String volumeName, String bucketName, - String keyName, long clientID, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long trxnLogIndex, + String keyName, long clientID, ReplicationConfig replicationConfig, long trxnLogIndex, OMMetadataManager omMetadataManager) throws Exception { addKeyToTable(false, true, volumeName, bucketName, keyName, clientID, - replicationType, replicationFactor, trxnLogIndex, omMetadataManager); + replicationConfig, trxnLogIndex, omMetadataManager); } /** * Add key entry to KeyTable. if openKeyTable flag is true, add's entries * to openKeyTable, else add's it to keyTable. + * * @param openKeyTable * @param volumeName * @param bucketName * @param keyName * @param clientID - * @param replicationType - * @param replicationFactor + * @param replicationConfig * @param omMetadataManager * @param locationList * @throws Exception @@ -181,12 +187,11 @@ public static void addKeyToTableAndCache(String volumeName, String bucketName, @SuppressWarnings("parameterNumber") public static void addKeyToTable(boolean openKeyTable, String volumeName, String bucketName, String keyName, long clientID, - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, + ReplicationConfig replicationConfig, OMMetadataManager omMetadataManager, List locationList, long version) throws Exception { addKeyToTable(openKeyTable, false, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, 0L, omMetadataManager, + clientID, replicationConfig, 0L, omMetadataManager, locationList, version); } @@ -194,24 +199,23 @@ public static void addKeyToTable(boolean openKeyTable, String volumeName, /** * Add key entry to KeyTable. if openKeyTable flag is true, add's entries * to openKeyTable, else add's it to keyTable. 
+ * * @param openKeyTable * @param volumeName * @param bucketName * @param keyName * @param clientID - * @param replicationType - * @param replicationFactor + * @param replicationConfig * @param omMetadataManager * @throws Exception */ @SuppressWarnings("parameterNumber") public static void addKeyToTable(boolean openKeyTable, String volumeName, String bucketName, String keyName, long clientID, - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, + ReplicationConfig replicationConfig, OMMetadataManager omMetadataManager) throws Exception { addKeyToTable(openKeyTable, false, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, 0L, omMetadataManager); + clientID, replicationConfig, 0L, omMetadataManager); } /** @@ -225,20 +229,17 @@ public static void addKeyToTable(boolean openKeyTable, String volumeName, * @param bucketName * @param keyName * @param clientID - * @param replicationType - * @param replicationFactor + * @param replicationConfig * @param omMetadataManager * @throws Exception */ @SuppressWarnings("parameterNumber") public static void addKeyToTable(boolean openKeyTable, boolean isMultipartKey, String volumeName, String bucketName, String keyName, long clientID, - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, + ReplicationConfig replicationConfig, OMMetadataManager omMetadataManager) throws Exception { addKeyToTable(openKeyTable, isMultipartKey, false, - volumeName, bucketName, keyName, clientID, replicationType, - replicationFactor, 0L, omMetadataManager); + volumeName, bucketName, keyName, clientID, replicationConfig, 0L, omMetadataManager); } /** @@ -248,19 +249,20 @@ public static void addKeyToTable(boolean openKeyTable, boolean isMultipartKey, */ @SuppressWarnings("parameternumber") public static void addKeyToTable(boolean openKeyTable, boolean addToCache, - String volumeName, String bucketName, String keyName, long clientID, - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long trxnLogIndex, + String volumeName, String bucketName, String keyName, long clientID, ReplicationConfig replicationConfig, + long trxnLogIndex, OMMetadataManager omMetadataManager, List locationList, long version) throws Exception { OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor, trxnLogIndex, Time.now(), version, - false); + replicationConfig, new OmKeyLocationInfoGroup(version, new ArrayList<>(), false)) + .setObjectID(trxnLogIndex) + .build(); + omKeyInfo.appendNewBlocks(locationList, false); addKeyToTable(openKeyTable, addToCache, omKeyInfo, clientID, trxnLogIndex, - omMetadataManager); + omMetadataManager); } /** @@ -271,12 +273,11 @@ public static void addKeyToTable(boolean openKeyTable, boolean addToCache, @SuppressWarnings("parameternumber") public static void addKeyToTable(boolean openKeyTable, boolean addToCache, String volumeName, String bucketName, String keyName, long clientID, - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long trxnLogIndex, + ReplicationConfig replicationConfig, long trxnLogIndex, OMMetadataManager omMetadataManager) throws Exception { - OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor, trxnLogIndex); + OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, replicationConfig) + .setObjectID(trxnLogIndex).build(); 
addKeyToTable(openKeyTable, addToCache, omKeyInfo, clientID, trxnLogIndex, omMetadataManager); @@ -290,13 +291,13 @@ public static void addKeyToTable(boolean openKeyTable, boolean addToCache, @SuppressWarnings("parameternumber") public static void addKeyToTable(boolean openKeyTable, boolean isMultipartKey, boolean addToCache, String volumeName, String bucketName, String keyName, - long clientID, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long trxnLogIndex, + long clientID, ReplicationConfig replicationConfig, long trxnLogIndex, OMMetadataManager omMetadataManager) throws Exception { OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor, trxnLogIndex, Time.now(), 0L, - isMultipartKey); + replicationConfig, new OmKeyLocationInfoGroup(0, new ArrayList<>(), isMultipartKey)) + .setObjectID(trxnLogIndex) + .build(); addKeyToTable(openKeyTable, addToCache, omKeyInfo, clientID, trxnLogIndex, omMetadataManager); @@ -431,23 +432,22 @@ public static void addPart(PartKeyInfo partKeyInfo, /** * Add key entry to key table cache. + * * @param volumeName * @param bucketName * @param keyName - * @param replicationType - * @param replicationFactor + * @param replicationConfig * @param omMetadataManager */ @SuppressWarnings("parameterNumber") public static void addKeyToTableCache(String volumeName, String bucketName, String keyName, - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, + ReplicationConfig replicationConfig, OMMetadataManager omMetadataManager) { OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor); + replicationConfig).build(); omMetadataManager.getKeyTable(getDefaultBucketLayout()).addCacheEntry( new CacheKey<>(omMetadataManager.getOzoneKey(volumeName, bucketName, @@ -543,87 +543,43 @@ public static void addSnapshotToTable( /** * Create OmKeyInfo. + * Initializes most values to a sensible default. */ - public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor) { - return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, - replicationFactor, 0L); + public static OmKeyInfo.Builder createOmKeyInfo(String volumeName, String bucketName, + String keyName, ReplicationConfig replicationConfig, OmKeyLocationInfoGroup omKeyLocationInfoGroup) { + return new OmKeyInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setFileName(OzoneFSUtils.getFileName(keyName)) + .setReplicationConfig(replicationConfig) + .setObjectID(0L) + .setUpdateID(0L) + .setCreationTime(Time.now()) + .setModificationTime(Time.now()) + .addOmKeyLocationInfoGroup(omKeyLocationInfoGroup) + .setDataSize(1000L); + } + + public static OmKeyInfo.Builder createOmKeyInfo(String volumeName, String bucketName, + String keyName, ReplicationConfig replicationConfig) { + return createOmKeyInfo(volumeName, bucketName, keyName, replicationConfig, + new OmKeyLocationInfoGroup(0L, new ArrayList<>(), false)); } /** * Create OmDirectoryInfo. 
*/ public static OmDirectoryInfo createOmDirectoryInfo(String keyName, - long objectID, - long parentObjID) { + long objectID, + long parentObjID) { return new OmDirectoryInfo.Builder() - .setName(keyName) - .setCreationTime(Time.now()) - .setModificationTime(Time.now()) - .setObjectID(objectID) - .setParentObjectID(parentObjID) - .setUpdateID(50) - .build(); - } - - /** - * Create OmKeyInfo. - */ - public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID) { - return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, - replicationFactor, objectID, Time.now()); - } - - /** - * Create OmKeyInfo. - */ - public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID, - long creationTime) { - return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, - replicationFactor, objectID, creationTime, 0L, false); - } - - /** - * Create OmKeyInfo. - */ - @SuppressWarnings("parameterNumber") - public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID, - long creationTime, boolean isMultipartKey) { - return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, - replicationFactor, objectID, creationTime, 0L, isMultipartKey); - } - - /** - * Create OmKeyInfo for LEGACY/OBS bucket. - */ - @SuppressWarnings("parameterNumber") - private static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID, - long creationTime, long version, boolean isMultipartKey) { - return new OmKeyInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setFileName(OzoneFSUtils.getFileName(keyName)) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(version, new ArrayList<>(), - isMultipartKey))) - .setCreationTime(creationTime) + .setName(keyName) + .setCreationTime(Time.now()) .setModificationTime(Time.now()) - .setDataSize(1000L) - .setReplicationConfig( - ReplicationConfig - .fromProtoTypeAndFactor(replicationType, replicationFactor)) .setObjectID(objectID) - .setUpdateID(objectID) + .setParentObjectID(parentObjID) + .setUpdateID(50) .build(); } @@ -631,8 +587,8 @@ private static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, * Create OmMultipartKeyInfo for OBS/LEGACY bucket. 
*/ public static OmMultipartKeyInfo createOmMultipartKeyInfo(String uploadId, - long creationTime, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID) { + long creationTime, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long objectID) { return new OmMultipartKeyInfo.Builder() .setUploadID(uploadId) .setCreationTime(creationTime) @@ -1057,14 +1013,31 @@ public static OMRequest createCommitPartMPURequest(String volumeName, String bucketName, String keyName, long clientID, long size, String multipartUploadID, int partNumber) { + MessageDigest eTagProvider; + try { + eTagProvider = MessageDigest.getInstance(OzoneConsts.MD5_HASH); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } + // Just set dummy size. - KeyArgs.Builder keyArgs = - KeyArgs.newBuilder().setVolumeName(volumeName).setKeyName(keyName) - .setBucketName(bucketName) - .setDataSize(size) - .setMultipartNumber(partNumber) - .setMultipartUploadID(multipartUploadID) - .addAllKeyLocations(new ArrayList<>()); + KeyArgs.Builder keyArgs = KeyArgs.newBuilder().setVolumeName(volumeName) + .setKeyName(keyName) + .setBucketName(bucketName) + .setDataSize(size) + .setMultipartNumber(partNumber) + .setMultipartUploadID(multipartUploadID) + .addAllKeyLocations(new ArrayList<>()) + .addMetadata(HddsProtos.KeyValue.newBuilder() + .setKey(OzoneConsts.ETAG) + .setValue(DatatypeConverter.printHexBinary( + new DigestInputStream( + new ByteArrayInputStream( + RandomStringUtils.randomAlphanumeric((int) size) + .getBytes(StandardCharsets.UTF_8)), + eTagProvider) + .getMessageDigest().digest())) + .build()); // Just adding dummy list. As this is for UT only. MultipartCommitUploadPartRequest multipartCommitUploadPartRequest = @@ -1326,6 +1299,41 @@ public static OMRequest createSnapshotRequest(String volumeName, .build(); } + /** + * Create OMRequest for Rename Snapshot. + * + * @param volumeName vol to be used + * @param bucketName bucket to be used + * @param snapshotOldName Old name of the snapshot + * @param snapshotNewName New name of the snapshot + */ + public static OMRequest renameSnapshotRequest(String volumeName, + String bucketName, + String snapshotOldName, + String snapshotNewName) { + OzoneManagerProtocolProtos.RenameSnapshotRequest renameSnapshotRequest = + OzoneManagerProtocolProtos.RenameSnapshotRequest.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setSnapshotOldName(snapshotOldName) + .setSnapshotNewName(snapshotNewName) + .build(); + + OzoneManagerProtocolProtos.UserInfo userInfo = + OzoneManagerProtocolProtos.UserInfo.newBuilder() + .setUserName("user") + .setHostName("host") + .setRemoteAddress("remote-address") + .build(); + + return OMRequest.newBuilder() + .setRenameSnapshotRequest(renameSnapshotRequest) + .setCmdType(Type.RenameSnapshot) + .setClientId(UUID.randomUUID().toString()) + .setUserInfo(userInfo) + .build(); + } + /** * Create OMRequest for Delete Snapshot. * @param volumeName vol to be used @@ -1408,76 +1416,6 @@ public static void addVolumeToOM(OMMetadataManager omMetadataManager, CacheValue.get(1L, omVolumeArgs)); } - /** - * Create OmKeyInfo. 
- */ - @SuppressWarnings("parameterNumber") - public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID, - long parentID, long trxnLogIndex, long creationTime) { - return createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor, objectID, - parentID, trxnLogIndex, creationTime, 0L, false); - } - - /** - * Create OmKeyInfo with isMultipartKey flag. - */ - @SuppressWarnings("parameterNumber") - public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID, - long parentID, long trxnLogIndex, long creationTime, - boolean isMultipartKey) { - return createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor, objectID, - parentID, trxnLogIndex, creationTime, 0L, isMultipartKey); - } - - /** - * Create OmKeyInfo. - */ - @SuppressWarnings("parameterNumber") - public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID, - long parentID, long trxnLogIndex, long creationTime, long version) { - return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, - replicationFactor, objectID, parentID, trxnLogIndex, creationTime, - version, false); - } - - /** - * Create OmKeyInfo for FSO bucket. - */ - @SuppressWarnings("parameterNumber") - private static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID, - long parentID, long trxnLogIndex, long creationTime, long version, - boolean isMultipartKey) { - String fileName = OzoneFSUtils.getFileName(keyName); - return new OmKeyInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(version, new ArrayList<>(), - isMultipartKey))) - .setCreationTime(creationTime) - .setModificationTime(Time.now()) - .setDataSize(1000L) - .setReplicationConfig(ReplicationConfig - .fromProtoTypeAndFactor(replicationType, replicationFactor)) - .setObjectID(objectID) - .setUpdateID(trxnLogIndex) - .setParentObjectID(parentID) - .setFileName(fileName) - .build(); - } - - /** * Add key entry to KeyTable. if openKeyTable flag is true, add's entries * to openKeyTable, else add's it to keyTable. 
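The OMRequestTestUtils hunks above fold the family of createOmKeyInfo overloads, which took HddsProtos.ReplicationType and ReplicationFactor plus positional objectID/parentID/updateID/version arguments, into two overloads that accept a ReplicationConfig and return an OmKeyInfo.Builder pre-populated with defaults (objectID and updateID 0, dataSize 1000, creation and modification time set to now, fileName derived from the key name). Call sites set only what they need and finish with build(), which is how the later hunks in this patch use it. A representative before/after; variable names are placeholders, everything else appears in the diff:

    // Old call shape (overload removed by this patch):
    //   OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
    //       HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, objectId);

    // New call shape:
    OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
            RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE))
        .setObjectID(objectId)   // override defaults only where the test cares
        .build();

Version and multipart information travel the same way: tests that used to pass version/isMultipartKey flags now hand the overload an explicit new OmKeyLocationInfoGroup(version, new ArrayList<>(), isMultipartKey).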
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java index d44b16808aff..00b94824bbf7 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java @@ -25,8 +25,7 @@ import java.util.UUID; import io.grpc.Context; -import mockit.Mock; -import mockit.MockUp; +import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo; @@ -37,7 +36,6 @@ import org.mockito.MockedStatic; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; @@ -89,54 +87,46 @@ public void setup() throws Exception { @Test public void testUserInfoInCaseOfHadoopTransport() throws Exception { - new MockUp() { - @Mock - public UserGroupInformation getRemoteUser() { - return userGroupInformation; - } - - @Mock - public InetAddress getRemoteIp() { - return inetAddress; - } - - public InetAddress getRemoteAddress() { - return inetAddress; - } - }; - - String bucketName = UUID.randomUUID().toString(); - String volumeName = UUID.randomUUID().toString(); - BucketInfo.Builder bucketInfo = - newBucketInfoBuilder(bucketName, volumeName) - .setIsVersionEnabled(true) - .setStorageType(OzoneManagerProtocolProtos.StorageTypeProto.DISK); - OMRequest omRequest = newCreateBucketRequest(bucketInfo).build(); - - OMBucketCreateRequest omBucketCreateRequest = - new OMBucketCreateRequest(omRequest); - - assertFalse(omRequest.hasUserInfo()); - - OMRequest modifiedRequest = - omBucketCreateRequest.preExecute(ozoneManager); - - assertTrue(modifiedRequest.hasUserInfo()); - - // Now pass modified request to OMBucketCreateRequest and check ugi and - // remote Address. - omBucketCreateRequest = new OMBucketCreateRequest(modifiedRequest); - - InetAddress remoteAddress = omBucketCreateRequest.getRemoteAddress(); - UserGroupInformation ugi = omBucketCreateRequest.createUGI(); - String hostName = omBucketCreateRequest.getHostName(); - - - // Now check we have original user info, remote address and hostname or not. - // Here from OMRequest user info, converted to UGI, InetAddress and String. 
- assertEquals(inetAddress.getHostAddress(), remoteAddress.getHostAddress()); - assertEquals(userGroupInformation.getUserName(), ugi.getUserName()); - assertEquals(inetAddress.getHostName(), hostName); + try (MockedStatic mockedRpcServer = + mockStatic(Server.class)) { + + mockedRpcServer.when(Server::getRemoteUser).thenReturn(userGroupInformation); + mockedRpcServer.when(Server::getRemoteIp).thenReturn(inetAddress); + mockedRpcServer.when(Server::getRemoteAddress).thenReturn(inetAddress.toString()); + + String bucketName = UUID.randomUUID().toString(); + String volumeName = UUID.randomUUID().toString(); + BucketInfo.Builder bucketInfo = + newBucketInfoBuilder(bucketName, volumeName) + .setIsVersionEnabled(true) + .setStorageType(OzoneManagerProtocolProtos.StorageTypeProto.DISK); + OMRequest omRequest = newCreateBucketRequest(bucketInfo).build(); + + OMBucketCreateRequest omBucketCreateRequest = + new OMBucketCreateRequest(omRequest); + + assertFalse(omRequest.hasUserInfo()); + + OMRequest modifiedRequest = + omBucketCreateRequest.preExecute(ozoneManager); + + assertTrue(modifiedRequest.hasUserInfo()); + + // Now pass modified request to OMBucketCreateRequest and check ugi and + // remote Address. + omBucketCreateRequest = new OMBucketCreateRequest(modifiedRequest); + + InetAddress remoteAddress = omBucketCreateRequest.getRemoteAddress(); + UserGroupInformation ugi = omBucketCreateRequest.createUGI(); + String hostName = omBucketCreateRequest.getHostName(); + + + // Now check we have original user info, remote address and hostname or not. + // Here from OMRequest user info, converted to UGI, InetAddress and String. + assertEquals(inetAddress.getHostAddress(), remoteAddress.getHostAddress()); + assertEquals(userGroupInformation.getUserName(), ugi.getUserName()); + assertEquals(inetAddress.getHostName(), hostName); + } } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java index 34f348a688dc..fdc13e369c08 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java @@ -19,16 +19,21 @@ package org.apache.hadoop.ozone.om.request.bucket; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; + +import java.util.ArrayList; import java.util.UUID; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.jupiter.api.Test; @@ -119,12 +124,10 @@ public void testBucketContainsIncompleteMPUs() throws Exception { new OMBucketDeleteRequest(omRequest); // Create a MPU key in the MPU table to simulate incomplete MPU - long 
creationTime = Time.now(); String uploadId = OMMultipartUploadUtils.getMultipartUploadId(); - final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, UUID.randomUUID().toString(), - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - 0L, creationTime, true); + final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, UUID.randomUUID().toString(), + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .build(); final OmMultipartKeyInfo multipartKeyInfo = OMRequestTestUtils. createOmMultipartKeyInfo(uploadId, Time.now(), HddsProtos.ReplicationType.RATIS, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java index 275e8a6f2aae..7af60c18d94a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java @@ -27,7 +27,7 @@ import java.util.stream.Collectors; import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -60,6 +60,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -297,8 +298,7 @@ public void testValidateAndUpdateCacheWithSubDirectoryInPath() omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - keyName.substring(0, 12), 1L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + keyName.substring(0, 12), 1L, RatisReplicationConfig.getInstance(ONE), omMetadataManager); OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, keyName); OMDirectoryCreateRequest omDirectoryCreateRequest = @@ -340,7 +340,7 @@ public void testValidateAndUpdateCacheWithDirectoryAlreadyExists() OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, OzoneFSUtils.addTrailingSlashIfNeeded(keyName), 1L, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, + RatisReplicationConfig.getInstance(ONE), omMetadataManager); OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, keyName); @@ -383,8 +383,7 @@ public void testValidateAndUpdateCacheWithFilesInPath() throws Exception { omMetadataManager); // Add a key with first two levels. 
OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - keyName.substring(0, 11), 1L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + keyName.substring(0, 11), 1L, RatisReplicationConfig.getInstance(ONE), omMetadataManager); OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, keyName); OMDirectoryCreateRequest omDirectoryCreateRequest = diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java index 0eceb2246ee2..e0460ba81a99 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java @@ -19,8 +19,8 @@ package org.apache.hadoop.ozone.om.request.file; import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneAcl; @@ -59,6 +59,7 @@ import java.util.UUID; import java.util.stream.Collectors; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -422,8 +423,7 @@ public void testValidateAndUpdateCacheWithFilesInPath() throws Exception { // Add a file into the FileTable, this is to simulate "file exists" check. OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, objID++); + bucketName, keyName, RatisReplicationConfig.getInstance(THREE)).setObjectID(objID++).build(); final long volumeId = omMetadataManager.getVolumeId(volumeName); final long bucketId = omBucketInfo.getObjectID(); @@ -492,21 +492,22 @@ public void testValidateAndUpdateCacheWithFileExistsInGivenPath() // for index=0, parentID is bucketID OmDirectoryInfo omDirInfo = OMRequestTestUtils.createOmDirectoryInfo( - dirs.get(0), objID++, parentID); + dirs.get(0), objID++, parentID); OMRequestTestUtils.addDirKeyToDirTable(true, omDirInfo, - volumeName, bucketName, txnID, omMetadataManager); + volumeName, bucketName, txnID, omMetadataManager); parentID = omDirInfo.getObjectID(); // Add a key in second level. 
- OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, objID); + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(THREE)) + .setObjectID(objID) + .build(); final long volumeId = omMetadataManager.getVolumeId(volumeName); final long bucketId = omBucketInfo.getObjectID(); final String ozoneKey = omMetadataManager.getOzonePathKey( - volumeId, bucketId, parentID, dirs.get(1)); + volumeId, bucketId, parentID, dirs.get(1)); ++txnID; omMetadataManager.getKeyTable(getBucketLayout()) .addCacheEntry(new CacheKey<>(ozoneKey), diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java index b39068fd7341..20da9d3e5dcc 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java @@ -24,8 +24,13 @@ import java.util.UUID; import java.util.stream.Collectors; +import org.apache.hadoop.crypto.CipherSuite; +import org.apache.hadoop.crypto.CryptoProtocolVersion; +import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -57,6 +62,9 @@ import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** * Tests OMFileCreateRequest. 
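A few files above, TestOMClientRequestWithUserInfo swaps JMockit's MockUp for Mockito's MockedStatic when stubbing the static caller-context accessors on org.apache.hadoop.ipc.Server. Reduced to its essentials, with the class fixtures replaced by locally created stand-ins, the pattern looks like this; the try-with-resources scope is what confines the static stubbing to the test body:

    import java.net.InetAddress;
    import org.apache.hadoop.ipc.Server;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.junit.jupiter.api.Test;
    import org.mockito.MockedStatic;
    import static org.mockito.Mockito.mockStatic;

    class RemoteCallerStubExample {
      @Test
      void stubsRemoteCaller() {
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser("testuser");
        InetAddress address = InetAddress.getLoopbackAddress();
        try (MockedStatic<Server> rpcServer = mockStatic(Server.class)) {
          rpcServer.when(Server::getRemoteUser).thenReturn(ugi);
          rpcServer.when(Server::getRemoteIp).thenReturn(address);
          rpcServer.when(Server::getRemoteAddress).thenReturn(address.toString());
          // Code exercised here sees the stubbed user, IP and address; the stubbing is
          // rolled back automatically when the try block closes.
        }
      }
    }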
@@ -190,7 +198,7 @@ public void testValidateAndUpdateCacheWithNamespaceQuotaExceeded() .setBucketName(bucketName) .setBucketLayout(getBucketLayout()) .setQuotaInNamespace(1)); - + OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest); OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager); @@ -201,6 +209,44 @@ public void testValidateAndUpdateCacheWithNamespaceQuotaExceeded() OzoneManagerProtocolProtos.Status.QUOTA_EXCEEDED); } + @Test + public void testValidateAndUpdateEncryption() throws Exception { + KeyProviderCryptoExtension.EncryptedKeyVersion eKV = + KeyProviderCryptoExtension.EncryptedKeyVersion.createForDecryption( + "key1", "v1", new byte[0], new byte[0]); + KeyProviderCryptoExtension mockKeyProvider = mock(KeyProviderCryptoExtension.class); + when(mockKeyProvider.generateEncryptedKey(any())).thenReturn(eKV); + + when(ozoneManager.getKmsProvider()).thenReturn(mockKeyProvider); + keyName = "test/" + keyName; + OMRequest omRequest = createFileRequest(volumeName, bucketName, keyName, + HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS, + false, true); + + // add volume and create bucket with bucket encryption key + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, omMetadataManager, + OmBucketInfo.newBuilder().setVolumeName(volumeName) + .setBucketName(bucketName) + .setBucketLayout(getBucketLayout()) + .setBucketEncryptionKey( + new BucketEncryptionKeyInfo.Builder() + .setKeyName("key1") + .setSuite(mock(CipherSuite.class)) + .setVersion(mock(CryptoProtocolVersion.class)) + .build())); + + OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest); + OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager); + + OMFileCreateRequest omFileCreateRequestPreExecuted = getOMFileCreateRequest(modifiedOmRequest); + OMClientResponse omClientResponse = omFileCreateRequestPreExecuted + .validateAndUpdateCache(ozoneManager, 100L); + assertEquals( + OzoneManagerProtocolProtos.Status.OK, omClientResponse.getOMResponse().getStatus()); + assertTrue(omClientResponse.getOMResponse().getCreateFileResponse().getKeyInfo().hasFileEncryptionInfo()); + when(ozoneManager.getKmsProvider()).thenReturn(null); + } + @Test public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception { OMRequest omRequest = createFileRequest(volumeName, bucketName, keyName, @@ -243,19 +289,17 @@ public void testValidateAndUpdateCacheWithNonRecursive() throws Exception { testNonRecursivePath(UUID.randomUUID().toString(), false, false, false); testNonRecursivePath("a/b", false, false, true); + ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE); // Create some child keys for the path OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/c/d", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "a/b/c/d", 0L, replicationConfig, omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/c/", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "a/b/c/", 0L, replicationConfig, omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "a/b/", 0L, replicationConfig, omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/", 0L, 
HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "a/", 0L, replicationConfig, omMetadataManager); // cannot create file if directory of same name exists testNonRecursivePath("a/b/c", false, false, true); @@ -275,14 +319,14 @@ public void testValidateAndUpdateCacheWithRecursive() throws Exception { // Should be able to create file even if parent directories does not // exist and key already exist, as this is with overwrite enabled. testNonRecursivePath(UUID.randomUUID().toString(), false, false, false); + ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "c/d/e/f", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "c/d/e/f", 0L, replicationConfig, omMetadataManager); testNonRecursivePath("c/d/e/f", true, true, false); // Create some child keys for the path OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/c/d", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "a/b/c/d", 0L, replicationConfig, omMetadataManager); testNonRecursivePath("a/b/c", false, true, false); } @@ -293,16 +337,17 @@ public void testValidateAndUpdateCacheWithRecursiveAndOverWrite() String key = "c/d/e/f"; // Should be able to create file even if parent directories does not exist testNonRecursivePath(key, false, true, false); - + // 3 parent directory created c/d/e assertEquals(omMetadataManager.getBucketTable().get( omMetadataManager.getBucketKey(volumeName, bucketName)) .getUsedNamespace(), 3); - + // Add the key to key table + ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - key, 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + key, 0L, replicationConfig, omMetadataManager); // Even if key exists, should be able to create file as overwrite is set // to true @@ -315,23 +360,21 @@ public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite() throws Exception { String key = "c/d/e/f"; + ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE); // Need to add the path which starts with "c/d/e" to keyTable as this is // non-recursive parent should exist. 
OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "c/", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "c/", 0L, replicationConfig, omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "c/d/", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "c/d/", 0L, replicationConfig, omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "c/d/e/", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "c/d/e/", 0L, replicationConfig, omMetadataManager); testNonRecursivePath(key, false, false, false); // Add the key to key table OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - key, 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + key, 0L, replicationConfig, omMetadataManager); // Even if key exists, should be able to create file as overwrite is set // to true @@ -449,10 +492,10 @@ protected void verifyInheritAcls(List dirs, OmKeyInfo omKeyInfo, if (indx == dirs.size() - 1) { // verify file acls assertEquals(omDirInfo.getObjectID(), omKeyInfo.getParentObjectID()); - List fileAcls = omDirInfo.getAcls(); + List fileAcls = omKeyInfo.getAcls(); System.out.println(" file acls : " + omKeyInfo + " ==> " + fileAcls); assertEquals(expectedInheritAcls.stream() - .map(acl -> acl.setAclScope(OzoneAcl.AclScope.ACCESS)) + .map(acl -> acl.withScope(OzoneAcl.AclScope.ACCESS)) .collect(Collectors.toList()), fileAcls, "Failed to inherit parent DEFAULT acls!"); } @@ -471,7 +514,7 @@ protected void verifyInheritAcls(List dirs, OmKeyInfo omKeyInfo, // Should inherit parent DEFAULT acls // [user:newUser:rw[ACCESS], group:newGroup:rwl[ACCESS]] assertEquals(parentDefaultAcl.stream() - .map(acl -> acl.setAclScope(OzoneAcl.AclScope.ACCESS)) + .map(acl -> acl.withScope(OzoneAcl.AclScope.ACCESS)) .collect(Collectors.toList()), keyAcls, "Failed to inherit bucket DEFAULT acls!"); // Should not inherit parent ACCESS acls diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java index 1b7b7452c82c..e988949c5b85 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.request.file; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -28,11 +29,11 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Time; import org.junit.jupiter.api.Test; import java.util.UUID; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -55,8 +56,7 @@ public void 
testValidateAndUpdateCacheWithNonRecursive() throws Exception { "a/b/c", omMetadataManager); String fileNameD = "d"; OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/c/" + fileNameD, 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "a/b/c/" + fileNameD, 0L, RatisReplicationConfig.getInstance(ONE), omMetadataManager); // cannot create file if directory of same name exists testNonRecursivePath("a/b/c", false, false, true); @@ -80,7 +80,7 @@ public void testValidateAndUpdateCacheWithNonRecursive() throws Exception { public void testValidateAndUpdateCacheWithNamespaceQuotaExceeded() throws Exception { OMRequest omRequest = createFileRequest(volumeName, bucketName, - "/test/a1/a2", HddsProtos.ReplicationFactor.ONE, + "/test/a1/a2", ONE, HddsProtos.ReplicationType.RATIS, false, true); // create bucket with quota limit 1 @@ -114,11 +114,11 @@ public void testValidateAndUpdateCacheWithRecursiveAndOverWrite() // Add the key to key table OmDirectoryInfo omDirInfo = getDirInfo("c/d/e"); OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - omDirInfo.getObjectID() + 10, - omDirInfo.getObjectID(), 100, Time.now()); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(omDirInfo.getObjectID() + 10) + .setParentObjectID(omDirInfo.getObjectID()) + .setUpdateID(100) + .build(); OMRequestTestUtils.addFileToKeyTable(false, false, "f", omKeyInfo, -1, omDirInfo.getObjectID() + 10, omMetadataManager); @@ -136,23 +136,22 @@ public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite() String fileName = "f"; String key = parentDir + "/" + fileName; OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager, getBucketLayout()); + omMetadataManager, getBucketLayout()); // Create parent dirs for the path long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName, - bucketName, parentDir, omMetadataManager); + bucketName, parentDir, omMetadataManager); // Need to add the path which starts with "c/d/e" to OpenKeyTable as this is // non-recursive parent should exist. 
testNonRecursivePath(key, false, false, false); OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - parentId + 1, - parentId, 100, Time.now()); - OMRequestTestUtils.addFileToKeyTable(false, false, - fileName, omKeyInfo, -1, 50, omMetadataManager); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); + OMRequestTestUtils.addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50, omMetadataManager); // Even if key exists in KeyTable, should be able to create file as // overwrite is set to true diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java index 5757beeb282d..59d3e211efdb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.request.file; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -45,7 +46,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockRequest; -import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; @@ -423,7 +423,8 @@ protected OMRequest createAllocateBlockRequest(String volumeName, String bucketN KeyArgs keyArgs = KeyArgs.newBuilder() .setVolumeName(volumeName).setBucketName(bucketName) .setKeyName(keyName) - .setFactor(replicationFactor).setType(replicationType) + .setFactor(((RatisReplicationConfig) replicationConfig).getReplicationFactor()) + .setType(replicationConfig.getReplicationType()) .build(); AllocateBlockRequest allocateBlockRequest = @@ -562,8 +563,9 @@ private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception { String addToOpenFileTable(List locationList, boolean hsyncFlag) throws Exception { OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor, 0, parentId, - 0, Time.now(), version); + bucketName, keyName, replicationConfig, new OmKeyLocationInfoGroup(version, new ArrayList<>(), false)) + .setParentObjectID(parentId) + .build(); omKeyInfo.appendNewBlocks(locationList, false); if (hsyncFlag) { omKeyInfo.getMetadata().put(OzoneConsts.HSYNC_CLIENT_ID, @@ -586,8 +588,9 @@ String addToOpenFileTable(List locationList, boolean hsyncFla String addToFileTable(List locationList, boolean hsyncFlag) throws Exception { OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor, 0, parentId, - 0, Time.now(), version); + bucketName, keyName, replicationConfig, new OmKeyLocationInfoGroup(version, new ArrayList<>(), 
false)) + .setParentObjectID(parentId) + .build(); omKeyInfo.appendNewBlocks(locationList, false); if (hsyncFlag) { omKeyInfo.getMetadata().put(OzoneConsts.HSYNC_CLIENT_ID, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java index eb99cd932568..9fb0e79953e1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.UUID; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -236,7 +237,8 @@ protected OMRequest createAllocateBlockRequest() { KeyArgs keyArgs = KeyArgs.newBuilder() .setVolumeName(volumeName).setBucketName(bucketName) .setKeyName(keyName) - .setFactor(replicationFactor).setType(replicationType) + .setFactor(((RatisReplicationConfig) replicationConfig).getReplicationFactor()) + .setType(replicationConfig.getReplicationType()) .build(); AllocateBlockRequest allocateBlockRequest = @@ -253,8 +255,8 @@ protected OMRequest createAllocateBlockRequest() { protected String addKeyToOpenKeyTable(String volumeName, String bucketName) throws Exception { OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, - keyName, clientID, replicationType, replicationFactor, - omMetadataManager); + keyName, clientID, replicationConfig, + omMetadataManager); return ""; } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java index 33512d355c0d..1ecbfed71624 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java @@ -20,10 +20,12 @@ package org.apache.hadoop.ozone.om.request.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.fail; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; @@ -31,7 +33,6 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; /** @@ -65,10 +66,11 @@ protected String addKeyToOpenKeyTable(String volumeName, String bucketName) long objectId = parentID + 1; OmKeyInfo omKeyInfoFSO = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, - Time.now()); + 
OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(objectId) + .setParentObjectID(parentID) + .setUpdateID(txnId) + .build(); // add key to openFileTable OMRequestTestUtils.addFileToKeyTable(true, false, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java index f040bd508177..cbb782e184fe 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java @@ -68,7 +68,7 @@ private List createAndDeleteKeys(Integer trxnIndex, String bucket) for (int i = 1; i <= numKeys; i++) { String key = keyName + "-" + i; OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucket, - key, clientID, replicationType, replicationFactor, trxnIndex++, + key, clientID, replicationConfig, trxnIndex++, omMetadataManager); String ozoneKey = omMetadataManager.getOzoneKey( volumeName, bucket, key); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java index c9559ff41e1f..b9aa70b4c7e8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java @@ -20,8 +20,12 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; + import java.util.List; import java.util.UUID; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -247,7 +251,7 @@ private OMRequest createSetAclKeyRequest(OzoneAcl acl) { protected String addKeyToTable() throws Exception { OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucketName, - keyName, clientID, replicationType, replicationFactor, 1L, + keyName, clientID, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), 1L, omMetadataManager); return omMetadataManager.getOzoneKey(volumeName, bucketName, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java index 48d92e608b3e..ea9c3223de5a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.ozone.om.request.key; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import 
org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -26,7 +26,8 @@ import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequestWithFSO; import org.apache.hadoop.ozone.om.request.key.acl.OMKeySetAclRequestWithFSO; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.util.Time; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; /** * Test Key ACL requests for prefix layout. @@ -44,20 +45,22 @@ protected String addKeyToTable() throws Exception { .addParentsToDirTable(volumeName, bucketName, parentDir, omMetadataManager); - OmKeyInfo omKeyInfo = OMRequestTestUtils - .createOmKeyInfo(volumeName, bucketName, key, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - parentId + 1, parentId, 100, Time.now()); + OmKeyInfo omKeyInfo = + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); OMRequestTestUtils .addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50, omMetadataManager); final long volumeId = omMetadataManager.getVolumeId( - omKeyInfo.getVolumeName()); + omKeyInfo.getVolumeName()); final long bucketId = omMetadataManager.getBucketId( - omKeyInfo.getVolumeName(), omKeyInfo.getBucketName()); + omKeyInfo.getVolumeName(), omKeyInfo.getBucketName()); return omMetadataManager.getOzonePathKey( - volumeId, bucketId, omKeyInfo.getParentObjectID(), - fileName); + volumeId, bucketId, omKeyInfo.getParentObjectID(), + fileName); } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java index 0f77194c88c7..1c38287f55e8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java @@ -26,7 +26,11 @@ import java.util.UUID; import java.util.stream.Collectors; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -52,10 +56,13 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.when; /** * Class tests OMKeyCommitRequest class. 
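In the TestOMKeyCommitRequest hunks around this point, the separate replicationType/replicationFactor test fields are replaced by a single replicationConfig, and the commit-key proto is rebuilt from it; the RATIS cast in the diff is safe because these tests only use Ratis replication. A condensed sketch of that conversion (the helper name and parameters are illustrative):

    // Deriving the proto type/factor for KeyArgs from a ReplicationConfig, as the updated test does.
    // Assumes a RATIS config; an EC config would have to be carried differently in the proto.
    static KeyArgs commitKeyArgs(String volumeName, String bucketName, String keyName,
        long dataSize, RatisReplicationConfig replicationConfig) {
      return KeyArgs.newBuilder()
          .setVolumeName(volumeName)
          .setBucketName(bucketName)
          .setKeyName(keyName)
          .setDataSize(dataSize)
          .setType(replicationConfig.getReplicationType())
          .setFactor(replicationConfig.getReplicationFactor())
          .build();
    }

The overwrite test in the hunk that follows additionally flushes the OMKeyCommitResponse through initBatchOperation()/commitBatchOperation() and then asserts that the entry written to the deletedTable does not simply end with the key's object ID, i.e. that the deleted-key suffix is uniquely generated.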
@@ -551,16 +558,17 @@ public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception { @Test public void testValidateAndUpdateCacheOnOverwrite() throws Exception { + when(ozoneManager.getObjectIdFromTxId(anyLong())).thenAnswer(tx -> + OmUtils.getObjectIdFromTxId(2, tx.getArgument(0))); testValidateAndUpdateCache(); // Become a new client and set next version number clientID = Time.now(); version += 1; - OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest()); + OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest(getKeyLocation(10).subList(4, 10), false)); - OMKeyCommitRequest omKeyCommitRequest = - getOmKeyCommitRequest(modifiedOmRequest); + OMKeyCommitRequest omKeyCommitRequest = getOmKeyCommitRequest(modifiedOmRequest); KeyArgs keyArgs = modifiedOmRequest.getCommitKeyRequest().getKeyArgs(); @@ -572,49 +580,54 @@ public void testValidateAndUpdateCacheOnOverwrite() throws Exception { assertNotNull(omKeyInfo); // Previously committed version - assertEquals(0L, - omKeyInfo.getLatestVersionLocations().getVersion()); + assertEquals(0L, omKeyInfo.getLatestVersionLocations().getVersion()); // Append new blocks List allocatedLocationList = - keyArgs.getKeyLocationsList().stream() - .map(OmKeyLocationInfo::getFromProtobuf) - .collect(Collectors.toList()); + keyArgs.getKeyLocationsList().stream() + .map(OmKeyLocationInfo::getFromProtobuf) + .collect(Collectors.toList()); addKeyToOpenKeyTable(allocatedLocationList); OMClientResponse omClientResponse = omKeyCommitRequest.validateAndUpdateCache(ozoneManager, 102L); - assertEquals(OzoneManagerProtocolProtos.Status.OK, - omClientResponse.getOMResponse().getStatus()); + assertEquals(OzoneManagerProtocolProtos.Status.OK, omClientResponse.getOMResponse().getStatus()); // New entry should be created in key Table. - omKeyInfo = - omMetadataManager.getKeyTable(omKeyCommitRequest.getBucketLayout()) - .get(ozoneKey); + omKeyInfo = omMetadataManager.getKeyTable(omKeyCommitRequest.getBucketLayout()).get(ozoneKey); assertNotNull(omKeyInfo); - assertEquals(version, - omKeyInfo.getLatestVersionLocations().getVersion()); + assertEquals(version, omKeyInfo.getLatestVersionLocations().getVersion()); // DB keyInfo format verifyKeyName(omKeyInfo); // Check modification time CommitKeyRequest commitKeyRequest = modifiedOmRequest.getCommitKeyRequest(); - assertEquals(commitKeyRequest.getKeyArgs().getModificationTime(), - omKeyInfo.getModificationTime()); + assertEquals(commitKeyRequest.getKeyArgs().getModificationTime(), omKeyInfo.getModificationTime()); // Check block location. 
List locationInfoListFromCommitKeyRequest = - commitKeyRequest.getKeyArgs() - .getKeyLocationsList().stream().map(OmKeyLocationInfo::getFromProtobuf) - .collect(Collectors.toList()); + commitKeyRequest.getKeyArgs().getKeyLocationsList().stream().map(OmKeyLocationInfo::getFromProtobuf) + .collect(Collectors.toList()); - assertEquals(locationInfoListFromCommitKeyRequest, - omKeyInfo.getLatestVersionLocations().getLocationList()); - assertEquals(allocatedLocationList, - omKeyInfo.getLatestVersionLocations().getLocationList()); + assertEquals(locationInfoListFromCommitKeyRequest, omKeyInfo.getLatestVersionLocations().getLocationList()); + assertEquals(allocatedLocationList, omKeyInfo.getLatestVersionLocations().getLocationList()); assertEquals(1, omKeyInfo.getKeyLocationVersions().size()); + + // flush response content to db + BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation(); + ((OMKeyCommitResponse) omClientResponse).addToDBBatch(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); + + // verify that the deleted key name is uniquely generated + String deletedKey = omMetadataManager.getOzoneKey(volumeName, omKeyInfo.getBucketName(), keyName); + List> rangeKVs + = omMetadataManager.getDeletedTable().getRangeKVs(null, 100, deletedKey); + assertThat(rangeKVs.size()).isGreaterThan(0); + assertEquals(1, rangeKVs.get(0).getValue().getOmKeyInfoList().size()); + assertFalse(rangeKVs.get(0).getKey().endsWith(rangeKVs.get(0).getValue().getOmKeyInfoList().get(0).getObjectID() + + "")); } /** @@ -682,7 +695,8 @@ private OMRequest createCommitKeyRequest( KeyArgs keyArgs = KeyArgs.newBuilder().setDataSize(dataSize).setVolumeName(volumeName) .setKeyName(keyName).setBucketName(bucketName) - .setType(replicationType).setFactor(replicationFactor) + .setType(replicationConfig.getReplicationType()) + .setFactor(((RatisReplicationConfig) replicationConfig).getReplicationFactor()) .addAllKeyLocations(keyLocations).build(); CommitKeyRequest commitKeyRequest = @@ -727,7 +741,7 @@ protected String getOzonePathKey() throws IOException { protected String addKeyToOpenKeyTable(List locationList) throws Exception { OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager, + clientID, replicationConfig, omMetadataManager, locationList, version); return omMetadataManager.getOpenKey(volumeName, bucketName, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java index d258c1cfde43..48cc52773a33 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java @@ -19,19 +19,22 @@ package org.apache.hadoop.ozone.om.request.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; import java.io.IOException; +import java.util.ArrayList; import java.util.List; /** @@ -78,10 +81,12 @@ protected String addKeyToOpenKeyTable(List locationList) long objectId = 100; OmKeyInfo omKeyInfoFSO = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100, - Time.now(), version); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(version, new ArrayList<>(), false)) + .setObjectID(objectId) + .setParentObjectID(parentID) + .setUpdateID(100L) + .build(); omKeyInfoFSO.appendNewBlocks(locationList, false); String fileName = OzoneFSUtils.getFileName(keyName); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java index 12d9d02a72d6..0790e2af3b67 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java @@ -25,12 +25,14 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; -import java.util.UUID; -import java.util.stream.Collectors; import java.util.Map; +import java.util.Collections; import java.util.HashMap; +import java.util.UUID; +import java.util.stream.Collectors; import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneAcl; @@ -40,14 +42,16 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; + +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.lock.OzoneLockProvider; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -65,6 +69,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS; import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.addVolumeAndBucketToDB; +import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.createOmKeyInfo; import static 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.NOT_A_FILE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; import static org.assertj.core.api.Assertions.assertThat; @@ -421,7 +426,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound( @MethodSource("data") public void testValidateAndUpdateCacheWithInvalidPath( boolean setKeyPathLock, boolean setFileSystemPaths) throws Exception { - PrefixManager prefixManager = new PrefixManagerImpl( + PrefixManager prefixManager = new PrefixManagerImpl(ozoneManager, ozoneManager.getMetadataManager(), true); when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); when(ozoneManager.getOzoneLockProvider()).thenReturn( @@ -463,6 +468,107 @@ public void testValidateAndUpdateCacheWithInvalidPath( assertNull(omKeyInfo); } + + @ParameterizedTest + @MethodSource("data") + public void testOverwritingExistingMetadata( + boolean setKeyPathLock, boolean setFileSystemPaths) throws Exception { + when(ozoneManager.getOzoneLockProvider()).thenReturn( + new OzoneLockProvider(setKeyPathLock, setFileSystemPaths)); + + addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, + getBucketLayout()); + + Map initialMetadata = + Collections.singletonMap("initialKey", "initialValue"); + OMRequest initialRequest = + createKeyRequest(false, 0, keyName, initialMetadata); + OMKeyCreateRequest initialOmKeyCreateRequest = + new OMKeyCreateRequest(initialRequest, getBucketLayout()); + OMClientResponse initialResponse = + initialOmKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L); + verifyMetadataInResponse(initialResponse, initialMetadata); + + // We have to add the key to the key table, as validateAndUpdateCache only + // updates the cache and not the DB. 
+ OmKeyInfo keyInfo = createOmKeyInfo(volumeName, bucketName, keyName, + replicationConfig).build(); + keyInfo.setMetadata(initialMetadata); + omMetadataManager.getKeyTable(initialOmKeyCreateRequest.getBucketLayout()) + .put(getOzoneKey(), keyInfo); + + Map updatedMetadata = + Collections.singletonMap("initialKey", "updatedValue"); + OMRequest updatedRequest = + createKeyRequest(false, 0, keyName, updatedMetadata); + OMKeyCreateRequest updatedOmKeyCreateRequest = + new OMKeyCreateRequest(updatedRequest, getBucketLayout()); + + OMClientResponse updatedResponse = + updatedOmKeyCreateRequest.validateAndUpdateCache(ozoneManager, 101L); + verifyMetadataInResponse(updatedResponse, updatedMetadata); + } + + @ParameterizedTest + @MethodSource("data") + public void testCreationWithoutMetadataFollowedByOverwriteWithMetadata( + boolean setKeyPathLock, boolean setFileSystemPaths) throws Exception { + when(ozoneManager.getOzoneLockProvider()).thenReturn( + new OzoneLockProvider(setKeyPathLock, setFileSystemPaths)); + addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, + getBucketLayout()); + + // Create the key request without any initial metadata + OMRequest createRequestWithoutMetadata = createKeyRequest(false, 0, keyName, + null); // Passing 'null' for metadata + OMKeyCreateRequest createOmKeyCreateRequest = + new OMKeyCreateRequest(createRequestWithoutMetadata, getBucketLayout()); + + // Perform the create operation without any metadata + OMClientResponse createResponse = + createOmKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L); + // Verify that no metadata exists in the response + assertThat( + createResponse.getOMResponse().getCreateKeyResponse().getKeyInfo() + .getMetadataList()).isEmpty(); + + OmKeyInfo keyInfo = createOmKeyInfo(volumeName, bucketName, keyName, + replicationConfig).build(); + omMetadataManager.getKeyTable(createOmKeyCreateRequest.getBucketLayout()) + .put(getOzoneKey(), keyInfo); + + // Define new metadata for the overwrite operation + Map overwriteMetadata = new HashMap<>(); + overwriteMetadata.put("newKey", "newValue"); + + // Overwrite the previously created key with new metadata + OMRequest overwriteRequestWithMetadata = + createKeyRequest(false, 0, keyName, overwriteMetadata); + OMKeyCreateRequest overwriteOmKeyCreateRequest = + new OMKeyCreateRequest(overwriteRequestWithMetadata, getBucketLayout()); + + // Perform the overwrite operation and capture the response + OMClientResponse overwriteResponse = + overwriteOmKeyCreateRequest.validateAndUpdateCache(ozoneManager, 101L); + // Verify the new metadata is correctly applied in the response + verifyMetadataInResponse(overwriteResponse, overwriteMetadata); + } + + + private void verifyMetadataInResponse(OMClientResponse response, + Map expectedMetadata) { + // Extract metadata from the response + List metadataList = + response.getOMResponse().getCreateKeyResponse().getKeyInfo() + .getMetadataList(); + assertEquals(expectedMetadata.size(), metadataList.size()); + metadataList.forEach(kv -> { + String expectedValue = expectedMetadata.get(kv.getKey()); + assertEquals(expectedValue, kv.getValue(), + "Metadata value mismatch for key: " + kv.getKey()); + }); + } + /** * This method calls preExecute and verify the modified request. 
* @param originalOMRequest @@ -542,24 +648,55 @@ protected OMRequest createKeyRequest(boolean isMultipartKey, int partNumber) { private OMRequest createKeyRequest(boolean isMultipartKey, int partNumber, String keyName) { + return createKeyRequest(isMultipartKey, partNumber, keyName, null); + } + /** + * Create OMRequest which encapsulates a CreateKeyRequest, optionally + * with metadata. + * + * @param isMultipartKey Indicates if the key is part of a multipart upload. + * @param partNumber The part number for multipart uploads, ignored if + * isMultipartKey is false. + * @param keyName The name of the key to create or update. + * @param metadata Optional metadata for the key. Pass null or an empty + * map if no metadata is to be set. + * @return OMRequest configured with the provided parameters. + */ + protected OMRequest createKeyRequest(boolean isMultipartKey, int partNumber, + String keyName, + Map metadata) { KeyArgs.Builder keyArgs = KeyArgs.newBuilder() - .setVolumeName(volumeName).setBucketName(bucketName) - .setKeyName(keyName).setIsMultipartKey(isMultipartKey) - .setFactor(replicationFactor).setType(replicationType) + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setIsMultipartKey(isMultipartKey) + .setFactor( + ((RatisReplicationConfig) replicationConfig).getReplicationFactor()) + .setType(replicationConfig.getReplicationType()) .setLatestVersionLocation(true); + // Configure for multipart upload, if applicable if (isMultipartKey) { keyArgs.setDataSize(dataSize).setMultipartNumber(partNumber); } + // Include metadata, if provided + if (metadata != null && !metadata.isEmpty()) { + metadata.forEach((key, value) -> keyArgs.addMetadata(KeyValue.newBuilder() + .setKey(key) + .setValue(value) + .build())); + } + OzoneManagerProtocolProtos.CreateKeyRequest createKeyRequest = CreateKeyRequest.newBuilder().setKeyArgs(keyArgs).build(); return OMRequest.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.CreateKey) .setClientId(UUID.randomUUID().toString()) - .setCreateKeyRequest(createKeyRequest).build(); + .setCreateKeyRequest(createKeyRequest) + .build(); } private OMRequest createKeyRequest( @@ -783,7 +920,7 @@ private void verifyKeyInheritAcls(List keyAcls, // Should inherit parent DEFAULT Acls assertEquals(parentDefaultAcl.stream() - .map(acl -> acl.setAclScope(OzoneAcl.AclScope.ACCESS)) + .map(acl -> acl.withScope(OzoneAcl.AclScope.ACCESS)) .collect(Collectors.toList()), keyAcls, "Failed to inherit parent DEFAULT acls!,"); @@ -793,7 +930,7 @@ private void verifyKeyInheritAcls(List keyAcls, protected void addToKeyTable(String keyName) throws Exception { OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - keyName.substring(1), 0L, RATIS, THREE, omMetadataManager); + keyName.substring(1), 0L, RatisReplicationConfig.getInstance(THREE), omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java index 0750c9512618..2a25a9b09686 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java @@ -18,8 +18,8 @@ package org.apache.hadoop.ozone.om.request.key; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import 
org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; @@ -31,7 +31,6 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.util.Time; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -42,6 +41,7 @@ import java.util.Arrays; import java.util.Collection; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -107,12 +107,13 @@ protected void addToKeyTable(String keyName) throws Exception { Path keyPath = Paths.get(keyName); long parentId = checkIntermediatePaths(keyPath); String fileName = OzoneFSUtils.getFileName(keyName); - OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, fileName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, parentId + 1, parentId, 100, - Time.now()); - OMRequestTestUtils.addFileToKeyTable(false, false, - fileName, omKeyInfo, -1, 50, omMetadataManager); + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, fileName, + RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); + OMRequestTestUtils.addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50, omMetadataManager); } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java index 00d1883d749c..9f1bee28c047 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java @@ -192,8 +192,8 @@ protected String addKeyToTable() throws Exception { protected String addKeyToTable(String key) throws Exception { OMRequestTestUtils.addKeyToTable(false, volumeName, - bucketName, key, clientID, replicationType, replicationFactor, - omMetadataManager); + bucketName, key, clientID, replicationConfig, + omMetadataManager); return omMetadataManager.getOzoneKey(volumeName, bucketName, key); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java index 9dafab090295..07094ad2923f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java @@ -18,13 +18,14 @@ package org.apache.hadoop.ozone.om.request.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; import static 
org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.OzonePrefixPathImpl; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -33,7 +34,6 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.security.acl.OzonePrefixPath; -import org.apache.hadoop.util.Time; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; @@ -72,11 +72,11 @@ protected String addKeyToTable() throws Exception { bucketName, PARENT_DIR, omMetadataManager); OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_KEY, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - parentId + 1, - parentId, 100, Time.now()); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_KEY, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); omKeyInfo.setKeyName(FILE_NAME); OMRequestTestUtils.addFileToKeyTable(false, false, FILE_NAME, omKeyInfo, -1, 50, omMetadataManager); @@ -96,11 +96,11 @@ protected String addKeyToDirTable(String volumeName, String bucketName, bucketName, key, omMetadataManager); OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - parentId + 1, - parentId, 100, Time.now()); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); omKeyInfo.setKeyName(key); return omKeyInfo.getPath(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java index a1d616c07563..a912f549b3ce 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java @@ -23,7 +23,6 @@ import java.util.List; import java.util.UUID; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -31,7 +30,6 @@ import org.apache.hadoop.ozone.om.request.snapshot.TestOMSnapshotCreateRequest; import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.junit.jupiter.api.Test; import org.apache.hadoop.ozone.om.response.key.OMKeyPurgeResponse; @@ -44,7 +42,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.hdds.utils.db.BatchOperation; 
-import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -76,7 +73,7 @@ private List createAndDeleteKeys(Integer trxnIndex, String bucket) for (int i = 1; i <= numKeys; i++) { String key = keyName + "-" + i; OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucket, - key, clientID, replicationType, replicationFactor, trxnIndex++, + key, clientID, replicationConfig, trxnIndex++, omMetadataManager); ozoneKeyNames.add(omMetadataManager.getOzoneKey( volumeName, bucket, key)); @@ -219,13 +216,12 @@ public void testKeyPurgeInSnapshot() throws Exception { .setName("snap1") .build(); - ReferenceCounted rcOmSnapshot = - ozoneManager.getOmSnapshotManager().checkForSnapshot( + ReferenceCounted rcOmSnapshot = + ozoneManager.getOmSnapshotManager().getSnapshot( fromSnapshotInfo.getVolumeName(), fromSnapshotInfo.getBucketName(), - getSnapshotPrefix(fromSnapshotInfo.getName()), - true); - OmSnapshot omSnapshot = (OmSnapshot) rcOmSnapshot.get(); + fromSnapshotInfo.getName()); + OmSnapshot omSnapshot = rcOmSnapshot.get(); // The keys should be present in the snapshot's deletedTable for (String deletedKey : deletedKeyNames) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java index a6015870d09b..0a2dcfd5d67a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java @@ -240,7 +240,7 @@ protected OMRequest createRenameKeyRequest( protected OmKeyInfo getOmKeyInfo(String keyName) { return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor, 0L); + replicationConfig).build(); } protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java index c91b8e158214..40c5156b5dbe 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java @@ -18,12 +18,14 @@ package org.apache.hadoop.ozone.om.request.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertThrows; + import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -37,7 +39,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeyRequest; -import org.apache.hadoop.util.Time; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -179,10 +180,10 @@ private OMRequest doPreExecute(OMRequest originalOmRequest) throws Exception { @Override protected OmKeyInfo getOmKeyInfo(String keyName) { long bucketId = random.nextLong(); - return OMRequestTestUtils.createOmKeyInfo( - volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - bucketId + 100L, bucketId + 101L, 0L, Time.now()); + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(bucketId + 100L) + .setParentObjectID(bucketId + 101L) + .build(); } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java index 4dfb3c67c963..e5c3f19b8506 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java @@ -25,6 +25,7 @@ import java.util.UUID; import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; @@ -44,7 +45,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer; @@ -58,7 +58,6 @@ import org.apache.hadoop.hdds.client.ContainerBlockID; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -115,8 +114,7 @@ public class TestOMKeyRequest { protected String volumeName; protected String bucketName; protected String keyName; - protected HddsProtos.ReplicationType replicationType; - protected HddsProtos.ReplicationFactor replicationFactor; + protected ReplicationConfig replicationConfig; protected long clientID; protected long scmBlockSize = 1000L; protected long dataSize; @@ -177,7 +175,7 @@ public void setup() throws Exception { when(ozoneManager.getAccessAuthorizer()) .thenReturn(new OzoneNativeAuthorizer()); - ReferenceCounted rcOmMetadataReader = + ReferenceCounted rcOmMetadataReader = mock(ReferenceCounted.class); when(ozoneManager.getOmMetadataReader()).thenReturn(rcOmMetadataReader); // Init OmMetadataReader to let the test pass @@ -220,8 +218,7 @@ public void setup() throws Exception { volumeName = UUID.randomUUID().toString(); bucketName = UUID.randomUUID().toString(); keyName = 
UUID.randomUUID().toString(); - replicationFactor = HddsProtos.ReplicationFactor.ONE; - replicationType = HddsProtos.ReplicationType.RATIS; + replicationConfig = RatisReplicationConfig.getInstance(ReplicationFactor.ONE); clientID = Time.now(); dataSize = 1000L; random = new Random(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java index d48131de4bd3..d0cfd48e35dc 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -31,6 +31,7 @@ import java.util.List; import java.util.UUID; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_DELETE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -145,8 +146,7 @@ protected void createPreRequisites() throws Exception { for (int i = 0; i < count; i++) { key = parentDir.concat("/key" + i); OMRequestTestUtils.addKeyToTableCache(volumeName, bucketName, - parentDir.concat("/key" + i), HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, omMetadataManager); + parentDir.concat("/key" + i), RatisReplicationConfig.getInstance(THREE), omMetadataManager); deleteKeyArgs.addKeys(key); deleteKeyList.add(key); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequestWithFSO.java index f28ca2e2685f..2da80550275a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequestWithFSO.java @@ -17,17 +17,17 @@ */ package org.apache.hadoop.ozone.om.request.key; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.util.Time; import org.junit.jupiter.api.Test; import java.util.ArrayList; import java.util.UUID; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys; /** @@ -83,11 +83,13 @@ protected void createPreRequisites() throws Exception { long parentId = OMRequestTestUtils .addParentsToDirTable(volumeName, bucketName, dir, omMetadataManager); - OmKeyInfo omKeyInfo = 
OMRequestTestUtils - .createOmKeyInfo(volumeName, bucketName, dir + "/" + file, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, parentId + 1, parentId, 100, - Time.now()); + OmKeyInfo omKeyInfo = + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, dir + "/" + file, + RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); omKeyInfo.setKeyName(file); OMRequestTestUtils .addFileToKeyTable(false, false, file, omKeyInfo, -1, 50, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java index 3d429f4d6847..340b6e36eb0b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java @@ -18,12 +18,14 @@ package org.apache.hadoop.ozone.om.request.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -127,8 +129,7 @@ private OMRequest createRenameKeyRequest(Boolean isIllegal) throws Exception { String key = parentDir.concat("/key" + i); String toKey = parentDir.concat("/newKey" + i); OMRequestTestUtils.addKeyToTableCache(volumeName, bucketName, - parentDir.concat("/key" + i), HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, omMetadataManager); + parentDir.concat("/key" + i), RatisReplicationConfig.getInstance(THREE), omMetadataManager); RenameKeysMap.Builder renameKey = RenameKeysMap.newBuilder() .setFromKeyName(key) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java index 9c5a9257245f..8671ff107131 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java @@ -50,7 +50,7 @@ public class TestOMPrefixAclRequest extends TestOMKeyRequest { @Test public void testAddAclRequest() throws Exception { - PrefixManagerImpl prefixManager = new PrefixManagerImpl( + PrefixManagerImpl prefixManager = new PrefixManagerImpl(ozoneManager, ozoneManager.getMetadataManager(), true); when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); String prefixName = UUID.randomUUID() + OZONE_URI_DELIMITER; @@ -116,7 +116,7 @@ public void testAddAclRequest() throws Exception { @Test public void testValidationFailure() { - PrefixManagerImpl prefixManager = new PrefixManagerImpl( + PrefixManagerImpl prefixManager = new PrefixManagerImpl(ozoneManager, 
ozoneManager.getMetadataManager(), true); when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); @@ -128,7 +128,7 @@ public void testValidationFailure() { ); OMClientResponse response1 = invalidRequest1.validateAndUpdateCache(ozoneManager, 1); - assertEquals(OzoneManagerProtocolProtos.Status.PREFIX_NOT_FOUND, + assertEquals(OzoneManagerProtocolProtos.Status.INVALID_PATH_IN_ACL_REQUEST, response1.getOMResponse().getStatus()); // Not a valid FS path @@ -143,7 +143,7 @@ public void testValidationFailure() { @Test public void testRemoveAclRequest() throws Exception { - PrefixManagerImpl prefixManager = new PrefixManagerImpl( + PrefixManagerImpl prefixManager = new PrefixManagerImpl(ozoneManager, ozoneManager.getMetadataManager(), true); when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); String prefixName = UUID.randomUUID() + OZONE_URI_DELIMITER; @@ -223,7 +223,7 @@ public void testRemoveAclRequest() throws Exception { @Test public void testSetAclRequest() throws Exception { - PrefixManagerImpl prefixManager = new PrefixManagerImpl( + PrefixManagerImpl prefixManager = new PrefixManagerImpl(ozoneManager, ozoneManager.getMetadataManager(), true); when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); String prefixName = UUID.randomUUID() + OZONE_URI_DELIMITER; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequest.java index bfae424cc954..ad834fa556bf 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequest.java @@ -100,7 +100,7 @@ private OMRequest createSetTimesKeyRequest(long mtime, long atime) { protected String addKeyToTable() throws Exception { OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucketName, - keyName, clientID, replicationType, replicationFactor, 1L, + keyName, clientID, replicationConfig, 1L, omMetadataManager); return omMetadataManager.getOzoneKey(volumeName, bucketName, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequestWithFSO.java index 2cd9273c25a5..0960125b0575 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequestWithFSO.java @@ -17,17 +17,17 @@ */ package org.apache.hadoop.ozone.om.request.key; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.junit.jupiter.api.Test; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.util.Time; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; import static 
org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -115,10 +115,13 @@ protected String addKeyToTable() throws Exception { .addParentsToDirTable(volumeName, bucketName, PARENT_DIR, omMetadataManager); - OmKeyInfo omKeyInfo = OMRequestTestUtils - .createOmKeyInfo(volumeName, bucketName, FILE_NAME, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - parentId + 1, parentId, 100, Time.now()); + OmKeyInfo omKeyInfo = + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_NAME, + RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); OMRequestTestUtils .addFileToKeyTable(false, false, FILE_NAME, omKeyInfo, -1, 50, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java index 25c908b18a2d..f02e1ee23679 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -27,14 +28,15 @@ import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertTrue; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.UniqueId; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -51,7 +53,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadsExpiredAbortRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.util.Time; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -480,10 +481,13 @@ private List createMPUsWithFSO(String volume, String bucket, commitMultipartRequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); // Add key to open key table to be used in MPU commit processing - OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume, - bucket, keyName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, parentID + j, parentID, - trxnLogIndex, Time.now(), true); + OmKeyInfo omKeyInfo = 
OMRequestTestUtils.createOmKeyInfo(volume, bucket, keyName, + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setObjectID(parentID + j) + .setParentObjectID(parentID) + .setUpdateID(trxnLogIndex) + .build(); + String fileName = OzoneFSUtils.getFileName(keyName); OMRequestTestUtils.addFileToKeyTable(true, false, fileName, omKeyInfo, clientID, trxnLogIndex, omMetadataManager); @@ -563,8 +567,7 @@ private List createMPUs(String volume, String bucket, // Add key to open key table to be used in MPU commit processing OMRequestTestUtils.addKeyToTable( true, true, - volume, bucket, keyName, clientID, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + volume, bucket, keyName, clientID, RatisReplicationConfig.getInstance(ONE), omMetadataManager); OMClientResponse commitResponse = s3MultipartUploadCommitPartRequest.validateAndUpdateCache( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java index 40b0c23e5a9e..a4c512b25aa7 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java @@ -224,21 +224,20 @@ private void verifyKeyInheritAcls(List keyAcls, List parentDefaultAcl = bucketAcls.stream() .filter(acl -> acl.getAclScope() == OzoneAcl.AclScope.DEFAULT) + .map(acl -> acl.withScope(OzoneAcl.AclScope.ACCESS)) .collect(Collectors.toList()); - OzoneAcl parentAccessAcl = bucketAcls.stream() + List parentAccessAcl = bucketAcls.stream() .filter(acl -> acl.getAclScope() == OzoneAcl.AclScope.ACCESS) - .findAny().orElse(null); + .collect(Collectors.toList()); // Should inherit parent DEFAULT Acls // [user:newUser:rw[DEFAULT], group:newGroup:rwl[DEFAULT]] - assertEquals(parentDefaultAcl.stream() - .map(acl -> acl.setAclScope(OzoneAcl.AclScope.ACCESS)) - .collect(Collectors.toList()), keyAcls, + assertEquals(parentDefaultAcl, keyAcls, "Failed to inherit parent DEFAULT acls!"); // Should not inherit parent ACCESS Acls - assertThat(keyAcls).doesNotContain(parentAccessAcl); + assertThat(keyAcls).doesNotContainAnyElementsOf(parentAccessAcl); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java index ab05f927e1d6..cbdea7572069 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java @@ -256,7 +256,7 @@ private void verifyKeyInheritAcls(List dirs, OmKeyInfo fileInfo, List fileAcls = fileInfo.getAcls(); System.out.println(" file acls : " + fileInfo + " ==> " + fileAcls); assertEquals(expectedInheritAcls.stream() - .map(acl -> acl.setAclScope(OzoneAcl.AclScope.ACCESS)) + .map(acl -> acl.withScope(OzoneAcl.AclScope.ACCESS)) .collect(Collectors.toList()), fileAcls, "Failed to inherit parent DEFAULT acls!"); } diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java index c01bb459b8f4..16cb9b6821a1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java @@ -39,7 +39,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataReader; import org.apache.hadoop.ozone.om.IOmMetadataReader; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; @@ -86,7 +85,7 @@ public void setup() throws Exception { when(ozoneManager.getMetrics()).thenReturn(omMetrics); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); auditLogger = mock(AuditLogger.class); - ReferenceCounted rcOmMetadataReader = + ReferenceCounted rcOmMetadataReader = mock(ReferenceCounted.class); when(ozoneManager.getOmMetadataReader()).thenReturn(rcOmMetadataReader); // Init OmMetadataReader to let the test pass diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java index 61c792a83de3..014b4e021cb3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java @@ -24,6 +24,8 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -224,9 +226,8 @@ public void testValidateAndUpdateCacheBucketFound() throws Exception { protected void addKeyToOpenKeyTable(String volumeName, String bucketName, String keyName, long clientID) throws Exception { - OMRequestTestUtils.addKeyToTable(true, true, volumeName, bucketName, - keyName, clientID, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + OMRequestTestUtils.addKeyToTable(true, true, volumeName, bucketName, + keyName, clientID, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), omMetadataManager); } protected String getKeyName() { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java index 4c8e4881d925..24480c249cc8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java @@ -24,15 +24,17 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.util.Time; import java.io.IOException; +import java.util.ArrayList; import java.util.UUID; /** @@ -68,13 +70,16 @@ protected String getKeyName() { protected void addKeyToOpenKeyTable(String volumeName, String bucketName, String keyName, long clientID) throws Exception { long txnLogId = 0L; - OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, parentID + 1, parentID, - txnLogId, Time.now(), true); + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), + new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setObjectID(parentID + 1) + .setParentObjectID(parentID) + .setUpdateID(txnLogId) + .build(); String fileName = OzoneFSUtils.getFileName(keyName); OMRequestTestUtils.addFileToKeyTable(true, false, - fileName, omKeyInfo, clientID, txnLogId, omMetadataManager); + fileName, omKeyInfo, clientID, txnLogId, omMetadataManager); } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java index 733c790bcf17..34e32b0e182a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -28,16 +29,17 @@ import java.util.List; import java.util.UUID; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.junit.jupiter.api.Test; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import 
org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -132,9 +134,14 @@ private String checkValidateAndUpdateCacheSuccess(String volumeName, List partList = new ArrayList<>(); - String partName = getPartName(volumeName, bucketName, keyName, - multipartUploadID, 1); - partList.add(Part.newBuilder().setPartName(partName).setPartNumber(1) + String eTag = s3MultipartUploadCommitPartRequest.getOmRequest() + .getCommitMultiPartUploadRequest() + .getKeyArgs() + .getMetadataList() + .stream() + .filter(keyValue -> keyValue.getKey().equals(OzoneConsts.ETAG)) + .findFirst().get().getValue(); + partList.add(Part.newBuilder().setETag(eTag).setPartName(eTag).setPartNumber(1) .build()); OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName, @@ -222,10 +229,10 @@ public void testInvalidPartOrderError() throws Exception { String partName = getPartName(volumeName, bucketName, keyName, multipartUploadID, 23); - partList.add(Part.newBuilder().setPartName(partName).setPartNumber(23).build()); + partList.add(Part.newBuilder().setETag(partName).setPartName(partName).setPartNumber(23).build()); partName = getPartName(volumeName, bucketName, keyName, multipartUploadID, 1); - partList.add(Part.newBuilder().setPartName(partName).setPartNumber(1).build()); + partList.add(Part.newBuilder().setETag(partName).setPartName(partName).setPartNumber(1).build()); OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName, bucketName, keyName, multipartUploadID, partList); @@ -315,8 +322,7 @@ public void testValidateAndUpdateCacheNoSuchMultipartUploadError() protected void addKeyToTable(String volumeName, String bucketName, String keyName, long clientID) throws Exception { OMRequestTestUtils.addKeyToTable(true, true, volumeName, bucketName, - keyName, clientID, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + keyName, clientID, RatisReplicationConfig.getInstance(ONE), omMetadataManager); } protected String getMultipartKey(String volumeName, String bucketName, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java index 5926b5fd1d9c..1762f38b44bd 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java @@ -18,18 +18,21 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertNotEquals; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; 
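In the TestS3MultipartUploadCompleteRequest hunks above, the part list is now built from the ETag that the commit request carries in its KeyArgs metadata under OzoneConsts.ETAG, and the test sets it via Part.newBuilder().setETag(eTag).setPartName(eTag).setPartNumber(1). A small sketch of that metadata lookup, assuming only the getters used in the hunk; returning Optional instead of calling get() directly is an illustrative variation:

```java
import java.util.Optional;

import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;

final class ETagLookupSketch {
  private ETagLookupSketch() {
  }

  // Same stream pipeline as the hunk above, but a missing ETag entry surfaces
  // as an empty Optional rather than a NoSuchElementException.
  static Optional<String> eTagOf(KeyArgs keyArgs) {
    return keyArgs.getMetadataList().stream()
        .filter(kv -> OzoneConsts.ETAG.equals(kv.getKey()))
        .map(kv -> kv.getValue())
        .findFirst();
  }
}
```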
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.util.Time; import java.io.IOException; +import java.util.ArrayList; import java.util.UUID; /** @@ -72,10 +75,12 @@ protected void addKeyToTable(String volumeName, String bucketName, long objectId = parentID + 1; OmKeyInfo omKeyInfoFSO = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, - Time.now(), true); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setObjectID(objectId) + .setParentObjectID(parentID) + .setUpdateID(txnId) + .build(); // add key to openFileTable String fileName = OzoneFSUtils.getFileName(keyName); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java index 45e5b1007531..a3e83986b531 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java @@ -18,8 +18,8 @@ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.audit.AuditLogger; @@ -52,6 +52,7 @@ import java.io.IOException; import java.util.UUID; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.getFromProtobuf; import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.getTableKey; import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.createSnapshotRequest; @@ -321,8 +322,9 @@ private void renameDir(String fromKey, String toKey, long offset) throws Exception { String fromKeyParentName = UUID.randomUUID().toString(); OmKeyInfo fromKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, fromKeyParentName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, 100L); + bucketName, fromKeyParentName, RatisReplicationConfig.getInstance(THREE)) + .setObjectID(100L) + .build(); OmKeyInfo toKeyInfo = addKey(toKey, offset + 4L); OmKeyInfo fromKeyInfo = addKey(fromKey, offset + 5L); @@ -381,8 +383,8 @@ public static OMSnapshotCreateRequest doPreExecute( private OmKeyInfo addKey(String keyName, long objectId) { return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, - objectId); + RatisReplicationConfig.getInstance(THREE)).setObjectID(objectId) + .build(); } protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java index ca737d2bd254..03dc7862e35a 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java @@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -110,8 +109,6 @@ public void setup() throws Exception { doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); OmSnapshotManager omSnapshotManager = mock(OmSnapshotManager.class); - when(omSnapshotManager.getSnapshotCache()) - .thenReturn(mock(SnapshotCache.class)); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); volumeName = UUID.randomUUID().toString(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java index a3b0dae46315..8edd096e766c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.om.IOmMetadataReader; @@ -37,7 +38,6 @@ import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotPurgeResponse; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotPurgeRequest; @@ -64,10 +64,12 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.INTERNAL_ERROR; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyString; @@ -78,8 +80,6 @@ * Tests OMSnapshotPurgeRequest class. 
*/ public class TestOMSnapshotPurgeRequestAndResponse { - - private BatchOperation batchOperation; private List checkpointPaths = new ArrayList<>(); private OzoneManager ozoneManager; @@ -115,7 +115,7 @@ void setup(@TempDir File testDir) throws Exception { when(ozoneManager.isAdmin(any())).thenReturn(true); when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); - ReferenceCounted rcOmMetadataReader = + ReferenceCounted rcOmMetadataReader = mock(ReferenceCounted.class); when(ozoneManager.getOmMetadataReader()).thenReturn(rcOmMetadataReader); omSnapshotManager = new OmSnapshotManager(ozoneManager); @@ -178,7 +178,6 @@ private void createSnapshotCheckpoint(String snapshotName) throws Exception { private void createSnapshotCheckpoint(String volume, String bucket, String snapshotName) throws Exception { - batchOperation = omMetadataManager.getStore().initBatchOperation(); OMRequest omRequest = OMRequestTestUtils .createSnapshotRequest(volume, bucket, snapshotName); // Pre-Execute OMSnapshotCreateRequest. @@ -189,9 +188,10 @@ private void createSnapshotCheckpoint(String volume, OMSnapshotCreateResponse omClientResponse = (OMSnapshotCreateResponse) omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); // Add to batch and commit to DB. - omClientResponse.addToDBBatch(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); - batchOperation.close(); + try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { + omClientResponse.addToDBBatch(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); + } String key = SnapshotInfo.getTableKey(volume, bucket, snapshotName); SnapshotInfo snapshotInfo = @@ -227,19 +227,35 @@ private void purgeSnapshots(OMRequest snapshotPurgeRequest) omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L); // Commit to DB. - batchOperation = omMetadataManager.getStore().initBatchOperation(); - omSnapshotPurgeResponse.checkAndUpdateDB(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); + try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { + omSnapshotPurgeResponse.checkAndUpdateDB(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); + } } @Test public void testValidateAndUpdateCache() throws Exception { + long initialSnapshotPurgeCount = omMetrics.getNumSnapshotPurges(); + long initialSnapshotPurgeFailCount = omMetrics.getNumSnapshotPurgeFails(); List snapshotDbKeysToPurge = createSnapshots(10); assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); OMRequest snapshotPurgeRequest = createPurgeKeysRequest( snapshotDbKeysToPurge); - purgeSnapshots(snapshotPurgeRequest); + + OMSnapshotPurgeRequest omSnapshotPurgeRequest = preExecute(snapshotPurgeRequest); + + OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse) + omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L); + + for (String snapshotTableKey: snapshotDbKeysToPurge) { + assertNull(omMetadataManager.getSnapshotInfoTable().get(snapshotTableKey)); + } + + try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { + omSnapshotPurgeResponse.checkAndUpdateDB(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); + } // Check if the entries are deleted. 
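The purge-test hunks above drop the long-lived batchOperation field in favour of try-with-resources around each commit. A sketch of that shape, assuming BatchOperation is AutoCloseable as the try-with-resources in the hunk implies; the helper name is illustrative:

```java
import java.io.IOException;

import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.response.OMClientResponse;

final class BatchCommitSketch {
  private BatchCommitSketch() {
  }

  // Same shape as the hunks above: the batch is scoped to one commit and is
  // closed even if addToDBBatch() or commitBatchOperation() throws, instead of
  // living in a test field that can leak when an assertion fails mid-test.
  static void commit(OMClientResponse response, OMMetadataManager metadataManager)
      throws IOException {
    try (BatchOperation batch = metadataManager.getStore().initBatchOperation()) {
      response.addToDBBatch(metadataManager, batch);
      metadataManager.getStore().commitBatchOperation(batch);
    }
  }
}
```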
assertTrue(omMetadataManager.getSnapshotInfoTable().isEmpty()); @@ -248,6 +264,36 @@ public void testValidateAndUpdateCache() throws Exception { for (Path checkpoint : checkpointPaths) { assertFalse(Files.exists(checkpoint)); } + assertEquals(initialSnapshotPurgeCount + 1, omMetrics.getNumSnapshotPurges()); + assertEquals(initialSnapshotPurgeFailCount, omMetrics.getNumSnapshotPurgeFails()); + } + + /** + * This test is mainly to validate metrics and error code. + */ + @Test + public void testValidateAndUpdateCacheFailure() throws Exception { + long initialSnapshotPurgeCount = omMetrics.getNumSnapshotPurges(); + long initialSnapshotPurgeFailCount = omMetrics.getNumSnapshotPurgeFails(); + + List snapshotDbKeysToPurge = createSnapshots(10); + + OmMetadataManagerImpl mockedMetadataManager = mock(OmMetadataManagerImpl.class); + Table mockedSnapshotInfoTable = mock(Table.class); + + when(mockedSnapshotInfoTable.get(anyString())).thenThrow(new IOException("Injected fault error.")); + when(mockedMetadataManager.getSnapshotInfoTable()).thenReturn(mockedSnapshotInfoTable); + when(ozoneManager.getMetadataManager()).thenReturn(mockedMetadataManager); + + OMRequest snapshotPurgeRequest = createPurgeKeysRequest(snapshotDbKeysToPurge); + OMSnapshotPurgeRequest omSnapshotPurgeRequest = preExecute(snapshotPurgeRequest); + + OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse) + omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L); + + assertEquals(INTERNAL_ERROR, omSnapshotPurgeResponse.getOMResponse().getStatus()); + assertEquals(initialSnapshotPurgeCount, omMetrics.getNumSnapshotPurges()); + assertEquals(initialSnapshotPurgeFailCount + 1, omMetrics.getNumSnapshotPurgeFails()); } // TODO: clean up: Do we this test after diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java new file mode 100644 index 000000000000..14af3e28b8b8 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java @@ -0,0 +1,359 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
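The new testValidateAndUpdateCacheFailure above forces the INTERNAL_ERROR path by mocking the snapshot-info table to throw on every read, then checks that only the failure metric moves. A sketch of just that arrangement, assuming ozoneManager is already a Mockito mock as in the test setup; names are illustrative:

```java
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.IOException;

import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;

final class SnapshotTableFaultInjection {
  private SnapshotTableFaultInjection() {
  }

  // Mirrors the arrangement in the failure test above: any snapshotInfoTable.get()
  // throws, so validateAndUpdateCache() has to translate the IOException into an
  // INTERNAL_ERROR response and bump the corresponding failure metric.
  @SuppressWarnings("unchecked")
  static void injectSnapshotTableFault(OzoneManager ozoneManager) throws IOException {
    OmMetadataManagerImpl faultyMetadataManager = mock(OmMetadataManagerImpl.class);
    Table<String, SnapshotInfo> faultySnapshotInfoTable = mock(Table.class);

    when(faultySnapshotInfoTable.get(anyString()))
        .thenThrow(new IOException("Injected fault error."));
    when(faultyMetadataManager.getSnapshotInfoTable()).thenReturn(faultySnapshotInfoTable);
    when(ozoneManager.getMetadataManager()).thenReturn(faultyMetadataManager);
  }
}
```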
+ */ +package org.apache.hadoop.ozone.om.request.snapshot; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.AuditMessage; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.util.Time; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.io.File; +import java.util.UUID; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; +import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.getFromProtobuf; +import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.getTableKey; +import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.createSnapshotRequest; +import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.renameSnapshotRequest; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.RenameSnapshot; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.framework; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Tests OMSnapshotRenameRequest class, which handles RenameSnapshot request. 
+ */ +public class TestOMSnapshotRenameRequest { + + @TempDir + private File anotherTempDir; + + private OzoneManager ozoneManager; + private OMMetrics omMetrics; + private OmMetadataManagerImpl omMetadataManager; + private BatchOperation batchOperation; + + private String volumeName; + private String bucketName; + private String snapshotName1; + private String snapshotName2; + + @BeforeEach + public void setup() throws Exception { + ozoneManager = mock(OzoneManager.class); + omMetrics = OMMetrics.create(); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + anotherTempDir.getAbsolutePath()); + ozoneConfiguration.set(OzoneConfigKeys.OZONE_METADATA_DIRS, + anotherTempDir.getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, + ozoneManager); + when(ozoneManager.getMetrics()).thenReturn(omMetrics); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + when(ozoneManager.isRatisEnabled()).thenReturn(true); + when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); + when(ozoneManager.isAdmin(any())).thenReturn(false); + when(ozoneManager.isOwner(any(), any())).thenReturn(false); + when(ozoneManager.getBucketOwner(any(), any(), + any(), any())).thenReturn("dummyBucketOwner"); + OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); + when(lvm.isAllowed(anyString())).thenReturn(true); + when(ozoneManager.getVersionManager()).thenReturn(lvm); + AuditLogger auditLogger = mock(AuditLogger.class); + when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); + doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); + batchOperation = omMetadataManager.getStore().initBatchOperation(); + when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); + OmSnapshotManager omSnapshotManager = new OmSnapshotManager(ozoneManager); + when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); + + volumeName = UUID.randomUUID().toString(); + bucketName = UUID.randomUUID().toString(); + snapshotName1 = UUID.randomUUID().toString(); + snapshotName2 = UUID.randomUUID().toString(); + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + } + + @AfterEach + public void stop() { + omMetrics.unRegister(); + framework().clearInlineMocks(); + if (batchOperation != null) { + batchOperation.close(); + } + } + + @ValueSource(strings = { + // '-' is allowed. + "9cdf0e8a-6946-41ad-a2d1-9eb724fab126", + // 3 chars name is allowed. + "sn1", + // less than or equal to 63 chars are allowed. + "snap75795657617173401188448010125899089001363595171500499231286" + }) + @ParameterizedTest + public void testPreExecute(String toSnapshotName) throws Exception { + when(ozoneManager.isOwner(any(), any())).thenReturn(true); + + String currentSnapshotName = "current"; + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, + bucketName, currentSnapshotName, toSnapshotName); + doPreExecute(omRequest); + } + + @ValueSource(strings = { + // ? is not allowed in snapshot name. + "a?b", + // only numeric name not allowed. + "1234", + // less than 3 chars are not allowed. + "s1", + // more than or equal to 64 chars are not allowed. + "snap156808943643007724443266605711479126926050896107709081166294", + // Underscore is not allowed. + "snap_1", + // CamelCase is not allowed. 
+ "NewSnapshot" + }) + @ParameterizedTest + public void testPreExecuteFailure(String toSnapshotName) { + when(ozoneManager.isOwner(any(), any())).thenReturn(true); + String currentSnapshotName = "current"; + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, + bucketName, currentSnapshotName, toSnapshotName); + OMException omException = + assertThrows(OMException.class, () -> doPreExecute(omRequest)); + assertEquals("Invalid snapshot name: " + toSnapshotName, + omException.getMessage()); + } + + @Test + public void testPreExecuteBadOwner() { + // Owner is not set for the request. + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, + bucketName, snapshotName1, snapshotName2); + + OMException omException = assertThrows(OMException.class, + () -> doPreExecute(omRequest)); + assertEquals("Only bucket owners and Ozone admins can rename snapshots", + omException.getMessage()); + } + + @Test + public void testValidateAndUpdateCache() throws Exception { + when(ozoneManager.isAdmin(any())).thenReturn(true); + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, + bucketName, snapshotName1, snapshotName2); + OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); + String key = getTableKey(volumeName, bucketName, snapshotName1); + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + + // Add a 1000-byte key to the bucket + OmKeyInfo key1 = addKey("key-testValidateAndUpdateCache", 12345L); + addKeyToTable(key1); + + OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get( + bucketKey); + long bucketDataSize = key1.getDataSize(); + long bucketUsedBytes = omBucketInfo.getUsedBytes(); + assertEquals(key1.getReplicatedSize(), bucketUsedBytes); + + // Value in cache should be null as of now. + assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); + + // Add key to cache. + SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(volumeName, bucketName, + snapshotName1, UUID.randomUUID(), Time.now()); + snapshotInfo.setReferencedSize(1000L); + snapshotInfo.setReferencedReplicatedSize(3 * 1000L); + assertEquals(SNAPSHOT_ACTIVE, snapshotInfo.getSnapshotStatus()); + omMetadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(key), + CacheValue.get(1L, snapshotInfo)); + + // Run validateAndUpdateCache. + OMClientResponse omClientResponse = + omSnapshotRenameRequest.validateAndUpdateCache(ozoneManager, 2L); + + assertNotNull(omClientResponse.getOMResponse()); + + OzoneManagerProtocolProtos.OMResponse omResponse = omClientResponse.getOMResponse(); + assertNotNull(omResponse.getRenameSnapshotResponse()); + assertEquals(RenameSnapshot, omResponse.getCmdType()); + assertEquals(OK, omResponse.getStatus()); + + // verify table data with response data. 
+ OzoneManagerProtocolProtos.SnapshotInfo snapshotInfoProto = + omClientResponse + .getOMResponse() + .getRenameSnapshotResponse() + .getSnapshotInfo(); + + assertEquals(bucketDataSize, snapshotInfoProto.getReferencedSize()); + assertEquals(bucketUsedBytes, + snapshotInfoProto.getReferencedReplicatedSize()); + + SnapshotInfo snapshotInfoOldProto = getFromProtobuf(snapshotInfoProto); + + String key2 = getTableKey(volumeName, bucketName, snapshotName2); + + // Get value from cache + SnapshotInfo snapshotInfoNewInCache = + omMetadataManager.getSnapshotInfoTable().get(key2); + assertNotNull(snapshotInfoNewInCache); + assertEquals(snapshotInfoOldProto, snapshotInfoNewInCache); + assertEquals(snapshotInfo.getSnapshotId(), snapshotInfoNewInCache.getSnapshotId()); + + SnapshotInfo snapshotInfoOldInCache = + omMetadataManager.getSnapshotInfoTable().get(key); + assertNull(snapshotInfoOldInCache); + } + + @Test + public void testEntryExists() throws Exception { + when(ozoneManager.isAdmin(any())).thenReturn(true); + + String keyNameOld = getTableKey(volumeName, bucketName, snapshotName1); + String keyNameNew = getTableKey(volumeName, bucketName, snapshotName2); + + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + + // First make sure we have two snapshots. + OzoneManagerProtocolProtos.OMRequest createOmRequest = + createSnapshotRequest(volumeName, bucketName, snapshotName1); + OMSnapshotCreateRequest omSnapshotCreateRequest = + TestOMSnapshotCreateRequest.doPreExecute(createOmRequest, ozoneManager); + omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); + + createOmRequest = + createSnapshotRequest(volumeName, bucketName, snapshotName2); + omSnapshotCreateRequest = + TestOMSnapshotCreateRequest.doPreExecute(createOmRequest, ozoneManager); + omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 2); + + assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); + assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + + // Now try renaming and get an error. + OzoneManagerProtocolProtos.OMRequest omRequest = + renameSnapshotRequest(volumeName, bucketName, snapshotName1, snapshotName2); + OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); + + OMClientResponse omClientResponse = + omSnapshotRenameRequest.validateAndUpdateCache(ozoneManager, 3); + + assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); + assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + + OzoneManagerProtocolProtos.OMResponse omResponse = omClientResponse.getOMResponse(); + assertNotNull(omResponse.getRenameSnapshotResponse()); + assertEquals(OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS, + omResponse.getStatus()); + } + + @Test + public void testEntryNotFound() throws Exception { + when(ozoneManager.isAdmin(any())).thenReturn(true); + + String keyNameOld = getTableKey(volumeName, bucketName, snapshotName1); + String keyNameNew = getTableKey(volumeName, bucketName, snapshotName2); + + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + + // Now try renaming and get an error. 
+ OzoneManagerProtocolProtos.OMRequest omRequest = + renameSnapshotRequest(volumeName, bucketName, snapshotName1, snapshotName2); + OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); + + OMClientResponse omClientResponse = + omSnapshotRenameRequest.validateAndUpdateCache(ozoneManager, 3); + + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + + OzoneManagerProtocolProtos.OMResponse omResponse = omClientResponse.getOMResponse(); + assertNotNull(omResponse.getRenameSnapshotResponse()); + assertEquals(OzoneManagerProtocolProtos.Status.FILE_NOT_FOUND, + omResponse.getStatus()); + } + + private OMSnapshotRenameRequest doPreExecute( + OzoneManagerProtocolProtos.OMRequest originalRequest) throws Exception { + return doPreExecute(originalRequest, ozoneManager); + } + + public static OMSnapshotRenameRequest doPreExecute( + OzoneManagerProtocolProtos.OMRequest originalRequest, OzoneManager ozoneManager) throws Exception { + OMSnapshotRenameRequest omSnapshotRenameRequest = + new OMSnapshotRenameRequest(originalRequest); + + OzoneManagerProtocolProtos.OMRequest modifiedRequest = + omSnapshotRenameRequest.preExecute(ozoneManager); + return new OMSnapshotRenameRequest(modifiedRequest); + } + + private OmKeyInfo addKey(String keyName, long objectId) { + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(THREE)).setObjectID(objectId) + .build(); + } + + protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception { + OMRequestTestUtils.addKeyToTable(false, true, keyInfo, 0, 0L, + omMetadataManager); + return omMetadataManager.getOzoneKey(keyInfo.getVolumeName(), + keyInfo.getBucketName(), keyInfo.getKeyName()); + } + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java index 3856a5b62f5f..b5bfc2714b0f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java @@ -27,6 +27,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -48,6 +49,7 @@ import java.util.List; import java.util.UUID; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.INTERNAL_ERROR; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.mockito.Mockito.anyString; @@ -62,7 +64,7 @@ public class TestOMSnapshotSetPropertyRequestAndResponse { private BatchOperation batchOperation; private OzoneManager ozoneManager; private OMMetadataManager omMetadataManager; - + private OMMetrics omMetrics; private String volumeName; private String bucketName; private String snapName; @@ -71,6 +73,7 @@ public class TestOMSnapshotSetPropertyRequestAndResponse { @BeforeEach void setup(@TempDir File 
testDir) throws Exception { + omMetrics = OMMetrics.create(); ozoneManager = mock(OzoneManager.class); OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); when(lvm.isAllowed(anyString())).thenReturn(true); @@ -84,6 +87,7 @@ void setup(@TempDir File testDir) throws Exception { omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + when(ozoneManager.getMetrics()).thenReturn(omMetrics); volumeName = UUID.randomUUID().toString(); bucketName = UUID.randomUUID().toString(); @@ -94,6 +98,9 @@ void setup(@TempDir File testDir) throws Exception { @Test public void testValidateAndUpdateCache() throws IOException { + long initialSnapshotSetPropertyCount = omMetrics.getNumSnapshotSetProperties(); + long initialSnapshotSetPropertyFailCount = omMetrics.getNumSnapshotSetPropertyFails(); + createSnapshotDataForTest(); assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); List snapshotUpdateSizeRequests = @@ -120,6 +127,9 @@ public void testValidateAndUpdateCache() throws IOException { omMetadataManager.getStore().commitBatchOperation(batchOperation); } + assertEquals(initialSnapshotSetPropertyCount + snapshotUpdateSizeRequests.size(), + omMetrics.getNumSnapshotSetProperties()); + assertEquals(initialSnapshotSetPropertyFailCount, omMetrics.getNumSnapshotSetPropertyFails()); // Check if the exclusive size is set. try (TableIterator> iterator = omMetadataManager.getSnapshotInfoTable().iterator()) { @@ -134,6 +144,42 @@ public void testValidateAndUpdateCache() throws IOException { } } + /** + * This test is mainly to validate metrics and error code. + */ + @Test + public void testValidateAndUpdateCacheFailure() throws IOException { + long initialSnapshotSetPropertyCount = omMetrics.getNumSnapshotSetProperties(); + long initialSnapshotSetPropertyFailCount = omMetrics.getNumSnapshotSetPropertyFails(); + + createSnapshotDataForTest(); + assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); + List snapshotUpdateSizeRequests = createSnapshotUpdateSizeRequest(); + + OmMetadataManagerImpl mockedMetadataManager = mock(OmMetadataManagerImpl.class); + Table mockedSnapshotInfoTable = mock(Table.class); + + when(mockedSnapshotInfoTable.get(anyString())).thenThrow(new IOException("Injected fault error.")); + when(mockedMetadataManager.getSnapshotInfoTable()).thenReturn(mockedSnapshotInfoTable); + when(ozoneManager.getMetadataManager()).thenReturn(mockedMetadataManager); + + for (OMRequest omRequest: snapshotUpdateSizeRequests) { + OMSnapshotSetPropertyRequest omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(omRequest); + OMRequest modifiedOmRequest = omSnapshotSetPropertyRequest.preExecute(ozoneManager); + omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(modifiedOmRequest); + + // Validate and Update Cache + OMSnapshotSetPropertyResponse omSnapshotSetPropertyResponse = (OMSnapshotSetPropertyResponse) + omSnapshotSetPropertyRequest.validateAndUpdateCache(ozoneManager, 200L); + + assertEquals(INTERNAL_ERROR, omSnapshotSetPropertyResponse.getOMResponse().getStatus()); + } + + assertEquals(initialSnapshotSetPropertyCount, omMetrics.getNumSnapshotSetProperties()); + assertEquals(initialSnapshotSetPropertyFailCount + snapshotUpdateSizeRequests.size(), + omMetrics.getNumSnapshotSetPropertyFails()); + } + private void assertCacheValues(String dbKey) { CacheValue cacheValue = omMetadataManager .getSnapshotInfoTable() diff --git 
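Both the success and failure tests above drive the same two-step request flow by hand: wrap the OMRequest, run preExecute (roughly, the leader-side preprocessing step), then replay the modified request through validateAndUpdateCache with a transaction index. A compact sketch of that flow; the package in the OMSnapshotSetPropertyRequest import is assumed from the test's own package layout:

```java
import java.io.IOException;

import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotSetPropertyRequest;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;

final class TwoPhaseRequestSketch {
  private TwoPhaseRequestSketch() {
  }

  // The lifecycle the tests above exercise by hand: preExecute() may rewrite the
  // request, and it is the rewritten request that is applied to the OM cache.
  static OMClientResponse run(OzoneManager ozoneManager, OMRequest omRequest,
      long transactionIndex) throws IOException {
    OMSnapshotSetPropertyRequest request = new OMSnapshotSetPropertyRequest(omRequest);
    OMRequest modifiedRequest = request.preExecute(ozoneManager);
    return new OMSnapshotSetPropertyRequest(modifiedRequest)
        .validateAndUpdateCache(ozoneManager, transactionIndex);
  }
}
```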
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java index 811e13ac173e..7d6487493861 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java @@ -20,6 +20,8 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.Table; @@ -82,7 +84,7 @@ public void testAddToDBBatch() throws Exception { OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, OzoneFSUtils.addTrailingSlashIfNeeded(keyName), - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE); + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)).build(); ThreadLocalRandom random = ThreadLocalRandom.current(); long usedNamespace = Math.abs(random.nextLong(Long.MAX_VALUE)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java index c7e2c265b7bb..c639c77c08e3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java @@ -26,7 +26,6 @@ import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse; import org.apache.hadoop.ozone.om.response.key.TestOMKeyCreateResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; import java.io.IOException; @@ -41,11 +40,11 @@ public class TestOMFileCreateResponseWithFSO extends TestOMKeyCreateResponse { @Override protected OmKeyInfo getOmKeyInfo() { assertNotNull(omBucketInfo); - return OMRequestTestUtils.createOmKeyInfo(volumeName, - omBucketInfo.getBucketName(), keyName, replicationType, - replicationFactor, - omBucketInfo.getObjectID() + 1, - omBucketInfo.getObjectID(), 100, Time.now()); + return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, replicationConfig) + .setObjectID(omBucketInfo.getObjectID() + 1) + .setParentObjectID(omBucketInfo.getObjectID()) + .setUpdateID(100L) + .build(); } @Nonnull diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java index e5a6b0ab14f5..88ef2964d17e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java @@ -92,7 +92,7 @@ public void testAddToDBBatchWithErrorResponse() throws Exception { protected OmKeyInfo createOmKeyInfo() throws Exception { 
return OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor); + bucketName, keyName, replicationConfig).build(); } protected String getOpenKey() throws Exception { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java index 85e9354ca8c9..b574b8548132 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java @@ -18,18 +18,19 @@ package org.apache.hadoop.ozone.om.response.key; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; import java.io.IOException; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; + /** * Tests OMAllocateBlockResponse - prefix layout. */ @@ -49,12 +50,11 @@ protected OmKeyInfo createOmKeyInfo() throws Exception { long txnId = 50; long objectId = parentID + 1; - OmKeyInfo omKeyInfoFSO = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, - Time.now()); - return omKeyInfoFSO; + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(objectId) + .setParentObjectID(parentID) + .setUpdateID(txnId) + .build(); } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java index 25b2f6c1050f..c4384c2dc906 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java @@ -81,7 +81,7 @@ public void testAddToDBBatch() throws Exception { public void testAddToDBBatchNoOp() throws Exception { OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor); + bucketName, keyName, replicationConfig).build(); OzoneManagerProtocolProtos.OMResponse omResponse = OzoneManagerProtocolProtos.OMResponse.newBuilder().setCommitKeyResponse( @@ -135,7 +135,7 @@ public void testAddToDBBatchOnOverwrite() throws Exception { @Nonnull protected void addKeyToOpenKeyTable() throws Exception { OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager); + clientID, replicationConfig, omMetadataManager); } @Nonnull diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java index 9c3f8c1143e3..62998d87b72c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java @@ -18,17 +18,19 @@ package org.apache.hadoop.ozone.om.response.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertNotNull; + import java.util.HashMap; import java.util.Map; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; import java.io.IOException; @@ -62,11 +64,11 @@ protected OMKeyCommitResponse getOmKeyCommitResponse(OmKeyInfo omKeyInfo, @Override protected OmKeyInfo getOmKeyInfo() { assertNotNull(omBucketInfo); - return OMRequestTestUtils.createOmKeyInfo(volumeName, - omBucketInfo.getBucketName(), keyName, replicationType, - replicationFactor, - omBucketInfo.getObjectID() + 1, - omBucketInfo.getObjectID(), 100, Time.now()); + return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, replicationConfig) + .setObjectID(omBucketInfo.getObjectID() + 1) + .setParentObjectID(omBucketInfo.getObjectID()) + .setUpdateID(100L) + .build(); } @Nonnull @@ -77,11 +79,11 @@ protected void addKeyToOpenKeyTable() throws Exception { long objectId = parentID + 10; OmKeyInfo omKeyInfoFSO = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100, - Time.now()); - + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(objectId) + .setParentObjectID(parentID) + .setUpdateID(100L) + .build(); String fileName = OzoneFSUtils.getFileName(keyName); OMRequestTestUtils.addFileToKeyTable(true, false, fileName, omKeyInfoFSO, clientID, txnLogId, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java index ee83f3671277..53d86e667367 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java @@ -18,13 +18,15 @@ package org.apache.hadoop.ozone.om.response.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertNotNull; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import 
org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; import java.io.IOException; @@ -50,11 +52,12 @@ protected String getOpenKeyName() throws IOException { @Override protected OmKeyInfo getOmKeyInfo() { assertNotNull(omBucketInfo); - return OMRequestTestUtils.createOmKeyInfo(volumeName, - omBucketInfo.getBucketName(), keyName, replicationType, - replicationFactor, - omBucketInfo.getObjectID() + 1, - omBucketInfo.getObjectID(), 100, Time.now()); + return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, + RatisReplicationConfig.getInstance(ONE)) + .setObjectID(omBucketInfo.getObjectID() + 1) + .setParentObjectID(omBucketInfo.getObjectID()) + .setUpdateID(100L) + .build(); } @Nonnull diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java index a000c3f9694e..8031ead68f18 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java @@ -22,7 +22,6 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.utils.db.Table; @@ -89,8 +88,7 @@ public void testAddToDBBatchWithNonEmptyBlocks() throws Exception { Pipeline pipeline = Pipeline.newBuilder() .setState(Pipeline.PipelineState.OPEN) .setId(PipelineID.randomId()) - .setReplicationConfig(RatisReplicationConfig - .getInstance(replicationFactor)) + .setReplicationConfig(replicationConfig) .setNodes(new ArrayList<>()) .build(); @@ -167,7 +165,7 @@ protected String addKeyToTable() throws Exception { keyName); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager); + clientID, replicationConfig, omMetadataManager); return ozoneKey; } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java index 588907c6ce88..38f5438e9877 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java @@ -18,13 +18,14 @@ package org.apache.hadoop.ozone.om.response.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertNotNull; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import 
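With TestOMKeyResponse (further below in this patch) exposing a single replicationConfig field, the TestOMKeyDeleteResponse hunk above can feed it straight into the Pipeline builder without wrapping it in RatisReplicationConfig.getInstance(). The same chain as in the hunk, extracted into an illustrative helper:

```java
import java.util.ArrayList;

import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;

final class PipelineSketch {
  private PipelineSketch() {
  }

  // Builds an open, node-less pipeline carrying whatever replication config the
  // test fixture holds -- exactly the builder chain used in the hunk above.
  static Pipeline openPipeline(ReplicationConfig replicationConfig) {
    return Pipeline.newBuilder()
        .setState(Pipeline.PipelineState.OPEN)
        .setId(PipelineID.randomId())
        .setReplicationConfig(replicationConfig)
        .setNodes(new ArrayList<>())
        .build();
  }
}
```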
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.util.Time; /** * Tests OMKeyDeleteResponse - prefix layout. @@ -50,11 +51,11 @@ protected String addKeyToTable() throws Exception { bucketName, "", omMetadataManager); OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - parentId + 1, - parentId, 100, Time.now()); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); OMRequestTestUtils.addFileToKeyTable(false, false, keyName, omKeyInfo, -1, 50, omMetadataManager); return omMetadataManager.getOzonePathKey( @@ -66,11 +67,12 @@ protected String addKeyToTable() throws Exception { @Override protected OmKeyInfo getOmKeyInfo() { assertNotNull(getOmBucketInfo()); - return OMRequestTestUtils.createOmKeyInfo(volumeName, - getOmBucketInfo().getBucketName(), keyName, replicationType, - replicationFactor, - getOmBucketInfo().getObjectID() + 1, - getOmBucketInfo().getObjectID(), 100, Time.now()); + return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, + replicationConfig) + .setObjectID(getOmBucketInfo().getObjectID() + 1) + .setParentObjectID(getOmBucketInfo().getObjectID()) + .setUpdateID(100L) + .build(); } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java index 2dcef56330f2..07c094cc98a1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java @@ -154,12 +154,10 @@ public void testAddToDBBatchWithErrorResponse() throws Exception { } protected OmKeyInfo getOmKeyInfo(String keyName) { - return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor, 0L); + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationConfig).build(); } - protected OmKeyInfo getOmKeyInfo(OmKeyInfo toKeyInfo, - String keyName) { + protected OmKeyInfo getOmKeyInfo(OmKeyInfo toKeyInfo, String keyName) { return getOmKeyInfo(keyName); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java index f2f9ccaf872e..edbb50d66f86 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java @@ -18,17 +18,17 @@ package org.apache.hadoop.ozone.om.response.key; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.TestOMResponseUtils; -import 
org.apache.hadoop.util.Time; import java.io.IOException; import java.util.UUID; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; /** @@ -38,19 +38,21 @@ public class TestOMKeyRenameResponseWithFSO extends TestOMKeyRenameResponse { @Override protected OmKeyInfo getOmKeyInfo(String keyName) { long bucketId = random.nextLong(); - return OMRequestTestUtils.createOmKeyInfo( - volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - bucketId + 100L, bucketId + 101L, 0L, Time.now()); + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(bucketId + 100) + .setParentObjectID(bucketId + 101) + .build(); } @Override protected OmKeyInfo getOmKeyInfo(OmKeyInfo toKeyInfo, String keyName) { - return OMRequestTestUtils.createOmKeyInfo(toKeyInfo.getVolumeName(), - toKeyInfo.getBucketName(), keyName, replicationType, - replicationFactor, toKeyInfo.getObjectID(), - toKeyInfo.getParentObjectID(), 0L, toKeyInfo.getCreationTime()); + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(toKeyInfo.getObjectID()) + .setParentObjectID(toKeyInfo.getParentObjectID()) + .setUpdateID(0L) + .setCreationTime(toKeyInfo.getCreationTime()) + .build(); } @Override @@ -80,12 +82,12 @@ protected void createParent() { long bucketId = random.nextLong(); String fromKeyParentName = UUID.randomUUID().toString(); String toKeyParentName = UUID.randomUUID().toString(); - fromKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, fromKeyParentName, replicationType, replicationFactor, - bucketId + 100L); - toKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, toKeyParentName, replicationType, replicationFactor, - bucketId + 101L); + fromKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, fromKeyParentName, replicationConfig) + .setObjectID(bucketId + 100L) + .build(); + toKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, toKeyParentName, replicationConfig) + .setObjectID(bucketId + 101L) + .build(); fromKeyParent.setParentObjectID(bucketId); toKeyParent.setParentObjectID(bucketId); fromKeyParent.setFileName(OzoneFSUtils.getFileName( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java index 1cbf5c6d0b2d..bc4c34bd0db3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java @@ -24,6 +24,7 @@ import java.util.Random; import java.util.UUID; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; @@ -59,8 +60,7 @@ public class TestOMKeyResponse { protected String volumeName; protected String bucketName; protected String keyName; - protected HddsProtos.ReplicationFactor replicationFactor; - protected HddsProtos.ReplicationType replicationType; + protected ReplicationConfig replicationConfig; protected OmBucketInfo omBucketInfo; protected 
long clientID; protected Random random; @@ -78,18 +78,18 @@ public void setup() throws Exception { volumeName = UUID.randomUUID().toString(); bucketName = UUID.randomUUID().toString(); keyName = UUID.randomUUID().toString(); - replicationFactor = HddsProtos.ReplicationFactor.ONE; - replicationType = HddsProtos.ReplicationType.RATIS; + replicationConfig = ReplicationConfig.fromProtoTypeAndFactor( + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE); clientID = 1000L; random = new Random(); keysToDelete = null; final OmVolumeArgs volumeArgs = OmVolumeArgs.newBuilder() - .setVolume(volumeName) - .setAdminName("admin") - .setOwnerName("owner") - .setObjectID(System.currentTimeMillis()) - .build(); + .setVolume(volumeName) + .setAdminName("admin") + .setOwnerName("owner") + .setObjectID(System.currentTimeMillis()) + .build(); omMetadataManager.getVolumeTable().addCacheEntry( new CacheKey<>(omMetadataManager.getVolumeKey(volumeName)), @@ -117,8 +117,7 @@ protected String getOpenKeyName() throws IOException { @Nonnull protected OmKeyInfo getOmKeyInfo() { - return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor); + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationConfig).build(); } @Nonnull diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java index 60f371ba1f88..8d178bcd47be 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.response.key; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; @@ -32,7 +33,6 @@ import java.util.List; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys; @@ -64,7 +64,7 @@ protected void createPreRequisities() throws Exception { for (int i = 0; i < 10; i++) { keyName = parent.concat(key + i); OMRequestTestUtils.addKeyToTable(false, volumeName, - bucketName, keyName, 0L, RATIS, THREE, omMetadataManager); + bucketName, keyName, 0L, RatisReplicationConfig.getInstance(THREE), omMetadataManager); ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); omKeyInfoList .add(omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponseWithFSO.java index 148a4e28c1b2..98522814de79 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponseWithFSO.java +++ 
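Editor's note: TestOMKeyResponse's two fields for the proto replication type and factor collapse into a single replicationConfig. When a test still starts from the proto enums, the conversion used in the new setup() is the bridge; a short sketch (method names are illustrative):

    import org.apache.hadoop.hdds.client.RatisReplicationConfig;
    import org.apache.hadoop.hdds.client.ReplicationConfig;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

    final class ReplicationConfigSketch {
      // Build the config from the proto enums, mirroring the new setup() above...
      static ReplicationConfig fromProto() {
        return ReplicationConfig.fromProtoTypeAndFactor(
            HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE);
      }

      // ...or grab the cached RATIS/ONE instance directly, as most other hunks in this patch do.
      static ReplicationConfig ratisOne() {
        return RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE);
      }
    }

Helpers such as addKeyToTable and addKeyToTableAndCache now accept the ReplicationConfig directly, so the enum pair no longer needs to be threaded through the test utilities.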
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponseWithFSO.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.response.key; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.util.Time; import org.junit.jupiter.api.Test; import java.io.IOException; @@ -41,6 +40,7 @@ import java.util.Collections; import java.util.List; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -94,10 +94,11 @@ protected void createPreRequisities() throws Exception { keyName = keyPrefix + i; OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, dirId + 1, buckId, - dirId + 1, Time.now()); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(dirId + 1) + .setParentObjectID(buckId) + .setUpdateID(dirId + 1) + .build(); ozoneDBKey = OMRequestTestUtils.addFileToKeyTable(false, false, keyName, omKeyInfo, -1, 50, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java index 0824f7c33de7..72a76a1aca4f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.response.key; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.OmRenameKeys; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -32,7 +33,6 @@ import java.util.Map; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -117,7 +117,8 @@ private void createPreRequisities() throws Exception { String key = parentDir.concat("/key" + i); String toKey = parentDir.concat("/newKey" + i); OMRequestTestUtils.addKeyToTable(false, volumeName, - bucketName, parentDir.concat("/key" + i), 0L, RATIS, THREE, + bucketName, parentDir.concat("/key" + i), 0L, + RatisReplicationConfig.getInstance(THREE), omMetadataManager); OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()) diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java index f4f0e729f05d..c9a4109809ed 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java @@ -208,7 +208,7 @@ private Map addOpenKeysToDB(String volume, int numKeys, long parentID = random.nextLong(); OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume, - bucket, key, replicationType, replicationFactor); + bucket, key, replicationConfig).build(); if (keyLength > 0) { OMRequestTestUtils.addKeyLocationInfo(omKeyInfo, 0, keyLength); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/TestOMPrefixAclResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/TestOMPrefixAclResponse.java index b12087785b1f..5ebd2e6fa1cb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/TestOMPrefixAclResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/TestOMPrefixAclResponse.java @@ -17,8 +17,12 @@ package org.apache.hadoop.ozone.om.response.key.acl.prefix; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.PrefixManagerImpl; +import org.apache.hadoop.ozone.om.ResolvedBucket; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; import org.apache.hadoop.ozone.om.response.key.TestOMKeyResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -36,6 +40,8 @@ import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** * Tests TestOMPrefixAclResponse. 
@@ -45,9 +51,9 @@ public class TestOMPrefixAclResponse extends TestOMKeyResponse { @Test public void testAddToDBBatch() throws Exception { final OzoneAcl user1 = new OzoneAcl(USER, "user1", - ACLType.READ_ACL, ACCESS); + ACCESS, ACLType.READ_ACL); final OzoneAcl user2 = new OzoneAcl(USER, "user2", - ACLType.WRITE, ACCESS); + ACCESS, ACLType.WRITE); final String prefixName = "/vol/buck/prefix/"; List acls = Arrays.asList(user1, user2); @@ -77,13 +83,22 @@ public void testAddToDBBatch() throws Exception { .getSkipCache(prefixName); assertEquals(omPrefixInfo, persistedPrefixInfo); + String volumeName = "vol"; + String bucketName = "buck"; + + OzoneManager ozoneManager = mock(OzoneManager.class); + when(ozoneManager.resolveBucketLink(Pair.of(volumeName, bucketName))) + .thenReturn(new ResolvedBucket(volumeName, bucketName, volumeName, + bucketName, "", BucketLayout.DEFAULT)); + + // Verify that in-memory Prefix Tree (Radix Tree) is able to reload from // DB successfully PrefixManagerImpl prefixManager = - new PrefixManagerImpl(omMetadataManager, true); + new PrefixManagerImpl(ozoneManager, omMetadataManager, true); OzoneObj prefixObj = OzoneObjInfo.Builder.newBuilder() - .setVolumeName("vol") - .setBucketName("buck") + .setVolumeName(volumeName) + .setBucketName(bucketName) .setPrefixName("prefix/") .setResType(OzoneObj.ResourceType.PREFIX) .setStoreType(OzoneObj.StoreType.OZONE) @@ -123,7 +138,7 @@ public void testAddToDBBatch() throws Exception { // Reload prefix tree from DB and validate again. prefixManager = - new PrefixManagerImpl(omMetadataManager, true); + new PrefixManagerImpl(ozoneManager, omMetadataManager, true); prefixInfo = prefixManager.getPrefixInfo(prefixObj); assertEquals(2L, prefixInfo.getUpdateID()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java index b356dddd6b57..35600c331f3f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java @@ -19,15 +19,19 @@ package org.apache.hadoop.ozone.om.response.s3.multipart; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.utils.UniqueId; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartAbortInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; @@ -277,10 +281,10 @@ private Map> addMPUsToDB( OmBucketInfo omBucketInfo = OMRequestTestUtils.addBucketToDB(volume, 
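Editor's note: two API changes meet in this test. OzoneAcl's constructor now takes the ACL scope before the ACL type, and PrefixManagerImpl grows an OzoneManager parameter, which the test satisfies by stubbing resolveBucketLink. A condensed sketch of that wiring, restricted to the calls visible in the hunk (volume and bucket names are placeholders):

    import org.apache.commons.lang3.tuple.Pair;
    import org.apache.hadoop.ozone.om.OMMetadataManager;
    import org.apache.hadoop.ozone.om.OzoneManager;
    import org.apache.hadoop.ozone.om.PrefixManagerImpl;
    import org.apache.hadoop.ozone.om.ResolvedBucket;
    import org.apache.hadoop.ozone.om.helpers.BucketLayout;

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    final class PrefixManagerWiringSketch {
      static PrefixManagerImpl reloadPrefixTree(OMMetadataManager omMetadataManager) throws Exception {
        OzoneManager ozoneManager = mock(OzoneManager.class);
        // "vol/buck" resolves to itself: no bucket links are involved in this test.
        when(ozoneManager.resolveBucketLink(Pair.of("vol", "buck")))
            .thenReturn(new ResolvedBucket("vol", "buck", "vol", "buck", "", BucketLayout.DEFAULT));
        // The OzoneManager argument is the new part; the trailing flag still asks for a reload from DB.
        return new PrefixManagerImpl(ozoneManager, omMetadataManager, true);
      }
    }

The ACL constructor change itself is mechanical: new OzoneAcl(USER, "user1", ACCESS, ACLType.READ_ACL) instead of the old type-before-scope order.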
bucket, omMetadataManager, getBucketLayout()); - final OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume, - bucket, keyName, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - 0L, Time.now(), true); + ReplicationConfig replicationConfig = RatisReplicationConfig.getInstance(ONE); + final OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume, bucket, keyName, replicationConfig, + new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .build(); if (getBucketLayout().equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) { omKeyInfo.setParentObjectID(omBucketInfo.getObjectID()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java index 23b543b6ec12..51963a00a1cb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java @@ -287,7 +287,7 @@ public S3MultipartUploadCommitPartResponse createS3CommitMPUResponseFSO( .setStatus(status).setSuccess(true) .setCommitMultiPartUploadResponse( OzoneManagerProtocolProtos.MultipartCommitUploadPartResponse - .newBuilder().setPartName(volumeName)).build(); + .newBuilder().setETag(volumeName).setPartName(volumeName)).build(); return new S3MultipartUploadCommitPartResponseWithFSO(omResponse, multipartKey, openKey, multipartKeyInfo, oldPartKeyInfo, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java index 47aa641c1ebb..e7a570350cff 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java @@ -18,14 +18,17 @@ package org.apache.hadoop.ozone.om.response.s3.multipart; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -85,14 +88,16 @@ public void testAddDBToBatch() throws Exception { omMetadataManager.getStore().commitBatchOperation(batchOperation); String dbOpenKey = omMetadataManager.getOpenFileName(volumeId, bucketId, - parentID, fileName, clientId); + parentID, fileName, clientId); String dbKey = omMetadataManager.getOzonePathKey(volumeId, bucketId, - 
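Editor's note: where the removed createOmKeyInfo overload ended in a trailing boolean, the new call passes an explicit OmKeyLocationInfoGroup, which makes the "empty version 0, multipart" intent readable at the call site. A sketch of the shape used in these multipart hunks (the meaning of the three constructor arguments is inferred from the surrounding calls, so treat it as an assumption):

    import java.util.ArrayList;

    import org.apache.hadoop.hdds.client.RatisReplicationConfig;
    import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
    import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
    import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;

    import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;

    final class MultipartKeySketch {
      static OmKeyInfo newMpuKey(String volume, String bucket, String key) {
        // version 0, no block locations yet, multipart flag set - mirroring the hunks above
        OmKeyLocationInfoGroup emptyVersion = new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true);
        return OMRequestTestUtils.createOmKeyInfo(volume, bucket, key,
            RatisReplicationConfig.getInstance(ONE), emptyVersion).build();
      }
    }

The same stretch also starts setting an ETag next to the part name on MultipartCommitUploadPartResponse; the cleanup-service tests later in this patch attach the matching ETag metadata when committing parts.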
parentID, fileName); + parentID, fileName); OmKeyInfo omKeyInfoFSO = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, - Time.now(), true); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setObjectID(objectId) + .setParentObjectID(parentID) + .setUpdateID(txnId) + .build(); // add key to openFileTable omKeyInfoFSO.setKeyName(fileName); @@ -175,9 +180,11 @@ public void testAddDBToBatchWithNullBucketInfo() throws Exception { parentID, fileName); OmKeyInfo omKeyInfoFSO = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, - Time.now(), true); + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setObjectID(objectId) + .setParentObjectID(parentID) + .setUpdateID(txnId) + .build(); // add key to openFileTable omKeyInfoFSO.setKeyName(fileName); @@ -244,20 +251,20 @@ public void testAddDBToBatchWithPartsWithKeyInDeleteTable() throws Exception { String keyName = getKeyName(); OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); + omMetadataManager); createParentPath(volumeName, bucketName); // Put an entry to delete table with the same key prior to multipart commit - OmKeyInfo prevKey = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - parentID + 8, - parentID, 8, Time.now(), true); + OmKeyInfo prevKey = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setObjectID(parentID + 8) + .setParentObjectID(parentID) + .setUpdateID(8) + .build(); RepeatedOmKeyInfo prevKeys = new RepeatedOmKeyInfo(prevKey); String ozoneKey = omMetadataManager - .getOzoneKey(prevKey.getVolumeName(), - prevKey.getBucketName(), prevKey.getFileName()); + .getOzoneKey(prevKey.getVolumeName(), + prevKey.getBucketName(), prevKey.getFileName()); omMetadataManager.getDeletedTable().put(ozoneKey, prevKeys); long oId = runAddDBToBatchWithParts(volumeName, bucketName, keyName, 1); @@ -312,11 +319,12 @@ private long runAddDBToBatchWithParts(String volumeName, omMetadataManager.getBucketTable().get(bucketKey); OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - parentID + 9, - parentID, 100, Time.now(), true); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setObjectID(parentID + 9) + .setParentObjectID(parentID) + .setUpdateID(100) + .build(); List unUsedParts = new ArrayList<>(); unUsedParts.add(omKeyInfo); S3MultipartUploadCompleteResponse s3MultipartUploadCompleteResponse = diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java index 9ae0a395e906..70dd23a7b047 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java @@ -36,12 +36,11 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; -import java.io.IOException; import java.util.UUID; import java.nio.file.Path; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.fail; /** * This class tests OMVolumeCreateResponse. @@ -115,7 +114,7 @@ public void testAddToDBBatch() throws Exception { } @Test - public void testAddToDBBatchNoOp() throws Exception { + public void testAddToDBBatchNoOp() { OMResponse omResponse = OMResponse.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.DeleteVolume) @@ -126,15 +125,7 @@ public void testAddToDBBatchNoOp() throws Exception { OMVolumeDeleteResponse omVolumeDeleteResponse = new OMVolumeDeleteResponse( omResponse); - - try { - omVolumeDeleteResponse.checkAndUpdateDB(omMetadataManager, - batchOperation); - } catch (IOException ex) { - fail("testAddToDBBatchFailure failed"); - } - + assertDoesNotThrow(() -> omVolumeDeleteResponse.checkAndUpdateDB(omMetadataManager, batchOperation)); } - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java index c8a3faae4cca..8dcb030d637a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java @@ -23,9 +23,10 @@ import java.io.IOException; import java.nio.file.Path; import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.utils.db.DBConfigFromFile; import org.apache.hadoop.ozone.om.KeyManager; @@ -47,6 +48,7 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -129,10 +131,11 @@ public void testDeleteDirectoryCrossingSizeLimit() throws Exception { for (int i = 0; i < 2000; ++i) { String keyName = "key" + longName + i; OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, - keyName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, dir1.getObjectID() + 1 + i, - dir1.getObjectID(), 100, Time.now()); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(dir1.getObjectID() + 1 + i) + .setParentObjectID(dir1.getObjectID()) + .setUpdateID(100L) + .build(); OMRequestTestUtils.addFileToKeyTable(false, true, keyName, omKeyInfo, 1234L, i + 1, om.getMetadataManager()); } @@ -143,7 +146,7 @@ public void 
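Editor's note: the try/catch-plus-fail idiom in TestOMVolumeDeleteResponse becomes a single assertDoesNotThrow, which reports any thrown exception as the test failure and lets the test method drop its throws clause. Sketched below, reusing the fields from the surrounding test; the second line shows JUnit 5's supplier overload, an alternative worth knowing when the guarded call also returns a value:

    import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;

    // Statement form, as used in TestOMVolumeDeleteResponse:
    assertDoesNotThrow(() ->
        omVolumeDeleteResponse.checkAndUpdateDB(omMetadataManager, batchOperation));

    // Supplier form: the lambda's result is returned, so the caller gets the value
    // as well as the "does not throw" guarantee.
    long rows = assertDoesNotThrow(() -> metadataManager.countRowsInTable(table));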
testDeleteDirectoryCrossingSizeLimit() throws Exception { .setBucketName(bucketName) .setKeyName("dir" + longName) .setReplicationConfig(StandaloneReplicationConfig.getInstance( - HddsProtos.ReplicationFactor.ONE)) + ONE)) .setDataSize(0).setRecursive(true) .build(); writeClient.deleteKey(delArgs); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java index 77bf15ed76b1..c5099fc75919 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java @@ -31,12 +31,12 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; import com.google.common.collect.ImmutableMap; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -52,7 +52,6 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.ozone.test.OzoneTestBase; import org.apache.ratis.util.ExitUtils; import org.junit.jupiter.api.AfterAll; @@ -82,12 +81,11 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.fail; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -359,10 +357,9 @@ void testSnapshotDeepClean() throws Exception { keyDeletingService.resume(); - try (ReferenceCounted rcOmSnapshot = - om.getOmSnapshotManager().checkForSnapshot( - volumeName, bucketName, getSnapshotPrefix(snap3), true)) { - OmSnapshot snapshot3 = (OmSnapshot) rcOmSnapshot.get(); + try (ReferenceCounted rcOmSnapshot = + om.getOmSnapshotManager().getSnapshot(volumeName, bucketName, snap3)) { + OmSnapshot snapshot3 = rcOmSnapshot.get(); Table snap3deletedTable = snapshot3.getMetadataManager().getDeletedTable(); @@ -630,15 +627,13 @@ private static void assertTableRowCount(Table table, private static boolean assertTableRowCount(long expectedCount, Table table, OMMetadataManager metadataManager) { - long count = 0L; - try { - count = metadataManager.countRowsInTable(table); + AtomicLong count = new AtomicLong(0L); + assertDoesNotThrow(() -> { + count.set(metadataManager.countRowsInTable(table)); LOG.info("{} actual row 
count={}, expectedCount={}", table.getName(), - count, expectedCount); - } catch (IOException ex) { - fail("testDoubleBuffer failed with: " + ex); - } - return count == expectedCount; + count.get(), expectedCount); + }); + return count.get() == expectedCount; } private void createVolumeAndBucket(String volumeName, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java index 762d8740565f..9fc0f5c0c12d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java @@ -19,11 +19,13 @@ package org.apache.hadoop.ozone.om.service; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.utils.db.DBConfigFromFile; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmTestManagers; @@ -241,6 +243,8 @@ private void createIncompleteMPUKey(String volumeName, String bucketName, .setMultipartUploadID(omMultipartInfo.getUploadID()) .setMultipartUploadPartNumber(i) .setAcls(Collections.emptyList()) + .addMetadata(OzoneConsts.ETAG, + DigestUtils.md5Hex(UUID.randomUUID().toString())) .setReplicationConfig( StandaloneReplicationConfig.getInstance(ONE)) .setLocationInfoList(Collections.emptyList()) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java index 30fe6f6ffb0e..aa51e2be7b67 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.service; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; @@ -34,6 +35,7 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.ExpiredOpenKeys; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -570,6 +572,8 @@ private void createIncompleteMPUKey(String volumeName, String bucketName, .setReplicationConfig(RatisReplicationConfig.getInstance( HddsProtos.ReplicationFactor.ONE)) .setLocationInfoList(Collections.emptyList()) + .addMetadata(OzoneConsts.ETAG, DigestUtils.md5Hex(UUID.randomUUID() + .toString())) .build(); writeClient.commitMultipartUploadPart(commitPartKeyArgs, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java 
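Editor's note: the rewritten assertTableRowCount helper shows the lambda-capture side of the same move. assertDoesNotThrow takes an Executable, so the count computed inside it is carried out through an AtomicLong rather than a plain local. Reassembled as a standalone helper (signature taken from the hunk; the Table generics are an assumption, since the flattened diff drops them):

    import java.util.concurrent.atomic.AtomicLong;

    import org.apache.hadoop.hdds.utils.db.Table;
    import org.apache.hadoop.ozone.om.OMMetadataManager;

    import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;

    final class RowCountSketch {
      static boolean rowCountMatches(long expectedCount, Table<?, ?> table,
          OMMetadataManager metadataManager) {
        // Locals captured by a lambda must be effectively final, so the count goes
        // through an AtomicLong instead of a plain long.
        AtomicLong count = new AtomicLong(0L);
        assertDoesNotThrow(() -> count.set(metadataManager.countRowsInTable(table)));
        return count.get() == expectedCount;
      }
    }

The other addition in this stretch is mechanical: the incomplete-MPU helpers in the cleanup-service tests now add OzoneConsts.ETAG metadata (an MD5 of a random UUID) to each part commit.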
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java index 5ac7835f8ce6..1a0db1183311 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java @@ -19,8 +19,11 @@ package org.apache.hadoop.ozone.om.service; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.junit.jupiter.api.Assertions.assertEquals; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -48,8 +51,7 @@ public void testQuotaRepair() throws Exception { String parentDir = "/user"; for (int i = 0; i < count; i++) { OMRequestTestUtils.addKeyToTableAndCache(volumeName, bucketName, - parentDir.concat("/key" + i), -1, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, 150 + i, omMetadataManager); + parentDir.concat("/key" + i), -1, RatisReplicationConfig.getInstance(THREE), 150 + i, omMetadataManager); } String fsoBucketName = "fso" + bucketName; @@ -59,12 +61,13 @@ public void testQuotaRepair() throws Exception { fsoBucketName, "c/d/e", omMetadataManager); for (int i = 0; i < count; i++) { String fileName = "file1" + i; - OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo( - volumeName, fsoBucketName, fileName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - parentId + 1 + i, - parentId, 100 + i, Time.now()); + OmKeyInfo omKeyInfo = + OMRequestTestUtils.createOmKeyInfo(volumeName, fsoBucketName, fileName, + RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1 + i) + .setParentObjectID(parentId) + .setUpdateID(100L + i) + .build(); omKeyInfo.setKeyName(fileName); OMRequestTestUtils.addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50 + i, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java index cecd7a99af2b..2a1e2ec99fca 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java @@ -18,9 +18,8 @@ package org.apache.hadoop.ozone.om.snapshot; import com.google.common.cache.CacheLoader; -import org.apache.hadoop.ozone.om.IOmMetadataReader; +import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmSnapshot; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; @@ -33,15 +32,14 @@ import org.slf4j.event.Level; import java.io.IOException; +import java.util.UUID; -import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static 
org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -52,23 +50,21 @@ class TestSnapshotCache { private static final int CACHE_SIZE_LIMIT = 3; - private static OmSnapshotManager omSnapshotManager; - private static CacheLoader cacheLoader; + private static CacheLoader cacheLoader; private SnapshotCache snapshotCache; + private OMMetrics omMetrics; + @BeforeAll static void beforeAll() throws Exception { - omSnapshotManager = mock(OmSnapshotManager.class); - when(omSnapshotManager.isSnapshotStatus(any(), eq(SNAPSHOT_ACTIVE))) - .thenReturn(true); cacheLoader = mock(CacheLoader.class); // Create a difference mock OmSnapshot instance each time load() is called when(cacheLoader.load(any())).thenAnswer( (Answer) invocation -> { final OmSnapshot omSnapshot = mock(OmSnapshot.class); // Mock the snapshotTable return value for the lookup inside release() - final String dbKey = (String) invocation.getArguments()[0]; - when(omSnapshot.getSnapshotTableKey()).thenReturn(dbKey); + final UUID snapshotID = (UUID) invocation.getArguments()[0]; + when(omSnapshot.getSnapshotTableKey()).thenReturn(snapshotID.toString()); return omSnapshot; } @@ -81,8 +77,8 @@ static void beforeAll() throws Exception { @BeforeEach void setUp() { // Reset cache for each test case - snapshotCache = new SnapshotCache( - omSnapshotManager, cacheLoader, CACHE_SIZE_LIMIT); + omMetrics = OMMetrics.create(); + snapshotCache = new SnapshotCache(cacheLoader, CACHE_SIZE_LIMIT, omMetrics); } @AfterEach @@ -92,115 +88,108 @@ void tearDown() { } @Test - @DisplayName("01. get()") + @DisplayName("get()") void testGet() throws IOException { - final String dbKey1 = "dbKey1"; - ReferenceCounted omSnapshot = - snapshotCache.get(dbKey1); + final UUID dbKey1 = UUID.randomUUID(); + assertEquals(0, omMetrics.getNumSnapshotCacheSize()); + ReferenceCounted omSnapshot = snapshotCache.get(dbKey1); assertNotNull(omSnapshot); assertNotNull(omSnapshot.get()); assertInstanceOf(OmSnapshot.class, omSnapshot.get()); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); } @Test - @DisplayName("02. get() same entry twice yields one cache entry only") + @DisplayName("get() same entry twice yields one cache entry only") void testGetTwice() throws IOException { - final String dbKey1 = "dbKey1"; - ReferenceCounted omSnapshot1 = - snapshotCache.get(dbKey1); + final UUID dbKey1 = UUID.randomUUID(); + ReferenceCounted omSnapshot1 = snapshotCache.get(dbKey1); assertNotNull(omSnapshot1); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); - ReferenceCounted omSnapshot1again = - snapshotCache.get(dbKey1); + ReferenceCounted omSnapshot1again = snapshotCache.get(dbKey1); // Should be the same instance assertEquals(omSnapshot1, omSnapshot1again); assertEquals(omSnapshot1.get(), omSnapshot1again.get()); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); } @Test - @DisplayName("03. 
release(String)") + @DisplayName("release(String)") void testReleaseByDbKey() throws IOException { - final String dbKey1 = "dbKey1"; - ReferenceCounted omSnapshot1 = - snapshotCache.get(dbKey1); + final UUID dbKey1 = UUID.randomUUID(); + ReferenceCounted omSnapshot1 = snapshotCache.get(dbKey1); assertNotNull(omSnapshot1); assertNotNull(omSnapshot1.get()); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); snapshotCache.release(dbKey1); // Entry will not be immediately evicted assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); } @Test - @DisplayName("04. release(OmSnapshot)") - void testReleaseByOmSnapshotInstance() throws IOException { - final String dbKey1 = "dbKey1"; - ReferenceCounted omSnapshot1 = - snapshotCache.get(dbKey1); - assertNotNull(omSnapshot1); - assertEquals(1, snapshotCache.size()); - - snapshotCache.release((OmSnapshot) omSnapshot1.get()); - // Entry will not be immediately evicted - assertEquals(1, snapshotCache.size()); - } - - @Test - @DisplayName("05. invalidate()") + @DisplayName("invalidate()") void testInvalidate() throws IOException { - final String dbKey1 = "dbKey1"; - ReferenceCounted omSnapshot = - snapshotCache.get(dbKey1); + final UUID dbKey1 = UUID.randomUUID(); + ReferenceCounted omSnapshot = snapshotCache.get(dbKey1); assertNotNull(omSnapshot); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); snapshotCache.release(dbKey1); // Entry will not be immediately evicted assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); snapshotCache.invalidate(dbKey1); assertEquals(0, snapshotCache.size()); + assertEquals(0, omMetrics.getNumSnapshotCacheSize()); } @Test - @DisplayName("06. 
invalidateAll()") + @DisplayName("invalidateAll()") void testInvalidateAll() throws IOException { - final String dbKey1 = "dbKey1"; - ReferenceCounted omSnapshot1 = - snapshotCache.get(dbKey1); + final UUID dbKey1 = UUID.randomUUID(); + ReferenceCounted omSnapshot1 = snapshotCache.get(dbKey1); assertNotNull(omSnapshot1); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); - final String dbKey2 = "dbKey2"; - ReferenceCounted omSnapshot2 = - snapshotCache.get(dbKey2); + final UUID dbKey2 = UUID.randomUUID(); + ReferenceCounted omSnapshot2 = snapshotCache.get(dbKey2); assertNotNull(omSnapshot2); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); // Should be difference omSnapshot instances assertNotEquals(omSnapshot1, omSnapshot2); - final String dbKey3 = "dbKey3"; - ReferenceCounted omSnapshot3 = - snapshotCache.get(dbKey3); + final UUID dbKey3 = UUID.randomUUID(); + ReferenceCounted omSnapshot3 = snapshotCache.get(dbKey3); assertNotNull(omSnapshot3); assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); snapshotCache.release(dbKey1); // Entry will not be immediately evicted assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); snapshotCache.invalidate(dbKey1); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); snapshotCache.invalidateAll(); assertEquals(0, snapshotCache.size()); + assertEquals(0, omMetrics.getNumSnapshotCacheSize()); } - private void assertEntryExistence(String key, boolean shouldExist) { + private void assertEntryExistence(UUID key, boolean shouldExist) { if (shouldExist) { snapshotCache.getDbMap().computeIfAbsent(key, k -> { fail(k + " should not have been evicted"); @@ -215,108 +204,129 @@ private void assertEntryExistence(String key, boolean shouldExist) { } @Test - @DisplayName("07. Basic cache eviction") + @DisplayName("Basic cache eviction") void testEviction1() throws IOException { - final String dbKey1 = "dbKey1"; + final UUID dbKey1 = UUID.randomUUID(); snapshotCache.get(dbKey1); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); snapshotCache.release(dbKey1); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); - final String dbKey2 = "dbKey2"; + final UUID dbKey2 = UUID.randomUUID(); snapshotCache.get(dbKey2); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); snapshotCache.release(dbKey2); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); - final String dbKey3 = "dbKey3"; + final UUID dbKey3 = UUID.randomUUID(); snapshotCache.get(dbKey3); assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); snapshotCache.release(dbKey3); assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); - final String dbKey4 = "dbKey4"; + final UUID dbKey4 = UUID.randomUUID(); snapshotCache.get(dbKey4); // dbKey1, dbKey2 and dbKey3 would have been evicted by the end of the last get() because // those were release()d. assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); assertEntryExistence(dbKey1, false); } @Test - @DisplayName("08. 
Cache eviction while exceeding soft limit") + @DisplayName("Cache eviction while exceeding soft limit") void testEviction2() throws IOException { - final String dbKey1 = "dbKey1"; + final UUID dbKey1 = UUID.randomUUID(); snapshotCache.get(dbKey1); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); - final String dbKey2 = "dbKey2"; + final UUID dbKey2 = UUID.randomUUID(); snapshotCache.get(dbKey2); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); - final String dbKey3 = "dbKey3"; + final UUID dbKey3 = UUID.randomUUID(); snapshotCache.get(dbKey3); assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); - final String dbKey4 = "dbKey4"; + final UUID dbKey4 = UUID.randomUUID(); snapshotCache.get(dbKey4); // dbKey1 would not have been evicted because it is not release()d assertEquals(4, snapshotCache.size()); + assertEquals(4, omMetrics.getNumSnapshotCacheSize()); assertEntryExistence(dbKey1, true); // Releasing dbKey2 at this point should immediately trigger its eviction // because the cache size exceeded the soft limit snapshotCache.release(dbKey2); assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); assertEntryExistence(dbKey2, false); assertEntryExistence(dbKey1, true); } @Test - @DisplayName("09. Cache eviction with try-with-resources") + @DisplayName("Cache eviction with try-with-resources") void testEviction3WithClose() throws IOException { - final String dbKey1 = "dbKey1"; - try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey1)) { + final UUID dbKey1 = UUID.randomUUID(); + try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey1)) { assertEquals(1L, rcOmSnapshot.getTotalRefCount()); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); } // ref count should have been decreased because it would be close()d // upon exiting try-with-resources. 
assertEquals(0L, snapshotCache.getDbMap().get(dbKey1).getTotalRefCount()); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); - final String dbKey2 = "dbKey2"; - try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey2)) { + final UUID dbKey2 = UUID.randomUUID(); + try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey2)) { assertEquals(1L, rcOmSnapshot.getTotalRefCount()); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); // Get dbKey2 entry a second time - try (ReferenceCounted rcOmSnapshot2 = snapshotCache.get(dbKey2)) { + try (ReferenceCounted rcOmSnapshot2 = snapshotCache.get(dbKey2)) { assertEquals(2L, rcOmSnapshot.getTotalRefCount()); assertEquals(2L, rcOmSnapshot2.getTotalRefCount()); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); } assertEquals(1L, rcOmSnapshot.getTotalRefCount()); } assertEquals(0L, snapshotCache.getDbMap().get(dbKey2).getTotalRefCount()); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); - final String dbKey3 = "dbKey3"; - try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey3)) { + final UUID dbKey3 = UUID.randomUUID(); + try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey3)) { assertEquals(1L, rcOmSnapshot.getTotalRefCount()); assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); } assertEquals(0L, snapshotCache.getDbMap().get(dbKey3).getTotalRefCount()); assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); - final String dbKey4 = "dbKey4"; - try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey4)) { + final UUID dbKey4 = UUID.randomUUID(); + try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey4)) { assertEquals(1L, rcOmSnapshot.getTotalRefCount()); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); } assertEquals(0L, snapshotCache.getDbMap().get(dbKey4).getTotalRefCount()); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotChain.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java similarity index 98% rename from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotChain.java rename to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java index 1821b6f9af32..c5ae809718e7 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotChain.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java @@ -15,11 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
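Editor's note: ReferenceCounted is used as an AutoCloseable here, so the eviction-with-close test leans on try-with-resources: entering the block takes a reference, leaving it releases one, and the entry only becomes evictable once the count drops back to zero. A compact sketch of that access pattern (the single type parameter is assumed, as above):

    import java.io.IOException;
    import java.util.UUID;

    import org.apache.hadoop.ozone.om.OmSnapshot;
    import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted;
    import org.apache.hadoop.ozone.om.snapshot.SnapshotCache;

    final class ReferenceCountedUsageSketch {
      static void readSnapshot(SnapshotCache cache, UUID snapshotId) throws IOException {
        try (ReferenceCounted<OmSnapshot> rc = cache.get(snapshotId)) {
          OmSnapshot snapshot = rc.get();  // ref count is held while inside the block
          // ... use snapshot.getMetadataManager(), etc.
        }
        // close() has decremented the count; the entry stays cached until eviction kicks in.
      }
    }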
*/ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import com.google.common.collect.ImmutableMap; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.util.Time; import org.junit.jupiter.api.BeforeEach; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index b92546c2899b..a9e67b00cc9e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -21,12 +21,10 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Sets; -import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.CodecRegistry; import org.apache.hadoop.hdds.utils.db.RDBStore; @@ -35,11 +33,9 @@ import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool; import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; @@ -68,7 +64,6 @@ import org.apache.ozone.rocksdiff.DifferSnapshotInfo; import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; import org.apache.ozone.rocksdiff.RocksDiffUtils; -import org.apache.ozone.test.tag.Unhealthy; import org.apache.ratis.util.ExitUtils; import org.apache.ratis.util.TimeDuration; import jakarta.annotation.Nonnull; @@ -134,10 +129,6 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE_DEFAULT; import static org.apache.hadoop.ozone.om.OmSnapshotManager.DELIMITER; import static org.apache.hadoop.ozone.om.OmSnapshotManager.SNAP_DIFF_JOB_TABLE_NAME; import 
static org.apache.hadoop.ozone.om.OmSnapshotManager.SNAP_DIFF_REPORT_TABLE_NAME; @@ -217,9 +208,6 @@ public class TestSnapshotDiffManager { private OzoneManager ozoneManager; @Mock private OzoneConfiguration configuration; - - private SnapshotCache snapshotCache; - @Mock private Table snapshotInfoTable; @Mock @@ -328,8 +316,8 @@ public void init() throws RocksDBException, IOException, ExecutionException { OZONE_OM_SNAPSHOT_DIFF_JOB_DEFAULT_WAIT_TIME_DEFAULT, TimeUnit.MILLISECONDS)) .thenReturn(OZONE_OM_SNAPSHOT_DIFF_JOB_DEFAULT_WAIT_TIME_DEFAULT); - when(configuration - .getBoolean(OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF, + when(configuration. + getBoolean(OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF, OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT)) .thenReturn(OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT); when(configuration @@ -343,15 +331,6 @@ public void init() throws RocksDBException, IOException, ExecutionException { .getInt(OZONE_OM_SNAPSHOT_DIFF_THREAD_POOL_SIZE, OZONE_OM_SNAPSHOT_DIFF_THREAD_POOL_SIZE_DEFAULT)) .thenReturn(OZONE_OM_SNAPSHOT_DIFF_THREAD_POOL_SIZE_DEFAULT); - when(configuration - .getInt(OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE, - OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE_DEFAULT)) - .thenReturn(OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE_DEFAULT); - when(configuration - .getStorageSize(OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE, - OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE_DEFAULT, - StorageUnit.BYTES)) - .thenReturn(FileUtils.ONE_KB_BI.doubleValue()); when(configuration.getBoolean(OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB, OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT)) .thenReturn(OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT); @@ -382,40 +361,43 @@ public void init() throws RocksDBException, IOException, ExecutionException { when(ozoneManager.getConfiguration()).thenReturn(configuration); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - CacheLoader loader = - new CacheLoader() { - @Nonnull - @Override - public OmSnapshot load(@Nonnull String key) { - return getMockedOmSnapshot(key); - } - }; - omSnapshotManager = mock(OmSnapshotManager.class); - when(omSnapshotManager.isSnapshotStatus( - any(), any())).thenReturn(true); - snapshotCache = new SnapshotCache(omSnapshotManager, loader, 10); - + when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); + when(omSnapshotManager.isSnapshotStatus(any(), any())).thenReturn(true); + SnapshotCache snapshotCache = new SnapshotCache(mockCacheLoader(), 10, omMetrics); + + when(omSnapshotManager.getActiveSnapshot(anyString(), anyString(), anyString())) + .thenAnswer(invocationOnMock -> { + SnapshotInfo snapInfo = SnapshotUtils.getSnapshotInfo(ozoneManager, invocationOnMock.getArgument(0), + invocationOnMock.getArgument(1), invocationOnMock.getArgument(2)); + return snapshotCache.get(snapInfo.getSnapshotId()); + }); + when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); snapshotDiffManager = new SnapshotDiffManager(db, differ, ozoneManager, - snapshotCache, snapDiffJobTable, snapDiffReportTable, - columnFamilyOptions, codecRegistry); + snapDiffJobTable, snapDiffReportTable, columnFamilyOptions, codecRegistry); + when(omSnapshotManager.getDiffCleanupServiceInterval()).thenReturn(0L); + } + + private CacheLoader mockCacheLoader() { + return new CacheLoader() { + @Nonnull + @Override + public OmSnapshot load(@Nonnull UUID key) { + return getMockedOmSnapshot(key); + } + }; } @AfterEach public void tearDown() { - if (columnFamilyHandles != null) { - 
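Editor's note: TestSnapshotDiffManager no longer hands a SnapshotCache to SnapshotDiffManager; the lookup now goes through OmSnapshotManager, so the test stubs getActiveSnapshot to resolve the SnapshotInfo and serve the entry from a locally built cache. The essential wiring, condensed from init() (Mockito; the import location of SnapshotUtils and the generics are assumptions):

    import java.util.UUID;

    import org.apache.hadoop.ozone.om.OmSnapshotManager;
    import org.apache.hadoop.ozone.om.OzoneManager;
    import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
    import org.apache.hadoop.ozone.om.snapshot.SnapshotCache;
    import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils;

    import static org.mockito.Mockito.anyString;
    import static org.mockito.Mockito.when;

    final class SnapshotDiffWiringSketch {
      static void stubActiveSnapshotLookup(OmSnapshotManager omSnapshotManager,
          OzoneManager ozoneManager, SnapshotCache snapshotCache) throws Exception {
        // Volume, bucket and snapshot name arrive as the three string arguments; the stub
        // resolves them to a SnapshotInfo and serves the cached snapshot by its UUID.
        when(omSnapshotManager.getActiveSnapshot(anyString(), anyString(), anyString()))
            .thenAnswer(invocation -> {
              SnapshotInfo snapInfo = SnapshotUtils.getSnapshotInfo(ozoneManager,
                  invocation.getArgument(0), invocation.getArgument(1), invocation.getArgument(2));
              return snapshotCache.get(snapInfo.getSnapshotId());
            });
      }
    }

With that stub in place, the SnapshotDiffManager constructor simply drops the cache parameter, and the tests fetch snapshots through omSnapshotManager.getActiveSnapshot instead of snapshotCache.get, as the hunks that follow show.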
columnFamilyHandles.forEach(IOUtils::closeQuietly); - } - - IOUtils.closeQuietly(db); - IOUtils.closeQuietly(dbOptions); - IOUtils.closeQuietly(columnFamilyOptions); IOUtils.closeQuietly(snapshotDiffManager); + IOUtils.closeQuietly(columnFamilyHandles); + IOUtils.closeQuietly(db, dbOptions, columnFamilyOptions); } - private OmSnapshot getMockedOmSnapshot(String snapshot) { + private OmSnapshot getMockedOmSnapshot(UUID snapshotId) { OmSnapshot omSnapshot = mock(OmSnapshot.class); - when(omSnapshot.getName()).thenReturn(snapshot); + when(omSnapshot.getName()).thenReturn(snapshotId.toString()); when(omSnapshot.getMetadataManager()).thenReturn(omMetadataManager); when(omMetadataManager.getStore()).thenReturn(dbStore); return omSnapshot; @@ -432,6 +414,10 @@ private SnapshotInfo getMockedSnapshotInfo(UUID snapshotId) { public void testGetDeltaFilesWithDag(int numberOfFiles) throws IOException { UUID snap1 = UUID.randomUUID(); UUID snap2 = UUID.randomUUID(); + when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap1.toString()))) + .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap1.toString(), snap2)); + when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap2.toString()))) + .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap2.toString(), snap2)); String diffDir = snapDiffDir.getAbsolutePath(); Set randomStrings = IntStream.range(0, numberOfFiles) @@ -444,12 +430,12 @@ public void testGetDeltaFilesWithDag(int numberOfFiles) throws IOException { eq(diffDir)) ).thenReturn(Lists.newArrayList(randomStrings)); - ReferenceCounted rcFromSnapshot = - snapshotCache.get(snap1.toString()); - ReferenceCounted rcToSnapshot = - snapshotCache.get(snap2.toString()); - OmSnapshot fromSnapshot = (OmSnapshot) rcFromSnapshot.get(); - OmSnapshot toSnapshot = (OmSnapshot) rcToSnapshot.get(); + ReferenceCounted rcFromSnapshot = + omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap1.toString()); + ReferenceCounted rcToSnapshot = + omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap2.toString()); + OmSnapshot fromSnapshot = rcFromSnapshot.get(); + OmSnapshot toSnapshot = rcToSnapshot.get(); SnapshotInfo fromSnapshotInfo = getMockedSnapshotInfo(snap1); SnapshotInfo toSnapshotInfo = getMockedSnapshotInfo(snap2); @@ -501,6 +487,10 @@ public void testGetDeltaFilesWithFullDiff(int numberOfFiles, }); UUID snap1 = UUID.randomUUID(); UUID snap2 = UUID.randomUUID(); + when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap1.toString()))) + .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap1.toString(), snap2)); + when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap2.toString()))) + .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap2.toString(), snap2)); if (!useFullDiff) { when(differ.getSSTDiffListWithFullPath( any(DifferSnapshotInfo.class), @@ -509,12 +499,12 @@ public void testGetDeltaFilesWithFullDiff(int numberOfFiles, .thenReturn(Collections.emptyList()); } - ReferenceCounted rcFromSnapshot = - snapshotCache.get(snap1.toString()); - ReferenceCounted rcToSnapshot = - snapshotCache.get(snap2.toString()); - OmSnapshot fromSnapshot = (OmSnapshot) rcFromSnapshot.get(); - OmSnapshot toSnapshot = (OmSnapshot) rcToSnapshot.get(); + ReferenceCounted rcFromSnapshot = + omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap1.toString()); + ReferenceCounted rcToSnapshot = + 
omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap2.toString()); + OmSnapshot fromSnapshot = rcFromSnapshot.get(); + OmSnapshot toSnapshot = rcToSnapshot.get(); SnapshotInfo fromSnapshotInfo = getMockedSnapshotInfo(snap1); SnapshotInfo toSnapshotInfo = getMockedSnapshotInfo(snap1); @@ -564,6 +554,10 @@ public void testGetDeltaFilesWithDifferThrowException(int numberOfFiles) }); UUID snap1 = UUID.randomUUID(); UUID snap2 = UUID.randomUUID(); + when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap1.toString()))) + .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap1.toString(), snap1)); + when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap2.toString()))) + .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap2.toString(), snap2)); doThrow(new FileNotFoundException("File not found exception.")) .when(differ) @@ -572,12 +566,12 @@ public void testGetDeltaFilesWithDifferThrowException(int numberOfFiles) any(DifferSnapshotInfo.class), anyString()); - ReferenceCounted rcFromSnapshot = - snapshotCache.get(snap1.toString()); - ReferenceCounted rcToSnapshot = - snapshotCache.get(snap2.toString()); - OmSnapshot fromSnapshot = (OmSnapshot) rcFromSnapshot.get(); - OmSnapshot toSnapshot = (OmSnapshot) rcToSnapshot.get(); + ReferenceCounted rcFromSnapshot = + omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap1.toString()); + ReferenceCounted rcToSnapshot = + omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap2.toString()); + OmSnapshot fromSnapshot = rcFromSnapshot.get(); + OmSnapshot toSnapshot = rcToSnapshot.get(); SnapshotInfo fromSnapshotInfo = getMockedSnapshotInfo(snap1); SnapshotInfo toSnapshotInfo = getMockedSnapshotInfo(snap1); @@ -653,15 +647,11 @@ public void testObjectIdMapWithTombstoneEntries(boolean nativeLibraryLoaded, try (MockedConstruction mockedSSTFileReader = mockConstruction(SstFileSetReader.class, (mock, context) -> { - when(mock.getKeyStreamWithTombstone(any(), any(), any())) + when(mock.getKeyStreamWithTombstone(any(), any())) .thenReturn(keysIncludingTombstones.stream()); when(mock.getKeyStream(any(), any())) .thenReturn(keysExcludingTombstones.stream()); }); - MockedConstruction mockedSSTDumpTool = - mockConstruction(ManagedSSTDumpTool.class, - (mock, context) -> { - }) ) { Map toSnapshotTableMap = IntStream.concat(IntStream.range(0, 25), IntStream.range(50, 100)) @@ -679,9 +669,6 @@ public void testObjectIdMapWithTombstoneEntries(boolean nativeLibraryLoaded, Table fromSnapshotTable = getMockedTable(fromSnapshotTableMap, snapshotTableName); - snapshotDiffManager = new SnapshotDiffManager(db, differ, ozoneManager, - snapshotCache, snapDiffJobTable, snapDiffReportTable, - columnFamilyOptions, codecRegistry); SnapshotDiffManager spy = spy(snapshotDiffManager); doAnswer(invocation -> { @@ -1566,7 +1553,6 @@ public void testGetSnapshotDiffReportHappyCase() throws Exception { * Tests that only QUEUED jobs are submitted to the executor and rest are * short-circuited based on previous one. 
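(Illustrative aside, not part of the patch.) The test refactor above replaces the String-keyed snapshot cache with a SnapshotCache keyed by snapshot UUID and routes lookups through OmSnapshotManager.getActiveSnapshot(volume, bucket, snapshotName), which returns a reference-counted OmSnapshot. A minimal Mockito sketch of that wiring, reusing helpers that already appear in the test (mockCacheLoader(), SnapshotUtils) and assuming omMetrics is the test's OMMetrics instance:

    // Sketch only: route the mocked getActiveSnapshot() through a UUID-keyed
    // SnapshotCache, as done in init() above. Relies on the test's existing
    // static imports (when, anyString) from Mockito.
    private void wireActiveSnapshotLookup(OmSnapshotManager omSnapshotManager,
        OzoneManager ozoneManager, OMMetrics omMetrics) throws IOException {
      SnapshotCache cache = new SnapshotCache(mockCacheLoader(), 10, omMetrics);
      when(omSnapshotManager.getActiveSnapshot(anyString(), anyString(), anyString()))
          .thenAnswer(invocation -> {
            // Resolve volume/bucket/snapshot name to its SnapshotInfo, then
            // load the OmSnapshot by UUID through the cache.
            SnapshotInfo info = SnapshotUtils.getSnapshotInfo(ozoneManager,
                invocation.getArgument(0), invocation.getArgument(1),
                invocation.getArgument(2));
            return cache.get(info.getSnapshotId());
          });
    }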
*/ - @Unhealthy @Test public void testGetSnapshotDiffReportJob() throws Exception { for (int i = 0; i < jobStatuses.size(); i++) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java similarity index 96% rename from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotInfo.java rename to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java index 48f366371adf..dc00433e179b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotInfo.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java @@ -16,10 +16,12 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus; import org.apache.hadoop.util.Time; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSstFilteringService.java similarity index 78% rename from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java rename to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSstFilteringService.java index a8b026af05b5..72bca07557b6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSstFilteringService.java @@ -16,7 +16,7 @@ * limitations under the License. 
* */ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; @@ -26,6 +26,14 @@ import org.apache.hadoop.hdds.utils.db.DBProfile; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.lock.BootstrapStateHandler; +import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OmTestManagers; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SstFilteringService; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -34,18 +42,17 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.ratis.util.ExitUtils; -import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; import org.rocksdb.LiveFileMetaData; import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -71,24 +78,20 @@ /** * Test SST Filtering Service. 
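(Illustrative aside, not part of the patch.) With the switch to a PER_CLASS test lifecycle below, a single OzoneManager and filtering service are shared by every test method, so assertions can no longer hard-code snapshot counts; the test funnels snapshot creation through a counting helper instead. A compact sketch of that pattern; waitUntilAllFiltered is a hypothetical convenience wrapper, the rest mirrors the diff:

    // Sketch of the shared-counter pattern: every snapshot created through the
    // helper bumps countTotalSnapshots, and waits/assertions are expressed
    // against that running total instead of a fixed number.
    private short countTotalSnapshots = 0;

    private void createSnapshot(String volume, String bucket, String snapshot)
        throws IOException {
      writeClient.createSnapshot(volume, bucket, snapshot);
      countTotalSnapshots++;
    }

    // Hypothetical wrapper around the helpers already used by this test.
    private void waitUntilAllFiltered(SstFilteringService service) throws Exception {
      waitForSnapshotsAtLeast(service, countTotalSnapshots);
      assertEquals(countTotalSnapshots, service.getSnapshotFilteredCount().get());
    }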
*/ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) public class TestSstFilteringService { - public static final String SST_FILE_EXTENSION = ".sst"; - @TempDir - private File folder; + private static final String SST_FILE_EXTENSION = ".sst"; private OzoneManagerProtocol writeClient; private OzoneManager om; private OzoneConfiguration conf; private KeyManager keyManager; + private short countTotalSnapshots = 0; @BeforeAll - public static void setup() { + void setup(@TempDir Path folder) throws Exception { ExitUtils.disableSystemExit(); - } - - @BeforeEach - void init() throws Exception { conf = new OzoneConfiguration(); - conf.set(OZONE_METADATA_DIRS, folder.getAbsolutePath()); + conf.set(OZONE_METADATA_DIRS, folder.toString()); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, 100, @@ -102,7 +105,7 @@ void init() throws Exception { om = omTestManagers.getOzoneManager(); } - @AfterEach + @AfterAll public void cleanup() throws Exception { if (keyManager != null) { keyManager.stop(); @@ -140,9 +143,23 @@ public void testIrrelevantSstFileDeletion() keyManager.getSnapshotSstFilteringService(); final int keyCount = 100; - String volumeName = "vol1"; + String volumeName = "volz"; String bucketName1 = "buck1"; - createVolumeAndBucket(volumeName, bucketName1); + createVolume(volumeName); + addBucketToVolume(volumeName, bucketName1); + + long countExistingSnapshots = filteringService.getSnapshotFilteredCount().get(); + List previousFiles = activeDbStore.getDb().getSstFileList(); + List listPreviousFiles = new ArrayList(); + int level0FilesCountDiff = 0; + int totalFileCountDiff = 0; + for (LiveFileMetaData fileMetaData : previousFiles) { + totalFileCountDiff++; + listPreviousFiles.add(fileMetaData.fileName()); + if (fileMetaData.level() == 0) { + level0FilesCountDiff++; + } + } createKeys(volumeName, bucketName1, keyCount / 2); activeDbStore.getDb().flush(OmMetadataManagerImpl.KEY_TABLE); @@ -153,8 +170,7 @@ public void testIrrelevantSstFileDeletion() int level0FilesCount = 0; int totalFileCount = 0; - List initialsstFileList = - activeDbStore.getDb().getSstFileList(); + List initialsstFileList = activeDbStore.getDb().getSstFileList(); for (LiveFileMetaData fileMetaData : initialsstFileList) { totalFileCount++; if (fileMetaData.level() == 0) { @@ -162,36 +178,36 @@ public void testIrrelevantSstFileDeletion() } } - assertEquals(totalFileCount, level0FilesCount); + assertEquals(totalFileCount - totalFileCountDiff, level0FilesCount - level0FilesCountDiff); activeDbStore.getDb().compactRange(OmMetadataManagerImpl.KEY_TABLE); int nonLevel0FilesCountAfterCompact = 0; - List nonLevelOFiles = new ArrayList<>(); + List nonLevelOFiles = new ArrayList<>(); for (LiveFileMetaData fileMetaData : activeDbStore.getDb() .getSstFileList()) { if (fileMetaData.level() != 0) { nonLevel0FilesCountAfterCompact++; - nonLevelOFiles.add(fileMetaData); + nonLevelOFiles.add(fileMetaData.fileName()); } } assertThat(nonLevel0FilesCountAfterCompact).isGreaterThan(0); String bucketName2 = "buck2"; - createVolumeAndBucket(volumeName, bucketName2); + addBucketToVolume(volumeName, bucketName2); createKeys(volumeName, bucketName2, keyCount); activeDbStore.getDb().flush(OmMetadataManagerImpl.KEY_TABLE); List allFiles = activeDbStore.getDb().getSstFileList(); String snapshotName1 = "snapshot1"; - writeClient.createSnapshot(volumeName, bucketName2, snapshotName1); + createSnapshot(volumeName, bucketName2, snapshotName1); SnapshotInfo 
snapshotInfo = om.getMetadataManager().getSnapshotInfoTable() .get(SnapshotInfo.getTableKey(volumeName, bucketName2, snapshotName1)); assertFalse(snapshotInfo.isSstFiltered()); - waitForSnapshotsAtLeast(filteringService, 1); - assertEquals(1, filteringService.getSnapshotFilteredCount().get()); + waitForSnapshotsAtLeast(filteringService, countExistingSnapshots + 1); + assertEquals(countExistingSnapshots + 1, filteringService.getSnapshotFilteredCount().get()); Set keysFromActiveDb = getKeysFromDb(om.getMetadataManager(), volumeName, bucketName2); @@ -206,27 +222,33 @@ public void testIrrelevantSstFileDeletion() OmSnapshotManager.getSnapshotPath(conf, snapshotInfo); for (LiveFileMetaData file : allFiles) { + //Skipping the previous files from this check even those also works. + if (listPreviousFiles.contains(file.fileName())) { + continue; + } File sstFile = new File(snapshotDirName + OM_KEY_PREFIX + file.fileName()); - if (nonLevelOFiles.stream() - .anyMatch(o -> file.fileName().equals(o.fileName()))) { + if (nonLevelOFiles.contains(file.fileName())) { assertFalse(sstFile.exists()); } else { assertTrue(sstFile.exists()); } } - assertTrue(snapshotInfo.isSstFiltered()); + // Need to read the sstFiltered flag which is set in background process and + // hence snapshotInfo.isSstFiltered() may not work sometimes. + assertTrue(om.getMetadataManager().getSnapshotInfoTable().get(SnapshotInfo + .getTableKey(volumeName, bucketName2, snapshotName1)).isSstFiltered()); String snapshotName2 = "snapshot2"; final long count; try (BootstrapStateHandler.Lock lock = filteringService.getBootstrapStateLock().lock()) { count = filteringService.getSnapshotFilteredCount().get(); - writeClient.createSnapshot(volumeName, bucketName2, snapshotName2); + createSnapshot(volumeName, bucketName2, snapshotName2); assertThrows(TimeoutException.class, - () -> waitForSnapshotsAtLeast(filteringService, count + 1)); + () -> waitForSnapshotsAtLeast(filteringService, count + 1 + countExistingSnapshots)); assertEquals(count, filteringService.getSnapshotFilteredCount().get()); } @@ -245,9 +267,10 @@ public void testActiveAndDeletedSnapshotCleanup() throws Exception { String volumeName = "volume1"; List bucketNames = Arrays.asList("bucket1", "bucket2"); + createVolume(volumeName); // Create 2 Buckets for (String bucketName : bucketNames) { - createVolumeAndBucket(volumeName, bucketName); + addBucketToVolume(volumeName, bucketName); } // Write 25 keys in each bucket, 2 sst files would be generated each for // keys in a single bucket @@ -265,8 +288,8 @@ public void testActiveAndDeletedSnapshotCleanup() throws Exception { keyManager.getSnapshotSstFilteringService(); sstFilteringService.pause(); - writeClient.createSnapshot(volumeName, bucketNames.get(0), "snap1"); - writeClient.createSnapshot(volumeName, bucketNames.get(0), "snap2"); + createSnapshot(volumeName, bucketNames.get(0), "snap1"); + createSnapshot(volumeName, bucketNames.get(0), "snap2"); SnapshotInfo snapshot1Info = om.getMetadataManager().getSnapshotInfoTable() .get(SnapshotInfo.getTableKey(volumeName, bucketNames.get(0), "snap1")); @@ -284,15 +307,15 @@ public void testActiveAndDeletedSnapshotCleanup() throws Exception { await(10_000, 1_000, () -> snap1Current.exists() && snap2Current.exists()); long snap1SstFileCountBeforeFilter = Arrays.stream(snapshot1Dir.listFiles()) - .filter(f -> f.getName().endsWith(".sst")).count(); + .filter(f -> f.getName().endsWith(SST_FILE_EXTENSION)).count(); long snap2SstFileCountBeforeFilter = Arrays.stream(snapshot2Dir.listFiles()) - 
.filter(f -> f.getName().endsWith(".sst")).count(); + .filter(f -> f.getName().endsWith(SST_FILE_EXTENSION)).count(); // delete snap1 writeClient.deleteSnapshot(volumeName, bucketNames.get(0), "snap1"); sstFilteringService.resume(); // Filtering service will only act on snap2 as it is an active snaphot - waitForSnapshotsAtLeast(sstFilteringService, 2); + waitForSnapshotsAtLeast(sstFilteringService, countTotalSnapshots); long snap1SstFileCountAfterFilter = Arrays.stream(snapshot1Dir.listFiles()) .filter(f -> f.getName().endsWith(SST_FILE_EXTENSION)).count(); long snap2SstFileCountAfterFilter = Arrays.stream(snapshot2Dir.listFiles()) @@ -300,10 +323,12 @@ public void testActiveAndDeletedSnapshotCleanup() throws Exception { // one sst will be filtered in both active but not in deleted snapshot // as sstFiltering svc won't run on already deleted snapshots but will mark // it as filtered. - assertEquals(2, sstFilteringService.getSnapshotFilteredCount().get()); + assertEquals(countTotalSnapshots, sstFilteringService.getSnapshotFilteredCount().get()); assertEquals(snap1SstFileCountBeforeFilter, snap1SstFileCountAfterFilter); - assertEquals(snap2SstFileCountBeforeFilter - 1, - snap2SstFileCountAfterFilter); + // If method with order 1 is run .sst file from /vol1/buck1 and /vol1/buck2 will be deleted. + // As part of this method .sst file from /volume1/bucket2/ will be deleted. + // sstFiltering won't run on deleted snapshots in /volume1/bucket1. + assertThat(snap2SstFileCountBeforeFilter).isGreaterThan(snap2SstFileCountAfterFilter); } private void createKeys(String volumeName, @@ -316,8 +341,7 @@ private void createKeys(String volumeName, } } - private void createVolumeAndBucket(String volumeName, - String bucketName) + private void createVolume(String volumeName) throws IOException { OMRequestTestUtils.addVolumeToOM(keyManager.getMetadataManager(), OmVolumeArgs.newBuilder() @@ -325,7 +349,10 @@ private void createVolumeAndBucket(String volumeName, .setAdminName("a") .setVolume(volumeName) .build()); + } + private void addBucketToVolume(String volumeName, String bucketName) + throws IOException { OMRequestTestUtils.addBucketToOM(keyManager.getMetadataManager(), OmBucketInfo.newBuilder().setVolumeName(volumeName) .setBucketName(bucketName) @@ -371,8 +398,9 @@ public void testSstFilteringService() throws Exception { String volumeName = "volume"; List bucketNames = Arrays.asList("bucket", "bucket1", "bucket2"); + createVolume(volumeName); for (String bucketName : bucketNames) { - createVolumeAndBucket(volumeName, bucketName); + addBucketToVolume(volumeName, bucketName); } int keyCount = 150; @@ -407,15 +435,14 @@ public void testSstFilteringService() throws Exception { List snapshotNames = Arrays.asList("snap", "snap-1", "snap-2"); for (int i = 0; i < 3; i++) { - writeClient.createSnapshot(volumeName, bucketNames.get(i), - snapshotNames.get(i)); + createSnapshot(volumeName, bucketNames.get(i), snapshotNames.get(i)); } SstFilteringService sstFilteringService = keyManager.getSnapshotSstFilteringService(); - waitForSnapshotsAtLeast(sstFilteringService, 3); - assertEquals(3, sstFilteringService.getSnapshotFilteredCount().get()); + waitForSnapshotsAtLeast(sstFilteringService, countTotalSnapshots); + assertEquals(countTotalSnapshots, sstFilteringService.getSnapshotFilteredCount().get()); Set keyInBucketAfterFilteringRun = getKeysFromSnapshot(volumeName, bucketNames.get(0), @@ -461,12 +488,18 @@ private Set getKeysFromSnapshot(String volume, String snapshot) throws IOException { SnapshotInfo snapshotInfo = 
om.getMetadataManager().getSnapshotInfoTable() .get(SnapshotInfo.getTableKey(volume, bucket, snapshot)); - try (ReferenceCounted - snapshotMetadataReader = om.getOmSnapshotManager() - .getSnapshotCache() - .get(snapshotInfo.getTableKey())) { - OmSnapshot omSnapshot = (OmSnapshot) snapshotMetadataReader.get(); + try (ReferenceCounted snapshotMetadataReader = + om.getOmSnapshotManager().getActiveSnapshot( + snapshotInfo.getVolumeName(), + snapshotInfo.getBucketName(), + snapshotInfo.getName())) { + OmSnapshot omSnapshot = snapshotMetadataReader.get(); return getKeysFromDb(omSnapshot.getMetadataManager(), volume, bucket); } } + + private void createSnapshot(String volumeName, String bucketName, String snapshotName) throws IOException { + writeClient.createSnapshot(volumeName, bucketName, snapshotName); + countTotalSnapshots++; + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAclTestUtil.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAclTestUtil.java new file mode 100644 index 000000000000..23f21e9cdaed --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAclTestUtil.java @@ -0,0 +1,170 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
    + * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.security.acl; + +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; + +import java.io.IOException; +import java.util.List; + +/** Helper for ACL tests. */ +final class OzoneNativeAclTestUtil { + + public static void addVolumeAcl( + OMMetadataManager metadataManager, + String volume, + OzoneAcl ozoneAcl + ) throws IOException { + final String volumeKey = metadataManager.getVolumeKey(volume); + final Table volumeTable = metadataManager.getVolumeTable(); + final OmVolumeArgs omVolumeArgs = volumeTable.get(volumeKey); + + omVolumeArgs.addAcl(ozoneAcl); + + volumeTable.addCacheEntry( + new CacheKey<>(volumeKey), + CacheValue.get(1L, omVolumeArgs)); + } + + public static void addBucketAcl( + OMMetadataManager metadataManager, + String volume, + String bucket, + OzoneAcl ozoneAcl) throws IOException { + final String bucketKey = metadataManager.getBucketKey(volume, bucket); + final Table bucketTable = metadataManager.getBucketTable(); + final OmBucketInfo omBucketInfo = bucketTable.get(bucketKey); + + omBucketInfo.addAcl(ozoneAcl); + + bucketTable.addCacheEntry( + new CacheKey<>(bucketKey), + CacheValue.get(1L, omBucketInfo)); + } + + public static void addKeyAcl( + OMMetadataManager metadataManager, + String volume, + String bucket, + BucketLayout bucketLayout, + String key, + OzoneAcl ozoneAcl + ) throws IOException { + final String objKey = metadataManager.getOzoneKey(volume, bucket, key); + final Table keyTable = metadataManager.getKeyTable(bucketLayout); + final OmKeyInfo omKeyInfo = keyTable.get(objKey); + + omKeyInfo.addAcl(ozoneAcl); + + keyTable.addCacheEntry( + new CacheKey<>(objKey), + CacheValue.get(1L, omKeyInfo)); + } + + public static void setVolumeAcl( + OMMetadataManager metadataManager, + String volume, + List ozoneAcls) throws IOException { + final String volumeKey = metadataManager.getVolumeKey(volume); + final Table volumeTable = metadataManager.getVolumeTable(); + final OmVolumeArgs omVolumeArgs = volumeTable.get(volumeKey); + + omVolumeArgs.setAcls(ozoneAcls); + + volumeTable.addCacheEntry( + new CacheKey<>(volumeKey), + CacheValue.get(1L, omVolumeArgs)); + } + + public static void setBucketAcl( + OMMetadataManager metadataManager, + String volume, + String bucket, + List ozoneAcls) throws IOException { + final String bucketKey = metadataManager.getBucketKey(volume, bucket); + final Table bucketTable = metadataManager.getBucketTable(); + final OmBucketInfo omBucketInfo = bucketTable.get(bucketKey); + + omBucketInfo.setAcls(ozoneAcls); + + bucketTable.addCacheEntry( + new CacheKey<>(bucketKey), + CacheValue.get(1L, omBucketInfo)); + } + + public static void setKeyAcl( + OMMetadataManager metadataManager, + String volume, + String bucket, + BucketLayout bucketLayout, + String key, + List 
ozoneAcls) throws IOException { + final String objKey = metadataManager.getOzoneKey(volume, bucket, key); + final Table keyTable = metadataManager.getKeyTable(bucketLayout); + final OmKeyInfo omKeyInfo = keyTable.get(objKey); + + omKeyInfo.setAcls(ozoneAcls); + + keyTable.addCacheEntry( + new CacheKey<>(objKey), + CacheValue.get(1L, omKeyInfo)); + } + + public static List getVolumeAcls( + OMMetadataManager metadataManager, + String volume + ) throws IOException { + return metadataManager.getVolumeTable() + .get(metadataManager.getVolumeKey(volume)) + .getAcls(); + } + + public static List getBucketAcls( + OMMetadataManager metadataManager, + String volume, + String bucket + ) throws IOException { + return metadataManager.getBucketTable() + .get(metadataManager.getBucketKey(volume, bucket)) + .getAcls(); + } + + public static List getKeyAcls( + OMMetadataManager metadataManager, + String volume, + String bucket, + BucketLayout bucketLayout, + String key + ) throws IOException { + return metadataManager.getKeyTable(bucketLayout) + .get(metadataManager.getOzoneKey(volume, bucket, key)) + .getAcls(); + } + + private OzoneNativeAclTestUtil() { + // utilities + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java index f5bb8d35350b..7eb46f617f37 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java @@ -21,8 +21,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.server.OzoneAdmins; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.BucketManager; import org.apache.hadoop.ozone.om.KeyManager; @@ -244,9 +242,9 @@ public void testCheckAccessForBucket( ACLType groupRight, boolean expectedResult) throws Exception { createAll(keyName, prefixName, userRight, groupRight, expectedResult); OzoneAcl userAcl = new OzoneAcl(USER, testUgi.getUserName(), - parentDirUserAcl, ACCESS); + ACCESS, parentDirUserAcl); OzoneAcl groupAcl = new OzoneAcl(GROUP, testUgi.getGroups().size() > 0 ? - testUgi.getGroups().get(0) : "", parentDirGroupAcl, ACCESS); + testUgi.getGroups().get(0) : "", ACCESS, parentDirGroupAcl); // Set access for volume. // We should directly add to table because old API's update to DB. @@ -266,9 +264,9 @@ public void testCheckAccessForKey( ACLType groupRight, boolean expectedResult) throws Exception { createAll(keyName, prefixName, userRight, groupRight, expectedResult); OzoneAcl userAcl = new OzoneAcl(USER, testUgi.getUserName(), - parentDirUserAcl, ACCESS); + ACCESS, parentDirUserAcl); OzoneAcl groupAcl = new OzoneAcl(GROUP, testUgi.getGroups().size() > 0 ? - testUgi.getGroups().get(0) : "", parentDirGroupAcl, ACCESS); + testUgi.getGroups().get(0) : "", ACCESS, parentDirGroupAcl); // Set access for volume & bucket. We should directly add to table // because old API's update to DB. 
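(Illustrative aside.) The hunks above track an OzoneAcl constructor reordering: the ACL scope now precedes the right, i.e. (type, name, scope, right) instead of (type, name, right, scope). A small sketch with the same identities these tests use; READ is just an example right, and the snippet assumes the test's existing imports and static imports (USER, GROUP, ACCESS, READ, Arrays, List):

    // Old order (before this change): type, name, right, scope
    //   new OzoneAcl(USER, ugi.getUserName(), READ, ACCESS);
    // New order used throughout these tests: type, name, scope, right
    private static List<OzoneAcl> exampleAcls(UserGroupInformation ugi) {
      OzoneAcl userAcl = new OzoneAcl(USER, ugi.getUserName(), ACCESS, READ);
      OzoneAcl groupAcl = new OzoneAcl(GROUP,
          ugi.getGroups().isEmpty() ? "" : ugi.getGroups().get(0), ACCESS, READ);
      return Arrays.asList(userAcl, groupAcl);
    }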
@@ -296,9 +294,9 @@ public void testCheckAccessForPrefix( .build(); OzoneAcl userAcl = new OzoneAcl(USER, testUgi.getUserName(), - parentDirUserAcl, ACCESS); + ACCESS, parentDirUserAcl); OzoneAcl groupAcl = new OzoneAcl(GROUP, testUgi.getGroups().size() > 0 ? - testUgi.getGroups().get(0) : "", parentDirGroupAcl, ACCESS); + testUgi.getGroups().get(0) : "", ACCESS, parentDirGroupAcl); // Set access for volume & bucket. We should directly add to table // because old API's update to DB. @@ -314,45 +312,19 @@ public void testCheckAccessForPrefix( private void setVolumeAcl(List ozoneAcls) throws IOException { - String volumeKey = metadataManager.getVolumeKey(volObj.getVolumeName()); - OmVolumeArgs omVolumeArgs = - metadataManager.getVolumeTable().get(volumeKey); - - omVolumeArgs.setAcls(ozoneAcls); - - metadataManager.getVolumeTable().addCacheEntry(new CacheKey<>(volumeKey), - CacheValue.get(1L, omVolumeArgs)); + OzoneNativeAclTestUtil.setVolumeAcl(metadataManager, vol, ozoneAcls); } private void setBucketAcl(List ozoneAcls) throws IOException { - String bucketKey = metadataManager.getBucketKey(vol, buck); - OmBucketInfo omBucketInfo = metadataManager.getBucketTable().get(bucketKey); - - omBucketInfo.setAcls(ozoneAcls); - - metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey), - CacheValue.get(1L, omBucketInfo)); + OzoneNativeAclTestUtil.setBucketAcl(metadataManager, vol, buck, ozoneAcls); } private void addVolumeAcl(OzoneAcl ozoneAcl) throws IOException { - String volumeKey = metadataManager.getVolumeKey(volObj.getVolumeName()); - OmVolumeArgs omVolumeArgs = - metadataManager.getVolumeTable().get(volumeKey); - - omVolumeArgs.addAcl(ozoneAcl); - - metadataManager.getVolumeTable().addCacheEntry(new CacheKey<>(volumeKey), - CacheValue.get(1L, omVolumeArgs)); + OzoneNativeAclTestUtil.addVolumeAcl(metadataManager, vol, ozoneAcl); } private void addBucketAcl(OzoneAcl ozoneAcl) throws IOException { - String bucketKey = metadataManager.getBucketKey(vol, buck); - OmBucketInfo omBucketInfo = metadataManager.getBucketTable().get(bucketKey); - - omBucketInfo.addAcl(ozoneAcl); - - metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey), - CacheValue.get(1L, omBucketInfo)); + OzoneNativeAclTestUtil.addBucketAcl(metadataManager, vol, buck, ozoneAcl); } private void resetAclsAndValidateAccess( @@ -379,8 +351,8 @@ private void resetAclsAndValidateAccess( * if user/group has access to them. */ for (ACLType a1 : allAcls) { - OzoneAcl newAcl = new OzoneAcl(accessType, getAclName(accessType), a1, - ACCESS); + OzoneAcl newAcl = new OzoneAcl(accessType, getAclName(accessType), ACCESS, a1 + ); // Reset acls to only one right. if (obj.getResourceType() == VOLUME) { @@ -459,7 +431,7 @@ private void resetAclsAndValidateAccess( ACLIdentityType identityType = ACLIdentityType.values()[type]; // Add remaining acls one by one and then check access. OzoneAcl addAcl = new OzoneAcl(identityType, - getAclName(identityType), a2, ACCESS); + getAclName(identityType), ACCESS, a2); // For volume and bucket update to cache. As Old API's update to // only DB not cache. 
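(Usage note, illustrative only.) The repeated CacheKey/CacheValue boilerplate deleted above now lives in the package-private OzoneNativeAclTestUtil helper, so a test in the same package can push an ACL straight into the OM metadata cache in one call. A sketch, using the test's existing metadataManager, vol and buck fields:

    // Grant an ACL directly in the OM metadata cache (bypassing the request
    // path), as the rewritten helpers above now do.
    private void grantAcl(OzoneAcl acl) throws IOException {
      OzoneNativeAclTestUtil.addVolumeAcl(metadataManager, vol, acl);
      OzoneNativeAclTestUtil.addBucketAcl(metadataManager, vol, buck, acl);
    }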
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java index f17d477bd793..e7ef8f51c4c9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java @@ -23,8 +23,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.server.OzoneAdmins; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.BucketManager; import org.apache.hadoop.ozone.om.KeyManager; @@ -34,7 +32,6 @@ import org.apache.hadoop.ozone.om.VolumeManager; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; @@ -226,10 +223,10 @@ private void testParentChild(OzoneObj child, .setAclRights(childAclType).build(); OzoneAcl childAcl = new OzoneAcl(USER, - testUgi1.getUserName(), childAclType, ACCESS); + testUgi1.getUserName(), ACCESS, childAclType); OzoneAcl parentAcl = new OzoneAcl(USER, - testUgi1.getUserName(), parentAclType, ACCESS); + testUgi1.getUserName(), ACCESS, parentAclType); assertFalse(nativeAuthorizer.checkAccess(child, requestContext)); if (child.getResourceType() == BUCKET) { @@ -257,7 +254,7 @@ private void testParentChild(OzoneObj child, // add the volume acl (grand-parent), now key access is allowed. 
OzoneAcl parentVolumeAcl = new OzoneAcl(USER, - testUgi1.getUserName(), READ, ACCESS); + testUgi1.getUserName(), ACCESS, READ); addVolumeAcl(child.getVolumeName(), parentVolumeAcl); assertTrue(nativeAuthorizer.checkAccess( child, requestContext)); @@ -265,88 +262,46 @@ private void testParentChild(OzoneObj child, } private void addVolumeAcl(String vol, OzoneAcl ozoneAcl) throws IOException { - String volumeKey = metadataManager.getVolumeKey(vol); - OmVolumeArgs omVolumeArgs = - metadataManager.getVolumeTable().get(volumeKey); - - omVolumeArgs.addAcl(ozoneAcl); - - metadataManager.getVolumeTable().addCacheEntry(new CacheKey<>(volumeKey), - CacheValue.get(1L, omVolumeArgs)); + OzoneNativeAclTestUtil.addVolumeAcl(metadataManager, vol, ozoneAcl); } private List getVolumeAcls(String vol) throws IOException { - String volumeKey = metadataManager.getVolumeKey(vol); - OmVolumeArgs omVolumeArgs = - metadataManager.getVolumeTable().get(volumeKey); - - return omVolumeArgs.getAcls(); + return OzoneNativeAclTestUtil.getVolumeAcls(metadataManager, vol); } private void setVolumeAcl(String vol, List ozoneAcls) throws IOException { - String volumeKey = metadataManager.getVolumeKey(vol); - OmVolumeArgs omVolumeArgs = metadataManager.getVolumeTable().get(volumeKey); - - omVolumeArgs.setAcls(ozoneAcls); - - metadataManager.getVolumeTable().addCacheEntry(new CacheKey<>(volumeKey), - CacheValue.get(1L, omVolumeArgs)); + OzoneNativeAclTestUtil.setVolumeAcl(metadataManager, vol, ozoneAcls); } private void addKeyAcl(String vol, String buck, String key, OzoneAcl ozoneAcl) throws IOException { - String objKey = metadataManager.getOzoneKey(vol, buck, key); - OmKeyInfo omKeyInfo = - metadataManager.getKeyTable(getBucketLayout()).get(objKey); - - omKeyInfo.addAcl(ozoneAcl); - - metadataManager.getKeyTable(getBucketLayout()) - .addCacheEntry(new CacheKey<>(objKey), - CacheValue.get(1L, omKeyInfo)); + OzoneNativeAclTestUtil.addKeyAcl(metadataManager, vol, buck, getBucketLayout(), key, ozoneAcl); } private void setKeyAcl(String vol, String buck, String key, List ozoneAcls) throws IOException { - String objKey = metadataManager.getOzoneKey(vol, buck, key); - OmKeyInfo omKeyInfo = - metadataManager.getKeyTable(getBucketLayout()).get(objKey); - omKeyInfo.setAcls(ozoneAcls); - - metadataManager.getKeyTable(getBucketLayout()) - .addCacheEntry(new CacheKey<>(objKey), - CacheValue.get(1L, omKeyInfo)); + OzoneNativeAclTestUtil.setKeyAcl(metadataManager, vol, buck, getBucketLayout(), key, ozoneAcls); } private void addBucketAcl(String vol, String buck, OzoneAcl ozoneAcl) throws IOException { - String bucketKey = metadataManager.getBucketKey(vol, buck); - OmBucketInfo omBucketInfo = metadataManager.getBucketTable().get(bucketKey); - - omBucketInfo.addAcl(ozoneAcl); - - metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey), - CacheValue.get(1L, omBucketInfo)); + OzoneNativeAclTestUtil.addBucketAcl(metadataManager, vol, buck, ozoneAcl); } private List getBucketAcls(String vol, String buck) throws IOException { - String bucketKey = metadataManager.getBucketKey(vol, buck); - OmBucketInfo omBucketInfo = metadataManager.getBucketTable().get(bucketKey); + return OzoneNativeAclTestUtil.getBucketAcls(metadataManager, vol, buck); + } - return omBucketInfo.getAcls(); + private List getKeyAcls(String vol, String buck, String key) + throws IOException { + return OzoneNativeAclTestUtil.getKeyAcls(metadataManager, vol, buck, getBucketLayout(), key); } private void setBucketAcl(String vol, String buck, List ozoneAcls) throws 
IOException { - String bucketKey = metadataManager.getBucketKey(vol, buck); - OmBucketInfo omBucketInfo = metadataManager.getBucketTable().get(bucketKey); - - omBucketInfo.setAcls(ozoneAcls); - - metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey), - CacheValue.get(1L, omBucketInfo)); + OzoneNativeAclTestUtil.setBucketAcl(metadataManager, vol, buck, ozoneAcls); } private static OzoneObjInfo createVolume(String volumeName) diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ByteBufferPositionedReadable.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ByteBufferPositionedReadable.java new file mode 100644 index 000000000000..f5d0c8521334 --- /dev/null +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ByteBufferPositionedReadable.java @@ -0,0 +1,92 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs; + +import java.io.EOFException; +import java.io.IOException; +import java.nio.ByteBuffer; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * FIXME: Hack: This is copied from Hadoop 3.3.6. Remove this interface once + * we drop Hadoop 3.1, 3.2 support. + * Implementers of this interface provide a positioned read API that writes to a + * {@link ByteBuffer} rather than a {@code byte[]}. + * + * @see PositionedReadable + * @see ByteBufferReadable + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface ByteBufferPositionedReadable { + /** + * Reads up to {@code buf.remaining()} bytes into buf from a given position + * in the file and returns the number of bytes read. Callers should use + * {@code buf.limit(...)} to control the size of the desired read and + * {@code buf.position(...)} to control the offset into the buffer the data + * should be written to. + *
    + * After a successful call, {@code buf.position()} will be advanced by the + * number of bytes read and {@code buf.limit()} will be unchanged. + *
    + * In the case of an exception, the state of the buffer (the contents of the + * buffer, the {@code buf.position()}, the {@code buf.limit()}, etc.) is + * undefined, and callers should be prepared to recover from this + * eventuality. + *
    + * Callers should use {@link StreamCapabilities#hasCapability(String)} with + * {@link StreamCapabilities#PREADBYTEBUFFER} to check if the underlying + * stream supports this interface, otherwise they might get a + * {@link UnsupportedOperationException}. + *
    + * Implementations should treat 0-length requests as legitimate, and must not + * signal an error upon their receipt. + *
    + * This does not change the current offset of a file, and is thread-safe. + * + * @param position position within file + * @param buf the ByteBuffer to receive the results of the read operation. + * @return the number of bytes read, possibly zero, or -1 if reached + * end-of-stream + * @throws IOException if there is some error performing the read + */ + int read(long position, ByteBuffer buf) throws IOException; + + /** + * Reads {@code buf.remaining()} bytes into buf from a given position in + * the file or until the end of the data was reached before the read + * operation completed. Callers should use {@code buf.limit(...)} to + * control the size of the desired read and {@code buf.position(...)} to + * control the offset into the buffer the data should be written to. + *
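(Illustrative usage, not part of the copied interface.) As the javadoc above recommends, a caller probes StreamCapabilities.PREADBYTEBUFFER before using the ByteBuffer pread path and falls back to the classic byte[] positioned read otherwise. A self-contained sketch; preadAt is a hypothetical helper and PREADBYTEBUFFER assumes a Hadoop version that defines that constant:

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import org.apache.hadoop.fs.ByteBufferPositionedReadable;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.StreamCapabilities;

    final class PreadExample {
      // Read into buf at the given position, preferring the ByteBuffer pread
      // capability advertised by the stream; otherwise copy through a byte[].
      static int preadAt(FSDataInputStream in, long position, ByteBuffer buf)
          throws IOException {
        if (in.hasCapability(StreamCapabilities.PREADBYTEBUFFER)
            && in.getWrappedStream() instanceof ByteBufferPositionedReadable) {
          return ((ByteBufferPositionedReadable) in.getWrappedStream())
              .read(position, buf);
        }
        byte[] tmp = new byte[buf.remaining()];
        int n = in.read(position, tmp, 0, tmp.length);  // PositionedReadable fallback
        if (n > 0) {
          buf.put(tmp, 0, n);
        }
        return n;
      }

      private PreadExample() { }
    }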
    + * This operation provides similar semantics to + * {@link #read(long, ByteBuffer)}, the difference is that this method is + * guaranteed to read data until the {@link ByteBuffer} is full, or until + * the end of the data stream is reached. + * + * @param position position within file + * @param buf the ByteBuffer to receive the results of the read operation. + * @throws IOException if there is some error performing the read + * @throws EOFException the end of the data was reached before + * the read operation completed + * @see #read(long, ByteBuffer) + */ + void readFully(long position, ByteBuffer buf) throws IOException; +} diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java index 28812a5a1a9d..d44055236d8b 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java @@ -67,6 +67,7 @@ import org.apache.hadoop.ozone.client.rpc.RpcClient; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -201,8 +202,8 @@ public BasicOzoneClientAdapterImpl(String omHost, int omPort, OzoneFSUtils.validateBucketLayout(bucket.getName(), resolvedBucketLayout); this.configuredDnPort = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); this.config = conf; } @@ -615,6 +616,16 @@ public String createSnapshot(String pathStr, String snapshotName) snapshotName); } + @Override + public void renameSnapshot(String pathStr, String snapshotOldName, String snapshotNewName) + throws IOException { + OFSPath ofsPath = new OFSPath(pathStr, config); + objectStore.renameSnapshot(ofsPath.getVolumeName(), + ofsPath.getBucketName(), + snapshotOldName, + snapshotNewName); + } + @Override public void deleteSnapshot(String pathStr, String snapshotName) throws IOException { @@ -691,7 +702,7 @@ private SnapshotDiffReportOzone getSnapshotDiffReportOnceComplete( } @Override - public OmKeyInfo recoverFilePrepare(final String pathStr, boolean force) throws IOException { + public LeaseKeyInfo recoverFilePrepare(final String pathStr, boolean force) throws IOException { incrementCounter(Statistic.INVOCATION_RECOVER_FILE_PREPARE, 1); return ozoneClient.getProxy().getOzoneManagerClient().recoverLease( diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java index dbe3b517e554..cd09cf1d5a8f 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java @@ -954,6 +954,12 @@ public Path createSnapshot(Path path, String snapshotName) OM_SNAPSHOT_INDICATOR + OZONE_URI_DELIMITER + snapshot); } + @Override + public void renameSnapshot(Path path, String snapshotOldName, String snapshotNewName) 
+ throws IOException { + getAdapter().renameSnapshot(pathToKey(path), snapshotOldName, snapshotNewName); + } + @Override public void deleteSnapshot(Path path, String snapshotName) throws IOException { diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java index e1ed85cff171..de9603c475f7 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java @@ -79,6 +79,7 @@ import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -209,8 +210,8 @@ public BasicRootedOzoneClientAdapterImpl(String omHost, int omPort, proxy = objectStore.getClientProxy(); this.configuredDnPort = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); // Fetches the bucket layout to be used by OFS. initDefaultFsBucketLayout(conf); @@ -893,9 +894,9 @@ public List listStatus(String pathStr, boolean recursive, } OFSPath ofsStartPath = new OFSPath(startPath, config); if (ofsPath.isVolume()) { - String startBucket = ofsStartPath.getBucketName(); + String startBucketPath = ofsStartPath.getNonKeyPath(); return listStatusVolume(ofsPath.getVolumeName(), - recursive, startBucket, numEntries, uri, workingDir, username); + recursive, startBucketPath, numEntries, uri, workingDir, username); } if (ofsPath.isSnapshotPath()) { @@ -1265,6 +1266,16 @@ public String createSnapshot(String pathStr, String snapshotName) snapshotName); } + @Override + public void renameSnapshot(String pathStr, String snapshotOldName, String snapshotNewName) + throws IOException { + OFSPath ofsPath = new OFSPath(pathStr, config); + proxy.renameSnapshot(ofsPath.getVolumeName(), + ofsPath.getBucketName(), + snapshotOldName, + snapshotNewName); + } + @Override public void deleteSnapshot(String pathStr, String snapshotName) throws IOException { @@ -1364,7 +1375,7 @@ public boolean isFileClosed(String pathStr) throws IOException { } @Override - public OmKeyInfo recoverFilePrepare(final String pathStr, boolean force) throws IOException { + public LeaseKeyInfo recoverFilePrepare(final String pathStr, boolean force) throws IOException { incrementCounter(Statistic.INVOCATION_RECOVER_FILE_PREPARE, 1); OFSPath ofsPath = new OFSPath(pathStr, config); diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java index b13d726371c4..3ba291ae0fd0 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java @@ -18,6 +18,8 @@ package org.apache.hadoop.fs.ozone; import com.google.common.base.Preconditions; +import io.opentracing.Span; 
+import io.opentracing.util.GlobalTracer; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.CreateFlag; @@ -41,6 +43,7 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; +import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.ozone.OFSPath; @@ -239,7 +242,12 @@ public FSDataInputStream open(Path path, int bufferSize) throws IOException { statistics.incrementReadOps(1); LOG.trace("open() path: {}", path); final String key = pathToKey(path); - return new FSDataInputStream(createFSInputStream(adapter.readFile(key))); + return TracingUtil.executeInNewSpan("ofs open", + () -> { + Span span = GlobalTracer.get().activeSpan(); + span.setTag("path", key); + return new FSDataInputStream(createFSInputStream(adapter.readFile(key))); + }); } protected InputStream createFSInputStream(InputStream inputStream) { @@ -263,7 +271,8 @@ public FSDataOutputStream create(Path f, FsPermission permission, incrementCounter(Statistic.INVOCATION_CREATE, 1); statistics.incrementWriteOps(1); final String key = pathToKey(f); - return createOutputStream(key, replication, overwrite, true); + return TracingUtil.executeInNewSpan("ofs create", + () -> createOutputStream(key, replication, overwrite, true)); } @Override @@ -277,8 +286,10 @@ public FSDataOutputStream createNonRecursive(Path path, incrementCounter(Statistic.INVOCATION_CREATE_NON_RECURSIVE, 1); statistics.incrementWriteOps(1); final String key = pathToKey(path); - return createOutputStream(key, - replication, flags.contains(CreateFlag.OVERWRITE), false); + return TracingUtil.executeInNewSpan("ofs createNonRecursive", + () -> + createOutputStream(key, + replication, flags.contains(CreateFlag.OVERWRITE), false)); } private OutputStream selectOutputStream(String key, short replication, @@ -374,6 +385,14 @@ boolean processKeyPath(List keyPathList) throws IOException { */ @Override public boolean rename(Path src, Path dst) throws IOException { + return TracingUtil.executeInNewSpan("ofs rename", + () -> renameInSpan(src, dst)); + } + + private boolean renameInSpan(Path src, Path dst) throws IOException { + Span span = GlobalTracer.get().activeSpan(); + span.setTag("src", src.toString()) + .setTag("dst", dst.toString()); incrementCounter(Statistic.INVOCATION_RENAME, 1); statistics.incrementWriteOps(1); if (src.equals(dst)) { @@ -526,16 +545,23 @@ protected void rename(final Path src, final Path dst, @Override public Path createSnapshot(Path path, String snapshotName) throws IOException { - String snapshot = getAdapter() - .createSnapshot(pathToKey(path), snapshotName); + String snapshot = TracingUtil.executeInNewSpan("ofs createSnapshot", + () -> getAdapter().createSnapshot(pathToKey(path), snapshotName)); return new Path(OzoneFSUtils.trimPathToDepth(path, PATH_DEPTH_TO_BUCKET), OM_SNAPSHOT_INDICATOR + OZONE_URI_DELIMITER + snapshot); } + @Override + public void renameSnapshot(Path path, String snapshotOldName, String snapshotNewName) + throws IOException { + getAdapter().renameSnapshot(pathToKey(path), snapshotOldName, snapshotNewName); + } + @Override public void deleteSnapshot(Path path, String snapshotName) throws IOException { - adapter.deleteSnapshot(pathToKey(path), snapshotName); + TracingUtil.executeInNewSpan("ofs deleteSnapshot", + () -> 
adapter.deleteSnapshot(pathToKey(path), snapshotName)); } private class DeleteIterator extends OzoneListingIterator { @@ -666,6 +692,11 @@ private boolean innerDelete(Path f, boolean recursive) throws IOException { */ @Override public boolean delete(Path f, boolean recursive) throws IOException { + return TracingUtil.executeInNewSpan("ofs delete", + () -> deleteInSpan(f, recursive)); + } + + private boolean deleteInSpan(Path f, boolean recursive) throws IOException { incrementCounter(Statistic.INVOCATION_DELETE, 1); statistics.incrementWriteOps(1); LOG.debug("Delete path {} - recursive {}", f, recursive); @@ -883,7 +914,8 @@ private boolean o3Exists(final Path f) throws IOException { @Override public FileStatus[] listStatus(Path f) throws IOException { - return convertFileStatusArr(listStatusAdapter(f)); + return TracingUtil.executeInNewSpan("ofs listStatus", + () -> convertFileStatusArr(listStatusAdapter(f))); } private FileStatus[] convertFileStatusArr( @@ -940,7 +972,8 @@ public Path getWorkingDirectory() { @Override public Token getDelegationToken(String renewer) throws IOException { - return adapter.getDelegationToken(renewer); + return TracingUtil.executeInNewSpan("ofs getDelegationToken", + () -> adapter.getDelegationToken(renewer)); } /** @@ -1008,7 +1041,8 @@ public boolean mkdirs(Path f, FsPermission permission) throws IOException { if (isEmpty(key)) { return false; } - return mkdir(f); + return TracingUtil.executeInNewSpan("ofs mkdirs", + () -> mkdir(f)); } @Override @@ -1019,7 +1053,8 @@ public long getDefaultBlockSize() { @Override public FileStatus getFileStatus(Path f) throws IOException { - return convertFileStatus(getFileStatusAdapter(f)); + return TracingUtil.executeInNewSpan("ofs getFileStatus", + () -> convertFileStatus(getFileStatusAdapter(f))); } public FileStatusAdapter getFileStatusAdapter(Path f) throws IOException { @@ -1090,7 +1125,8 @@ public boolean exists(Path f) throws IOException { public FileChecksum getFileChecksum(Path f, long length) throws IOException { incrementCounter(Statistic.INVOCATION_GET_FILE_CHECKSUM); String key = pathToKey(f); - return adapter.getFileChecksum(key, length); + return TracingUtil.executeInNewSpan("ofs getFileChecksum", + () -> adapter.getFileChecksum(key, length)); } @Override @@ -1502,6 +1538,11 @@ FileStatus convertFileStatus(FileStatusAdapter fileStatusAdapter) { @Override public ContentSummary getContentSummary(Path f) throws IOException { + return TracingUtil.executeInNewSpan("ofs getContentSummary", + () -> getContentSummaryInSpan(f)); + } + + private ContentSummary getContentSummaryInSpan(Path f) throws IOException { FileStatusAdapter status = getFileStatusAdapter(f); if (status.isFile()) { @@ -1577,7 +1618,8 @@ public void setTimes(Path f, long mtime, long atime) throws IOException { if (key.equals("NONE")) { throw new FileNotFoundException("File not found. 
path /NONE."); } - adapter.setTimes(key, mtime, atime); + TracingUtil.executeInNewSpan("ofs setTimes", + () -> adapter.setTimes(key, mtime, atime)); } protected boolean setSafeModeUtil(SafeModeAction action, @@ -1589,6 +1631,7 @@ protected boolean setSafeModeUtil(SafeModeAction action, statistics.incrementWriteOps(1); } LOG.trace("setSafeMode() action:{}", action); - return getAdapter().setSafeMode(action, isChecked); + return TracingUtil.executeInNewSpan("ofs setSafeMode", + () -> getAdapter().setSafeMode(action, isChecked)); } } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSInputStream.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSInputStream.java index 290546e4a104..30e0c32265bf 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSInputStream.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSInputStream.java @@ -35,6 +35,7 @@ public boolean hasCapability(String capability) { switch (StringUtils.toLowerCase(capability)) { case StreamCapabilities.READBYTEBUFFER: case StreamCapabilities.UNBUFFER: + case StreamCapabilities.PREADBYTEBUFFER: return true; default: return false; diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java index c7444a389d9b..7e78d6650ee3 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java @@ -28,8 +28,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.SafeModeAction; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; +import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.apache.hadoop.security.token.Token; @@ -92,13 +92,15 @@ FileStatusAdapter getFileStatus(String key, URI uri, String createSnapshot(String pathStr, String snapshotName) throws IOException; + void renameSnapshot(String pathStr, String snapshotOldName, String snapshotNewName) throws IOException; + void deleteSnapshot(String pathStr, String snapshotName) throws IOException; SnapshotDiffReport getSnapshotDiffReport(Path snapshotDir, String fromSnapshot, String toSnapshot) throws IOException, InterruptedException; - OmKeyInfo recoverFilePrepare(String pathStr, boolean force) throws IOException; + LeaseKeyInfo recoverFilePrepare(String pathStr, boolean force) throws IOException; void recoverFile(OmKeyArgs keyArgs) throws IOException; diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java index 918640799c71..4dc70bfa569d 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java @@ -18,18 +18,24 @@ package org.apache.hadoop.fs.ozone; +import java.io.EOFException; import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; import java.nio.ReadOnlyBufferException; 
+import io.opentracing.Scope; +import io.opentracing.Span; +import io.opentracing.util.GlobalTracer; import org.apache.hadoop.fs.CanUnbuffer; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.fs.ByteBufferReadable; +import org.apache.hadoop.fs.ByteBufferPositionedReadable; import org.apache.hadoop.fs.FSInputStream; import org.apache.hadoop.fs.FileSystem.Statistics; import org.apache.hadoop.fs.Seekable; +import org.apache.hadoop.hdds.tracing.TracingUtil; /** * The input stream for Ozone file system. @@ -40,7 +46,7 @@ @InterfaceAudience.Private @InterfaceStability.Evolving public class OzoneFSInputStream extends FSInputStream - implements ByteBufferReadable, CanUnbuffer { + implements ByteBufferReadable, CanUnbuffer, ByteBufferPositionedReadable { private final InputStream inputStream; private final Statistics statistics; @@ -52,25 +58,40 @@ public OzoneFSInputStream(InputStream inputStream, Statistics statistics) { @Override public int read() throws IOException { - int byteRead = inputStream.read(); - if (statistics != null && byteRead >= 0) { - statistics.incrementBytesRead(1); + Span span = GlobalTracer.get() + .buildSpan("OzoneFSInputStream.read").start(); + try (Scope scope = GlobalTracer.get().activateSpan(span)) { + int byteRead = inputStream.read(); + if (statistics != null && byteRead >= 0) { + statistics.incrementBytesRead(1); + } + return byteRead; + } finally { + span.finish(); } - return byteRead; } @Override public int read(byte[] b, int off, int len) throws IOException { - int bytesRead = inputStream.read(b, off, len); - if (statistics != null && bytesRead >= 0) { - statistics.incrementBytesRead(bytesRead); + Span span = GlobalTracer.get() + .buildSpan("OzoneFSInputStream.read").start(); + try (Scope scope = GlobalTracer.get().activateSpan(span)) { + span.setTag("offset", off) + .setTag("length", len); + int bytesRead = inputStream.read(b, off, len); + if (statistics != null && bytesRead >= 0) { + statistics.incrementBytesRead(bytesRead); + } + return bytesRead; + } finally { + span.finish(); } - return bytesRead; } @Override public synchronized void close() throws IOException { - inputStream.close(); + TracingUtil.executeInNewSpan("OzoneFSInputStream.close", + inputStream::close); } @Override @@ -101,6 +122,11 @@ public int available() throws IOException { */ @Override public int read(ByteBuffer buf) throws IOException { + return TracingUtil.executeInNewSpan("OzoneFSInputStream.read(ByteBuffer)", + () -> readInTrace(buf)); + } + + private int readInTrace(ByteBuffer buf) throws IOException { if (buf.isReadOnly()) { throw new ReadOnlyBufferException(); } @@ -137,4 +163,49 @@ public void unbuffer() { ((CanUnbuffer) inputStream).unbuffer(); } } + + /** + * @param buf the ByteBuffer to receive the results of the read operation. 
+ * @param position offset + * @return the number of bytes read, possibly zero, or -1 if + * reach end-of-stream + * @throws IOException if there is some error performing the read + */ + @Override + public int read(long position, ByteBuffer buf) throws IOException { + if (!buf.hasRemaining()) { + return 0; + } + long oldPos = this.getPos(); + int bytesRead; + try { + ((Seekable) inputStream).seek(position); + bytesRead = ((ByteBufferReadable) inputStream).read(buf); + } catch (EOFException e) { + // Either position is negative or it has reached EOF + return -1; + } finally { + ((Seekable) inputStream).seek(oldPos); + } + return bytesRead; + } + + /** + * @param buf the ByteBuffer to receive the results of the read operation. + * @param position offset + * @return void + * @throws IOException if there is some error performing the read + * @throws EOFException if end of file reached before reading fully + */ + @Override + public void readFully(long position, ByteBuffer buf) throws IOException { + int bytesRead; + for (int readCount = 0; buf.hasRemaining(); readCount += bytesRead) { + bytesRead = this.read(position + (long)readCount, buf); + if (bytesRead < 0) { + // Still buffer has space to read but stream has already reached EOF + throw new EOFException("End of file reached before reading fully."); + } + } + } } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java index 141a40469419..c5f62d6f68ba 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java @@ -18,7 +18,10 @@ package org.apache.hadoop.fs.ozone; +import io.opentracing.Span; +import io.opentracing.util.GlobalTracer; import org.apache.hadoop.fs.Syncable; +import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import java.io.IOException; @@ -42,17 +45,24 @@ public OzoneFSOutputStream(OzoneOutputStream outputStream) { @Override public void write(int b) throws IOException { - outputStream.write(b); + TracingUtil.executeInNewSpan("OzoneFSOutputStream.write", + () -> outputStream.write(b)); } @Override public void write(byte[] b, int off, int len) throws IOException { - outputStream.write(b, off, len); + TracingUtil.executeInNewSpan("OzoneFSOutputStream.write", + () -> { + Span span = GlobalTracer.get().activeSpan(); + span.setTag("length", len); + outputStream.write(b, off, len); + }); } @Override public synchronized void flush() throws IOException { - outputStream.flush(); + TracingUtil.executeInNewSpan("OzoneFSOutputStream.flush", + outputStream::flush); } @Override @@ -67,7 +77,8 @@ public void hflush() throws IOException { @Override public void hsync() throws IOException { - outputStream.hsync(); + TracingUtil.executeInNewSpan("OzoneFSOutputStream.hsync", + outputStream::hsync); } protected OzoneOutputStream getWrappedOutputStream() { diff --git a/hadoop-ozone/ozonefs-hadoop2/pom.xml b/hadoop-ozone/ozonefs-hadoop2/pom.xml index ce567348114b..8e31b055daa4 100644 --- a/hadoop-ozone/ozonefs-hadoop2/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop2/pom.xml @@ -123,6 +123,7 @@ unpack + ${maven.shade.skip} META-INF/versions/**/*.* diff --git a/hadoop-ozone/ozonefs-hadoop3-client/pom.xml b/hadoop-ozone/ozonefs-hadoop3-client/pom.xml index 4c2e4d5b9c8d..4e35e986c155 100644 --- 
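With ByteBufferPositionedReadable implemented above and the PREADBYTEBUFFER capability advertised earlier, positioned reads into a ByteBuffer leave the stream's own position untouched. A hedged usage sketch from the client side: it assumes a Hadoop 3.3+ FSDataInputStream, the Ozone filesystem client on the classpath, and an illustrative ofs:// path that is not taken from this patch.

import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StreamCapabilities;

public final class PositionedReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder path; any ofs:// or o3fs:// URI works the same way.
    Path file = new Path("ofs://om/vol1/bucket1/key1");
    try (FileSystem fs = file.getFileSystem(conf);
         FSDataInputStream in = fs.open(file)) {
      ByteBuffer buf = ByteBuffer.allocate(4096);
      if (in.hasCapability(StreamCapabilities.PREADBYTEBUFFER)) {
        // Fill buf from offset 1 MB; the stream position is unchanged.
        in.readFully(1024 * 1024, buf);
      } else {
        // Fall back to a plain positioned read into a byte[].
        byte[] b = new byte[buf.remaining()];
        in.readFully(1024 * 1024, b);
        buf.put(b);
      }
      buf.flip();
      System.out.println("read " + buf.remaining() + " bytes");
    }
  }
}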
a/hadoop-ozone/ozonefs-hadoop3-client/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop3-client/pom.xml @@ -54,6 +54,7 @@ unpack + ${maven.shade.skip} META-INF/versions/**/*.* @@ -77,6 +78,7 @@ shade + ${maven.shade.skip} diff --git a/hadoop-ozone/ozonefs-hadoop3/pom.xml b/hadoop-ozone/ozonefs-hadoop3/pom.xml index 48573e633803..6c900c56f86a 100644 --- a/hadoop-ozone/ozonefs-hadoop3/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop3/pom.xml @@ -74,6 +74,7 @@ unpack + ${maven.shade.skip} META-INF/versions/**/*.* diff --git a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java index b6cc22bbad09..4de4b22908d7 100644 --- a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java @@ -35,12 +35,15 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.security.token.DelegationTokenIssuer; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_FOUND; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_BLOCK; import static org.apache.hadoop.ozone.OzoneConsts.FORCE_LEASE_RECOVERY_ENV; /** @@ -142,9 +145,9 @@ public boolean recoverLease(Path f) throws IOException { Path qualifiedPath = makeQualified(f); String key = pathToKey(qualifiedPath); - OmKeyInfo keyInfo = null; + LeaseKeyInfo leaseKeyInfo; try { - keyInfo = getAdapter().recoverFilePrepare(key, forceRecovery); + leaseKeyInfo = getAdapter().recoverFilePrepare(key, forceRecovery); } catch (OMException e) { if (e.getResult() == OMException.ResultCodes.KEY_ALREADY_CLOSED) { // key is already closed, let's just return success @@ -154,25 +157,41 @@ public boolean recoverLease(Path f) throws IOException { } // finalize the final block and get block length - List locationInfoList = keyInfo.getLatestVersionLocations().getLocationList(); + List locationInfoList = leaseKeyInfo.getKeyInfo().getLatestVersionLocations().getLocationList(); if (!locationInfoList.isEmpty()) { OmKeyLocationInfo block = locationInfoList.get(locationInfoList.size() - 1); try { block.setLength(getAdapter().finalizeBlock(block)); } catch (Throwable e) { - if (!forceRecovery) { + if (e instanceof StorageContainerException && (((StorageContainerException) e).getResult().equals(NO_SUCH_BLOCK) + || ((StorageContainerException) e).getResult().equals(CONTAINER_NOT_FOUND)) + && !leaseKeyInfo.getIsKeyInfo() && locationInfoList.size() > 1) { + locationInfoList = leaseKeyInfo.getKeyInfo().getLatestVersionLocations().getLocationList().subList(0, + locationInfoList.size() - 1); + block = locationInfoList.get(locationInfoList.size() - 1); + try { + block.setLength(getAdapter().finalizeBlock(block)); + } catch (Throwable exp) { + if (!forceRecovery) { + throw exp; + } + LOG.warn("Failed to finalize block. 
Continue to recover the file since {} is enabled.", + FORCE_LEASE_RECOVERY_ENV, exp); + } + } else if (!forceRecovery) { throw e; + } else { + LOG.warn("Failed to finalize block. Continue to recover the file since {} is enabled.", + FORCE_LEASE_RECOVERY_ENV, e); } - LOG.warn("Failed to finalize block. Continue to recover the file since {} is enabled.", - FORCE_LEASE_RECOVERY_ENV, e); } } // recover and commit file long keyLength = locationInfoList.stream().mapToLong(OmKeyLocationInfo::getLength).sum(); - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(keyInfo.getVolumeName()) - .setBucketName(keyInfo.getBucketName()).setKeyName(keyInfo.getKeyName()) - .setReplicationConfig(keyInfo.getReplicationConfig()).setDataSize(keyLength) + OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(leaseKeyInfo.getKeyInfo().getVolumeName()) + .setBucketName(leaseKeyInfo.getKeyInfo().getBucketName()).setKeyName(leaseKeyInfo.getKeyInfo().getKeyName()) + .setReplicationConfig(leaseKeyInfo.getKeyInfo().getReplicationConfig()).setDataSize(keyLength) .setLocationInfoList(locationInfoList) .build(); getAdapter().recoverFile(keyArgs); diff --git a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java index e6eba955e4d9..3025b1af03be 100644 --- a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java @@ -30,9 +30,10 @@ import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.StorageStatistics; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.security.token.DelegationTokenIssuer; @@ -41,6 +42,8 @@ import java.net.URI; import java.util.List; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_FOUND; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_BLOCK; import static org.apache.hadoop.ozone.OzoneConsts.FORCE_LEASE_RECOVERY_ENV; /** @@ -146,9 +149,9 @@ public boolean recoverLease(final Path f) throws IOException { LOG.trace("recoverLease() path:{}", f); Path qualifiedPath = makeQualified(f); String key = pathToKey(qualifiedPath); - OmKeyInfo keyInfo = null; + LeaseKeyInfo leaseKeyInfo; try { - keyInfo = getAdapter().recoverFilePrepare(key, forceRecovery); + leaseKeyInfo = getAdapter().recoverFilePrepare(key, forceRecovery); } catch (OMException e) { if (e.getResult() == OMException.ResultCodes.KEY_ALREADY_CLOSED) { // key is already closed, let's just return success @@ -158,25 +161,41 @@ public boolean recoverLease(final Path f) throws IOException { } // finalize the final block and get block length - List locationInfoList = keyInfo.getLatestVersionLocations().getLocationList(); + List locationInfoList = leaseKeyInfo.getKeyInfo().getLatestVersionLocations().getLocationList(); if (!locationInfoList.isEmpty()) { OmKeyLocationInfo block = locationInfoList.get(locationInfoList.size() - 1); try { 
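The recoverLease() change reads as follows: when finalizing the last block fails with NO_SUCH_BLOCK or CONTAINER_NOT_FOUND, the lease info did not come from the key table (getIsKeyInfo() is false), and more than one block exists, the last block is dropped and finalization is retried on the new tail; otherwise the previous behavior applies, failing unless force recovery is enabled. A simplified, self-contained model of that decision, using stand-in types instead of the real Ozone classes; the same fallback is repeated in each recoverLease() implementation touched by this patch.

import java.util.List;

public final class FinalizeFallbackSketch {

  // Stand-in for the datanode error codes referenced in the patch.
  enum DnError { NO_SUCH_BLOCK, CONTAINER_NOT_FOUND, OTHER }

  /**
   * Returns the blocks to commit: either the full list, or the list minus the
   * last block when the fallback conditions from the patch hold (the last
   * block may have been allocated but never written to a datanode).
   */
  static <B> List<B> blocksToCommit(List<B> blocks, DnError error,
      boolean fromKeyTable, boolean forceRecovery) {
    boolean blockMissingOnDn =
        error == DnError.NO_SUCH_BLOCK || error == DnError.CONTAINER_NOT_FOUND;
    if (blockMissingOnDn && !fromKeyTable && blocks.size() > 1) {
      // Drop the last block; the caller retries finalizeBlock on the new tail,
      // and forceRecovery decides what happens if that also fails.
      return blocks.subList(0, blocks.size() - 1);
    }
    if (!forceRecovery) {
      throw new IllegalStateException("finalizeBlock failed: " + error);
    }
    return blocks;  // force recovery: continue with the current block list
  }

  public static void main(String[] args) {
    List<String> blocks = List.of("blk-1", "blk-2", "blk-3");
    System.out.println(
        blocksToCommit(blocks, DnError.NO_SUCH_BLOCK, false, false));
    // -> [blk-1, blk-2]
  }
}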
block.setLength(getAdapter().finalizeBlock(block)); } catch (Throwable e) { - if (!forceRecovery) { + if (e instanceof StorageContainerException && (((StorageContainerException) e).getResult().equals(NO_SUCH_BLOCK) + || ((StorageContainerException) e).getResult().equals(CONTAINER_NOT_FOUND)) + && !leaseKeyInfo.getIsKeyInfo() && locationInfoList.size() > 1) { + locationInfoList = leaseKeyInfo.getKeyInfo().getLatestVersionLocations().getLocationList().subList(0, + locationInfoList.size() - 1); + block = locationInfoList.get(locationInfoList.size() - 1); + try { + block.setLength(getAdapter().finalizeBlock(block)); + } catch (Throwable exp) { + if (!forceRecovery) { + throw exp; + } + LOG.warn("Failed to finalize block. Continue to recover the file since {} is enabled.", + FORCE_LEASE_RECOVERY_ENV, exp); + } + } else if (!forceRecovery) { throw e; + } else { + LOG.warn("Failed to finalize block. Continue to recover the file since {} is enabled.", + FORCE_LEASE_RECOVERY_ENV, e); } - LOG.warn("Failed to finalize block. Continue to recover the file since {} is enabled.", - FORCE_LEASE_RECOVERY_ENV, e); } } // recover and commit file long keyLength = locationInfoList.stream().mapToLong(OmKeyLocationInfo::getLength).sum(); - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(keyInfo.getVolumeName()) - .setBucketName(keyInfo.getBucketName()).setKeyName(keyInfo.getKeyName()) - .setReplicationConfig(keyInfo.getReplicationConfig()).setDataSize(keyLength) + OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(leaseKeyInfo.getKeyInfo().getVolumeName()) + .setBucketName(leaseKeyInfo.getKeyInfo().getBucketName()).setKeyName(leaseKeyInfo.getKeyInfo().getKeyName()) + .setReplicationConfig(leaseKeyInfo.getKeyInfo().getReplicationConfig()).setDataSize(keyLength) .setLocationInfoList(locationInfoList) .build(); getAdapter().recoverFile(keyArgs); diff --git a/hadoop-ozone/ozonefs-shaded/pom.xml b/hadoop-ozone/ozonefs-shaded/pom.xml index 6ff4e3c701a7..417a4f9dca3b 100644 --- a/hadoop-ozone/ozonefs-shaded/pom.xml +++ b/hadoop-ozone/ozonefs-shaded/pom.xml @@ -91,6 +91,7 @@ shade + ${maven.shade.skip} @@ -128,7 +129,6 @@ org.apache.commons.digester.**.* org.apache.commons.io.**.* org.apache.commons.logging.**.* - org.apache.commons.pool2.**.* org.apache.commons.validator.**.* org.apache.commons.lang3.**.* org.sqlite.**.* diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java index b6cc22bbad09..4de4b22908d7 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java @@ -35,12 +35,15 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.security.token.DelegationTokenIssuer; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_FOUND; +import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_BLOCK; import static org.apache.hadoop.ozone.OzoneConsts.FORCE_LEASE_RECOVERY_ENV; /** @@ -142,9 +145,9 @@ public boolean recoverLease(Path f) throws IOException { Path qualifiedPath = makeQualified(f); String key = pathToKey(qualifiedPath); - OmKeyInfo keyInfo = null; + LeaseKeyInfo leaseKeyInfo; try { - keyInfo = getAdapter().recoverFilePrepare(key, forceRecovery); + leaseKeyInfo = getAdapter().recoverFilePrepare(key, forceRecovery); } catch (OMException e) { if (e.getResult() == OMException.ResultCodes.KEY_ALREADY_CLOSED) { // key is already closed, let's just return success @@ -154,25 +157,41 @@ public boolean recoverLease(Path f) throws IOException { } // finalize the final block and get block length - List locationInfoList = keyInfo.getLatestVersionLocations().getLocationList(); + List locationInfoList = leaseKeyInfo.getKeyInfo().getLatestVersionLocations().getLocationList(); if (!locationInfoList.isEmpty()) { OmKeyLocationInfo block = locationInfoList.get(locationInfoList.size() - 1); try { block.setLength(getAdapter().finalizeBlock(block)); } catch (Throwable e) { - if (!forceRecovery) { + if (e instanceof StorageContainerException && (((StorageContainerException) e).getResult().equals(NO_SUCH_BLOCK) + || ((StorageContainerException) e).getResult().equals(CONTAINER_NOT_FOUND)) + && !leaseKeyInfo.getIsKeyInfo() && locationInfoList.size() > 1) { + locationInfoList = leaseKeyInfo.getKeyInfo().getLatestVersionLocations().getLocationList().subList(0, + locationInfoList.size() - 1); + block = locationInfoList.get(locationInfoList.size() - 1); + try { + block.setLength(getAdapter().finalizeBlock(block)); + } catch (Throwable exp) { + if (!forceRecovery) { + throw exp; + } + LOG.warn("Failed to finalize block. Continue to recover the file since {} is enabled.", + FORCE_LEASE_RECOVERY_ENV, exp); + } + } else if (!forceRecovery) { throw e; + } else { + LOG.warn("Failed to finalize block. Continue to recover the file since {} is enabled.", + FORCE_LEASE_RECOVERY_ENV, e); } - LOG.warn("Failed to finalize block. 
Continue to recover the file since {} is enabled.", - FORCE_LEASE_RECOVERY_ENV, e); } } // recover and commit file long keyLength = locationInfoList.stream().mapToLong(OmKeyLocationInfo::getLength).sum(); - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(keyInfo.getVolumeName()) - .setBucketName(keyInfo.getBucketName()).setKeyName(keyInfo.getKeyName()) - .setReplicationConfig(keyInfo.getReplicationConfig()).setDataSize(keyLength) + OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(leaseKeyInfo.getKeyInfo().getVolumeName()) + .setBucketName(leaseKeyInfo.getKeyInfo().getBucketName()).setKeyName(leaseKeyInfo.getKeyInfo().getKeyName()) + .setReplicationConfig(leaseKeyInfo.getKeyInfo().getReplicationConfig()).setDataSize(keyLength) .setLocationInfoList(locationInfoList) .build(); getAdapter().recoverFile(keyArgs); diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java index 36aa0e5f27c8..c06a6b7644e8 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java @@ -19,20 +19,23 @@ package org.apache.hadoop.fs.ozone; import com.google.common.base.Strings; +import io.opentracing.util.GlobalTracer; +import org.apache.hadoop.crypto.key.KeyProvider; +import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LeaseRecoverable; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.SafeMode; import org.apache.hadoop.fs.SafeModeAction; +import org.apache.hadoop.fs.StorageStatistics; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.crypto.key.KeyProvider; -import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.StorageStatistics; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.security.token.DelegationTokenIssuer; @@ -41,6 +44,8 @@ import java.net.URI; import java.util.List; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_FOUND; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_BLOCK; import static org.apache.hadoop.ozone.OzoneConsts.FORCE_LEASE_RECOVERY_ENV; /** @@ -135,13 +140,18 @@ public boolean hasPathCapability(final Path path, final String capability) @Override public boolean recoverLease(final Path f) throws IOException { + return TracingUtil.executeInNewSpan("ofs recoverLease", + () -> recoverLeaseTraced(f)); + } + private boolean recoverLeaseTraced(final Path f) throws IOException { + GlobalTracer.get().activeSpan().setTag("path", f.toString()); statistics.incrementWriteOps(1); LOG.trace("recoverLease() path:{}", f); Path qualifiedPath = makeQualified(f); String key = pathToKey(qualifiedPath); - OmKeyInfo keyInfo = null; + 
LeaseKeyInfo leaseKeyInfo; try { - keyInfo = getAdapter().recoverFilePrepare(key, forceRecovery); + leaseKeyInfo = getAdapter().recoverFilePrepare(key, forceRecovery); } catch (OMException e) { if (e.getResult() == OMException.ResultCodes.KEY_ALREADY_CLOSED) { // key is already closed, let's just return success @@ -151,26 +161,42 @@ public boolean recoverLease(final Path f) throws IOException { } // finalize the final block and get block length - List keyLocationInfoList = keyInfo.getLatestVersionLocations().getLocationList(); - if (!keyLocationInfoList.isEmpty()) { - OmKeyLocationInfo block = keyLocationInfoList.get(keyLocationInfoList.size() - 1); + List locationInfoList = leaseKeyInfo.getKeyInfo().getLatestVersionLocations().getLocationList(); + if (!locationInfoList.isEmpty()) { + OmKeyLocationInfo block = locationInfoList.get(locationInfoList.size() - 1); try { block.setLength(getAdapter().finalizeBlock(block)); } catch (Throwable e) { - if (!forceRecovery) { + if (e instanceof StorageContainerException && (((StorageContainerException) e).getResult().equals(NO_SUCH_BLOCK) + || ((StorageContainerException) e).getResult().equals(CONTAINER_NOT_FOUND)) + && !leaseKeyInfo.getIsKeyInfo() && locationInfoList.size() > 1) { + locationInfoList = leaseKeyInfo.getKeyInfo().getLatestVersionLocations().getLocationList().subList(0, + locationInfoList.size() - 1); + block = locationInfoList.get(locationInfoList.size() - 1); + try { + block.setLength(getAdapter().finalizeBlock(block)); + } catch (Throwable exp) { + if (!forceRecovery) { + throw exp; + } + LOG.warn("Failed to finalize block. Continue to recover the file since {} is enabled.", + FORCE_LEASE_RECOVERY_ENV, exp); + } + } else if (!forceRecovery) { throw e; + } else { + LOG.warn("Failed to finalize block. Continue to recover the file since {} is enabled.", + FORCE_LEASE_RECOVERY_ENV, e); } - LOG.warn("Failed to finalize block. 
Continue to recover the file since {} is enabled.", - FORCE_LEASE_RECOVERY_ENV, e); } } // recover and commit file - long keyLength = keyLocationInfoList.stream().mapToLong(OmKeyLocationInfo::getLength).sum(); - OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(keyInfo.getVolumeName()) - .setBucketName(keyInfo.getBucketName()).setKeyName(keyInfo.getKeyName()) - .setReplicationConfig(keyInfo.getReplicationConfig()).setDataSize(keyLength) - .setLocationInfoList(keyLocationInfoList) + long keyLength = locationInfoList.stream().mapToLong(OmKeyLocationInfo::getLength).sum(); + OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(leaseKeyInfo.getKeyInfo().getVolumeName()) + .setBucketName(leaseKeyInfo.getKeyInfo().getBucketName()).setKeyName(leaseKeyInfo.getKeyInfo().getKeyName()) + .setReplicationConfig(leaseKeyInfo.getKeyInfo().getReplicationConfig()).setDataSize(keyLength) + .setLocationInfoList(locationInfoList) .build(); getAdapter().recoverFile(keyArgs); return true; @@ -178,6 +204,12 @@ public boolean recoverLease(final Path f) throws IOException { @Override public boolean isFileClosed(Path f) throws IOException { + return TracingUtil.executeInNewSpan("ofs isFileClosed", + () -> isFileClosedTraced(f)); + } + + private boolean isFileClosedTraced(Path f) throws IOException { + GlobalTracer.get().activeSpan().setTag("path", f.toString()); statistics.incrementWriteOps(1); LOG.trace("isFileClosed() path:{}", f); Path qualifiedPath = makeQualified(f); diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index 4b61b37bc40c..059db6b95130 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -305,66 +305,6 @@ - - org.apache.rat - apache-rat-plugin - - - **/*.json - **/hs_err*.log - **/target/** - .gitattributes - **/.attach_* - **/**.rej - **/.factorypath - public - **/*.iml - **/output.xml - **/log.html - **/report.html - **/.idea/** - **/.ssh/id_rsa* - dev-support/*tests - dev-support/checkstyle* - dev-support/jdiff/** - src/contrib/** - src/main/webapps/datanode/robots.txt - src/main/webapps/hdfs/robots.txt - src/main/webapps/journal/robots.txt - src/main/webapps/router/robots.txt - src/main/webapps/secondary/robots.txt - src/site/resources/images/* - src/test/all-tests - src/test/empty-file - src/test/resources/*.log - src/test/resources/*.tgz - src/test/resources/data* - src/test/resources/empty-file - src/test/resources/ssl/* - src/main/compose/ozonesecure/docker-image/runner/build/apache-rat-0.12/README-CLI.txt - src/main/compose/ozonesecure/docker-image/runner/build/apache-rat-0.12/README-ANT.txt - webapps/static/angular-1.8.0.min.js - webapps/static/angular-nvd3-1.0.9.min.js - webapps/static/angular-route-1.8.0.min.js - webapps/static/bootstrap-3.4.1/** - webapps/static/d3-3.5.17.min.js - webapps/static/jquery-3.5.1.min.js - webapps/static/jquery.dataTables.min.js - webapps/static/nvd3-1.8.5.min.css.map - webapps/static/nvd3-1.8.5.min.css - webapps/static/nvd3-1.8.5.min.js.map - webapps/static/nvd3-1.8.5.min.js - **/dependency-reduced-pom.xml - **/node_modules/** - **/yarn.lock - **/pnpm-lock.yaml - **/ozone-recon-web/build/** - src/test/resources/prometheus-test-response.txt - src/main/license/** - src/main/resources/proto.lock - - - org.apache.maven.plugins maven-jar-plugin @@ -406,6 +346,13 @@ ozonefs-hadoop3-client + + go-offline + + ozonefs-shaded + ozonefs-hadoop2 + + build-with-recon diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java 
b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java index 43e2d728b763..4d62ca886cda 100644 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java @@ -51,7 +51,8 @@ public enum UnHealthyContainerStates { UNDER_REPLICATED, OVER_REPLICATED, MIS_REPLICATED, - ALL_REPLICAS_UNHEALTHY + ALL_REPLICAS_UNHEALTHY, + NEGATIVE_SIZE // Added new state to track containers with negative sizes } private static final String CONTAINER_ID = "container_id"; diff --git a/hadoop-ozone/recon/.gitignore b/hadoop-ozone/recon/.gitignore new file mode 100644 index 000000000000..3c3629e647f5 --- /dev/null +++ b/hadoop-ozone/recon/.gitignore @@ -0,0 +1 @@ +node_modules diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java index d838e9c36e57..afc9c8a3239a 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java @@ -397,7 +397,12 @@ public Response getUnhealthyContainers( summary = containerHealthSchemaManager.getUnhealthyContainersSummary(); List containers = containerHealthSchemaManager .getUnhealthyContainers(internalState, offset, limit); - for (UnhealthyContainers c : containers) { + List emptyMissingFiltered = containers.stream() + .filter( + container -> !container.getContainerState().equals(UnHealthyContainerStates.EMPTY_MISSING.toString())) + .collect( + Collectors.toList()); + for (UnhealthyContainers c : emptyMissingFiltered) { long containerID = c.getContainerId(); ContainerInfo containerInfo = containerManager.getContainer(ContainerID.valueOf(containerID)); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 84f55749a68f..baa9c522be10 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -58,6 +58,7 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_PREVKEY; @@ -652,6 +653,36 @@ public Response getDeletedDirInfo( return Response.ok(deletedDirInsightInfo).build(); } + /** + * Retrieves the summary of deleted directories. + * + * This method calculates and returns a summary of deleted directories. 
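For context, a client consumes the summary below with a plain HTTP GET. The sketch assumes Recon's default HTTP port (9888) and the insight API prefix /api/v1/keys; only the /deletePending/dirs/summary suffix is taken from the @Path annotation in this patch, so the base URL should be adjusted to the actual deployment.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public final class DeletedDirSummaryClientSketch {
  public static void main(String[] args) throws Exception {
    // Assumed URL: Recon's default port and insight prefix; only the
    // "/deletePending/dirs/summary" part comes from the endpoint annotation.
    URI uri = URI.create(
        "http://localhost:9888/api/v1/keys/deletePending/dirs/summary");
    HttpClient client = HttpClient.newHttpClient();
    HttpRequest request = HttpRequest.newBuilder(uri).GET().build();
    HttpResponse<String> response =
        client.send(request, HttpResponse.BodyHandlers.ofString());
    // Expected shape, per the Javadoc: {"totalDeletedDirectories": <count>}
    System.out.println(response.statusCode() + " " + response.body());
  }
}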
+ * @return The HTTP response body includes a map with the following entries: + * - "totalDeletedDirectories": the total number of deleted directories + * + * Example response: + * { + * "totalDeletedDirectories": 8, + * } + */ + @GET + @Path("/deletePending/dirs/summary") + public Response getDeletedDirectorySummary() { + Map dirSummary = new HashMap<>(); + // Create a keys summary for deleted directories + createSummaryForDeletedDirectories(dirSummary); + return Response.ok(dirSummary).build(); + } + + private void createSummaryForDeletedDirectories( + Map dirSummary) { + // Fetch the necessary metrics for deleted directories. + Long deletedDirCount = getValueFromId(globalStatsDao.findById( + OmTableInsightTask.getTableCountKeyFromTable(DELETED_DIR_TABLE))); + // Calculate the total number of deleted directories + dirSummary.put("totalDeletedDirectories", deletedDirCount); + } + private void updateReplicatedAndUnReplicatedTotal( KeyInsightInfoResponse deletedKeyAndDirInsightInfo, RepeatedOmKeyInfo repeatedOmKeyInfo) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java index 09cbf4fe4e40..266caaa2d8e2 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java @@ -17,9 +17,11 @@ */ package org.apache.hadoop.ozone.recon.api.handlers; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -163,6 +165,8 @@ public static BucketHandler getBucketHandler( ReconOMMetadataManager omMetadataManager, OzoneStorageContainerManager reconSCM, OmBucketInfo bucketInfo) throws IOException { + // Check if enableFileSystemPaths flag is set to true. + boolean enableFileSystemPaths = isEnableFileSystemPaths(omMetadataManager); // If bucketInfo is null then entity type is UNKNOWN if (Objects.isNull(bucketInfo)) { @@ -172,15 +176,20 @@ public static BucketHandler getBucketHandler( .equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) { return new FSOBucketHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketInfo); - } else if (bucketInfo.getBucketLayout() - .equals(BucketLayout.LEGACY)) { - return new LegacyBucketHandler(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, bucketInfo); + } else if (bucketInfo.getBucketLayout().equals(BucketLayout.LEGACY)) { + // Choose handler based on enableFileSystemPaths flag for legacy layout. + // If enableFileSystemPaths is false, then the legacy bucket is treated + // as an OBS bucket. + if (enableFileSystemPaths) { + return new LegacyBucketHandler(reconNamespaceSummaryManager, + omMetadataManager, reconSCM, bucketInfo); + } else { + return new OBSBucketHandler(reconNamespaceSummaryManager, + omMetadataManager, reconSCM, bucketInfo); + } } else if (bucketInfo.getBucketLayout() .equals(BucketLayout.OBJECT_STORE)) { - // TODO: HDDS-7810 Write a handler for object store bucket - // We can use LegacyBucketHandler for OBS bucket for now. 
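The handler selection above reduces to a small decision table: FSO buckets keep FSOBucketHandler, OBJECT_STORE buckets now get the new OBSBucketHandler, and LEGACY buckets follow ozone.om.enable.filesystem.paths. A self-contained sketch of that table, with the handler classes reduced to plain strings rather than the real Recon types:

public final class BucketHandlerChoiceSketch {

  // Mirrors the BucketLayout constants used in the patch, for this sketch only.
  enum Layout { FILE_SYSTEM_OPTIMIZED, LEGACY, OBJECT_STORE }

  /** Returns the handler name the patch would pick for a given bucket. */
  static String handlerFor(Layout layout, boolean enableFileSystemPaths) {
    switch (layout) {
    case FILE_SYSTEM_OPTIMIZED:
      return "FSOBucketHandler";
    case LEGACY:
      // With ozone.om.enable.filesystem.paths=false a legacy bucket behaves
      // like a flat object store, so it is handled by OBSBucketHandler.
      return enableFileSystemPaths ? "LegacyBucketHandler" : "OBSBucketHandler";
    case OBJECT_STORE:
      return "OBSBucketHandler";
    default:
      throw new IllegalArgumentException("Unsupported layout: " + layout);
    }
  }

  public static void main(String[] args) {
    System.out.println(handlerFor(Layout.LEGACY, false));  // OBSBucketHandler
    System.out.println(handlerFor(Layout.LEGACY, true));   // LegacyBucketHandler
  }
}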
- return new LegacyBucketHandler(reconNamespaceSummaryManager, + return new OBSBucketHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketInfo); } else { LOG.error("Unsupported bucket layout: " + @@ -190,6 +199,22 @@ public static BucketHandler getBucketHandler( } } + /** + * Determines whether FileSystemPaths are enabled for Legacy Buckets + * based on the Ozone configuration. + * + * @param ReconOMMetadataManager Instance + * @return True if FileSystemPaths are enabled, false otherwise. + */ + private static boolean isEnableFileSystemPaths(ReconOMMetadataManager omMetadataManager) { + OzoneConfiguration configuration = omMetadataManager.getOzoneConfiguration(); + if (configuration == null) { + configuration = new OzoneConfiguration(); + } + return configuration.getBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, + OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT); + } + public static BucketHandler getBucketHandler( ReconNamespaceSummaryManager reconNamespaceSummaryManager, ReconOMMetadataManager omMetadataManager, diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java index d12c7b6545ac..4f9e68ddff95 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.recon.ReconConstants; import org.apache.hadoop.ozone.recon.api.types.NamespaceSummaryResponse; import org.apache.hadoop.ozone.recon.api.types.DUResponse; @@ -60,9 +61,18 @@ public EntityHandler( this.omMetadataManager = omMetadataManager; this.reconSCM = reconSCM; this.bucketHandler = bucketHandler; - normalizedPath = normalizePath(path); - names = parseRequestPath(normalizedPath); + // Defaulting to FILE_SYSTEM_OPTIMIZED if bucketHandler is null + BucketLayout layout = + (bucketHandler != null) ? bucketHandler.getBucketLayout() : + BucketLayout.FILE_SYSTEM_OPTIMIZED; + + // Normalize the path based on the determined layout + normalizedPath = normalizePath(path, layout); + + // Choose the parsing method based on the bucket layout + names = (layout == BucketLayout.OBJECT_STORE) ? 
+ parseObjectStorePath(normalizedPath) : parseRequestPath(normalizedPath); } public abstract NamespaceSummaryResponse getSummaryResponse() @@ -118,7 +128,8 @@ public static EntityHandler getEntityHandler( String path) throws IOException { BucketHandler bucketHandler; - String normalizedPath = normalizePath(path); + String normalizedPath = + normalizePath(path, BucketLayout.FILE_SYSTEM_OPTIMIZED); String[] names = parseRequestPath(normalizedPath); if (path.equals(OM_KEY_PREFIX)) { return EntityType.ROOT.create(reconNamespaceSummaryManager, @@ -156,23 +167,36 @@ public static EntityHandler getEntityHandler( String volName = names[0]; String bucketName = names[1]; - String keyName = BucketHandler.getKeyName(names); - + // Assuming getBucketHandler already validates volume and bucket existence bucketHandler = BucketHandler.getBucketHandler( - reconNamespaceSummaryManager, - omMetadataManager, reconSCM, - volName, bucketName); + reconNamespaceSummaryManager, omMetadataManager, reconSCM, volName, + bucketName); - // check if either volume or bucket doesn't exist - if (bucketHandler == null - || !omMetadataManager.volumeExists(volName) - || !bucketHandler.bucketExists(volName, bucketName)) { + if (bucketHandler == null) { return EntityType.UNKNOWN.create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, null, path); + omMetadataManager, reconSCM, null, path); + } + + // Directly handle path normalization and parsing based on the layout + if (bucketHandler.getBucketLayout() == BucketLayout.OBJECT_STORE) { + String[] parsedObjectLayoutPath = parseObjectStorePath( + normalizePath(path, bucketHandler.getBucketLayout())); + if (parsedObjectLayoutPath == null) { + return EntityType.UNKNOWN.create(reconNamespaceSummaryManager, + omMetadataManager, reconSCM, null, path); + } + // Use the key part directly from the parsed path + return bucketHandler.determineKeyPath(parsedObjectLayoutPath[2]) + .create(reconNamespaceSummaryManager, omMetadataManager, reconSCM, + bucketHandler, path); + } else { + // Use the existing names array for non-OBJECT_STORE layouts to derive + // the keyName + String keyName = BucketHandler.getKeyName(names); + return bucketHandler.determineKeyPath(keyName) + .create(reconNamespaceSummaryManager, omMetadataManager, reconSCM, + bucketHandler, path); } - return bucketHandler.determineKeyPath(keyName) - .create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, bucketHandler, path); } } @@ -256,7 +280,52 @@ public static String[] parseRequestPath(String path) { return names; } - private static String normalizePath(String path) { + /** + * Splits an object store path into volume, bucket, and key name components. + * + * This method parses a path of the format "/volumeName/bucketName/keyName", + * including paths with additional '/' characters within the key name. It's + * designed for object store paths where the first three '/' characters + * separate the root, volume and bucket names from the key name. + * + * @param path The object store path to parse, starting with a slash. + * @return A String array with three elements: volume name, bucket name, and + * key name, or {null} if the path format is invalid. 
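Concretely, the three-way split keeps every slash after the bucket inside the key name. A stand-alone illustration of the same String.split contract (not the Recon method itself):

import java.util.Arrays;

public final class ObjectStorePathSplitSketch {
  public static void main(String[] args) {
    String path = "/vol1/bucket1/dir-like/key/with/slashes";
    // Drop the leading slash, then split into at most three parts so the
    // third element keeps the full key name, slashes included.
    String[] parts = path.substring(1).split("/", 3);
    System.out.println(Arrays.toString(parts));
    // -> [vol1, bucket1, dir-like/key/with/slashes]
  }
}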
+ */ + public static String[] parseObjectStorePath(String path) { + // Removing the leading slash for correct splitting + path = path.substring(1); + + // Splitting the modified path by "/", limiting to 3 parts + String[] parts = path.split("/", 3); + + // Checking if we correctly obtained 3 parts after removing the leading slash + if (parts.length <= 3) { + return parts; + } else { + return null; + } + } + + /** + * Normalizes a given path based on the specified bucket layout. + * + * This method adjusts the path according to the bucket layout. + * For {OBJECT_STORE Layout}, it normalizes the path up to the bucket level + * using OmUtils.normalizePathUptoBucket. For other layouts, it + * normalizes the entire path, including the key, using + * OmUtils.normalizeKey, and does not preserve any trailing slashes. + * The normalized path will always be prefixed with OM_KEY_PREFIX to ensure it + * is consistent with the expected format for object storage paths in Ozone. + * + * @param path + * @param bucketLayout + * @return A normalized path + */ + private static String normalizePath(String path, BucketLayout bucketLayout) { + if (bucketLayout == BucketLayout.OBJECT_STORE) { + return OM_KEY_PREFIX + OmUtils.normalizePathUptoBucket(path); + } return OM_KEY_PREFIX + OmUtils.normalizeKey(path, false); } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java index 26cda6442d4e..8a1c5babe75e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java @@ -42,7 +42,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; /** - * Class for handling FSO buckets. + * Class for handling FSO buckets NameSpaceSummaries. */ public class FSOBucketHandler extends BucketHandler { private static final Logger LOG = diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java index 3dd1ddbdabb9..09f1c5bc7454 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java @@ -41,7 +41,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; /** - * Class for handling Legacy buckets. + * Class for handling Legacy buckets NameSpaceSummaries. */ public class LegacyBucketHandler extends BucketHandler { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java new file mode 100644 index 000000000000..024eec989a10 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java @@ -0,0 +1,268 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.recon.api.handlers; + + +import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.recon.api.types.DUResponse; +import org.apache.hadoop.ozone.recon.api.types.EntityType; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; + +import java.io.IOException; +import java.util.List; + +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; + +/** + * Class for handling OBS buckets NameSpaceSummaries. + */ +public class OBSBucketHandler extends BucketHandler { + + private final String vol; + private final String bucket; + private final OmBucketInfo omBucketInfo; + + public OBSBucketHandler( + ReconNamespaceSummaryManager reconNamespaceSummaryManager, + ReconOMMetadataManager omMetadataManager, + OzoneStorageContainerManager reconSCM, + OmBucketInfo bucketInfo) { + super(reconNamespaceSummaryManager, omMetadataManager, + reconSCM); + this.omBucketInfo = bucketInfo; + this.vol = omBucketInfo.getVolumeName(); + this.bucket = omBucketInfo.getBucketName(); + } + + /** + * Helper function to check if a path is a key, or invalid. + * + * @param keyName key name + * @return KEY, or UNKNOWN + * @throws IOException + */ + @Override + public EntityType determineKeyPath(String keyName) throws IOException { + String key = OM_KEY_PREFIX + vol + + OM_KEY_PREFIX + bucket + + OM_KEY_PREFIX + keyName; + + Table keyTable = getKeyTable(); + + try ( + TableIterator> + iterator = keyTable.iterator()) { + iterator.seek(key); + if (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + String dbKey = kv.getKey(); + if (dbKey.equals(key)) { + return EntityType.KEY; + } + } + } + return EntityType.UNKNOWN; + } + + /** + * This method handles disk usage of direct keys. + * + * @param parentId The identifier for the parent bucket. + * @param withReplica if withReplica is enabled, set sizeWithReplica + * for each direct key's DU + * @param listFile if listFile is enabled, append key DU as a children + * keys + * @param duData the current DU data + * @param normalizedPath the normalized path request + * @return the total DU of all direct keys + * @throws IOException IOE + */ + @Override + public long handleDirectKeys(long parentId, boolean withReplica, + boolean listFile, + List duData, + String normalizedPath) throws IOException { + + NSSummary nsSummary = getReconNamespaceSummaryManager() + .getNSSummary(parentId); + // Handle the case of an empty bucket. 
+ if (nsSummary == null) { + return 0; + } + + Table keyTable = getKeyTable(); + long keyDataSizeWithReplica = 0L; + + try ( + TableIterator> + iterator = keyTable.iterator()) { + + String seekPrefix = OM_KEY_PREFIX + + vol + + OM_KEY_PREFIX + + bucket + + OM_KEY_PREFIX; + + iterator.seek(seekPrefix); + + while (iterator.hasNext()) { + // KeyName : OmKeyInfo-Object + Table.KeyValue kv = iterator.next(); + String dbKey = kv.getKey(); + + // Exit loop if the key doesn't match the seekPrefix. + if (!dbKey.startsWith(seekPrefix)) { + break; + } + + OmKeyInfo keyInfo = kv.getValue(); + if (keyInfo != null) { + DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage(); + String objectName = keyInfo.getKeyName(); + diskUsage.setSubpath(objectName); + diskUsage.setKey(true); + diskUsage.setSize(keyInfo.getDataSize()); + + if (withReplica) { + long keyDU = keyInfo.getReplicatedSize(); + keyDataSizeWithReplica += keyDU; + diskUsage.setSizeWithReplica(keyDU); + } + // List all the keys for the OBS bucket if requested. + if (listFile) { + duData.add(diskUsage); + } + } + } + } + + return keyDataSizeWithReplica; + } + + /** + * Calculates the total disk usage (DU) for an Object Store Bucket (OBS) by + * summing the sizes of all keys contained within the bucket. + * Since OBS buckets operate on a flat hierarchy, this method iterates through + * all the keys in the bucket without the need to traverse directories. + * + * @param parentId The identifier for the parent bucket. + * @return The total disk usage of all keys within the specified OBS bucket. + * @throws IOException + */ + @Override + public long calculateDUUnderObject(long parentId) throws IOException { + // Initialize the total disk usage variable. + long totalDU = 0L; + + // Access the key table for the bucket. + Table keyTable = getKeyTable(); + + try ( + TableIterator> + iterator = keyTable.iterator()) { + // Construct the seek prefix to filter keys under this bucket. + String seekPrefix = + OM_KEY_PREFIX + vol + OM_KEY_PREFIX + bucket + OM_KEY_PREFIX; + iterator.seek(seekPrefix); + + // Iterate over keys in the bucket. + while (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + String keyName = kv.getKey(); + + // Break the loop if the current key does not start with the seekPrefix. + if (!keyName.startsWith(seekPrefix)) { + break; + } + + // Sum the size of each key to the total disk usage. + OmKeyInfo keyInfo = kv.getValue(); + if (keyInfo != null) { + totalDU += keyInfo.getDataSize(); + } + } + } + + // Return the total disk usage of all keys in the bucket. + return totalDU; + } + + /** + * Object stores do not support directories. + * + * @throws UnsupportedOperationException + */ + @Override + public long getDirObjectId(String[] names) + throws UnsupportedOperationException { + throw new UnsupportedOperationException( + "Object stores do not support directories."); + } + + /** + * Object stores do not support directories. + * + * @throws UnsupportedOperationException + */ + @Override + public long getDirObjectId(String[] names, int cutoff) + throws UnsupportedOperationException { + throw new UnsupportedOperationException( + "Object stores do not support directories."); + } + + /** + * Returns the keyInfo object from the KEY table. + * @return OmKeyInfo + */ + @Override + public OmKeyInfo getKeyInfo(String[] names) throws IOException { + String ozoneKey = OM_KEY_PREFIX; + ozoneKey += String.join(OM_KEY_PREFIX, names); + + return getKeyTable().getSkipCache(ozoneKey); + } + + /** + * Object stores do not support directories. 
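Both handleDirectKeys() and calculateDUUnderObject() above rely on the same access pattern: seek the sorted key table to the "/volume/bucket/" prefix and walk forward until keys stop matching it. A self-contained sketch of that pattern, with a TreeMap standing in for the RocksDB-backed table:

import java.util.Map;
import java.util.TreeMap;

public final class PrefixScanSketch {
  public static void main(String[] args) {
    // Key -> data size, ordered like Recon's key table ("/volume/bucket/key").
    TreeMap<String, Long> keyTable = new TreeMap<>();
    keyTable.put("/vol1/bucketA/k1", 10L);
    keyTable.put("/vol1/bucketA/k2", 20L);
    keyTable.put("/vol1/bucketB/k1", 99L);  // different bucket, must be skipped

    String seekPrefix = "/vol1/bucketA/";
    long totalDiskUsage = 0L;
    // tailMap(seekPrefix) plays the role of iterator.seek(seekPrefix).
    for (Map.Entry<String, Long> e : keyTable.tailMap(seekPrefix).entrySet()) {
      if (!e.getKey().startsWith(seekPrefix)) {
        break;  // left the bucket's key range; stop scanning
      }
      totalDiskUsage += e.getValue();
    }
    System.out.println(totalDiskUsage);  // 30
  }
}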
+ * + * @throws UnsupportedOperationException + */ + @Override + public OmDirectoryInfo getDirInfo(String[] names) throws IOException { + throw new UnsupportedOperationException( + "Object stores do not support directories."); + } + + public Table getKeyTable() { + return getOmMetadataManager().getKeyTable(getBucketLayout()); + } + + public BucketLayout getBucketLayout() { + return BucketLayout.OBJECT_STORE; + } + +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java index eaf08d9ca83e..ba03ec61f145 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java @@ -50,6 +50,12 @@ public class UnhealthyContainersResponse { @JsonProperty("misReplicatedCount") private long misReplicatedCount = 0; + /** + * Total count of containers with negative size. + */ + @JsonProperty("negativeSizeCount") + private long negativeSizeCount = 0; + /** * A collection of unhealthy containers. */ @@ -77,6 +83,9 @@ public void setSummaryCount(String state, long count) { } else if (state.equals( UnHealthyContainerStates.MIS_REPLICATED.toString())) { this.misReplicatedCount = count; + } else if (state.equals( + UnHealthyContainerStates.NEGATIVE_SIZE.toString())) { + this.negativeSizeCount = count; } } @@ -96,6 +105,10 @@ public long getMisReplicatedCount() { return misReplicatedCount; } + public long getNegativeSizeCount() { + return negativeSizeCount; + } + public Collection getContainers() { return containers; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java index 577fb7d2bcc1..2284fe84e6d4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java @@ -57,6 +57,7 @@ import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT; import static org.apache.hadoop.ozone.recon.ReconConstants.TOTAL_KEYS; import static org.apache.hadoop.ozone.recon.ReconConstants.TOTAL_USED_BYTES; +import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.EMPTY_MISSING; /** @@ -217,6 +218,8 @@ private void initializeUnhealthyContainerStateStatsMap( UnHealthyContainerStates.OVER_REPLICATED, new HashMap<>()); unhealthyContainerStateStatsMap.put( UnHealthyContainerStates.MIS_REPLICATED, new HashMap<>()); + unhealthyContainerStateStatsMap.put( + UnHealthyContainerStates.NEGATIVE_SIZE, new HashMap<>()); } private ContainerHealthStatus setCurrentContainer(long recordId) @@ -293,6 +296,8 @@ private long processExistingDBRecords(long currentTime, rec.update(); } } else { + LOG.info("DELETED existing unhealthy container record...for Container: {}", + currentContainer.getContainerID()); rec.delete(); } } catch (ContainerNotFoundException cnf) { @@ -313,13 +318,21 @@ private long processExistingDBRecords(long currentTime, private void processContainer(ContainerInfo container, long currentTime, Map> - unhealthyContainerStateStatsMap) { + unhealthyContainerStateStatsMap) { try { Set containerReplicas = containerManager.getContainerReplicas(container.containerID()); 
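The health task below short-circuits on containers whose usedBytes is negative and files them under the new NEGATIVE_SIZE state instead of running the replication checks. A reduced model of that triage step; the real code operates on ContainerHealthStatus and the jOOQ record types, which are replaced here by primitives and a small stand-in enum.

public final class ContainerTriageSketch {

  // Stand-in for the subset of UnHealthyContainerStates used here.
  enum State { HEALTHY, NEGATIVE_SIZE, UNDER_REPLICATED }

  /** Classifies a container from its used bytes and replica count. */
  static State classify(long usedBytes, int replicaCount, int requiredReplicas) {
    if (usedBytes < 0) {
      // Checked first, as in the patch, so a corrupt size is reported even
      // when replication otherwise looks fine.
      return State.NEGATIVE_SIZE;
    }
    return replicaCount < requiredReplicas
        ? State.UNDER_REPLICATED : State.HEALTHY;
  }

  public static void main(String[] args) {
    System.out.println(classify(-5L, 3, 3));   // NEGATIVE_SIZE
    System.out.println(classify(100L, 2, 3));  // UNDER_REPLICATED
  }
}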
ContainerHealthStatus h = new ContainerHealthStatus(container, containerReplicas, placementPolicy, reconContainerMetadataManager, conf); + + // Handle negative sized containers separately + if (h.getContainer().getUsedBytes() < 0) { + handleNegativeSizedContainers(h, currentTime, + unhealthyContainerStateStatsMap); + return; + } + if (h.isHealthilyReplicated() || h.isDeleted()) { return; } @@ -365,6 +378,32 @@ private boolean containerDeletedInSCM(ContainerInfo containerInfo) { return false; } + /** + * This method is used to handle containers with negative sizes. It logs an + * error message and inserts a record into the UNHEALTHY_CONTAINERS table. + * @param containerHealthStatus + * @param currentTime + * @param unhealthyContainerStateStatsMap + */ + private void handleNegativeSizedContainers( + ContainerHealthStatus containerHealthStatus, long currentTime, + Map> + unhealthyContainerStateStatsMap) { + ContainerInfo container = containerHealthStatus.getContainer(); + LOG.error( + "Container {} has negative size. Please visit Recon's unhealthy " + + "container endpoint for more details.", + container.getContainerID()); + UnhealthyContainers record = + ContainerHealthRecords.recordForState(containerHealthStatus, + UnHealthyContainerStates.NEGATIVE_SIZE, currentTime); + List records = Collections.singletonList(record); + populateContainerStats(containerHealthStatus, + UnHealthyContainerStates.NEGATIVE_SIZE, + unhealthyContainerStateStatsMap); + containerHealthSchemaManager.insertUnhealthyContainerRecords(records); + } + /** * Helper methods to generate and update the required database records for * unhealthy containers. @@ -394,7 +433,7 @@ public static boolean retainOrUpdateRecord( boolean returnValue = false; switch (UnHealthyContainerStates.valueOf(rec.getContainerState())) { case MISSING: - returnValue = container.isMissing(); + returnValue = container.isMissing() && !container.isEmpty(); break; case MIS_REPLICATED: returnValue = keepMisReplicatedRecord(container, rec); @@ -459,10 +498,10 @@ public static List generateUnhealthyRecords( "starting with **Container State Stats:**"); } records.add( - recordForState(container, UnHealthyContainerStates.EMPTY_MISSING, + recordForState(container, EMPTY_MISSING, time)); populateContainerStats(container, - UnHealthyContainerStates.EMPTY_MISSING, + EMPTY_MISSING, unhealthyContainerStateStatsMap); } // A container cannot have any other records if it is missing so return diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/OzoneManagerSyncMetrics.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/OzoneManagerSyncMetrics.java index e6ad328ab98f..e1a3c97d2be2 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/OzoneManagerSyncMetrics.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/OzoneManagerSyncMetrics.java @@ -107,39 +107,39 @@ public void setAverageNumUpdatesInDeltaRequest(float avg) { averageNumUpdatesInDeltaRequest.set(avg); } - public MutableCounterLong getNumSnapshotRequests() { - return numSnapshotRequests; + public long getNumSnapshotRequests() { + return numSnapshotRequests.value(); } - public MutableCounterLong getNumSnapshotRequestsFailed() { - return numSnapshotRequestsFailed; + public long getNumSnapshotRequestsFailed() { + return numSnapshotRequestsFailed.value(); } - public MutableRate getSnapshotRequestLatency() { + MutableRate getSnapshotRequestLatency() { return snapshotRequestLatency; } - public 
MutableCounterLong getNumDeltaRequestsFailed() { - return numDeltaRequestsFailed; + public long getNumDeltaRequestsFailed() { + return numDeltaRequestsFailed.value(); } - public MutableCounterLong getNumUpdatesInDeltaTotal() { - return numUpdatesInDeltaTotal; + public long getNumUpdatesInDeltaTotal() { + return numUpdatesInDeltaTotal.value(); } - public MutableGaugeFloat getAverageNumUpdatesInDeltaRequest() { - return averageNumUpdatesInDeltaRequest; + public float getAverageNumUpdatesInDeltaRequest() { + return averageNumUpdatesInDeltaRequest.value(); } - public MutableCounterLong getNumNonZeroDeltaRequests() { - return numNonZeroDeltaRequests; + public long getNumNonZeroDeltaRequests() { + return numNonZeroDeltaRequests.value(); } public void setSequenceNumberLag(long lag) { sequenceNumberLag.set(lag); } - public MutableGaugeLong getSequenceNumberLag() { - return sequenceNumberLag; + public long getSequenceNumberLag() { + return sequenceNumberLag.value(); } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java index 364aff103a51..0c13376fa526 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java @@ -34,6 +34,9 @@ import org.jooq.DSLContext; import org.jooq.Record; import org.jooq.SelectQuery; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.util.List; /** @@ -41,6 +44,8 @@ */ @Singleton public class ContainerHealthSchemaManager { + private static final Logger LOG = + LoggerFactory.getLogger(ContainerHealthSchemaManager.class); private final UnhealthyContainersDao unhealthyContainersDao; private final ContainerSchemaDefinition containerSchemaDefinition; @@ -113,6 +118,12 @@ public Cursor getAllUnhealthyRecordsCursor() { } public void insertUnhealthyContainerRecords(List recs) { + if (LOG.isDebugEnabled()) { + recs.forEach(rec -> { + LOG.debug("rec.getContainerId() : {}, rec.getContainerState(): {} ", rec.getContainerId(), + rec.getContainerState()); + }); + } unhealthyContainersDao.insert(recs); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java index 2040b7b343d9..1fc114eabd75 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.util.List; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; @@ -105,4 +106,11 @@ List listBucketsUnderVolume(String volumeName, */ List listBucketsUnderVolume( String volumeName) throws IOException; + + /** + * Return the OzoneConfiguration instance used by Recon. 
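The OzoneManagerSyncMetrics getters above now return plain long/float values instead of the Mutable* objects, so the counters can only be incremented from inside the metrics class. A minimal sketch of that encapsulation pattern with the Hadoop metrics2 library; the class and metric names here are invented for illustration.

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

public final class SyncMetricsSketch {
  private final MetricsRegistry registry =
      new MetricsRegistry("SyncMetricsSketch");
  private final MutableCounterLong snapshotRequests =
      registry.newCounter("NumSnapshotRequests", "Snapshot requests made", 0L);

  // Mutation stays inside the class...
  void incrSnapshotRequests() {
    snapshotRequests.incr();
  }

  // ...while callers only ever see a read-only snapshot of the value.
  public long getNumSnapshotRequests() {
    return snapshotRequests.value();
  }

  public static void main(String[] args) {
    SyncMetricsSketch metrics = new SyncMetricsSketch();
    metrics.incrSnapshotRequests();
    System.out.println(metrics.getNumSnapshotRequests());  // 1
  }
}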
+ * @return + */ + OzoneConfiguration getOzoneConfiguration(); + } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java index ad0526363df0..4b041f6511f6 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java @@ -291,6 +291,11 @@ public List listBucketsUnderVolume(final String volumeName) Integer.MAX_VALUE); } + @Override + public OzoneConfiguration getOzoneConfiguration() { + return ozoneConfiguration; + } + private List listAllBuckets(final int maxNumberOfBuckets) throws IOException { List result = new ArrayList<>(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java index fb387861f0e3..105406f2bdf6 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java @@ -25,6 +25,7 @@ import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.scm.ReconScmTask; import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; +import org.hadoop.ozone.recon.schema.ContainerSchemaDefinition; import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition; import org.hadoop.ozone.recon.schema.tables.daos.ContainerCountBySizeDao; import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; @@ -34,13 +35,14 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.ArrayList; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.DELETED; import static org.hadoop.ozone.recon.schema.tables.ContainerCountBySizeTable.CONTAINER_COUNT_BY_SIZE; @@ -60,6 +62,8 @@ public class ContainerSizeCountTask extends ReconScmTask { private ContainerCountBySizeDao containerCountBySizeDao; private DSLContext dslContext; private HashMap processedContainers = new HashMap<>(); + private Map> + unhealthyContainerStateStatsMap; private ReadWriteLock lock = new ReentrantReadWriteLock(true); public ContainerSizeCountTask( @@ -121,7 +125,17 @@ protected synchronized void run() { private void process(ContainerInfo container, Map map) { final ContainerID id = container.containerID(); - final long currentSize = container.getUsedBytes(); + final long usedBytes = container.getUsedBytes(); + final long currentSize; + + if (usedBytes < 0) { + LOG.warn("Negative usedBytes ({}) for container {}, treating it as 0", + usedBytes, id); + currentSize = 0; + } else { + currentSize = usedBytes; + } + final Long previousSize = processedContainers.put(id, currentSize); if (previousSize != null) { decrementContainerSizeCount(previousSize, map); @@ -132,24 +146,27 @@ private void process(ContainerInfo container, /** * The process() function is responsible for updating the counts of * containers being tracked in a containerSizeCountMap based on the - * ContainerInfo objects in the list containers.It then iterates through + 
* ContainerInfo objects in the list containers. It then iterates through * the list of containers and does the following for each container: * - * 1) If the container is not present in processedContainers, - * it is a new container, so it is added to the processedContainers map - * and the count for its size in the containerSizeCountMap is incremented - * by 1 using the handlePutKeyEvent() function. - * 2) If the container is present in processedContainers but its size has - * been updated to the new size then the count for the old size in the - * containerSizeCountMap is decremented by 1 using the - * handleDeleteKeyEvent() function. The count for the new size is then - * incremented by 1 using the handlePutKeyEvent() function. - * 3) If the container is not present in containers list, it means the - * container has been deleted. - * The remaining containers inside the deletedContainers map are the ones - * that are not in the cluster and need to be deleted. Finally, the counts in - * the containerSizeCountMap are written to the database using the - * writeCountsToDB() function. + * 1) If the container's state is not "deleted," it will be processed: + * - If the container is not present in processedContainers, it is a new + * container. Therefore, it is added to the processedContainers map, and + * the count for its size in the containerSizeCountMap is incremented by + * 1 using the handlePutKeyEvent() function. + * - If the container is present in processedContainers but its size has + * been updated to a new size, the count for the old size in the + * containerSizeCountMap is decremented by 1 using the + * handleDeleteKeyEvent() function. Subsequently, the count for the new + * size is incremented by 1 using the handlePutKeyEvent() function. + * + * 2) If the container's state is "deleted," it is skipped, as deleted + * containers are not processed. + * + * After processing, the remaining containers inside the deletedContainers map + * are those that are not in the cluster and need to be deleted from the total + * size counts. Finally, the counts in the containerSizeCountMap are written + * to the database using the writeCountsToDB() function. */ public void process(List containers) { lock.writeLock().lock(); @@ -161,7 +178,9 @@ public void process(List containers) { // Loop to handle container create and size-update operations for (ContainerInfo container : containers) { - // The containers present in the cache hence it is not yet deleted + if (container.getState().equals(DELETED)) { + continue; // Skip deleted containers + } deletedContainers.remove(container.containerID()); // For New Container being created try { @@ -246,10 +265,10 @@ public String getTaskName() { /** * - * The handleContainerDeleteOperations() function loops through the entries - * in the deletedContainers map and calls the handleDeleteKeyEvent() function - * for each one. This will decrement the size counts of those containers by - * one which are no longer present in the cluster + * Handles the deletion of containers by updating the tracking of processed containers + * and adjusting the count of containers based on their sizes. When a container is deleted, + * it is removed from the tracking of processed containers, and the count of containers + * corresponding to its size is decremented in the container size count map. 
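Editor's note: the size-tracking rules spelled out in the comments above (skip DELETED containers, clamp negative usedBytes to zero, move a container between size bins when its size changes, decrement the bin when it disappears) can be summarized in a small self-contained sketch. Everything below — SizeBinCounter and its power-of-two bin() helper — is an illustrative stand-in, not the Recon implementation; the real bins come from ReconUtils.getContainerSizeUpperBound.

import java.util.HashMap;
import java.util.Map;

class SizeBinCounter {
  // containerId -> the size last counted for that container
  private final Map<Long, Long> processed = new HashMap<>();
  // upper bound of a size bin -> number of containers currently in that bin
  private final Map<Long, Long> binCounts = new HashMap<>();

  void process(long containerId, long usedBytes, boolean deleted) {
    if (deleted) {
      return;                               // deleted containers are skipped
    }
    long size = Math.max(usedBytes, 0L);    // negative usedBytes counted as 0
    Long previous = processed.put(containerId, size);
    if (previous != null) {
      binCounts.merge(bin(previous), -1L, Long::sum);   // leave the old bin
    }
    binCounts.merge(bin(size), 1L, Long::sum);          // enter the new bin
  }

  // 0-byte containers get a dedicated bin; otherwise round up to a power of
  // two purely for illustration.
  private static long bin(long size) {
    if (size == 0) {
      return 0L;
    }
    long bound = 1L;
    while (bound < size) {
      bound <<= 1;
    }
    return bound;
  }
}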
* * Used by process() * @@ -261,6 +280,9 @@ private void handleContainerDeleteOperations( Map containerSizeCountMap) { for (Map.Entry containerId : deletedContainers.entrySet()) { + // processedContainers will only keep a track of all containers that have + // been processed except DELETED containers. + processedContainers.remove(containerId.getKey()); long containerSize = deletedContainers.get(containerId.getKey()); decrementContainerSizeCount(containerSize, containerSizeCountMap); } @@ -316,19 +338,26 @@ private static void updateContainerSizeCount(long containerSize, int delta, } /** - * * The purpose of this function is to categorize containers into different * size ranges, or "bins," based on their size. * The ContainerSizeCountKey object is used to store the upper bound value * for each size range, and is later used to lookup the count of containers * in that size range within a Map. * - * Used by decrementContainerSizeCount() and incrementContainerSizeCount() + * If the container size is 0, the method sets the size of + * ContainerSizeCountKey as zero without calculating the upper bound. Used by + * decrementContainerSizeCount() and incrementContainerSizeCount() * * @param containerSize to calculate the upperSizeBound */ private static ContainerSizeCountKey getContainerSizeCountKey( long containerSize) { + // If containerSize is 0, return a ContainerSizeCountKey with size 0 + if (containerSize == 0) { + return new ContainerSizeCountKey(0L); + } + + // Otherwise, calculate the upperSizeBound return new ContainerSizeCountKey( ReconUtils.getContainerSizeUpperBound(containerSize)); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java new file mode 100644 index 000000000000..5a6d7a256e49 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.commons.lang3.tuple.Triple; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.HashMap; + +/** + * Manages records in the Deleted Table, updating counts and sizes of + * pending Key Deletions in the backend. + */ +public class DeletedKeysInsightHandler implements OmTableHandler { + + private static final Logger LOG = + LoggerFactory.getLogger(DeletedKeysInsightHandler.class); + + /** + * Invoked by the process method to add information on those keys that have + * been backlogged in the backend for deletion. + */ + @Override + public void handlePutEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + + String countKey = getTableCountKeyFromTable(tableName); + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + if (event.getValue() != null) { + RepeatedOmKeyInfo repeatedOmKeyInfo = + (RepeatedOmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, + (k, count) -> count + repeatedOmKeyInfo.getOmKeyInfoList().size()); + Pair result = repeatedOmKeyInfo.getTotalSize(); + unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size + result.getLeft()); + replicatedSizeMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size + result.getRight()); + } else { + LOG.warn("Put event does not have the Key Info for {}.", + event.getKey()); + } + + } + + /** + * Invoked by the process method to remove information on those keys that have + * been successfully deleted from the backend. + */ + @Override + public void handleDeleteEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + + String countKey = getTableCountKeyFromTable(tableName); + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + if (event.getValue() != null) { + RepeatedOmKeyInfo repeatedOmKeyInfo = + (RepeatedOmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, (k, count) -> + count > 0 ? count - repeatedOmKeyInfo.getOmKeyInfoList().size() : 0L); + Pair result = repeatedOmKeyInfo.getTotalSize(); + unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size > result.getLeft() ? size - result.getLeft() : 0L); + replicatedSizeMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size > result.getRight() ? size - result.getRight() : + 0L); + } else { + LOG.warn("Delete event does not have the Key Info for {}.", + event.getKey()); + } + } + + /** + * Invoked by the process method to update the statistics on the keys + * pending to be deleted. 
+ */ + @Override + public void handleUpdateEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + // The size of deleted keys cannot change hence no-op. + return; + } + + /** + * Invoked by the reprocess method to calculate the records count of the + * deleted table and the sizes of replicated and unreplicated keys that are + * pending deletion in Ozone. + */ + @Override + public Triple getTableSizeAndCount( + TableIterator> iterator) + throws IOException { + long count = 0; + long unReplicatedSize = 0; + long replicatedSize = 0; + + if (iterator != null) { + while (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + if (kv != null && kv.getValue() != null) { + RepeatedOmKeyInfo repeatedOmKeyInfo = (RepeatedOmKeyInfo) kv + .getValue(); + Pair result = repeatedOmKeyInfo.getTotalSize(); + unReplicatedSize += result.getRight(); + replicatedSize += result.getLeft(); + count += repeatedOmKeyInfo.getOmKeyInfoList().size(); + } + } + } + return Triple.of(count, unReplicatedSize, replicatedSize); + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java index 42356191c501..30fdb7c1292e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java @@ -38,6 +38,7 @@ import java.util.concurrent.Callable; import java.util.concurrent.Future; import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; /** * Task to query data from OMDB and write into Recon RocksDB. @@ -62,12 +63,13 @@ */ public class NSSummaryTask implements ReconOmTask { private static final Logger LOG = - LoggerFactory.getLogger(NSSummaryTask.class); + LoggerFactory.getLogger(NSSummaryTask.class); private final ReconNamespaceSummaryManager reconNamespaceSummaryManager; private final ReconOMMetadataManager reconOMMetadataManager; private final NSSummaryTaskWithFSO nsSummaryTaskWithFSO; private final NSSummaryTaskWithLegacy nsSummaryTaskWithLegacy; + private final NSSummaryTaskWithOBS nsSummaryTaskWithOBS; private final OzoneConfiguration ozoneConfiguration; @Inject @@ -86,6 +88,9 @@ public NSSummaryTask(ReconNamespaceSummaryManager this.nsSummaryTaskWithLegacy = new NSSummaryTaskWithLegacy( reconNamespaceSummaryManager, reconOMMetadataManager, ozoneConfiguration); + this.nsSummaryTaskWithOBS = new NSSummaryTaskWithOBS( + reconNamespaceSummaryManager, + reconOMMetadataManager, ozoneConfiguration); } @Override @@ -95,20 +100,28 @@ public String getTaskName() { @Override public Pair process(OMUpdateEventBatch events) { - boolean success; - success = nsSummaryTaskWithFSO.processWithFSO(events); - if (success) { - success = nsSummaryTaskWithLegacy.processWithLegacy(events); - } else { + boolean success = nsSummaryTaskWithFSO.processWithFSO(events); + if (!success) { LOG.error("processWithFSO failed."); } + success = nsSummaryTaskWithLegacy.processWithLegacy(events); + if (!success) { + LOG.error("processWithLegacy failed."); + } + success = nsSummaryTaskWithOBS.processWithOBS(events); + if (!success) { + LOG.error("processWithOBS failed."); + } return new ImmutablePair<>(getTaskName(), success); } @Override public Pair reprocess(OMMetadataManager omMetadataManager) { + // Initialize a list of tasks to run in parallel Collection> tasks = new ArrayList<>(); + 
long startTime = System.nanoTime(); // Record start time + try { // reinit Recon RocksDB's namespace CF. reconNamespaceSummaryManager.clearNSSummaryTable(); @@ -122,6 +135,8 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { .reprocessWithFSO(omMetadataManager)); tasks.add(() -> nsSummaryTaskWithLegacy .reprocessWithLegacy(reconOMMetadataManager)); + tasks.add(() -> nsSummaryTaskWithOBS + .reprocessWithOBS(reconOMMetadataManager)); List> results; ThreadFactory threadFactory = new ThreadFactoryBuilder() @@ -137,17 +152,24 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { } } } catch (InterruptedException ex) { - LOG.error("Error while reprocessing NSSummary " + - "table in Recon DB. ", ex); + LOG.error("Error while reprocessing NSSummary table in Recon DB.", ex); return new ImmutablePair<>(getTaskName(), false); } catch (ExecutionException ex2) { - LOG.error("Error while reprocessing NSSummary " + - "table in Recon DB. ", ex2); + LOG.error("Error while reprocessing NSSummary table in Recon DB.", ex2); return new ImmutablePair<>(getTaskName(), false); } finally { executorService.shutdown(); + + long endTime = System.nanoTime(); + // Convert to milliseconds + long durationInMillis = + TimeUnit.NANOSECONDS.toMillis(endTime - startTime); + + // Log performance metrics + LOG.info("Task execution time: {} milliseconds", durationInMillis); } + return new ImmutablePair<>(getTaskName(), true); } -} +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java index ec1ccd0542fc..4555b976ffed 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java @@ -47,7 +47,7 @@ */ public class NSSummaryTaskWithLegacy extends NSSummaryTaskDbEventHandler { - private static final BucketLayout BUCKET_LAYOUT = BucketLayout.LEGACY; + private static final BucketLayout LEGACY_BUCKET_LAYOUT = BucketLayout.LEGACY; private static final Logger LOG = LoggerFactory.getLogger(NSSummaryTaskWithLegacy.class); @@ -71,16 +71,17 @@ public NSSummaryTaskWithLegacy(ReconNamespaceSummaryManager public boolean processWithLegacy(OMUpdateEventBatch events) { Iterator eventIterator = events.getIterator(); Map nsSummaryMap = new HashMap<>(); + ReconOMMetadataManager metadataManager = getReconOMMetadataManager(); while (eventIterator.hasNext()) { - OMDBUpdateEvent omdbUpdateEvent = eventIterator.next(); + OMDBUpdateEvent omdbUpdateEvent = + eventIterator.next(); OMDBUpdateEvent.OMDBUpdateAction action = omdbUpdateEvent.getAction(); // we only process updates on OM's KeyTable String table = omdbUpdateEvent.getTable(); - boolean updateOnKeyTable = table.equals(KEY_TABLE); - if (!updateOnKeyTable) { + + if (!table.equals(KEY_TABLE)) { continue; } @@ -90,102 +91,26 @@ public boolean processWithLegacy(OMUpdateEventBatch events) { OMDBUpdateEvent keyTableUpdateEvent = omdbUpdateEvent; Object value = keyTableUpdateEvent.getValue(); Object oldValue = keyTableUpdateEvent.getOldValue(); + if (!(value instanceof OmKeyInfo)) { LOG.warn("Unexpected value type {} for key {}. Skipping processing.", value.getClass().getName(), updatedKey); continue; } + OmKeyInfo updatedKeyInfo = (OmKeyInfo) value; OmKeyInfo oldKeyInfo = (OmKeyInfo) oldValue; - // KeyTable entries belong to both Legacy and OBS buckets. 
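Editor's note: stepping back to the NSSummaryTask.reprocess() changes above (the FSO, Legacy and OBS rebuilds submitted in parallel, with the elapsed time logged from the finally block), the pattern can be sketched in isolation. The class below uses invented names and plain java.util.concurrent, not the Recon task classes.

import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

class ParallelReprocessSketch {
  boolean run(List<Callable<Boolean>> rebuilds) throws InterruptedException {
    long startTime = System.nanoTime();
    ExecutorService pool =
        Executors.newFixedThreadPool(Math.max(1, rebuilds.size()));
    try {
      for (Future<Boolean> result : pool.invokeAll(rebuilds)) {
        try {
          if (!result.get()) {
            return false;                 // a rebuild reported failure
          }
        } catch (ExecutionException e) {
          return false;                   // a rebuild threw an exception
        }
      }
      return true;
    } finally {
      pool.shutdown();
      // The duration is logged even when a rebuild fails.
      long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
      System.out.println("Task execution time: " + millis + " milliseconds");
    }
  }
}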
- // Check bucket layout and if it's OBS - // continue to the next iteration. - // Check just for the current KeyInfo. - String volumeName = updatedKeyInfo.getVolumeName(); - String bucketName = updatedKeyInfo.getBucketName(); - String bucketDBKey = getReconOMMetadataManager() - .getBucketKey(volumeName, bucketName); - // Get bucket info from bucket table - OmBucketInfo omBucketInfo = getReconOMMetadataManager() - .getBucketTable().getSkipCache(bucketDBKey); - - if (omBucketInfo.getBucketLayout() - .isObjectStore(enableFileSystemPaths)) { + if (!isBucketLayoutValid(metadataManager, updatedKeyInfo)) { continue; } - setKeyParentID(updatedKeyInfo); - - if (!updatedKeyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { - switch (action) { - case PUT: - handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); - break; - - case DELETE: - handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap); - break; - - case UPDATE: - if (oldKeyInfo != null) { - // delete first, then put - setKeyParentID(oldKeyInfo); - handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap); - } else { - LOG.warn("Update event does not have the old keyInfo for {}.", - updatedKey); - } - handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); - break; - - default: - LOG.debug("Skipping DB update event : {}", - omdbUpdateEvent.getAction()); - } + if (enableFileSystemPaths) { + processWithFileSystemLayout(updatedKeyInfo, oldKeyInfo, action, + nsSummaryMap); } else { - OmDirectoryInfo updatedDirectoryInfo = - new OmDirectoryInfo.Builder() - .setName(updatedKeyInfo.getKeyName()) - .setObjectID(updatedKeyInfo.getObjectID()) - .setParentObjectID(updatedKeyInfo.getParentObjectID()) - .build(); - - OmDirectoryInfo oldDirectoryInfo = null; - - if (oldKeyInfo != null) { - oldDirectoryInfo = - new OmDirectoryInfo.Builder() - .setName(oldKeyInfo.getKeyName()) - .setObjectID(oldKeyInfo.getObjectID()) - .setParentObjectID(oldKeyInfo.getParentObjectID()) - .build(); - } - - switch (action) { - case PUT: - handlePutDirEvent(updatedDirectoryInfo, nsSummaryMap); - break; - - case DELETE: - handleDeleteDirEvent(updatedDirectoryInfo, nsSummaryMap); - break; - - case UPDATE: - if (oldDirectoryInfo != null) { - // delete first, then put - handleDeleteDirEvent(oldDirectoryInfo, nsSummaryMap); - } else { - LOG.warn("Update event does not have the old dirInfo for {}.", - updatedKey); - } - handlePutDirEvent(updatedDirectoryInfo, nsSummaryMap); - break; - - default: - LOG.debug("Skipping DB update event : {}", - omdbUpdateEvent.getAction()); - } + processWithObjectStoreLayout(updatedKeyInfo, oldKeyInfo, action, + nsSummaryMap); } } catch (IOException ioEx) { LOG.error("Unable to process Namespace Summary data in Recon DB. 
", @@ -206,12 +131,118 @@ public boolean processWithLegacy(OMUpdateEventBatch events) { return true; } + private void processWithFileSystemLayout(OmKeyInfo updatedKeyInfo, + OmKeyInfo oldKeyInfo, + OMDBUpdateEvent.OMDBUpdateAction action, + Map nsSummaryMap) + throws IOException { + setKeyParentID(updatedKeyInfo); + + if (!updatedKeyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { + switch (action) { + case PUT: + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + + case DELETE: + handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + + case UPDATE: + if (oldKeyInfo != null) { + setKeyParentID(oldKeyInfo); + handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap); + } else { + LOG.warn("Update event does not have the old keyInfo for {}.", + updatedKeyInfo.getKeyName()); + } + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + + default: + LOG.debug("Skipping DB update event for Key: {}", action); + } + } else { + OmDirectoryInfo updatedDirectoryInfo = new OmDirectoryInfo.Builder() + .setName(updatedKeyInfo.getKeyName()) + .setObjectID(updatedKeyInfo.getObjectID()) + .setParentObjectID(updatedKeyInfo.getParentObjectID()) + .build(); + + OmDirectoryInfo oldDirectoryInfo = null; + + if (oldKeyInfo != null) { + oldDirectoryInfo = + new OmDirectoryInfo.Builder() + .setName(oldKeyInfo.getKeyName()) + .setObjectID(oldKeyInfo.getObjectID()) + .setParentObjectID(oldKeyInfo.getParentObjectID()) + .build(); + } + + switch (action) { + case PUT: + handlePutDirEvent(updatedDirectoryInfo, nsSummaryMap); + break; + + case DELETE: + handleDeleteDirEvent(updatedDirectoryInfo, nsSummaryMap); + break; + + case UPDATE: + if (oldDirectoryInfo != null) { + handleDeleteDirEvent(oldDirectoryInfo, nsSummaryMap); + } else { + LOG.warn("Update event does not have the old dirInfo for {}.", + updatedKeyInfo.getKeyName()); + } + handlePutDirEvent(updatedDirectoryInfo, nsSummaryMap); + break; + + default: + LOG.debug("Skipping DB update event for Directory: {}", action); + } + } + } + + private void processWithObjectStoreLayout(OmKeyInfo updatedKeyInfo, + OmKeyInfo oldKeyInfo, + OMDBUpdateEvent.OMDBUpdateAction action, + Map nsSummaryMap) + throws IOException { + setParentBucketId(updatedKeyInfo); + + switch (action) { + case PUT: + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + + case DELETE: + handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + + case UPDATE: + if (oldKeyInfo != null) { + setParentBucketId(oldKeyInfo); + handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap); + } else { + LOG.warn("Update event does not have the old keyInfo for {}.", + updatedKeyInfo.getKeyName()); + } + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + + default: + LOG.debug("Skipping DB update event for Key: {}", action); + } + } + public boolean reprocessWithLegacy(OMMetadataManager omMetadataManager) { Map nsSummaryMap = new HashMap<>(); try { Table keyTable = - omMetadataManager.getKeyTable(BUCKET_LAYOUT); + omMetadataManager.getKeyTable(LEGACY_BUCKET_LAYOUT); try (TableIterator> keyTableIter = keyTable.iterator()) { @@ -223,30 +254,29 @@ public boolean reprocessWithLegacy(OMMetadataManager omMetadataManager) { // KeyTable entries belong to both Legacy and OBS buckets. // Check bucket layout and if it's OBS // continue to the next iteration. 
- String volumeName = keyInfo.getVolumeName(); - String bucketName = keyInfo.getBucketName(); - String bucketDBKey = omMetadataManager - .getBucketKey(volumeName, bucketName); - // Get bucket info from bucket table - OmBucketInfo omBucketInfo = omMetadataManager - .getBucketTable().getSkipCache(bucketDBKey); - - if (omBucketInfo.getBucketLayout() - .isObjectStore(enableFileSystemPaths)) { + if (!isBucketLayoutValid((ReconOMMetadataManager) omMetadataManager, + keyInfo)) { continue; } - setKeyParentID(keyInfo); - - if (keyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { - OmDirectoryInfo directoryInfo = - new OmDirectoryInfo.Builder() - .setName(keyInfo.getKeyName()) - .setObjectID(keyInfo.getObjectID()) - .setParentObjectID(keyInfo.getParentObjectID()) - .build(); - handlePutDirEvent(directoryInfo, nsSummaryMap); + if (enableFileSystemPaths) { + // The LEGACY bucket is a file system bucket. + setKeyParentID(keyInfo); + + if (keyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { + OmDirectoryInfo directoryInfo = + new OmDirectoryInfo.Builder() + .setName(keyInfo.getKeyName()) + .setObjectID(keyInfo.getObjectID()) + .setParentObjectID(keyInfo.getParentObjectID()) + .build(); + handlePutDirEvent(directoryInfo, nsSummaryMap); + } else { + handlePutKeyEvent(keyInfo, nsSummaryMap); + } } else { + // The LEGACY bucket is an object store bucket. + setParentBucketId(keyInfo); handlePutKeyEvent(keyInfo, nsSummaryMap); } if (!checkAndCallFlushToDB(nsSummaryMap)) { @@ -290,7 +320,7 @@ private void setKeyParentID(OmKeyInfo keyInfo) throws IOException { getReconOMMetadataManager().getOzoneKey(keyInfo.getVolumeName(), keyInfo.getBucketName(), parentKeyName); OmKeyInfo parentKeyInfo = getReconOMMetadataManager() - .getKeyTable(BUCKET_LAYOUT) + .getKeyTable(LEGACY_BUCKET_LAYOUT) .getSkipCache(fullParentKeyName); if (parentKeyInfo != null) { @@ -300,17 +330,53 @@ private void setKeyParentID(OmKeyInfo keyInfo) throws IOException { "NSSummaryTaskWithLegacy is null"); } } else { - String bucketKey = getReconOMMetadataManager() - .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName()); - OmBucketInfo parentBucketInfo = - getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey); + setParentBucketId(keyInfo); + } + } - if (parentBucketInfo != null) { - keyInfo.setParentObjectID(parentBucketInfo.getObjectID()); - } else { - throw new IOException("ParentKeyInfo for " + - "NSSummaryTaskWithLegacy is null"); - } + /** + * Set the parent object ID for a bucket. + *@paramkeyInfo + *@throwsIOException + */ + private void setParentBucketId(OmKeyInfo keyInfo) + throws IOException { + String bucketKey = getReconOMMetadataManager() + .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName()); + OmBucketInfo parentBucketInfo = + getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey); + + if (parentBucketInfo != null) { + keyInfo.setParentObjectID(parentBucketInfo.getObjectID()); + } else { + throw new IOException("ParentKeyInfo for " + + "NSSummaryTaskWithLegacy is null"); } } + + /** + * Check if the bucket layout is LEGACY. 
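Editor's note: the split of legacy-bucket handling above into a file-system path and an object-store path boils down to a single dispatch decision. The stripped-down sketch below uses made-up handler methods (the real ones update the namespace-summary map) and assumes OM_KEY_PREFIX is the "/" separator.

class LegacyKeyDispatcherSketch {
  private final boolean enableFileSystemPaths;

  LegacyKeyDispatcherSketch(boolean enableFileSystemPaths) {
    this.enableFileSystemPaths = enableFileSystemPaths;
  }

  void onPut(String keyName) {
    if (enableFileSystemPaths) {
      // File-system semantics: a trailing "/" marks a directory entry.
      if (keyName.endsWith("/")) {
        handlePutDir(keyName);
      } else {
        handlePutKey(keyName);
      }
    } else {
      // Object-store semantics: every key is flat and its parent is the bucket.
      handlePutKey(keyName);
    }
  }

  private void handlePutDir(String keyName) { /* update directory summary */ }

  private void handlePutKey(String keyName) { /* update key summary */ }
}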
+ * @param metadataManager + * @param keyInfo + * @return + */ + private boolean isBucketLayoutValid(ReconOMMetadataManager metadataManager, + OmKeyInfo keyInfo) + throws IOException { + String volumeName = keyInfo.getVolumeName(); + String bucketName = keyInfo.getBucketName(); + String bucketDBKey = metadataManager.getBucketKey(volumeName, bucketName); + OmBucketInfo omBucketInfo = + metadataManager.getBucketTable().getSkipCache(bucketDBKey); + + if (omBucketInfo.getBucketLayout() != LEGACY_BUCKET_LAYOUT) { + LOG.debug( + "Skipping processing for bucket {} as bucket layout is not LEGACY", + bucketName); + return false; + } + + return true; + } + } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java new file mode 100644 index 000000000000..34c7dc967c3a --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java @@ -0,0 +1,236 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.WithParentObjectId; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; + + +/** + * Class for handling OBS specific tasks. + */ +public class NSSummaryTaskWithOBS extends NSSummaryTaskDbEventHandler { + + private static final BucketLayout BUCKET_LAYOUT = BucketLayout.OBJECT_STORE; + + private static final Logger LOG = + LoggerFactory.getLogger(NSSummaryTaskWithOBS.class); + + + public NSSummaryTaskWithOBS( + ReconNamespaceSummaryManager reconNamespaceSummaryManager, + ReconOMMetadataManager reconOMMetadataManager, + OzoneConfiguration ozoneConfiguration) { + super(reconNamespaceSummaryManager, + reconOMMetadataManager, ozoneConfiguration); + } + + + public boolean reprocessWithOBS(OMMetadataManager omMetadataManager) { + Map nsSummaryMap = new HashMap<>(); + + try { + Table keyTable = + omMetadataManager.getKeyTable(BUCKET_LAYOUT); + + try (TableIterator> + keyTableIter = keyTable.iterator()) { + + while (keyTableIter.hasNext()) { + Table.KeyValue kv = keyTableIter.next(); + OmKeyInfo keyInfo = kv.getValue(); + + // KeyTable entries belong to both Legacy and OBS buckets. + // Check bucket layout and if it's anything other than OBS, + // continue to the next iteration. + String volumeName = keyInfo.getVolumeName(); + String bucketName = keyInfo.getBucketName(); + String bucketDBKey = omMetadataManager + .getBucketKey(volumeName, bucketName); + // Get bucket info from bucket table + OmBucketInfo omBucketInfo = omMetadataManager + .getBucketTable().getSkipCache(bucketDBKey); + + if (omBucketInfo.getBucketLayout() != BUCKET_LAYOUT) { + continue; + } + + setKeyParentID(keyInfo); + + handlePutKeyEvent(keyInfo, nsSummaryMap); + if (!checkAndCallFlushToDB(nsSummaryMap)) { + return false; + } + } + } + } catch (IOException ioEx) { + LOG.error("Unable to reprocess Namespace Summary data in Recon DB. 
", + ioEx); + return false; + } + + // flush and commit left out entries at end + if (!flushAndCommitNSToDB(nsSummaryMap)) { + return false; + } + LOG.info("Completed a reprocess run of NSSummaryTaskWithOBS"); + return true; + } + + public boolean processWithOBS(OMUpdateEventBatch events) { + Iterator eventIterator = events.getIterator(); + Map nsSummaryMap = new HashMap<>(); + + while (eventIterator.hasNext()) { + OMDBUpdateEvent omdbUpdateEvent = + eventIterator.next(); + OMDBUpdateEvent.OMDBUpdateAction action = omdbUpdateEvent.getAction(); + + // We only process updates on OM's KeyTable + String table = omdbUpdateEvent.getTable(); + boolean updateOnKeyTable = table.equals(KEY_TABLE); + if (!updateOnKeyTable) { + continue; + } + + String updatedKey = omdbUpdateEvent.getKey(); + + try { + OMDBUpdateEvent keyTableUpdateEvent = omdbUpdateEvent; + Object value = keyTableUpdateEvent.getValue(); + Object oldValue = keyTableUpdateEvent.getOldValue(); + if (value == null) { + LOG.warn("Value is null for key {}. Skipping processing.", + updatedKey); + continue; + } else if (!(value instanceof OmKeyInfo)) { + LOG.warn("Unexpected value type {} for key {}. Skipping processing.", + value.getClass().getName(), updatedKey); + continue; + } + + OmKeyInfo updatedKeyInfo = (OmKeyInfo) value; + OmKeyInfo oldKeyInfo = (OmKeyInfo) oldValue; + + // KeyTable entries belong to both OBS and Legacy buckets. + // Check bucket layout and if it's anything other than OBS, + // continue to the next iteration. + String volumeName = updatedKeyInfo.getVolumeName(); + String bucketName = updatedKeyInfo.getBucketName(); + String bucketDBKey = + getReconOMMetadataManager().getBucketKey(volumeName, bucketName); + // Get bucket info from bucket table + OmBucketInfo omBucketInfo = getReconOMMetadataManager().getBucketTable() + .getSkipCache(bucketDBKey); + + if (omBucketInfo.getBucketLayout() != BUCKET_LAYOUT) { + continue; + } + + setKeyParentID(updatedKeyInfo); + + switch (action) { + case PUT: + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + case DELETE: + handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + case UPDATE: + if (oldKeyInfo != null) { + // delete first, then put + setKeyParentID(oldKeyInfo); + handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap); + } else { + LOG.warn("Update event does not have the old keyInfo for {}.", + updatedKey); + } + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + default: + LOG.debug("Skipping DB update event: {}", action); + } + + if (!checkAndCallFlushToDB(nsSummaryMap)) { + return false; + } + } catch (IOException ioEx) { + LOG.error("Unable to process Namespace Summary data in Recon DB. ", + ioEx); + return false; + } + if (!checkAndCallFlushToDB(nsSummaryMap)) { + return false; + } + } + + // Flush and commit left-out entries at the end + if (!flushAndCommitNSToDB(nsSummaryMap)) { + return false; + } + + LOG.info("Completed a process run of NSSummaryTaskWithOBS"); + return true; + } + + + /** + * KeyTable entries don't have the parentId set. + * In order to reuse the existing methods that rely on + * the parentId, we have to set it explicitly. + * Note: For an OBS key, the parentId will always correspond to the ID of the + * OBS bucket in which it is located. 
+ * + * @param keyInfo + * @throws IOException + */ + private void setKeyParentID(OmKeyInfo keyInfo) + throws IOException { + String bucketKey = getReconOMMetadataManager() + .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName()); + OmBucketInfo parentBucketInfo = + getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey); + + if (parentBucketInfo != null) { + keyInfo.setParentObjectID(parentBucketInfo.getObjectID()); + } else { + throw new IOException("ParentKeyInfo for " + + "NSSummaryTaskWithOBS is null"); + } + } + +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java new file mode 100644 index 000000000000..5ae23b68a703 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.commons.lang3.tuple.Triple; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; + +import java.io.IOException; +import java.util.HashMap; + +/** + * Interface for handling PUT, DELETE and UPDATE events for size-related + * tables for OM Insights. + */ +public interface OmTableHandler { + + /** + * Handles a PUT event for size-related tables by updating both the data + * sizes and their corresponding record counts in the tables. + * + * @param event The PUT event to be processed. + * @param tableName Table name associated with the event. + * @param objectCountMap A map storing object counts. + * @param unReplicatedSizeMap A map storing unReplicated size counts. + * @param replicatedSizeMap A map storing replicated size counts. + */ + void handlePutEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap); + + + /** + * Handles a DELETE event for size-related tables by updating both the data + * sizes and their corresponding record counts in the tables. + * + * @param event The DELETE event to be processed. + * @param tableName Table name associated with the event. + * @param objectCountMap A map storing object counts. + * @param unReplicatedSizeMap A map storing unReplicated size counts. + * @param replicatedSizeMap A map storing replicated size counts. + */ + void handleDeleteEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap); + + + /** + * Handles an UPDATE event for size-related tables by updating both the data + * sizes and their corresponding record counts in the tables. + * + * @param event The UPDATE event to be processed. + * @param tableName Table name associated with the event. + * @param objectCountMap A map storing object counts. + * @param unReplicatedSizeMap A map storing unReplicated size counts. + * @param replicatedSizeMap A map storing replicated size counts. + */ + void handleUpdateEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap); + + + /** + * Returns a triple with the total count of records (left), total unreplicated + * size (middle), and total replicated size (right) in the given iterator. + * Increments count for each record and adds the dataSize if a record's value + * is an instance of OmKeyInfo,RepeatedOmKeyInfo. + * If the iterator is null, returns (0, 0, 0). + * + * @param iterator The iterator over the table to be iterated. + * @return A Triple with three Long values representing the count, + * unReplicated size and replicated size. + * @throws IOException If an I/O error occurs during the iterator traversal. + */ + Triple getTableSizeAndCount( + TableIterator> iterator) + throws IOException; + + + /** + * Returns the count key for the given table. + * + * @param tableName The name of the table. + * @return The count key for the table. 
+ */ + default String getTableCountKeyFromTable(String tableName) { + return tableName + "Count"; + } + + /** + * Returns the replicated size key for the given table. + * + * @param tableName The name of the table. + * @return The replicated size key for the table. + */ + default String getReplicatedSizeKeyFromTable(String tableName) { + return tableName + "ReplicatedDataSize"; + } + + /** + * Returns the unreplicated size key for the given table. + * + * @param tableName The name of the table. + * @return The unreplicated size key for the table. + */ + default String getUnReplicatedSizeKeyFromTable(String tableName) { + return tableName + "UnReplicatedDataSize"; + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java index c814d9d9e33f..3e84f311c942 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java @@ -26,8 +26,6 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats; @@ -37,22 +35,20 @@ import java.io.IOException; import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.Collection; import java.util.HashMap; import java.util.Iterator; -import java.util.List; import java.util.Map; - - +import java.util.Collection; import java.util.Map.Entry; +import java.util.ArrayList; +import java.util.List; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; -import static org.jooq.impl.DSL.currentTimestamp; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.jooq.impl.DSL.select; import static org.jooq.impl.DSL.using; +import static org.jooq.impl.DSL.currentTimestamp; /** * Class to iterate over the OM DB and store the total counts of volumes, @@ -65,14 +61,21 @@ public class OmTableInsightTask implements ReconOmTask { private GlobalStatsDao globalStatsDao; private Configuration sqlConfiguration; private ReconOMMetadataManager reconOMMetadataManager; + private Map tableHandlers; @Inject public OmTableInsightTask(GlobalStatsDao globalStatsDao, - Configuration sqlConfiguration, - ReconOMMetadataManager reconOMMetadataManager) { + Configuration sqlConfiguration, + ReconOMMetadataManager reconOMMetadataManager) { this.globalStatsDao = globalStatsDao; this.sqlConfiguration = sqlConfiguration; this.reconOMMetadataManager = reconOMMetadataManager; + + // Initialize table handlers + tableHandlers = new HashMap<>(); + tableHandlers.put(OPEN_KEY_TABLE, new OpenKeysInsightHandler()); + tableHandlers.put(OPEN_FILE_TABLE, new OpenKeysInsightHandler()); + tableHandlers.put(DELETED_TABLE, new DeletedKeysInsightHandler()); } /** @@ -90,8 +93,8 @@ public OmTableInsightTask(GlobalStatsDao globalStatsDao, @Override public Pair reprocess(OMMetadataManager 
omMetadataManager) { HashMap objectCountMap = initializeCountMap(); - HashMap unReplicatedSizeCountMap = initializeSizeMap(false); - HashMap replicatedSizeCountMap = initializeSizeMap(true); + HashMap unReplicatedSizeMap = initializeSizeMap(false); + HashMap replicatedSizeMap = initializeSizeMap(true); for (String tableName : getTaskTables()) { Table table = omMetadataManager.getTable(tableName); @@ -100,16 +103,16 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { return new ImmutablePair<>(getTaskName(), false); } - try ( - TableIterator> iterator - = table.iterator()) { - if (getTablesToCalculateSize().contains(tableName)) { - Triple details = getTableSizeAndCount(iterator); + try (TableIterator> iterator + = table.iterator()) { + if (tableHandlers.containsKey(tableName)) { + Triple details = + tableHandlers.get(tableName).getTableSizeAndCount(iterator); objectCountMap.put(getTableCountKeyFromTable(tableName), details.getLeft()); - unReplicatedSizeCountMap.put( + unReplicatedSizeMap.put( getUnReplicatedSizeKeyFromTable(tableName), details.getMiddle()); - replicatedSizeCountMap.put(getReplicatedSizeKeyFromTable(tableName), + replicatedSizeMap.put(getReplicatedSizeKeyFromTable(tableName), details.getRight()); } else { long count = Iterators.size(iterator); @@ -124,72 +127,17 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { if (!objectCountMap.isEmpty()) { writeDataToDB(objectCountMap); } - if (!unReplicatedSizeCountMap.isEmpty()) { - writeDataToDB(unReplicatedSizeCountMap); + if (!unReplicatedSizeMap.isEmpty()) { + writeDataToDB(unReplicatedSizeMap); } - if (!replicatedSizeCountMap.isEmpty()) { - writeDataToDB(replicatedSizeCountMap); + if (!replicatedSizeMap.isEmpty()) { + writeDataToDB(replicatedSizeMap); } LOG.info("Completed a 'reprocess' run of OmTableInsightTask."); return new ImmutablePair<>(getTaskName(), true); } - /** - * Returns a triple with the total count of records (left), total unreplicated - * size (middle), and total replicated size (right) in the given iterator. - * Increments count for each record and adds the dataSize if a record's value - * is an instance of OmKeyInfo. If the iterator is null, returns (0, 0, 0). - * - * @param iterator The iterator over the table to be iterated. - * @return A Triple with three Long values representing the count, - * unreplicated size and replicated size. - * @throws IOException If an I/O error occurs during the iterator traversal. - */ - private Triple getTableSizeAndCount( - TableIterator> iterator) - throws IOException { - long count = 0; - long unReplicatedSize = 0; - long replicatedSize = 0; - - if (iterator != null) { - while (iterator.hasNext()) { - Table.KeyValue kv = iterator.next(); - if (kv != null && kv.getValue() != null) { - if (kv.getValue() instanceof OmKeyInfo) { - OmKeyInfo omKeyInfo = (OmKeyInfo) kv.getValue(); - unReplicatedSize += omKeyInfo.getDataSize(); - replicatedSize += omKeyInfo.getReplicatedSize(); - count++; - } - if (kv.getValue() instanceof RepeatedOmKeyInfo) { - RepeatedOmKeyInfo repeatedOmKeyInfo = (RepeatedOmKeyInfo) kv - .getValue(); - Pair result = repeatedOmKeyInfo.getTotalSize(); - unReplicatedSize += result.getRight(); - replicatedSize += result.getLeft(); - // Since we can have multiple deleted keys of same name - count += repeatedOmKeyInfo.getOmKeyInfoList().size(); - } - } - } - } - - return Triple.of(count, unReplicatedSize, replicatedSize); - } - - /** - * Returns a collection of table names that require data size calculation. 
- */ - public Collection getTablesToCalculateSize() { - List taskTables = new ArrayList<>(); - taskTables.add(OPEN_KEY_TABLE); - taskTables.add(OPEN_FILE_TABLE); - taskTables.add(DELETED_TABLE); - return taskTables; - } - @Override public String getTaskName() { return "OmTableInsightTask"; @@ -211,10 +159,9 @@ public Pair process(OMUpdateEventBatch events) { Iterator eventIterator = events.getIterator(); // Initialize maps to store count and size information HashMap objectCountMap = initializeCountMap(); - HashMap unreplicatedSizeCountMap = initializeSizeMap(false); - HashMap replicatedSizeCountMap = initializeSizeMap(true); + HashMap unReplicatedSizeMap = initializeSizeMap(false); + HashMap replicatedSizeMap = initializeSizeMap(true); final Collection taskTables = getTaskTables(); - final Collection sizeRelatedTables = getTablesToCalculateSize(); // Process each update event while (eventIterator.hasNext()) { @@ -223,22 +170,21 @@ public Pair process(OMUpdateEventBatch events) { if (!taskTables.contains(tableName)) { continue; } - try { switch (omdbUpdateEvent.getAction()) { case PUT: - handlePutEvent(omdbUpdateEvent, tableName, sizeRelatedTables, - objectCountMap, unreplicatedSizeCountMap, replicatedSizeCountMap); + handlePutEvent(omdbUpdateEvent, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); break; case DELETE: - handleDeleteEvent(omdbUpdateEvent, tableName, sizeRelatedTables, - objectCountMap, unreplicatedSizeCountMap, replicatedSizeCountMap); + handleDeleteEvent(omdbUpdateEvent, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); break; case UPDATE: - handleUpdateEvent(omdbUpdateEvent, tableName, sizeRelatedTables, - objectCountMap, unreplicatedSizeCountMap, replicatedSizeCountMap); + handleUpdateEvent(omdbUpdateEvent, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); break; default: @@ -256,11 +202,11 @@ public Pair process(OMUpdateEventBatch events) { if (!objectCountMap.isEmpty()) { writeDataToDB(objectCountMap); } - if (!unreplicatedSizeCountMap.isEmpty()) { - writeDataToDB(unreplicatedSizeCountMap); + if (!unReplicatedSizeMap.isEmpty()) { + writeDataToDB(unReplicatedSizeMap); } - if (!replicatedSizeCountMap.isEmpty()) { - writeDataToDB(replicatedSizeCountMap); + if (!replicatedSizeMap.isEmpty()) { + writeDataToDB(replicatedSizeMap); } LOG.info("Completed a 'process' run of OmTableInsightTask."); return new ImmutablePair<>(getTaskName(), true); @@ -268,65 +214,34 @@ public Pair process(OMUpdateEventBatch events) { private void handlePutEvent(OMDBUpdateEvent event, String tableName, - Collection sizeRelatedTables, HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { - - if (sizeRelatedTables.contains(tableName)) { - handleSizeRelatedTablePutEvent(event, tableName, objectCountMap, - unreplicatedSizeCountMap, replicatedSizeCountMap); - } else { - String countKey = getTableCountKeyFromTable(tableName); - objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); - } - } - - private void handleSizeRelatedTablePutEvent( - OMDBUpdateEvent event, - String tableName, - HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { - - String countKey = getTableCountKeyFromTable(tableName); - String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); - String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); - - if (event.getValue() instanceof OmKeyInfo) { - // Handle PUT for OpenKeyTable & OpenFileTable - OmKeyInfo 
omKeyInfo = (OmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size + omKeyInfo.getDataSize()); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size + omKeyInfo.getReplicatedSize()); - } else if (event.getValue() instanceof RepeatedOmKeyInfo) { - // Handle PUT for DeletedTable - RepeatedOmKeyInfo repeatedOmKeyInfo = - (RepeatedOmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, - (k, count) -> count + repeatedOmKeyInfo.getOmKeyInfoList().size()); - Pair result = repeatedOmKeyInfo.getTotalSize(); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size + result.getLeft()); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size + result.getRight()); + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) + throws IOException { + OmTableHandler tableHandler = tableHandlers.get(tableName); + if (event.getValue() != null) { + if (tableHandler != null) { + tableHandler.handlePutEvent(event, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); + } else { + String countKey = getTableCountKeyFromTable(tableName); + objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); + } } } private void handleDeleteEvent(OMDBUpdateEvent event, String tableName, - Collection sizeRelatedTables, HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { - + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) + throws IOException { + OmTableHandler tableHandler = tableHandlers.get(tableName); if (event.getValue() != null) { - if (sizeRelatedTables.contains(tableName)) { - handleSizeRelatedTableDeleteEvent(event, tableName, objectCountMap, - unreplicatedSizeCountMap, replicatedSizeCountMap); + if (tableHandler != null) { + tableHandler.handleDeleteEvent(event, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); } else { String countKey = getTableCountKeyFromTable(tableName); objectCountMap.computeIfPresent(countKey, @@ -335,109 +250,28 @@ private void handleDeleteEvent(OMDBUpdateEvent event, } } - private void handleSizeRelatedTableDeleteEvent( - OMDBUpdateEvent event, - String tableName, - HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { - - String countKey = getTableCountKeyFromTable(tableName); - String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); - String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); - - if (event.getValue() instanceof OmKeyInfo) { - // Handle DELETE for OpenKeyTable & OpenFileTable - OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, - (k, count) -> count > 0 ? count - 1L : 0L); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size > omKeyInfo.getDataSize() ? - size - omKeyInfo.getDataSize() : 0L); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size > omKeyInfo.getReplicatedSize() ? - size - omKeyInfo.getReplicatedSize() : 0L); - } else if (event.getValue() instanceof RepeatedOmKeyInfo) { - // Handle DELETE for DeletedTable - RepeatedOmKeyInfo repeatedOmKeyInfo = - (RepeatedOmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, (k, count) -> count > 0 ? 
- count - repeatedOmKeyInfo.getOmKeyInfoList().size() : 0L); - Pair result = repeatedOmKeyInfo.getTotalSize(); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size > result.getLeft() ? size - result.getLeft() : 0L); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size > result.getRight() ? size - result.getRight() : - 0L); - } - } private void handleUpdateEvent(OMDBUpdateEvent event, String tableName, - Collection sizeRelatedTables, HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + OmTableHandler tableHandler = tableHandlers.get(tableName); if (event.getValue() != null) { - if (sizeRelatedTables.contains(tableName)) { + if (tableHandler != null) { // Handle update for only size related tables - handleSizeRelatedTableUpdateEvent(event, tableName, objectCountMap, - unreplicatedSizeCountMap, replicatedSizeCountMap); + tableHandler.handleUpdateEvent(event, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); } } } - - private void handleSizeRelatedTableUpdateEvent( - OMDBUpdateEvent event, - String tableName, - HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { - - if (event.getOldValue() == null) { - LOG.warn("Update event does not have the old Key Info for {}.", - event.getKey()); - return; - } - String countKey = getTableCountKeyFromTable(tableName); - String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); - String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); - - // In Update event the count for the open table will not change. So we don't - // need to update the count. Except for RepeatedOmKeyInfo, for which the - // size of omKeyInfoList can change - if (event.getValue() instanceof OmKeyInfo) { - // Handle UPDATE for OpenKeyTable & OpenFileTable - OmKeyInfo oldKeyInfo = (OmKeyInfo) event.getOldValue(); - OmKeyInfo newKeyInfo = (OmKeyInfo) event.getValue(); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size - oldKeyInfo.getDataSize() + - newKeyInfo.getDataSize()); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size - oldKeyInfo.getReplicatedSize() + - newKeyInfo.getReplicatedSize()); - } else if (event.getValue() instanceof RepeatedOmKeyInfo) { - // Handle UPDATE for DeletedTable - RepeatedOmKeyInfo oldRepeatedOmKeyInfo = - (RepeatedOmKeyInfo) event.getOldValue(); - RepeatedOmKeyInfo newRepeatedOmKeyInfo = - (RepeatedOmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, - (k, count) -> count > 0 ? - count - oldRepeatedOmKeyInfo.getOmKeyInfoList().size() + - newRepeatedOmKeyInfo.getOmKeyInfoList().size() : 0L); - Pair oldSize = oldRepeatedOmKeyInfo.getTotalSize(); - Pair newSize = newRepeatedOmKeyInfo.getTotalSize(); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size - oldSize.getLeft() + newSize.getLeft()); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size - oldSize.getRight() + newSize.getRight()); - } - } - - + /** + * Write the updated count and size information to the database. + * + * @param dataMap Map containing the updated count and size information. 
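Editor's note: the writeDataToDB() flush documented above is essentially a batched upsert of the accumulated stats. The approximation below replaces the GlobalStats table and its jOOQ DAO with an in-memory map and invented types; only the insert-vs-update partitioning is the point.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class StatsUpserterSketch {
  static final class StatRow {
    final String key;
    final long value;
    StatRow(String key, long value) { this.key = key; this.value = value; }
  }

  private final Map<String, StatRow> store = new HashMap<>();  // pretend table

  void writeDataToDB(Map<String, Long> dataMap) {
    List<StatRow> inserts = new ArrayList<>();
    List<StatRow> updates = new ArrayList<>();
    for (Map.Entry<String, Long> e : dataMap.entrySet()) {
      StatRow row = new StatRow(e.getKey(), e.getValue());
      if (store.containsKey(e.getKey())) {
        updates.add(row);     // key already tracked -> batched update
      } else {
        inserts.add(row);     // new key -> batched insert
      }
    }
    inserts.forEach(r -> store.put(r.key, r));
    updates.forEach(r -> store.put(r.key, r));
  }
}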
+ */ private void writeDataToDB(Map dataMap) { List insertGlobalStats = new ArrayList<>(); List updateGlobalStats = new ArrayList<>(); @@ -461,6 +295,11 @@ private void writeDataToDB(Map dataMap) { globalStatsDao.update(updateGlobalStats); } + /** + * Initializes and returns a count map with the counts for the tables. + * + * @return The count map containing the counts for each table. + */ private HashMap initializeCountMap() { Collection tables = getTaskTables(); HashMap objectCountMap = new HashMap<>(tables.size()); @@ -478,11 +317,13 @@ private HashMap initializeCountMap() { * @return The size map containing the size counts for each table. */ private HashMap initializeSizeMap(boolean replicated) { - Collection tables = getTablesToCalculateSize(); - HashMap sizeCountMap = new HashMap<>(tables.size()); - for (String tableName : tables) { - String key = replicated ? getReplicatedSizeKeyFromTable(tableName) : - getUnReplicatedSizeKeyFromTable(tableName); + HashMap sizeCountMap = new HashMap<>(); + for (Map.Entry entry : tableHandlers.entrySet()) { + String tableName = entry.getKey(); + OmTableHandler tableHandler = entry.getValue(); + String key = + replicated ? tableHandler.getReplicatedSizeKeyFromTable(tableName) : + tableHandler.getUnReplicatedSizeKeyFromTable(tableName); sizeCountMap.put(key, getValueForKey(key)); } return sizeCountMap; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java new file mode 100644 index 000000000000..7a27d29d8f28 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java @@ -0,0 +1,163 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.commons.lang3.tuple.Triple; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.HashMap; + +/** + * Manages records in the OpenKey Table, updating counts and sizes of + * open keys in the backend. + */ +public class OpenKeysInsightHandler implements OmTableHandler { + + private static final Logger LOG = + LoggerFactory.getLogger(OpenKeysInsightHandler.class); + + /** + * Invoked by the process method to add information on those keys that have + * been open in the backend. + */ + @Override + public void handlePutEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + + String countKey = getTableCountKeyFromTable(tableName); + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + if (event.getValue() != null) { + OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); + unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size + omKeyInfo.getDataSize()); + replicatedSizeMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size + omKeyInfo.getReplicatedSize()); + } else { + LOG.warn("Put event does not have the Key Info for {}.", + event.getKey()); + } + } + + /** + * Invoked by the process method to delete information on those keys that are + * no longer closed in the backend. + */ + @Override + public void handleDeleteEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + + String countKey = getTableCountKeyFromTable(tableName); + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + if (event.getValue() != null) { + OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, + (k, count) -> count > 0 ? count - 1L : 0L); + unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size > omKeyInfo.getDataSize() ? + size - omKeyInfo.getDataSize() : 0L); + replicatedSizeMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size > omKeyInfo.getReplicatedSize() ? + size - omKeyInfo.getReplicatedSize() : 0L); + } else { + LOG.warn("Delete event does not have the Key Info for {}.", + event.getKey()); + } + } + + /** + * Invoked by the process method to update information on those open keys that + * have been updated in the backend. 
+ */ + @Override + public void handleUpdateEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + + if (event.getValue() != null) { + if (event.getOldValue() == null) { + LOG.warn("Update event does not have the old Key Info for {}.", + event.getKey()); + return; + } + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + // In Update event the count for the open table will not change. So we + // don't need to update the count. + OmKeyInfo oldKeyInfo = (OmKeyInfo) event.getOldValue(); + OmKeyInfo newKeyInfo = (OmKeyInfo) event.getValue(); + unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size - oldKeyInfo.getDataSize() + + newKeyInfo.getDataSize()); + replicatedSizeMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size - oldKeyInfo.getReplicatedSize() + + newKeyInfo.getReplicatedSize()); + } else { + LOG.warn("Update event does not have the Key Info for {}.", + event.getKey()); + } + } + + /** + * This method is called by the reprocess method. It calculates the record + * counts for both the open key table and the open file table. Additionally, + * it computes the sizes of both replicated and unreplicated keys + * that are currently open in the backend. + */ + @Override + public Triple getTableSizeAndCount( + TableIterator> iterator) + throws IOException { + long count = 0; + long unReplicatedSize = 0; + long replicatedSize = 0; + + if (iterator != null) { + while (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + if (kv != null && kv.getValue() != null) { + OmKeyInfo omKeyInfo = (OmKeyInfo) kv.getValue(); + unReplicatedSize += omKeyInfo.getDataSize(); + replicatedSize += omKeyInfo.getReplicatedSize(); + count++; + } + } + } + return Triple.of(count, unReplicatedSize, replicatedSize); + } + +} diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json index 204609f66fec..79ff9f8e7c6d 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json @@ -44,6 +44,42 @@ "replicationType": "RATIS", "replicationFactor": 1, "leaderNode": "localhost1.storage.enterprise.com" + }, + { + "pipelineID": "02e3d908-ff01-4ce6-ad75-f3ec79bcc710", + "replicationType": "RATIS", + "replicationFactor": 3, + "leaderNode": "localhost1.storage.enterprise.com" + }, + { + "pipelineID": "09d3a478-ff01-4ce6-ad75-f3ec79bcc711", + "replicationType": "RATIS", + "replicationFactor": 1, + "leaderNode": "localhost1.storage.enterprise.com" + }, + { + "pipelineID": "02e3d908-ff01-4ce6-ad75-f3ec79bcc712", + "replicationType": "RATIS", + "replicationFactor": 3, + "leaderNode": "localhost1.storage.enterprise.com" + }, + { + "pipelineID": "09d3a478-ff01-4ce6-ad75-f3ec79bcc713", + "replicationType": "RATIS", + "replicationFactor": 1, + "leaderNode": "localhost1.storage.enterprise.com" + }, + { + "pipelineID": "02e3d908-ff01-4ce6-ad75-f3ec79bcc714", + "replicationType": "RATIS", + "replicationFactor": 3, + "leaderNode": "localhost1.storage.enterprise.com" + }, + { + "pipelineID": "09d3a478-ff01-4ce6-ad75-f3ec79bcc715", + "replicationType": "RATIS", + "replicationFactor": 1, + "leaderNode": "localhost1.storage.enterprise.com" } ], "containers": 80, @@ -1000,7 +1036,7 @@ ] }, "keys": { - 
"totalCount": 534, + "totalCount": 15, "keys": [ { "Volume": "vol-0-20448", @@ -1062,7 +1098,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-0-77505", + "Key": "key-0-77506", "DataSize": 10240, "Versions": [ 0 @@ -1081,7 +1117,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-21-64511", + "Key": "key-21-64512", "DataSize": 5692407, "Versions": [ 0 @@ -1100,7 +1136,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-22-69104", + "Key": "key-22-69105", "DataSize": 189407, "Versions": [ 0 @@ -1119,7 +1155,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-0-77505", + "Key": "key-0-77507", "DataSize": 10240, "Versions": [ 0 @@ -1138,7 +1174,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-21-64511", + "Key": "key-21-64513", "DataSize": 5692407, "Versions": [ 0 @@ -1157,7 +1193,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-22-69104", + "Key": "key-22-69106", "DataSize": 189407, "Versions": [ 0 @@ -1176,7 +1212,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-0-77505", + "Key": "key-0-77508", "DataSize": 10240, "Versions": [ 0 @@ -1195,7 +1231,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-21-64511", + "Key": "key-21-64514", "DataSize": 5692407, "Versions": [ 0 @@ -1214,7 +1250,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-22-69104", + "Key": "key-22-69107", "DataSize": 189407, "Versions": [ 0 @@ -1233,7 +1269,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-0-77505", + "Key": "key-0-77509", "DataSize": 10240, "Versions": [ 0 @@ -1252,7 +1288,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-21-64511", + "Key": "key-21-64515", "DataSize": 5692407, "Versions": [ 0 @@ -1271,7 +1307,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-22-69104", + "Key": "key-22-69109", "DataSize": 189407, "Versions": [ 0 @@ -3729,7 +3765,6 @@ "totalDeletedKeys": 3 }, "omMismatch":{ - "lastKey":11, "containerDiscrepancyInfo": [ { "containerId": 1, @@ -3924,12 +3959,7 @@ } ], "existsAt": "SCM" - } - ] - }, - "omMismatch1":{ - "lastKey":21, - "containerDiscrepancyInfo": [ + }, { "containerId": 11, "numberOfKeys": 1, @@ -4162,12 +4192,7 @@ } ] }, - "omMismatch2":{ - "lastKey": null, - "containerDiscrepancyInfo": [] - }, "scmMismatch":{ - "lastKey":11, "containerDiscrepancyInfo": [ { "containerId": 1, @@ -4362,12 +4387,7 @@ } ], "existsAt": "OM" - } - ] - }, - "scmMismatch1":{ - "lastKey":21, - "containerDiscrepancyInfo": [ + }, { "containerId": 11, "numberOfKeys": 1, @@ -4561,12 +4581,7 @@ } ], "existsAt": "OM" - } - ] - }, - "scmMismatch2":{ - "lastKey":31, - "containerDiscrepancyInfo": [ + }, { "containerId": 21, "numberOfKeys": 1, @@ -4760,12 +4775,7 @@ } ], "existsAt": "OM" - } - ] - }, - "scmMismatch3":{ - "lastKey":41, - "containerDiscrepancyInfo": [ + }, { "containerId": 31, "numberOfKeys": 1, @@ -4959,12 +4969,7 @@ } ], "existsAt": "OM" - } - ] - }, - "scmMismatch4":{ - "lastKey":51, - "containerDiscrepancyInfo": [ + }, { "containerId": 41, "numberOfKeys": 1, @@ -5161,12 +5166,7 @@ } ] }, - "scmMismatch5":{ - "lastKey": null, - "containerDiscrepancyInfo": [] - }, "nonFSO": { - "lastKey": "11", "keysSummary": { "totalUnreplicatedDataSize": 10485760, "totalReplicatedDataSize": 31457280, @@ -5196,13 +5196,7 @@ "requiredNodes": 1, "replicationType": "RATIS" } - } - ], - "status": "OK" - }, - "nonFSO1": { - "lastKey": 
"21", - "nonFSO": [ + }, { "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2440/110569623850191713", "path": "nonfso 11", @@ -5226,13 +5220,7 @@ "requiredNodes": 1, "replicationType": "RATIS" } - } - ], - "status": "OK" - }, - "nonFSO2": { - "lastKey": "31", - "nonFSO": [ + }, { "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2440/110569623850191713", "path": "nonfso 21", @@ -5260,19 +5248,7 @@ ], "status": "OK" }, - "nonFSO3": { - "keysSummary": { - "totalUnreplicatedDataSize": 10485760, - "totalReplicatedDataSize": 31457280, - "totalOpenKeys": 10 - }, - "lastKey": "", - "replicatedDataSize": 0, - "unreplicatedDataSize": 0, - "status": "OK" - }, "fso": { - "lastKey": "11", "fso": [ { "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2401/110569623850191713", @@ -5400,14 +5376,7 @@ "requiredNodes": 1, "replicationType": "RATIS" } - - } - ], - "status": "OK" - }, - "fso1": { - "lastKey": "21", - "fso": [ + }, { "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2411/110569623850191713", "path": "11", @@ -5515,13 +5484,7 @@ "requiredNodes": 1, "replicationType": "RATIS" } - } - ], - "status": "OK" - }, - "fso2": { - "lastKey": "31", - "fso": [ + }, { "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2411/110569623850191713", "path": "21", @@ -5645,19 +5608,7 @@ ], "status": "OK" }, - "fso3": { - "keysSummary": { - "totalUnreplicatedDataSize": 10485760, - "totalReplicatedDataSize": 31457280, - "totalOpenKeys": 10 - }, - "lastKey": "", - "replicatedDataSize": 0, - "unreplicatedDataSize": 0, - "status": "OK" - }, "keydeletePending":{ - "lastKey": "/volume/bucket1/rcmeevblsf/106/-9223372036843950335", "keysSummary": { "totalUnreplicatedDataSize": 29291, "totalReplicatedDataSize": 87873, @@ -6015,16 +5966,7 @@ "updateIDset": true } ] - } - ], - "status": "OK" - }, - - "keydeletePending1": { - "lastKey":"/-9223372036854775552/-9223372036854775040/-9223372036852420095/2421/110569623850191", - "replicatedTotal": -1530804718628866300, - "unreplicatedTotal": -1530804718628866300, - "deletedKeyInfo": [ + }, { "omKeyInfoList": [ { @@ -6081,15 +6023,7 @@ "updateIDset": false } ] - } - ], - "status": "OK" - }, - "keydeletePending2": { - "lastKey":"31", - "replicatedTotal": -1530804718628866300, - "unreplicatedTotal": -1530804718628866300, - "deletedKeyInfo": [ + }, { "omKeyInfoList": [ { @@ -6205,12 +6139,7 @@ ], "status": "OK" }, - "keydeletePending3": { - "lastKey":"", - "deletedKeyInfo": [] - }, "deleted": { - "lastKey": "11", "containers": [ { "containerId": 1, @@ -6381,12 +6310,7 @@ "healthy": true } ] - } - ] - }, - "deleted1": { - "lastKey": "21", - "containers": [ + }, { "containerId": 11, "numberOfKeys": 2, @@ -6556,12 +6480,7 @@ "healthy": true } ] - } - ] - }, - "deleted2": { - "lastKey": "31", - "containers": [ + }, { "containerId": 21, "numberOfKeys": 2, @@ -6731,12 +6650,7 @@ "healthy": true } ] - } - ] - }, - "deleted3": { - "lastKey": "41", - "containers": [ + }, { "containerId": 31, "numberOfKeys": 2, @@ -6773,8 +6687,71 @@ } ] }, - "deleted4": { - "lastKey": null, - "containers": [] + "dirdeletePending": { + "replicatedDataSize": 0, + "unreplicatedDataSize": 0, + "deletedDirInfo": [ + { + "key": "/-4611686018427388160/-9223372036854775552/-9223372036854764286/231010153900/-9223372036854760111", + "path": ".Trash/hadoop/231010153900", + "inStateSince": 1696952297266, + "size": 17289, + "replicatedSize": 100, + "replicationInfo": { + "replicationFactor": "THREE", + 
"requiredNodes": 3, + "replicationType": "RATIS" + } + }, + { + "key": "/-4611686018427388160/-9223372036854775552/-9223372036854775552/dir3/-9223372036854774015", + "path": "dir1", + "inStateSince": 1696954980154, + "size": 1200, + "replicatedSize": 0, + "replicationInfo": { + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + } + }, + { + "key": "/-4611686018427388160/-9223372036854775552/-9223372036854764286/231010153900/-9223372036854760191", + "path": ".Trash/hadoop/231010153900", + "inStateSince": 1696952297266, + "size": 17289, + "replicatedSize": 0, + "replicationInfo": { + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + } + }, + { + "key": "/-4611686018427388160/-9223372036854775552/-9223372036854775552/dir3/-9223372036854774112", + "path": "dir21", + "inStateSince": 1696954980154, + "size": 17289, + "replicatedSize": 0, + "replicationInfo": { + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + } + }, + { + "key": "/-4611686018427388160/-9223372036854775552/-9223372036854775552/dir3/-922337203685477303", + "path": "dir22", + "inStateSince": 1696954980900, + "size": 20289, + "replicatedSize": 0, + "replicationInfo": { + "replicationFactor": "THREE", + "requiredNodes": 3, + "replicationType": "RATIS" + } + } + ], + "status": "OK" } } \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/routes.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/routes.json index 0bf0c69f5459..1e1f79d18754 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/routes.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/routes.json @@ -37,39 +37,17 @@ "/keys/open/summary": "/keysOpenSummary", "/keys/deletePending/summary": "/keysdeletePendingSummary", - "/containers/mismatch?limit=*&prevKey=11&missingIn=OM" : "/omMismatch1", - "/containers/mismatch?limit=*&prevKey=21&missingIn=OM" : "/omMismatch2", - "/containers/mismatch?limit=*&prevKey=31&missingIn=OM" : "/omMismatch3", - "/containers/mismatch?limit=*&prevKey=41&missingIn=OM" : "/omMismatch4", + "/containers/mismatch?&missingIn=OM" : "/omMismatch", + "/containers/mismatch?limit=*&missingIn=OM" : "/omMismatch", - "/containers/mismatch?limit=*&prevKey=*&missingIn=OM" : "/omMismatch", + "/containers/mismatch?&missingIn=SCM" : "/scmMismatch", + "/containers/mismatch?limit=*&missingIn=SCM" : "/scmMismatch", - "/containers/mismatch?limit=*&prevKey=11&missingIn=SCM" : "/scmMismatch1", - "/containers/mismatch?limit=*&prevKey=21&missingIn=SCM" : "/scmMismatch2", - "/containers/mismatch?limit=*&prevKey=31&missingIn=SCM" : "/scmMismatch3", - "/containers/mismatch?limit=*&prevKey=41&missingIn=SCM" : "/scmMismatch4", - "/containers/mismatch?limit=*&prevKey=51&missingIn=SCM" : "/scmMismatch5", - - "/containers/mismatch?limit=*&prevKey=*&missingIn=SCM" : "/scmMismatch", - - "/keys/open?includeFso=false&includeNonFso=true&limit=*&prevKey=11": "/nonFSO1", - "/keys/open?includeFso=false&includeNonFso=true&limit=*&prevKey=21": "/nonFSO2", - "/keys/open?includeFso=false&includeNonFso=true&limit=*&prevKey=31": "/nonFSO3", "/keys/open?includeFso=false&includeNonFso=true&limit=*": "/nonFSO", - - "/keys/open?includeFso=true&includeNonFso=false&limit=*&prevKey=11": "/fso1", - "/keys/open?includeFso=true&includeNonFso=false&limit=*&prevKey=21": "/fso2", - "/keys/open?includeFso=true&includeNonFso=false&limit=*&prevKey=31": "/fso3", 
"/keys/open?includeFso=true&includeNonFso=false&limit=*": "/fso", - "/keys/deletePending?limit=*&prevKey=/volume/bucket1/rcmeevblsf/106/-9223372036843950335" : "/keydeletePending1", - "/keys/deletePending?limit=*&prevKey=/-9223372036854775552/-9223372036854775040/-9223372036852420095/2421/110569623850191" : "/keydeletePending2", - "/keys/deletePending?limit=*&prevKey=31" : "/keydeletePending3", "/keys/deletePending?limit=*" : "/keydeletePending", - "/containers/mismatch/deleted?limit=*&prevKey": "/deleted", - "/containers/mismatch/deleted?limit=*&prevKey=11": "/deleted1", - "/containers/mismatch/deleted?limit=*&prevKey=21": "/deleted2", - "/containers/mismatch/deleted?limit=*&prevKey=31": "/deleted3", - "/containers/mismatch/deleted?limit=*&prevKey=41": "/deleted4" + "/containers/mismatch/deleted?limit=*": "/deleted", + "/keys/deletePending/dirs?limit=*": "/dirdeletePending" } \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json index 3d1528fccb3e..41987c00ef35 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json @@ -16,7 +16,7 @@ "ag-charts-community": "^7.3.0", "ag-charts-react": "^7.3.0", "antd": "^3.26.20", - "axios": "^0.27.2", + "axios": "^0.28.0", "babel-jest": "^24.9.0", "babel-plugin-import": "^1.13.8", "classnames": "^2.3.2", @@ -25,7 +25,7 @@ "less": "^3.13.1", "less-loader": "^5.0.0", "moment": "^2.29.4", - "plotly.js": "^1.58.5", + "plotly.js": "^2.25.2", "pretty-ms": "^5.1.0", "react": "^16.8.6", "react-app-rewired": "^2.2.1", diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml index d54bc8663918..957a0ed5d152 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml @@ -17,7 +17,7 @@ specifiers: ag-charts-community: ^7.3.0 ag-charts-react: ^7.3.0 antd: ^3.26.20 - axios: ^0.27.2 + axios: ^0.28.0 babel-jest: ^24.9.0 babel-plugin-import: ^1.13.8 classnames: ^2.3.2 @@ -35,7 +35,7 @@ specifiers: less-loader: ^5.0.0 moment: ^2.29.4 npm-run-all: ^4.1.5 - plotly.js: ^1.58.5 + plotly.js: ^2.25.2 pretty-ms: ^5.1.0 react: ^16.8.6 react-app-rewired: ^2.2.1 @@ -61,7 +61,7 @@ dependencies: ag-charts-community: 7.3.0 ag-charts-react: 7.3.0_4uflhkpzmxcxyxkuqg2ofty3gq antd: 3.26.20_wcqkhtmu7mswc6yz4uyexck3ty - axios: 0.27.2 + axios: 0.28.0 babel-jest: 24.9.0_@babel+core@7.22.11 babel-plugin-import: 1.13.8 classnames: 2.3.2 @@ -70,12 +70,12 @@ dependencies: less: 3.13.1 less-loader: 5.0.0_less@3.13.1 moment: 2.29.4 - plotly.js: 1.58.5 + plotly.js: 2.25.2 pretty-ms: 5.1.0 react: 16.14.0 react-app-rewired: 2.2.1_react-scripts@3.4.4 react-dom: 16.14.0_react@16.14.0 - react-plotly.js: 2.6.0_f6dluzp62qf57yw3gl4ocsg3e4 + react-plotly.js: 2.6.0_qtjenpcawcnnxnr626ndcvhi4u react-router: 5.3.4_react@16.14.0 react-router-dom: 5.3.4_react@16.14.0 react-scripts: 3.4.4_bo7u2dcgnntwwyyxmecoaqdaee @@ -100,14 +100,6 @@ devDependencies: packages: - /3d-view/2.0.1: - resolution: {integrity: sha512-YSLRHXNpSziaaiK2R0pI5+JKguoJVbtWmIv9YyBFtl0+q42kQwJB/JUulbFR/1zYFm58ifjKQ6kVdgZ6tyKtCA==} - dependencies: - matrix-camera-controller: 2.1.4 - orbit-camera-controller: 4.0.0 - 
turntable-camera-controller: 3.0.1 - dev: false - /@ampproject/remapping/2.2.1: resolution: {integrity: sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==} engines: {node: '>=6.0.0'} @@ -239,7 +231,7 @@ packages: gensync: 1.0.0-beta.2 json5: 2.2.3 lodash: 4.17.21 - resolve: 1.15.0 + resolve: 1.22.4 semver: 5.7.2 source-map: 0.5.7 transitivePeerDependencies: @@ -351,7 +343,7 @@ packages: '@babel/helper-plugin-utils': 7.22.5 debug: 4.3.4 lodash.debounce: 4.0.8 - resolve: 1.15.0 + resolve: 1.22.4 transitivePeerDependencies: - supports-color dev: false @@ -1842,7 +1834,7 @@ packages: '@babel/core': 7.9.0 '@babel/helper-module-imports': 7.22.5 '@babel/helper-plugin-utils': 7.22.5 - resolve: 1.15.0 + resolve: 1.22.4 semver: 5.7.2 dev: false @@ -2712,6 +2704,10 @@ packages: d3-shape: 1.3.7 dev: false + /@plotly/d3/3.8.1: + resolution: {integrity: sha512-x49ThEu1FRA00kTso4Jdfyf2byaCPLBGmLjAYQz5OzaPyLUhHesX3/Nfv2OHEhynhdy2UB39DLXq6thYe2L2kg==} + dev: false + /@plotly/point-cluster/3.1.9: resolution: {integrity: sha512-MwaI6g9scKf68Orpr1pHZ597pYx9uP8UEFXLPbsCmuw3a84obwz6pnMXGc90VhgDNeNiLEdlmuK7CPo+5PIxXw==} dependencies: @@ -2727,6 +2723,10 @@ packages: pick-by-alias: 1.2.0 dev: false + /@plotly/regl/2.1.2: + resolution: {integrity: sha512-Mdk+vUACbQvjd0m/1JJjOOafmkp/EpmHjISsopEz5Av44CBq7rPC05HHNbYGKVyNUF2zmEoBS/TT0pd0SPFFyw==} + dev: false + /@sinclair/typebox/0.27.8: resolution: {integrity: sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==} dev: false @@ -3290,14 +3290,6 @@ packages: resolution: {integrity: sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==} dev: false - /a-big-triangle/1.0.3: - resolution: {integrity: sha512-AboEtoSPueZisde3Vr+7VRSfUIWBSGZUOtW3bJrOZXgIyK7dNNDdpDmOKJjg5GmJLlRKUONWV8lMgTK8MBhQWw==} - dependencies: - gl-buffer: 2.1.2 - gl-vao: 1.3.0 - weak-map: 1.0.8 - dev: false - /abab/2.0.6: resolution: {integrity: sha512-j2afSsaIENvHZN2B8GOpF566vZ5WVk5opAiMTvWgaQT8DkbOqsTfvNAvHoRGU2zzP8cPoqys+xHTRDWW8L+/BA==} dev: false @@ -3361,12 +3353,6 @@ packages: object-assign: 4.1.1 dev: false - /add-line-numbers/1.0.1: - resolution: {integrity: sha512-w+2a1malCvWwACQFBpZ5/uwmHGaGYT+aGIxA8ONF5vlhe6X/gD3eR8qVoLWa+5nnWAOq2LuPbrqDYqj1pn0WMg==} - dependencies: - pad-left: 1.0.2 - dev: false - /address/1.1.2: resolution: {integrity: sha512-aT6camzM4xEA54YVJYSqxz1kv4IHnQZRtThJJHhUMRExaU5spC7jX5ugSwTaTgJliIgs4VhZOk7htClvQ/LmRA==} engines: {node: '>= 0.12.0'} @@ -3380,12 +3366,6 @@ packages: regex-parser: 2.2.11 dev: false - /affine-hull/1.0.0: - resolution: {integrity: sha512-3QNG6+vFAwJvSZHsJYDJ/mt1Cxx9n5ffA+1Ohmj7udw0JuRgUVIXK0P9N9pCMuEdS3jCNt8GFX5q2fChq+GO3Q==} - dependencies: - robust-orientation: 1.2.1 - dev: false - /ag-charts-community/7.3.0: resolution: {integrity: sha512-118U6YsCMia6iZHaN06zT19rr2SYa92WB73pMVCKQlp2H3c19uKQ6Y6DfKG/nIfNUzFXZLHBwKIdZXsMWJdZww==} dev: false @@ -3439,20 +3419,6 @@ packages: resolution: {integrity: sha512-0V/PkoculFl5+0Lp47JoxUcO0xSxhIBvm+BxHdD/OgXNmdRpRHCFnKVuUoWyS9EzQP+otSGv0m9Lb4yVkQBn2A==} dev: false - /alpha-complex/1.0.0: - resolution: {integrity: sha512-rhsjKfc9tMF5QZc0NhKz/zFzMu2rvHxCP/PyJtEmMkV7M848YjIoQGDlNGp+vTqxXjA8wAY2OxgR1K54C2Awkg==} - dependencies: - circumradius: 1.0.0 - delaunay-triangulate: 1.1.6 - dev: false - - /alpha-shape/1.0.0: - resolution: {integrity: sha512-/V+fmmjtSA2yfQNq8iEqBxnPbjcOMXpM9Ny+yE/O7aLR7Q1oPzUc9bHH0fPHS3hUugUL/dHzTis6l3JirYOS/w==} - dependencies: - alpha-complex: 1.0.0 - 
simplicial-complex-boundary: 1.0.1 - dev: false - /alphanum-sort/1.0.2: resolution: {integrity: sha512-0FcBfdcmaumGPQ0qPn7Q5qTgz/ooXgIyp1rf8ik5bGX8mpE2YHjC0P/eyQvxu1GURYQgq9ozf2mteQ5ZD9YiyQ==} dev: false @@ -3792,6 +3758,13 @@ packages: resolution: {integrity: sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==} dev: false + /asn1.js/4.10.1: + resolution: {integrity: sha512-p32cOF5q0Zqs9uBiONKYLm6BClCoBCM5O9JfeUSlnQLBTxYdTK+pW+nXflm8UkKd2UYlEbYz5qEi0JuZR9ckSw==} + dependencies: + bn.js: 4.12.0 + inherits: 2.0.4 + minimalistic-assert: 1.0.1 + /asn1.js/5.4.1: resolution: {integrity: sha512-+I//4cYPccV8LdmBLiX8CYvf9Sp3vQsrqu2QNXRcrbiWvcx/UdlFiqUJJzxRQxgsZmvhXhn4cSKeSmoFjVdupA==} dependencies: @@ -3859,10 +3832,6 @@ packages: engines: {node: '>= 4.0.0'} dev: true - /atob-lite/1.0.0: - resolution: {integrity: sha512-ArXcmHR/vwSN37HLVap/Y5SKpz12CuEybxe1sIYl7th/S6SQPrVMNFt6rblJzCOAxn0SHbXpknUtqbAIeo3Aow==} - dev: false - /atob/2.1.2: resolution: {integrity: sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==} engines: {node: '>= 4.5.0'} @@ -3891,11 +3860,12 @@ packages: /aws4/1.12.0: resolution: {integrity: sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg==} - /axios/0.27.2: - resolution: {integrity: sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==} + /axios/0.28.0: + resolution: {integrity: sha512-Tu7NYoGY4Yoc7I+Npf9HhUMtEEpV7ZiLH9yndTCoNhcpBH0kwcvFbzYN9/u5QKI5A6uefjsNNWaz5olJVYS62Q==} dependencies: - follow-redirects: 1.15.2 + follow-redirects: 1.15.6 form-data: 4.0.0 + proxy-from-env: 1.1.0 transitivePeerDependencies: - debug dev: false @@ -3925,7 +3895,7 @@ packages: '@babel/types': 7.22.11 eslint: 6.8.0 eslint-visitor-keys: 1.3.0 - resolve: 1.15.0 + resolve: 1.22.4 transitivePeerDependencies: - supports-color dev: false @@ -4034,7 +4004,7 @@ packages: dependencies: '@babel/runtime': 7.9.0 cosmiconfig: 6.0.0 - resolve: 1.15.0 + resolve: 1.22.4 dev: false /babel-plugin-named-asset-import/0.3.8_@babel+core@7.9.0: @@ -4159,12 +4129,6 @@ packages: /balanced-match/1.0.2: resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} - /barycentric/1.0.1: - resolution: {integrity: sha512-47BuWXsenBbox4q1zqJrUoxq1oM1ysrYc5mdBACAwaP+CL+tcNauC3ybA0lzbIWzJCLZYMqebAx46EauTI2Nrg==} - dependencies: - robust-linear-solve: 1.0.0 - dev: false - /base/0.11.2: resolution: {integrity: sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==} engines: {node: '>=0.10.0'} @@ -4196,14 +4160,6 @@ packages: dependencies: tweetnacl: 0.14.5 - /big-rat/1.0.4: - resolution: {integrity: sha512-AubEohDDrak6urvKkFMIlwPWyQbJ/eq04YsK/SNipH7NNiPCYchjQNvWYK5vyyMmtGXAmNmsAjIcfkaDuTtd8g==} - dependencies: - bit-twiddle: 1.0.2 - bn.js: 4.12.0 - double-bits: 1.1.1 - dev: false - /big.js/5.2.2: resolution: {integrity: sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==} dev: false @@ -4224,15 +4180,12 @@ packages: /bindings/1.5.0: resolution: {integrity: sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==} + requiresBuild: true dependencies: file-uri-to-path: 1.0.0 dev: false optional: true - /bit-twiddle/0.0.2: - resolution: {integrity: sha512-76iFAOrkcuw5UPA30Pt32XaytMHXz/04JembgIwsQAp7ImHYSWNq1shBbrlWf6CUvh1+amQ81LI8hNhqQgsBEw==} - dev: false - /bit-twiddle/1.0.2: 
resolution: {integrity: sha512-B9UhK0DKFZhoTFcfvAzhqsjStvGJp9vYWf3+6SNTtdSQnvIgfkHbgHrg/e4+TH71N2GDu8tpmCVoyfrL1d7ntA==} dev: false @@ -4258,8 +4211,8 @@ packages: /bn.js/5.2.1: resolution: {integrity: sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ==} - /body-parser/1.20.1: - resolution: {integrity: sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==} + /body-parser/1.20.2: + resolution: {integrity: sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==} engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} dependencies: bytes: 3.1.2 @@ -4271,40 +4224,20 @@ packages: iconv-lite: 0.4.24 on-finished: 2.4.1 qs: 6.11.0 - raw-body: 2.5.1 + raw-body: 2.5.2 type-is: 1.6.18 unpipe: 1.0.0 transitivePeerDependencies: - supports-color dev: true - /body-parser/1.20.1_supports-color@6.1.0: - resolution: {integrity: sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==} - engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} - dependencies: - bytes: 3.1.2 - content-type: 1.0.5 - debug: 2.6.9_supports-color@6.1.0 - depd: 2.0.0 - destroy: 1.2.0 - http-errors: 2.0.0 - iconv-lite: 0.4.24 - on-finished: 2.4.1 - qs: 6.11.0 - raw-body: 2.5.1 - type-is: 1.6.18 - unpipe: 1.0.0 - transitivePeerDependencies: - - supports-color - dev: false - - /body-parser/1.20.2: + /body-parser/1.20.2_supports-color@6.1.0: resolution: {integrity: sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==} engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} dependencies: bytes: 3.1.2 content-type: 1.0.5 - debug: 2.6.9 + debug: 2.6.9_supports-color@6.1.0 depd: 2.0.0 destroy: 1.2.0 http-errors: 2.0.0 @@ -4316,7 +4249,7 @@ packages: unpipe: 1.0.0 transitivePeerDependencies: - supports-color - dev: true + dev: false /bonjour/3.5.0: resolution: {integrity: sha512-RaVTblr+OnEli0r/ud8InrU7D+G0y6aJhlxaLa6Pwty4+xoxboF1BsUI45tujvRpbj9dQVoglChqonGAsjEBYg==} @@ -4333,17 +4266,6 @@ packages: resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==} dev: false - /boundary-cells/2.0.2: - resolution: {integrity: sha512-/S48oUFYEgZMNvdqC87iYRbLBAPHYijPRNrNpm/sS8u7ijIViKm/hrV3YD4sx/W68AsG5zLMyBEditVHApHU5w==} - dev: false - - /box-intersect/1.0.2: - resolution: {integrity: sha512-yJeMwlmFPG1gIa7Rs/cGXeI6iOj6Qz5MG5PE61xLKpElUGzmJ4abm+qsLpzxKJFpsSDq742BQEocr8dI2t8Nxw==} - dependencies: - bit-twiddle: 1.0.2 - typedarray-pool: 1.2.0 - dev: false - /boxen/3.2.0: resolution: {integrity: sha512-cU4J/+NodM3IHdSL2yN8bqYqnmlBTidDR4RC7nJs61ZmtGz8VZzM3HLQX0zY5mrSmPtR3xWwsq2jOUQqFZN8+A==} engines: {node: '>=6'} @@ -4463,17 +4385,19 @@ packages: bn.js: 5.2.1 randombytes: 2.1.0 - /browserify-sign/4.2.1: - resolution: {integrity: sha512-/vrA5fguVAKKAVTNJjgSm1tRQDHUU6DbwO9IROu/0WAzC8PKhucDSh18J0RMvVeHAn5puMd+QHC2erPRNf8lmg==} + /browserify-sign/4.2.3: + resolution: {integrity: sha512-JWCZW6SKhfhjJxO8Tyiiy+XYB7cqd2S5/+WeYHsKdNKFlCBhKbblba1A/HN/90YwtxKc8tCErjffZl++UNmGiw==} + engines: {node: '>= 0.12'} dependencies: bn.js: 5.2.1 browserify-rsa: 4.1.0 create-hash: 1.2.0 create-hmac: 1.1.7 - elliptic: 6.5.4 + elliptic: 6.5.5 + hash-base: 3.0.4 inherits: 2.0.4 - parse-asn1: 5.1.6 - readable-stream: 3.6.2 + parse-asn1: 5.1.7 + readable-stream: 2.3.8 safe-buffer: 5.2.1 /browserify-zlib/0.2.0: @@ -4706,18 +4630,6 @@ packages: /caseless/0.12.0: resolution: {integrity: 
sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw==} - /cdt2d/1.0.0: - resolution: {integrity: sha512-pFKb7gVhpsI6onS5HUXRoqbBIJB4CJ+KPk8kgaIVcm0zFgOxIyBT5vzifZ4j1aoGVJS0U1A+S4oFDshuLAitlA==} - dependencies: - binary-search-bounds: 2.0.5 - robust-in-sphere: 1.2.1 - robust-orientation: 1.2.1 - dev: false - - /cell-orientation/1.0.1: - resolution: {integrity: sha512-DtEsrgP+donmPxpEZm7hK8zCPYDXAQ977ecJiE7G0gbTfnS6TZVBlief3IdRP/TZS1PVnJRGJTDdjSdV8mRDug==} - dev: false - /chalk/1.1.3: resolution: {integrity: sha512-U3lRVLMSlsCfjqYPbLyVv11M9CPW4I728d6TCKMAOJueEeB9/8o+eSsMnxPJD+Q+K909sdESg7C+tIkoH6on1A==} engines: {node: '>=0.10.0'} @@ -4831,19 +4743,6 @@ packages: inherits: 2.0.4 safe-buffer: 5.2.1 - /circumcenter/1.0.0: - resolution: {integrity: sha512-YRw0mvttcISviaOtSmaHb2G3ZVbkxzYPQeAEd57/CFFtmOkwfRTw9XuxYZ7PCi2BYa0NajjHV6bq4nbY1VCC8g==} - dependencies: - dup: 1.0.0 - robust-linear-solve: 1.0.0 - dev: false - - /circumradius/1.0.0: - resolution: {integrity: sha512-5ltoQvWQzJiZjCVX9PBKgKt+nsuzOLKayqXMNllfRSqIp2L5jFpdanv1V6j27Ue7ACxlzmamlR+jnLy+NTTVTw==} - dependencies: - circumcenter: 1.0.0 - dev: false - /clamp/1.0.1: resolution: {integrity: sha512-kgMuFyE78OC6Dyu3Dy7vcx4uy97EIbVxJB/B0eJ3bUNAkwdNcxYzgKltnyADiYwsR7SEqkkUPsEUT//OVS6XMA==} dev: false @@ -4872,18 +4771,6 @@ packages: source-map: 0.6.1 dev: false - /clean-pslg/1.1.2: - resolution: {integrity: sha512-bJnEUR6gRiiNi2n4WSC6yrc0Hhn/oQDOTzs6evZfPwEF/VKVXM6xu0F4n/WSBz7TjTt/ZK6I5snRM9gVKMVAxA==} - dependencies: - big-rat: 1.0.4 - box-intersect: 1.0.2 - nextafter: 1.0.0 - rat-vec: 1.1.1 - robust-segment-intersect: 1.0.1 - union-find: 1.0.2 - uniq: 1.0.1 - dev: false - /clean-regexp/1.0.0: resolution: {integrity: sha512-GfisEZEJvzKrmGWkvfhgzcz/BllN1USeqD2V6tg14OAOgaCD2Z/PUEuxnAZ/nPvmaHRG7a8y77p1T/IRQ4D1Hw==} engines: {node: '>=4'} @@ -5048,12 +4935,6 @@ packages: color-string: 1.9.1 dev: false - /colormap/2.3.2: - resolution: {integrity: sha512-jDOjaoEEmA9AgA11B/jCSAvYE95r3wRoAyTf3LEHGiUVlNHJaL1mRkf5AyLSpQBVGfTEPwGEqCIzL+kgr2WgNA==} - dependencies: - lerp: 1.0.3 - dev: false - /combined-stream/1.0.8: resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} engines: {node: '>= 0.8'} @@ -5077,27 +4958,6 @@ packages: /commondir/1.0.1: resolution: {integrity: sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==} - /compare-angle/1.0.1: - resolution: {integrity: sha512-adM1/bpLFQFquh0/Qr5aiOPuztoga/lCf2Z45s+Oydgzf18F3wBSkdHmcHMeig0bD+dDKlz52u1rLOAOqiyE5A==} - dependencies: - robust-orientation: 1.2.1 - robust-product: 1.0.0 - robust-sum: 1.0.0 - signum: 0.0.0 - two-sum: 1.0.0 - dev: false - - /compare-cell/1.0.0: - resolution: {integrity: sha512-uNIkjiNLZLhdCgouF39J+W04R7oP1vwrNME4vP2b2/bAa6PHOj+h8yXu52uPjPTKs5RatvqNsDVwEN7Yp19vNA==} - dev: false - - /compare-oriented-cell/1.0.1: - resolution: {integrity: sha512-9D7R2MQfsGGRskZAZF0TkJHt9eFNbFkZyVdVps+WUYxtRHgG77BLbieKgSkj7iEAb9PNDSU9QNa9MtigjQ3ktQ==} - dependencies: - cell-orientation: 1.0.1 - compare-cell: 1.0.0 - dev: false - /component-classes/1.2.6: resolution: {integrity: sha512-hPFGULxdwugu1QWW3SvVOCUHLzO34+a2J6Wqy0c5ASQkfi9/8nZcBB0ZohaEbXOQlCflMAEMmEWk7u7BVs4koA==} dependencies: @@ -5242,19 +5102,11 @@ packages: resolution: {integrity: sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==} dev: false - /convex-hull/1.0.3: - resolution: {integrity: 
sha512-24rZAoh81t41GHPLAxcsokgjH9XNoVqU2OiSi8iMHUn6HUURfiefcEWAPt1AfwZjBBWTKadOm1xUcUMnfFukhQ==} - dependencies: - affine-hull: 1.0.0 - incremental-convex-hull: 1.0.1 - monotone-convex-hull-2d: 1.0.1 - dev: false - /cookie-signature/1.0.6: resolution: {integrity: sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==} - /cookie/0.5.0: - resolution: {integrity: sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==} + /cookie/0.6.0: + resolution: {integrity: sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==} engines: {node: '>= 0.6'} /copy-anything/2.0.6: @@ -5420,7 +5272,7 @@ packages: resolution: {integrity: sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==} dependencies: browserify-cipher: 1.0.1 - browserify-sign: 4.2.1 + browserify-sign: 4.2.3 create-ecdh: 4.0.4 create-hash: 1.2.0 create-hmac: 1.1.7 @@ -5714,10 +5566,6 @@ packages: resolution: {integrity: sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==} dev: false - /cubic-hermite/1.0.0: - resolution: {integrity: sha512-DKZ6yLcJiJJgl54mGA4n0uueYB4qdPfOJrQ1HSEZqdKp6D25AAAWVDwpoAxLflOku5a/ALBO77oEIyWcVa+UYg==} - dev: false - /currently-unhandled/0.4.1: resolution: {integrity: sha512-/fITjgjGU50vjQ4FH6eUoYu+iUoUKIXws2hL15JJpIR+BbTxaXQsMuuyjtNh2WqsSBS5nsaZHFsFecyw5CCAng==} engines: {node: '>=0.10.0'} @@ -5731,12 +5579,6 @@ packages: lodash.flow: 3.5.0 dev: false - /cwise-compiler/1.1.3: - resolution: {integrity: sha512-WXlK/m+Di8DMMcCjcWr4i+XzcQra9eCdXIJrgh4TUgh0pIS/yJduLxS9JgefsHJ/YVLdgPtXm9r62W92MvanEQ==} - dependencies: - uniq: 1.0.1 - dev: false - /cyclist/1.0.2: resolution: {integrity: sha512-0sVXIohTfLqVIW3kb/0n6IiWF3Ifj5nm2XaSrLq2DI6fKIGa2fYAZdk917rUneaeLVpYfFcyXE2ft0fe3remsA==} dev: false @@ -5744,7 +5586,7 @@ packages: /d/1.0.1: resolution: {integrity: sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==} dependencies: - es5-ext: 0.10.62 + es5-ext: 0.10.64 type: 1.2.0 dev: false @@ -5773,12 +5615,33 @@ packages: d3-timer: 1.0.10 dev: false + /d3-format/1.4.5: + resolution: {integrity: sha512-J0piedu6Z8iB6TbIGfZgDzfXxUFN3qQRMofy2oPdXzQibYGqPB/9iMcxr/TGalU+2RsyDO+U4f33id8tbnSRMQ==} + dev: false + + /d3-geo-projection/2.9.0: + resolution: {integrity: sha512-ZULvK/zBn87of5rWAfFMc9mJOipeSo57O+BBitsKIXmU4rTVAnX1kSsJkE0R+TxY8pGNoM1nbyRRE7GYHhdOEQ==} + hasBin: true + dependencies: + commander: 2.20.3 + d3-array: 1.2.4 + d3-geo: 1.12.1 + resolve: 1.22.4 + dev: false + + /d3-geo/1.12.1: + resolution: {integrity: sha512-XG4d1c/UJSEX9NfU02KwBL6BYPj8YKHxgBEw5om2ZnTRSbIcego6dhHwcxuSR3clxh0EpE38os1DVPOmnYtTPg==} + dependencies: + d3-array: 1.2.4 + dev: false + /d3-hierarchy/1.1.9: resolution: {integrity: sha512-j8tPxlqh1srJHAtxfvOUwKNYJkQuBFdM1+JAUfq6xqH5eAqf93L7oG1NVqDa4CpFZNvnNKtCYEUC8KY9yEn9lQ==} dev: false - /d3-interpolate/1.4.0: - resolution: {integrity: sha512-V9znK0zc3jOPV4VD2zZn0sDhZU3WAE2bmlxdIwwQPPzPjvyLkd8B3JUVdS1IDUFDkWZ72c9qnv1GK2ZagTZ8EA==} + /d3-interpolate/3.0.1: + resolution: {integrity: sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==} + engines: {node: '>=12'} dependencies: d3-color: 1.4.1 dev: false @@ -5811,10 +5674,6 @@ packages: resolution: {integrity: sha512-B1JDm0XDaQC+uvo4DT79H0XmBskgS3l6Ve+1SBCfxgmtIb1AVrPIoqd+nPSv+loMX8szQ0sVUhGngL7D5QPiXw==} dev: false - /d3/3.5.17: - resolution: {integrity: 
sha512-yFk/2idb8OHPKkbAL8QaOaqENNoMhIaSHZerk3oQsECwkObkCpJyjYwCe+OHiq6UEdhe1m8ZGARRRO3ljFjlKg==} - dev: false - /damerau-levenshtein/1.0.8: resolution: {integrity: sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==} dev: false @@ -6016,13 +5875,6 @@ packages: rimraf: 2.7.1 dev: false - /delaunay-triangulate/1.1.6: - resolution: {integrity: sha512-mhAclqFCgLoiBIDQDIz2K+puZq6OhYxunXrG2wtTcZS+S1xuzl+H3h0MIOajpES+Z+jfY/rz0wVt3o5iipt1wg==} - dependencies: - incremental-convex-hull: 1.0.1 - uniq: 1.0.1 - dev: false - /delayed-stream/1.0.0: resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} engines: {node: '>=0.4.0'} @@ -6109,7 +5961,7 @@ packages: /dns-packet/1.3.4: resolution: {integrity: sha512-BQ6F4vycLXBvdrJZ6S3gZewt6rcrks9KBgM9vrhW+knGRqc8uEdT7fuCwloc7nny5xNoMJ17HGH0R/6fpo8ECA==} dependencies: - ip: 1.1.8 + ip: 1.1.9 safe-buffer: 5.2.1 dev: false @@ -6254,10 +6106,6 @@ packages: engines: {node: '>=8'} dev: false - /double-bits/1.1.1: - resolution: {integrity: sha512-BCLEIBq0O/DWoA7BsCu/R+RP0ZXiowP8BhtJT3qeuuQEBpnS8LK/Wo6UTJQv6v8mK1fj8n90YziHLwGdM5whSg==} - dev: false - /draft-js/0.10.5_wcqkhtmu7mswc6yz4uyexck3ty: resolution: {integrity: sha512-LE6jSCV9nkPhfVX2ggcRLA4FKs6zWq9ceuO/88BpXdNCS7mjRTgs0NsV6piUCJX9YxMsB9An33wnkMmU2sD2Zg==} peerDependencies: @@ -6314,12 +6162,6 @@ packages: jsbn: 0.1.1 safer-buffer: 2.1.2 - /edges-to-adjacency-list/1.0.0: - resolution: {integrity: sha512-0n0Z+xTLfg96eYXm91PEY4rO4WGxohLWjJ9qD1RI3fzxKU6GHez+6KPajpobR4zeZxp7rSiHjHG5dZPj8Kj58Q==} - dependencies: - uniq: 1.0.1 - dev: false - /ee-first/1.1.1: resolution: {integrity: sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==} @@ -6348,6 +6190,17 @@ packages: minimalistic-assert: 1.0.1 minimalistic-crypto-utils: 1.0.1 + /elliptic/6.5.5: + resolution: {integrity: sha512-7EjbcmUm17NQFu4Pmgmq2olYMj8nwMnpcddByChSUjArp8F5DQWcIcpriwO4ZToLNAJig0yiyjswfyGNje/ixw==} + dependencies: + bn.js: 4.12.0 + brorand: 1.1.0 + hash.js: 1.1.7 + hmac-drbg: 1.0.1 + inherits: 2.0.4 + minimalistic-assert: 1.0.1 + minimalistic-crypto-utils: 1.0.1 + /emoji-regex/7.0.3: resolution: {integrity: sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==} @@ -6525,13 +6378,14 @@ packages: is-date-object: 1.0.5 is-symbol: 1.0.4 - /es5-ext/0.10.62: - resolution: {integrity: sha512-BHLqn0klhEpnOKSrzn/Xsz2UIW8j+cGmo9JLzr8BiUapV8hPL9+FliFqjwr9ngW7jWdnxv6eO+/LqyhJVqgrjA==} + /es5-ext/0.10.64: + resolution: {integrity: sha512-p2snDhiLaXe6dahss1LddxqEm+SkuDvV8dnIQG0MWjyHpcMNfXKPE+/Cc0y+PhxJX3A4xGNeFCj5oc0BUh6deg==} engines: {node: '>=0.10'} requiresBuild: true dependencies: es6-iterator: 2.0.3 es6-symbol: 3.1.3 + esniff: 2.0.1 next-tick: 1.1.0 dev: false @@ -6539,14 +6393,10 @@ packages: resolution: {integrity: sha512-zw4SRzoUkd+cl+ZoE15A9o1oQd920Bb0iOJMQkQhl3jNc03YqVjAhG7scf9C5KWRU/R13Orf588uCC6525o02g==} dependencies: d: 1.0.1 - es5-ext: 0.10.62 + es5-ext: 0.10.64 es6-symbol: 3.1.3 dev: false - /es6-promise/4.2.8: - resolution: {integrity: sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==} - dev: false - /es6-symbol/3.1.3: resolution: {integrity: sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==} dependencies: @@ -6558,7 +6408,7 @@ packages: resolution: {integrity: sha512-p5um32HOTO1kP+w7PRnB+5lQ43Z6muuMuIMffvDN8ZB4GcnjLBV6zGStpbASIMk4DCAvEaamhe2zhyCb/QXXsA==} 
dependencies: d: 1.0.1 - es5-ext: 0.10.62 + es5-ext: 0.10.64 es6-iterator: 2.0.3 es6-symbol: 3.1.3 dev: false @@ -6871,7 +6721,7 @@ packages: minimatch: 3.1.2 object.values: 1.1.6 read-pkg-up: 2.0.0 - resolve: 1.15.0 + resolve: 1.22.4 transitivePeerDependencies: - eslint-import-resolver-typescript - eslint-import-resolver-webpack @@ -7180,6 +7030,16 @@ packages: transitivePeerDependencies: - supports-color + /esniff/2.0.1: + resolution: {integrity: sha512-kTUIGKQ/mDPFoJ0oVfcmyJn4iBDRptjNVIzwIFR7tqWXdVI9xfA2RMwY/gbSpJG3lkdWNEjLap/NqVHZiJsdfg==} + engines: {node: '>=0.10'} + dependencies: + d: 1.0.1 + es5-ext: 0.10.64 + event-emitter: 0.3.5 + type: 2.7.2 + dev: false + /espree/6.2.1: resolution: {integrity: sha512-ysCxRQY3WaXJz9tdbWOwuWr5Y/XrPTGX9Kiz3yoUXwW0VZ4w30HTkQLaGx/+ttFjF8i+ACbArnB4ce68a9m5hw==} engines: {node: '>=6.0.0'} @@ -7234,6 +7094,13 @@ packages: resolution: {integrity: sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==} engines: {node: '>= 0.6'} + /event-emitter/0.3.5: + resolution: {integrity: sha512-D9rRn9y7kLPnJ+hMq7S/nhvoKwwvVJahBi2BPmx3bvbsEdK3W9ii8cBSGjP+72/LnM4n6fo3+dkCX5FeTQruXA==} + dependencies: + d: 1.0.1 + es5-ext: 0.10.64 + dev: false + /eventemitter3/4.0.7: resolution: {integrity: sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==} dev: false @@ -7344,16 +7211,16 @@ packages: - supports-color dev: true - /express/4.18.2: - resolution: {integrity: sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==} + /express/4.19.2: + resolution: {integrity: sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==} engines: {node: '>= 0.10.0'} dependencies: accepts: 1.3.8 array-flatten: 1.1.1 - body-parser: 1.20.1 + body-parser: 1.20.2 content-disposition: 0.5.4 content-type: 1.0.5 - cookie: 0.5.0 + cookie: 0.6.0 cookie-signature: 1.0.6 debug: 2.6.9 depd: 2.0.0 @@ -7383,16 +7250,16 @@ packages: - supports-color dev: true - /express/4.18.2_supports-color@6.1.0: - resolution: {integrity: sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==} + /express/4.19.2_supports-color@6.1.0: + resolution: {integrity: sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==} engines: {node: '>= 0.10.0'} dependencies: accepts: 1.3.8 array-flatten: 1.1.1 - body-parser: 1.20.1_supports-color@6.1.0 + body-parser: 1.20.2_supports-color@6.1.0 content-disposition: 0.5.4 content-type: 1.0.5 - cookie: 0.5.0 + cookie: 0.6.0 cookie-signature: 1.0.6 debug: 2.6.9_supports-color@6.1.0 depd: 2.0.0 @@ -7483,10 +7350,6 @@ packages: - supports-color dev: false - /extract-frustum-planes/1.0.0: - resolution: {integrity: sha512-GivvxEMgjSNnB3e1mIMBlB5ogPB6XyEjOQRGG0SfYVVLtu1ntLGHLT1ly8+mE819dKBHBwnm9+UBCScjiMgppA==} - dev: false - /extsprintf/1.3.0: resolution: {integrity: sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g==} engines: {'0': node >=0.6.0} @@ -7592,6 +7455,7 @@ packages: /file-uri-to-path/1.0.0: resolution: {integrity: sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==} + requiresBuild: true dev: false optional: true @@ -7620,13 +7484,6 @@ packages: dependencies: to-regex-range: 5.0.1 - /filtered-vector/1.2.5: - resolution: {integrity: sha512-5Vu6wdtQJ1O2nRmz39dIr9m3hEDq1skYby5k1cJQdNWK4dMgvYcUEiA/9j7NcKfNZ5LGxn8w2LSLiigyH7pTAw==} - dependencies: - 
binary-search-bounds: 2.0.5 - cubic-hermite: 1.0.0 - dev: false - /finalhandler/1.2.0: resolution: {integrity: sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==} engines: {node: '>= 0.8'} @@ -7742,8 +7599,8 @@ packages: readable-stream: 2.3.8 dev: false - /follow-redirects/1.15.2: - resolution: {integrity: sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==} + /follow-redirects/1.15.6: + resolution: {integrity: sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==} engines: {node: '>=4.0'} peerDependencies: debug: '*' @@ -7752,8 +7609,8 @@ packages: optional: true dev: false - /follow-redirects/1.15.2_debug@4.3.4: - resolution: {integrity: sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==} + /follow-redirects/1.15.6_debug@4.3.4: + resolution: {integrity: sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==} engines: {node: '>=4.0'} peerDependencies: debug: '*' @@ -7969,10 +7826,6 @@ packages: /functions-have-names/1.2.3: resolution: {integrity: sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==} - /gamma/0.1.0: - resolution: {integrity: sha512-IgHc/jnzNTA2KjXmRSx/CVd1ONp7HTAV81SLI+n3G6PyyHkakkE+2d3hteJYFm7aoe01NEl4m7ziUAsoWCc5AA==} - dev: false - /gensync/1.0.0-beta.2: resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} engines: {node: '>=6.9.0'} @@ -8056,120 +7909,6 @@ packages: dependencies: assert-plus: 1.0.0 - /gl-axes3d/1.5.3: - resolution: {integrity: sha512-KRYbguKQcDQ6PcB9g1pgqB8Ly4TY1DQODpPKiDTasyWJ8PxQk0t2Q7XoQQijNqvsguITCpVVCzNb5GVtIWiVlQ==} - dependencies: - bit-twiddle: 1.0.2 - dup: 1.0.0 - extract-frustum-planes: 1.0.0 - gl-buffer: 2.1.2 - gl-mat4: 1.2.0 - gl-shader: 4.3.1 - gl-state: 1.0.0 - gl-vao: 1.3.0 - gl-vec4: 1.0.1 - glslify: 7.1.1 - robust-orientation: 1.2.1 - split-polygon: 1.0.0 - vectorize-text: 3.2.2 - dev: false - - /gl-buffer/2.1.2: - resolution: {integrity: sha512-uVvLxxhEbQGl43xtDeKu75ApnrGyNHoPmOcvvuJNyP04HkK0/sX5Dll6OFffQiwSV4j0nlAZsgznvO3CPT3dFg==} - dependencies: - ndarray: 1.0.19 - ndarray-ops: 1.2.2 - typedarray-pool: 1.2.0 - dev: false - - /gl-cone3d/1.5.2: - resolution: {integrity: sha512-1JNeHH4sUtUmDA4ZK7Om8/kShwb8IZVAsnxaaB7IPRJsNGciLj1sTpODrJGeMl41RNkex5kXD2SQFrzyEAR2Rw==} - dependencies: - colormap: 2.3.2 - gl-buffer: 2.1.2 - gl-mat4: 1.2.0 - gl-shader: 4.3.1 - gl-texture2d: 2.1.0 - gl-vao: 1.3.0 - gl-vec3: 1.1.3 - glsl-inverse: 1.0.0 - glsl-out-of-range: 1.0.4 - glsl-specular-cook-torrance: 2.0.1 - glslify: 7.1.1 - ndarray: 1.0.19 - dev: false - - /gl-constants/1.0.0: - resolution: {integrity: sha512-3DNyoAUdb1c+o7jNk5Nm7eh6RSQFi9ZmMQIQb2xxsO27rUopE+IUhoh4xlUvZYBn1YPgUC8BlCnrVjXq/d2dQA==} - dev: false - - /gl-contour2d/1.1.7: - resolution: {integrity: sha512-GdebvJ9DtT3pJDpoE+eU2q+Wo9S3MijPpPz5arZbhK85w2bARmpFpVfPaDlZqWkB644W3BlH8TVyvAo1KE4Bhw==} - dependencies: - binary-search-bounds: 2.0.5 - cdt2d: 1.0.0 - clean-pslg: 1.1.2 - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - glslify: 7.1.1 - iota-array: 1.0.0 - ndarray: 1.0.19 - surface-nets: 1.0.2 - dev: false - - /gl-error3d/1.0.16: - resolution: {integrity: sha512-TGJewnKSp7ZnqGgG3XCF9ldrDbxZrO+OWlx6oIet4OdOM//n8xJ5isArnIV/sdPJnFbhfoLxWrW9f5fxHFRQ1A==} - dependencies: - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - gl-vao: 1.3.0 - glsl-out-of-range: 1.0.4 - glslify: 7.1.1 - dev: 
false - - /gl-fbo/2.0.5: - resolution: {integrity: sha512-tDq6zQSQzvvK2QwPV7ln7cf3rs0jV1rQXqKOEuB145LdN+xhADPBtXHDJ3Ftk80RAJimJU0AaQBgP/X6yYGNhQ==} - dependencies: - gl-texture2d: 2.1.0 - dev: false - - /gl-format-compiler-error/1.0.3: - resolution: {integrity: sha512-FtQaBYlsM/rnz7YhLkxG9dLcNDB+ExErIsFV2DXl0nk+YgIZ2i0jMob4BrhT9dNa179zFb0gZMWpNAokytK+Ug==} - dependencies: - add-line-numbers: 1.0.1 - gl-constants: 1.0.0 - glsl-shader-name: 1.0.0 - sprintf-js: 1.1.2 - dev: false - - /gl-heatmap2d/1.1.1: - resolution: {integrity: sha512-6Vo1fPIB1vQFWBA/MR6JAA16XuQuhwvZRbSjYEq++m4QV33iqjGS2HcVIRfJGX+fomd5eiz6bwkVZcKm69zQPw==} - dependencies: - binary-search-bounds: 2.0.5 - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - glslify: 7.1.1 - iota-array: 1.0.0 - typedarray-pool: 1.2.0 - dev: false - - /gl-line3d/1.2.1: - resolution: {integrity: sha512-eeb0+RI2ZBRqMYJK85SgsRiJK7c4aiOjcnirxv0830A3jmOc99snY3AbPcV8KvKmW0Yaf3KA4e+qNCbHiTOTnA==} - dependencies: - binary-search-bounds: 2.0.5 - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - gl-texture2d: 2.1.0 - gl-vao: 1.3.0 - glsl-out-of-range: 1.0.4 - glslify: 7.1.1 - ndarray: 1.0.19 - dev: false - - /gl-mat3/1.0.0: - resolution: {integrity: sha512-obeEq9y7xaDoVkwMGJNL1upwpYlPJiXJFhREaNytMqUdfHKHNna9HvImmLV8F8Ys6QOYwPPddptZNoiiec/XOg==} - dev: false - /gl-mat4/1.2.0: resolution: {integrity: sha512-sT5C0pwB1/e9G9AvAoLsoaJtbMGjfd/jfxo8jMCKqYYEnjZuFvqV5rehqar0538EmssjdDeiEWnKyBSTw7quoA==} dev: false @@ -8178,169 +7917,6 @@ packages: resolution: {integrity: sha512-wcCp8vu8FT22BnvKVPjXa/ICBWRq/zjFfdofZy1WSpQZpphblv12/bOQLBC1rMM7SGOFS9ltVmKOHil5+Ml7gA==} dev: false - /gl-mesh3d/2.3.1: - resolution: {integrity: sha512-pXECamyGgu4/9HeAQSE5OEUuLBGS1aq9V4BCsTcxsND4fNLaajEkYKUz/WY2QSYElqKdsMBVsldGiKRKwlybqA==} - dependencies: - barycentric: 1.0.1 - colormap: 2.3.2 - gl-buffer: 2.1.2 - gl-mat4: 1.2.0 - gl-shader: 4.3.1 - gl-texture2d: 2.1.0 - gl-vao: 1.3.0 - glsl-out-of-range: 1.0.4 - glsl-specular-cook-torrance: 2.0.1 - glslify: 7.1.1 - ndarray: 1.0.19 - normals: 1.1.0 - polytope-closest-point: 1.0.0 - simplicial-complex-contour: 1.0.2 - typedarray-pool: 1.2.0 - dev: false - - /gl-plot2d/1.4.5: - resolution: {integrity: sha512-6GmCN10SWtV+qHFQ1gjdnVubeHFVsm6P4zmo0HrPIl9TcdePCUHDlBKWAuE6XtFhiMKMj7R8rApOX8O8uXUYog==} - dependencies: - binary-search-bounds: 2.0.5 - gl-buffer: 2.1.2 - gl-select-static: 2.0.7 - gl-shader: 4.3.1 - glsl-inverse: 1.0.0 - glslify: 7.1.1 - text-cache: 4.2.2 - dev: false - - /gl-plot3d/2.4.7: - resolution: {integrity: sha512-mLDVWrl4Dj0O0druWyHUK5l7cBQrRIJRn2oROEgrRuOgbbrLAzsREKefwMO0bA0YqkiZMFMnV5VvPA9j57X5Xg==} - dependencies: - 3d-view: 2.0.1 - a-big-triangle: 1.0.3 - gl-axes3d: 1.5.3 - gl-fbo: 2.0.5 - gl-mat4: 1.2.0 - gl-select-static: 2.0.7 - gl-shader: 4.3.1 - gl-spikes3d: 1.0.10 - glslify: 7.1.1 - has-passive-events: 1.0.0 - is-mobile: 2.2.2 - mouse-change: 1.4.0 - mouse-event-offset: 3.0.2 - mouse-wheel: 1.2.0 - ndarray: 1.0.19 - right-now: 1.0.0 - dev: false - - /gl-pointcloud2d/1.0.3: - resolution: {integrity: sha512-OS2e1irvJXVRpg/GziXj10xrFJm9kkRfFoB6BLUvkjCQV7ZRNNcs2CD+YSK1r0gvMwTg2T3lfLM3UPwNtz+4Xw==} - dependencies: - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - glslify: 7.1.1 - typedarray-pool: 1.2.0 - dev: false - - /gl-quat/1.0.0: - resolution: {integrity: sha512-Pv9yvjJgQN85EbE79S+DF50ujxDkyjfYHIyXJcCRiimU1UxMY7vEHbVkj0IWLFaDndhfZT9vVOyfdMobLlrJsQ==} - dependencies: - gl-mat3: 1.0.0 - gl-vec3: 1.1.3 - gl-vec4: 1.0.1 - dev: false - - /gl-scatter3d/1.2.3: - resolution: {integrity: 
sha512-nXqPlT1w5Qt51dTksj+DUqrZqwWAEWg0PocsKcoDnVNv0X8sGA+LBZ0Y+zrA+KNXUL0PPCX9WR9cF2uJAZl1Sw==} - dependencies: - gl-buffer: 2.1.2 - gl-mat4: 1.2.0 - gl-shader: 4.3.1 - gl-vao: 1.3.0 - glsl-out-of-range: 1.0.4 - glslify: 7.1.1 - is-string-blank: 1.0.1 - typedarray-pool: 1.2.0 - vectorize-text: 3.2.2 - dev: false - - /gl-select-box/1.0.4: - resolution: {integrity: sha512-mKsCnglraSKyBbQiGq0Ila0WF+m6Tr+EWT2yfaMn/Sh9aMHq5Wt0F/l6Cf/Ed3CdERq5jHWAY5yxLviZteYu2w==} - dependencies: - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - glslify: 7.1.1 - dev: false - - /gl-select-static/2.0.7: - resolution: {integrity: sha512-OvpYprd+ngl3liEatBTdXhSyNBjwvjMSvV2rN0KHpTU+BTi4viEETXNZXFgGXY37qARs0L28ybk3UQEW6C5Nnw==} - dependencies: - bit-twiddle: 1.0.2 - gl-fbo: 2.0.5 - ndarray: 1.0.19 - typedarray-pool: 1.2.0 - dev: false - - /gl-shader/4.3.1: - resolution: {integrity: sha512-xLoN6XtRLlg97SEqtuzfKc+pVWpVkQ3YjDI1kuCale8tF7+zMhiKlMfmG4IMQPMdKJZQbIc/Ny8ZusEpfh5U+w==} - dependencies: - gl-format-compiler-error: 1.0.3 - weakmap-shim: 1.1.1 - dev: false - - /gl-spikes2d/1.0.2: - resolution: {integrity: sha512-QVeOZsi9nQuJJl7NB3132CCv5KA10BWxAY2QgJNsKqbLsG53B/TrGJpjIAohnJftdZ4fT6b3ZojWgeaXk8bOOA==} - dev: false - - /gl-spikes3d/1.0.10: - resolution: {integrity: sha512-lT3xroowOFxMvlhT5Mof76B2TE02l5zt/NIWljhczV2FFHgIVhA4jMrd5dIv1so1RXMBDJIKu0uJI3QKliDVLg==} - dependencies: - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - gl-vao: 1.3.0 - glslify: 7.1.1 - dev: false - - /gl-state/1.0.0: - resolution: {integrity: sha512-Od836PpgCuTC0W7uHYnEEPRdQPL1FakWlznz3hRvlO6tD5sdLfBKX9qNRGy1DjfMCDTudhyYWxiWjhql1B8N4Q==} - dependencies: - uniq: 1.0.1 - dev: false - - /gl-streamtube3d/1.4.1: - resolution: {integrity: sha512-rH02v00kgwgdpkXVo7KsSoPp38bIAYR9TE1iONjcQ4cQAlDhrGRauqT/P5sUaOIzs17A2DxWGcXM+EpNQs9pUA==} - dependencies: - gl-cone3d: 1.5.2 - gl-vec3: 1.1.3 - gl-vec4: 1.0.1 - glsl-inverse: 1.0.0 - glsl-out-of-range: 1.0.4 - glsl-specular-cook-torrance: 2.0.1 - glslify: 7.1.1 - dev: false - - /gl-surface3d/1.6.0: - resolution: {integrity: sha512-x15+u4712ysnB85G55RLJEml6mOB4VaDn0VTlXCc9JcjRl5Es10Tk7lhGGyiPtkCfHwvhnkxzYA1/rHHYN7Y0A==} - dependencies: - binary-search-bounds: 2.0.5 - bit-twiddle: 1.0.2 - colormap: 2.3.2 - dup: 1.0.0 - gl-buffer: 2.1.2 - gl-mat4: 1.2.0 - gl-shader: 4.3.1 - gl-texture2d: 2.1.0 - gl-vao: 1.3.0 - glsl-out-of-range: 1.0.4 - glsl-specular-beckmann: 1.1.2 - glslify: 7.1.1 - ndarray: 1.0.19 - ndarray-gradient: 1.0.1 - ndarray-ops: 1.2.2 - ndarray-pack: 1.2.1 - ndarray-scratch: 1.2.0 - surface-nets: 1.0.2 - typedarray-pool: 1.2.0 - dev: false - /gl-text/1.3.1: resolution: {integrity: sha512-/f5gcEMiZd+UTBJLTl3D+CkCB/0UFGTx3nflH8ZmyWcLkZhsZ1+Xx5YYkw2rgWAzgPeE35xCqBuHSoMKQVsR+w==} dependencies: @@ -8363,14 +7939,6 @@ packages: typedarray-pool: 1.2.0 dev: false - /gl-texture2d/2.1.0: - resolution: {integrity: sha512-W0tzEjtlGSsCKq5FFwFVhH+fONFUTUeqM4HhA/BleygKaX39IwNTVOiqkwfu9szQZ4dQEq8ZDl7w1ud/eKLaZA==} - dependencies: - ndarray: 1.0.19 - ndarray-ops: 1.2.2 - typedarray-pool: 1.2.0 - dev: false - /gl-util/3.1.3: resolution: {integrity: sha512-dvRTggw5MSkJnCbh74jZzSoTOGnVYK+Bt+Ckqm39CVcl6+zSsxqWk4lr5NKhkqXHL6qvZAU9h17ZF8mIskY9mA==} dependencies: @@ -8383,18 +7951,6 @@ packages: weak-map: 1.0.8 dev: false - /gl-vao/1.3.0: - resolution: {integrity: sha512-stSOZ+n0fnAxgDfipwKK/73AwzCNL+AFEc/v2Xm76nyFnUZGmQtD2FEC3lt1icoOHAzMgHBAjCue7dBIDeOTcw==} - dev: false - - /gl-vec3/1.1.3: - resolution: {integrity: sha512-jduKUqT0SGH02l8Yl+mV1yVsDfYgQAJyXGxkJQGyxPLHRiW25DwVIRPt6uvhrEMHftJfqhqKthRcyZqNEl9Xdw==} - dev: false - - 
/gl-vec4/1.0.1: - resolution: {integrity: sha512-/gx5zzIy75JXzke4yuwcbvK+COWf8UJbVCUPvhfsYVw1GVey4Eextk/0H0ctXnOICruNK7+GS4ILQzEQcHcPEg==} - dev: false - /glob-parent/3.1.0: resolution: {integrity: sha512-E8Ak/2+dZY6fnzlR7+ueWvhsH1SjHr4jjss4YS/h4py44jY9MhK/VFdaZJAWDz6BbL21KeteKxFSFpq8OS5gVA==} dependencies: @@ -8517,14 +8073,6 @@ packages: glsl-tokenizer: 2.1.5 dev: false - /glsl-inverse/1.0.0: - resolution: {integrity: sha512-+BsseNlgqzd4IFX1dMqg+S0XuIXzH0acvTtW7svwhJESM1jb2BZFwdO+tOWdCXD5Zse6b9bOmzp5sCNA7GQ2QA==} - dev: false - - /glsl-out-of-range/1.0.4: - resolution: {integrity: sha512-fCcDu2LCQ39VBvfe1FbhuazXEf0CqMZI9OYXrYlL6uUARG48CTAbL04+tZBtVM0zo1Ljx4OLu2AxNquq++lxWQ==} - dev: false - /glsl-resolve/0.0.1: resolution: {integrity: sha512-xxFNsfnhZTK9NBhzJjSBGX6IOqYpvBHxxmo+4vapiljyGNCY0Bekzn0firQkQrazK59c1hYxMDxYS8MDlhw4gA==} dependencies: @@ -8532,23 +8080,6 @@ packages: xtend: 2.2.0 dev: false - /glsl-shader-name/1.0.0: - resolution: {integrity: sha512-OtHon0dPCbJD+IrVA1vw9QDlp2cS/f9z8X/0y+W7Qy1oZ3U1iFAQUEco2v30V0SAlVLDG5rEfhjEfc3DKdGbFQ==} - dependencies: - atob-lite: 1.0.0 - glsl-tokenizer: 2.1.5 - dev: false - - /glsl-specular-beckmann/1.1.2: - resolution: {integrity: sha512-INvd7szO1twNPLGwE0Kf2xXIEy5wpOPl/LYoiw3+3nbAe6Rfn5rjdK9xvfnwoWksTCs3RejuLeAiZkLTkdFtwg==} - dev: false - - /glsl-specular-cook-torrance/2.0.1: - resolution: {integrity: sha512-bFtTfbgLXIbto/U6gM7h0IxoPMU+5zpMK5HoAaA2LnPuGk3JSzKAnsoyh5QGTT8ioIEQrjk6jcQNrgujPsP7rw==} - dependencies: - glsl-specular-beckmann: 1.1.2 - dev: false - /glsl-token-assignments/2.0.2: resolution: {integrity: sha512-OwXrxixCyHzzA0U2g4btSNAyB2Dx8XrztY5aVUCjRSh4/D0WoJn8Qdps7Xub3sz6zE73W3szLrmWtQ7QMpeHEQ==} dev: false @@ -8804,7 +8335,14 @@ packages: resolution: {integrity: sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==} engines: {node: '>= 0.4.0'} dependencies: - function-bind: 1.1.1 + function-bind: 1.1.1 + + /hash-base/3.0.4: + resolution: {integrity: sha512-EeeoJKjTyt868liAlVmcv2ZsUfGHlE3Q+BICOXcZiwN3osr5Q/zFGYmTJpoIzuaSTAwndFy+GqhEwlU4L3j4Ow==} + engines: {node: '>=4'} + dependencies: + inherits: 2.0.4 + safe-buffer: 5.2.1 /hash-base/3.1.0: resolution: {integrity: sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==} @@ -8990,7 +8528,7 @@ packages: engines: {node: '>=8.0.0'} dependencies: eventemitter3: 4.0.7 - follow-redirects: 1.15.2_debug@4.3.4 + follow-redirects: 1.15.6_debug@4.3.4 requires-port: 1.0.0 transitivePeerDependencies: - debug @@ -9062,12 +8600,6 @@ packages: dev: false optional: true - /image-size/0.7.5: - resolution: {integrity: sha512-Hiyv+mXHfFEP7LzUL/llg9RwFxxY+o9N3JVLIeG5E7iFIFAalxvRU9UZthBdYDEVnzHMgjnKJPPpay5BWf1g9g==} - engines: {node: '>=6.9.0'} - hasBin: true - dev: false - /immer/1.10.0: resolution: {integrity: sha512-O3sR1/opvCDGLEVcvrGTMtLac8GJ5IwZC4puPrLuRj3l7ICKvkmA0vGuU9OW8mV9WIBRnaxp5GJh9IEAaNOoYg==} dev: false @@ -9129,13 +8661,6 @@ packages: resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} engines: {node: '>=0.8.19'} - /incremental-convex-hull/1.0.1: - resolution: {integrity: sha512-mKRJDXtzo1R9LxCuB1TdwZXHaPaIEldoGPsXy2jrJc/kufyqp8y/VAQQxThSxM2aroLoh6uObexPk1ASJ7FB7Q==} - dependencies: - robust-orientation: 1.2.1 - simplicial-complex: 1.0.0 - dev: false - /indent-string/3.2.0: resolution: {integrity: sha512-BYqTHXTGUIvg7t1r4sJNKcbDZkL92nkXA8YtRpbjFHRHGDL/NtUeiBJMeE60kIFN/Mg8ESaWQvftaYMGJzQZCQ==} engines: {node: '>=4'} @@ -9234,33 +8759,19 
@@ packages: engines: {node: '>= 0.10'} dev: true - /interval-tree-1d/1.0.4: - resolution: {integrity: sha512-wY8QJH+6wNI0uh4pDQzMvl+478Qh7Rl4qLmqiluxALlNvl+I+o5x38Pw3/z7mDPTPS1dQalZJXsmbvxx5gclhQ==} - dependencies: - binary-search-bounds: 2.0.5 - dev: false - /invariant/2.2.4: resolution: {integrity: sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==} dependencies: loose-envify: 1.4.0 dev: false - /invert-permutation/1.0.0: - resolution: {integrity: sha512-8f473/KSrnvyBd7Khr4PC5wPkAOehwkGc+AH5Q7D+U/fE+cdDob2FJ3naXAs4mspR9JIaEwbDI3me8H0KlVzSQ==} - dev: false - - /iota-array/1.0.0: - resolution: {integrity: sha512-pZ2xT+LOHckCatGQ3DcG/a+QuEqvoxqkiL7tvE8nn3uuu+f6i1TtpB5/FtWFbxUuVr5PZCx8KskuGatbJDXOWA==} - dev: false - /ip-regex/2.1.0: resolution: {integrity: sha512-58yWmlHpp7VYfcdTwMTvwMmqx/Elfxjd9RXTDyMsbL7lLWmhMylLEqiYVLKuLzOZqVgiWXD9MfR62Vv89VRxkw==} engines: {node: '>=4'} dev: false - /ip/1.1.8: - resolution: {integrity: sha512-PuExPYUiu6qMBQb4l06ecm6T6ujzhmh+MeJcW9wa89PoAz5pvd4zPgN5WJV104mb6S2T1AwNIAaB70JNrLQWhg==} + /ip/1.1.9: + resolution: {integrity: sha512-cyRxvOEpNHNtchU3Ln9KC/auJgup87llfQpQ+t5ghoC/UhL16SWzbueiCsdTnWmqAWl7LadfuwhlqmtOaqMHdQ==} dev: false /ipaddr.js/1.9.1: @@ -9543,6 +9054,10 @@ packages: resolution: {integrity: sha512-wW/SXnYJkTjs++tVK5b6kVITZpAZPtUrt9SF80vvxGiF/Oywal+COk1jlRkiVq15RFNEQKQY31TkV24/1T5cVg==} dev: false + /is-mobile/4.0.0: + resolution: {integrity: sha512-mlcHZA84t1qLSuWkt2v0I2l61PYdyQDt4aG1mLIXF5FDMm4+haBCxCPYSr/uwqQNRk1MiTizn0ypEuRAOLRAew==} + dev: false + /is-negated-glob/1.0.0: resolution: {integrity: sha512-czXVVn/QEmgvej1f50BZ648vUI+em0xqMq2Sn+QncCLN4zj1UAxlT+kw/6ggQTOaZPd1HqKQGEqbpQVtJucWug==} engines: {node: '>=0.10.0'} @@ -10449,7 +9964,7 @@ packages: connect-pause: 0.1.1 cors: 2.8.5 errorhandler: 1.5.1 - express: 4.18.2 + express: 4.19.2 express-urlrewrite: 1.4.0 json-parse-helpfulerror: 1.0.3 lodash: 4.17.21 @@ -10622,10 +10137,6 @@ packages: deprecated: use String.prototype.padStart() dev: false - /lerp/1.0.3: - resolution: {integrity: sha512-70Rh4rCkJDvwWiTsyZ1HmJGvnyfFah4m6iTux29XmasRiZPDBpT9Cfa4ai73+uLZxnlKruUS62jj2lb11wURiA==} - dev: false - /less-loader/5.0.0_less@3.13.1: resolution: {integrity: sha512-bquCU89mO/yWLaUq0Clk7qCsKhsF/TZpJUzETRvJa9KSVEL9SO3ovCvdEHISBhrC81OwC8QSVX7E0bzElZj9cg==} engines: {node: '>= 4.8.0'} @@ -10792,6 +10303,10 @@ packages: resolution: {integrity: sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==} dev: false + /lodash.merge/4.6.2: + resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + dev: false + /lodash.sortby/4.7.0: resolution: {integrity: sha512-HDWXG8isMntAyRF5vZ7xKuEvOhT4AhlRt/3czTSjvGUxjYCBVRQY48ViDHyfYz9VIoBkW4TMGQNapx+l3RUwdA==} dev: false @@ -10982,49 +10497,11 @@ packages: vt-pbf: 3.1.3 dev: false - /marching-simplex-table/1.0.0: - resolution: {integrity: sha512-PexXXVF4f5Bux3vGCNlRRBqF/GyTerNo77PbBz8g/MFFXv212b48IGVglj/VfaYBRY6vlFQffa9dFbCCN0+7LA==} - dependencies: - convex-hull: 1.0.3 - dev: false - - /mat4-decompose/1.0.4: - resolution: {integrity: sha512-M3x6GXrzRTt5Ok4/bcHFc869Pe8F3uWaSp3xkUpi+uaTRulPXIZ1GWD13Z3A8WK2bxTrcvX21mjp05gUy/Dwbw==} - dependencies: - gl-mat4: 1.2.0 - gl-vec3: 1.1.3 - dev: false - - /mat4-interpolate/1.0.4: - resolution: {integrity: sha512-+ulnoc6GUHq8eGZGbLyhQU61tx2oeNAFilV/xzCCzLV+F3nDk8jqERUqRmx8eNMMMvrdvoRSw0JXmnisfVPY9A==} - dependencies: - gl-mat4: 1.2.0 - gl-vec3: 1.1.3 - mat4-decompose: 1.0.4 - 
mat4-recompose: 1.0.4 - quat-slerp: 1.0.1 - dev: false - - /mat4-recompose/1.0.4: - resolution: {integrity: sha512-s1P2Yl4LQxq8dN0CgJE+mCO8y3IX/SmauSZ+H0zJsE1UKlgJ9loInfPC/OUxn2MzUW9bfBZf0Wcc2QKA3/e6FQ==} - dependencies: - gl-mat4: 1.2.0 - dev: false - /math-log2/1.0.1: resolution: {integrity: sha512-9W0yGtkaMAkf74XGYVy4Dqw3YUMnTNB2eeiw9aQbUl4A3KmuCEHTt2DgAB07ENzOYAjsYSAYufkAq0Zd+jU7zA==} engines: {node: '>=0.10.0'} dev: false - /matrix-camera-controller/2.1.4: - resolution: {integrity: sha512-zsPGPONclrKSImNpqqKDTcqFpWLAIwMXEJtCde4IFPOw1dA9udzFg4HOFytOTosOFanchrx7+Hqq6glLATIxBA==} - dependencies: - binary-search-bounds: 2.0.5 - gl-mat4: 1.2.0 - gl-vec3: 1.1.3 - mat4-interpolate: 1.0.4 - dev: false - /md5.js/1.3.5: resolution: {integrity: sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==} dependencies: @@ -11336,12 +10813,6 @@ packages: resolution: {integrity: sha512-5LC9SOxjSc2HF6vO2CyuTDNivEdoz2IvyJJGj6X8DJ0eFyfszE0QiEd+iXmBvUP3WHxSjFH/vIsA0EN00cgr8w==} dev: false - /monotone-convex-hull-2d/1.0.1: - resolution: {integrity: sha512-ixQ3qdXTVHvR7eAoOjKY8kGxl9YjOFtzi7qOjwmFFPfBqZHVOjUFOBy/Dk9dusamRSPJe9ggyfSypRbs0Bl8BA==} - dependencies: - robust-orientation: 1.2.1 - dev: false - /morgan/1.10.0: resolution: {integrity: sha512-AbegBVI4sh6El+1gNwvD5YIck7nSA36weD7xvIxG4in80j/UoK8AEGaWnnz8v1GxonMCltmlNs5ZKbGvl9b1XQ==} engines: {node: '>= 0.8.0'} @@ -11433,6 +10904,7 @@ packages: /nan/2.17.0: resolution: {integrity: sha512-2ZTgtl0nJsO0KQCjEpxcIr5D+Yv90plTitZt9JBfQvVJDS5seMl3FOvsh3+9CoYWXf/1l5OaZzzF6nDm4cagaQ==} + requiresBuild: true dev: false optional: true @@ -11477,6 +10949,10 @@ packages: - supports-color dev: false + /native-promise-only/0.8.1: + resolution: {integrity: sha512-zkVhZUA3y8mbz652WrL5x0fB0ehrBkulWT3TomAQ9iDtyXZvzKeEA6GPxAItBYeNYl5yngKRX612qHOhvMkDeg==} + dev: false + /native-request/1.1.0: resolution: {integrity: sha512-uZ5rQaeRn15XmpgE0xoPL8YWqcX90VtCFglYwAgkvKM5e8fog+vePLAhHxuuv/gRkrQxIeh5U3q9sMNUrENqWw==} requiresBuild: true @@ -11486,55 +10962,16 @@ packages: /natural-compare/1.4.0: resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} - /ndarray-extract-contour/1.0.1: - resolution: {integrity: sha512-iDngNoFRqrqbXGLP8BzyGrybw/Jnkkn7jphzc3ZFfO7dfmpL1Ph74/6xCi3xSvJFyVW90XpMnd766jTaRPsTCg==} - dependencies: - typedarray-pool: 1.2.0 - dev: false - - /ndarray-gradient/1.0.1: - resolution: {integrity: sha512-+xONVi7xxTCGL6KOb11Yyoe0tPNqAUKF39CvFoRjL5pdOmPd2G2pckK9lD5bpLF3q45LLnYNyiUSJSdNmQ2MTg==} - dependencies: - cwise-compiler: 1.1.3 - dup: 1.0.0 - dev: false - - /ndarray-linear-interpolate/1.0.0: - resolution: {integrity: sha512-UN0f4+6XWsQzJ2pP5gVp+kKn5tJed6mA3K/L50uO619+7LKrjcSNdcerhpqxYaSkbxNJuEN76N05yBBJySnZDw==} - dev: false - - /ndarray-ops/1.2.2: - resolution: {integrity: sha512-BppWAFRjMYF7N/r6Ie51q6D4fs0iiGmeXIACKY66fLpnwIui3Wc3CXiD/30mgLbDjPpSLrsqcp3Z62+IcHZsDw==} - dependencies: - cwise-compiler: 1.1.3 - dev: false - - /ndarray-pack/1.2.1: - resolution: {integrity: sha512-51cECUJMT0rUZNQa09EoKsnFeDL4x2dHRT0VR5U2H5ZgEcm95ZDWcMA5JShroXjHOejmAD/fg8+H+OvUnVXz2g==} - dependencies: - cwise-compiler: 1.1.3 - ndarray: 1.0.19 - dev: false - - /ndarray-scratch/1.2.0: - resolution: {integrity: sha512-a4pASwB1jQyJcKLYrwrladVfDZDUGc78qLJZbHyb1Q4rhte0URhzc6ALQpBcauwgov0sXLwZz3vYH5jKAhSMIg==} - dependencies: - ndarray: 1.0.19 - ndarray-ops: 1.2.2 - typedarray-pool: 1.2.0 - dev: false - - /ndarray-sort/1.0.1: - resolution: {integrity: 
sha512-Gpyis5NvEPOQVadDOG+Dx8bhYCkaxn5IlA4Ig/jBJIlnW1caDiPneQLzT/+AIMeHEmqlGZfdqO/I1TXJS2neAw==} - dependencies: - typedarray-pool: 1.2.0 - dev: false - - /ndarray/1.0.19: - resolution: {integrity: sha512-B4JHA4vdyZU30ELBw3g7/p9bZupyew5a7tX1Y/gGeF2hafrPaQZhgrGQfsvgfYbgdFZjYwuEcnaobeM/WMW+HQ==} + /needle/2.9.1: + resolution: {integrity: sha512-6R9fqJ5Zcmf+uYaFgdIHmLwNldn5HbK8L5ybn7Uz+ylX/rnOsSp1AHcvQSrCaFN+qNM1wpymHqD7mVasEOlHGQ==} + engines: {node: '>= 4.4.x'} + hasBin: true dependencies: - iota-array: 1.0.0 - is-buffer: 1.1.6 + debug: 3.2.7 + iconv-lite: 0.4.24 + sax: 1.2.4 + transitivePeerDependencies: + - supports-color dev: false /negotiator/0.6.3: @@ -11549,12 +10986,6 @@ packages: resolution: {integrity: sha512-CXdUiJembsNjuToQvxayPZF9Vqht7hewsvy2sOWafLvi2awflj9mOC6bHIg50orX8IJvWKY9wYQ/zB2kogPslQ==} dev: false - /nextafter/1.0.0: - resolution: {integrity: sha512-7PO+A89Tll2rSEfyrjtqO0MaI37+nnxBdnQcPypfbEYYuGaJxWGCqaOwQX4a3GHNTS08l1kazuiLEWZniZjMUQ==} - dependencies: - double-bits: 1.1.1 - dev: false - /nice-try/1.0.5: resolution: {integrity: sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==} @@ -11681,10 +11112,6 @@ packages: engines: {node: '>=8'} dev: true - /normals/1.1.0: - resolution: {integrity: sha512-XWeliW48BLvbVJ+cjQAOE+tA0m1M7Yi1iTPphAS9tBmW1A/c/cOVnEUecPCCMH5lEAihAcG6IRle56ls9k3xug==} - dev: false - /npm-run-all/4.1.5: resolution: {integrity: sha512-Oo82gJDAVcaMdi3nuoKFavkIHBRVqQ1qvMb+9LHk/cF4P6B2m8aP04hGf7oL6wZ9BuGwX1onlLhpuoofSyoQDQ==} engines: {node: '>= 4'} @@ -11730,10 +11157,6 @@ packages: is-finite: 1.1.0 dev: false - /numeric/1.2.6: - resolution: {integrity: sha512-avBiDAP8siMa7AfJgYyuxw1oyII4z2sswS23+O+ZfV28KrtNzy0wxUFwi4f3RyM4eeeXNs1CThxR7pb5QQcMiw==} - dev: false - /nwsapi/2.2.7: resolution: {integrity: sha512-ub5E4+FBPKwAZx0UwIQOjYWGHTEq5sPqHQNRN8Z9e4A7u3Tj1weLJsL59yH9vmvqEtBHaOmT6cYQKIZOxp35FQ==} dev: false @@ -11946,13 +11369,6 @@ packages: type-check: 0.3.2 word-wrap: 1.2.5 - /orbit-camera-controller/4.0.0: - resolution: {integrity: sha512-/XTmpr6FUT6MuKPBGN2nv9cS8jhhVs8do71VagBQS5p4rxM04MhqSnI/Uu+gVNN5s6KPcS73o1dHzjuDThEJUA==} - dependencies: - filtered-vector: 1.2.5 - gl-mat4: 1.2.0 - dev: false - /os-browserify/0.3.0: resolution: {integrity: sha512-gjcpUc3clBf9+210TRaDWbf+rZZZEshZ+DlXMRCeAjp0xhTrnQsKHypIy1J3d5hKdUzj69t708EHtU8P6bUn0A==} @@ -12053,13 +11469,6 @@ packages: semver: 6.3.1 dev: true - /pad-left/1.0.2: - resolution: {integrity: sha512-saxSV1EYAytuZDtQYEwi0DPzooG6aN18xyHrnJtzwjVwmMauzkEecd7hynVJGolNGk1Pl9tltmZqfze4TZTCxg==} - engines: {node: '>=0.10.0'} - dependencies: - repeat-string: 1.6.1 - dev: false - /pako/1.0.11: resolution: {integrity: sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==} @@ -12097,6 +11506,17 @@ packages: pbkdf2: 3.1.2 safe-buffer: 5.2.1 + /parse-asn1/5.1.7: + resolution: {integrity: sha512-CTM5kuWR3sx9IFamcl5ErfPl6ea/N8IYwiJ+vpeB2g+1iknv7zBl5uPwbMbRVznRVbrNY6lGuDoE5b30grmbqg==} + engines: {node: '>= 0.10'} + dependencies: + asn1.js: 4.10.1 + browserify-aes: 1.2.0 + evp_bytestokey: 1.0.3 + hash-base: 3.0.4 + pbkdf2: 3.1.2 + safe-buffer: 5.2.1 + /parse-json/2.2.0: resolution: {integrity: sha512-QR/GGaKCkhwk1ePQNYDRKYZ3mwU9ypsKhB0XyFnLQdomyEqk3e8wpW3V5Jp88zbxK4n5ST1nqo+g9juTpownhQ==} engines: {node: '>=0.10.0'} @@ -12248,19 +11668,6 @@ packages: /performance-now/2.1.0: resolution: {integrity: sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow==} - /permutation-parity/1.0.0: - resolution: 
{integrity: sha512-mRaEvnnWolbZuErWD08StRUZP9YOWG3cURP5nYpRg1D2PENzPXCUrPv8/bOk0tfln0hISLZjOdOcQCbsVpL2nQ==} - dependencies: - typedarray-pool: 1.2.0 - dev: false - - /permutation-rank/1.0.0: - resolution: {integrity: sha512-kmXwlQcd4JlV8g61jz0xDyroFNlJ/mP+KbSBllMuQD7FvaQInRnnAStElcppkUXd8qVFLvemy6msUmBn7sDzHg==} - dependencies: - invert-permutation: 1.0.0 - typedarray-pool: 1.2.0 - dev: false - /pick-by-alias/1.2.0: resolution: {integrity: sha512-ESj2+eBxhGrcA1azgHs7lARG5+5iLakc/6nlfbpjcLl00HuuUOIuORhYXN4D1HfvMSKuVtFQjAlnwi1JHEeDIw==} dev: false @@ -12340,94 +11747,56 @@ packages: find-up: 3.0.0 dev: false - /planar-dual/1.0.2: - resolution: {integrity: sha512-jfQCbX1kXu53+enC+BPQlfoZI1u5m8IUhFVtFG+9tUj84wnuaYNheR69avYWCNXWnUCkwUajmYMqX9M2Ruh4ug==} - dependencies: - compare-angle: 1.0.1 - dup: 1.0.0 - dev: false - - /planar-graph-to-polyline/1.0.6: - resolution: {integrity: sha512-h8a9kdAjo7mRhC0X6HZ42xzFp7vKDZA+Hygyhsq/08Qi4vVAQYJaLLYLvKUUzRbVKvdYqq0reXHyV0EygyEBHA==} - dependencies: - edges-to-adjacency-list: 1.0.0 - planar-dual: 1.0.2 - point-in-big-polygon: 2.0.1 - robust-orientation: 1.2.1 - robust-sum: 1.0.0 - two-product: 1.0.2 - uniq: 1.0.1 - dev: false - /please-upgrade-node/3.2.0: resolution: {integrity: sha512-gQR3WpIgNIKwBMVLkpMUeR3e1/E1y42bqDQZfql+kDeXd8COYfM8PQA4X6y7a8u9Ua9FHmsrrmirW2vHs45hWg==} dependencies: semver-compare: 1.0.0 dev: true - /plotly.js/1.58.5: - resolution: {integrity: sha512-ChTlnFXB4tB0CzcG1mqgUKYnrJsZ8REDGox8BHAa/ltsd48MOAhOmFgjyDxwsXyjjgwOI296GeYDft8g4ftLHQ==} + /plotly.js/2.25.2: + resolution: {integrity: sha512-Pf6dPYGl21W7A3FTgLQ52fpgvrqGhCPDT3+612bxwg4QXlvxhnoFwvuhT1BRW/l2nbYGpRoUH79K54yf2vCMVQ==} dependencies: + '@plotly/d3': 3.8.1 '@plotly/d3-sankey': 0.7.2 '@plotly/d3-sankey-circular': 0.33.1 - '@plotly/point-cluster': 3.1.9 '@turf/area': 6.5.0 '@turf/bbox': 6.5.0 '@turf/centroid': 6.5.0 - alpha-shape: 1.0.0 canvas-fit: 1.5.0 color-alpha: 1.0.4 color-normalize: 1.5.0 color-parse: 1.3.8 color-rgba: 2.1.1 - convex-hull: 1.0.3 country-regex: 1.1.0 - d3: 3.5.17 d3-force: 1.2.1 + d3-format: 1.4.5 + d3-geo: 1.12.1 + d3-geo-projection: 2.9.0 d3-hierarchy: 1.1.9 - d3-interpolate: 1.4.0 + d3-interpolate: 3.0.1 + d3-time: 1.1.0 d3-time-format: 2.3.0 - delaunay-triangulate: 1.1.6 - es6-promise: 4.2.8 fast-isnumeric: 1.1.4 - gl-cone3d: 1.5.2 - gl-contour2d: 1.1.7 - gl-error3d: 1.0.16 - gl-heatmap2d: 1.1.1 - gl-line3d: 1.2.1 gl-mat4: 1.2.0 - gl-mesh3d: 2.3.1 - gl-plot2d: 1.4.5 - gl-plot3d: 2.4.7 - gl-pointcloud2d: 1.0.3 - gl-scatter3d: 1.2.3 - gl-select-box: 1.0.4 - gl-spikes2d: 1.0.2 - gl-streamtube3d: 1.4.1 - gl-surface3d: 1.6.0 gl-text: 1.3.1 glslify: 7.1.1 has-hover: 1.0.1 has-passive-events: 1.0.0 - image-size: 0.7.5 - is-mobile: 2.2.2 + is-mobile: 4.0.0 mapbox-gl: 1.10.1 - matrix-camera-controller: 2.1.4 mouse-change: 1.4.0 mouse-event-offset: 3.0.2 mouse-wheel: 1.2.0 - ndarray: 1.0.19 - ndarray-linear-interpolate: 1.0.0 + native-promise-only: 0.8.1 parse-svg-path: 0.1.2 + point-in-polygon: 1.1.0 polybooljs: 1.2.0 - regl: 1.7.0 + probe-image-size: 7.2.3 + regl: /@plotly/regl/2.1.2 regl-error2d: 2.0.12 regl-line2d: 3.1.2 regl-scatter2d: 3.2.9 regl-splom: 1.0.14 - right-now: 1.0.0 - robust-orientation: 1.2.1 - sane-topojson: 4.0.0 strongly-connected-components: 1.0.1 superscript-text: 1.0.0 svg-path-sdf: 1.1.3 @@ -12436,6 +11805,8 @@ packages: topojson-client: 3.1.0 webgl-context: 2.2.0 world-calendars: 1.0.3 + transitivePeerDependencies: + - supports-color dev: false /plur/3.1.1: @@ -12463,25 +11834,14 @@ packages: - typescript dev: false - 
/point-in-big-polygon/2.0.1: - resolution: {integrity: sha512-DtrN8pa2VfMlvmWlCcypTFeBE4+OYz1ojDNJLKCWa4doiVAD6PRBbxFYAT71tsp5oKaRXT5sxEiHCAQKb1zr2Q==} - dependencies: - binary-search-bounds: 2.0.5 - interval-tree-1d: 1.0.4 - robust-orientation: 1.2.1 - slab-decomposition: 1.0.3 + /point-in-polygon/1.1.0: + resolution: {integrity: sha512-3ojrFwjnnw8Q9242TzgXuTD+eKiutbzyslcq1ydfu82Db2y+Ogbmyrkpv0Hgj31qwT3lbS9+QAAO/pIQM35XRw==} dev: false /polybooljs/1.2.0: resolution: {integrity: sha512-mKjR5nolISvF+q2BtC1fi/llpxBPTQ3wLWN8+ldzdw2Hocpc8C72ZqnamCM4Z6z+68GVVjkeM01WJegQmZ8MEQ==} dev: false - /polytope-closest-point/1.0.0: - resolution: {integrity: sha512-rvmt1e2ci9AUyWeHg+jsNuhGC4eBtxX4WjD9uDdvQzv2I1CVJSgbblJTslNXpGUu4KZSsUtSzvIdHKRKfRF3kw==} - dependencies: - numeric: 1.2.6 - dev: false - /portfinder/1.0.32_supports-color@6.1.0: resolution: {integrity: sha512-on2ZJVVDXRADWE6jnQaX0ioEylzgBpQk8r55NE4wjXW1ZxO+BgDlY6DXwj20i0V8eB4SenDQ00WEaxfiIQPcxg==} engines: {node: '>= 0.12.0'} @@ -13214,6 +12574,16 @@ packages: parse-ms: 2.1.0 dev: false + /probe-image-size/7.2.3: + resolution: {integrity: sha512-HubhG4Rb2UH8YtV4ba0Vp5bQ7L78RTONYu/ujmCu5nBI8wGv24s4E9xSKBi0N1MowRpxk76pFCpJtW0KPzOK0w==} + dependencies: + lodash.merge: 4.6.2 + needle: 2.9.1 + stream-parser: 0.3.1 + transitivePeerDependencies: + - supports-color + dev: false + /process-nextick-args/2.0.1: resolution: {integrity: sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==} @@ -13288,6 +12658,10 @@ packages: forwarded: 0.2.0 ipaddr.js: 1.9.1 + /proxy-from-env/1.1.0: + resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} + dev: false + /prr/1.0.1: resolution: {integrity: sha512-yPw4Sng1gWghHQWj0B3ZggWUm4qVbPwPFcRG8KyxiU7J2OHFSoEHKS+EZ3fv5l1t9CyCiop6l/ZYeWbrgoQejw==} dev: false @@ -13365,12 +12739,6 @@ packages: resolution: {integrity: sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA==} engines: {node: '>=0.6'} - /quat-slerp/1.0.1: - resolution: {integrity: sha512-OTozCDeP5sW7cloGR+aIycctZasBhblk1xdsSGP1Iz5pEwDqyChloTmc96xsDfusFD7GRxwDDu+tpJX0Wa1kJw==} - dependencies: - gl-quat: 1.0.0 - dev: false - /query-string/4.3.4: resolution: {integrity: sha512-O2XLNDBIg1DnTOa+2XrIwSiXEV8h2KImXUnjhhn2+UsvZ+Es2uyd5CCRTNQlDGbzUQOW3aYCBx9rVA6dzsiY7Q==} engines: {node: '>=0.10.0'} @@ -13417,21 +12785,6 @@ packages: resolution: {integrity: sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==} engines: {node: '>= 0.6'} - /rat-vec/1.1.1: - resolution: {integrity: sha512-FbxGwkQxmw4Jx41LR7yMOR+g8M9TWCEmf/SUBQVLuK2eh0nThnffF7IUualr3XE2x5F8AdLiCVeSGwXd4snfgg==} - dependencies: - big-rat: 1.0.4 - dev: false - - /raw-body/2.5.1: - resolution: {integrity: sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==} - engines: {node: '>= 0.8'} - dependencies: - bytes: 3.1.2 - http-errors: 2.0.0 - iconv-lite: 0.4.24 - unpipe: 1.0.0 - /raw-body/2.5.2: resolution: {integrity: sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==} engines: {node: '>= 0.8'} @@ -13440,7 +12793,6 @@ packages: http-errors: 2.0.0 iconv-lite: 0.4.24 unpipe: 1.0.0 - dev: true /rc-align/2.4.5: resolution: {integrity: sha512-nv9wYUYdfyfK+qskThf4BQUSIadeI/dCsfaMZfNEoxm9HwOIioQ+LyqmMK6jWHAZQgOzMLaqawhuBXlF63vgjw==} @@ -14069,13 +13421,13 @@ packages: resolution: {integrity: 
sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA==} dev: false - /react-plotly.js/2.6.0_f6dluzp62qf57yw3gl4ocsg3e4: + /react-plotly.js/2.6.0_qtjenpcawcnnxnr626ndcvhi4u: resolution: {integrity: sha512-g93xcyhAVCSt9kV1svqG1clAEdL6k3U+jjuSzfTV7owaSU9Go6Ph8bl25J+jKfKvIGAEYpe4qj++WHJuc9IaeA==} peerDependencies: plotly.js: '>1.34.0' react: '>0.13.0' dependencies: - plotly.js: 1.58.5 + plotly.js: 2.25.2 prop-types: 15.8.1 react: 16.14.0 dev: false @@ -14391,14 +13743,6 @@ packages: strip-indent: 2.0.0 dev: true - /reduce-simplicial-complex/1.0.0: - resolution: {integrity: sha512-t+nT7sHDtcxBx8TbglqfLsLKoFiSn9hp6GFojJEThHBAFv72wQeq/uRiPYZa4Xb8FR1Ye1foRcBV3Ki6bgm+pQ==} - dependencies: - cell-orientation: 1.0.1 - compare-cell: 1.0.0 - compare-oriented-cell: 1.0.1 - dev: false - /reflect.getprototypeof/1.0.3: resolution: {integrity: sha512-TTAOZpkJ2YLxl7mVHWrNo3iDMEkYlva/kgFcXndqMgbo/AZUmmavEkdXV+hXtE4P8xdyEKRzalaFqZVuwIk/Nw==} engines: {node: '>= 0.4'} @@ -14567,10 +13911,6 @@ packages: regl-scatter2d: 3.2.9 dev: false - /regl/1.7.0: - resolution: {integrity: sha512-bEAtp/qrtKucxXSJkD4ebopFZYP0q1+3Vb2WECWv/T8yQEgKxDxJ7ztO285tAMaYZVR6mM1GgI6CCn8FROtL1w==} - dev: false - /regl/2.1.0: resolution: {integrity: sha512-oWUce/aVoEvW5l2V0LK7O5KJMzUSKeiOwFuJehzpSFd43dO5spP9r+sSUfhKtsky4u6MCqWJaRL+abzExynfTg==} dev: false @@ -14830,78 +14170,6 @@ packages: classnames: 2.3.2 dev: false - /robust-compress/1.0.0: - resolution: {integrity: sha512-E8btSpQ6zZr7LvRLrLvb+N5rwQ0etUbsXFKv5NQj6TVK6RYT00Qg9iVFvIWR+GxXUvpes7FDN0WfXa3l7wtGOw==} - dev: false - - /robust-determinant/1.1.0: - resolution: {integrity: sha512-xva9bx/vyAv3pVYL2++vlnvM9q7oQOeCS5iscmlWtmaXHEgI4GFWeuYPUVVhvmYwx9N49EsQTonVJihYtcMo1Q==} - dependencies: - robust-compress: 1.0.0 - robust-scale: 1.0.2 - robust-sum: 1.0.0 - two-product: 1.0.2 - dev: false - - /robust-dot-product/1.0.0: - resolution: {integrity: sha512-Nu/wah8B8RotyZLRPdlEL0ZDh3b7wSwUBLdbTHwS/yw0qqjMJ943PSCkd6EsF5R5QFDWF2x77DGsbmnv9/7/ew==} - dependencies: - robust-sum: 1.0.0 - two-product: 1.0.2 - dev: false - - /robust-in-sphere/1.2.1: - resolution: {integrity: sha512-3zJdcMIOP1gdwux93MKTS0RiMYEGwQBoE5R1IW/9ZQmGeZzP7f7i4+xdcK8ujJvF/dEOS1WPuI9IB1WNFbj3Cg==} - dependencies: - robust-scale: 1.0.2 - robust-subtract: 1.0.0 - robust-sum: 1.0.0 - two-product: 1.0.2 - dev: false - - /robust-linear-solve/1.0.0: - resolution: {integrity: sha512-I1qW8Bl9+UYeGNh2Vt8cwkcD74xWMyjnU6lSVcZrf0eyfwPmreflY3v0SvqCZOj5ddxnSS1Xp31igbFNcg1TGQ==} - dependencies: - robust-determinant: 1.1.0 - dev: false - - /robust-orientation/1.2.1: - resolution: {integrity: sha512-FuTptgKwY6iNuU15nrIJDLjXzCChWB+T4AvksRtwPS/WZ3HuP1CElCm1t+OBfgQKfWbtZIawip+61k7+buRKAg==} - dependencies: - robust-scale: 1.0.2 - robust-subtract: 1.0.0 - robust-sum: 1.0.0 - two-product: 1.0.2 - dev: false - - /robust-product/1.0.0: - resolution: {integrity: sha512-7ww6m+ICW6Dt7ylHVy1aeeNwTfMXfh2BHqHVNE+CHvrU9sI97Vb6uHnid0MN3I9afTI5DXOB7q4SQa2fxuo2Gw==} - dependencies: - robust-scale: 1.0.2 - robust-sum: 1.0.0 - dev: false - - /robust-scale/1.0.2: - resolution: {integrity: sha512-jBR91a/vomMAzazwpsPTPeuTPPmWBacwA+WYGNKcRGSh6xweuQ2ZbjRZ4v792/bZOhRKXRiQH0F48AvuajY0tQ==} - dependencies: - two-product: 1.0.2 - two-sum: 1.0.0 - dev: false - - /robust-segment-intersect/1.0.1: - resolution: {integrity: sha512-QWngxcL7rCRLK7nTMcTNBPi/q+fecrOo6aOtTPnXjT/Dve5AK20DzUSq2fznUS+rCAxyir6OdPgDCzcUxFtJoQ==} - dependencies: - robust-orientation: 1.2.1 - dev: false - - /robust-subtract/1.0.0: - resolution: {integrity: 
sha512-xhKUno+Rl+trmxAIVwjQMiVdpF5llxytozXJOdoT4eTIqmqsndQqFb1A0oiW3sZGlhMRhOi6pAD4MF1YYW6o/A==} - dev: false - - /robust-sum/1.0.0: - resolution: {integrity: sha512-AvLExwpaqUqD1uwLU6MwzzfRdaI6VEZsyvQ3IAQ0ZJ08v1H+DTyqskrf2ZJyh0BDduFVLN7H04Zmc+qTiahhAw==} - dev: false - /rsvp/4.8.5: resolution: {integrity: sha512-nfMOlASu9OnRJo1mbEk2cz0D56a1MBNrJ7orjRZQG10XDyuvwksKbuXNp6qa+kbn839HwjwhBzhFmdsaEAfauA==} engines: {node: 6.* || >= 7.*} @@ -14963,10 +14231,6 @@ packages: /safer-buffer/2.1.2: resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} - /sane-topojson/4.0.0: - resolution: {integrity: sha512-bJILrpBboQfabG3BNnHI2hZl52pbt80BE09u4WhnrmzuF2JbMKZdl62G5glXskJ46p+gxE2IzOwGj/awR4g8AA==} - dev: false - /sane/4.1.0: resolution: {integrity: sha512-hhbzAgTIX8O7SHfp2c8/kREfEn4qO/9q8C9beyY6+tvZ87EpoZ3i1RIEvp27YBswnNbY9mWd6paKVmKbAgLfZA==} engines: {node: 6.* || 8.* || >= 10.*} @@ -15291,10 +14555,6 @@ packages: /signal-exit/3.0.7: resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==} - /signum/0.0.0: - resolution: {integrity: sha512-nct2ZUmwemVxeuPY5h+JLpHGJvLCXXNahGVI7IB3a6Fy5baX9AGSb854HceYH4FBw4eGjoZfEo9YRfkGfKdZQA==} - dev: false - /signum/1.0.0: resolution: {integrity: sha512-yodFGwcyt59XRh7w5W3jPcIQb3Bwi21suEfT7MAWnBX3iCdklJpgDgvGT9o04UonglZN5SNMfJFkHIR/jO8GHw==} dev: false @@ -15305,55 +14565,10 @@ packages: is-arrayish: 0.3.2 dev: false - /simplicial-complex-boundary/1.0.1: - resolution: {integrity: sha512-hz/AaVbs+s08EVoxlbCE68AlC6/mxFJLxJrGRMbDoTjz3030nhcOq+w5+f0/ZaU2EYjmwa8CdVKpiRVIrhaZjA==} - dependencies: - boundary-cells: 2.0.2 - reduce-simplicial-complex: 1.0.0 - dev: false - - /simplicial-complex-contour/1.0.2: - resolution: {integrity: sha512-Janyqvpa7jgr9MJbwR/XGyYz7bdhXNq7zgHxD0G54LCRNyn4bf3Hely2iWQeK/IGu3c5BaWFUh7ElxqXhKrq0g==} - dependencies: - marching-simplex-table: 1.0.0 - ndarray: 1.0.19 - ndarray-sort: 1.0.1 - typedarray-pool: 1.2.0 - dev: false - - /simplicial-complex/0.3.3: - resolution: {integrity: sha512-JFSxp7I5yORuKSuwGN96thhkqZVvYB4pkTMkk+PKP2QsOYYU1e84OBoHwOpFyFmjyvB9B3UDZKzHQI5S/CPUPA==} - dependencies: - bit-twiddle: 0.0.2 - union-find: 0.0.4 - dev: false - - /simplicial-complex/1.0.0: - resolution: {integrity: sha512-mHauIKSOy3GquM5VnYEiu7eP5y4A8BiaN9ezUUgyYFz1k68PqDYcyaH3kenp2cyvWZE96QKE3nrxYw65Allqiw==} - dependencies: - bit-twiddle: 1.0.2 - union-find: 1.0.2 - dev: false - - /simplify-planar-graph/2.0.1: - resolution: {integrity: sha512-KdC2ZPFvrGl9+lH/P3Yik7G0si2Zpk6Xiqjq8l9U1lOox5a/9dGLjevi9tvqoh4V7yQbs7fs6+rNCOAdrzUktw==} - dependencies: - robust-orientation: 1.2.1 - simplicial-complex: 0.3.3 - dev: false - /sisteransi/1.0.5: resolution: {integrity: sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==} dev: false - /slab-decomposition/1.0.3: - resolution: {integrity: sha512-1EfR304JHvX9vYQkUi4AKqN62mLsjk6W45xTk/TxwN8zd3HGwS7PVj9zj0I6fgCZqfGlimDEY+RzzASHn97ZmQ==} - dependencies: - binary-search-bounds: 2.0.5 - functional-red-black-tree: 1.0.1 - robust-orientation: 1.2.1 - dev: false - /slash/1.0.0: resolution: {integrity: sha512-3TYDR7xWt4dIqV2JauJr+EJeW356RXijHeUlO+8djJ+uBXPn8/2dpzBc8yQhh583sVvc9CvFAeQVgijsH+PNNg==} engines: {node: '>=0.10.0'} @@ -15526,13 +14741,6 @@ packages: - supports-color dev: false - /split-polygon/1.0.0: - resolution: {integrity: sha512-nBFcgQUVEE8dcOjuKaRdlM53k8RxUYpRxZ//n0pHJQGhbVscrsti+gllJI3pK3y7fgFwGWgt7NFhAX5sz0UoWQ==} - dependencies: - 
robust-dot-product: 1.0.0 - robust-sum: 1.0.0 - dev: false - /split-string/3.1.0: resolution: {integrity: sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==} engines: {node: '>=0.10.0'} @@ -15542,10 +14750,6 @@ packages: /sprintf-js/1.0.3: resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==} - /sprintf-js/1.1.2: - resolution: {integrity: sha512-VE0SOVEHCk7Qc8ulkWw3ntAzXuqf7S2lvwQaDLRnUeIEaKNQJzV6BwmLKhOqT61aGhfUMrXeaBk+oDGCzvhcug==} - dev: false - /sshpk/1.17.0: resolution: {integrity: sha512-/9HIEs1ZXGhSPE8X6Ccm7Nam1z8KcoCqPdI7ecm1N33EzAetWahvQWVqLZtaZQ+IDKX4IyA2o0gBzqIMkAagHQ==} engines: {node: '>=0.10.0'} @@ -15646,6 +14850,14 @@ packages: to-arraybuffer: 1.0.1 xtend: 4.0.2 + /stream-parser/0.3.1: + resolution: {integrity: sha512-bJ/HgKq41nlKvlhccD5kaCr/P+Hu0wPNKPJOH7en+YrJu/9EgqUF+88w5Jb6KNcjOFMhfX4B2asfeAtIGuHObQ==} + dependencies: + debug: 2.6.9 + transitivePeerDependencies: + - supports-color + dev: false + /stream-shift/1.0.1: resolution: {integrity: sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==} dev: false @@ -15900,14 +15112,6 @@ packages: resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} engines: {node: '>= 0.4'} - /surface-nets/1.0.2: - resolution: {integrity: sha512-Se+BaCb5yc8AV1IfT6TwTWEe/KuzzjzcMQQCbcIahzk9xRO5bIxxGM2MmKxE9nmq8+RD8DLBLXu0BjXoRs21iw==} - dependencies: - ndarray-extract-contour: 1.0.1 - triangulate-hypercube: 1.0.1 - zero-crossings: 1.0.1 - dev: false - /svg-arc-to-cubic-bezier/3.2.0: resolution: {integrity: sha512-djbJ/vZKZO+gPoSDThGNpKDO+o+bAeA4XQKovvkNCqnIS2t+S4qnLAGQhyyrulhCFRl1WWzAp0wUDV8PpTVU3g==} dev: false @@ -16050,12 +15254,6 @@ packages: require-main-filename: 2.0.0 dev: false - /text-cache/4.2.2: - resolution: {integrity: sha512-zky+UDYiX0a/aPw/YTBD+EzKMlCTu1chFuCMZeAkgoRiceySdROu1V2kJXhCbtEdBhiOviYnAdGiSYl58HW0ZQ==} - dependencies: - vectorize-text: 3.2.2 - dev: false - /text-table/0.2.0: resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==} @@ -16207,20 +15405,6 @@ packages: punycode: 2.3.0 dev: false - /triangulate-hypercube/1.0.1: - resolution: {integrity: sha512-SAIacSBfUNfgeCna8q2i+1taOtFJkYuOqpduaJ1KUeOJpqc0lLKMYzPnZb4CA6KCOiD8Pd4YbuVq41wa9dvWyw==} - dependencies: - gamma: 0.1.0 - permutation-parity: 1.0.0 - permutation-rank: 1.0.0 - dev: false - - /triangulate-polyline/1.0.3: - resolution: {integrity: sha512-crJcVFtVPFYQ8r9iIhe9JqkauDvNWDSZLot8ly3DniSCO+zyUfKbtfD3fEoBaA5uMrQU/zBi11NBuVQeSToToQ==} - dependencies: - cdt2d: 1.0.0 - dev: false - /trim-newlines/2.0.0: resolution: {integrity: sha512-MTBWv3jhVjTU7XR3IQHllbiJs8sc75a80OEhB6or/q7pLTWgQ0bMGQXXYQSrSuXe6WiKWDZ5txXY5P59a/coVA==} engines: {node: '>=4'} @@ -16271,25 +15455,9 @@ packages: dependencies: safe-buffer: 5.2.1 - /turntable-camera-controller/3.0.1: - resolution: {integrity: sha512-UOGu9W/Mx053pAaczi0BEPqvWJOqSgtpdigWG9C8dX8rQVdyl2hWmpdJW3m15QrGxJtJHIhhDTHVtTZzPkd/FA==} - dependencies: - filtered-vector: 1.2.5 - gl-mat4: 1.2.0 - gl-vec3: 1.1.3 - dev: false - /tweetnacl/0.14.5: resolution: {integrity: sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==} - /two-product/1.0.2: - resolution: {integrity: sha512-vOyrqmeYvzjToVM08iU52OFocWT6eB/I5LUWYnxeAPGXAhAxXYU/Yr/R2uY5/5n4bvJQL9AQulIuxpIsMoT8XQ==} - dev: false - - /two-sum/1.0.0: - resolution: 
{integrity: sha512-phP48e8AawgsNUjEY2WvoIWqdie8PoiDZGxTDv70LDr01uX5wLEQbOgSP7Z/B6+SW5oLtbe8qaYX2fKJs3CGTw==} - dev: false - /type-check/0.3.2: resolution: {integrity: sha512-ZCmOJdvOWDBYJlzAoFkC+Q0+bUyEOS1ltgp1MGU03fqHG+dbi9tBFU2Rd9QKiDZFAYrhPh2JUf7rZRIuHRKtOg==} engines: {node: '>= 0.8.0'} @@ -16430,14 +15598,6 @@ packages: engines: {node: '>=4'} dev: false - /union-find/0.0.4: - resolution: {integrity: sha512-207oken6EyGDCBK5l/LTPsWfgy8N8s6idwRK2TG0ssWhzPlxEDdBA8nIV+eLbkEMdA8pAwE8F7/xwv2sCESVjQ==} - dev: false - - /union-find/1.0.2: - resolution: {integrity: sha512-wFA9bMD/40k7ZcpKVXfu6X1qD3ri5ryO8HUsuA1RnxPCQl66Mu6DgkxyR+XNnd+osD0aLENixcJVFj+uf+O4gw==} - dev: false - /union-value/1.0.1: resolution: {integrity: sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==} engines: {node: '>=0.10.0'} @@ -16684,18 +15844,6 @@ packages: resolution: {integrity: sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==} engines: {node: '>= 0.8'} - /vectorize-text/3.2.2: - resolution: {integrity: sha512-34NVOCpMMQVXujU4vb/c6u98h6djI0jGdtC202H4Huvzn48B6ARsR7cmGh1xsAc0pHNQiUKGK/aHF05VtGv+eA==} - dependencies: - cdt2d: 1.0.0 - clean-pslg: 1.1.2 - ndarray: 1.0.19 - planar-graph-to-polyline: 1.0.6 - simplify-planar-graph: 2.0.1 - surface-nets: 1.0.2 - triangulate-polyline: 1.0.3 - dev: false - /vendors/1.0.4: resolution: {integrity: sha512-/juG65kTL4Cy2su4P8HjtkTxk6VmJDiOPBufWniqQ6wknac6jNiXS9vU+hO3wgusiyqWlzTbVHi0dyJqRONg3w==} dev: false @@ -16778,10 +15926,6 @@ packages: resolution: {integrity: sha512-lNR9aAefbGPpHO7AEnY0hCFjz1eTkWCXYvkTRrTHs9qv8zJp+SkVYpzfLIFXQQiG3tVvbNFQgVg2bQS8YGgxyw==} dev: false - /weakmap-shim/1.1.1: - resolution: {integrity: sha512-/wNyG+1FpiHhnfQo+TuA/XAUpvOOkKVl0A4qpT+oGcj5SlZCLmM+M1Py/3Sj8sy+YrEauCVITOxCsZKo6sPbQg==} - dev: false - /webgl-context/2.2.0: resolution: {integrity: sha512-q/fGIivtqTT7PEoF07axFIlHNk/XCPaYpq64btnepopSWvKNFkoORlQYgqDigBIuGA1ExnFd/GnSUnBNEPQY7Q==} dependencies: @@ -16824,12 +15968,12 @@ packages: connect-history-api-fallback: 1.6.0 debug: 4.3.4_supports-color@6.1.0 del: 4.1.1 - express: 4.18.2_supports-color@6.1.0 + express: 4.19.2_supports-color@6.1.0 html-entities: 1.4.0 http-proxy-middleware: 0.19.1_tmpgdztspuwvsxzgjkhoqk7duq import-local: 2.0.0 internal-ip: 4.3.0 - ip: 1.1.8 + ip: 1.1.9 is-absolute-url: 3.0.3 killable: 1.0.1 loglevel: 1.8.1 @@ -17427,9 +16571,3 @@ packages: y18n: 4.0.3 yargs-parser: 15.0.3 dev: true - - /zero-crossings/1.0.1: - resolution: {integrity: sha512-iNIldMZaDtAyIJMJ8NnGVHeejH//y4eVmpXriM+q/B/BPNz+2E7oAgSnw9MXqCd3RbQ8W+hor7T2jEyRoc/s2A==} - dependencies: - cwise-compiler: 1.1.3 - dev: false diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx index 19f306ae4f6b..c42bd8c1f91b 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx @@ -17,7 +17,7 @@ */ import React from 'react'; -import {Table, Icon, Tooltip} from 'antd'; +import {Table, Icon, Tooltip, Popover} from 'antd'; import {PaginationConfig} from 'antd/lib/pagination'; import moment from 'moment'; import {ReplicationIcon} from 'utils/themeIcons'; @@ -192,21 +192,36 @@ const COLUMNS = [ key: 'pipelines', isVisible: true, render: (pipelines: IPipeline[], record: 
IDatanode) => { + let firstThreePipelinesIDs = []; + let remainingPipelinesIDs: any[] = []; + firstThreePipelinesIDs = pipelines && pipelines.filter((element, index) => index < 3); + remainingPipelinesIDs = pipelines && pipelines.slice(3, pipelines.length); + + const RenderPipelineIds = ({ pipelinesIds }) => { + return pipelinesIds && pipelinesIds.map((pipeline: any, index: any) => ( +

+            <div key={index} className='pipeline-container'>
+              <ReplicationIcon
+                replicationFactor={pipeline.replicationFactor}
+                replicationType={pipeline.replicationType}
+                leaderNode={pipeline.leaderNode}
+                isLeader={pipeline.leaderNode === record.hostname}/>
+              {pipeline.pipelineID}
+            </div>
+          ))
+        }
+
       return (
-          <div>
+          <>
           {
-            pipelines && pipelines.map((pipeline, index) => (
-              <div key={index} className='pipeline-container'>
-                <ReplicationIcon
-                  replicationFactor={pipeline.replicationFactor}
-                  replicationType={pipeline.replicationType}
-                  leaderNode={pipeline.leaderNode}
-                  isLeader={pipeline.leaderNode === record.hostname}/>
-                {pipeline.pipelineID}
-              </div>
-            ))
+            <RenderPipelineIds pipelinesIds={firstThreePipelinesIDs} />
           }
-          </div>
    + { + remainingPipelinesIDs.length > 0 && + } title="Remaining pipelines" placement="rightTop" trigger="hover"> + {`... and ${remainingPipelinesIDs.length} more pipelines`} + + } + ); } }, diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.less index 15d68dfc8600..a2f4c088c566 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.less +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.less @@ -24,4 +24,19 @@ .ant-pagination-disabled, .ant-pagination-disabled:hover, .ant-pagination-disabled:focus { color: rgba(0, 0, 0, 0.65); cursor: pointer !important; - } \ No newline at end of file + } + +.multi-select-container { + padding-left: 5px; + margin-right: 5px; + display: inline-block; + min-width: 200px; + z-index: 99; +} + +.limit-block { + font-size: 14px; + font-weight: normal; + display: inline-block; + margin-left: 20px; +} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx index 1846592b8995..fdd3dc85e19e 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx @@ -24,17 +24,14 @@ import moment from 'moment'; import { showDataFetchError, byteToSize } from 'utils/common'; import './om.less'; import { ColumnSearch } from 'utils/columnSearch'; -import { Link } from 'react-router-dom'; import { AxiosGetHelper, cancelRequests } from 'utils/axiosRequestHelper'; +import {IOption} from "../../../components/multiSelect/multiSelect"; +import {ActionMeta, ValueType} from "react-select"; +import CreatableSelect from "react-select/creatable"; const size = filesize.partial({ standard: 'iec' }); const { TabPane } = Tabs; -//Previous Key Need to store respective Lastkey of each API -let mismatchPrevKeyList = [0]; -let openPrevKeyList =[""]; -let keysPendingPrevList =[""]; -let deletedKeysPrevList =[0]; let keysPendingExpanded: any = []; interface IContainerResponse { containerId: number; @@ -166,7 +163,7 @@ const MISMATCH_TAB_COLUMNS = [ const OPEN_KEY_TAB_COLUMNS = [ { - title: 'Key', + title: 'Key Name', dataIndex: 'path', key: 'path', isSearchable: true @@ -277,6 +274,35 @@ const DELETED_TAB_COLUMNS = [ } ]; +const PENDINGDIR_TAB_COLUMNS = [ + { + title: 'Directory Name', + dataIndex: 'path', + key: 'path' + }, + { + title: 'In state since', + dataIndex: 'inStateSince', + key: 'inStateSince', + render: (inStateSince: number) => { + return inStateSince > 0 ? 
moment(inStateSince).format('ll LTS') : 'NA'; + } + }, + { + title: 'Path', + dataIndex: 'key', + key: 'key', + isSearchable: true, + width: '450px' + }, + { + title: 'Data Size', + dataIndex: 'size', + key: 'size', + render: (dataSize :any) => dataSize = byteToSize(dataSize,1) + } +]; + interface IExpandedRow { [key: number]: IExpandedRowState; } @@ -295,24 +321,29 @@ interface IOmdbInsightsState { pendingDeleteKeyDataSource: any[]; expandedRowData: IExpandedRow; deletedContainerKeysDataSource: []; - prevKeyMismatch: number; mismatchMissingState: any; - prevKeyOpen: string; - prevKeyDeleted: number; - prevKeyDeletePending: string; + pendingDeleteDirDataSource: any[]; activeTab: string; - DEFAULT_LIMIT: number, - nextClickable: boolean; includeFso: boolean; includeNonFso: boolean; - prevClickable: boolean + selectedLimit: IOption; } +const LIMIT_OPTIONS: IOption[] = [ + {label: "1000", value: "1000"}, + {label: "5000", value: "5000"}, + {label: "10000", value: "10000"}, + {label: "20000", value: "20000"} +] + +const INITIAL_LIMIT_OPTION = LIMIT_OPTIONS[0] + let cancelMismatchedEndpointSignal: AbortController; let cancelOpenKeysSignal: AbortController; let cancelDeletePendingSignal: AbortController; let cancelDeletedKeysSignal: AbortController; let cancelRowExpandSignal: AbortController; +let cancelDeletedPendingDirSignal: AbortController; export class Om extends React.Component, IOmdbInsightsState> { @@ -326,18 +357,13 @@ export class Om extends React.Component, IOmdbInsightsSta openKeysDataSource: [], pendingDeleteKeyDataSource: [], deletedContainerKeysDataSource: [], - prevKeyMismatch: 0, + pendingDeleteDirDataSource:[], mismatchMissingState: 'SCM', - prevKeyOpen: "", - prevKeyDeletePending: "", - prevKeyDeleted: 0, expandedRowData: {}, activeTab: props.location.state ? 
props.location.state.activeTab : '1', - DEFAULT_LIMIT: 10, - nextClickable: true, includeFso: true, includeNonFso: false, - prevClickable: false + selectedLimit: INITIAL_LIMIT_OPTION }; } @@ -389,12 +415,10 @@ export class Om extends React.Component, IOmdbInsightsSta handleExistsAtChange = (e: any) => { console.log("handleExistsAtChange", e.key); if (e.key === 'OM') { - mismatchPrevKeyList = [0]; - this.fetchMismatchContainers(this.state.DEFAULT_LIMIT, 0, 'SCM'); + this.fetchMismatchContainers('SCM'); } else { - mismatchPrevKeyList = [0]; - this.fetchMismatchContainers(this.state.DEFAULT_LIMIT, 0, 'OM'); + this.fetchMismatchContainers('OM'); } }; @@ -439,26 +463,30 @@ export class Om extends React.Component, IOmdbInsightsSta handlefsoNonfsoMenuChange = (e: any) => { if (e.key === 'fso') { - openPrevKeyList =[""]; - this.fetchOpenKeys(true, false, this.state.DEFAULT_LIMIT, ""); + this.fetchOpenKeys(true, false); } else { - openPrevKeyList = [""]; - this.fetchOpenKeys(false, true, this.state.DEFAULT_LIMIT, ""); + this.fetchOpenKeys(false, true); } }; - componentDidMount(): void { + _loadData = () => { if (this.state.activeTab === '1') { - this.fetchMismatchContainers(this.state.DEFAULT_LIMIT, this.state.prevKeyMismatch, this.state.mismatchMissingState); + this.fetchMismatchContainers(this.state.mismatchMissingState); } else if (this.state.activeTab === '2') { - this.fetchOpenKeys(this.state.includeFso, this.state.includeNonFso, this.state.DEFAULT_LIMIT, this.state.prevKeyOpen); + this.fetchOpenKeys(this.state.includeFso, this.state.includeNonFso); } else if (this.state.activeTab === '3') { keysPendingExpanded =[]; - this.fetchDeletePendingKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeletePending); + this.fetchDeletePendingKeys(); } else if (this.state.activeTab === '4') { - this.fetchDeletedKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeleted); + this.fetchDeletedKeys(); + } else if (this.state.activeTab === '5') { + this.fetchDeletePendingDir(); } + } + + componentDidMount(): void { + this._loadData(); }; componentWillUnmount(): void { @@ -467,13 +495,12 @@ export class Om extends React.Component, IOmdbInsightsSta cancelDeletePendingSignal && cancelDeletePendingSignal.abort(); cancelDeletedKeysSignal && cancelDeletedKeysSignal.abort(); cancelRowExpandSignal && cancelRowExpandSignal.abort(); + cancelDeletedPendingDirSignal && cancelDeletedPendingDirSignal.abort(); } - fetchMismatchContainers = (limit: number, prevKeyMismatch: number, mismatchMissingState: any) => { + fetchMismatchContainers = (mismatchMissingState: any) => { this.setState({ loading: true, - nextClickable: true, - prevClickable: true, mismatchMissingState }); @@ -483,39 +510,21 @@ export class Om extends React.Component, IOmdbInsightsSta cancelOpenKeysSignal, cancelDeletePendingSignal, cancelDeletedKeysSignal, - cancelRowExpandSignal + cancelRowExpandSignal, + cancelDeletedPendingDirSignal ]); - const mismatchEndpoint = `/api/v1/containers/mismatch?limit=${limit}&prevKey=${prevKeyMismatch}&missingIn=${mismatchMissingState}` + const mismatchEndpoint = `/api/v1/containers/mismatch?limit=${this.state.selectedLimit.value}&missingIn=${mismatchMissingState}` const { request, controller } = AxiosGetHelper(mismatchEndpoint, cancelMismatchedEndpointSignal) cancelMismatchedEndpointSignal = controller; request.then(mismatchContainersResponse => { const mismatchContainers: IContainerResponse[] = mismatchContainersResponse && mismatchContainersResponse.data && mismatchContainersResponse.data.containerDiscrepancyInfo; - if 
(mismatchContainersResponse && mismatchContainersResponse.data && mismatchContainersResponse.data.lastKey === null) { - //No Further Records may be last record - mismatchPrevKeyList = [0]; - this.setState({ - loading: false, - nextClickable: false, - mismatchDataSource: mismatchContainers, - expandedRowData: {}, - }) - } - else { - if (this.state.prevKeyMismatch === 0 ){ - this.setState({ - prevClickable: false - }) - } - if (mismatchPrevKeyList.includes(mismatchContainersResponse.data.lastKey) === false) { - mismatchPrevKeyList.push(mismatchContainersResponse.data.lastKey); - } - this.setState({ - loading: false, - prevKeyMismatch: mismatchContainersResponse && mismatchContainersResponse.data && mismatchContainersResponse.data.lastKey, - mismatchDataSource: mismatchContainers, - }); - } + + this.setState({ + loading: false, + mismatchDataSource: mismatchContainers + }); + }).catch(error => { this.setState({ loading: false, @@ -524,11 +533,9 @@ export class Om extends React.Component, IOmdbInsightsSta }); }; - fetchOpenKeys = (includeFso: boolean, includeNonFso: boolean, limit: number, prevKeyOpen: string) => { + fetchOpenKeys = (includeFso: boolean, includeNonFso: boolean) => { this.setState({ loading: true, - nextClickable: true, - prevClickable: true, includeFso, includeNonFso }); @@ -539,16 +546,11 @@ export class Om extends React.Component, IOmdbInsightsSta cancelOpenKeysSignal, cancelDeletePendingSignal, cancelDeletedKeysSignal, - cancelRowExpandSignal + cancelRowExpandSignal, + cancelDeletedPendingDirSignal ]); - let openKeysEndpoint; - if (prevKeyOpen === "") { - openKeysEndpoint = `/api/v1/keys/open?includeFso=${includeFso}&includeNonFso=${includeNonFso}&limit=${limit}`; - } - else { - openKeysEndpoint = `/api/v1/keys/open?includeFso=${includeFso}&includeNonFso=${includeNonFso}&limit=${limit}&prevKey=${prevKeyOpen}`; - } + let openKeysEndpoint = `/api/v1/keys/open?includeFso=${includeFso}&includeNonFso=${includeNonFso}&limit=${this.state.selectedLimit.value}`; const { request, controller } = AxiosGetHelper(openKeysEndpoint, cancelOpenKeysSignal) cancelOpenKeysSignal = controller @@ -560,31 +562,11 @@ export class Om extends React.Component, IOmdbInsightsSta openKeys[key] && openKeys[key].map((item: any) => (allopenKeysResponse.push({ ...item, type: key }))); } } + this.setState({ + loading: false, + openKeysDataSource: allopenKeysResponse, + }) - if (openKeysResponse && openKeysResponse.data && openKeysResponse.data.lastKey === "") { - //last key of api is null may be last record no further records - openPrevKeyList = [""]; - this.setState({ - loading: false, - nextClickable: false, - openKeysDataSource: allopenKeysResponse - }) - } - else { - if (this.state.prevKeyOpen === "" ){ - this.setState({ - prevClickable: false - }) - } - if (openPrevKeyList.includes(openKeysResponse.data.lastKey) === false) { - openPrevKeyList.push(openKeysResponse.data.lastKey); - } - this.setState({ - loading: false, - prevKeyOpen: openKeysResponse && openKeysResponse.data && openKeysResponse.data.lastKey, - openKeysDataSource: allopenKeysResponse, - }) - }; }).catch(error => { this.setState({ loading: false @@ -594,11 +576,9 @@ export class Om extends React.Component, IOmdbInsightsSta }; - fetchDeletePendingKeys = (limit: number, prevKeyDeletePending: string) => { + fetchDeletePendingKeys = () => { this.setState({ - loading: true, - nextClickable: true, - prevClickable :true + loading: true }); //Cancel any previous pending request @@ -607,17 +587,12 @@ export class Om extends React.Component, 
IOmdbInsightsSta cancelOpenKeysSignal, cancelDeletePendingSignal, cancelDeletedKeysSignal, - cancelRowExpandSignal + cancelRowExpandSignal, + cancelDeletedPendingDirSignal ]); keysPendingExpanded =[]; - let deletePendingKeysEndpoint; - if (prevKeyDeletePending === "" || prevKeyDeletePending === undefined ) { - deletePendingKeysEndpoint = `/api/v1/keys/deletePending?limit=${limit}`; - } - else { - deletePendingKeysEndpoint = `/api/v1/keys/deletePending?limit=${limit}&prevKey=${prevKeyDeletePending}`; - } + let deletePendingKeysEndpoint = `/api/v1/keys/deletePending?limit=${this.state.selectedLimit.value}`; const { request, controller } = AxiosGetHelper(deletePendingKeysEndpoint, cancelDeletePendingSignal); cancelDeletePendingSignal = controller; @@ -646,30 +621,11 @@ export class Om extends React.Component, IOmdbInsightsSta } }); - if (deletePendingKeysResponse && deletePendingKeysResponse.data && deletePendingKeysResponse.data.lastKey === "") { - //last key of api is empty may be last record no further records - keysPendingPrevList =[""]; - this.setState({ - loading: false, - nextClickable: false, - pendingDeleteKeyDataSource: deletedKeyInfoData - }) - } - else { - if (this.state.prevKeyDeletePending === "" ||this.state.prevKeyDeletePending === undefined ){ - this.setState({ - prevClickable: false - }) - } - if (keysPendingPrevList.includes(deletePendingKeysResponse.data.lastKey) === false) { - keysPendingPrevList.push(deletePendingKeysResponse.data.lastKey); - } - this.setState({ - loading: false, - prevKeyDeletePending: deletePendingKeysResponse && deletePendingKeysResponse.data && deletePendingKeysResponse.data.lastKey, - pendingDeleteKeyDataSource: deletedKeyInfoData - }); - } + this.setState({ + loading: false, + pendingDeleteKeyDataSource: deletedKeyInfoData + }); + }).catch(error => { this.setState({ loading: false, @@ -722,11 +678,9 @@ export class Om extends React.Component, IOmdbInsightsSta ); } - fetchDeletedKeys = (limit: number, prevKeyDeleted: number) => { + fetchDeletedKeys = () => { this.setState({ - loading: true, - nextClickable: true, - prevClickable: true + loading: true }); //Cancel any previous pending request @@ -735,54 +689,64 @@ export class Om extends React.Component, IOmdbInsightsSta cancelOpenKeysSignal, cancelDeletePendingSignal, cancelDeletedKeysSignal, - cancelRowExpandSignal + cancelRowExpandSignal, + cancelDeletedPendingDirSignal ]); - const deletedKeysEndpoint = `/api/v1/containers/mismatch/deleted?limit=${limit}&prevKey=${prevKeyDeleted}`; + const deletedKeysEndpoint = `/api/v1/containers/mismatch/deleted?limit=${this.state.selectedLimit.value}`; const { request, controller } = AxiosGetHelper(deletedKeysEndpoint, cancelDeletedKeysSignal); cancelDeletedKeysSignal = controller request.then(deletedKeysResponse => { let deletedContainerKeys = []; deletedContainerKeys = deletedKeysResponse && deletedKeysResponse.data && deletedKeysResponse.data.containers; - if (deletedKeysResponse && deletedKeysResponse.data && deletedKeysResponse.data.lastKey === null) { - // no more further records last key - deletedKeysPrevList = [0]; - this.setState({ - loading: false, - nextClickable: false, - deletedContainerKeysDataSource: deletedContainerKeys, - expandedRowData: {}, - }) - } - else { - if (this.state.prevKeyDeleted === 0 ){ - this.setState({ - prevClickable: false - }) - } - if (deletedKeysPrevList.includes(deletedKeysResponse.data.lastKey) === false) { - deletedKeysPrevList.push(deletedKeysResponse.data.lastKey); - } + this.setState({ + loading: false, + 
deletedContainerKeysDataSource: deletedContainerKeys + }) + }).catch(error => { + this.setState({ + loading: false + }); + showDataFetchError(error.toString()); + }); + }; + + // Pending Delete Directories + fetchDeletePendingDir = () => { + this.setState({ + loading: true + }); + + //Cancel any previous pending request + cancelRequests([ + cancelMismatchedEndpointSignal, + cancelOpenKeysSignal, + cancelDeletePendingSignal, + cancelDeletedKeysSignal, + cancelRowExpandSignal, + cancelDeletedPendingDirSignal + ]); + + const DELETE_PENDING_DIR_ENDPOINT = `/api/v1/keys/deletePending/dirs?limit=${this.state.selectedLimit.value}`; + const { request, controller } = AxiosGetHelper(DELETE_PENDING_DIR_ENDPOINT, cancelDeletedPendingDirSignal); + cancelDeletedPendingDirSignal = controller + request.then(deletePendingDirResponse => { + let deletedDirInfo = []; + deletedDirInfo = deletePendingDirResponse && deletePendingDirResponse.data && deletePendingDirResponse.data.deletedDirInfo; this.setState({ loading: false, - prevKeyDeleted: deletedKeysResponse && deletedKeysResponse.data && deletedKeysResponse.data.lastKey, - deletedContainerKeysDataSource: deletedContainerKeys - }) - }; + pendingDeleteDirDataSource: deletedDirInfo + }); }).catch(error => { this.setState({ - loading: false + loading: false, }); showDataFetchError(error.toString()); }); }; + changeTab = (activeKey: any) => { - //when changing tab make empty all datasets and prevkey and deafult filtering to intial values also cancel all pending requests - mismatchPrevKeyList = [0]; - openPrevKeyList =[""]; - keysPendingPrevList =[""]; - deletedKeysPrevList =[0]; this.setState({ activeTab: activeKey, mismatchDataSource: [], @@ -790,121 +754,29 @@ export class Om extends React.Component, IOmdbInsightsSta pendingDeleteKeyDataSource: [], deletedContainerKeysDataSource: [], expandedRowData: {}, - prevKeyOpen: "", - prevKeyDeletePending: "", - prevKeyDeleted: 0, - prevKeyMismatch: 0, mismatchMissingState: 'SCM', includeFso: true, includeNonFso: false, - DEFAULT_LIMIT: 10, - + selectedLimit: INITIAL_LIMIT_OPTION }, () => { if (activeKey === '2') { - this.fetchOpenKeys(this.state.includeFso, this.state.includeNonFso, this.state.DEFAULT_LIMIT, this.state.prevKeyOpen); + this.fetchOpenKeys(this.state.includeFso, this.state.includeNonFso); } else if (activeKey === '3') { keysPendingExpanded =[]; - this.fetchDeletePendingKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeletePending); + this.fetchDeletePendingKeys(); } else if (activeKey === '4') { - this.fetchDeletedKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeleted); + this.fetchDeletedKeys(); + } else if (activeKey === '5') { + this.fetchDeletePendingDir (); } else { - this.fetchMismatchContainers(this.state.DEFAULT_LIMIT, this.state.prevKeyMismatch, this.state.mismatchMissingState); + this.fetchMismatchContainers(this.state.mismatchMissingState); } }) }; - fetchPreviousRecords = () => { - // to fetch previous call stored all prevkey in array and fetching in respective tabs - if (this.state.activeTab === '2') { - this.setState({ - prevKeyOpen: openPrevKeyList[openPrevKeyList.indexOf(this.state.prevKeyOpen)-2] - }, () => { - this.fetchOpenKeys(this.state.includeFso, this.state.includeNonFso, this.state.DEFAULT_LIMIT,this.state.prevKeyOpen); - }) - } else if (this.state.activeTab === '3') { - this.setState({ - prevKeyDeletePending: keysPendingPrevList[keysPendingPrevList.indexOf(this.state.prevKeyDeletePending)-2] - }, () => { - this.fetchDeletePendingKeys(this.state.DEFAULT_LIMIT, 
this.state.prevKeyDeletePending); - }) - } else if (this.state.activeTab === '4') { - this.setState({ - prevKeyDeleted: deletedKeysPrevList[deletedKeysPrevList.indexOf(this.state.prevKeyDeleted)-2] - }, () => { - this.fetchDeletedKeys(this.state.DEFAULT_LIMIT,this.state.prevKeyDeleted); - }) - } - else { - this.setState({ - prevKeyMismatch: mismatchPrevKeyList[mismatchPrevKeyList.indexOf(this.state.prevKeyMismatch)-2] - }, () => { - this.fetchMismatchContainers(this.state.DEFAULT_LIMIT,this.state.prevKeyMismatch, this.state.mismatchMissingState); - }) - } - }; - - fetchNextRecords = () => { - // To Call API for Page Level for each page fetch next records - if (this.state.activeTab === '2') { - this.fetchOpenKeys(this.state.includeFso, this.state.includeNonFso, this.state.DEFAULT_LIMIT, this.state.prevKeyOpen); - } else if (this.state.activeTab === '3') { - this.fetchDeletePendingKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeletePending); - } else if (this.state.activeTab === '4') { - this.fetchDeletedKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeleted); - } - else { - this.fetchMismatchContainers(this.state.DEFAULT_LIMIT, this.state.prevKeyMismatch, this.state.mismatchMissingState); - } - }; - - itemRender = (_: any, type: string, originalElement: any) => { - if (type === 'prev') { - return
    {this.state.prevClickable ? Prev: No Records}
    ; - } - if (type === 'next') { - return
    {this.state.nextClickable ? {'>>'} : No More Further Records}
    ; - } - return originalElement; - }; - onShowSizeChange = (current: number, pageSize: number) => { - if (this.state.activeTab === '2') { - //open keys - this.setState({ - DEFAULT_LIMIT: pageSize, - prevKeyOpen: openPrevKeyList[openPrevKeyList.indexOf(this.state.prevKeyOpen)-1] - }, () => { - this.fetchOpenKeys(this.state.includeFso, this.state.includeNonFso, this.state.DEFAULT_LIMIT,this.state.prevKeyOpen); - }); - } - else if (this.state.activeTab === '3') { - //keys pending for deletion - this.setState({ - DEFAULT_LIMIT: pageSize, - prevKeyDeletePending: keysPendingPrevList[keysPendingPrevList.indexOf(this.state.prevKeyDeletePending)-1] - }, () => { - this.fetchDeletePendingKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeletePending); - }) - } - else if (this.state.activeTab === '4') { - //deleted container keys - this.setState({ - DEFAULT_LIMIT: pageSize, - prevKeyDeleted: deletedKeysPrevList[deletedKeysPrevList.indexOf(this.state.prevKeyDeleted)-1] - }, () => { - this.fetchDeletedKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeleted); - }) - } - else { - // active tab 1 for mismatch - this.setState({ - DEFAULT_LIMIT: pageSize, - prevKeyMismatch: mismatchPrevKeyList[mismatchPrevKeyList.indexOf(this.state.prevKeyMismatch)-1] - }, () => { - this.fetchMismatchContainers(this.state.DEFAULT_LIMIT,this.state.prevKeyMismatch, this.state.mismatchMissingState); - }); - } + console.log(current, pageSize); }; onRowExpandClick = (expanded: boolean, record: IContainerResponse) => { @@ -1039,16 +911,50 @@ export class Om extends React.Component, IOmdbInsightsSta }, []) }; + searchDirPendingColumn = () => { + return PENDINGDIR_TAB_COLUMNS.reduce((filtered, column) => { + if (column.isSearchable) { + const newColumn = { + ...column, + ...new ColumnSearch(column).getColumnSearchProps(column.dataIndex) + }; + filtered.push(newColumn); + } else { + filtered.push(column); + } + return filtered; + }, []) + }; + + _handleLimitChange = (selected: ValueType, _action: ActionMeta) => { + const selectedLimit = (selected as IOption) + this.setState({ + selectedLimit + }, this._loadData); + } + + _onCreateOption = (created: string) => { + // Check that it's a numeric and non-negative + if (parseInt(created)) { + const createdOption: IOption = { + label: created, + value: created + } + this.setState({ + selectedLimit: createdOption + }, this._loadData); + } else { + console.log('Not a valid option') + } + } + render() { - const { mismatchDataSource, loading, openKeysDataSource, pendingDeleteKeyDataSource, deletedContainerKeysDataSource } = this.state; + const { mismatchDataSource, loading, openKeysDataSource, pendingDeleteKeyDataSource, deletedContainerKeysDataSource, pendingDeleteDirDataSource, selectedLimit } = this.state; const paginationConfig: PaginationConfig = { - pageSize:this.state.DEFAULT_LIMIT, - defaultPageSize: this.state.DEFAULT_LIMIT, - pageSizeOptions: ['10', '20', '30', '50'], + showTotal: (total: number, range) => `${range[0]}-${range[1]} of ${total}`, showSizeChanger: true, onShowSizeChange: this.onShowSizeChange, - itemRender: this.itemRender }; const generateMismatchTable = (dataSource: any) => { @@ -1065,7 +971,7 @@ export class Om extends React.Component, IOmdbInsightsSta return } @@ -1090,14 +996,43 @@ export class Om extends React.Component, IOmdbInsightsSta /> } - + const generateDirPendingTable = (dataSource: any) => { + return
    + } return ( -
    +
    OM DB Insights
    +
    + { + // Only number will be accepted + return !isNaN(parseInt(input)) + }} + options={LIMIT_OPTIONS} + hideSelectedOptions={false} + value={selectedLimit} + createOptionPosition='last' + formatCreateLabel={(input) => { + return `new limit... ${input}` + }} + /> Limit +
    {generateMismatchTable(mismatchDataSource)} @@ -1123,6 +1058,15 @@ export class Om extends React.Component, IOmdbInsightsSta }> {generateDeletedKeysTable(deletedContainerKeysDataSource)} + Directories Pending for Deletion   + + + + + }> + {generateDirPendingTable(pendingDeleteDirDataSource)} +
    diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java index 42d69e030f31..a9ed342faad4 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java @@ -65,6 +65,7 @@ */ public final class OMMetadataManagerTestUtils { + private static OzoneConfiguration configuration; private OMMetadataManagerTestUtils() { } @@ -129,8 +130,9 @@ public static ReconOMMetadataManager getTestReconOmMetadataManager( DBCheckpoint checkpoint = omMetadataManager.getStore() .getCheckpoint(true); assertNotNull(checkpoint.getCheckpointLocation()); - - OzoneConfiguration configuration = new OzoneConfiguration(); + if (configuration == null) { + configuration = new OzoneConfiguration(); + } configuration.set(OZONE_RECON_OM_SNAPSHOT_DB_DIR, reconOmDbDir .getAbsolutePath()); @@ -397,23 +399,31 @@ public static void writeDirToOm(OMMetadataManager omMetadataManager, .build()); } + @SuppressWarnings("parameternumber") public static void writeDeletedDirToOm(OMMetadataManager omMetadataManager, String bucketName, String volumeName, String dirName, long parentObjectId, long bucketObjectId, - long volumeObjectId) + long volumeObjectId, + long objectId) throws IOException { - // DB key in DeletedDirectoryTable => "volumeID/bucketID/parentId/dirName" - String omKey = omMetadataManager.getOzonePathKey(volumeObjectId, - bucketObjectId, parentObjectId, dirName); + // DB key in DeletedDirectoryTable => + // "volumeID/bucketID/parentId/dirName/dirObjectId" + + String ozoneDbKey = omMetadataManager.getOzonePathKey(volumeObjectId, + bucketObjectId, parentObjectId, dirName); + String ozoneDeleteKey = omMetadataManager.getOzoneDeletePathKey( + objectId, ozoneDbKey); + - omMetadataManager.getDeletedDirTable().put(omKey, + omMetadataManager.getDeletedDirTable().put(ozoneDeleteKey, new OmKeyInfo.Builder() .setBucketName(bucketName) .setVolumeName(volumeName) .setKeyName(dirName) + .setObjectID(objectId) .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) .build()); } @@ -493,4 +503,14 @@ public static OmKeyLocationInfo getOmKeyLocationInfo(BlockID blockID, public static BucketLayout getBucketLayout() { return BucketLayout.DEFAULT; } + + public static OzoneConfiguration getConfiguration() { + return configuration; + } + + public static void setConfiguration( + OzoneConfiguration configuration) { + OMMetadataManagerTestUtils.configuration = configuration; + } + } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java index f49826e67d81..d5962c0c407d 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java @@ -45,7 +45,11 @@ import org.apache.commons.lang3.RandomUtils; import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import 
org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -53,6 +57,7 @@ * Test Recon Utility methods. */ public class TestReconUtils { + private static PipelineID randomPipelineID = PipelineID.randomId(); @TempDir private Path temporaryFolder; @@ -234,4 +239,24 @@ private static int oldNextClosestPowerIndexOfTwo(long dataSize) { } return index; } + + private static ContainerInfo.Builder getDefaultContainerInfoBuilder( + final HddsProtos.LifeCycleState state) { + return new ContainerInfo.Builder() + .setContainerID(RandomUtils.nextLong()) + .setReplicationConfig( + RatisReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.THREE)) + .setState(state) + .setSequenceId(10000L) + .setOwner("TEST"); + } + + + public static ContainerInfo getContainer( + final HddsProtos.LifeCycleState state) { + return getDefaultContainerInfoBuilder(state) + .setPipelineID(randomPipelineID) + .build(); + } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java index 854ac74bd390..82c7c1b5bef0 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java @@ -885,6 +885,7 @@ public void testUnhealthyContainers() throws IOException, TimeoutException { public void testUnhealthyContainersFilteredResponse() throws IOException, TimeoutException { String missing = UnHealthyContainerStates.MISSING.toString(); + String emptyMissing = UnHealthyContainerStates.EMPTY_MISSING.toString(); Response response = containerEndpoint .getUnhealthyContainers(missing, 1000, 1); @@ -904,6 +905,7 @@ public void testUnhealthyContainersFilteredResponse() uuid3 = newDatanode("host3", "127.0.0.3"); uuid4 = newDatanode("host4", "127.0.0.4"); createUnhealthyRecords(5, 4, 3, 2); + createEmptyMissingUnhealthyRecords(2); response = containerEndpoint.getUnhealthyContainers(missing, 1000, 1); @@ -926,6 +928,13 @@ public void testUnhealthyContainersFilteredResponse() for (UnhealthyContainerMetadata r : records) { assertEquals(missing, r.getContainerState()); } + + Response filteredEmptyMissingResponse = containerEndpoint + .getUnhealthyContainers(emptyMissing, 1000, 1); + responseObject = (UnhealthyContainersResponse) filteredEmptyMissingResponse.getEntity(); + records = responseObject.getContainers(); + // Assert for zero empty missing containers. 
+ assertEquals(0, records.size()); } @Test @@ -1026,6 +1035,14 @@ UUID newDatanode(String hostName, String ipAddress) throws IOException { return uuid; } + private void createEmptyMissingUnhealthyRecords(int emptyMissing) { + int cid = 0; + for (int i = 0; i < emptyMissing; i++) { + createUnhealthyRecord(++cid, UnHealthyContainerStates.EMPTY_MISSING.toString(), + 3, 3, 0, null); + } + } + private void createUnhealthyRecords(int missing, int overRep, int underRep, int misRep) { int cid = 0; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index 05d9927d6c93..310c8a28e74f 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -113,6 +113,7 @@ import static org.apache.hadoop.ozone.recon.spi.impl.PrometheusServiceProviderImpl.PROMETHEUS_INSTANT_QUERY_API; import static org.hadoop.ozone.recon.schema.tables.GlobalStatsTable.GLOBAL_STATS; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -288,8 +289,9 @@ private void initializeInjector() throws Exception { utilizationSchemaDefinition); fileSizeCountTask = new FileSizeCountTask(fileCountBySizeDao, utilizationSchemaDefinition); - omTableInsightTask = new OmTableInsightTask( - globalStatsDao, sqlConfiguration, reconOMMetadataManager); + omTableInsightTask = + new OmTableInsightTask(globalStatsDao, sqlConfiguration, + reconOMMetadataManager); containerHealthSchemaManager = reconTestInjector.getInstance(ContainerHealthSchemaManager.class); clusterStateEndpoint = @@ -405,7 +407,7 @@ public void setUp() throws Exception { .addStorageReport(storageReportProto4).build(); LayoutVersionProto layoutInfo = defaultLayoutVersionProto(); - try { + assertDoesNotThrow(() -> { reconScm.getDatanodeProtocolServer() .register(extendedDatanodeDetailsProto, nodeReportProto, containerReportsProto, pipelineReportsProto, layoutInfo); @@ -416,9 +418,7 @@ public void setUp() throws Exception { defaultLayoutVersionProto()); // Process all events in the event queue reconScm.getEventQueue().processAll(1000); - } catch (Exception ex) { - fail(ex.getMessage()); - } + }); // Write Data to OM // A sample volume (sampleVol) and a bucket (bucketOne) is already created // in AbstractOMMetadataManagerTest. 
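Editor's note on the TestEndpoints hunk above: it replaces a try/catch block that called fail(ex.getMessage()) with JUnit 5's assertDoesNotThrow. A minimal sketch of that pattern follows; riskyRegisterCall is a hypothetical stand-in for the datanode register/heartbeat calls in the actual setUp(), not part of the patch.

import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;

import org.junit.jupiter.api.Test;

class AssertDoesNotThrowSketch {

  // Hypothetical stand-in for the Recon SCM register/heartbeat calls.
  private void riskyRegisterCall() throws Exception {
    // no-op in this sketch; a real test would invoke the protocol server here
  }

  @Test
  void setUpDoesNotThrow() {
    // Old pattern: try { riskyRegisterCall(); } catch (Exception ex) { fail(ex.getMessage()); }
    // JUnit 5 expresses the same intent in one call:
    assertDoesNotThrow(() -> riskyRegisterCall());
  }
}

The practical upside over catch-and-fail is that the unexpected exception is surfaced with its stack trace instead of being flattened to its message string.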
@@ -435,14 +435,12 @@ public void setUp() throws Exception { .addOzoneAcls(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, "TestUser2", - IAccessAuthorizer.ACLType.WRITE, - OzoneAcl.AclScope.ACCESS + OzoneAcl.AclScope.ACCESS, IAccessAuthorizer.ACLType.WRITE )) .addOzoneAcls(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, "TestUser2", - IAccessAuthorizer.ACLType.READ, - OzoneAcl.AclScope.ACCESS + OzoneAcl.AclScope.ACCESS, IAccessAuthorizer.ACLType.READ )) .build(); reconOMMetadataManager.getVolumeTable().put(volumeKey, args); @@ -453,8 +451,7 @@ public void setUp() throws Exception { .addAcl(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.GROUP, "TestGroup2", - IAccessAuthorizer.ACLType.WRITE, - OzoneAcl.AclScope.ACCESS + OzoneAcl.AclScope.ACCESS, IAccessAuthorizer.ACLType.WRITE )) .setQuotaInBytes(OzoneConsts.GB) .setUsedBytes(OzoneConsts.MB) @@ -477,8 +474,7 @@ public void setUp() throws Exception { .addAcl(new OzoneAcl( IAccessAuthorizer.ACLIdentityType.GROUP, "TestGroup2", - IAccessAuthorizer.ACLType.READ, - OzoneAcl.AclScope.ACCESS + OzoneAcl.AclScope.ACCESS, IAccessAuthorizer.ACLType.READ )) .setQuotaInBytes(OzoneConsts.GB) .setUsedBytes(100 * OzoneConsts.MB) @@ -515,11 +511,11 @@ public void setUp() throws Exception { // Populate the deletedDirectories table in OM DB writeDeletedDirToOm(reconOMMetadataManager, "Bucket1", "Volume1", "dir1", - 3L, 2L, 1L); + 3L, 2L, 1L, 23L); writeDeletedDirToOm(reconOMMetadataManager, "Bucket2", "Volume2", "dir2", - 6L, 5L, 4L); + 6L, 5L, 4L, 22L); writeDeletedDirToOm(reconOMMetadataManager, "Bucket3", "Volume3", "dir3", - 9L, 8L, 7L); + 9L, 8L, 7L, 21L); // Truncate global stats table before running each test dslContext.truncate(GLOBAL_STATS); @@ -594,7 +590,7 @@ public void testGetDatanodes() throws Exception { (DatanodesResponse) response1.getEntity(); DatanodeMetadata datanodeMetadata1 = datanodesResponse1.getDatanodes().stream().filter(datanodeMetadata -> - datanodeMetadata.getHostname().equals("host1.datanode")) + datanodeMetadata.getHostname().equals("host1.datanode")) .findFirst().orElse(null); return (datanodeMetadata1 != null && datanodeMetadata1.getContainers() == 1 && @@ -699,7 +695,7 @@ public void testGetMetricsResponse() throws Exception { byte[] fileBytes = FileUtils.readFileToByteArray( new File(classLoader.getResource(PROMETHEUS_TEST_RESPONSE_FILE) .getFile()) - ); + ); verify(outputStreamMock).write(fileBytes, 0, fileBytes.length); } @@ -866,10 +862,12 @@ public void testGetContainerCounts() throws Exception { ContainerInfo omContainerInfo1 = mock(ContainerInfo.class); given(omContainerInfo1.containerID()).willReturn(new ContainerID(1)); given(omContainerInfo1.getUsedBytes()).willReturn(1500000000L); // 1.5GB + given(omContainerInfo1.getState()).willReturn(LifeCycleState.OPEN); ContainerInfo omContainerInfo2 = mock(ContainerInfo.class); given(omContainerInfo2.containerID()).willReturn(new ContainerID(2)); given(omContainerInfo2.getUsedBytes()).willReturn(2500000000L); // 2.5GB + given(omContainerInfo2.getState()).willReturn(LifeCycleState.OPEN); // Create a list of container info objects List containers = new ArrayList<>(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java index ba00f843f447..765399f71e3a 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java +++ 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java @@ -74,15 +74,17 @@ import java.util.Set; import java.util.HashSet; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.setConfiguration; + import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -875,6 +877,7 @@ private static OMMetadataManager initializeNewOmMetadataManager( omDbDir.getAbsolutePath()); omConfiguration.set(OMConfigKeys .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true"); + setConfiguration(omConfiguration); OMMetadataManager omMetadataManager = new OmMetadataManagerImpl( omConfiguration, null); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java new file mode 100644 index 000000000000..8d8299aefc18 --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java @@ -0,0 +1,1420 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.recon.api; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; +import static org.apache.hadoop.ozone.om.helpers.QuotaUtil.getReplicatedSize; + +import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.StorageType; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; +import org.apache.hadoop.hdds.scm.container.ContainerReplica; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; +import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; +import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.recon.ReconConstants; +import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; +import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler; +import org.apache.hadoop.ozone.recon.api.types.BucketObjectDBInfo; +import org.apache.hadoop.ozone.recon.api.types.DUResponse; +import org.apache.hadoop.ozone.recon.api.types.EntityType; +import org.apache.hadoop.ozone.recon.api.types.NamespaceSummaryResponse; +import org.apache.hadoop.ozone.recon.api.types.QuotaUsageResponse; +import org.apache.hadoop.ozone.recon.api.types.ResponseStatus; +import org.apache.hadoop.ozone.recon.api.types.VolumeObjectDBInfo; +import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse; +import org.apache.hadoop.ozone.recon.common.CommonUtils; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.scm.ReconNodeManager; +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; +import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; +import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; +import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithLegacy; +import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithOBS; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +import javax.ws.rs.core.Response; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.ArrayList; +import java.util.Set; +import java.util.HashSet; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static 
org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Tests the NSSummary REST APIs within the context of an Object Store (OBS) layout, + * as well as Legacy layout buckets with FileSystemPaths disabled. The tests aim to + * validate API responses for buckets that follow the flat hierarchy model typical + * of OBS layouts. + *

+ * The test environment simulates a simple object storage structure with volumes
+ * containing buckets, which in turn contain files. Specifically, it includes:
+ * - Two OBS layout buckets (bucket1 and bucket2) under 'vol', each containing
+ * multiple files.
+ * - Two Legacy layout buckets (bucket3 and bucket4) under 'vol2', with the
+ * fileSystemEnabled flag set to false for both legacy buckets.
+ *

    + * The directory structure for testing is as follows: + * . + * └── vol + * ├── bucket1 (OBS) + * │ ├── file1 + * │ ├── file2 + * │ └── file3 + * └── bucket2 (OBS) + * ├── file4 + * └── file5 + * └── vol2 + * ├── bucket3 (Legacy) + * │ ├── file8 + * │ ├── file9 + * │ └── file10 + * └── bucket4 (Legacy) + * └── file11 + */ +public class TestNSSummaryEndpointWithOBSAndLegacy { + @TempDir + private Path temporaryFolder; + + private ReconOMMetadataManager reconOMMetadataManager; + private NSSummaryEndpoint nsSummaryEndpoint; + private OzoneConfiguration conf; + private CommonUtils commonUtils; + + private static final String TEST_PATH_UTILITY = + "/vol1/buck1/a/b/c/d/e/file1.txt"; + private static final String PARENT_DIR = "vol1/buck1/a/b/c/d/e"; + private static final String[] TEST_NAMES = + new String[]{"vol1", "buck1", "a", "b", "c", "d", "e", "file1.txt"}; + private static final String TEST_KEY_NAMES = "a/b/c/d/e/file1.txt"; + + // Object names + private static final String VOL = "vol"; + private static final String VOL_TWO = "vol2"; + private static final String BUCKET_ONE = "bucket1"; + private static final String BUCKET_TWO = "bucket2"; + private static final String BUCKET_THREE = "bucket3"; + private static final String BUCKET_FOUR = "bucket4"; + private static final String KEY_ONE = "file1"; + private static final String KEY_TWO = "////file2"; + private static final String KEY_THREE = "file3///"; + private static final String KEY_FOUR = "file4"; + private static final String KEY_FIVE = "_//////"; + private static final String KEY_EIGHT = "file8"; + private static final String KEY_NINE = "//////"; + private static final String KEY_TEN = "///__file10"; + private static final String KEY_ELEVEN = "////file11"; + private static final String MULTI_BLOCK_FILE = KEY_THREE; + + private static final long PARENT_OBJECT_ID_ZERO = 0L; + private static final long VOL_OBJECT_ID = 0L; + private static final long VOL_TWO_OBJECT_ID = 14L; + private static final long BUCKET_ONE_OBJECT_ID = 1L; + private static final long BUCKET_TWO_OBJECT_ID = 2L; + private static final long BUCKET_THREE_OBJECT_ID = 15L; + private static final long BUCKET_FOUR_OBJECT_ID = 16L; + private static final long KEY_ONE_OBJECT_ID = 3L; + private static final long KEY_TWO_OBJECT_ID = 5L; + private static final long KEY_THREE_OBJECT_ID = 8L; + private static final long KEY_FOUR_OBJECT_ID = 6L; + private static final long KEY_FIVE_OBJECT_ID = 9L; + private static final long KEY_EIGHT_OBJECT_ID = 17L; + private static final long KEY_NINE_OBJECT_ID = 19L; + private static final long KEY_TEN_OBJECT_ID = 20L; + private static final long KEY_ELEVEN_OBJECT_ID = 21L; + private static final long MULTI_BLOCK_KEY_OBJECT_ID = 13L; + + // container IDs + private static final long CONTAINER_ONE_ID = 1L; + private static final long CONTAINER_TWO_ID = 2L; + private static final long CONTAINER_THREE_ID = 3L; + private static final long CONTAINER_FOUR_ID = 4L; + private static final long CONTAINER_FIVE_ID = 5L; + private static final long CONTAINER_SIX_ID = 6L; + + // replication factors + private static final int CONTAINER_ONE_REPLICA_COUNT = 3; + private static final int CONTAINER_TWO_REPLICA_COUNT = 2; + private static final int CONTAINER_THREE_REPLICA_COUNT = 4; + private static final int CONTAINER_FOUR_REPLICA_COUNT = 5; + private static final int CONTAINER_FIVE_REPLICA_COUNT = 2; + private static final int CONTAINER_SIX_REPLICA_COUNT = 3; + + // block lengths + private static final long BLOCK_ONE_LENGTH = 1000L; + private static final 
long BLOCK_TWO_LENGTH = 2000L; + private static final long BLOCK_THREE_LENGTH = 3000L; + private static final long BLOCK_FOUR_LENGTH = 4000L; + private static final long BLOCK_FIVE_LENGTH = 5000L; + private static final long BLOCK_SIX_LENGTH = 6000L; + + // data size in bytes + private static final long FILE_ONE_SIZE = 500L; // bin 0 + private static final long FILE_TWO_SIZE = OzoneConsts.KB + 1; // bin 1 + private static final long FILE_THREE_SIZE = 4 * OzoneConsts.KB + 1; // bin 3 + private static final long FILE_FOUR_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long FILE_FIVE_SIZE = 100L; // bin 0 + private static final long FILE_EIGHT_SIZE = OzoneConsts.KB + 1; // bin 1 + private static final long FILE_NINE_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long FILE_TEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long FILE_ELEVEN_SIZE = OzoneConsts.KB + 1; // bin 1 + + private static final long FILE1_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_ONE_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE2_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_TWO_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE3_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_THREE_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE4_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_FOUR_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE5_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_FIVE_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + + private static final long FILE8_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_EIGHT_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE9_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_NINE_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE10_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_TEN_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE11_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_ELEVEN_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + + private static final long MULTI_BLOCK_KEY_SIZE_WITH_REPLICA + = FILE3_SIZE_WITH_REPLICA; + private static final long + MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_ROOT + = FILE1_SIZE_WITH_REPLICA + + FILE2_SIZE_WITH_REPLICA + + FILE3_SIZE_WITH_REPLICA + + FILE4_SIZE_WITH_REPLICA + + FILE5_SIZE_WITH_REPLICA + + FILE8_SIZE_WITH_REPLICA + + FILE9_SIZE_WITH_REPLICA + + FILE10_SIZE_WITH_REPLICA + + FILE11_SIZE_WITH_REPLICA; + + private static final long + MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL + = FILE1_SIZE_WITH_REPLICA + + FILE2_SIZE_WITH_REPLICA + + FILE3_SIZE_WITH_REPLICA + + FILE4_SIZE_WITH_REPLICA + + FILE5_SIZE_WITH_REPLICA; + + private static final long + MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET1 + = FILE1_SIZE_WITH_REPLICA + + FILE2_SIZE_WITH_REPLICA + + FILE3_SIZE_WITH_REPLICA; + + private static final long + MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET3 + = FILE8_SIZE_WITH_REPLICA + + FILE9_SIZE_WITH_REPLICA + + FILE10_SIZE_WITH_REPLICA; + + + private static final long + MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_KEY + = FILE4_SIZE_WITH_REPLICA; + + // quota in bytes + private static final long ROOT_QUOTA = 2 * (2 * OzoneConsts.MB); + private static final long VOL_QUOTA = 2 * OzoneConsts.MB; + private static final long VOL_TWO_QUOTA = 2 * OzoneConsts.MB; + private static final long BUCKET_ONE_QUOTA = OzoneConsts.MB; 
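// Editorial note, not part of the patch: the *_WITH_REPLICA constants above are
// derived via QuotaUtil.getReplicatedSize(...) with a STANDALONE replication
// config at factor ONE, so each replicated size should equal the raw file size
// (e.g. FILE1_SIZE_WITH_REPLICA == FILE_ONE_SIZE == 500L), and the
// MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_* values reduce to plain sums of the raw
// sizes. The "bin N" markers appear to follow Recon's power-of-two file-size
// histogram: bin 0 <= 1 KB, bin 1 = (1 KB, 2 KB], bin 2 = (2 KB, 4 KB],
// bin 3 = (4 KB, 8 KB].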
+ private static final long BUCKET_TWO_QUOTA = OzoneConsts.MB; + private static final long BUCKET_THREE_QUOTA = OzoneConsts.MB; + private static final long BUCKET_FOUR_QUOTA = OzoneConsts.MB; + + // mock client's path requests + private static final String TEST_USER = "TestUser"; + private static final String ROOT_PATH = "/"; + private static final String VOL_PATH = ROOT_PATH + VOL; + private static final String VOL_TWO_PATH = ROOT_PATH + VOL_TWO; + private static final String BUCKET_ONE_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE; + private static final String BUCKET_TWO_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO; + private static final String BUCKET_THREE_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE; + private static final String BUCKET_FOUR_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_FOUR; + private static final String KEY_ONE_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_ONE; + private static final String KEY_TWO_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_TWO; + private static final String KEY_THREE_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_THREE; + private static final String KEY_FOUR_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FOUR; + private static final String KEY_FIVE_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FIVE; + private static final String KEY_EIGHT_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE + ROOT_PATH + KEY_EIGHT; + private static final String KEY_NINE_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE + ROOT_PATH + KEY_NINE; + private static final String KEY_TEN_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE + ROOT_PATH + KEY_TEN; + private static final String KEY_ELEVEN_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_FOUR + ROOT_PATH + KEY_ELEVEN; + private static final String KEY4_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FOUR; + private static final String MULTI_BLOCK_KEY_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_THREE; + private static final String INVALID_PATH = "/vol/path/not/found"; + + // some expected answers + private static final long ROOT_DATA_SIZE = + FILE_ONE_SIZE + FILE_TWO_SIZE + FILE_THREE_SIZE + FILE_FOUR_SIZE + + FILE_FIVE_SIZE + FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE + + FILE_ELEVEN_SIZE; + private static final long VOL_DATA_SIZE = FILE_ONE_SIZE + FILE_TWO_SIZE + + FILE_THREE_SIZE + FILE_FOUR_SIZE + FILE_FIVE_SIZE; + + private static final long VOL_TWO_DATA_SIZE = + FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE + FILE_ELEVEN_SIZE; + + private static final long BUCKET_ONE_DATA_SIZE = FILE_ONE_SIZE + + FILE_TWO_SIZE + + FILE_THREE_SIZE; + + private static final long BUCKET_TWO_DATA_SIZE = + FILE_FOUR_SIZE + FILE_FIVE_SIZE; + + private static final long BUCKET_THREE_DATA_SIZE = + FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE; + + private static final long BUCKET_FOUR_DATA_SIZE = FILE_ELEVEN_SIZE; + + + @BeforeEach + public void setUp() throws Exception { + conf = new OzoneConfiguration(); + // By setting this config our Legacy buckets will behave like OBS buckets. 
+ conf.set(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, "false"); + OMMetadataManager omMetadataManager = initializeNewOmMetadataManager( + Files.createDirectory(temporaryFolder.resolve( + "JunitOmDBDir")).toFile(), conf); + OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = + getMockOzoneManagerServiceProvider(); + reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, + Files.createDirectory(temporaryFolder.resolve( + "omMetadatDir")).toFile()); + + ReconTestInjector reconTestInjector = + new ReconTestInjector.Builder(temporaryFolder.toFile()) + .withReconOm(reconOMMetadataManager) + .withOmServiceProvider(ozoneManagerServiceProvider) + .withReconSqlDb() + .withContainerDB() + .addBinding(OzoneStorageContainerManager.class, + getMockReconSCM()) + .addBinding(StorageContainerServiceProvider.class, + mock(StorageContainerServiceProviderImpl.class)) + .addBinding(NSSummaryEndpoint.class) + .build(); + ReconNamespaceSummaryManager reconNamespaceSummaryManager = + reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); + nsSummaryEndpoint = reconTestInjector.getInstance(NSSummaryEndpoint.class); + + // populate OM DB and reprocess into Recon RocksDB + populateOMDB(); + NSSummaryTaskWithOBS nsSummaryTaskWithOBS = + new NSSummaryTaskWithOBS(reconNamespaceSummaryManager, + reconOMMetadataManager, conf); + nsSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager); + NSSummaryTaskWithLegacy nsSummaryTaskWithLegacy = + new NSSummaryTaskWithLegacy(reconNamespaceSummaryManager, + reconOMMetadataManager, conf); + nsSummaryTaskWithLegacy.reprocessWithLegacy(reconOMMetadataManager); + commonUtils = new CommonUtils(); + } + + @Test + public void testUtility() { + String[] names = EntityHandler.parseRequestPath(TEST_PATH_UTILITY); + assertArrayEquals(TEST_NAMES, names); + String keyName = BucketHandler.getKeyName(names); + assertEquals(TEST_KEY_NAMES, keyName); + String subpath = BucketHandler.buildSubpath(PARENT_DIR, "file1.txt"); + assertEquals(TEST_PATH_UTILITY, subpath); + } + + @Test + public void testGetBasicInfoRoot() throws Exception { + // Test root basics + Response rootResponse = nsSummaryEndpoint.getBasicInfo(ROOT_PATH); + NamespaceSummaryResponse rootResponseObj = + (NamespaceSummaryResponse) rootResponse.getEntity(); + assertEquals(EntityType.ROOT, rootResponseObj.getEntityType()); + assertEquals(2, rootResponseObj.getCountStats().getNumVolume()); + assertEquals(4, rootResponseObj.getCountStats().getNumBucket()); + assertEquals(9, rootResponseObj.getCountStats().getNumTotalKey()); + } + + @Test + public void testGetBasicInfoVol() throws Exception { + // Test volume basics + Response volResponse = nsSummaryEndpoint.getBasicInfo(VOL_PATH); + NamespaceSummaryResponse volResponseObj = + (NamespaceSummaryResponse) volResponse.getEntity(); + assertEquals(EntityType.VOLUME, + volResponseObj.getEntityType()); + assertEquals(2, volResponseObj.getCountStats().getNumBucket()); + assertEquals(5, volResponseObj.getCountStats().getNumTotalKey()); + assertEquals(TEST_USER, ((VolumeObjectDBInfo) volResponseObj. + getObjectDBInfo()).getAdmin()); + assertEquals(TEST_USER, ((VolumeObjectDBInfo) volResponseObj. 
+ getObjectDBInfo()).getOwner()); + assertEquals(VOL, volResponseObj.getObjectDBInfo().getName()); + assertEquals(2097152, volResponseObj.getObjectDBInfo().getQuotaInBytes()); + assertEquals(-1, volResponseObj.getObjectDBInfo().getQuotaInNamespace()); + } + + @Test + public void testGetBasicInfoVolTwo() throws Exception { + // Test volume 2's basics + Response volTwoResponse = nsSummaryEndpoint.getBasicInfo(VOL_TWO_PATH); + NamespaceSummaryResponse volTwoResponseObj = + (NamespaceSummaryResponse) volTwoResponse.getEntity(); + assertEquals(EntityType.VOLUME, + volTwoResponseObj.getEntityType()); + assertEquals(2, volTwoResponseObj.getCountStats().getNumBucket()); + assertEquals(4, volTwoResponseObj.getCountStats().getNumTotalKey()); + assertEquals(TEST_USER, ((VolumeObjectDBInfo) volTwoResponseObj. + getObjectDBInfo()).getAdmin()); + assertEquals(TEST_USER, ((VolumeObjectDBInfo) volTwoResponseObj. + getObjectDBInfo()).getOwner()); + assertEquals(VOL_TWO, volTwoResponseObj.getObjectDBInfo().getName()); + assertEquals(2097152, + volTwoResponseObj.getObjectDBInfo().getQuotaInBytes()); + assertEquals(-1, volTwoResponseObj.getObjectDBInfo().getQuotaInNamespace()); + } + + @Test + public void testGetBasicInfoBucketOne() throws Exception { + // Test bucket 1's basics + Response bucketOneResponse = + nsSummaryEndpoint.getBasicInfo(BUCKET_ONE_PATH); + NamespaceSummaryResponse bucketOneObj = + (NamespaceSummaryResponse) bucketOneResponse.getEntity(); + assertEquals(EntityType.BUCKET, bucketOneObj.getEntityType()); + assertEquals(3, bucketOneObj.getCountStats().getNumTotalKey()); + assertEquals(VOL, + ((BucketObjectDBInfo) bucketOneObj.getObjectDBInfo()).getVolumeName()); + assertEquals(StorageType.DISK, + ((BucketObjectDBInfo) + bucketOneObj.getObjectDBInfo()).getStorageType()); + assertEquals(getOBSBucketLayout(), + ((BucketObjectDBInfo) + bucketOneObj.getObjectDBInfo()).getBucketLayout()); + assertEquals(BUCKET_ONE, + ((BucketObjectDBInfo) bucketOneObj.getObjectDBInfo()).getName()); + } + + @Test + public void testGetBasicInfoBucketTwo() throws Exception { + // Test bucket 2's basics + Response bucketTwoResponse = + nsSummaryEndpoint.getBasicInfo(BUCKET_TWO_PATH); + NamespaceSummaryResponse bucketTwoObj = + (NamespaceSummaryResponse) bucketTwoResponse.getEntity(); + assertEquals(EntityType.BUCKET, bucketTwoObj.getEntityType()); + assertEquals(2, bucketTwoObj.getCountStats().getNumTotalKey()); + assertEquals(VOL, + ((BucketObjectDBInfo) bucketTwoObj.getObjectDBInfo()).getVolumeName()); + assertEquals(StorageType.DISK, + ((BucketObjectDBInfo) + bucketTwoObj.getObjectDBInfo()).getStorageType()); + assertEquals(getOBSBucketLayout(), + ((BucketObjectDBInfo) + bucketTwoObj.getObjectDBInfo()).getBucketLayout()); + assertEquals(BUCKET_TWO, + ((BucketObjectDBInfo) bucketTwoObj.getObjectDBInfo()).getName()); + } + + @Test + public void testGetBasicInfoBucketThree() throws Exception { + // Test bucket 3's basics + Response bucketThreeResponse = + nsSummaryEndpoint.getBasicInfo(BUCKET_THREE_PATH); + NamespaceSummaryResponse bucketThreeObj = (NamespaceSummaryResponse) + bucketThreeResponse.getEntity(); + assertEquals(EntityType.BUCKET, bucketThreeObj.getEntityType()); + assertEquals(3, bucketThreeObj.getCountStats().getNumTotalKey()); + assertEquals(VOL_TWO, + ((BucketObjectDBInfo) bucketThreeObj.getObjectDBInfo()).getVolumeName()); + assertEquals(StorageType.DISK, + ((BucketObjectDBInfo) + bucketThreeObj.getObjectDBInfo()).getStorageType()); + assertEquals(getLegacyBucketLayout(), + ((BucketObjectDBInfo) + 
bucketThreeObj.getObjectDBInfo()).getBucketLayout()); + assertEquals(BUCKET_THREE, + ((BucketObjectDBInfo) bucketThreeObj.getObjectDBInfo()).getName()); + } + + @Test + public void testGetBasicInfoBucketFour() throws Exception { + // Test bucket 4's basics + Response bucketFourResponse = + nsSummaryEndpoint.getBasicInfo(BUCKET_FOUR_PATH); + NamespaceSummaryResponse bucketFourObj = + (NamespaceSummaryResponse) bucketFourResponse.getEntity(); + assertEquals(EntityType.BUCKET, bucketFourObj.getEntityType()); + assertEquals(1, bucketFourObj.getCountStats().getNumTotalKey()); + assertEquals(VOL_TWO, + ((BucketObjectDBInfo) bucketFourObj.getObjectDBInfo()).getVolumeName()); + assertEquals(StorageType.DISK, + ((BucketObjectDBInfo) + bucketFourObj.getObjectDBInfo()).getStorageType()); + assertEquals(getLegacyBucketLayout(), + ((BucketObjectDBInfo) + bucketFourObj.getObjectDBInfo()).getBucketLayout()); + assertEquals(BUCKET_FOUR, + ((BucketObjectDBInfo) bucketFourObj.getObjectDBInfo()).getName()); + } + + @Test + public void testGetBasicInfoNoPath() throws Exception { + // Test invalid path + commonUtils.testNSSummaryBasicInfoNoPath(nsSummaryEndpoint); + } + + @Test + public void testGetBasicInfoKey() throws Exception { + // Test key + commonUtils.testNSSummaryBasicInfoKey(nsSummaryEndpoint); + } + + @Test + public void testDiskUsageRoot() throws Exception { + // root level DU + Response rootResponse = nsSummaryEndpoint.getDiskUsage(ROOT_PATH, + false, false); + DUResponse duRootRes = (DUResponse) rootResponse.getEntity(); + assertEquals(2, duRootRes.getCount()); + List duRootData = duRootRes.getDuData(); + // sort based on subpath + Collections.sort(duRootData, + Comparator.comparing(DUResponse.DiskUsage::getSubpath)); + DUResponse.DiskUsage duVol1 = duRootData.get(0); + DUResponse.DiskUsage duVol2 = duRootData.get(1); + assertEquals(VOL_PATH, duVol1.getSubpath()); + assertEquals(VOL_TWO_PATH, duVol2.getSubpath()); + assertEquals(VOL_DATA_SIZE, duVol1.getSize()); + assertEquals(VOL_TWO_DATA_SIZE, duVol2.getSize()); + } + + @Test + public void testDiskUsageVolume() throws Exception { + // volume level DU + Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_PATH, + false, false); + DUResponse duVolRes = (DUResponse) volResponse.getEntity(); + assertEquals(2, duVolRes.getCount()); + List duData = duVolRes.getDuData(); + // sort based on subpath + Collections.sort(duData, + Comparator.comparing(DUResponse.DiskUsage::getSubpath)); + DUResponse.DiskUsage duBucket1 = duData.get(0); + DUResponse.DiskUsage duBucket2 = duData.get(1); + assertEquals(BUCKET_ONE_PATH, duBucket1.getSubpath()); + assertEquals(BUCKET_TWO_PATH, duBucket2.getSubpath()); + assertEquals(BUCKET_ONE_DATA_SIZE, duBucket1.getSize()); + assertEquals(BUCKET_TWO_DATA_SIZE, duBucket2.getSize()); + } + + @Test + public void testDiskUsageVolTwo() throws Exception { + // volume level DU + Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_TWO_PATH, + false, false); + DUResponse duVolRes = (DUResponse) volResponse.getEntity(); + assertEquals(2, duVolRes.getCount()); + List duData = duVolRes.getDuData(); + // sort based on subpath + Collections.sort(duData, + Comparator.comparing(DUResponse.DiskUsage::getSubpath)); + DUResponse.DiskUsage duBucket3 = duData.get(0); + DUResponse.DiskUsage duBucket4 = duData.get(1); + assertEquals(BUCKET_THREE_PATH, duBucket3.getSubpath()); + assertEquals(BUCKET_FOUR_PATH, duBucket4.getSubpath()); + assertEquals(VOL_TWO_DATA_SIZE, duVolRes.getSize()); + } + + @Test + public void 
testDiskUsageBucketOne() throws Exception { + // bucket level DU + Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH, + false, false); + DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); + // There are no sub-paths under this OBS bucket. + assertEquals(0, duBucketResponse.getCount()); + + Response bucketResponseWithSubpath = nsSummaryEndpoint.getDiskUsage( + BUCKET_ONE_PATH, true, false); + DUResponse duBucketResponseWithFiles = + (DUResponse) bucketResponseWithSubpath.getEntity(); + assertEquals(3, duBucketResponseWithFiles.getCount()); + + assertEquals(BUCKET_ONE_DATA_SIZE, duBucketResponse.getSize()); + } + + @Test + public void testDiskUsageBucketTwo() throws Exception { + // bucket level DU + Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_TWO_PATH, + false, false); + DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); + // There are no sub-paths under this OBS bucket. + assertEquals(0, duBucketResponse.getCount()); + + Response bucketResponseWithSubpath = nsSummaryEndpoint.getDiskUsage( + BUCKET_TWO_PATH, true, false); + DUResponse duBucketResponseWithFiles = + (DUResponse) bucketResponseWithSubpath.getEntity(); + assertEquals(2, duBucketResponseWithFiles.getCount()); + + assertEquals(BUCKET_TWO_DATA_SIZE, duBucketResponse.getSize()); + } + + @Test + public void testDiskUsageBucketThree() throws Exception { + // bucket level DU + Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_THREE_PATH, + false, false); + DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); + // There are no sub-paths under this Legacy bucket. + assertEquals(0, duBucketResponse.getCount()); + + Response bucketResponseWithSubpath = nsSummaryEndpoint.getDiskUsage( + BUCKET_THREE_PATH, true, false); + DUResponse duBucketResponseWithFiles = + (DUResponse) bucketResponseWithSubpath.getEntity(); + assertEquals(3, duBucketResponseWithFiles.getCount()); + + assertEquals(BUCKET_THREE_DATA_SIZE, duBucketResponse.getSize()); + } + + @Test + public void testDiskUsageKey1() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_ONE_PATH, + false, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_ONE_SIZE, duKeyResponse.getSize()); + } + + @Test + public void testDiskUsageKey2() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_TWO_PATH, + false, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_TWO_SIZE, duKeyResponse.getSize()); + } + + @Test + public void testDiskUsageKey4() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY4_PATH, + true, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_FOUR_SIZE, duKeyResponse.getSize()); + } + + @Test + public void testDiskUsageKey5() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_FIVE_PATH, + false, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_FIVE_SIZE, duKeyResponse.getSize()); + } + + @Test + public void testDiskUsageKey8() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_EIGHT_PATH, + 
false, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_EIGHT_SIZE, duKeyResponse.getSize()); + } + + @Test + public void testDiskUsageKey11() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_ELEVEN_PATH, + false, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_ELEVEN_SIZE, duKeyResponse.getSize()); + } + + @Test + public void testDiskUsageUnknown() throws Exception { + // invalid path check + Response invalidResponse = nsSummaryEndpoint.getDiskUsage(INVALID_PATH, + false, false); + DUResponse invalidObj = (DUResponse) invalidResponse.getEntity(); + assertEquals(ResponseStatus.PATH_NOT_FOUND, + invalidObj.getStatus()); + } + + @Test + public void testDiskUsageWithReplication() throws Exception { + setUpMultiBlockKey(); + Response keyResponse = nsSummaryEndpoint.getDiskUsage(MULTI_BLOCK_KEY_PATH, + false, true); + DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + assertEquals(MULTI_BLOCK_KEY_SIZE_WITH_REPLICA, + replicaDUResponse.getSizeWithReplica()); + } + + @Test + public void testDataSizeUnderRootWithReplication() throws IOException { + setUpMultiBlockReplicatedKeys(); + // withReplica is true + Response rootResponse = nsSummaryEndpoint.getDiskUsage(ROOT_PATH, + false, true); + DUResponse replicaDUResponse = (DUResponse) rootResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_ROOT, + replicaDUResponse.getSizeWithReplica()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL, + replicaDUResponse.getDuData().get(0).getSizeWithReplica()); + + } + + @Test + public void testDataSizeUnderVolWithReplication() throws IOException { + setUpMultiBlockReplicatedKeys(); + Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_PATH, + false, true); + DUResponse replicaDUResponse = (DUResponse) volResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL, + replicaDUResponse.getSizeWithReplica()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET1, + replicaDUResponse.getDuData().get(0).getSizeWithReplica()); + } + + @Test + public void testDataSizeUnderBucketOneWithReplication() throws IOException { + setUpMultiBlockReplicatedKeys(); + Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH, + false, true); + DUResponse replicaDUResponse = (DUResponse) bucketResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET1, + replicaDUResponse.getSizeWithReplica()); + } + + @Test + public void testDataSizeUnderBucketThreeWithReplication() throws IOException { + setUpMultiBlockReplicatedKeys(); + Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_THREE_PATH, + false, true); + DUResponse replicaDUResponse = (DUResponse) bucketResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET3, + replicaDUResponse.getSizeWithReplica()); + } + + @Test + public void testDataSizeUnderKeyWithReplication() throws IOException { + setUpMultiBlockReplicatedKeys(); + Response keyResponse = 
nsSummaryEndpoint.getDiskUsage(KEY4_PATH, + false, true); + DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_KEY, + replicaDUResponse.getSizeWithReplica()); + } + + @Test + public void testQuotaUsage() throws Exception { + // root level quota usage + Response rootResponse = nsSummaryEndpoint.getQuotaUsage(ROOT_PATH); + QuotaUsageResponse quRootRes = + (QuotaUsageResponse) rootResponse.getEntity(); + assertEquals(ROOT_QUOTA, quRootRes.getQuota()); + assertEquals(ROOT_DATA_SIZE, quRootRes.getQuotaUsed()); + + // volume level quota usage + Response volResponse = nsSummaryEndpoint.getQuotaUsage(VOL_PATH); + QuotaUsageResponse quVolRes = (QuotaUsageResponse) volResponse.getEntity(); + assertEquals(VOL_QUOTA, quVolRes.getQuota()); + assertEquals(VOL_DATA_SIZE, quVolRes.getQuotaUsed()); + + // bucket level quota usage + Response bucketRes = nsSummaryEndpoint.getQuotaUsage(BUCKET_ONE_PATH); + QuotaUsageResponse quBucketRes = (QuotaUsageResponse) bucketRes.getEntity(); + assertEquals(BUCKET_ONE_QUOTA, quBucketRes.getQuota()); + assertEquals(BUCKET_ONE_DATA_SIZE, quBucketRes.getQuotaUsed()); + + Response bucketRes2 = nsSummaryEndpoint.getQuotaUsage(BUCKET_TWO_PATH); + QuotaUsageResponse quBucketRes2 = + (QuotaUsageResponse) bucketRes2.getEntity(); + assertEquals(BUCKET_TWO_QUOTA, quBucketRes2.getQuota()); + assertEquals(BUCKET_TWO_DATA_SIZE, quBucketRes2.getQuotaUsed()); + + Response bucketRes3 = nsSummaryEndpoint.getQuotaUsage(BUCKET_THREE_PATH); + QuotaUsageResponse quBucketRes3 = + (QuotaUsageResponse) bucketRes3.getEntity(); + assertEquals(BUCKET_THREE_QUOTA, quBucketRes3.getQuota()); + assertEquals(BUCKET_THREE_DATA_SIZE, quBucketRes3.getQuotaUsed()); + + Response bucketRes4 = nsSummaryEndpoint.getQuotaUsage(BUCKET_FOUR_PATH); + QuotaUsageResponse quBucketRes4 = + (QuotaUsageResponse) bucketRes4.getEntity(); + assertEquals(BUCKET_FOUR_QUOTA, quBucketRes4.getQuota()); + assertEquals(BUCKET_FOUR_DATA_SIZE, quBucketRes4.getQuotaUsed()); + + // other level not applicable + Response naResponse2 = nsSummaryEndpoint.getQuotaUsage(KEY4_PATH); + QuotaUsageResponse quotaUsageResponse2 = + (QuotaUsageResponse) naResponse2.getEntity(); + assertEquals(ResponseStatus.TYPE_NOT_APPLICABLE, + quotaUsageResponse2.getResponseCode()); + + // invalid path request + Response invalidRes = nsSummaryEndpoint.getQuotaUsage(INVALID_PATH); + QuotaUsageResponse invalidResObj = + (QuotaUsageResponse) invalidRes.getEntity(); + assertEquals(ResponseStatus.PATH_NOT_FOUND, + invalidResObj.getResponseCode()); + } + + + @Test + public void testFileSizeDist() throws Exception { + checkFileSizeDist(ROOT_PATH, 2, 3, 3, 1); + checkFileSizeDist(VOL_PATH, 2, 1, 1, 1); + checkFileSizeDist(BUCKET_ONE_PATH, 1, 1, 0, 1); + } + + public void checkFileSizeDist(String path, int bin0, + int bin1, int bin2, int bin3) throws Exception { + Response res = nsSummaryEndpoint.getFileSizeDistribution(path); + FileSizeDistributionResponse fileSizeDistResObj = + (FileSizeDistributionResponse) res.getEntity(); + int[] fileSizeDist = fileSizeDistResObj.getFileSizeDist(); + assertEquals(bin0, fileSizeDist[0]); + assertEquals(bin1, fileSizeDist[1]); + assertEquals(bin2, fileSizeDist[2]); + assertEquals(bin3, fileSizeDist[3]); + for (int i = 4; i < ReconConstants.NUM_OF_FILE_SIZE_BINS; ++i) { + assertEquals(0, fileSizeDist[i]); + } + } + + @Test + public void testNormalizePathUptoBucket() { + // Test null or empty 
path + assertEquals("/", OmUtils.normalizePathUptoBucket(null)); + assertEquals("/", OmUtils.normalizePathUptoBucket("")); + + // Test path with leading slashes + assertEquals("volume1/bucket1/key1/key2", + OmUtils.normalizePathUptoBucket("///volume1/bucket1/key1/key2")); + + // Test volume and bucket names + assertEquals("volume1/bucket1", + OmUtils.normalizePathUptoBucket("volume1/bucket1")); + + // Test with additional segments + assertEquals("volume1/bucket1/key1/key2", + OmUtils.normalizePathUptoBucket("volume1/bucket1/key1/key2")); + + // Test path with multiple slashes in key names. + assertEquals("volume1/bucket1/key1//key2", + OmUtils.normalizePathUptoBucket("volume1/bucket1/key1//key2")); + + // Test path with volume, bucket, and special characters in keys + assertEquals("volume/bucket/key$%#1/./////////key$%#2", + OmUtils.normalizePathUptoBucket("volume/bucket/key$%#1/./////////key$%#2")); + } + + + /** + * Testing the following case. + * └── vol + * ├── bucket1 (OBS) + * │ ├── file1 + * │ ├── file2 + * │ └── file3 + * └── bucket2 (OBS) + * ├── file4 + * └── file5 + * └── vol2 + * ├── bucket3 (Legacy) + * │ ├── file8 + * │ ├── file9 + * │ └── file10 + * └── bucket4 (Legacy) + * └── file11 + * + * Write these keys to OM and + * replicate them. + * @throws Exception + */ + @SuppressWarnings("checkstyle:MethodLength") + private void populateOMDB() throws Exception { + + // write all keys + writeKeyToOm(reconOMMetadataManager, + KEY_ONE, + BUCKET_ONE, + VOL, + KEY_ONE, + KEY_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + FILE_ONE_SIZE, + getOBSBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_TWO, + BUCKET_ONE, + VOL, + KEY_TWO, + KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + FILE_TWO_SIZE, + getOBSBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_THREE, + BUCKET_ONE, + VOL, + KEY_THREE, + KEY_THREE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + FILE_THREE_SIZE, + getOBSBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_FOUR, + BUCKET_TWO, + VOL, + KEY_FOUR, + KEY_FOUR_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + FILE_FOUR_SIZE, + getOBSBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_FIVE, + BUCKET_TWO, + VOL, + KEY_FIVE, + KEY_FIVE_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + FILE_FIVE_SIZE, + getOBSBucketLayout()); + + writeKeyToOm(reconOMMetadataManager, + KEY_EIGHT, + BUCKET_THREE, + VOL_TWO, + KEY_EIGHT, + KEY_EIGHT_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + FILE_EIGHT_SIZE, + getLegacyBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_NINE, + BUCKET_THREE, + VOL_TWO, + KEY_NINE, + KEY_NINE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + FILE_NINE_SIZE, + getLegacyBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_TEN, + BUCKET_THREE, + VOL_TWO, + KEY_TEN, + KEY_TEN_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + FILE_TEN_SIZE, + getLegacyBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_ELEVEN, + BUCKET_FOUR, + VOL_TWO, + KEY_ELEVEN, + KEY_ELEVEN_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_FOUR_OBJECT_ID, + VOL_TWO_OBJECT_ID, + FILE_ELEVEN_SIZE, + getLegacyBucketLayout()); + } + + /** + * Create a new OM Metadata manager instance with one user, one vol, and two + * buckets. 
+ * + * @throws IOException ioEx + */ + private static OMMetadataManager initializeNewOmMetadataManager( + File omDbDir, OzoneConfiguration omConfiguration) + throws IOException { + omConfiguration.set(OZONE_OM_DB_DIRS, + omDbDir.getAbsolutePath()); + omConfiguration.set(OMConfigKeys + .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "false"); + OMMetadataManager omMetadataManager = new OmMetadataManagerImpl( + omConfiguration, null); + + String volumeKey = omMetadataManager.getVolumeKey(VOL); + OmVolumeArgs args = + OmVolumeArgs.newBuilder() + .setObjectID(VOL_OBJECT_ID) + .setVolume(VOL) + .setAdminName(TEST_USER) + .setOwnerName(TEST_USER) + .setQuotaInBytes(VOL_QUOTA) + .build(); + + String volume2Key = omMetadataManager.getVolumeKey(VOL_TWO); + OmVolumeArgs args2 = + OmVolumeArgs.newBuilder() + .setObjectID(VOL_TWO_OBJECT_ID) + .setVolume(VOL_TWO) + .setAdminName(TEST_USER) + .setOwnerName(TEST_USER) + .setQuotaInBytes(VOL_TWO_QUOTA) + .build(); + + omMetadataManager.getVolumeTable().put(volumeKey, args); + omMetadataManager.getVolumeTable().put(volume2Key, args2); + + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(BUCKET_ONE_OBJECT_ID) + .setQuotaInBytes(BUCKET_ONE_QUOTA) + .setBucketLayout(getOBSBucketLayout()) + .build(); + + OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_TWO) + .setObjectID(BUCKET_TWO_OBJECT_ID) + .setQuotaInBytes(BUCKET_TWO_QUOTA) + .setBucketLayout(getOBSBucketLayout()) + .build(); + + OmBucketInfo bucketInfo3 = OmBucketInfo.newBuilder() + .setVolumeName(VOL_TWO) + .setBucketName(BUCKET_THREE) + .setObjectID(BUCKET_THREE_OBJECT_ID) + .setQuotaInBytes(BUCKET_THREE_QUOTA) + .setBucketLayout(getLegacyBucketLayout()) + .build(); + + OmBucketInfo bucketInfo4 = OmBucketInfo.newBuilder() + .setVolumeName(VOL_TWO) + .setBucketName(BUCKET_FOUR) + .setObjectID(BUCKET_FOUR_OBJECT_ID) + .setQuotaInBytes(BUCKET_FOUR_QUOTA) + .setBucketLayout(getLegacyBucketLayout()) + .build(); + + String bucketKey = omMetadataManager.getBucketKey( + bucketInfo.getVolumeName(), bucketInfo.getBucketName()); + String bucketKey2 = omMetadataManager.getBucketKey( + bucketInfo2.getVolumeName(), bucketInfo2.getBucketName()); + String bucketKey3 = omMetadataManager.getBucketKey( + bucketInfo3.getVolumeName(), bucketInfo3.getBucketName()); + String bucketKey4 = omMetadataManager.getBucketKey( + bucketInfo4.getVolumeName(), bucketInfo4.getBucketName()); + + omMetadataManager.getBucketTable().put(bucketKey, bucketInfo); + omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2); + omMetadataManager.getBucketTable().put(bucketKey3, bucketInfo3); + omMetadataManager.getBucketTable().put(bucketKey4, bucketInfo4); + + return omMetadataManager; + } + + private void setUpMultiBlockKey() throws IOException { + OmKeyLocationInfoGroup locationInfoGroup = + getLocationInfoGroup1(); + + // add the multi-block key to Recon's OM + writeKeyToOm(reconOMMetadataManager, + MULTI_BLOCK_FILE, + BUCKET_ONE, + VOL, + MULTI_BLOCK_FILE, + MULTI_BLOCK_KEY_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup), + getOBSBucketLayout(), + FILE_THREE_SIZE); + } + + private OmKeyLocationInfoGroup getLocationInfoGroup1() { + List locationInfoList = new ArrayList<>(); + BlockID block1 = new BlockID(CONTAINER_ONE_ID, 0L); + BlockID block2 = new BlockID(CONTAINER_TWO_ID, 0L); + BlockID block3 = new BlockID(CONTAINER_THREE_ID, 0L); + + OmKeyLocationInfo 
location1 = new OmKeyLocationInfo.Builder() + .setBlockID(block1) + .setLength(BLOCK_ONE_LENGTH) + .build(); + OmKeyLocationInfo location2 = new OmKeyLocationInfo.Builder() + .setBlockID(block2) + .setLength(BLOCK_TWO_LENGTH) + .build(); + OmKeyLocationInfo location3 = new OmKeyLocationInfo.Builder() + .setBlockID(block3) + .setLength(BLOCK_THREE_LENGTH) + .build(); + locationInfoList.add(location1); + locationInfoList.add(location2); + locationInfoList.add(location3); + + return new OmKeyLocationInfoGroup(0L, locationInfoList); + } + + + private OmKeyLocationInfoGroup getLocationInfoGroup2() { + List locationInfoList = new ArrayList<>(); + BlockID block4 = new BlockID(CONTAINER_FOUR_ID, 0L); + BlockID block5 = new BlockID(CONTAINER_FIVE_ID, 0L); + BlockID block6 = new BlockID(CONTAINER_SIX_ID, 0L); + + OmKeyLocationInfo location4 = new OmKeyLocationInfo.Builder() + .setBlockID(block4) + .setLength(BLOCK_FOUR_LENGTH) + .build(); + OmKeyLocationInfo location5 = new OmKeyLocationInfo.Builder() + .setBlockID(block5) + .setLength(BLOCK_FIVE_LENGTH) + .build(); + OmKeyLocationInfo location6 = new OmKeyLocationInfo.Builder() + .setBlockID(block6) + .setLength(BLOCK_SIX_LENGTH) + .build(); + locationInfoList.add(location4); + locationInfoList.add(location5); + locationInfoList.add(location6); + return new OmKeyLocationInfoGroup(0L, locationInfoList); + + } + + @SuppressWarnings("checkstyle:MethodLength") + private void setUpMultiBlockReplicatedKeys() throws IOException { + OmKeyLocationInfoGroup locationInfoGroup1 = + getLocationInfoGroup1(); + OmKeyLocationInfoGroup locationInfoGroup2 = + getLocationInfoGroup2(); + + //vol/bucket1/file1 + writeKeyToOm(reconOMMetadataManager, + KEY_ONE, + BUCKET_ONE, + VOL, + KEY_ONE, + KEY_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup1), + getOBSBucketLayout(), + FILE_ONE_SIZE); + + //vol/bucket1/file2 + writeKeyToOm(reconOMMetadataManager, + KEY_TWO, + BUCKET_ONE, + VOL, + KEY_TWO, + KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup2), + getOBSBucketLayout(), + FILE_TWO_SIZE); + + //vol/bucket1/file3 + writeKeyToOm(reconOMMetadataManager, + KEY_THREE, + BUCKET_ONE, + VOL, + KEY_THREE, + KEY_THREE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup1), + getOBSBucketLayout(), + FILE_THREE_SIZE); + + //vol/bucket2/file4 + writeKeyToOm(reconOMMetadataManager, + KEY_FOUR, + BUCKET_TWO, + VOL, + KEY_FOUR, + KEY_FOUR_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup2), + getOBSBucketLayout(), + FILE_FOUR_SIZE); + + //vol/bucket2/file5 + writeKeyToOm(reconOMMetadataManager, + KEY_FIVE, + BUCKET_TWO, + VOL, + KEY_FIVE, + KEY_FIVE_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup1), + getOBSBucketLayout(), + FILE_FIVE_SIZE); + + //vol2/bucket3/file8 + writeKeyToOm(reconOMMetadataManager, + KEY_EIGHT, + BUCKET_THREE, + VOL_TWO, + KEY_EIGHT, + KEY_EIGHT_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + Collections.singletonList(locationInfoGroup2), + getLegacyBucketLayout(), + FILE_EIGHT_SIZE); + + //vol2/bucket3/file9 + writeKeyToOm(reconOMMetadataManager, + KEY_NINE, + BUCKET_THREE, + VOL_TWO, + KEY_NINE, + KEY_NINE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + 
BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + Collections.singletonList(locationInfoGroup1), + getLegacyBucketLayout(), + FILE_NINE_SIZE); + + //vol2/bucket3/file10 + writeKeyToOm(reconOMMetadataManager, + KEY_TEN, + BUCKET_THREE, + VOL_TWO, + KEY_TEN, + KEY_TEN_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + Collections.singletonList(locationInfoGroup2), + getLegacyBucketLayout(), + FILE_TEN_SIZE); + + //vol2/bucket4/file11 + writeKeyToOm(reconOMMetadataManager, + KEY_ELEVEN, + BUCKET_FOUR, + VOL_TWO, + KEY_ELEVEN, + KEY_ELEVEN_OBJECT_ID, + BUCKET_FOUR_OBJECT_ID, + BUCKET_FOUR_OBJECT_ID, + VOL_TWO_OBJECT_ID, + Collections.singletonList(locationInfoGroup1), + getLegacyBucketLayout(), + FILE_ELEVEN_SIZE); + } + + /** + * Generate a set of mock container replica with a size of + * replication factor for container. + * + * @param replicationFactor number of replica + * @param containerID the container replicated based upon + * @return a set of container replica for testing + */ + private static Set generateMockContainerReplicas( + int replicationFactor, ContainerID containerID) { + Set result = new HashSet<>(); + for (int i = 0; i < replicationFactor; ++i) { + DatanodeDetails randomDatanode = randomDatanodeDetails(); + ContainerReplica replica = new ContainerReplica.ContainerReplicaBuilder() + .setContainerID(containerID) + .setContainerState( + StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.OPEN) + .setDatanodeDetails(randomDatanode) + .build(); + result.add(replica); + } + return result; + } + + private static ReconStorageContainerManagerFacade getMockReconSCM() + throws ContainerNotFoundException { + ReconStorageContainerManagerFacade reconSCM = + mock(ReconStorageContainerManagerFacade.class); + ContainerManager containerManager = mock(ContainerManager.class); + + // Container 1 is 3-way replicated + ContainerID containerID1 = new ContainerID(CONTAINER_ONE_ID); + Set containerReplicas1 = generateMockContainerReplicas( + CONTAINER_ONE_REPLICA_COUNT, containerID1); + when(containerManager.getContainerReplicas(containerID1)) + .thenReturn(containerReplicas1); + + // Container 2 is under replicated with 2 replica + ContainerID containerID2 = new ContainerID(CONTAINER_TWO_ID); + Set containerReplicas2 = generateMockContainerReplicas( + CONTAINER_TWO_REPLICA_COUNT, containerID2); + when(containerManager.getContainerReplicas(containerID2)) + .thenReturn(containerReplicas2); + + // Container 3 is over replicated with 4 replica + ContainerID containerID3 = new ContainerID(CONTAINER_THREE_ID); + Set containerReplicas3 = generateMockContainerReplicas( + CONTAINER_THREE_REPLICA_COUNT, containerID3); + when(containerManager.getContainerReplicas(containerID3)) + .thenReturn(containerReplicas3); + + // Container 4 is replicated with 5 replica + ContainerID containerID4 = new ContainerID(CONTAINER_FOUR_ID); + Set containerReplicas4 = generateMockContainerReplicas( + CONTAINER_FOUR_REPLICA_COUNT, containerID4); + when(containerManager.getContainerReplicas(containerID4)) + .thenReturn(containerReplicas4); + + // Container 5 is replicated with 2 replica + ContainerID containerID5 = new ContainerID(CONTAINER_FIVE_ID); + Set containerReplicas5 = generateMockContainerReplicas( + CONTAINER_FIVE_REPLICA_COUNT, containerID5); + when(containerManager.getContainerReplicas(containerID5)) + .thenReturn(containerReplicas5); + + // Container 6 is replicated with 3 replica + ContainerID containerID6 = new ContainerID(CONTAINER_SIX_ID); + Set containerReplicas6 = 
generateMockContainerReplicas( + CONTAINER_SIX_REPLICA_COUNT, containerID6); + when(containerManager.getContainerReplicas(containerID6)) + .thenReturn(containerReplicas6); + + when(reconSCM.getContainerManager()).thenReturn(containerManager); + ReconNodeManager mockReconNodeManager = mock(ReconNodeManager.class); + when(mockReconNodeManager.getStats()).thenReturn(getMockSCMRootStat()); + when(reconSCM.getScmNodeManager()).thenReturn(mockReconNodeManager); + return reconSCM; + } + + private static BucketLayout getOBSBucketLayout() { + return BucketLayout.OBJECT_STORE; + } + + private static BucketLayout getLegacyBucketLayout() { + return BucketLayout.LEGACY; + } + + private static SCMNodeStat getMockSCMRootStat() { + return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, + ROOT_QUOTA - ROOT_DATA_SIZE, 0L, 0L); + } + +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java index 8a9452a86297..f64d93707a2c 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java @@ -67,8 +67,8 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.anyString; @@ -329,16 +329,14 @@ public void setUp() throws Exception { .addStorageReport(storageReportProto1) .addStorageReport(storageReportProto2).build(); - try { + assertDoesNotThrow(() -> { reconScm.getDatanodeProtocolServer() - .register(extendedDatanodeDetailsProto, nodeReportProto, - containerReportsProto, pipelineReportsProto, - defaultLayoutVersionProto()); + .register(extendedDatanodeDetailsProto, nodeReportProto, + containerReportsProto, pipelineReportsProto, + defaultLayoutVersionProto()); // Process all events in the event queue reconScm.getEventQueue().processAll(1000); - } catch (Exception ex) { - fail(ex.getMessage()); - } + }); } @Test @@ -421,16 +419,14 @@ private void updateContainerReport(long containerId) { .setOriginNodeId(datanodeId) .build()) .build(); - try { + assertDoesNotThrow(() -> { reconScm.getDatanodeProtocolServer() - .register(extendedDatanodeDetailsProto, nodeReportProto, - containerReportsProto, pipelineReportsProto, - defaultLayoutVersionProto()); + .register(extendedDatanodeDetailsProto, nodeReportProto, + containerReportsProto, pipelineReportsProto, + defaultLayoutVersionProto()); // Process all events in the event queue reconScm.getEventQueue().processAll(1000); - } catch (Exception ex) { - fail(ex.getMessage()); - } + }); } private void waitAndCheckConditionAfterHeartbeat(Callable check) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java index 753804e5fab0..8b35bfdd4d2a 100644 --- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java @@ -38,8 +38,8 @@ import javax.ws.rs.core.Response; import java.net.InetSocketAddress; +import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -63,10 +63,14 @@ private OmPrefixInfo getOmPrefixInfoForTest( String identityString, IAccessAuthorizer.ACLType aclType, OzoneAcl.AclScope scope) { - return new OmPrefixInfo(path, - Collections.singletonList(new OzoneAcl( + return OmPrefixInfo.newBuilder() + .setName(path) + .setAcls(new ArrayList<>(Collections.singletonList(new OzoneAcl( identityType, identityString, - aclType, scope)), new HashMap<>(), 10, 100); + scope, aclType)))) + .setObjectID(10) + .setUpdateID(100) + .build(); } public void testNSSummaryBasicInfoRoot( diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java index 371fb6f9d675..8647639dd134 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.recon.fsck; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.assertj.core.api.Assertions.assertThat; import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.ALL_REPLICAS_UNHEALTHY; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -38,6 +39,7 @@ import java.util.UUID; import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicatedReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; @@ -49,6 +51,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.container.ContainerReplica; +import org.apache.hadoop.hdds.scm.container.TestContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementStatusDefault; import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager; @@ -110,38 +113,61 @@ public void testRun() throws Exception { when(scmClientMock.getContainerWithPipeline(c.getContainerID())) .thenReturn(new ContainerWithPipeline(c, null)); } + + ReplicatedReplicationConfig replicationConfig = RatisReplicationConfig.getInstance(THREE); // Under replicated - when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(1L))) + ContainerInfo containerInfo1 = + TestContainerInfo.newBuilderForTest().setContainerID(1).setReplicationConfig(replicationConfig).build(); + when(containerManagerMock.getContainer(ContainerID.valueOf(1L))).thenReturn(containerInfo1); + when(containerManagerMock.getContainerReplicas(containerInfo1.containerID())) .thenReturn(getMockReplicas(1L, State.CLOSED, State.UNHEALTHY)); // return all UNHEALTHY replicas for container ID 2 -> UNDER_REPLICATED - 
when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(2L))) + ContainerInfo containerInfo2 = + TestContainerInfo.newBuilderForTest().setContainerID(2).setReplicationConfig(replicationConfig).build(); + when(containerManagerMock.getContainer(ContainerID.valueOf(2L))).thenReturn(containerInfo2); + when(containerManagerMock.getContainerReplicas(containerInfo2.containerID())) .thenReturn(getMockReplicas(2L, State.UNHEALTHY)); - // return 0 replicas for container ID 3 -> Missing - when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(3L))) + // return 0 replicas for container ID 3 -> Empty Missing + ContainerInfo containerInfo3 = + TestContainerInfo.newBuilderForTest().setContainerID(3).setReplicationConfig(replicationConfig).build(); + when(containerManagerMock.getContainer(ContainerID.valueOf(3L))).thenReturn(containerInfo3); + when(containerManagerMock.getContainerReplicas(containerInfo3.containerID())) .thenReturn(Collections.emptySet()); // Return 5 Healthy -> Over replicated - when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(4L))) + ContainerInfo containerInfo4 = + TestContainerInfo.newBuilderForTest().setContainerID(4).setReplicationConfig(replicationConfig).build(); + when(containerManagerMock.getContainer(ContainerID.valueOf(4L))).thenReturn(containerInfo4); + when(containerManagerMock.getContainerReplicas(containerInfo4.containerID())) .thenReturn(getMockReplicas(4L, State.CLOSED, State.CLOSED, State.CLOSED, State.CLOSED, State.CLOSED)); // Mis-replicated + ContainerInfo containerInfo5 = + TestContainerInfo.newBuilderForTest().setContainerID(5).setReplicationConfig(replicationConfig).build(); + when(containerManagerMock.getContainer(ContainerID.valueOf(5L))).thenReturn(containerInfo5); Set misReplicas = getMockReplicas(5L, State.CLOSED, State.CLOSED, State.CLOSED); placementMock.setMisRepWhenDnPresent( misReplicas.iterator().next().getDatanodeDetails().getUuid()); - when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(5L))) + when(containerManagerMock.getContainerReplicas(containerInfo5.containerID())) .thenReturn(misReplicas); // Return 3 Healthy -> Healthy container - when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(6L))) + ContainerInfo containerInfo6 = + TestContainerInfo.newBuilderForTest().setContainerID(6).setReplicationConfig(replicationConfig).build(); + when(containerManagerMock.getContainer(ContainerID.valueOf(6L))).thenReturn(containerInfo6); + when(containerManagerMock.getContainerReplicas(containerInfo6.containerID())) .thenReturn(getMockReplicas(6L, State.CLOSED, State.CLOSED, State.CLOSED)); - // return 0 replicas for container ID 7 -> EMPTY_MISSING - when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(7L))) + // return 0 replicas for container ID 7 -> MISSING + ContainerInfo containerInfo7 = + TestContainerInfo.newBuilderForTest().setContainerID(7).setReplicationConfig(replicationConfig).build(); + when(containerManagerMock.getContainer(ContainerID.valueOf(7L))).thenReturn(containerInfo7); + when(containerManagerMock.getContainerReplicas(containerInfo7.containerID())) .thenReturn(Collections.emptySet()); List all = unHealthyContainersTableHandle.findAll(); @@ -150,7 +176,7 @@ public void testRun() throws Exception { long currentTime = System.currentTimeMillis(); ReconTaskStatusDao reconTaskStatusDao = getDao(ReconTaskStatusDao.class); ReconTaskConfig reconTaskConfig = new ReconTaskConfig(); - reconTaskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(2)); + 
reconTaskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(5)); when(reconContainerMetadataManager.getKeyCountForContainer( 7L)).thenReturn(5L); ContainerHealthTask containerHealthTask = @@ -215,7 +241,7 @@ public void testRun() throws Exception { .thenReturn(getMockReplicas(2L, State.CLOSED, State.CLOSED, State.CLOSED)); - // return 0 replicas for container ID 3 -> Still Missing + // return 0 replicas for container ID 3 -> Still empty Missing when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(3L))) .thenReturn(Collections.emptySet()); @@ -227,7 +253,7 @@ public void testRun() throws Exception { // Was mis-replicated - make it healthy now placementMock.setMisRepWhenDnPresent(null); - LambdaTestUtils.await(6000, 1000, () -> + LambdaTestUtils.await(60000, 1000, () -> (unHealthyContainersTableHandle.count() == 4)); rec = unHealthyContainersTableHandle.fetchByContainerId(1L).get(0); assertEquals("UNDER_REPLICATED", rec.getContainerState()); @@ -252,6 +278,21 @@ public void testRun() throws Exception { // This container is now healthy, it should not be in the table any more assertEquals(0, unHealthyContainersTableHandle.fetchByContainerId(5L).size()); + + // Again make container Id 7 as empty which was missing as well, so in next + // container health task run, this container also should be deleted from + // UNHEALTHY_CONTAINERS table because we want to cleanup any existing + // EMPTY and MISSING containers from UNHEALTHY_CONTAINERS table. + when(reconContainerMetadataManager.getKeyCountForContainer(7L)).thenReturn(0L); + LambdaTestUtils.await(6000, 1000, () -> { + UnhealthyContainers emptyMissingContainer = unHealthyContainersTableHandle.fetchByContainerId(7L).get(0); + return ("EMPTY_MISSING".equals(emptyMissingContainer.getContainerState())); + }); + + // Just check once again that count doesn't change, only state of + // container 7 changes from MISSING to EMPTY_MISSING + LambdaTestUtils.await(60000, 1000, () -> + (unHealthyContainersTableHandle.count() == 4)); } @Test @@ -343,6 +384,65 @@ public void testDeletedContainer() throws Exception { .isGreaterThan(currentTime); } + @Test + public void testNegativeSizeContainers() throws Exception { + // Setup mock objects and test environment + UnhealthyContainersDao unhealthyContainersDao = + getDao(UnhealthyContainersDao.class); + ContainerHealthSchemaManager containerHealthSchemaManager = + new ContainerHealthSchemaManager( + getSchemaDefinition(ContainerSchemaDefinition.class), + unhealthyContainersDao); + ReconStorageContainerManagerFacade scmMock = + mock(ReconStorageContainerManagerFacade.class); + ContainerManager containerManagerMock = mock(ContainerManager.class); + StorageContainerServiceProvider scmClientMock = + mock(StorageContainerServiceProvider.class); + ReconContainerMetadataManager reconContainerMetadataManager = + mock(ReconContainerMetadataManager.class); + MockPlacementPolicy placementMock = new MockPlacementPolicy(); + + // Mock container info setup + List mockContainers = getMockContainers(3); + when(scmMock.getContainerManager()).thenReturn(containerManagerMock); + when(scmMock.getScmServiceProvider()).thenReturn(scmClientMock); + when(containerManagerMock.getContainers(any(ContainerID.class), + anyInt())).thenReturn(mockContainers); + for (ContainerInfo c : mockContainers) { + when(containerManagerMock.getContainer( + c.containerID())).thenReturn(c); + when(scmClientMock.getContainerWithPipeline( + c.getContainerID())).thenReturn(new ContainerWithPipeline(c, null)); + 
when(containerManagerMock.getContainer(c.containerID()) + .getUsedBytes()).thenReturn(Long.valueOf(-10)); + } + + // Verify the table is initially empty + assertThat(unhealthyContainersDao.findAll()).isEmpty(); + + // Setup and start the container health task + ReconTaskStatusDao reconTaskStatusDao = getDao(ReconTaskStatusDao.class); + ReconTaskConfig reconTaskConfig = new ReconTaskConfig(); + reconTaskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(2)); + ContainerHealthTask containerHealthTask = new ContainerHealthTask( + scmMock.getContainerManager(), scmMock.getScmServiceProvider(), + reconTaskStatusDao, + containerHealthSchemaManager, placementMock, reconTaskConfig, + reconContainerMetadataManager, + new OzoneConfiguration()); + containerHealthTask.start(); + + // Wait for the task to identify unhealthy containers + LambdaTestUtils.await(6000, 1000, + () -> unhealthyContainersDao.count() == 3); + + // Assert that all unhealthy containers have been identified as NEGATIVE_SIZE states + List negativeSizeContainers = + unhealthyContainersDao.fetchByContainerState("NEGATIVE_SIZE"); + assertThat(negativeSizeContainers).hasSize(3); + } + + private Set getMockReplicas( long containerId, State...states) { Set replicas = new HashSet<>(); @@ -364,9 +464,9 @@ private List getMockContainers(int num) { when(c.getContainerID()).thenReturn((long)i); when(c.getReplicationConfig()) .thenReturn(RatisReplicationConfig.getInstance( - HddsProtos.ReplicationFactor.THREE)); + THREE)); when(c.getReplicationFactor()) - .thenReturn(HddsProtos.ReplicationFactor.THREE); + .thenReturn(THREE); when(c.getState()).thenReturn(HddsProtos.LifeCycleState.CLOSED); when(c.containerID()).thenReturn(ContainerID.valueOf(i)); containers.add(c); @@ -379,7 +479,7 @@ private ContainerInfo getMockDeletedContainer(int containerID) { when(c.getContainerID()).thenReturn((long)containerID); when(c.getReplicationConfig()) .thenReturn(RatisReplicationConfig - .getInstance(HddsProtos.ReplicationFactor.THREE)); + .getInstance(THREE)); when(c.containerID()).thenReturn(ContainerID.valueOf(containerID)); when(c.getState()).thenReturn(HddsProtos.LifeCycleState.DELETED); return c; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java index efde79f9bacb..3c572aa8e052 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.net.NetworkTopology; @@ -55,6 +56,7 @@ import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager; +import org.apache.hadoop.ozone.recon.TestReconUtils; import org.junit.jupiter.api.Test; import 
org.junit.jupiter.api.io.TempDir; @@ -165,6 +167,31 @@ public void testProcessICRStateMismatch() } } + @Test + public void testMergeMultipleICRs() { + final ContainerInfo container = TestReconUtils.getContainer(LifeCycleState.OPEN); + final DatanodeDetails datanodeOne = randomDatanodeDetails(); + final IncrementalContainerReportProto containerReport = + getIncrementalContainerReportProto(container.containerID(), + ContainerReplicaProto.State.CLOSED, + datanodeOne.getUuidString()); + final IncrementalContainerReportFromDatanode icrFromDatanode1 = + new IncrementalContainerReportFromDatanode( + datanodeOne, containerReport); + final IncrementalContainerReportFromDatanode icrFromDatanode2 = + new IncrementalContainerReportFromDatanode( + datanodeOne, containerReport); + assertEquals(1, icrFromDatanode1.getReport().getReportList().size()); + icrFromDatanode1.mergeReport(icrFromDatanode2); + assertEquals(2, icrFromDatanode1.getReport().getReportList().size()); + + final IncrementalContainerReportFromDatanode icrFromDatanode3 = + new IncrementalContainerReportFromDatanode( + datanodeOne, containerReport); + icrFromDatanode1.mergeReport(icrFromDatanode3); + assertEquals(3, icrFromDatanode1.getReport().getReportList().size()); + } + private LifeCycleState getContainerStateFromReplicaState( State state) { switch (state) { diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java index d15cd6142d3c..032bff80ade3 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java @@ -297,8 +297,8 @@ public void testGetAndApplyDeltaUpdatesFromOM( OzoneManagerSyncMetrics metrics = ozoneManagerServiceProvider.getMetrics(); assertEquals(4.0, - metrics.getAverageNumUpdatesInDeltaRequest().value(), 0.0); - assertEquals(1, metrics.getNumNonZeroDeltaRequests().value()); + metrics.getAverageNumUpdatesInDeltaRequest(), 0.0); + assertEquals(1, metrics.getNumNonZeroDeltaRequests()); // In this method, we have to assert the "GET" path and the "APPLY" path. @@ -372,8 +372,8 @@ public void testGetAndApplyDeltaUpdatesFromOMWithLimit( OzoneManagerSyncMetrics metrics = ozoneManagerServiceProvider.getMetrics(); assertEquals(1.0, - metrics.getAverageNumUpdatesInDeltaRequest().value(), 0.0); - assertEquals(3, metrics.getNumNonZeroDeltaRequests().value()); + metrics.getAverageNumUpdatesInDeltaRequest(), 0.0); + assertEquals(3, metrics.getNumNonZeroDeltaRequests()); // In this method, we have to assert the "GET" path and the "APPLY" path. @@ -417,7 +417,7 @@ public void testSyncDataFromOMFullSnapshot( reconTaskControllerMock, new ReconUtils(), ozoneManagerProtocol); OzoneManagerSyncMetrics metrics = ozoneManagerServiceProvider.getMetrics(); - assertEquals(0, metrics.getNumSnapshotRequests().value()); + assertEquals(0, metrics.getNumSnapshotRequests()); // Should trigger full snapshot request. 
ozoneManagerServiceProvider.syncDataFromOM(); @@ -429,7 +429,7 @@ public void testSyncDataFromOMFullSnapshot( assertEquals(OmSnapshotRequest.name(), captor.getValue().getTaskName()); verify(reconTaskControllerMock, times(1)) .reInitializeTasks(omMetadataManager); - assertEquals(1, metrics.getNumSnapshotRequests().value()); + assertEquals(1, metrics.getNumSnapshotRequests()); } @Test @@ -470,7 +470,7 @@ public void testSyncDataFromOMDeltaUpdates( verify(reconTaskControllerMock, times(1)) .consumeOMEvents(any(OMUpdateEventBatch.class), any(OMMetadataManager.class)); - assertEquals(0, metrics.getNumSnapshotRequests().value()); + assertEquals(0, metrics.getNumSnapshotRequests()); } @Test @@ -509,7 +509,7 @@ public void testSyncDataFromOMFullSnapshotForSNNFE( assertEquals(OmSnapshotRequest.name(), captor.getValue().getTaskName()); verify(reconTaskControllerMock, times(1)) .reInitializeTasks(omMetadataManager); - assertEquals(1, metrics.getNumSnapshotRequests().value()); + assertEquals(1, metrics.getNumSnapshotRequests()); } private ReconTaskController getMockTaskController() { diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java index eff330a796c9..a996f167a1bb 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java @@ -18,6 +18,11 @@ package org.apache.hadoop.ozone.recon.tasks; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.DELETED; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.QUASI_CLOSED; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSING; import static org.hadoop.ozone.recon.schema.tables.ContainerCountBySizeTable.CONTAINER_COUNT_BY_SIZE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.BDDMockito.given; @@ -84,18 +89,21 @@ public void setUp() { @Test public void testProcess() { // mock a container with invalid used bytes - final ContainerInfo omContainerInfo0 = mock(ContainerInfo.class); + ContainerInfo omContainerInfo0 = mock(ContainerInfo.class); given(omContainerInfo0.containerID()).willReturn(new ContainerID(0)); given(omContainerInfo0.getUsedBytes()).willReturn(-1L); + given(omContainerInfo0.getState()).willReturn(OPEN); // Write 2 keys ContainerInfo omContainerInfo1 = mock(ContainerInfo.class); given(omContainerInfo1.containerID()).willReturn(new ContainerID(1)); given(omContainerInfo1.getUsedBytes()).willReturn(1500000000L); // 1.5GB + given(omContainerInfo1.getState()).willReturn(CLOSED); ContainerInfo omContainerInfo2 = mock(ContainerInfo.class); given(omContainerInfo2.containerID()).willReturn(new ContainerID(2)); given(omContainerInfo2.getUsedBytes()).willReturn(2500000000L); // 2.5GB + given(omContainerInfo2.getState()).willReturn(CLOSING); // mock getContainers method to return a list of containers List containers = new ArrayList<>(); @@ -105,8 +113,8 @@ public void testProcess() { task.process(containers); - // Verify 2 containers are in correct bins. - assertEquals(2, containerCountBySizeDao.count()); + // Verify 3 containers are in correct bins. 
+ assertEquals(3, containerCountBySizeDao.count()); // container size upper bound for // 1500000000L (1.5GB) is 2147483648L = 2^31 = 2GB (next highest power of 2) @@ -124,10 +132,11 @@ public void testProcess() { containerCountBySizeDao.findById(recordToFind.value1()).getCount() .longValue()); - // Add a new key + // Add a new container ContainerInfo omContainerInfo3 = mock(ContainerInfo.class); given(omContainerInfo3.containerID()).willReturn(new ContainerID(3)); given(omContainerInfo3.getUsedBytes()).willReturn(1000000000L); // 1GB + given(omContainerInfo3.getState()).willReturn(QUASI_CLOSED); containers.add(omContainerInfo3); // Update existing key. @@ -137,7 +146,7 @@ public void testProcess() { task.process(containers); // Total size groups added to the database - assertEquals(4, containerCountBySizeDao.count()); + assertEquals(5, containerCountBySizeDao.count()); // Check whether container size upper bound for // 50000L is 536870912L = 2^29 = 512MB (next highest power of 2) @@ -164,4 +173,59 @@ public void testProcess() { .getCount() .longValue()); } + + @Test + public void testProcessDeletedAndNegativeSizedContainers() { + // Create a list of containers, including one that is deleted + ContainerInfo omContainerInfo1 = mock(ContainerInfo.class); + given(omContainerInfo1.containerID()).willReturn(new ContainerID(1)); + given(omContainerInfo1.getUsedBytes()).willReturn(1500000000L); // 1.5GB + given(omContainerInfo1.getState()).willReturn(OPEN); + + ContainerInfo omContainerInfo2 = mock(ContainerInfo.class); + given(omContainerInfo2.containerID()).willReturn(new ContainerID(2)); + given(omContainerInfo2.getUsedBytes()).willReturn(2500000000L); // 2.5GB + given(omContainerInfo2.getState()).willReturn(CLOSED); + + ContainerInfo omContainerInfoDeleted = mock(ContainerInfo.class); + given(omContainerInfoDeleted.containerID()).willReturn(new ContainerID(3)); + given(omContainerInfoDeleted.getUsedBytes()).willReturn(1000000000L); + given(omContainerInfoDeleted.getState()).willReturn(DELETED); // 1GB + + // Create a mock container with negative size + final ContainerInfo negativeSizeContainer = mock(ContainerInfo.class); + given(negativeSizeContainer.containerID()).willReturn(new ContainerID(0)); + given(negativeSizeContainer.getUsedBytes()).willReturn(-1L); + given(negativeSizeContainer.getState()).willReturn(OPEN); + + // Create a mock container with negative size and DELETE state + final ContainerInfo negativeSizeDeletedContainer = + mock(ContainerInfo.class); + given(negativeSizeDeletedContainer.containerID()).willReturn( + new ContainerID(0)); + given(negativeSizeDeletedContainer.getUsedBytes()).willReturn(-1L); + given(negativeSizeDeletedContainer.getState()).willReturn(DELETED); + + // Create a mock container with id 1 and updated size of 1GB from 1.5GB + final ContainerInfo validSizeContainer = mock(ContainerInfo.class); + given(validSizeContainer.containerID()).willReturn(new ContainerID(1)); + given(validSizeContainer.getUsedBytes()).willReturn(1000000000L); // 1GB + given(validSizeContainer.getState()).willReturn(CLOSED); + + // Mock getContainers method to return a list of containers including + // both valid and invalid ones + List containers = new ArrayList<>(); + containers.add(omContainerInfo1); + containers.add(omContainerInfo2); + containers.add(omContainerInfoDeleted); + containers.add(negativeSizeContainer); + containers.add(negativeSizeDeletedContainer); + containers.add(validSizeContainer); + + task.process(containers); + + // Verify that only the valid containers are 
counted + assertEquals(3, containerCountBySizeDao.count()); + } + } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java index 6992c3100fb9..485804240d52 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java @@ -166,7 +166,7 @@ public void setUp() throws Exception { reconNamespaceSummaryManager.getNSSummary(BUCKET_THREE_OBJECT_ID); assertNotNull(nsSummaryForBucket1); assertNotNull(nsSummaryForBucket2); - assertNull(nsSummaryForBucket3); + assertNotNull(nsSummaryForBucket3); } @Test @@ -233,7 +233,7 @@ public void setUp() throws IOException { assertNotNull(nsSummaryForBucket2); nsSummaryForBucket3 = reconNamespaceSummaryManager.getNSSummary(BUCKET_THREE_OBJECT_ID); - assertNull(nsSummaryForBucket3); + assertNotNull(nsSummaryForBucket3); } private OMUpdateEventBatch processEventBatch() throws IOException { diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java new file mode 100644 index 000000000000..db4803676390 --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java @@ -0,0 +1,554 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.db.RDBBatchOperation; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.recon.ReconConstants; +import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.Set; + +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; + +/** + * Test for NSSummaryTaskWithLegacy focusing on the OBS (Object Store) layout. 
+ */ +public final class TestNSSummaryTaskWithLegacyOBSLayout { + + private static ReconNamespaceSummaryManager reconNamespaceSummaryManager; + private static ReconOMMetadataManager reconOMMetadataManager; + private static OzoneConfiguration ozoneConfiguration; + private static NSSummaryTaskWithLegacy nSSummaryTaskWithLegacy; + + private static OMMetadataManager omMetadataManager; + private static OzoneConfiguration omConfiguration; + + // Object names + private static final String VOL = "vol"; + private static final String BUCKET_ONE = "bucket1"; + private static final String BUCKET_TWO = "bucket2"; + private static final String KEY_ONE = "key1"; + private static final String KEY_TWO = "key2"; + private static final String KEY_THREE = "dir1/dir2/key3"; + private static final String KEY_FOUR = "key4///////////"; + private static final String KEY_FIVE = "//////////"; + private static final String KEY_SIX = "key6"; + private static final String KEY_SEVEN = "/////key7"; + + private static final String TEST_USER = "TestUser"; + + private static final long PARENT_OBJECT_ID_ZERO = 0L; + private static final long VOL_OBJECT_ID = 0L; + private static final long BUCKET_ONE_OBJECT_ID = 1L; + private static final long BUCKET_TWO_OBJECT_ID = 2L; + private static final long KEY_ONE_OBJECT_ID = 3L; + private static final long KEY_TWO_OBJECT_ID = 5L; + private static final long KEY_FOUR_OBJECT_ID = 6L; + private static final long KEY_THREE_OBJECT_ID = 8L; + private static final long KEY_FIVE_OBJECT_ID = 9L; + private static final long KEY_SIX_OBJECT_ID = 10L; + private static final long KEY_SEVEN_OBJECT_ID = 11L; + + + private static final long KEY_ONE_SIZE = 500L; + private static final long KEY_TWO_OLD_SIZE = 1025L; + private static final long KEY_TWO_UPDATE_SIZE = 1023L; + private static final long KEY_THREE_SIZE = + ReconConstants.MAX_FILE_SIZE_UPPER_BOUND - 100L; + private static final long KEY_FOUR_SIZE = 2050L; + private static final long KEY_FIVE_SIZE = 100L; + private static final long KEY_SIX_SIZE = 6000L; + private static final long KEY_SEVEN_SIZE = 7000L; + + private TestNSSummaryTaskWithLegacyOBSLayout() { + } + + @BeforeAll + public static void setUp(@TempDir File tmpDir) throws Exception { + initializeNewOmMetadataManager(new File(tmpDir, "om")); + OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = + getMockOzoneManagerServiceProviderWithFSO(); + reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, + new File(tmpDir, "recon")); + ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, + false); + + ReconTestInjector reconTestInjector = + new ReconTestInjector.Builder(tmpDir) + .withReconOm(reconOMMetadataManager) + .withOmServiceProvider(ozoneManagerServiceProvider) + .withReconSqlDb() + .withContainerDB() + .build(); + reconNamespaceSummaryManager = + reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); + + NSSummary nonExistentSummary = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + assertNull(nonExistentSummary); + + populateOMDB(); + + nSSummaryTaskWithLegacy = new NSSummaryTaskWithLegacy( + reconNamespaceSummaryManager, + reconOMMetadataManager, ozoneConfiguration); + } + + /** + * Nested class for testing NSSummaryTaskWithLegacy reprocess. 
+ */ + @Nested + public class TestReprocess { + + private NSSummary nsSummaryForBucket1; + private NSSummary nsSummaryForBucket2; + + @BeforeEach + public void setUp() throws IOException { + // write a NSSummary prior to reprocess + // verify it got cleaned up after. + NSSummary staleNSSummary = new NSSummary(); + RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); + reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation, -1L, + staleNSSummary); + reconNamespaceSummaryManager.commitBatchOperation(rdbBatchOperation); + + // Verify commit + assertNotNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + + // reinit Recon RocksDB's namespace CF. + reconNamespaceSummaryManager.clearNSSummaryTable(); + + nSSummaryTaskWithLegacy.reprocessWithLegacy(reconOMMetadataManager); + assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + + nsSummaryForBucket1 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + nsSummaryForBucket2 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID); + assertNotNull(nsSummaryForBucket1); + assertNotNull(nsSummaryForBucket2); + } + + @Test + public void testReprocessNSSummaryNull() throws IOException { + assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + } + + @Test + public void testReprocessGetFiles() { + assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); + assertEquals(2, nsSummaryForBucket2.getNumOfFiles()); + + assertEquals(KEY_ONE_SIZE + KEY_TWO_OLD_SIZE + KEY_THREE_SIZE, + nsSummaryForBucket1.getSizeOfFiles()); + assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE, + nsSummaryForBucket2.getSizeOfFiles()); + } + + @Test + public void testReprocessFileBucketSize() { + int[] fileDistBucket1 = nsSummaryForBucket1.getFileSizeBucket(); + int[] fileDistBucket2 = nsSummaryForBucket2.getFileSizeBucket(); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket1.length); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket2.length); + + // Check for 1's and 0's in fileDistBucket1 + int[] expectedIndexes1 = {0, 1, 40}; + for (int index = 0; index < fileDistBucket1.length; index++) { + if (contains(expectedIndexes1, index)) { + assertEquals(1, fileDistBucket1[index]); + } else { + assertEquals(0, fileDistBucket1[index]); + } + } + + // Check for 1's and 0's in fileDistBucket2 + int[] expectedIndexes2 = {0, 2}; + for (int index = 0; index < fileDistBucket2.length; index++) { + if (contains(expectedIndexes2, index)) { + assertEquals(1, fileDistBucket2[index]); + } else { + assertEquals(0, fileDistBucket2[index]); + } + } + } + + } + + /** + * Nested class for testing NSSummaryTaskWithLegacy process. + */ + @Nested + public class TestProcess { + + private NSSummary nsSummaryForBucket1; + private NSSummary nsSummaryForBucket2; + + private OMDBUpdateEvent keyEvent1; + private OMDBUpdateEvent keyEvent2; + private OMDBUpdateEvent keyEvent3; + private OMDBUpdateEvent keyEvent4; + + @BeforeEach + public void setUp() throws IOException { + // reinit Recon RocksDB's namespace CF. 
+ reconNamespaceSummaryManager.clearNSSummaryTable(); + nSSummaryTaskWithLegacy.reprocessWithLegacy(reconOMMetadataManager); + nSSummaryTaskWithLegacy.processWithLegacy(processEventBatch()); + + nsSummaryForBucket1 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + assertNotNull(nsSummaryForBucket1); + nsSummaryForBucket2 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID); + assertNotNull(nsSummaryForBucket2); + } + + private OMUpdateEventBatch processEventBatch() throws IOException { + // Test PUT Event. + // PUT Key6 in Bucket2. + String omPutKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_TWO + + OM_KEY_PREFIX + KEY_SIX; + OmKeyInfo omPutKeyInfo = buildOmKeyInfo(VOL, BUCKET_TWO, KEY_SIX, + KEY_SIX, KEY_SIX_OBJECT_ID, BUCKET_TWO_OBJECT_ID, KEY_SIX_SIZE); + keyEvent1 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omPutKey) + .setValue(omPutKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT) + .build(); + // PUT Key7 in Bucket1. + omPutKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_SEVEN; + omPutKeyInfo = buildOmKeyInfo(VOL, BUCKET_ONE, KEY_SEVEN, + KEY_SEVEN, KEY_SEVEN_OBJECT_ID, BUCKET_ONE_OBJECT_ID, KEY_SEVEN_SIZE); + keyEvent2 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omPutKey) + .setValue(omPutKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT) + .build(); + + // Test DELETE Event. + // Delete Key1 in Bucket1. + String omDeleteKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_ONE; + OmKeyInfo omDeleteKeyInfo = buildOmKeyInfo(VOL, BUCKET_ONE, KEY_ONE, + KEY_ONE, KEY_ONE_OBJECT_ID, BUCKET_ONE_OBJECT_ID, KEY_ONE_SIZE); + keyEvent3 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omDeleteKey) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setValue(omDeleteKeyInfo) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE) + .build(); + + // Test UPDATE Event. + // Resize Key2 in Bucket1. + String omResizeKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_TWO; + OmKeyInfo oldOmResizeKeyInfo = + buildOmKeyInfo(VOL, BUCKET_ONE, KEY_TWO, KEY_TWO, KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, KEY_TWO_OLD_SIZE); + OmKeyInfo newOmResizeKeyInfo = + buildOmKeyInfo(VOL, BUCKET_ONE, KEY_TWO, KEY_TWO, KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, KEY_TWO_OLD_SIZE + 100); + keyEvent4 = new OMDBUpdateEvent. 
+ OMUpdateEventBuilder() + .setKey(omResizeKey) + .setOldValue(oldOmResizeKeyInfo) + .setValue(newOmResizeKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.UPDATE) + .build(); + + return new OMUpdateEventBatch( + Arrays.asList(keyEvent1, keyEvent2, keyEvent3, keyEvent4)); + } + + @Test + public void testProcessForCount() throws IOException { + assertNotNull(nsSummaryForBucket1); + assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); + assertNotNull(nsSummaryForBucket2); + assertEquals(3, nsSummaryForBucket2.getNumOfFiles()); + + Set childDirBucket1 = nsSummaryForBucket1.getChildDir(); + assertEquals(0, childDirBucket1.size()); + Set childDirBucket2 = nsSummaryForBucket2.getChildDir(); + assertEquals(0, childDirBucket2.size()); + } + + @Test + public void testProcessForSize() throws IOException { + assertNotNull(nsSummaryForBucket1); + assertEquals( + KEY_THREE_SIZE + KEY_SEVEN_SIZE + KEY_TWO_OLD_SIZE + 100, + nsSummaryForBucket1.getSizeOfFiles()); + assertNotNull(nsSummaryForBucket2); + assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE, + nsSummaryForBucket2.getSizeOfFiles()); + } + + + @Test + public void testProcessFileBucketSize() { + int[] fileDistBucket1 = nsSummaryForBucket1.getFileSizeBucket(); + int[] fileDistBucket2 = nsSummaryForBucket2.getFileSizeBucket(); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket1.length); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket2.length); + + // Check for 1's and 0's in fileDistBucket1 + int[] expectedIndexes1 = {1, 3, 40}; + for (int index = 0; index < fileDistBucket1.length; index++) { + if (contains(expectedIndexes1, index)) { + assertEquals(1, fileDistBucket1[index]); + } else { + assertEquals(0, fileDistBucket1[index]); + } + } + + // Check for 1's and 0's in fileDistBucket2 + int[] expectedIndexes2 = {0, 2, 3}; + for (int index = 0; index < fileDistBucket2.length; index++) { + if (contains(expectedIndexes2, index)) { + assertEquals(1, fileDistBucket2[index]); + } else { + assertEquals(0, fileDistBucket2[index]); + } + } + } + + } + + /** + * Populate OMDB with the following configs. + * vol + * / \ + * bucket1 bucket2 + * / \ \ \ \ + * key1 key2 key3 key4 key5 + * + * @throws IOException + */ + private static void populateOMDB() throws IOException { + writeKeyToOm(reconOMMetadataManager, + KEY_ONE, + BUCKET_ONE, + VOL, + KEY_ONE, + KEY_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_ONE_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_TWO, + BUCKET_ONE, + VOL, + KEY_TWO, + KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_TWO_OLD_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_THREE, + BUCKET_ONE, + VOL, + KEY_THREE, + KEY_THREE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_THREE_SIZE, + getBucketLayout()); + + writeKeyToOm(reconOMMetadataManager, + KEY_FOUR, + BUCKET_TWO, + VOL, + KEY_FOUR, + KEY_FOUR_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + KEY_FOUR_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_FIVE, + BUCKET_TWO, + VOL, + KEY_FIVE, + KEY_FIVE_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + KEY_FIVE_SIZE, + getBucketLayout()); + } + + /** + * Create a new OM Metadata manager instance with one user, one vol, and two + * buckets. 
+ * + * @throws IOException ioEx + */ + private static void initializeNewOmMetadataManager( + File omDbDir) + throws IOException { + omConfiguration = new OzoneConfiguration(); + omConfiguration.set(OZONE_OM_DB_DIRS, + omDbDir.getAbsolutePath()); + omConfiguration.set(OMConfigKeys + .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true"); + omMetadataManager = new OmMetadataManagerImpl( + omConfiguration, null); + + String volumeKey = omMetadataManager.getVolumeKey(VOL); + OmVolumeArgs args = + OmVolumeArgs.newBuilder() + .setObjectID(VOL_OBJECT_ID) + .setVolume(VOL) + .setAdminName(TEST_USER) + .setOwnerName(TEST_USER) + .build(); + omMetadataManager.getVolumeTable().put(volumeKey, args); + + OmBucketInfo bucketInfo1 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(BUCKET_ONE_OBJECT_ID) + .setBucketLayout(getBucketLayout()) + .build(); + + OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_TWO) + .setObjectID(BUCKET_TWO_OBJECT_ID) + .setBucketLayout(getBucketLayout()) + .build(); + + String bucketKey = omMetadataManager.getBucketKey( + bucketInfo1.getVolumeName(), bucketInfo1.getBucketName()); + String bucketKey2 = omMetadataManager.getBucketKey( + bucketInfo2.getVolumeName(), bucketInfo2.getBucketName()); + + omMetadataManager.getBucketTable().put(bucketKey, bucketInfo1); + omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2); + } + + /** + * Build a key info for put/update action. + * + * @param volume volume name + * @param bucket bucket name + * @param key key name + * @param fileName file name + * @param objectID object ID + * @param parentObjectId parent object ID + * @param dataSize file size + * @return the KeyInfo + */ + private static OmKeyInfo buildOmKeyInfo(String volume, + String bucket, + String key, + String fileName, + long objectID, + long parentObjectId, + long dataSize) { + return new OmKeyInfo.Builder() + .setBucketName(bucket) + .setVolumeName(volume) + .setKeyName(key) + .setFileName(fileName) + .setReplicationConfig( + StandaloneReplicationConfig.getInstance( + HddsProtos.ReplicationFactor.ONE)) + .setObjectID(objectID) + .setParentObjectID(parentObjectId) + .setDataSize(dataSize) + .build(); + } + + // Helper method to check if an array contains a specific value + private boolean contains(int[] arr, int value) { + for (int num : arr) { + if (num == value) { + return true; + } + } + return false; + } + + private static BucketLayout getBucketLayout() { + return BucketLayout.LEGACY; + } +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java new file mode 100644 index 000000000000..8f9d6b2990a5 --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java @@ -0,0 +1,548 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.db.RDBBatchOperation; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.recon.ReconConstants; +import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +import java.io.File; +import java.io.IOException; +import java.io.Serializable; +import java.util.Arrays; +import java.util.Set; + +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; + +/** + * Unit test for NSSummaryTaskWithOBS. 
+ */ +public final class TestNSSummaryTaskWithOBS implements Serializable { + private static ReconNamespaceSummaryManager reconNamespaceSummaryManager; + private static OMMetadataManager omMetadataManager; + private static ReconOMMetadataManager reconOMMetadataManager; + private static NSSummaryTaskWithOBS nSSummaryTaskWithOBS; + private static OzoneConfiguration omConfiguration; + + // Object names + private static final String VOL = "vol"; + private static final String BUCKET_ONE = "bucket1"; + private static final String BUCKET_TWO = "bucket2"; + private static final String KEY_ONE = "key1"; + private static final String KEY_TWO = "key2"; + private static final String KEY_THREE = "dir1/dir2/key3"; + private static final String KEY_FOUR = "key4///////////"; + private static final String KEY_FIVE = "//////////"; + private static final String KEY_SIX = "key6"; + private static final String KEY_SEVEN = "key7"; + + private static final String TEST_USER = "TestUser"; + + private static final long PARENT_OBJECT_ID_ZERO = 0L; + private static final long VOL_OBJECT_ID = 0L; + private static final long BUCKET_ONE_OBJECT_ID = 1L; + private static final long BUCKET_TWO_OBJECT_ID = 2L; + private static final long KEY_ONE_OBJECT_ID = 3L; + private static final long KEY_TWO_OBJECT_ID = 5L; + private static final long KEY_FOUR_OBJECT_ID = 6L; + private static final long KEY_THREE_OBJECT_ID = 8L; + private static final long KEY_FIVE_OBJECT_ID = 9L; + private static final long KEY_SIX_OBJECT_ID = 10L; + private static final long KEY_SEVEN_OBJECT_ID = 11L; + + + private static final long KEY_ONE_SIZE = 500L; + private static final long KEY_TWO_OLD_SIZE = 1025L; + private static final long KEY_TWO_UPDATE_SIZE = 1023L; + private static final long KEY_THREE_SIZE = + ReconConstants.MAX_FILE_SIZE_UPPER_BOUND - 100L; + private static final long KEY_FOUR_SIZE = 2050L; + private static final long KEY_FIVE_SIZE = 100L; + private static final long KEY_SIX_SIZE = 6000L; + private static final long KEY_SEVEN_SIZE = 7000L; + + private TestNSSummaryTaskWithOBS() { + } + + @BeforeAll + public static void setUp(@TempDir File tmpDir) throws Exception { + initializeNewOmMetadataManager(new File(tmpDir, "om")); + OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = + getMockOzoneManagerServiceProviderWithFSO(); + reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, + new File(tmpDir, "recon")); + + ReconTestInjector reconTestInjector = + new ReconTestInjector.Builder(tmpDir) + .withReconOm(reconOMMetadataManager) + .withOmServiceProvider(ozoneManagerServiceProvider) + .withReconSqlDb() + .withContainerDB() + .build(); + reconNamespaceSummaryManager = + reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); + + NSSummary nonExistentSummary = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + assertNull(nonExistentSummary); + + populateOMDB(); + + nSSummaryTaskWithOBS = new NSSummaryTaskWithOBS( + reconNamespaceSummaryManager, + reconOMMetadataManager, omConfiguration); + } + + /** + * Nested class for testing NSSummaryTaskWithOBS reprocess. + */ + @Nested + public class TestReprocess { + + private NSSummary nsSummaryForBucket1; + private NSSummary nsSummaryForBucket2; + + @BeforeEach + public void setUp() throws IOException { + // write a NSSummary prior to reprocess + // verify it got cleaned up after. 
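+      // Note: the stale entry is committed under the sentinel key -1L, the table is
+      // then cleared and reprocessWithOBS is run; the assertNull(-1L) checks below
+      // confirm that reprocess does not bring the stale summary back.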
+ NSSummary staleNSSummary = new NSSummary(); + RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); + reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation, -1L, + staleNSSummary); + reconNamespaceSummaryManager.commitBatchOperation(rdbBatchOperation); + + // Verify commit + assertNotNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + + // reinit Recon RocksDB's namespace CF. + reconNamespaceSummaryManager.clearNSSummaryTable(); + + nSSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager); + assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + + nsSummaryForBucket1 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + nsSummaryForBucket2 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID); + assertNotNull(nsSummaryForBucket1); + assertNotNull(nsSummaryForBucket2); + } + + @Test + public void testReprocessNSSummaryNull() throws IOException { + assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + } + + @Test + public void testReprocessGetFiles() { + assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); + assertEquals(2, nsSummaryForBucket2.getNumOfFiles()); + + assertEquals(KEY_ONE_SIZE + KEY_TWO_OLD_SIZE + KEY_THREE_SIZE, + nsSummaryForBucket1.getSizeOfFiles()); + assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE, + nsSummaryForBucket2.getSizeOfFiles()); + } + + @Test + public void testReprocessFileBucketSize() { + int[] fileDistBucket1 = nsSummaryForBucket1.getFileSizeBucket(); + int[] fileDistBucket2 = nsSummaryForBucket2.getFileSizeBucket(); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket1.length); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket2.length); + + // Check for 1's and 0's in fileDistBucket1 + int[] expectedIndexes1 = {0, 1, 40}; + for (int index = 0; index < fileDistBucket1.length; index++) { + if (contains(expectedIndexes1, index)) { + assertEquals(1, fileDistBucket1[index]); + } else { + assertEquals(0, fileDistBucket1[index]); + } + } + + // Check for 1's and 0's in fileDistBucket2 + int[] expectedIndexes2 = {0, 2}; + for (int index = 0; index < fileDistBucket2.length; index++) { + if (contains(expectedIndexes2, index)) { + assertEquals(1, fileDistBucket2[index]); + } else { + assertEquals(0, fileDistBucket2[index]); + } + } + } + + } + + /** + * Nested class for testing NSSummaryTaskWithOBS process. + */ + @Nested + public class TestProcess { + + private NSSummary nsSummaryForBucket1; + private NSSummary nsSummaryForBucket2; + + private OMDBUpdateEvent keyEvent1; + private OMDBUpdateEvent keyEvent2; + private OMDBUpdateEvent keyEvent3; + private OMDBUpdateEvent keyEvent4; + + @BeforeEach + public void setUp() throws IOException { + // reinit Recon RocksDB's namespace CF. + reconNamespaceSummaryManager.clearNSSummaryTable(); + nSSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager); + nSSummaryTaskWithOBS.processWithOBS(processEventBatch()); + + nsSummaryForBucket1 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + assertNotNull(nsSummaryForBucket1); + nsSummaryForBucket2 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID); + assertNotNull(nsSummaryForBucket2); + } + + private OMUpdateEventBatch processEventBatch() throws IOException { + // Test PUT Event. + // PUT Key6 in Bucket2. 
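+      // Taken together, the four events below (two PUTs, one DELETE, one UPDATE)
+      // leave bucket1 with key2 (resized), key3 and key7, and bucket2 with key4,
+      // key5 and key6 - the state asserted by testProcessForCount/testProcessForSize.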
+ String omPutKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_TWO + + OM_KEY_PREFIX + KEY_SIX; + OmKeyInfo omPutKeyInfo = buildOmKeyInfo(VOL, BUCKET_TWO, KEY_SIX, + KEY_SIX, KEY_SIX_OBJECT_ID, BUCKET_TWO_OBJECT_ID, KEY_SIX_SIZE); + keyEvent1 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omPutKey) + .setValue(omPutKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT) + .build(); + // PUT Key7 in Bucket1. + omPutKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_SEVEN; + omPutKeyInfo = buildOmKeyInfo(VOL, BUCKET_ONE, KEY_SEVEN, + KEY_SEVEN, KEY_SEVEN_OBJECT_ID, BUCKET_ONE_OBJECT_ID, KEY_SEVEN_SIZE); + keyEvent2 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omPutKey) + .setValue(omPutKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT) + .build(); + + // Test DELETE Event. + // Delete Key1 in Bucket1. + String omDeleteKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_ONE; + OmKeyInfo omDeleteKeyInfo = buildOmKeyInfo(VOL, BUCKET_ONE, KEY_ONE, + KEY_ONE, KEY_ONE_OBJECT_ID, BUCKET_ONE_OBJECT_ID, KEY_ONE_SIZE); + keyEvent3 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omDeleteKey) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setValue(omDeleteKeyInfo) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE) + .build(); + + // Test UPDATE Event. + // Resize Key2 in Bucket1. + String omResizeKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_TWO; + OmKeyInfo oldOmResizeKeyInfo = + buildOmKeyInfo(VOL, BUCKET_ONE, KEY_TWO, KEY_TWO, KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, KEY_TWO_OLD_SIZE); + OmKeyInfo newOmResizeKeyInfo = + buildOmKeyInfo(VOL, BUCKET_ONE, KEY_TWO, KEY_TWO, KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, KEY_TWO_OLD_SIZE + 100); + keyEvent4 = new OMDBUpdateEvent. 
+ OMUpdateEventBuilder() + .setKey(omResizeKey) + .setOldValue(oldOmResizeKeyInfo) + .setValue(newOmResizeKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.UPDATE) + .build(); + + return new OMUpdateEventBatch( + Arrays.asList(keyEvent1, keyEvent2, keyEvent3, keyEvent4)); + } + + @Test + public void testProcessForCount() throws IOException { + assertNotNull(nsSummaryForBucket1); + assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); + assertNotNull(nsSummaryForBucket2); + assertEquals(3, nsSummaryForBucket2.getNumOfFiles()); + + Set childDirBucket1 = nsSummaryForBucket1.getChildDir(); + assertEquals(0, childDirBucket1.size()); + Set childDirBucket2 = nsSummaryForBucket2.getChildDir(); + assertEquals(0, childDirBucket2.size()); + } + + @Test + public void testProcessForSize() throws IOException { + assertNotNull(nsSummaryForBucket1); + assertEquals( + KEY_THREE_SIZE + KEY_SEVEN_SIZE + KEY_TWO_OLD_SIZE + 100, + nsSummaryForBucket1.getSizeOfFiles()); + assertNotNull(nsSummaryForBucket2); + assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE, + nsSummaryForBucket2.getSizeOfFiles()); + } + + + @Test + public void testProcessFileBucketSize() { + int[] fileDistBucket1 = nsSummaryForBucket1.getFileSizeBucket(); + int[] fileDistBucket2 = nsSummaryForBucket2.getFileSizeBucket(); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket1.length); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket2.length); + + // Check for 1's and 0's in fileDistBucket1 + int[] expectedIndexes1 = {1, 3, 40}; + for (int index = 0; index < fileDistBucket1.length; index++) { + if (contains(expectedIndexes1, index)) { + assertEquals(1, fileDistBucket1[index]); + } else { + assertEquals(0, fileDistBucket1[index]); + } + } + + // Check for 1's and 0's in fileDistBucket2 + int[] expectedIndexes2 = {0, 2, 3}; + for (int index = 0; index < fileDistBucket2.length; index++) { + if (contains(expectedIndexes2, index)) { + assertEquals(1, fileDistBucket2[index]); + } else { + assertEquals(0, fileDistBucket2[index]); + } + } + } + + } + + /** + * Populate OMDB with the following configs. + * vol + * / \ + * bucket1 bucket2 + * / \ \ \ \ + * key1 key2 key3 key4 key5 + * + * @throws IOException + */ + private static void populateOMDB() throws IOException { + writeKeyToOm(reconOMMetadataManager, + KEY_ONE, + BUCKET_ONE, + VOL, + KEY_ONE, + KEY_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_ONE_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_TWO, + BUCKET_ONE, + VOL, + KEY_TWO, + KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_TWO_OLD_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_THREE, + BUCKET_ONE, + VOL, + KEY_THREE, + KEY_THREE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_THREE_SIZE, + getBucketLayout()); + + writeKeyToOm(reconOMMetadataManager, + KEY_FOUR, + BUCKET_TWO, + VOL, + KEY_FOUR, + KEY_FOUR_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + KEY_FOUR_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_FIVE, + BUCKET_TWO, + VOL, + KEY_FIVE, + KEY_FIVE_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + KEY_FIVE_SIZE, + getBucketLayout()); + } + + /** + * Create a new OM Metadata manager instance with one user, one vol, and two + * buckets. 
+ * + * @throws IOException ioEx + */ + private static void initializeNewOmMetadataManager( + File omDbDir) + throws IOException { + omConfiguration = new OzoneConfiguration(); + omConfiguration.set(OZONE_OM_DB_DIRS, + omDbDir.getAbsolutePath()); + omConfiguration.set(OMConfigKeys + .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true"); + omMetadataManager = new OmMetadataManagerImpl( + omConfiguration, null); + + String volumeKey = omMetadataManager.getVolumeKey(VOL); + OmVolumeArgs args = + OmVolumeArgs.newBuilder() + .setObjectID(VOL_OBJECT_ID) + .setVolume(VOL) + .setAdminName(TEST_USER) + .setOwnerName(TEST_USER) + .build(); + omMetadataManager.getVolumeTable().put(volumeKey, args); + + OmBucketInfo bucketInfo1 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(BUCKET_ONE_OBJECT_ID) + .setBucketLayout(getBucketLayout()) + .build(); + + OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_TWO) + .setObjectID(BUCKET_TWO_OBJECT_ID) + .setBucketLayout(getBucketLayout()) + .build(); + + String bucketKey = omMetadataManager.getBucketKey( + bucketInfo1.getVolumeName(), bucketInfo1.getBucketName()); + String bucketKey2 = omMetadataManager.getBucketKey( + bucketInfo2.getVolumeName(), bucketInfo2.getBucketName()); + + omMetadataManager.getBucketTable().put(bucketKey, bucketInfo1); + omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2); + } + + /** + * Build a key info for put/update action. + * @param volume volume name + * @param bucket bucket name + * @param key key name + * @param fileName file name + * @param objectID object ID + * @param parentObjectId parent object ID + * @param dataSize file size + * @return the KeyInfo + */ + private static OmKeyInfo buildOmKeyInfo(String volume, + String bucket, + String key, + String fileName, + long objectID, + long parentObjectId, + long dataSize) { + return new OmKeyInfo.Builder() + .setBucketName(bucket) + .setVolumeName(volume) + .setKeyName(key) + .setFileName(fileName) + .setReplicationConfig( + StandaloneReplicationConfig.getInstance( + HddsProtos.ReplicationFactor.ONE)) + .setObjectID(objectID) + .setParentObjectID(parentObjectId) + .setDataSize(dataSize) + .build(); + } + + // Helper method to check if an array contains a specific value + private boolean contains(int[] arr, int value) { + for (int num : arr) { + if (num == value) { + return true; + } + } + return false; + } + + private static BucketLayout getBucketLayout() { + return BucketLayout.OBJECT_STORE; + } +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java index df014f4276fa..56d8fe213152 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java @@ -21,20 +21,28 @@ import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TypedTable; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import 
org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMUpdateEventBuilder; - +import org.apache.hadoop.ozone.recon.spi.impl.ReconNamespaceSummaryManagerImpl; import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.jooq.DSLContext; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; import org.junit.jupiter.api.io.TempDir; import java.io.IOException; @@ -44,18 +52,20 @@ import java.util.Arrays; import java.util.List; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedKeysToOm; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenKeyToOm; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenFileToOm; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedDirToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenFileToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedKeysToOm; import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.DELETE; import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.PUT; import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.UPDATE; @@ -66,29 +76,83 @@ import static org.mockito.Mockito.when; /** - * Unit test for Object Count Task. + * This test class is designed for the OM Table Insight Task. It conducts tests + * for tables that require both Size and Count, as well as for those that only + * require Count. 
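+ * The deleted-directory tests build an FSO tree, run NSSummaryTaskWithFSO to
+ * populate the namespace summaries, and then verify the DELETED_DIR_TABLE counts
+ * tracked by OmTableInsightTask.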
*/ public class TestOmTableInsightTask extends AbstractReconSqlDBTest { @TempDir private Path temporaryFolder; - private GlobalStatsDao globalStatsDao; - private OmTableInsightTask omTableInsightTask; - private DSLContext dslContext; + private static GlobalStatsDao globalStatsDao; + private static OmTableInsightTask omTableInsightTask; + private static DSLContext dslContext; private boolean isSetupDone = false; - private ReconOMMetadataManager reconOMMetadataManager; + private static ReconOMMetadataManager reconOMMetadataManager; + private static NSSummaryTaskWithFSO nSSummaryTaskWithFso; + private static OzoneConfiguration ozoneConfiguration; + private static ReconNamespaceSummaryManagerImpl reconNamespaceSummaryManager; + + // Object names in FSO-enabled format + private static final String VOL = "volume1"; + private static final String BUCKET_ONE = "bucket1"; + private static final String BUCKET_TWO = "bucket2"; + private static final String KEY_ONE = "file1"; + private static final String KEY_TWO = "file2"; + private static final String KEY_THREE = "dir1/dir2/file3"; + private static final String FILE_ONE = "file1"; + private static final String FILE_TWO = "file2"; + private static final String FILE_THREE = "file3"; + private static final String DIR_ONE = "dir1"; + private static final String DIR_TWO = "dir2"; + private static final String DIR_THREE = "dir3"; + + + private static final long VOL_OBJECT_ID = 0L; + private static final long BUCKET_ONE_OBJECT_ID = 1L; + private static final long BUCKET_TWO_OBJECT_ID = 2L; + private static final long KEY_ONE_OBJECT_ID = 3L; + private static final long DIR_ONE_OBJECT_ID = 14L; + private static final long KEY_TWO_OBJECT_ID = 5L; + private static final long DIR_TWO_OBJECT_ID = 17L; + private static final long KEY_THREE_OBJECT_ID = 8L; + private static final long DIR_THREE_OBJECT_ID = 10L; + + private static final long KEY_ONE_SIZE = 500L; + private static final long KEY_TWO_SIZE = 1025L; + private static final long KEY_THREE_SIZE = 2000L; + + // mock client's path requests + private static final String TEST_USER = "TestUser"; + + @Mock + private Table nsSummaryTable; public TestOmTableInsightTask() { super(); } private void initializeInjector() throws IOException { + ozoneConfiguration = new OzoneConfiguration(); reconOMMetadataManager = getTestReconOmMetadataManager( initializeNewOmMetadataManager(Files.createDirectory( temporaryFolder.resolve("JunitOmDBDir")).toFile()), Files.createDirectory(temporaryFolder.resolve("NewDir")).toFile()); globalStatsDao = getDao(GlobalStatsDao.class); + + ReconTestInjector reconTestInjector = + new ReconTestInjector.Builder(temporaryFolder.toFile()) + .withReconSqlDb() + .withReconOm(reconOMMetadataManager) + .withContainerDB() + .build(); + reconNamespaceSummaryManager = reconTestInjector.getInstance( + ReconNamespaceSummaryManagerImpl.class); + omTableInsightTask = new OmTableInsightTask( globalStatsDao, getConfiguration(), reconOMMetadataManager); + nSSummaryTaskWithFso = new NSSummaryTaskWithFSO( + reconNamespaceSummaryManager, reconOMMetadataManager, + ozoneConfiguration); dslContext = getDslContext(); } @@ -99,10 +163,182 @@ public void setUp() throws IOException { initializeInjector(); isSetupDone = true; } + MockitoAnnotations.openMocks(this); // Truncate table before running each test dslContext.truncate(GLOBAL_STATS); } + /** + * Populate OM-DB with the following structure. 
+ * volume1 + * | \ + * bucket1 bucket2 + * / \ \ + * dir1 dir2 dir3 + * / \ \ + * file1 file2 file3 + * + * @throws IOException + */ + private void populateOMDB() throws IOException { + + // Create 2 Buckets bucket1 and bucket2 + OmBucketInfo bucketInfo1 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(BUCKET_ONE_OBJECT_ID) + .build(); + String bucketKey = reconOMMetadataManager.getBucketKey( + bucketInfo1.getVolumeName(), bucketInfo1.getBucketName()); + reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo1); + OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_TWO) + .setObjectID(BUCKET_TWO_OBJECT_ID) + .build(); + bucketKey = reconOMMetadataManager.getBucketKey( + bucketInfo2.getVolumeName(), bucketInfo2.getBucketName()); + reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo2); + + // Create a single volume named volume1 + String volumeKey = reconOMMetadataManager.getVolumeKey(VOL); + OmVolumeArgs args = + OmVolumeArgs.newBuilder() + .setObjectID(VOL_OBJECT_ID) + .setVolume(VOL) + .setAdminName(TEST_USER) + .setOwnerName(TEST_USER) + .build(); + reconOMMetadataManager.getVolumeTable().put(volumeKey, args); + + // Generate keys for the File Table + writeKeyToOm(reconOMMetadataManager, + KEY_ONE, + BUCKET_ONE, + VOL, + FILE_ONE, + KEY_ONE_OBJECT_ID, + DIR_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_ONE_SIZE, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + writeKeyToOm(reconOMMetadataManager, + KEY_TWO, + BUCKET_ONE, + VOL, + FILE_TWO, + KEY_TWO_OBJECT_ID, + DIR_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_TWO_SIZE, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + writeKeyToOm(reconOMMetadataManager, + KEY_THREE, + BUCKET_ONE, + VOL, + FILE_THREE, + KEY_THREE_OBJECT_ID, + DIR_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_THREE_SIZE, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + + // Generate Deleted Directories in OM + writeDeletedDirToOm(reconOMMetadataManager, + BUCKET_ONE, + VOL, + DIR_ONE, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + DIR_ONE_OBJECT_ID); + writeDeletedDirToOm(reconOMMetadataManager, + BUCKET_ONE, + VOL, + DIR_TWO, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + DIR_TWO_OBJECT_ID); + writeDeletedDirToOm(reconOMMetadataManager, + BUCKET_TWO, + VOL, + DIR_THREE, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + DIR_THREE_OBJECT_ID); + } + + @Test + public void testReprocessForDeletedDirectory() throws Exception { + // Create keys and deleted directories + populateOMDB(); + + // Generate NamespaceSummary for the OM DB + nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager); + + Pair result = + omTableInsightTask.reprocess(reconOMMetadataManager); + assertTrue(result.getRight()); + assertEquals(3, getCountForTable(DELETED_DIR_TABLE)); + } + + @Test + public void testProcessForDeletedDirectoryTable() throws IOException { + // Prepare mock data size + Long expectedSize1 = 1000L; + Long expectedSize2 = 2000L; + NSSummary nsSummary1 = new NSSummary(); + NSSummary nsSummary2 = new NSSummary(); + nsSummary1.setSizeOfFiles(expectedSize1); + nsSummary2.setSizeOfFiles(expectedSize2); + when(nsSummaryTable.get(1L)).thenReturn(nsSummary1); + when(nsSummaryTable.get(2L)).thenReturn(nsSummary1); + when(nsSummaryTable.get(3L)).thenReturn(nsSummary2); + when(nsSummaryTable.get(4L)).thenReturn(nsSummary2); + when(nsSummaryTable.get(5L)).thenReturn(nsSummary2); + + /* DB key in 
DeletedDirectoryTable => + "/volumeId/bucketId/parentId/dirName/dirObjectId" */ + List paths = Arrays.asList( + "/18/28/22/dir1/1", + "/18/26/23/dir1/2", + "/18/20/24/dir1/3", + "/18/21/25/dir1/4", + "/18/27/26/dir1/5" + ); + + // Testing PUT events + // Create 5 OMDBUpdateEvent instances for 5 different deletedDirectory paths + ArrayList putEvents = new ArrayList<>(); + for (long i = 0L; i < 5L; i++) { + putEvents.add(getOMUpdateEvent(paths.get((int) i), + getOmKeyInfo("vol1", "bucket1", DIR_ONE, (i + 1), false), + DELETED_DIR_TABLE, PUT, null)); + } + OMUpdateEventBatch putEventBatch = new OMUpdateEventBatch(putEvents); + omTableInsightTask.process(putEventBatch); + assertEquals(5, getCountForTable(DELETED_DIR_TABLE)); + + + // Testing DELETE events + // Create 2 OMDBUpdateEvent instances for 2 different deletedDirectory paths + ArrayList deleteEvents = new ArrayList<>(); + deleteEvents.add(getOMUpdateEvent(paths.get(0), + getOmKeyInfo("vol1", "bucket1", DIR_ONE, 1L, false), DELETED_DIR_TABLE, + DELETE, null)); + deleteEvents.add(getOMUpdateEvent(paths.get(2), + getOmKeyInfo("vol1", "bucket1", DIR_ONE, 3L, false), DELETED_DIR_TABLE, + DELETE, null)); + OMUpdateEventBatch deleteEventBatch = new OMUpdateEventBatch(deleteEvents); + omTableInsightTask.process(deleteEventBatch); + assertEquals(3, getCountForTable(DELETED_DIR_TABLE)); + } + @Test public void testReprocessForCount() throws Exception { OMMetadataManager omMetadataManager = mock(OmMetadataManagerImpl.class); @@ -110,27 +346,32 @@ public void testReprocessForCount() throws Exception { // Mock 5 rows in each table and test the count for (String tableName : omTableInsightTask.getTaskTables()) { TypedTable table = mock(TypedTable.class); - TypedTable.TypedTableIterator mockIter = mock(TypedTable - .TypedTableIterator.class); + TypedTable.TypedTableIterator mockIter = + mock(TypedTable.TypedTableIterator.class); when(table.iterator()).thenReturn(mockIter); when(omMetadataManager.getTable(tableName)).thenReturn(table); - when(mockIter.hasNext()) - .thenReturn(true) - .thenReturn(true) - .thenReturn(true) - .thenReturn(true) - .thenReturn(true) - .thenReturn(false); + when(mockIter.hasNext()).thenReturn(true, true, true, true, true, false); + TypedTable.TypedKeyValue mockKeyValue = mock(TypedTable.TypedKeyValue.class); - when(mockKeyValue.getValue()).thenReturn(mock(OmKeyInfo.class)); + + if (tableName.equals(DELETED_TABLE)) { + RepeatedOmKeyInfo keyInfo = mock(RepeatedOmKeyInfo.class); + when(keyInfo.getTotalSize()).thenReturn(ImmutablePair.of(100L, 100L)); + when(keyInfo.getOmKeyInfoList()).thenReturn( + Arrays.asList(mock(OmKeyInfo.class))); + when(mockKeyValue.getValue()).thenReturn(keyInfo); + } else { + when(mockKeyValue.getValue()).thenReturn(mock(OmKeyInfo.class)); + } + when(mockIter.next()).thenReturn(mockKeyValue); } Pair result = omTableInsightTask.reprocess(omMetadataManager); - assertTrue(result.getRight()); + assertTrue(result.getRight()); assertEquals(5L, getCountForTable(KEY_TABLE)); assertEquals(5L, getCountForTable(VOLUME_TABLE)); assertEquals(5L, getCountForTable(BUCKET_TABLE)); @@ -138,7 +379,6 @@ public void testReprocessForCount() throws Exception { assertEquals(5L, getCountForTable(DELETED_TABLE)); } - @Test public void testReprocessForOpenKeyTable() throws Exception { // Populate the OpenKeys table in OM DB @@ -203,44 +443,73 @@ public void testReprocessForDeletedTable() throws Exception { @Test public void testProcessForCount() { - ArrayList events = new ArrayList<>(); - // Create 5 put, 1 delete and 1 update 
event for each table + List initialEvents = new ArrayList<>(); + + // Creating events for each table except the deleted table for (String tableName : omTableInsightTask.getTaskTables()) { + if (tableName.equals(DELETED_TABLE)) { + continue; // Skipping deleted table as it has a separate test + } + + // Adding 5 PUT events per table for (int i = 0; i < 5; i++) { - events.add(getOMUpdateEvent("item" + i, null, tableName, PUT, null)); + initialEvents.add( + getOMUpdateEvent("item" + i, mock(OmKeyInfo.class), tableName, PUT, + null)); } - // for delete event, if value is set to null, the counter will not be - // decremented. This is because the value will be null if item does not - // exist in the database and there is no need to delete. - events.add(getOMUpdateEvent("item0", mock(OmKeyInfo.class), tableName, - DELETE, null)); - events.add(getOMUpdateEvent("item1", null, tableName, UPDATE, null)); + + // Adding 1 DELETE event where value is null, indicating non-existence + // in the database. + initialEvents.add( + getOMUpdateEvent("item0", mock(OmKeyInfo.class), tableName, DELETE, + null)); + // Adding 1 UPDATE event. This should not affect the count. + initialEvents.add( + getOMUpdateEvent("item1", mock(OmKeyInfo.class), tableName, UPDATE, + mock(OmKeyInfo.class))); } - OMUpdateEventBatch omUpdateEventBatch = new OMUpdateEventBatch(events); - omTableInsightTask.process(omUpdateEventBatch); - // Verify 4 items in each table. (5 puts - 1 delete + 0 update) - assertEquals(4L, getCountForTable(KEY_TABLE)); - assertEquals(4L, getCountForTable(VOLUME_TABLE)); - assertEquals(4L, getCountForTable(BUCKET_TABLE)); - assertEquals(4L, getCountForTable(FILE_TABLE)); + // Processing the initial batch of events + OMUpdateEventBatch initialBatch = new OMUpdateEventBatch(initialEvents); + omTableInsightTask.process(initialBatch); - // add a new key and simulate delete on non-existing item (value: null) - ArrayList newEvents = new ArrayList<>(); + // Verifying the count in each table for (String tableName : omTableInsightTask.getTaskTables()) { - newEvents.add(getOMUpdateEvent("item5", null, tableName, PUT, null)); - // This delete event should be a noop since value is null - newEvents.add(getOMUpdateEvent("item0", null, tableName, DELETE, null)); + if (tableName.equals(DELETED_TABLE)) { + continue; + } + assertEquals(4L, getCountForTable( + tableName)); // 4 items expected after processing (5 puts - 1 delete) } - omUpdateEventBatch = new OMUpdateEventBatch(newEvents); - omTableInsightTask.process(omUpdateEventBatch); + List additionalEvents = new ArrayList<>(); + // Simulating new PUT and DELETE events + for (String tableName : omTableInsightTask.getTaskTables()) { + if (tableName.equals(DELETED_TABLE)) { + continue; + } + // Adding 1 new PUT event + additionalEvents.add( + getOMUpdateEvent("item6", mock(OmKeyInfo.class), tableName, PUT, + null)); + // Attempting to delete a non-existing item (value: null) + additionalEvents.add( + getOMUpdateEvent("item0", null, tableName, DELETE, null)); + } - // Verify 5 items in each table. 
(1 new put + 0 delete) - assertEquals(5L, getCountForTable(KEY_TABLE)); - assertEquals(5L, getCountForTable(VOLUME_TABLE)); - assertEquals(5L, getCountForTable(BUCKET_TABLE)); - assertEquals(5L, getCountForTable(FILE_TABLE)); + // Processing the additional events + OMUpdateEventBatch additionalBatch = + new OMUpdateEventBatch(additionalEvents); + omTableInsightTask.process(additionalBatch); + // Verifying the final count in each table + for (String tableName : omTableInsightTask.getTaskTables()) { + if (tableName.equals(DELETED_TABLE)) { + continue; + } + // 5 items expected after processing the additional events. + assertEquals(5L, getCountForTable( + tableName)); + } } @Test @@ -251,35 +520,38 @@ public void testProcessForOpenKeyTableAndOpenFileTable() { when(omKeyInfo.getDataSize()).thenReturn(sizeToBeReturned); when(omKeyInfo.getReplicatedSize()).thenReturn(sizeToBeReturned * 3); - // Test PUT events + // Test PUT events. + // Add 5 PUT events for OpenKeyTable and OpenFileTable. ArrayList putEvents = new ArrayList<>(); - for (String tableName : omTableInsightTask.getTablesToCalculateSize()) { - for (int i = 0; i < 5; i++) { - putEvents.add( - getOMUpdateEvent("item" + i, omKeyInfo, tableName, PUT, null)); - } + for (int i = 0; i < 10; i++) { + String table = (i < 5) ? OPEN_KEY_TABLE : OPEN_FILE_TABLE; + putEvents.add(getOMUpdateEvent("item" + i, omKeyInfo, table, PUT, null)); } + OMUpdateEventBatch putEventBatch = new OMUpdateEventBatch(putEvents); omTableInsightTask.process(putEventBatch); - // After 5 PUTs, size should be 5 * 1000 = 5000 for each size-related table - for (String tableName : omTableInsightTask.getTablesToCalculateSize()) { + // After 5 PUTs, size should be 5 * 1000 = 5000 + for (String tableName : new ArrayList<>( + Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) { assertEquals(5000L, getUnReplicatedSizeForTable(tableName)); assertEquals(15000L, getReplicatedSizeForTable(tableName)); } // Test DELETE events ArrayList deleteEvents = new ArrayList<>(); - for (String tableName : omTableInsightTask.getTablesToCalculateSize()) { - // Delete "item0" - deleteEvents.add( - getOMUpdateEvent("item0", omKeyInfo, tableName, DELETE, null)); - } + // Delete "item0" for OpenKeyTable and OpenFileTable. 
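+    // Each DELETE carries the mocked omKeyInfo, so the tracked size per table is
+    // expected to drop by one key's worth: 5000 -> 4000 unreplicated and
+    // 15000 -> 12000 replicated bytes, as asserted below.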
+ deleteEvents.add( + getOMUpdateEvent("item0", omKeyInfo, OPEN_KEY_TABLE, DELETE, null)); + deleteEvents.add( + getOMUpdateEvent("item0", omKeyInfo, OPEN_FILE_TABLE, DELETE, null)); + OMUpdateEventBatch deleteEventBatch = new OMUpdateEventBatch(deleteEvents); omTableInsightTask.process(deleteEventBatch); // After deleting "item0", size should be 4 * 1000 = 4000 - for (String tableName : omTableInsightTask.getTablesToCalculateSize()) { + for (String tableName : new ArrayList<>( + Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) { assertEquals(4000L, getUnReplicatedSizeForTable(tableName)); assertEquals(12000L, getReplicatedSizeForTable(tableName)); } @@ -287,7 +559,8 @@ public void testProcessForOpenKeyTableAndOpenFileTable() { // Test UPDATE events ArrayList updateEvents = new ArrayList<>(); Long newSizeToBeReturned = 2000L; - for (String tableName : omTableInsightTask.getTablesToCalculateSize()) { + for (String tableName : new ArrayList<>( + Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) { // Update "item1" with a new size OmKeyInfo newKeyInfo = mock(OmKeyInfo.class); when(newKeyInfo.getDataSize()).thenReturn(newSizeToBeReturned); @@ -295,12 +568,14 @@ public void testProcessForOpenKeyTableAndOpenFileTable() { updateEvents.add( getOMUpdateEvent("item1", newKeyInfo, tableName, UPDATE, omKeyInfo)); } + OMUpdateEventBatch updateEventBatch = new OMUpdateEventBatch(updateEvents); omTableInsightTask.process(updateEventBatch); // After updating "item1", size should be 4000 - 1000 + 2000 = 5000 // presentValue - oldValue + newValue = updatedValue - for (String tableName : omTableInsightTask.getTablesToCalculateSize()) { + for (String tableName : new ArrayList<>( + Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) { assertEquals(5000L, getUnReplicatedSizeForTable(tableName)); assertEquals(15000L, getReplicatedSizeForTable(tableName)); } @@ -313,9 +588,10 @@ public void testProcessForDeletedTable() { new ImmutablePair<>(1000L, 3000L); ArrayList omKeyInfoList = new ArrayList<>(); // Add 5 OmKeyInfo objects to the list - for (int i = 0; i < 5; i++) { + for (long i = 0; i < 5; i++) { OmKeyInfo omKeyInfo = - getOmKeyInfo("sampleVol", "non_fso_Bucket", "non_fso_key1", true); + getOmKeyInfo("sampleVol", "non_fso_Bucket", "non_fso_key1", i + 1, + true); // Set properties of OmKeyInfo object if needed omKeyInfoList.add(omKeyInfo); } @@ -353,38 +629,14 @@ public void testProcessForDeletedTable() { // After deleting "item0", size should be 4 * 1000 = 4000 assertEquals(4000L, getUnReplicatedSizeForTable(DELETED_TABLE)); assertEquals(12000L, getReplicatedSizeForTable(DELETED_TABLE)); - - - // Test UPDATE events - ArrayList updateEvents = new ArrayList<>(); - // Update "item1" with new sizes - ImmutablePair newSizesToBeReturned = - new ImmutablePair<>(500L, 1500L); - RepeatedOmKeyInfo newRepeatedOmKeyInfo = mock(RepeatedOmKeyInfo.class); - when(newRepeatedOmKeyInfo.getTotalSize()).thenReturn(newSizesToBeReturned); - when(newRepeatedOmKeyInfo.getOmKeyInfoList()).thenReturn( - omKeyInfoList.subList(1, 5)); - OMUpdateEventBatch updateEventBatch = new OMUpdateEventBatch(updateEvents); - // For item1, newSize=500 and totalCount of deleted keys should be 4 - updateEvents.add( - getOMUpdateEvent("item1", newRepeatedOmKeyInfo, DELETED_TABLE, UPDATE, - repeatedOmKeyInfo)); - omTableInsightTask.process(updateEventBatch); - // Since one key has been deleted, total deleted keys should be 19 - assertEquals(19L, getCountForTable(DELETED_TABLE)); - // After updating "item1", size should be 4000 - 1000 + 500 = 3500 - // 
presentValue - oldValue + newValue = updatedValue - assertEquals(3500L, getUnReplicatedSizeForTable(DELETED_TABLE)); - assertEquals(10500L, getReplicatedSizeForTable(DELETED_TABLE)); } - private OMDBUpdateEvent getOMUpdateEvent( String name, Object value, String table, OMDBUpdateEvent.OMDBUpdateAction action, Object oldValue) { - return new OMUpdateEventBuilder() + return new OMDBUpdateEvent.OMUpdateEventBuilder() .setAction(action) .setKey(name) .setValue(value) @@ -409,7 +661,8 @@ private long getReplicatedSizeForTable(String tableName) { } private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, - String keyName, boolean isFile) { + String keyName, Long objectID, + boolean isFile) { return new OmKeyInfo.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) @@ -418,6 +671,7 @@ private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, .setReplicationConfig(StandaloneReplicationConfig .getInstance(HddsProtos.ReplicationFactor.ONE)) .setDataSize(100L) + .setObjectID(objectID) .build(); } } diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml index f875047d04a2..5956e92476a8 100644 --- a/hadoop-ozone/s3gateway/pom.xml +++ b/hadoop-ozone/s3gateway/pom.xml @@ -99,6 +99,12 @@ io.grpc grpc-protobuf + + + com.google.code.findbugs + jsr305 + + io.grpc @@ -159,6 +165,11 @@ hdds-test-utils test + + org.mockito + mockito-inline + test + diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java index 47b59cfcc0e8..8ae48ca4f83e 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java @@ -21,6 +21,8 @@ import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter; +import org.apache.hadoop.ozone.OzoneConsts; + import java.time.Instant; /** @@ -37,7 +39,7 @@ public class KeyMetadata { @XmlElement(name = "LastModified") private Instant lastModified; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; @XmlElement(name = "Size") diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index b8cd56d5f954..b7a5af73403e 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -63,7 +63,7 @@ import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; -import java.util.BitSet; +import java.util.EnumSet; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -665,14 +665,11 @@ private List getAndConvertAclOnBucket(String value, throw newError(NOT_IMPLEMENTED, part[0]); } // Build ACL on Bucket - BitSet aclsOnBucket = - S3Acl.getOzoneAclOnBucketFromS3Permission(permission); + EnumSet aclsOnBucket = S3Acl.getOzoneAclOnBucketFromS3Permission(permission); OzoneAcl defaultOzoneAcl = new OzoneAcl( - IAccessAuthorizer.ACLIdentityType.USER, part[1], aclsOnBucket, - OzoneAcl.AclScope.DEFAULT); - OzoneAcl accessOzoneAcl = new OzoneAcl( - IAccessAuthorizer.ACLIdentityType.USER, part[1], aclsOnBucket, - ACCESS); + 
IAccessAuthorizer.ACLIdentityType.USER, part[1], OzoneAcl.AclScope.DEFAULT, aclsOnBucket + ); + OzoneAcl accessOzoneAcl = new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, part[1], ACCESS, aclsOnBucket); ozoneAclList.add(defaultOzoneAcl); ozoneAclList.add(accessOzoneAcl); } @@ -699,11 +696,9 @@ private List getAndConvertAclOnVolume(String value, throw newError(NOT_IMPLEMENTED, part[0]); } // Build ACL on Volume - BitSet aclsOnVolume = + EnumSet aclsOnVolume = S3Acl.getOzoneAclOnVolumeFromS3Permission(permission); - OzoneAcl accessOzoneAcl = new OzoneAcl( - IAccessAuthorizer.ACLIdentityType.USER, part[1], aclsOnVolume, - ACCESS); + OzoneAcl accessOzoneAcl = new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, part[1], ACCESS, aclsOnVolume); ozoneAclList.add(accessOzoneAcl); } return ozoneAclList; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java index 72289470c2ca..af5eafc9f438 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java @@ -23,6 +23,8 @@ import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; +import org.apache.hadoop.ozone.OzoneConsts; + import java.util.ArrayList; import java.util.List; @@ -55,7 +57,7 @@ public static class Part { @XmlElement(name = "PartNumber") private int partNumber; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; public int getPartNumber() { @@ -66,12 +68,12 @@ public void setPartNumber(int partNumber) { this.partNumber = partNumber; } - public String geteTag() { + public String getETag() { return eTag; } - public void seteTag(String eTag) { - this.eTag = eTag; + public void setETag(String eTagHash) { + this.eTag = eTagHash; } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java index c636f36b175b..2aa30d6b839b 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java @@ -22,6 +22,7 @@ import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; +import org.apache.hadoop.ozone.OzoneConsts; /** * Complete Multipart Upload request response. 
@@ -41,7 +42,7 @@ public class CompleteMultipartUploadResponse { @XmlElement(name = "Key") private String key; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; public String getLocation() { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java index 6e114c2e0c64..d1136fe9ed78 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.s3.endpoint; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter; import javax.xml.bind.annotation.XmlAccessType; @@ -39,7 +40,7 @@ public class CopyObjectResponse { @XmlElement(name = "LastModified") private Instant lastModified; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java index c4e65aa38ff7..ab30c1f0e7c9 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java @@ -25,6 +25,7 @@ import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter; import java.time.Instant; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter; /** @@ -39,7 +40,7 @@ public class CopyPartResult { @XmlElement(name = "LastModified") private Instant lastModified; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; public CopyPartResult() { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java index 5694d6f9f41b..5810c4ec2a2f 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java @@ -65,6 +65,7 @@ import org.slf4j.LoggerFactory; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.OzoneConsts.KB; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError; import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX; @@ -74,8 +75,6 @@ */ public abstract class EndpointBase implements Auditor { - protected static final String ETAG = "ETag"; - protected static final String ETAG_CUSTOM = "etag-custom"; @Inject diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java index fc9da14133c8..8f3fad735441 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.s3.endpoint; +import org.apache.hadoop.ozone.OzoneConsts; 
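+// Note: @XmlElement name values must be compile-time constants, so the shared
+// OzoneConsts.ETAG field (assumed to hold the literal "ETag") can replace the
+// repeated "ETag" strings across these S3 response types without changing the
+// serialized element name.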
import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter; import javax.xml.bind.annotation.XmlAccessType; @@ -154,7 +155,7 @@ public static class Part { @XmlElement(name = "LastModified") private Instant lastModified; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 1e247c8eb858..26e51a6d6661 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -101,12 +101,13 @@ import java.util.OptionalLong; import static javax.ws.rs.core.HttpHeaders.CONTENT_LENGTH; +import static javax.ws.rs.core.HttpHeaders.ETAG; import static javax.ws.rs.core.HttpHeaders.LAST_MODIFIED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.EC; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION; @@ -125,6 +126,7 @@ import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.PRECOND_FAILED; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError; import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCEPT_RANGE_HEADER; +import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_COPY_DIRECTIVE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.DECODED_CONTENT_LENGTH_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.CONTENT_RANGE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER; @@ -134,6 +136,7 @@ import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER_SUPPORTED_UNIT; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; +import static org.apache.hadoop.ozone.s3.util.S3Consts.CopyDirective; import static org.apache.hadoop.ozone.s3.util.S3Utils.urlDecode; /** @@ -150,7 +153,7 @@ public class ObjectEndpoint extends EndpointBase { static { E_TAG_PROVIDER = ThreadLocal.withInitial(() -> { try { - return MessageDigest.getInstance("Md5"); + return MessageDigest.getInstance(OzoneConsts.MD5_HASH); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } @@ -195,8 +198,8 @@ public void init() { OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES); datastreamEnabled = ozoneConfiguration.getBoolean( - DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, - DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT); + HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, + HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT); datastreamMinLength = (long) ozoneConfiguration.getStorageSize( 
OZONE_FS_DATASTREAM_AUTO_THRESHOLD, OZONE_FS_DATASTREAM_AUTO_THRESHOLD_DEFAULT, StorageUnit.BYTES); @@ -216,13 +219,14 @@ public Response put( @HeaderParam("Content-Length") long length, @QueryParam("partNumber") int partNumber, @QueryParam("uploadId") @DefaultValue("") String uploadID, - InputStream body) throws IOException, OS3Exception { + final InputStream body) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.CREATE_KEY; boolean auditSuccess = true; PerformanceStringBuilder perf = new PerformanceStringBuilder(); String copyHeader = null, storageType = null; + DigestInputStream digestInputStream = null; try { OzoneVolume volume = getVolume(); if (uploadID != null && !uploadID.equals("")) { @@ -272,7 +276,9 @@ public Response put( boolean hasAmzDecodedLengthZero = amzDecodedLength != null && Long.parseLong(amzDecodedLength) == 0; if (canCreateDirectory && - (length == 0 || hasAmzDecodedLengthZero)) { + (length == 0 || hasAmzDecodedLengthZero) && + StringUtils.endsWith(keyPath, "/") + ) { s3GAction = S3GAction.CREATE_DIRECTORY; getClientProtocol() .createDirectory(volume.getName(), bucketName, keyPath); @@ -296,11 +302,11 @@ public Response put( if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD" .equals(headers.getHeaderString("x-amz-content-sha256"))) { - body = new DigestInputStream(new SignedChunksInputStream(body), - E_TAG_PROVIDER.get()); + digestInputStream = new DigestInputStream(new SignedChunksInputStream(body), + getMessageDigestInstance()); length = Long.parseLong(amzDecodedLength); } else { - body = new DigestInputStream(body, E_TAG_PROVIDER.get()); + digestInputStream = new DigestInputStream(body, getMessageDigestInstance()); } long putLength; @@ -309,7 +315,7 @@ public Response put( perf.appendStreamMode(); Pair keyWriteResult = ObjectEndpointStreaming .put(bucket, keyPath, length, replicationConfig, chunkSize, - customMetadata, (DigestInputStream) body, perf); + customMetadata, digestInputStream, perf); eTag = keyWriteResult.getKey(); putLength = keyWriteResult.getValue(); } else { @@ -319,9 +325,9 @@ public Response put( long metadataLatencyNs = getMetrics().updatePutKeyMetadataStats(startNanos); perf.appendMetaLatencyNanos(metadataLatencyNs); - putLength = IOUtils.copyLarge(body, output); + putLength = IOUtils.copyLarge(digestInputStream, output); eTag = DatatypeConverter.printHexBinary( - ((DigestInputStream) body).getMessageDigest().digest()) + digestInputStream.getMessageDigest().digest()) .toLowerCase(); output.getMetadata().put(ETAG, eTag); } @@ -366,6 +372,11 @@ public Response put( } throw ex; } finally { + // Reset the thread-local message digest instance in case of exception + // and MessageDigest#digest is never called + if (digestInputStream != null) { + digestInputStream.getMessageDigest().reset(); + } if (auditSuccess) { long opLatencyNs = getMetrics().updateCreateKeySuccessStats(startNanos); perf.appendOpLatencyNanos(opLatencyNs); @@ -482,9 +493,12 @@ public Response get( responseBuilder.header(CONTENT_RANGE_HEADER, contentRangeVal); } responseBuilder - .header(ETAG, wrapInQuotes(keyDetails.getMetadata().get(ETAG))) .header(ACCEPT_RANGE_HEADER, RANGE_HEADER_SUPPORTED_UNIT); + if (keyDetails.getMetadata().get(ETAG) != null) { + responseBuilder.header(ETAG, wrapInQuotes(keyDetails.getMetadata().get(ETAG))); + } + // if multiple query parameters having same name, // Only the first parameters will be recognized // eg: @@ -590,9 +604,16 @@ public Response head( } ResponseBuilder response = 
Response.ok().status(HttpStatus.SC_OK) - .header(ETAG, "" + wrapInQuotes(key.getMetadata().get(ETAG))) .header("Content-Length", key.getDataSize()) .header("Content-Type", "binary/octet-stream"); + + if (key.getMetadata().get(ETAG) != null) { + // Should not return ETag header if the ETag is not set + // doing so will result in "null" string being returned instead + // which breaks some AWS SDK implementation + response.header(ETAG, "" + wrapInQuotes(key.getMetadata().get(ETAG))); + } + addLastModifiedDate(response, key); addCustomMetadataHeaders(response, key); getMetrics().updateHeadKeySuccessStats(startNanos); @@ -771,7 +792,8 @@ public Response initializeMultipartUpload( private ReplicationConfig getReplicationConfig(OzoneBucket ozoneBucket, String storageType) throws OS3Exception { if (StringUtils.isEmpty(storageType)) { - storageType = S3StorageType.getDefault(ozoneConfiguration).toString(); + S3StorageType defaultStorageType = S3StorageType.getDefault(ozoneConfiguration); + storageType = (defaultStorageType != null ? defaultStorageType.toString() : null); } ReplicationConfig clientConfiguredReplicationConfig = null; @@ -807,7 +829,7 @@ public Response completeMultipartUpload(@PathParam("bucket") String bucket, OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo; try { for (CompleteMultipartUploadRequest.Part part : partList) { - partsMap.put(part.getPartNumber(), part.geteTag()); + partsMap.put(part.getPartNumber(), part.getETag()); } if (LOG.isDebugEnabled()) { LOG.debug("Parts map {}", partsMap); @@ -867,20 +889,21 @@ public Response completeMultipartUpload(@PathParam("bucket") String bucket, @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"}) private Response createMultipartKey(OzoneVolume volume, String bucket, String key, long length, int partNumber, String uploadID, - InputStream body, PerformanceStringBuilder perf) + final InputStream body, PerformanceStringBuilder perf) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); String copyHeader = null; + DigestInputStream digestInputStream = null; try { if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD" .equals(headers.getHeaderString("x-amz-content-sha256"))) { - body = new DigestInputStream(new SignedChunksInputStream(body), - E_TAG_PROVIDER.get()); + digestInputStream = new DigestInputStream(new SignedChunksInputStream(body), + getMessageDigestInstance()); length = Long.parseLong( headers.getHeaderString(DECODED_CONTENT_LENGTH_HEADER)); } else { - body = new DigestInputStream(body, E_TAG_PROVIDER.get()); + digestInputStream = new DigestInputStream(body, getMessageDigestInstance()); } copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER); @@ -900,7 +923,7 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, perf.appendStreamMode(); return ObjectEndpointStreaming .createMultipartKey(ozoneBucket, key, length, partNumber, - uploadID, chunkSize, (DigestInputStream) body, perf); + uploadID, chunkSize, digestInputStream, perf); } // OmMultipartCommitUploadPartInfo can only be gotten after the // OzoneOutputStream is closed, so we need to save the KeyOutputStream @@ -955,6 +978,8 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, getMetrics().updateCopyKeyMetadataStats(startNanos); copyLength = IOUtils.copyLarge( sourceObject, ozoneOutputStream, 0, length); + ozoneOutputStream.getMetadata() + .putAll(sourceKeyDetails.getMetadata()); keyOutputStream = ozoneOutputStream.getKeyOutputStream(); } } else { @@ -964,6 +989,8 @@ private Response 
createMultipartKey(OzoneVolume volume, String bucket, metadataLatencyNs = getMetrics().updateCopyKeyMetadataStats(startNanos); copyLength = IOUtils.copyLarge(sourceObject, ozoneOutputStream); + ozoneOutputStream.getMetadata() + .putAll(sourceKeyDetails.getMetadata()); keyOutputStream = ozoneOutputStream.getKeyOutputStream(); } } @@ -977,10 +1004,10 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, partNumber, uploadID)) { metadataLatencyNs = getMetrics().updatePutKeyMetadataStats(startNanos); - putLength = IOUtils.copyLarge(body, ozoneOutputStream); + putLength = IOUtils.copyLarge(digestInputStream, ozoneOutputStream); ((KeyMetadataAware)ozoneOutputStream.getOutputStream()) .getMetadata().put(ETAG, DatatypeConverter.printHexBinary( - ((DigestInputStream) body).getMessageDigest().digest()) + digestInputStream.getMessageDigest().digest()) .toLowerCase()); keyOutputStream = ozoneOutputStream.getKeyOutputStream(); @@ -993,7 +1020,13 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, assert keyOutputStream != null; OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = keyOutputStream.getCommitUploadPartInfo(); - String eTag = omMultipartCommitUploadPartInfo.getPartName(); + String eTag = omMultipartCommitUploadPartInfo.getETag(); + // If the OmMultipartCommitUploadPartInfo does not contain eTag, + // fall back to MPU part name for compatibility in case the (old) OM + // does not return the eTag field + if (StringUtils.isEmpty(eTag)) { + eTag = omMultipartCommitUploadPartInfo.getPartName(); + } if (copyHeader != null) { getMetrics().updateCopyObjectSuccessStats(startNanos); @@ -1020,6 +1053,12 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, throw os3Exception; } throw ex; + } finally { + // Reset the thread-local message digest instance in case of exception + // and MessageDigest#digest is never called + if (digestInputStream != null) { + digestInputStream.getMessageDigest().reset(); + } } } @@ -1064,7 +1103,10 @@ private Response listParts(String bucket, String key, String uploadID, ozoneMultipartUploadPartListParts.getPartInfoList().forEach(partInfo -> { ListPartsResponse.Part part = new ListPartsResponse.Part(); part.setPartNumber(partInfo.getPartNumber()); - part.setETag(partInfo.getPartName()); + // If the ETag field does not exist, use MPU part name for backward + // compatibility + part.setETag(StringUtils.isNotEmpty(partInfo.getETag()) ? 
+ partInfo.getETag() : partInfo.getPartName()); part.setSize(partInfo.getSize()); part.setLastModified(Instant.ofEpochMilli( partInfo.getModificationTime())); @@ -1097,7 +1139,7 @@ public void setContext(ContainerRequestContext context) { } @SuppressWarnings("checkstyle:ParameterNumber") - void copy(OzoneVolume volume, InputStream src, long srcKeyLen, + void copy(OzoneVolume volume, DigestInputStream src, long srcKeyLen, String destKey, String destBucket, ReplicationConfig replication, Map metadata, @@ -1119,6 +1161,8 @@ void copy(OzoneVolume volume, InputStream src, long srcKeyLen, getMetrics().updateCopyKeyMetadataStats(startNanos); perf.appendMetaLatencyNanos(metadataLatencyNs); copyLength = IOUtils.copyLarge(src, dest); + String eTag = DatatypeConverter.printHexBinary(src.getMessageDigest().digest()).toLowerCase(); + dest.getMetadata().put(ETAG, eTag); } } getMetrics().incCopyObjectSuccessLength(copyLength); @@ -1136,9 +1180,11 @@ private CopyObjectResponse copyObject(OzoneVolume volume, String sourceBucket = result.getLeft(); String sourceKey = result.getRight(); + DigestInputStream sourceDigestInputStream = null; try { + OzoneKeyDetails sourceKeyDetails = getClientProtocol().getKeyDetails( + volume.getName(), sourceBucket, sourceKey); // Checking whether we trying to copying to it self. - if (sourceBucket.equals(destBucket) && sourceKey .equals(destkey)) { // When copying to same storage type when storage type is provided, @@ -1157,22 +1203,37 @@ private CopyObjectResponse copyObject(OzoneVolume volume, // still does not support this just returning dummy response // for now CopyObjectResponse copyObjectResponse = new CopyObjectResponse(); - copyObjectResponse.setETag(OzoneUtils.getRequestID()); + copyObjectResponse.setETag(wrapInQuotes(sourceKeyDetails.getMetadata().get(ETAG))); copyObjectResponse.setLastModified(Instant.ofEpochMilli( Time.now())); return copyObjectResponse; } } - - OzoneKeyDetails sourceKeyDetails = getClientProtocol().getKeyDetails( - volume.getName(), sourceBucket, sourceKey); long sourceKeyLen = sourceKeyDetails.getDataSize(); + // Custom metadata in copyObject with metadata directive + Map customMetadata; + String metadataCopyDirective = headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER); + if (StringUtils.isEmpty(metadataCopyDirective) || metadataCopyDirective.equals(CopyDirective.COPY.name())) { + // The custom metadata will be copied from the source key + customMetadata = sourceKeyDetails.getMetadata(); + } else if (metadataCopyDirective.equals(CopyDirective.REPLACE.name())) { + // Replace the metadata with the metadata from the request headers + customMetadata = getCustomMetadataFromHeaders(headers.getRequestHeaders()); + } else { + OS3Exception ex = newError(INVALID_ARGUMENT, metadataCopyDirective); + ex.setErrorMessage("An error occurred (InvalidArgument) " + + "when calling the CopyObject operation: " + + "The metadata directive specified is invalid. 
Valid values are COPY or REPLACE."); + throw ex; + } + try (OzoneInputStream src = getClientProtocol().getKey(volume.getName(), sourceBucket, sourceKey)) { getMetrics().updateCopyKeyMetadataStats(startNanos); - copy(volume, src, sourceKeyLen, destkey, destBucket, replicationConfig, - sourceKeyDetails.getMetadata(), perf, startNanos); + sourceDigestInputStream = new DigestInputStream(src, getMessageDigestInstance()); + copy(volume, sourceDigestInputStream, sourceKeyLen, destkey, destBucket, replicationConfig, + customMetadata, perf, startNanos); } final OzoneKeyDetails destKeyDetails = getClientProtocol().getKeyDetails( @@ -1180,7 +1241,7 @@ private CopyObjectResponse copyObject(OzoneVolume volume, getMetrics().updateCopyObjectSuccessStats(startNanos); CopyObjectResponse copyObjectResponse = new CopyObjectResponse(); - copyObjectResponse.setETag(OzoneUtils.getRequestID()); + copyObjectResponse.setETag(wrapInQuotes(destKeyDetails.getMetadata().get(ETAG))); copyObjectResponse.setLastModified(destKeyDetails.getModificationTime()); return copyObjectResponse; } catch (OMException ex) { @@ -1193,6 +1254,12 @@ private CopyObjectResponse copyObject(OzoneVolume volume, destBucket + "/" + destkey, ex); } throw ex; + } finally { + // Reset the thread-local message digest instance in case of exception + // and MessageDigest#digest is never called + if (sourceDigestInputStream != null) { + sourceDigestInputStream.getMessageDigest().reset(); + } } } @@ -1293,4 +1360,9 @@ private String wrapInQuotes(String value) { return "\"" + value + "\""; } + @VisibleForTesting + public MessageDigest getMessageDigestInstance() { + return E_TAG_PROVIDER.get(); + } + } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java index e509acb05bdb..b916fc111d27 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java @@ -21,12 +21,11 @@ import javax.xml.bind.DatatypeConverter; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.io.KeyDataStreamOutput; import org.apache.hadoop.ozone.client.io.KeyMetadataAware; import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.apache.hadoop.ozone.s3.metrics.S3GatewayMetrics; @@ -110,7 +109,7 @@ public static Pair putKeyWithStream( eTag = DatatypeConverter.printHexBinary(body.getMessageDigest().digest()) .toLowerCase(); perf.appendMetaLatencyNanos(metadataLatencyNs); - ((KeyMetadataAware)streamOutput).getMetadata().put("ETag", eTag); + ((KeyMetadataAware)streamOutput).getMetadata().put(OzoneConsts.ETAG, eTag); } return Pair.of(eTag, writeLen); } @@ -123,15 +122,18 @@ public static long copyKeyWithStream( int bufferSize, ReplicationConfig replicationConfig, Map keyMetadata, - InputStream body, PerformanceStringBuilder perf, long startNanos) + DigestInputStream body, PerformanceStringBuilder perf, long startNanos) throws 
IOException { - long writeLen = 0; + long writeLen; try (OzoneDataStreamOutput streamOutput = bucket.createStreamKey(keyPath, length, replicationConfig, keyMetadata)) { long metadataLatencyNs = METRICS.updateCopyKeyMetadataStats(startNanos); - perf.appendMetaLatencyNanos(metadataLatencyNs); writeLen = writeToStreamOutput(streamOutput, body, bufferSize, length); + String eTag = DatatypeConverter.printHexBinary(body.getMessageDigest().digest()) + .toLowerCase(); + perf.appendMetaLatencyNanos(metadataLatencyNs); + ((KeyMetadataAware)streamOutput).getMetadata().put(OzoneConsts.ETAG, eTag); } return writeLen; } @@ -161,11 +163,6 @@ public static Response createMultipartKey(OzoneBucket ozoneBucket, String key, throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); String eTag; - // OmMultipartCommitUploadPartInfo can only be gotten after the - // OzoneDataStreamOutput is closed, so we need to save the - // KeyDataStreamOutput in the OzoneDataStreamOutput and use it to get the - // OmMultipartCommitUploadPartInfo after OzoneDataStreamOutput is closed. - KeyDataStreamOutput keyDataStreamOutput = null; try { try (OzoneDataStreamOutput streamOutput = ozoneBucket .createMultipartStreamKey(key, length, partNumber, uploadID)) { @@ -174,11 +171,10 @@ public static Response createMultipartKey(OzoneBucket ozoneBucket, String key, writeToStreamOutput(streamOutput, body, chunkSize, length); eTag = DatatypeConverter.printHexBinary( body.getMessageDigest().digest()).toLowerCase(); - ((KeyMetadataAware)streamOutput).getMetadata().put("ETag", eTag); + ((KeyMetadataAware)streamOutput).getMetadata().put(OzoneConsts.ETAG, eTag); METRICS.incPutKeySuccessLength(putLength); perf.appendMetaLatencyNanos(metadataLatencyNs); perf.appendSizeBytes(putLength); - keyDataStreamOutput = streamOutput.getKeyDataStreamOutput(); } } catch (OMException ex) { if (ex.getResult() == @@ -190,13 +186,7 @@ public static Response createMultipartKey(OzoneBucket ozoneBucket, String key, ozoneBucket.getName() + "/" + key); } throw ex; - } finally { - if (keyDataStreamOutput != null) { - OmMultipartCommitUploadPartInfo commitUploadPartInfo = - keyDataStreamOutput.getCommitUploadPartInfo(); - eTag = commitUploadPartInfo.getPartName(); - } } - return Response.ok().header("ETag", eTag).build(); + return Response.ok().header(OzoneConsts.ETAG, eTag).build(); } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java index 792f2e2ef5e9..5d6057f061b4 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java @@ -29,7 +29,7 @@ import org.slf4j.LoggerFactory; import java.util.ArrayList; -import java.util.BitSet; +import java.util.EnumSet; import java.util.List; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INVALID_ARGUMENT; @@ -228,15 +228,15 @@ public static List s3AclToOzoneNativeAclOnBucket( grant.getGrantee().getXsiType()); if (identityType != null && identityType.isSupported()) { String permission = grant.getPermission(); - BitSet acls = getOzoneAclOnBucketFromS3Permission(permission); + EnumSet acls = getOzoneAclOnBucketFromS3Permission(permission); OzoneAcl defaultOzoneAcl = new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, - grant.getGrantee().getId(), acls, - OzoneAcl.AclScope.DEFAULT); + grant.getGrantee().getId(), OzoneAcl.AclScope.DEFAULT, 
acls + ); OzoneAcl accessOzoneAcl = new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, - grant.getGrantee().getId(), acls, - OzoneAcl.AclScope.ACCESS); + grant.getGrantee().getId(), OzoneAcl.AclScope.ACCESS, acls + ); ozoneAclList.add(defaultOzoneAcl); ozoneAclList.add(accessOzoneAcl); } else { @@ -249,31 +249,31 @@ public static List s3AclToOzoneNativeAclOnBucket( return ozoneAclList; } - public static BitSet getOzoneAclOnBucketFromS3Permission(String permission) + public static EnumSet getOzoneAclOnBucketFromS3Permission(String permission) throws OS3Exception { ACLType permissionType = ACLType.getType(permission); if (permissionType == null) { throw S3ErrorTable.newError(S3ErrorTable.INVALID_ARGUMENT, permission); } - BitSet acls = new BitSet(IAccessAuthorizer.ACLType.getNoOfAcls()); + EnumSet acls = EnumSet.noneOf(IAccessAuthorizer.ACLType.class); switch (permissionType) { case FULL_CONTROL: - acls.set(IAccessAuthorizer.ACLType.ALL.ordinal()); + acls.add(IAccessAuthorizer.ACLType.ALL); break; case WRITE_ACP: - acls.set(IAccessAuthorizer.ACLType.WRITE_ACL.ordinal()); + acls.add(IAccessAuthorizer.ACLType.WRITE_ACL); break; case READ_ACP: - acls.set(IAccessAuthorizer.ACLType.READ_ACL.ordinal()); + acls.add(IAccessAuthorizer.ACLType.READ_ACL); break; case WRITE: - acls.set(IAccessAuthorizer.ACLType.WRITE.ordinal()); - acls.set(IAccessAuthorizer.ACLType.DELETE.ordinal()); - acls.set(IAccessAuthorizer.ACLType.CREATE.ordinal()); + acls.add(IAccessAuthorizer.ACLType.WRITE); + acls.add(IAccessAuthorizer.ACLType.DELETE); + acls.add(IAccessAuthorizer.ACLType.CREATE); break; case READ: - acls.set(IAccessAuthorizer.ACLType.READ.ordinal()); - acls.set(IAccessAuthorizer.ACLType.LIST.ordinal()); + acls.add(IAccessAuthorizer.ACLType.READ); + acls.add(IAccessAuthorizer.ACLType.LIST); break; default: LOG.error("Failed to recognize S3 permission {}", permission); @@ -292,11 +292,11 @@ public static List s3AclToOzoneNativeAclOnVolume( grant.getGrantee().getXsiType()); if (identityType != null && identityType.isSupported()) { String permission = grant.getPermission(); - BitSet acls = getOzoneAclOnVolumeFromS3Permission(permission); + EnumSet acls = getOzoneAclOnVolumeFromS3Permission(permission); OzoneAcl accessOzoneAcl = new OzoneAcl( IAccessAuthorizer.ACLIdentityType.USER, - grant.getGrantee().getId(), acls, - OzoneAcl.AclScope.ACCESS); + grant.getGrantee().getId(), OzoneAcl.AclScope.ACCESS, acls + ); ozoneAclList.add(accessOzoneAcl); } else { LOG.error("Grantee type {} is not supported", @@ -309,35 +309,35 @@ public static List s3AclToOzoneNativeAclOnVolume( } // User privilege on volume follows the "lest privilege" principle. 
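Aside on the BitSet-to-EnumSet migration in S3Acl.java above: the standalone sketch below is not part of this patch; it only illustrates the same mapping pattern for bucket ACLs with a simplified stand-in enum in place of IAccessAuthorizer.ACLType, and the names AclType, mapBucketPermission and S3PermissionMappingSketch are invented for the example. Compared with setting ordinal bits in a BitSet sized by ACLType.getNoOfAcls(), the EnumSet form is type-safe and reads directly as the permission expansion it implements.

import java.util.EnumSet;

// Simplified stand-in for IAccessAuthorizer.ACLType, used only in this sketch.
enum AclType { READ, WRITE, CREATE, DELETE, LIST, READ_ACL, WRITE_ACL, ALL }

public final class S3PermissionMappingSketch {

  // Mirrors the shape of getOzoneAclOnBucketFromS3Permission after the change:
  // one S3 permission string expands to an EnumSet of ACL types instead of
  // ordinal positions flipped in a BitSet.
  static EnumSet<AclType> mapBucketPermission(String permission) {
    switch (permission) {
    case "FULL_CONTROL":
      return EnumSet.of(AclType.ALL);
    case "WRITE_ACP":
      return EnumSet.of(AclType.WRITE_ACL);
    case "READ_ACP":
      return EnumSet.of(AclType.READ_ACL);
    case "WRITE":
      return EnumSet.of(AclType.WRITE, AclType.DELETE, AclType.CREATE);
    case "READ":
      return EnumSet.of(AclType.READ, AclType.LIST);
    default:
      // The real code logs the unrecognized permission; the sketch simply throws.
      throw new IllegalArgumentException("Unsupported S3 permission: " + permission);
    }
  }

  public static void main(String[] args) {
    System.out.println(mapBucketPermission("WRITE")); // [WRITE, CREATE, DELETE]
    System.out.println(mapBucketPermission("READ"));  // [READ, LIST]
  }
}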
- public static BitSet getOzoneAclOnVolumeFromS3Permission(String permission) + public static EnumSet getOzoneAclOnVolumeFromS3Permission(String permission) throws OS3Exception { - BitSet acls = new BitSet(IAccessAuthorizer.ACLType.getNoOfAcls()); + EnumSet acls = EnumSet.noneOf(IAccessAuthorizer.ACLType.class); ACLType permissionType = ACLType.getType(permission); if (permissionType == null) { throw S3ErrorTable.newError(S3ErrorTable.INVALID_ARGUMENT, permission); } switch (permissionType) { case FULL_CONTROL: - acls.set(IAccessAuthorizer.ACLType.READ.ordinal()); - acls.set(IAccessAuthorizer.ACLType.WRITE.ordinal()); - acls.set(IAccessAuthorizer.ACLType.READ_ACL.ordinal()); - acls.set(IAccessAuthorizer.ACLType.WRITE_ACL.ordinal()); + acls.add(IAccessAuthorizer.ACLType.READ); + acls.add(IAccessAuthorizer.ACLType.WRITE); + acls.add(IAccessAuthorizer.ACLType.READ_ACL); + acls.add(IAccessAuthorizer.ACLType.WRITE_ACL); break; case WRITE_ACP: - acls.set(IAccessAuthorizer.ACLType.READ.ordinal()); - acls.set(IAccessAuthorizer.ACLType.READ_ACL.ordinal()); - acls.set(IAccessAuthorizer.ACLType.WRITE_ACL.ordinal()); + acls.add(IAccessAuthorizer.ACLType.READ); + acls.add(IAccessAuthorizer.ACLType.READ_ACL); + acls.add(IAccessAuthorizer.ACLType.WRITE_ACL); break; case READ_ACP: - acls.set(IAccessAuthorizer.ACLType.READ.ordinal()); - acls.set(IAccessAuthorizer.ACLType.READ_ACL.ordinal()); + acls.add(IAccessAuthorizer.ACLType.READ); + acls.add(IAccessAuthorizer.ACLType.READ_ACL); break; case WRITE: - acls.set(IAccessAuthorizer.ACLType.READ.ordinal()); - acls.set(IAccessAuthorizer.ACLType.WRITE.ordinal()); + acls.add(IAccessAuthorizer.ACLType.READ); + acls.add(IAccessAuthorizer.ACLType.WRITE); break; case READ: - acls.set(IAccessAuthorizer.ACLType.READ.ordinal()); + acls.add(IAccessAuthorizer.ACLType.READ); break; default: LOG.error("Failed to recognize S3 permission {}", permission); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java index df3d01936b18..3b38ff03c420 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java @@ -62,10 +62,20 @@ private S3Consts() { public static final String S3_XML_NAMESPACE = "http://s3.amazonaws" + ".com/doc/2006-03-01/"; + // Constants related to custom metadata public static final String CUSTOM_METADATA_HEADER_PREFIX = "x-amz-meta-"; + public static final String CUSTOM_METADATA_COPY_DIRECTIVE_HEADER = "x-amz-metadata-directive"; public static final String DECODED_CONTENT_LENGTH_HEADER = "x-amz-decoded-content-length"; + /** + * Copy directive for metadata and tags. 
+ */ + public enum CopyDirective { + COPY, // Default directive + REPLACE + } + } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java index ae42e812fb3e..9eb88989a32e 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java @@ -62,6 +62,10 @@ public ReplicationType getType() { public static S3StorageType getDefault(ConfigurationSource config) { String replicationString = config.get(OzoneConfigKeys.OZONE_REPLICATION); ReplicationFactor configFactor; + if (replicationString == null) { + // if no config is set then let server take decision + return null; + } try { configFactor = ReplicationFactor.valueOf( Integer.parseInt(replicationString)); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java index 7515d991eba0..0400bc60500c 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java @@ -482,7 +482,7 @@ public OzoneFileStatus getOzoneFileStatus(String volumeName, @Override public void createDirectory(String volumeName, String bucketName, String keyName) throws IOException { - + getBucket(volumeName, bucketName).createDirectory(keyName); } @Override @@ -650,6 +650,13 @@ public String createSnapshot(String volumeName, return ""; } + @Override + public void renameSnapshot(String volumeName, String bucketName, + String snapshotOldName, String snapshotNewName) + throws IOException { + + } + @Override public List listSnapshot( String volumeName, String bucketName, String snapshotPrefix, diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java index b79e49f834cb..e9fb15e613fe 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java @@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.client; import java.io.IOException; -import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.Map; @@ -62,7 +61,7 @@ public void createVolume(String volumeName) throws IOException { .setAdmin("root") .setOwner("root") .setQuotaInBytes(Integer.MAX_VALUE) - .setAcls(new ArrayList<>()).build()); + .build()); } @Override diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java index fad3386c61c4..d272360fc3cd 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java @@ -23,6 +23,8 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.nio.ByteBuffer; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; @@ -32,13 +34,13 @@ import java.util.UUID; import 
java.util.stream.Collectors; +import javax.xml.bind.DatatypeConverter; import org.apache.commons.codec.digest.DigestUtils; -import org.apache.hadoop.hdds.client.DefaultReplicationConfig; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput; import org.apache.hadoop.ozone.OzoneAcl; @@ -52,13 +54,19 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; import org.apache.hadoop.util.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; +import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; /** * In-memory ozone bucket for testing. */ -public class OzoneBucketStub extends OzoneBucket { +public final class OzoneBucketStub extends OzoneBucket { + + private static final Logger LOG = LoggerFactory.getLogger(OzoneBucketStub.class); private Map keyDetails = new HashMap<>(); @@ -75,7 +83,7 @@ public static Builder newBuilder() { return new Builder(); } - public OzoneBucketStub(Builder b) { + private OzoneBucketStub(Builder b) { super(b); this.replicationConfig = super.getReplicationConfig(); } @@ -88,43 +96,6 @@ public static final class Builder extends OzoneBucket.Builder { private Builder() { } - @Override - public Builder setVolumeName(String volumeName) { - super.setVolumeName(volumeName); - return this; - } - - @Override - public Builder setName(String name) { - super.setName(name); - return this; - } - - @Override - public Builder setDefaultReplicationConfig( - DefaultReplicationConfig defaultReplicationConfig) { - super.setDefaultReplicationConfig(defaultReplicationConfig); - return this; - } - - @Override - public Builder setStorageType(StorageType storageType) { - super.setStorageType(storageType); - return this; - } - - @Override - public Builder setVersioning(Boolean versioning) { - super.setVersioning(versioning); - return this; - } - - @Override - public Builder setCreationTime(long creationTime) { - super.setCreationTime(creationTime); - return this; - } - @Override public OzoneBucketStub build() { return new OzoneBucketStub(this); @@ -144,31 +115,16 @@ public OzoneOutputStream createKey(String key, long size, ReplicationFactor factor, Map metadata) throws IOException { - ByteArrayOutputStream byteArrayOutputStream = - new ByteArrayOutputStream((int) size) { - @Override - public void close() throws IOException { - keyContents.put(key, toByteArray()); - keyDetails.put(key, new OzoneKeyDetails( - getVolumeName(), - getName(), - key, - size, - System.currentTimeMillis(), - System.currentTimeMillis(), - new ArrayList<>(), replicationConfig, metadata, null, - () -> readKey(key), true - )); - super.close(); - } - }; - return new OzoneOutputStream(byteArrayOutputStream, null); + ReplicationConfig replication = ReplicationConfig.fromTypeAndFactor(type, factor); + return createKey(key, size, replication, metadata); } @Override public OzoneOutputStream createKey(String key, long size, ReplicationConfig rConfig, Map metadata) throws IOException { + assertDoesNotExist(key + "/"); + final 
ReplicationConfig repConfig; if (rConfig == null) { repConfig = getReplicationConfig(); @@ -203,8 +159,10 @@ public OzoneDataStreamOutput createStreamKey(String key, long size, ReplicationConfig rConfig, Map keyMetadata) throws IOException { + assertDoesNotExist(key + "/"); + ByteBufferStreamOutput byteBufferStreamOutput = - new ByteBufferStreamOutput() { + new KeyMetadataAwareByteBufferStreamOutput(keyMetadata) { private final ByteBuffer buffer = ByteBuffer.allocate((int) size); @@ -267,7 +225,8 @@ public void close() throws IOException { byte[] bytes = new byte[position]; buffer.get(bytes); - Part part = new Part(key + size, bytes); + Part part = new Part(key + size, bytes, + getMetadata().get(ETAG)); if (partList.get(key) == null) { Map parts = new TreeMap<>(); parts.put(partNumber, part); @@ -425,7 +384,7 @@ public OzoneOutputStream createMultipartKey(String key, long size, @Override public void close() throws IOException { Part part = new Part(key + size, - toByteArray()); + toByteArray(), getMetadata().get(ETAG)); if (partList.get(key) == null) { Map parts = new TreeMap<>(); parts.put(partNumber, part); @@ -463,7 +422,7 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload(String key, for (Map.Entry part: partsMap.entrySet()) { Part recordedPart = partsList.get(part.getKey()); if (recordedPart == null || - !recordedPart.getPartName().equals(part.getValue())) { + !recordedPart.getETag().equals(part.getValue())) { throw new OMException(ResultCodes.INVALID_PART); } else { output.write(recordedPart.getContent()); @@ -506,13 +465,21 @@ public OzoneMultipartUploadPartListParts listParts(String key, int count = 0; int nextPartNumberMarker = 0; boolean truncated = false; + MessageDigest eTagProvider; + try { + eTagProvider = MessageDigest.getInstance(MD5_HASH); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } while (count < maxParts && partIterator.hasNext()) { Map.Entry partEntry = partIterator.next(); nextPartNumberMarker = partEntry.getKey(); if (partEntry.getKey() > partNumberMarker) { PartInfo partInfo = new PartInfo(partEntry.getKey(), partEntry.getValue().getPartName(), - Time.now(), partEntry.getValue().getContent().length); + Time.now(), partEntry.getValue().getContent().length, + DatatypeConverter.printHexBinary(eTagProvider.digest(partEntry + .getValue().getContent())).toLowerCase()); partInfoList.add(partInfo); count++; } @@ -563,9 +530,12 @@ public static class Part { private String partName; private byte[] content; - public Part(String name, byte[] data) { + private String eTag; + + public Part(String name, byte[] data, String eTag) { this.partName = name; this.content = data.clone(); + this.eTag = eTag; } public String getPartName() { @@ -575,6 +545,11 @@ public String getPartName() { public byte[] getContent() { return content.clone(); } + + public String getETag() { + return eTag; + } + } @Override @@ -589,6 +564,9 @@ public ReplicationConfig getReplicationConfig() { @Override public void createDirectory(String keyName) throws IOException { + assertDoesNotExist(StringUtils.stripEnd(keyName, "/")); + + LOG.info("createDirectory({})", keyName); keyDetails.put(keyName, new OzoneKeyDetails( getVolumeName(), getName(), @@ -600,6 +578,12 @@ public void createDirectory(String keyName) throws IOException { () -> readKey(keyName), false)); } + private void assertDoesNotExist(String keyName) throws OMException { + if (keyDetails.get(keyName) != null) { + throw new OMException("already exists", ResultCodes.FILE_ALREADY_EXISTS); + } + } + /** * 
ByteArrayOutputStream stub with metadata. */ diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java index 7bb35682d8da..b472320b7fe7 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java @@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.client; import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; @@ -65,6 +66,7 @@ public synchronized void close() throws IOException { @Override public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() { - return closed ? new OmMultipartCommitUploadPartInfo(partName) : null; + return closed ? new OmMultipartCommitUploadPartInfo(partName, + getMetadata().get(OzoneConsts.ETAG)) : null; } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java index 983516002909..ca3caa4ee777 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java @@ -22,8 +22,8 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.OzoneClientConfig; -import org.apache.hadoop.hdds.scm.StreamBufferArgs; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.client.io.KeyMetadataAware; import org.apache.hadoop.ozone.client.io.KeyOutputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; @@ -79,10 +79,7 @@ public KeyOutputStream getKeyOutputStream() { OzoneConfiguration conf = new OzoneConfiguration(); ReplicationConfig replicationConfig = ReplicationConfig.getDefault(conf); - OzoneClientConfig ozoneClientConfig = conf.getObject(OzoneClientConfig.class); - StreamBufferArgs streamBufferArgs = - StreamBufferArgs.getDefaultStreamBufferArgs(replicationConfig, ozoneClientConfig); - return new KeyOutputStream(replicationConfig, null, ozoneClientConfig, streamBufferArgs) { + return new KeyOutputStream(replicationConfig, null) { @Override public synchronized OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() { @@ -93,7 +90,8 @@ public KeyOutputStream getKeyOutputStream() { @Override public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() { - return closed ? new OmMultipartCommitUploadPartInfo(partName) : null; + return closed ? 
new OmMultipartCommitUploadPartInfo(partName, + ((KeyMetadataAware)getOutputStream()).getMetadata().get(OzoneConsts.ETAG)) : null; } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java index 9fab5a181b56..4ce18b41f1cf 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java @@ -38,17 +38,17 @@ /** * Ozone volume with in-memory state for testing. */ -public class OzoneVolumeStub extends OzoneVolume { +public final class OzoneVolumeStub extends OzoneVolume { - private Map buckets = new HashMap<>(); + private final Map buckets = new HashMap<>(); - private ArrayList aclList = new ArrayList<>(); + private final ArrayList aclList = new ArrayList<>(); public static Builder newBuilder() { return new Builder(); } - public OzoneVolumeStub(Builder b) { + private OzoneVolumeStub(Builder b) { super(b); } @@ -124,6 +124,7 @@ public void createBucket(String bucketName, BucketArgs bucketArgs) { .setDefaultReplicationConfig(new DefaultReplicationConfig( RatisReplicationConfig.getInstance( HddsProtos.ReplicationFactor.THREE))) + .setBucketLayout(bucketArgs.getBucketLayout()) .setStorageType(bucketArgs.getStorageType()) .setVersioning(bucketArgs.getVersioning()) .setCreationTime(Time.now()) diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java index ab87f9c98e11..cd0fbfed4e65 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java @@ -80,8 +80,8 @@ private void checkContent(CompleteMultipartUploadRequest request) { List parts = request.getPartList(); - assertEquals(part1, parts.get(0).geteTag()); - assertEquals(part2, parts.get(1).geteTag()); + assertEquals(part1, parts.get(0).getETag()); + assertEquals(part2, parts.get(1).getETag()); } private CompleteMultipartUploadRequest unmarshall( diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java index 3e8beb2c3a1e..677367e6d812 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java @@ -79,17 +79,17 @@ public static void setUp() throws Exception { response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 2, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 3, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); } @Test diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java index eedee2855e7d..3c0c87a177f6 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java @@ -93,9 +93,9 @@ private Part uploadPart(String key, String uploadID, int partNumber, String Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(), partNumber, uploadID, body); assertEquals(200, response.getStatus()); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); - part.seteTag(response.getHeaderString("ETag")); + part.setETag(response.getHeaderString(OzoneConsts.ETAG)); part.setPartNumber(partNumber); return part; @@ -202,7 +202,7 @@ public void testMultipartInvalidPartError() throws Exception { Part part1 = uploadPart(key, uploadID, partNumber, content); // Change part name. - part1.seteTag("random"); + part1.setETag("random"); partsList.add(part1); content = "Multipart Upload 2"; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java index a773b8757981..d9595aeff796 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java @@ -32,6 +32,7 @@ import java.util.Map; import java.util.Scanner; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; @@ -91,7 +92,11 @@ public static void setUp() throws Exception { try (OutputStream stream = bucket .createKey(EXISTING_KEY, keyContent.length, ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, - ReplicationFactor.THREE), new HashMap<>())) { + ReplicationFactor.THREE), + new HashMap() {{ + put(OzoneConsts.ETAG, DigestUtils.md5Hex(EXISTING_KEY_CONTENT)); + }} + )) { stream.write(keyContent); } @@ -327,9 +332,9 @@ private Part uploadPart(String key, String uploadID, int partNumber, String Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(), partNumber, uploadID, body); assertEquals(200, response.getStatus()); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); - part.seteTag(response.getHeaderString("ETag")); + part.setETag(response.getHeaderString(OzoneConsts.ETAG)); part.setPartNumber(partNumber); return part; @@ -377,7 +382,7 @@ private Part uploadPartWithCopy(String key, String uploadID, int partNumber, assertNotNull(result.getETag()); assertNotNull(result.getLastModified()); Part part = new Part(); - part.seteTag(result.getETag()); + part.setETag(result.getETag()); part.setPartNumber(partNumber); return part; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java index ae8279f25861..abae489b4135 100644 --- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java @@ -23,40 +23,56 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; +import java.util.stream.Stream; +import java.io.OutputStream; +import java.security.MessageDigest; import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.MultivaluedHashMap; +import javax.ws.rs.core.MultivaluedMap; import javax.ws.rs.core.Response; import org.apache.commons.io.IOUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; -import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.apache.http.HttpStatus; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.mockito.MockedStatic; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_COPY_DIRECTIVE_HEADER; +import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX; import static org.apache.hadoop.ozone.s3.util.S3Consts.DECODED_CONTENT_LENGTH_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Utils.urlEncode; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -64,104 +80,104 @@ /** * Test put object. 
*/ -public class TestObjectPut { - public static final String CONTENT = "0123456789"; - private String bucketName = "b1"; - private String keyName = "key=value/1"; - private String destBucket = "b2"; - private String destkey = "key=value/2"; - private String nonexist = "nonexist"; +class TestObjectPut { + private static final String CONTENT = "0123456789"; + private static final String FSO_BUCKET_NAME = "fso-bucket"; + private static final String BUCKET_NAME = "b1"; + private static final String KEY_NAME = "key=value/1"; + private static final String DEST_BUCKET_NAME = "b2"; + private static final String DEST_KEY = "key=value/2"; + private static final String NO_SUCH_BUCKET = "nonexist"; + private OzoneClient clientStub; private ObjectEndpoint objectEndpoint; + private HttpHeaders headers; + private OzoneBucket bucket; + private OzoneBucket fsoBucket; + + static Stream argumentsForPutObject() { + ReplicationConfig ratis3 = RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE); + ECReplicationConfig ec = new ECReplicationConfig("rs-3-2-1024K"); + return Stream.of( + Arguments.of(0, ratis3), + Arguments.of(10, ratis3), + Arguments.of(0, ec), + Arguments.of(10, ec) + ); + } @BeforeEach - public void setup() throws IOException { + void setup() throws IOException { + OzoneConfiguration config = new OzoneConfiguration(); + //Create client stub and object store stub. clientStub = new OzoneClientStub(); // Create bucket - clientStub.getObjectStore().createS3Bucket(bucketName); - clientStub.getObjectStore().createS3Bucket(destBucket); + clientStub.getObjectStore().createS3Bucket(BUCKET_NAME); + bucket = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME); + clientStub.getObjectStore().createS3Bucket(DEST_BUCKET_NAME); // Create PutObject and setClient to OzoneClientStub - objectEndpoint = new ObjectEndpoint(); + objectEndpoint = spy(new ObjectEndpoint()); objectEndpoint.setClient(clientStub); - objectEndpoint.setOzoneConfiguration(new OzoneConfiguration()); + objectEndpoint.setOzoneConfiguration(config); + + headers = mock(HttpHeaders.class); + objectEndpoint.setHeaders(headers); + + String volumeName = config.get(OzoneConfigKeys.OZONE_S3_VOLUME_NAME, + OzoneConfigKeys.OZONE_S3_VOLUME_NAME_DEFAULT); + OzoneVolume volume = clientStub.getObjectStore().getVolume(volumeName); + BucketArgs fsoBucketArgs = BucketArgs.newBuilder() + .setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED) + .build(); + volume.createBucket(FSO_BUCKET_NAME, fsoBucketArgs); + fsoBucket = volume.getBucket(FSO_BUCKET_NAME); } - @Test - public void testPutObject() throws IOException, OS3Exception { + @ParameterizedTest + @MethodSource("argumentsForPutObject") + void testPutObject(int length, ReplicationConfig replication) throws IOException, OS3Exception { //GIVEN - HttpHeaders headers = mock(HttpHeaders.class); - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); + final String content = RandomStringUtils.randomAlphanumeric(length); + ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); + bucket.setReplicationConfig(replication); //WHEN - Response response = objectEndpoint.put(bucketName, keyName, CONTENT - .length(), 1, null, body); - + Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, length, 1, null, body); //THEN - OzoneInputStream ozoneInputStream = - clientStub.getObjectStore().getS3Bucket(bucketName) - .readKey(keyName); - String keyContent = - IOUtils.toString(ozoneInputStream, UTF_8); - 
assertEquals(200, response.getStatus()); - assertEquals(CONTENT, keyContent); - } - @Test - public void testPutObjectWithECReplicationConfig() - throws IOException, OS3Exception { - //GIVEN - HttpHeaders headers = mock(HttpHeaders.class); - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); - ECReplicationConfig ecReplicationConfig = - new ECReplicationConfig("rs-3-2-1024K"); - clientStub.getObjectStore().getS3Bucket(bucketName) - .setReplicationConfig(ecReplicationConfig); - Response response = objectEndpoint.put(bucketName, keyName, CONTENT - .length(), 1, null, body); - - assertEquals(ecReplicationConfig, - clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName) - .getReplicationConfig()); - OzoneInputStream ozoneInputStream = - clientStub.getObjectStore().getS3Bucket(bucketName) - .readKey(keyName); - String keyContent = - IOUtils.toString(ozoneInputStream, UTF_8); + String keyContent; + try (InputStream input = bucket.readKey(KEY_NAME)) { + keyContent = IOUtils.toString(input, UTF_8); + } + assertEquals(content, keyContent); - assertEquals(200, response.getStatus()); - assertEquals(CONTENT, keyContent); + OzoneKeyDetails keyDetails = bucket.getKey(KEY_NAME); + assertEquals(replication, keyDetails.getReplicationConfig()); + assertNotNull(keyDetails.getMetadata()); + assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); } @Test - public void testPutObjectContentLength() throws IOException, OS3Exception { + void testPutObjectContentLength() throws IOException, OS3Exception { // The contentLength specified when creating the Key should be the same as // the Content-Length, the key Commit will compare the Content-Length with // the actual length of the data written. 
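Note on the ETag assertions these tests now make: as the hunks above show, the gateway streams the request body through a DigestInputStream backed by an MD5 MessageDigest and stores the lower-case hex digest as the key's ETag metadata. The self-contained sketch below is not part of the patch (the class and method names are invented); it reproduces that calculation with plain JDK classes, which is handy for working out the expected ETag of a given payload by hand. It also hints at why the patch resets the thread-local digest in its finally blocks: a MessageDigest keeps internal state until digest() or reset() is called, so an aborted upload would otherwise leak partial state into the next request served by the same thread.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.security.DigestInputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public final class EtagSketch {

  // Streams the input through a DigestInputStream and returns the lower-case
  // MD5 hex string, i.e. the value the gateway stores as the ETag.
  static String md5Etag(InputStream in) throws IOException, NoSuchAlgorithmException {
    MessageDigest md5 = MessageDigest.getInstance("MD5");
    try (DigestInputStream digestIn = new DigestInputStream(in, md5)) {
      byte[] buffer = new byte[8192];
      while (digestIn.read(buffer) != -1) {
        // Reading is enough: DigestInputStream updates the digest as bytes pass through.
      }
    }
    StringBuilder hex = new StringBuilder();
    for (byte b : md5.digest()) { // digest() also resets the MessageDigest for reuse
      hex.append(String.format("%02x", b));
    }
    return hex.toString();
  }

  public static void main(String[] args) throws Exception {
    // Same payload the tests use as CONTENT; prints a 32-character lower-case hex string.
    String etag = md5Etag(new ByteArrayInputStream("0123456789".getBytes(StandardCharsets.UTF_8)));
    System.out.println(etag);
  }
}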
- HttpHeaders headers = mock(HttpHeaders.class); ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); long dataSize = CONTENT.length(); - objectEndpoint.put(bucketName, keyName, dataSize, 0, null, body); - assertEquals(dataSize, getKeyDataSize(keyName)); + objectEndpoint.put(BUCKET_NAME, KEY_NAME, dataSize, 0, null, body); + assertEquals(dataSize, getKeyDataSize()); } @Test - public void testPutObjectContentLengthForStreaming() + void testPutObjectContentLengthForStreaming() throws IOException, OS3Exception { - HttpHeaders headers = mock(HttpHeaders.class); - objectEndpoint.setHeaders(headers); - String chunkedContent = "0a;chunk-signature=signature\r\n" + "1234567890\r\n" + "05;chunk-signature=signature\r\n" @@ -172,22 +188,19 @@ public void testPutObjectContentLengthForStreaming() when(headers.getHeaderString(DECODED_CONTENT_LENGTH_HEADER)) .thenReturn("15"); - objectEndpoint.put(bucketName, keyName, chunkedContent.length(), 0, null, + objectEndpoint.put(BUCKET_NAME, KEY_NAME, chunkedContent.length(), 0, null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); - assertEquals(15, getKeyDataSize(keyName)); + assertEquals(15, getKeyDataSize()); } - private long getKeyDataSize(String key) throws IOException { - return clientStub.getObjectStore().getS3Bucket(bucketName) - .getKey(key).getDataSize(); + private long getKeyDataSize() throws IOException { + return clientStub.getObjectStore().getS3Bucket(BUCKET_NAME) + .getKey(KEY_NAME).getDataSize(); } @Test - public void testPutObjectWithSignedChunks() throws IOException, OS3Exception { + void testPutObjectWithSignedChunks() throws IOException, OS3Exception { //GIVEN - HttpHeaders headers = mock(HttpHeaders.class); - objectEndpoint.setHeaders(headers); - String chunkedContent = "0a;chunk-signature=signature\r\n" + "1234567890\r\n" + "05;chunk-signature=signature\r\n" @@ -199,202 +212,296 @@ public void testPutObjectWithSignedChunks() throws IOException, OS3Exception { .thenReturn("15"); //WHEN - Response response = objectEndpoint.put(bucketName, keyName, + Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, chunkedContent.length(), 1, null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); //THEN OzoneInputStream ozoneInputStream = - clientStub.getObjectStore().getS3Bucket(bucketName) - .readKey(keyName); + clientStub.getObjectStore().getS3Bucket(BUCKET_NAME) + .readKey(KEY_NAME); String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); assertEquals(200, response.getStatus()); assertEquals("1234567890abcde", keyContent); + assertNotNull(keyDetails.getMetadata()); + assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); + } + + @Test + public void testPutObjectMessageDigestResetDuringException() throws OS3Exception { + MessageDigest messageDigest = mock(MessageDigest.class); + try (MockedStatic mocked = mockStatic(IOUtils.class)) { + // For example, EOFException during put-object due to client cancelling the operation before it completes + mocked.when(() -> IOUtils.copyLarge(any(InputStream.class), any(OutputStream.class))) + .thenThrow(IOException.class); + when(objectEndpoint.getMessageDigestInstance()).thenReturn(messageDigest); + + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + try { + objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT + .length(), 1, null, body); + fail("Should throw 
IOException"); + } catch (IOException ignored) { + // Verify that the message digest is reset so that the instance can be reused for the + // next request in the same thread + verify(messageDigest, times(1)).reset(); + } + } } @Test - public void testCopyObject() throws IOException, OS3Exception { + void testCopyObject() throws IOException, OS3Exception { // Put object in to source bucket - HttpHeaders headers = mock(HttpHeaders.class); ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); - keyName = "sourceKey"; - Response response = objectEndpoint.put(bucketName, keyName, + // Add some custom metadata + MultivaluedMap metadataHeaders = new MultivaluedHashMap<>(); + metadataHeaders.putSingle(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-1", "custom-value-1"); + metadataHeaders.putSingle(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-2", "custom-value-2"); + when(headers.getRequestHeaders()).thenReturn(metadataHeaders); + // Add COPY metadata directive (default) + when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("COPY"); + + Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, body); OzoneInputStream ozoneInputStream = clientStub.getObjectStore() - .getS3Bucket(bucketName) - .readKey(keyName); + .getS3Bucket(BUCKET_NAME) + .readKey(KEY_NAME); String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); assertEquals(200, response.getStatus()); assertEquals(CONTENT, keyContent); + assertNotNull(keyDetails.getMetadata()); + assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); + assertThat(keyDetails.getMetadata().get("custom-key-1")).isEqualTo("custom-value-1"); + assertThat(keyDetails.getMetadata().get("custom-key-2")).isEqualTo("custom-value-2"); + String sourceETag = keyDetails.getMetadata().get(OzoneConsts.ETAG); + + // This will be ignored since the copy directive is COPY + metadataHeaders.putSingle(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-3", "custom-value-3"); // Add copy header, and then call put when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - bucketName + "/" + urlEncode(keyName)); + BUCKET_NAME + "/" + urlEncode(KEY_NAME)); - response = objectEndpoint.put(destBucket, destkey, CONTENT.length(), 1, + response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, null, body); // Check destination key and response - ozoneInputStream = clientStub.getObjectStore().getS3Bucket(destBucket) - .readKey(destkey); + ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME) + .readKey(DEST_KEY); keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + OzoneKeyDetails sourceKeyDetails = clientStub.getObjectStore() + .getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); + OzoneKeyDetails destKeyDetails = clientStub.getObjectStore() + .getS3Bucket(DEST_BUCKET_NAME).getKey(DEST_KEY); assertEquals(200, response.getStatus()); assertEquals(CONTENT, keyContent); + assertNotNull(keyDetails.getMetadata()); + assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); + // Source key eTag should remain unchanged and the dest key should have + // the same Etag since the key content is the same + assertEquals(sourceETag, sourceKeyDetails.getMetadata().get(OzoneConsts.ETAG)); + assertEquals(sourceETag, destKeyDetails.getMetadata().get(OzoneConsts.ETAG)); + 
assertThat(destKeyDetails.getMetadata().get("custom-key-1")).isEqualTo("custom-value-1"); + assertThat(destKeyDetails.getMetadata().get("custom-key-2")).isEqualTo("custom-value-2"); + assertThat(destKeyDetails.getMetadata().containsKey("custom-key-3")).isFalse(); + + // Now use REPLACE metadata directive (default) and remove some custom metadata used in the source key + when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("REPLACE"); + metadataHeaders.remove(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-1"); + metadataHeaders.remove(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-2"); + + response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, + null, body); - // source and dest same + ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME) + .readKey(DEST_KEY); + + keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + sourceKeyDetails = clientStub.getObjectStore() + .getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); + destKeyDetails = clientStub.getObjectStore() + .getS3Bucket(DEST_BUCKET_NAME).getKey(DEST_KEY); + + assertEquals(200, response.getStatus()); + assertEquals(CONTENT, keyContent); + assertNotNull(keyDetails.getMetadata()); + assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); + // Source key eTag should remain unchanged and the dest key should have + // the same Etag since the key content is the same + assertEquals(sourceETag, sourceKeyDetails.getMetadata().get(OzoneConsts.ETAG)); + assertEquals(sourceETag, destKeyDetails.getMetadata().get(OzoneConsts.ETAG)); + assertThat(destKeyDetails.getMetadata().containsKey("custom-key-1")).isFalse(); + assertThat(destKeyDetails.getMetadata().containsKey("custom-key-2")).isFalse(); + assertThat(destKeyDetails.getMetadata().get("custom-key-3")).isEqualTo("custom-value-3"); + + + // wrong copy metadata directive + when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("INVALID"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - bucketName, keyName, CONTENT.length(), 1, null, body), + DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, null, body), + "test copy object failed"); + assertThat(e.getHttpCode()).isEqualTo(400); + assertThat(e.getCode()).isEqualTo("InvalidArgument"); + assertThat(e.getErrorMessage()).contains("The metadata directive specified is invalid"); + + when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("COPY"); + + // source and dest same + e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( + BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, body), "test copy object failed"); assertThat(e.getErrorMessage()).contains("This copy request is illegal"); // source bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - nonexist + "/" + urlEncode(keyName)); - e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(destBucket, - destkey, CONTENT.length(), 1, null, body), "test copy object failed"); + NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME)); + e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(DEST_BUCKET_NAME, + DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); // dest bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - bucketName + "/" + urlEncode(keyName)); - e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(nonexist, - destkey, CONTENT.length(), 1, null, body), "test copy object failed"); + 
BUCKET_NAME + "/" + urlEncode(KEY_NAME)); + e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET, + DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); //Both source and dest bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - nonexist + "/" + urlEncode(keyName)); - e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(nonexist, - destkey, CONTENT.length(), 1, null, body), "test copy object failed"); + NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME)); + e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET, + DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); // source key not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - bucketName + "/" + urlEncode(nonexist)); + BUCKET_NAME + "/" + urlEncode(NO_SUCH_BUCKET)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - "nonexistent", keyName, CONTENT.length(), 1, null, body), + "nonexistent", KEY_NAME, CONTENT.length(), 1, null, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); } @Test - public void testInvalidStorageType() throws IOException { - HttpHeaders headers = mock(HttpHeaders.class); + public void testCopyObjectMessageDigestResetDuringException() throws IOException, OS3Exception { + // Put object in to source bucket + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + + Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, + CONTENT.length(), 1, null, body); + + OzoneInputStream ozoneInputStream = clientStub.getObjectStore() + .getS3Bucket(BUCKET_NAME) + .readKey(KEY_NAME); + + String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); + + assertEquals(200, response.getStatus()); + assertEquals(CONTENT, keyContent); + assertNotNull(keyDetails.getMetadata()); + assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); + + MessageDigest messageDigest = mock(MessageDigest.class); + try (MockedStatic mocked = mockStatic(IOUtils.class)) { + // Add the mocked methods only during the copy request + when(objectEndpoint.getMessageDigestInstance()).thenReturn(messageDigest); + mocked.when(() -> IOUtils.copyLarge(any(InputStream.class), any(OutputStream.class))) + .thenThrow(IOException.class); + + // Add copy header, and then call put + when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( + BUCKET_NAME + "/" + urlEncode(KEY_NAME)); + + try { + objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, + null, body); + fail("Should throw IOException"); + } catch (IOException ignored) { + // Verify that the message digest is reset so that the instance can be reused for the + // next request in the same thread + verify(messageDigest, times(1)).reset(); + } + } + } + + @Test + void testInvalidStorageType() { ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); - keyName = "sourceKey"; when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("random"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - bucketName, keyName, CONTENT.length(), 1, null, body)); + BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, body)); assertEquals(S3ErrorTable.INVALID_ARGUMENT.getErrorMessage(), 
e.getErrorMessage()); assertEquals("random", e.getResource()); } @Test - public void testEmptyStorageType() throws IOException, OS3Exception { - HttpHeaders headers = mock(HttpHeaders.class); + void testEmptyStorageType() throws IOException, OS3Exception { ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); - keyName = "sourceKey"; when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(""); - objectEndpoint.put(bucketName, keyName, CONTENT + objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT .length(), 1, null, body); OzoneKeyDetails key = - clientStub.getObjectStore().getS3Bucket(bucketName) - .getKey(keyName); - + clientStub.getObjectStore().getS3Bucket(BUCKET_NAME) + .getKey(KEY_NAME); //default type is set - assertEquals(ReplicationType.RATIS, key.getReplicationType()); + assertEquals( + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), + key.getReplicationConfig()); } @Test - public void testDirectoryCreation() throws IOException, + void testDirectoryCreation() throws IOException, OS3Exception { // GIVEN - final String path = "dir"; - final long length = 0L; - final int partNumber = 0; - final String uploadId = ""; - final InputStream body = null; - final HttpHeaders headers = mock(HttpHeaders.class); - final ObjectEndpoint objEndpoint = new ObjectEndpoint(); - objEndpoint.setOzoneConfiguration(new OzoneConfiguration()); - objEndpoint.setHeaders(headers); - final OzoneClient client = mock(OzoneClient.class); - objEndpoint.setClient(client); - final ObjectStore objectStore = mock(ObjectStore.class); - final OzoneVolume volume = mock(OzoneVolume.class); - final OzoneBucket bucket = mock(OzoneBucket.class); - final ClientProtocol protocol = mock(ClientProtocol.class); + final String path = "dir/"; // WHEN - when(client.getObjectStore()).thenReturn(objectStore); - when(client.getObjectStore().getS3Volume()).thenReturn(volume); - when(volume.getBucket(bucketName)).thenReturn(bucket); - when(bucket.getBucketLayout()) - .thenReturn(BucketLayout.FILE_SYSTEM_OPTIMIZED); - when(client.getProxy()).thenReturn(protocol); - final Response response = objEndpoint.put(bucketName, path, length, - partNumber, uploadId, body); + try (Response response = objectEndpoint.put(fsoBucket.getName(), path, + 0L, 0, "", null)) { + assertEquals(HttpStatus.SC_OK, response.getStatus()); + } // THEN - assertEquals(HttpStatus.SC_OK, response.getStatus()); - verify(protocol).createDirectory(any(), eq(bucketName), eq(path)); + OzoneKeyDetails key = fsoBucket.getKey(path); + assertThat(key.isFile()).as("directory").isFalse(); } @Test - public void testDirectoryCreationOverFile() throws IOException { + void testDirectoryCreationOverFile() throws IOException, OS3Exception { // GIVEN final String path = "key"; - final long length = 0L; - final int partNumber = 0; - final String uploadId = ""; final ByteArrayInputStream body = - new ByteArrayInputStream("content".getBytes(UTF_8)); - final HttpHeaders headers = mock(HttpHeaders.class); - final ObjectEndpoint objEndpoint = new ObjectEndpoint(); - objEndpoint.setOzoneConfiguration(new OzoneConfiguration()); - objEndpoint.setHeaders(headers); - final OzoneClient client = mock(OzoneClient.class); - objEndpoint.setClient(client); - final ObjectStore objectStore = mock(ObjectStore.class); - final OzoneVolume volume = mock(OzoneVolume.class); - final OzoneBucket bucket = mock(OzoneBucket.class); - final ClientProtocol protocol = mock(ClientProtocol.class); + new 
ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + objectEndpoint.put(FSO_BUCKET_NAME, path, CONTENT.length(), 0, "", body); // WHEN - when(client.getObjectStore()).thenReturn(objectStore); - when(client.getObjectStore().getS3Volume()).thenReturn(volume); - when(volume.getBucket(bucketName)).thenReturn(bucket); - when(bucket.getBucketLayout()) - .thenReturn(BucketLayout.FILE_SYSTEM_OPTIMIZED); - when(client.getProxy()).thenReturn(protocol); - doThrow(new OMException(OMException.ResultCodes.FILE_ALREADY_EXISTS)) - .when(protocol) - .createDirectory(any(), any(), any()); + final OS3Exception exception = assertThrows(OS3Exception.class, + () -> objectEndpoint + .put(FSO_BUCKET_NAME, path + "/", 0, 0, "", null) + .close()); // THEN - final OS3Exception exception = assertThrows(OS3Exception.class, - () -> objEndpoint - .put(bucketName, path, length, partNumber, uploadId, body)); - assertEquals("Conflict", exception.getCode()); - assertEquals(409, exception.getHttpCode()); - verify(protocol, times(1)).createDirectory(any(), any(), any()); + assertEquals(S3ErrorTable.NO_OVERWRITE.getCode(), exception.getCode()); + assertEquals(S3ErrorTable.NO_OVERWRITE.getHttpCode(), exception.getHttpCode()); } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java index 90d490dea0b6..aecc56fe172b 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java @@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.s3.endpoint; +import org.apache.commons.io.IOUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; @@ -28,12 +29,16 @@ import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.mockito.MockedStatic; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import java.io.ByteArrayInputStream; import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.security.MessageDigest; import java.util.UUID; import static java.net.HttpURLConnection.HTTP_NOT_FOUND; @@ -44,7 +49,13 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; /** @@ -90,7 +101,7 @@ public void testPartUpload() throws Exception { response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); } @@ -112,16 +123,16 @@ public void testPartUploadWithOverride() throws Exception { response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); - String eTag = 
response.getHeaderString("ETag"); + String eTag = response.getHeaderString(OzoneConsts.ETAG); // Upload part again with same part Number, the ETag should be changed. content = "Multipart Upload Changed"; response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); - assertNotEquals(eTag, response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); + assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); } @@ -194,6 +205,53 @@ public void testPartUploadContentLength() throws IOException, OS3Exception { assertContentLength(uploadID, keyName, content.length()); } + @Test + public void testPartUploadMessageDigestResetDuringException() throws IOException, OS3Exception { + OzoneClient clientStub = new OzoneClientStub(); + clientStub.getObjectStore().createS3Bucket(OzoneConsts.S3_BUCKET); + + + HttpHeaders headers = mock(HttpHeaders.class); + when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn( + "STANDARD"); + + ObjectEndpoint objectEndpoint = spy(new ObjectEndpoint()); + + objectEndpoint.setHeaders(headers); + objectEndpoint.setClient(clientStub); + objectEndpoint.setOzoneConfiguration(new OzoneConfiguration()); + + Response response = objectEndpoint.initializeMultipartUpload(OzoneConsts.S3_BUCKET, + OzoneConsts.KEY); + MultipartUploadInitiateResponse multipartUploadInitiateResponse = + (MultipartUploadInitiateResponse) response.getEntity(); + assertNotNull(multipartUploadInitiateResponse.getUploadID()); + String uploadID = multipartUploadInitiateResponse.getUploadID(); + + assertEquals(200, response.getStatus()); + + MessageDigest messageDigest = mock(MessageDigest.class); + try (MockedStatic mocked = mockStatic(IOUtils.class)) { + // Add the mocked methods only during the copy request + when(objectEndpoint.getMessageDigestInstance()).thenReturn(messageDigest); + mocked.when(() -> IOUtils.copyLarge(any(InputStream.class), any(OutputStream.class))) + .thenThrow(IOException.class); + + String content = "Multipart Upload"; + ByteArrayInputStream body = + new ByteArrayInputStream(content.getBytes(UTF_8)); + try { + objectEndpoint.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, + content.length(), 1, uploadID, body); + fail("Should throw IOException"); + } catch (IOException ignored) { + // Verify that the message digest is reset so that the instance can be reused for the + // next request in the same thread + verify(messageDigest, times(1)).reset(); + } + } + } + private void assertContentLength(String uploadID, String key, long contentLength) throws IOException { OzoneMultipartUploadPartListParts parts = diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java index 787aa6e8777a..28ce32e74707 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.exception.OS3Exception; @@ -67,7 +68,7 @@ public static void setUp() throws 
Exception { REST.setClient(client); OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); REST.setOzoneConfiguration(conf); REST.init(); @@ -95,7 +96,7 @@ public void testPartUpload() throws Exception { response = REST.put(S3BUCKET, S3KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); } @@ -116,16 +117,16 @@ public void testPartUploadWithOverride() throws Exception { response = REST.put(S3BUCKET, S3KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); - String eTag = response.getHeaderString("ETag"); + String eTag = response.getHeaderString(OzoneConsts.ETAG); // Upload part again with same part Number, the ETag should be changed. content = "Multipart Upload Changed"; response = REST.put(S3BUCKET, S3KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); - assertNotEquals(eTag, response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); + assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java index f92496249e20..d988b4302308 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java @@ -81,7 +81,7 @@ public static void setUp() throws Exception { REST.setClient(client); OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); conf.setStorageSize(OZONE_FS_DATASTREAM_AUTO_THRESHOLD, 1, StorageUnit.BYTES); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java index 0585fea000c9..3e7214ce988b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java @@ -17,18 +17,16 @@ */ package org.apache.hadoop.ozone.admin.nssummary; -import com.google.gson.internal.LinkedTreeMap; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.ozone.shell.ListOptions; import picocli.CommandLine; -import java.util.ArrayList; -import java.util.HashMap; import java.util.concurrent.Callable; - +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.ArrayNode; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.getResponseMap; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.makeHttpCall; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.parseInputPath; import static 
org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printEmptyPathRequest; @@ -101,21 +99,20 @@ public Void call() throws Exception { return null; } - HashMap duResponse = getResponseMap(response); + JsonNode duResponse = JsonUtils.readTree(response); - if (duResponse.get("status").equals("PATH_NOT_FOUND")) { + if ("PATH_NOT_FOUND".equals(duResponse.path("status").asText(""))) { printPathNotFound(); } else { if (parent.isNotValidBucketOrOBSBucket(path)) { printBucketReminder(); } - long totalSize = (long)(double)duResponse.get("size"); - + long totalSize = duResponse.path("size").asLong(-1); if (!noHeader) { printWithUnderline("Path", false); printKVSeparator(); - System.out.println(duResponse.get("path")); + System.out.println(duResponse.path("path").asText("")); printWithUnderline("Total Size", false); printKVSeparator(); @@ -124,11 +121,11 @@ public Void call() throws Exception { if (withReplica) { printWithUnderline("Total Disk Usage", false); printKVSeparator(); - long du = (long)(double)duResponse.get("sizeWithReplica"); + long du = duResponse.path("sizeWithReplica").asLong(-1); System.out.println(FileUtils.byteCountToDisplaySize(du)); } - long sizeDirectKey = (long)(double)duResponse.get("sizeDirectKey"); + long sizeDirectKey = duResponse.path("sizeDirectKey").asLong(-1); if (!listFiles && sizeDirectKey != -1) { printWithUnderline("Size of Direct Keys", false); printKVSeparator(); @@ -137,7 +134,7 @@ public Void call() throws Exception { printNewLines(1); } - if ((double)duResponse.get("subPathCount") == 0) { + if (duResponse.path("subPathCount").asInt(-1) == 0) { if (totalSize == 0) { // the object is empty System.out.println("The object is empty.\n" + @@ -160,20 +157,19 @@ public Void call() throws Exception { seekStr = ""; } - ArrayList duData = (ArrayList)duResponse.get("subPaths"); + ArrayNode subPaths = (ArrayNode) duResponse.path("subPaths"); int cnt = 0; - for (int i = 0; i < duData.size(); ++i) { + for (JsonNode subPathDU : subPaths) { if (cnt >= limit) { break; } - LinkedTreeMap subPathDU = (LinkedTreeMap) duData.get(i); - String subPath = subPathDU.get("path").toString(); + String subPath = subPathDU.path("path").asText(""); // differentiate key from other types - if (!(boolean)subPathDU.get("isKey")) { + if (!subPathDU.path("isKey").asBoolean(false)) { subPath += OM_KEY_PREFIX; } - long size = (long)(double)subPathDU.get("size"); - long sizeWithReplica = (long)(double)subPathDU.get("sizeWithReplica"); + long size = subPathDU.path("size").asLong(-1); + long sizeWithReplica = subPathDU.path("sizeWithReplica").asLong(-1); if (subPath.startsWith(seekStr)) { printDURow(subPath, size, sizeWithReplica); ++cnt; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java index f74ee109504c..0af263dbe31d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java @@ -17,15 +17,14 @@ */ package org.apache.hadoop.ozone.admin.nssummary; +import com.fasterxml.jackson.databind.JsonNode; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.server.JsonUtils; import picocli.CommandLine; -import java.util.ArrayList; -import java.util.HashMap; import java.util.concurrent.Callable; -import static 
org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.getResponseMap; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.makeHttpCall; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printEmptyPathRequest; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printBucketReminder; @@ -73,11 +72,11 @@ public Void call() throws Exception { printNewLines(1); return null; } - HashMap distResponse = getResponseMap(response); + JsonNode distResponse = JsonUtils.readTree(response); - if (distResponse.get("status").equals("PATH_NOT_FOUND")) { + if ("PATH_NOT_FOUND".equals(distResponse.path("status").asText())) { printPathNotFound(); - } else if (distResponse.get("status").equals("TYPE_NOT_APPLICABLE")) { + } else if ("TYPE_NOT_APPLICABLE".equals(distResponse.path("status").asText())) { printTypeNA("File Size Distribution"); } else { if (parent.isNotValidBucketOrOBSBucket(path)) { @@ -85,11 +84,11 @@ public Void call() throws Exception { } printWithUnderline("File Size Distribution", true); - ArrayList fileSizeDist = (ArrayList) distResponse.get("dist"); + JsonNode fileSizeDist = distResponse.path("dist"); double sum = 0; for (int i = 0; i < fileSizeDist.size(); ++i) { - sum += (double) fileSizeDist.get(i); + sum += fileSizeDist.get(i).asDouble(); } if (sum == 0) { printSpaces(2); @@ -100,11 +99,11 @@ public Void call() throws Exception { } for (int i = 0; i < fileSizeDist.size(); ++i) { - if ((double)fileSizeDist.get(i) == 0) { + if (fileSizeDist.get(i).asDouble() == 0) { continue; } String label = convertBinIndexToReadableRange(i); - printDistRow(label, (double) fileSizeDist.get(i), sum); + printDistRow(label, fileSizeDist.get(i).asDouble(), sum); } } printNewLines(1); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryCLIUtils.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryCLIUtils.java index 729aa20c5ce3..9aff2e9999ad 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryCLIUtils.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryCLIUtils.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.admin.nssummary; -import com.google.gson.Gson; import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -31,7 +30,6 @@ import java.net.HttpURLConnection; import java.net.URL; import java.nio.charset.StandardCharsets; -import java.util.HashMap; import static java.net.HttpURLConnection.HTTP_CREATED; import static java.net.HttpURLConnection.HTTP_OK; @@ -107,10 +105,6 @@ public static String makeHttpCall(StringBuffer url, String path, } } - public static HashMap getResponseMap(String response) { - return new Gson().fromJson(response, HashMap.class); - } - public static void printNewLines(int cnt) { for (int i = 0; i < cnt; ++i) { System.out.println(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java index 113193c929b4..1e4e719baf83 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java @@ -17,14 +17,14 @@ */ package org.apache.hadoop.ozone.admin.nssummary; +import 
com.fasterxml.jackson.databind.JsonNode; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.server.JsonUtils; import picocli.CommandLine; -import java.util.HashMap; import java.util.concurrent.Callable; -import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.getResponseMap; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.makeHttpCall; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printEmptyPathRequest; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printBucketReminder; @@ -73,11 +73,11 @@ public Void call() throws Exception { return null; } - HashMap quotaResponse = getResponseMap(response); + JsonNode quotaResponse = JsonUtils.readTree(response); - if (quotaResponse.get("status").equals("PATH_NOT_FOUND")) { + if ("PATH_NOT_FOUND".equals(quotaResponse.path("status").asText())) { printPathNotFound(); - } else if (quotaResponse.get("status").equals("TYPE_NOT_APPLICABLE")) { + } else if ("TYPE_NOT_APPLICABLE".equals(quotaResponse.path("status").asText())) { printTypeNA("Quota"); } else { if (parent.isNotValidBucketOrOBSBucket(path)) { @@ -85,8 +85,10 @@ public Void call() throws Exception { } printWithUnderline("Quota", true); - long quotaAllowed = (long)(double)quotaResponse.get("allowed"); - long quotaUsed = (long)(double)quotaResponse.get("used"); + + long quotaAllowed = quotaResponse.get("allowed").asLong(); + long quotaUsed = quotaResponse.get("used").asLong(); + printSpaces(2); System.out.print("Allowed"); printKVSeparator(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java index 9180274b9c70..d2060b8db526 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java @@ -17,13 +17,13 @@ */ package org.apache.hadoop.ozone.admin.nssummary; +import com.fasterxml.jackson.databind.JsonNode; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.server.JsonUtils; import picocli.CommandLine; -import java.util.HashMap; import java.util.concurrent.Callable; -import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.getResponseMap; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.makeHttpCall; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.parseInputPath; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printEmptyPathRequest; @@ -71,9 +71,9 @@ public Void call() throws Exception { printNewLines(1); return null; } - HashMap summaryResponse = getResponseMap(response); + JsonNode summaryResponse = JsonUtils.readTree(response); - if (summaryResponse.get("status").equals("PATH_NOT_FOUND")) { + if ("PATH_NOT_FOUND".equals(summaryResponse.path("status").asText())) { printPathNotFound(); } else { if (parent.isNotValidBucketOrOBSBucket(path)) { @@ -83,10 +83,11 @@ public Void call() throws Exception { printWithUnderline("Entity Type", false); printKVSeparator(); System.out.println(summaryResponse.get("type")); - int numVol = ((Double) summaryResponse.get("numVolume")).intValue(); - int numBucket = ((Double) summaryResponse.get("numBucket")).intValue(); - int numDir = ((Double) summaryResponse.get("numDir")).intValue(); - int 
numKey = ((Double) summaryResponse.get("numKey")).intValue(); + + int numVol = summaryResponse.path("numVolume").asInt(-1); + int numBucket = summaryResponse.path("numBucket").asInt(-1); + int numDir = summaryResponse.path("numDir").asInt(-1); + int numKey = summaryResponse.path("numKey").asInt(-1); if (numVol != -1) { printWithUnderline("Volumes", false); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/AbstractReconfigureSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/AbstractReconfigureSubCommand.java index 99af758b5bad..0a2666d30ee2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/AbstractReconfigureSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/AbstractReconfigureSubCommand.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.admin.reconfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import picocli.CommandLine; import java.util.List; @@ -44,10 +45,10 @@ public Void call() throws Exception { " --in-service-datanodes is not given."); return null; } - executeCommand(parent.getAddress()); + executeCommand(parent.getService(), parent.getAddress()); } return null; } - protected abstract void executeCommand(String address); + protected abstract void executeCommand(HddsProtos.NodeType nodeType, String address); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java index 0c25b1f67b3b..fc171e52d8d3 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.OzoneAdmin; import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.kohsuke.MetaInfServices; @@ -56,6 +57,11 @@ public class ReconfigureCommands implements Callable, @Spec private CommandSpec spec; + @CommandLine.Option(names = {"--service"}, + description = "service: OM, SCM, DATANODE.", + required = true) + private String service; + @CommandLine.Option(names = {"--address"}, description = "node address: or .", required = false) @@ -77,6 +83,10 @@ public String getAddress() { return address; } + public HddsProtos.NodeType getService() { + return HddsProtos.NodeType.valueOf(service); + } + @Override public Class getParentType() { return OzoneAdmin.class; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigurePropertiesSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigurePropertiesSubcommand.java index 60bc9c2ef557..99450715ac98 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigurePropertiesSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigurePropertiesSubcommand.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hdds.protocol.ReconfigureProtocol; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import picocli.CommandLine.Command; 
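The nssummary subcommands above (DiskUsage, FileSizeDist, QuotaUsage, Summary) all move from Gson HashMaps to Jackson and read fields defensively: path() never returns null, and the asText/asLong/asInt overloads take a default, so an absent field degrades to a sentinel instead of throwing. A small stand-alone sketch of that read pattern, assuming a plain ObjectMapper in place of Ozone's JsonUtils and a made-up response string:

```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

// Illustrative only; field names mirror the nssummary responses above,
// but the JSON value is a sample, not real Recon output.
public final class JsonNodeReadSketch {
  public static void main(String[] args) throws Exception {
    String response = "{\"status\":\"OK\",\"path\":\"/vol1/bucket1\",\"size\":1048576}";
    JsonNode root = new ObjectMapper().readTree(response);

    if ("PATH_NOT_FOUND".equals(root.path("status").asText(""))) {
      System.out.println("Path not found");
      return;
    }
    long size = root.path("size").asLong(-1);
    long sizeWithReplica = root.path("sizeWithReplica").asLong(-1); // absent -> -1
    System.out.printf("path=%s size=%d sizeWithReplica=%d%n",
        root.path("path").asText(""), size, sizeWithReplica);
  }
}
```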
import java.io.IOException; @@ -36,9 +37,9 @@ public class ReconfigurePropertiesSubcommand extends AbstractReconfigureSubCommand { @Override - protected void executeCommand(String address) { + protected void executeCommand(HddsProtos.NodeType nodeType, String address) { try (ReconfigureProtocol reconfigProxy = ReconfigureSubCommandUtil - .getSingleNodeReconfigureProxy(address)) { + .getSingleNodeReconfigureProxy(nodeType, address)) { String serverName = reconfigProxy.getServerName(); List properties = reconfigProxy.listReconfigureProperties(); System.out.printf("%s: Node [%s] Reconfigurable properties:%n", diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStartSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStartSubcommand.java index 86d95bf06457..ae2e5a1a7432 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStartSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStartSubcommand.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.ReconfigureProtocol; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import picocli.CommandLine.Command; import java.io.IOException; @@ -34,9 +35,9 @@ public class ReconfigureStartSubcommand extends AbstractReconfigureSubCommand { @Override - protected void executeCommand(String address) { + protected void executeCommand(HddsProtos.NodeType nodeType, String address) { try (ReconfigureProtocol reconfigProxy = ReconfigureSubCommandUtil - .getSingleNodeReconfigureProxy(address)) { + .getSingleNodeReconfigureProxy(nodeType, address)) { String serverName = reconfigProxy.getServerName(); reconfigProxy.startReconfigure(); System.out.printf("%s: Started reconfiguration task on node [%s].%n", diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStatusSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStatusSubcommand.java index 20e0ee8281cf..07bd2d6f4ac6 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStatusSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStatusSubcommand.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.ReconfigurationUtil; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.ReconfigureProtocol; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import picocli.CommandLine.Command; import java.io.IOException; @@ -40,9 +41,9 @@ public class ReconfigureStatusSubcommand extends AbstractReconfigureSubCommand { @Override - protected void executeCommand(String address) { + protected void executeCommand(HddsProtos.NodeType nodeType, String address) { try (ReconfigureProtocol reconfigProxy = ReconfigureSubCommandUtil - .getSingleNodeReconfigureProxy(address)) { + .getSingleNodeReconfigureProxy(nodeType, address)) { String serverName = reconfigProxy.getServerName(); ReconfigurationTaskStatus status = reconfigProxy.getReconfigureStatus(); System.out.printf("%s: Reconfiguring status for node [%s]: ", diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureSubCommandUtil.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureSubCommandUtil.java index e7e1860c2cb7..b24190dceacd 
100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureSubCommandUtil.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureSubCommandUtil.java @@ -34,7 +34,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Consumer; +import java.util.function.BiConsumer; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; @@ -47,23 +47,23 @@ private ReconfigureSubCommandUtil() { } public static ReconfigureProtocol getSingleNodeReconfigureProxy( - String address) throws IOException { + HddsProtos.NodeType nodeType, String address) throws IOException { OzoneConfiguration ozoneConf = new OzoneConfiguration(); UserGroupInformation user = UserGroupInformation.getCurrentUser(); InetSocketAddress nodeAddr = NetUtils.createSocketAddr(address); - return new ReconfigureProtocolClientSideTranslatorPB( + return new ReconfigureProtocolClientSideTranslatorPB(nodeType, nodeAddr, user, ozoneConf); } public static void parallelExecute(ExecutorService executorService, - List nodes, Consumer operation) { + List nodes, BiConsumer operation) { AtomicInteger successCount = new AtomicInteger(); AtomicInteger failCount = new AtomicInteger(); if (nodes != null) { for (T node : nodes) { executorService.submit(() -> { try { - operation.accept(node); + operation.accept(HddsProtos.NodeType.DATANODE, node); successCount.incrementAndGet(); } catch (Exception e) { failCount.incrementAndGet(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneManagersCommandHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneManagersCommandHandler.java index e8ced23b348f..f66f4f3abda2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneManagersCommandHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneManagersCommandHandler.java @@ -33,8 +33,7 @@ */ @Command(name = "ozonemanagers", aliases = {"-ozonemanagers"}, - description = "gets list of ozone storage container " - + "manager nodes in the cluster", + description = "gets list of Ozone Manager nodes in the cluster", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) public class OzoneManagersCommandHandler implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java index b71dd1c01566..5c311d49c93f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java @@ -24,11 +24,9 @@ import java.util.List; import java.util.Map; import java.util.HashSet; -import com.google.gson.GsonBuilder; -import com.google.gson.Gson; -import com.google.gson.JsonObject; -import com.google.gson.JsonArray; -import com.google.gson.JsonElement; + +import com.fasterxml.jackson.databind.node.ArrayNode; +import com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; @@ -40,10 +38,10 @@ import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import 
org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientException; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -81,13 +79,12 @@ protected void execute(OzoneClient client, OzoneAddress address) XceiverClientManager xceiverClientManager = containerOperationClient.getXceiverClientManager()) { OzoneManagerProtocol ozoneManagerClient = client.getObjectStore().getClientProxy().getOzoneManagerClient(); address.ensureKeyAddress(); - JsonElement element; - JsonObject result = new JsonObject(); + ObjectNode result = JsonUtils.createObjectNode(null); String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); String keyName = address.getKeyName(); - List tempchunks = null; - List chunkDetailsList = new ArrayList(); + List tempchunks; + List chunkDetailsList = new ArrayList<>(); HashSet chunkPaths = new HashSet<>(); OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) .setBucketName(bucketName).setKeyName(keyName).build(); @@ -103,7 +100,7 @@ protected void execute(OzoneClient client, OzoneAddress address) } ContainerLayoutVersion containerLayoutVersion = ContainerLayoutVersion .getConfiguredVersion(getConf()); - JsonArray responseArrayList = new JsonArray(); + ArrayNode responseArrayList = JsonUtils.createArrayNode(); for (OmKeyLocationInfo keyLocation : locationInfos) { ContainerChunkInfo containerChunkInfoVerbose = new ContainerChunkInfo(); ContainerChunkInfo containerChunkInfo = new ContainerChunkInfo(); @@ -129,24 +126,17 @@ protected void execute(OzoneClient client, OzoneAddress address) keyLocation.getBlockID().getDatanodeBlockIDProtobuf(); // doing a getBlock on all nodes Map - responses = null; - Map - readContainerResponses = null; - try { - responses = ContainerProtocolCalls.getBlockFromAllNodes(xceiverClient, - datanodeBlockID, keyLocation.getToken()); - readContainerResponses = - containerOperationClient.readContainerFromAllNodes( - keyLocation.getContainerID(), pipeline); - } catch (InterruptedException e) { - LOG.error("Execution interrupted due to " + e); - Thread.currentThread().interrupt(); - } - JsonArray responseFromAllNodes = new JsonArray(); - for (Map.Entry - entry : responses.entrySet()) { + responses = + ContainerProtocolCalls.getBlockFromAllNodes(xceiverClient, + keyLocation.getBlockID().getDatanodeBlockIDProtobuf(), + keyLocation.getToken()); + Map readContainerResponses = + containerOperationClient.readContainerFromAllNodes( + keyLocation.getContainerID(), pipeline); + ArrayNode responseFromAllNodes = JsonUtils.createArrayNode(); + for (Map.Entry entry : responses.entrySet()) { chunkPaths.clear(); - JsonObject jsonObj = new JsonObject(); + ObjectNode jsonObj = JsonUtils.createObjectNode(null); if (entry.getValue() == null) { LOG.error("Cant execute getBlock on this node"); continue; @@ -158,7 +148,7 @@ protected void execute(OzoneClient client, OzoneAddress address) String fileName = containerLayoutVersion.getChunkFile(new File( getChunkLocationPath(containerData.getContainerPath())), keyLocation.getBlockID(), - ChunkInfo.getFromProtoBuf(chunkInfo)).toString(); + chunkInfo.getChunkName()).toString(); chunkPaths.add(fileName); 
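On the write side, ChunkKeyHandler here (and ReadReplicas further down) now build their output with Jackson ObjectNode/ArrayNode instead of Gson JsonObject/JsonArray: put() for scalar fields, set() for nested nodes, and a default pretty printer at the end. A self-contained sketch of that pattern, again using a plain ObjectMapper rather than Ozone's JsonUtils helper, with sample values:

```java
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;

// Field names follow the ChunkKeyHandler output above; the values are made up.
public final class JacksonWriteSketch {
  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();

    ObjectNode result = mapper.createObjectNode();
    ArrayNode keyLocations = mapper.createArrayNode();

    ObjectNode location = mapper.createObjectNode();
    location.put("Datanode-HostName", "dn1.example.com"); // put() for scalars
    location.put("Datanode-IP", "10.0.0.1");
    location.put("Container-ID", 42L);
    location.put("Block-ID", 1001L);
    keyLocations.add(location);

    result.set("KeyLocations", keyLocations);             // set() for subtrees

    System.out.println(
        mapper.writerWithDefaultPrettyPrinter().writeValueAsString(result));
  }
}
```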
ChunkDetails chunkDetails = new ChunkDetails(); chunkDetails.setChunkName(fileName); @@ -178,29 +168,29 @@ protected void execute(OzoneClient client, OzoneAddress address) containerChunkInfoVerbose.setChunkType(blockChunksType); containerChunkInfo.setChunkType(blockChunksType); } - Gson gson = new GsonBuilder().create(); + if (isVerbose()) { - element = gson.toJsonTree(containerChunkInfoVerbose); + jsonObj.set("Locations", + JsonUtils.createObjectNode(containerChunkInfoVerbose)); } else { - element = gson.toJsonTree(containerChunkInfo); + jsonObj.set("Locations", + JsonUtils.createObjectNode(containerChunkInfo)); } - jsonObj.addProperty("Datanode-HostName", entry.getKey() - .getHostName()); - jsonObj.addProperty("Datanode-IP", entry.getKey() - .getIpAddress()); - jsonObj.addProperty("Container-ID", containerId); - jsonObj.addProperty("Block-ID", keyLocation.getLocalID()); - jsonObj.add("Locations", element); + jsonObj.put("Datanode-HostName", entry.getKey().getHostName()); + jsonObj.put("Datanode-IP", entry.getKey().getIpAddress()); + jsonObj.put("Container-ID", containerId); + jsonObj.put("Block-ID", keyLocation.getLocalID()); responseFromAllNodes.add(jsonObj); } responseArrayList.add(responseFromAllNodes); + } catch (InterruptedException e) { + throw new RuntimeException(e); } finally { xceiverClientManager.releaseClientForReadData(xceiverClient, false); } } - result.add("KeyLocations", responseArrayList); - Gson gson2 = new GsonBuilder().setPrettyPrinting().create(); - String prettyJson = gson2.toJson(result); + result.set("KeyLocations", responseArrayList); + String prettyJson = JsonUtils.toJsonStringWithDefaultPrettyPrinter(result); System.out.println(prettyJson); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java index f88e08413d4b..130c1bca0fc8 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java @@ -32,6 +32,7 @@ public class ContainerChunkInfo { private String containerPath; private List chunkInfos; + private HashSet files; private UUID pipelineID; private Pipeline pipeline; @@ -65,6 +66,27 @@ public void setChunkType(ChunkType chunkType) { this.chunkType = chunkType; } + public String getContainerPath() { + return containerPath; + } + + public List getChunkInfos() { + return chunkInfos; + } + + public HashSet getFiles() { + return files; + } + + public UUID getPipelineID() { + return pipelineID; + } + + public ChunkType getChunkType() { + return chunkType; + } + + @Override public String toString() { return "Container{" diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java index 30f2b4eca1fd..48ed7c74ae7d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java @@ -17,14 +17,11 @@ package org.apache.hadoop.ozone.debug; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonArray; -import com.google.gson.JsonObject; import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import 
org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientException; @@ -36,6 +33,8 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.shell.OzoneAddress; import org.apache.hadoop.ozone.shell.keys.KeyHandler; +import com.fasterxml.jackson.databind.node.ArrayNode; +import com.fasterxml.jackson.databind.node.ObjectNode; import jakarta.annotation.Nonnull; import org.kohsuke.MetaInfServices; import picocli.CommandLine; @@ -129,18 +128,17 @@ protected void execute(OzoneClient client, OzoneAddress address) replicasWithoutChecksum = noChecksumClient .getKeysEveryReplicas(volumeName, bucketName, keyName); - JsonObject result = new JsonObject(); - result.addProperty(JSON_PROPERTY_FILE_NAME, + ObjectNode result = JsonUtils.createObjectNode(null); + result.put(JSON_PROPERTY_FILE_NAME, volumeName + "/" + bucketName + "/" + keyName); - result.addProperty(JSON_PROPERTY_FILE_SIZE, keyInfoDetails.getDataSize()); + result.put(JSON_PROPERTY_FILE_SIZE, keyInfoDetails.getDataSize()); - JsonArray blocks = new JsonArray(); + ArrayNode blocks = JsonUtils.createArrayNode(); downloadReplicasAndCreateManifest(keyName, replicas, replicasWithoutChecksum, dir, blocks); - result.add(JSON_PROPERTY_FILE_BLOCKS, blocks); + result.set(JSON_PROPERTY_FILE_BLOCKS, blocks); - Gson gson = new GsonBuilder().setPrettyPrinting().create(); - String prettyJson = gson.toJson(result); + String prettyJson = JsonUtils.toJsonStringWithDefaultPrettyPrinter(result); String manifestFileName = keyName + "_manifest"; System.out.println("Writing manifest file : " + manifestFileName); @@ -158,25 +156,22 @@ private void downloadReplicasAndCreateManifest( Map> replicas, Map> replicasWithoutChecksum, - File dir, JsonArray blocks) throws IOException { + File dir, ArrayNode blocks) throws IOException { int blockIndex = 0; for (Map.Entry> block : replicas.entrySet()) { - JsonObject blockJson = new JsonObject(); - JsonArray replicasJson = new JsonArray(); + ObjectNode blockJson = JsonUtils.createObjectNode(null); + ArrayNode replicasJson = JsonUtils.createArrayNode(); blockIndex += 1; - blockJson.addProperty(JSON_PROPERTY_BLOCK_INDEX, blockIndex); + blockJson.put(JSON_PROPERTY_BLOCK_INDEX, blockIndex); OmKeyLocationInfo locationInfo = block.getKey(); - blockJson.addProperty(JSON_PROPERTY_BLOCK_CONTAINERID, + blockJson.put(JSON_PROPERTY_BLOCK_CONTAINERID, locationInfo.getContainerID()); - blockJson.addProperty(JSON_PROPERTY_BLOCK_LOCALID, - locationInfo.getLocalID()); - blockJson.addProperty(JSON_PROPERTY_BLOCK_LENGTH, - locationInfo.getLength()); - blockJson.addProperty(JSON_PROPERTY_BLOCK_OFFSET, - locationInfo.getOffset()); + blockJson.put(JSON_PROPERTY_BLOCK_LOCALID, locationInfo.getLocalID()); + blockJson.put(JSON_PROPERTY_BLOCK_LENGTH, locationInfo.getLength()); + blockJson.put(JSON_PROPERTY_BLOCK_OFFSET, locationInfo.getOffset()); BlockID blockID = locationInfo.getBlockID(); Map blockReplicasWithoutChecksum = @@ -186,12 +181,10 @@ private void downloadReplicasAndCreateManifest( replica : block.getValue().entrySet()) { DatanodeDetails datanode = replica.getKey(); - JsonObject replicaJson = new JsonObject(); + ObjectNode replicaJson = JsonUtils.createObjectNode(null); - replicaJson.addProperty(JSON_PROPERTY_REPLICA_HOSTNAME, - datanode.getHostName()); - replicaJson.addProperty(JSON_PROPERTY_REPLICA_UUID, - 
datanode.getUuidString()); + replicaJson.put(JSON_PROPERTY_REPLICA_HOSTNAME, datanode.getHostName()); + replicaJson.put(JSON_PROPERTY_REPLICA_UUID, datanode.getUuidString()); String fileName = keyName + "_block" + blockIndex + "_" + datanode.getHostName(); @@ -202,8 +195,7 @@ private void downloadReplicasAndCreateManifest( Files.copy(is, path, StandardCopyOption.REPLACE_EXISTING); } catch (IOException e) { Throwable cause = e.getCause(); - replicaJson.addProperty(JSON_PROPERTY_REPLICA_EXCEPTION, - e.getMessage()); + replicaJson.put(JSON_PROPERTY_REPLICA_EXCEPTION, e.getMessage()); if (cause instanceof OzoneChecksumException) { try (InputStream is = getReplica( blockReplicasWithoutChecksum, datanode)) { @@ -213,11 +205,10 @@ private void downloadReplicasAndCreateManifest( } replicasJson.add(replicaJson); } - blockJson.add(JSON_PROPERTY_BLOCK_REPLICAS, replicasJson); + blockJson.set(JSON_PROPERTY_BLOCK_REPLICAS, replicasJson); blocks.add(blockJson); - blockReplicasWithoutChecksum.values() - .forEach(each -> IOUtils.close(LOG, each)); + IOUtils.close(LOG, blockReplicasWithoutChecksum.values()); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java index 8afe064299f2..f9dccadb1124 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java @@ -265,7 +265,7 @@ private void shutdown() { */ private void reportAnyFailure() { if (failureCounter.get() > 0) { - throw new RuntimeException("One ore more freon test is failed."); + throw new RuntimeException("One or more freon test is failed."); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java new file mode 100644 index 000000000000..1d1b898a7d95 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
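Before the new freon generator below, one note on the s3gateway tests earlier in this patch: the message-digest reset checks in TestObjectPut and TestPartUpload share a single Mockito pattern, statically mocking IOUtils.copyLarge so the request fails and then verifying reset() on the mocked MessageDigest. A minimal sketch of that pattern under stated assumptions (JUnit 5 plus mockito-inline for static mocking; DigestingCopier below is hypothetical and only stands in for the endpoint code):

```java
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.mockStatic;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.security.MessageDigest;

import org.apache.commons.io.IOUtils;
import org.junit.jupiter.api.Test;
import org.mockito.MockedStatic;

class MessageDigestResetSketch {

  /** Hypothetical component that digests while copying and resets on failure. */
  static final class DigestingCopier {
    long copy(InputStream in, OutputStream out, MessageDigest digest) throws IOException {
      try {
        return IOUtils.copyLarge(in, out);
      } catch (IOException e) {
        digest.reset(); // keep the shared digest clean for the next request
        throw e;
      }
    }
  }

  @Test
  void resetsDigestWhenCopyFails() {
    MessageDigest digest = mock(MessageDigest.class);
    try (MockedStatic<IOUtils> mocked = mockStatic(IOUtils.class)) {
      // Force the copy to fail, as the patch does for the copy/part-upload paths.
      mocked.when(() -> IOUtils.copyLarge(any(InputStream.class), any(OutputStream.class)))
          .thenThrow(IOException.class);

      assertThrows(IOException.class, () -> new DigestingCopier().copy(
          new ByteArrayInputStream(new byte[0]), new ByteArrayOutputStream(), digest));

      verify(digest, times(1)).reset();
    }
  }
}
```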
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
    + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.freon; + +import com.codahale.metrics.Timer; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.scm.client.ClientTrustManager; +import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider; +import org.apache.hadoop.hdds.utils.HAUtils; +import org.apache.hadoop.ozone.OzoneSecurityUtil; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.scm.XceiverClientFactory; +import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; +import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Option; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; + +import static org.apache.hadoop.ozone.common.PayloadUtils.generatePayloadBytes; + +/** + * Utility to generate RPC request to DN. + */ +@Command(name = "dn-echo", + aliases = "dne", + description = + "Generate echo RPC request to DataNode", + versionProvider = HddsVersionProvider.class, + mixinStandardHelpOptions = true, + showDefaultValues = true) +public class DNRPCLoadGenerator extends BaseFreonGenerator + implements Callable { + + private static final int RPC_PAYLOAD_MULTIPLICATION_FACTOR = 1024; + private static final int MAX_SIZE_KB = 2097151; + private Timer timer; + private OzoneConfiguration configuration; + private ByteString payloadReqBytes; + private int payloadRespSize; + private List clients; + private String encodedContainerToken; + @Option(names = {"--payload-req"}, + description = + "Specifies the size of payload in KB in RPC request. ", + defaultValue = "0") + private int payloadReqSizeKB = 0; + + @Option(names = {"--payload-resp"}, + description = + "Specifies the size of payload in KB in RPC response. 
", + defaultValue = "0") + private int payloadRespSizeKB = 0; + + @Option(names = {"--container-id"}, + description = "Send echo to DataNodes associated with this container") + private long containerID; + + @Option(names = {"--sleep-time-ms"}, + description = "Let the DataNode pause for a duration (in milliseconds) for each request", + defaultValue = "0") + private int sleepTimeMs = 0; + + @Option(names = {"--clients"}, + description = "number of xceiver clients", + defaultValue = "1") + private int numClients = 1; + + @CommandLine.ParentCommand + private Freon freon; + + // empty constructor for picocli + DNRPCLoadGenerator() { + } + + @VisibleForTesting + DNRPCLoadGenerator(OzoneConfiguration ozoneConfiguration) { + this.configuration = ozoneConfiguration; + } + + @Override + public Void call() throws Exception { + Preconditions.checkArgument(payloadReqSizeKB >= 0, + "DN echo request payload size should be a positive value or zero."); + Preconditions.checkArgument(payloadRespSizeKB >= 0, + "DN echo response payload size should be a positive value or zero."); + + if (configuration == null) { + configuration = freon.createOzoneConfiguration(); + } + ContainerOperationClient scmClient = new ContainerOperationClient(configuration); + ContainerInfo containerInfo = scmClient.getContainer(containerID); + + List pipelineList = scmClient.listPipelines(); + Pipeline pipeline = pipelineList.stream() + .filter(p -> p.getId().equals(containerInfo.getPipelineID())) + .findFirst() + .orElse(null); + encodedContainerToken = scmClient.getEncodedContainerToken(containerID); + XceiverClientFactory xceiverClientManager; + if (OzoneSecurityUtil.isSecurityEnabled(configuration)) { + CACertificateProvider caCerts = () -> HAUtils.buildCAX509List(null, configuration); + xceiverClientManager = new XceiverClientManager(configuration, + configuration.getObject(XceiverClientManager.ScmClientConfig.class), + new ClientTrustManager(caCerts, null)); + } else { + xceiverClientManager = new XceiverClientManager(configuration); + } + clients = new ArrayList<>(numClients); + for (int i = 0; i < numClients; i++) { + clients.add(xceiverClientManager.acquireClient(pipeline)); + } + + init(); + payloadReqBytes = UnsafeByteOperations.unsafeWrap(generatePayloadBytes(payloadReqSizeKB)); + payloadRespSize = calculateMaxPayloadSize(payloadRespSizeKB); + timer = getMetrics().timer("rpc-payload"); + try { + runTests(this::sendRPCReq); + } finally { + for (XceiverClientSpi client : clients) { + xceiverClientManager.releaseClient(client, false); + } + xceiverClientManager.close(); + scmClient.close(); + } + return null; + } + + private int calculateMaxPayloadSize(int payloadSizeKB) { + if (payloadSizeKB > 0) { + return Math.min( + Math.toIntExact((long)payloadSizeKB * + RPC_PAYLOAD_MULTIPLICATION_FACTOR), + MAX_SIZE_KB); + } + return 0; + } + + private void sendRPCReq(long l) throws Exception { + timer.time(() -> { + int clientIndex = (numClients == 1) ? 
0 : (int)l % numClients; + ContainerProtos.EchoResponseProto response = + ContainerProtocolCalls.echo(clients.get(clientIndex), encodedContainerToken, + containerID, payloadReqBytes, payloadRespSize, sleepTimeMs); + return null; + }); + } +} + + diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java index b290da2da1f5..2bbf8b6d5b24 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java @@ -193,7 +193,7 @@ private ChecksumData computeChecksum(ContainerCommandResponseProto response) throws OzoneChecksumException { ContainerProtos.ReadChunkResponseProto readChunk = response.getReadChunk(); if (readChunk.hasData()) { - return checksum.computeChecksum(readChunk.getData().toByteArray()); + return checksum.computeChecksum(readChunk.getData().asReadOnlyByteBuffer()); } else { return checksum.computeChecksum( readChunk.getDataBuffers().getBuffersList()); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java index 20800757b1aa..2b178ac0aec2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.freon; -import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.DatanodeVersion; @@ -43,6 +42,7 @@ import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager; import org.apache.hadoop.hdds.utils.HAUtils; import org.apache.hadoop.hdds.utils.HddsServerUtil; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.io.retry.RetryPolicy; @@ -186,7 +186,7 @@ public Void call() throws Exception { } catch (InterruptedException e) { throw new RuntimeException(e); } - scmClients.values().forEach(IOUtils::closeQuietly); + IOUtils.closeQuietly(scmClients.values()); IOUtils.closeQuietly(reconClient); LOGGER.info("Successfully closed all the used resources"); saveDatanodesToFile(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java index bd5510695fa1..349887a776d3 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java @@ -73,7 +73,8 @@ OzoneClientKeyReadWriteListOps.class, RangeKeysGenerator.class, DatanodeSimulator.class, - OmMetadataGenerator.class + OmMetadataGenerator.class, + DNRPCLoadGenerator.class }, versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmRPCLoadGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmRPCLoadGenerator.java index 958df4c11a14..90807a0e6fe2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmRPCLoadGenerator.java +++ 
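
Note: the DatanodeChunkValidator hunk above switches the checksum input for ReadChunk responses from toByteArray() to asReadOnlyByteBuffer(). A minimal sketch of the difference, assuming the relocated protobuf ByteString that the new freon tool already imports (the demo class itself is hypothetical, not part of this patch):

    import java.nio.ByteBuffer;
    import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;

    /** Illustrative only: old vs. new checksum input for ReadChunk responses. */
    public final class ReadChunkChecksumSketch {
      public static void main(String[] args) {
        ByteString data = ByteString.copyFromUtf8("chunk-bytes"); // stands in for readChunk.getData()
        byte[] copied = data.toByteArray();            // old path: allocates and copies the whole chunk
        ByteBuffer view = data.asReadOnlyByteBuffer(); // new path: read-only view, typically no copy
        System.out.println(copied.length + " bytes copied vs. " + view.remaining() + " bytes viewed");
      }
    }

Since the hunk shows checksum.computeChecksum accepting the ByteBuffer directly, large chunks no longer need to be duplicated in memory just to re-verify them.
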
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmRPCLoadGenerator.java @@ -19,7 +19,6 @@ import com.codahale.metrics.Timer; import com.google.common.base.Preconditions; -import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; @@ -27,6 +26,8 @@ import picocli.CommandLine.Command; import picocli.CommandLine.Option; +import static org.apache.hadoop.ozone.common.PayloadUtils.generatePayloadBytes; + /** * Utility to generate RPC request to OM with or without payload. */ @@ -88,8 +89,7 @@ public Void call() throws Exception { } init(); - payloadReqBytes = RandomUtils.nextBytes( - calculateMaxPayloadSize(payloadReqSizeKB)); + payloadReqBytes = generatePayloadBytes(payloadReqSizeKB); payloadRespSize = calculateMaxPayloadSize(payloadRespSizeKB); timer = getMetrics().timer("rpc-payload"); try { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorDatanode.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorDatanode.java index 3a43ddd8ab09..dbca12c8b26d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorDatanode.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorDatanode.java @@ -60,7 +60,6 @@ import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl; import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerFactory; -import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; import com.codahale.metrics.Timer; @@ -111,6 +110,7 @@ public class GeneratorDatanode extends BaseGenerator { private int overlap; private ChunkManager chunkManager; + private BlockManagerImpl blockManager; private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy; @@ -133,7 +133,7 @@ public Void call() throws Exception { config = createOzoneConfiguration(); - BlockManager blockManager = new BlockManagerImpl(config); + blockManager = new BlockManagerImpl(config); chunkManager = ChunkManagerFactory .createChunkManager(config, blockManager, null); @@ -286,7 +286,7 @@ public void generateData(long index) throws Exception { writtenBytes += currentChunkSize; } - BlockManagerImpl.persistPutBlock(container, blockData, config, true); + blockManager.persistPutBlock(container, blockData, true); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java index b8509d60c9cd..7390488c8158 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java @@ -156,10 +156,10 @@ private void writeOmBucketVolume() throws IOException { .setQuotaInBytes(100L) .addOzoneAcls( new OzoneAcl(IAccessAuthorizer.ACLIdentityType.WORLD, "", - IAccessAuthorizer.ACLType.ALL, ACCESS)) + ACCESS, IAccessAuthorizer.ACLType.ALL)) .addOzoneAcls( new OzoneAcl(IAccessAuthorizer.ACLIdentityType.USER, getUserId(), - IAccessAuthorizer.ACLType.ALL, ACCESS) + ACCESS, 
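
Note: both echo load generators above now obtain their request payload from PayloadUtils.generatePayloadBytes. A condensed restatement of the DN tool's response-size arithmetic and its round-robin client selection follows; the constants mirror DNRPCLoadGenerator, and the standalone helper class is illustrative only:

    /** Illustrative sketch of the dn-echo payload sizing and client selection. */
    final class EchoPayloadMath {
      // Mirrors DNRPCLoadGenerator: KB-to-byte multiplier and the bound applied after conversion.
      private static final int RPC_PAYLOAD_MULTIPLICATION_FACTOR = 1024;
      private static final int MAX_SIZE_KB = 2097151;

      // Response payload size requested from the DataNode: payloadSizeKB * 1024, capped at MAX_SIZE_KB.
      static int maxPayloadSize(int payloadSizeKB) {
        if (payloadSizeKB > 0) {
          return Math.min(
              Math.toIntExact((long) payloadSizeKB * RPC_PAYLOAD_MULTIPLICATION_FACTOR),
              MAX_SIZE_KB);
        }
        return 0;
      }

      // dn-echo spreads requests round-robin across the acquired XceiverClient instances.
      static int clientIndex(long requestCounter, int numClients) {
        return (numClients == 1) ? 0 : (int) (requestCounter % numClients);
      }
    }
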
IAccessAuthorizer.ACLType.ALL) ).build(); volTable.put("/" + volumeName, omVolumeArgs); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java new file mode 100644 index 000000000000..3bbbded58028 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.repair; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hdds.cli.GenericCli; +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import picocli.CommandLine; + +/** + * Ozone Repair Command line tool. + */ +@CommandLine.Command(name = "ozone repair", + description = "Operational tool to repair Ozone", + versionProvider = HddsVersionProvider.class, + mixinStandardHelpOptions = true) +public class OzoneRepair extends GenericCli { + + private OzoneConfiguration ozoneConf; + + public OzoneRepair() { + super(OzoneRepair.class); + } + + @VisibleForTesting + public OzoneRepair(OzoneConfiguration configuration) { + super(OzoneRepair.class); + this.ozoneConf = configuration; + } + + public OzoneConfiguration getOzoneConf() { + if (ozoneConf == null) { + ozoneConf = createOzoneConfiguration(); + } + return ozoneConf; + } + + /** + * Main for the Ozone Repair shell Command handling. + * + * @param argv - System Args Strings[] + * @throws Exception + */ + public static void main(String[] argv) throws Exception { + new OzoneRepair().run(argv); + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RDBRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RDBRepair.java new file mode 100644 index 000000000000..0f36934ec14d --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RDBRepair.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.repair; + +import org.apache.hadoop.hdds.cli.GenericCli; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.kohsuke.MetaInfServices; +import picocli.CommandLine; + +import java.util.concurrent.Callable; + +/** + * Ozone Repair CLI for RocksDB. + */ +@CommandLine.Command(name = "ldb", + description = "Operational tool to repair RocksDB table.") +@MetaInfServices(SubcommandWithParent.class) +public class RDBRepair implements Callable, SubcommandWithParent { + + @CommandLine.Spec + private CommandLine.Model.CommandSpec spec; + + @CommandLine.Option(names = {"--db"}, + required = true, + description = "Database File Path") + private String dbPath; + + public String getDbPath() { + return dbPath; + } + + @Override + public Void call() { + GenericCli.missingSubcommand(spec); + return null; + } + + @Override + public Class getParentType() { + return OzoneRepair.class; + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java new file mode 100644 index 000000000000..ec5e2f8f9366 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java @@ -0,0 +1,200 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.repair.om; + +import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.hdds.utils.db.StringCodec; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; +import org.apache.hadoop.ozone.debug.RocksDBUtils; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.repair.RDBRepair; +import org.apache.hadoop.ozone.shell.bucket.BucketUri; +import org.kohsuke.MetaInfServices; +import org.rocksdb.ColumnFamilyDescriptor; +import org.rocksdb.ColumnFamilyHandle; +import org.rocksdb.RocksDBException; +import picocli.CommandLine; +import picocli.CommandLine.Model.CommandSpec; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.Callable; + +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.OzoneConsts.SNAPSHOT_INFO_TABLE; + +/** + * Tool to repair snapshotInfoTable in case it has corrupted entries. 
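
Note: RDBRepair above (and the SnapshotRepair tool that follows) plugs into the new "ozone repair" entry point through the SubcommandWithParent plus @MetaInfServices pattern. A hypothetical additional subcommand would take the same shape; the class below is purely illustrative and not part of this patch:

    package org.apache.hadoop.ozone.repair;

    import java.util.concurrent.Callable;
    import org.apache.hadoop.hdds.cli.SubcommandWithParent;
    import org.kohsuke.MetaInfServices;
    import picocli.CommandLine;

    /** Hypothetical example of hanging another tool under "ozone repair ldb". */
    @CommandLine.Command(name = "example",
        description = "Illustrative subcommand; not part of this patch.")
    @MetaInfServices(SubcommandWithParent.class)
    public class ExampleRepairSubcommand implements Callable<Void>, SubcommandWithParent {

      @CommandLine.ParentCommand
      private RDBRepair parent; // gives access to the --db path via parent.getDbPath()

      @Override
      public Void call() {
        System.out.println("Would inspect DB at: " + parent.getDbPath());
        return null;
      }

      @Override
      public Class<?> getParentType() {
        return RDBRepair.class; // service-loaded and attached under the "ldb" command
      }
    }
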
+ */ +@CommandLine.Command( + name = "snapshot", + description = "CLI to update global and path previous snapshot for a snapshot in case snapshot chain is corrupted." +) +@MetaInfServices(SubcommandWithParent.class) +public class SnapshotRepair implements Callable, SubcommandWithParent { + + @CommandLine.Spec + private static CommandSpec spec; + + @CommandLine.ParentCommand + private RDBRepair parent; + + @CommandLine.Mixin + private BucketUri bucketUri; + + @CommandLine.Parameters(description = "Snapshot name to update", index = "1") + private String snapshotName; + + @CommandLine.Option(names = {"--global-previous", "--gp"}, + required = true, + description = "Global previous snapshotId to set for the given snapshot") + private UUID globalPreviousSnapshotId; + + @CommandLine.Option(names = {"--path-previous", "--pp"}, + required = true, + description = "Path previous snapshotId to set for the given snapshot") + private UUID pathPreviousSnapshotId; + + @CommandLine.Option(names = {"--dry-run"}, + required = true, + description = "To dry-run the command.", defaultValue = "true") + private boolean dryRun; + + @Override + public Void call() throws Exception { + List cfHandleList = new ArrayList<>(); + List cfDescList = RocksDBUtils.getColumnFamilyDescriptors(parent.getDbPath()); + + try (ManagedRocksDB db = ManagedRocksDB.open(parent.getDbPath(), cfDescList, cfHandleList)) { + ColumnFamilyHandle snapshotInfoCfh = getSnapshotInfoCfh(cfHandleList); + if (snapshotInfoCfh == null) { + System.err.println(SNAPSHOT_INFO_TABLE + " is not in a column family in DB for the given path."); + return null; + } + + String snapshotInfoTableKey = SnapshotInfo.getTableKey(bucketUri.getValue().getVolumeName(), + bucketUri.getValue().getBucketName(), snapshotName); + + SnapshotInfo snapshotInfo = getSnapshotInfo(db, snapshotInfoCfh, snapshotInfoTableKey); + if (snapshotInfo == null) { + System.err.println(snapshotName + " does not exist for given bucketUri: " + OM_KEY_PREFIX + + bucketUri.getValue().getVolumeName() + OM_KEY_PREFIX + bucketUri.getValue().getBucketName()); + return null; + } + + // snapshotIdSet is the set of the all existed snapshots ID to make that the provided global previous and path + // previous exist and after the update snapshot does not point to ghost snapshot. 
+ Set snapshotIdSet = getSnapshotIdSet(db, snapshotInfoCfh); + + if (Objects.equals(snapshotInfo.getSnapshotId(), globalPreviousSnapshotId)) { + System.err.println("globalPreviousSnapshotId: '" + globalPreviousSnapshotId + + "' is equal to given snapshot's ID: '" + snapshotInfo.getSnapshotId() + "'."); + return null; + } + + if (Objects.equals(snapshotInfo.getSnapshotId(), pathPreviousSnapshotId)) { + System.err.println("pathPreviousSnapshotId: '" + pathPreviousSnapshotId + + "' is equal to given snapshot's ID: '" + snapshotInfo.getSnapshotId() + "'."); + return null; + } + + if (!snapshotIdSet.contains(globalPreviousSnapshotId)) { + System.err.println("globalPreviousSnapshotId: '" + globalPreviousSnapshotId + + "' does not exist in snapshotInfoTable."); + return null; + } + + if (!snapshotIdSet.contains(pathPreviousSnapshotId)) { + System.err.println("pathPreviousSnapshotId: '" + pathPreviousSnapshotId + + "' does not exist in snapshotInfoTable."); + return null; + } + + snapshotInfo.setGlobalPreviousSnapshotId(globalPreviousSnapshotId); + snapshotInfo.setPathPreviousSnapshotId(pathPreviousSnapshotId); + + if (dryRun) { + System.out.println("SnapshotInfo would be updated to : " + snapshotInfo); + } else { + byte[] snapshotInfoBytes = SnapshotInfo.getCodec().toPersistedFormat(snapshotInfo); + db.get() + .put(snapshotInfoCfh, StringCodec.get().toPersistedFormat(snapshotInfoTableKey), snapshotInfoBytes); + + System.out.println("Snapshot Info is updated to : " + + getSnapshotInfo(db, snapshotInfoCfh, snapshotInfoTableKey)); + } + } catch (RocksDBException exception) { + System.err.println("Failed to update the RocksDB for the given path: " + parent.getDbPath()); + System.err.println( + "Make sure that Ozone entity (OM, SCM or DN) is not running for the give dbPath and current host."); + System.err.println(exception); + } finally { + IOUtils.closeQuietly(cfHandleList); + } + + return null; + } + + private Set getSnapshotIdSet(ManagedRocksDB db, ColumnFamilyHandle snapshotInfoCfh) + throws IOException { + Set snapshotIdSet = new HashSet<>(); + try (ManagedRocksIterator iterator = new ManagedRocksIterator(db.get().newIterator(snapshotInfoCfh))) { + iterator.get().seekToFirst(); + + while (iterator.get().isValid()) { + SnapshotInfo snapshotInfo = SnapshotInfo.getCodec().fromPersistedFormat(iterator.get().value()); + snapshotIdSet.add(snapshotInfo.getSnapshotId()); + iterator.get().next(); + } + } + return snapshotIdSet; + } + + private ColumnFamilyHandle getSnapshotInfoCfh(List cfHandleList) throws RocksDBException { + byte[] nameBytes = SNAPSHOT_INFO_TABLE.getBytes(StandardCharsets.UTF_8); + + for (ColumnFamilyHandle cf : cfHandleList) { + if (Arrays.equals(cf.getName(), nameBytes)) { + return cf; + } + } + + return null; + } + + private SnapshotInfo getSnapshotInfo(ManagedRocksDB db, ColumnFamilyHandle snapshotInfoCfh, String snapshotInfoLKey) + throws IOException, RocksDBException { + byte[] bytes = db.get().get(snapshotInfoCfh, StringCodec.get().toPersistedFormat(snapshotInfoLKey)); + return bytes != null ? 
SnapshotInfo.getCodec().fromPersistedFormat(bytes) : null; + } + + @Override + public Class getParentType() { + return RDBRepair.class; + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/package-info.java new file mode 100644 index 000000000000..9e2324a4a6f8 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * OM related repair tools. + */ +package org.apache.hadoop.ozone.repair.om; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/package-info.java new file mode 100644 index 000000000000..bd382d04cf79 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Ozone Repair tools. + */ +package org.apache.hadoop.ozone.repair; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java index d1a6a4e156fd..4c795f1e82b4 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java @@ -41,6 +41,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT; /** * Executes Delete Key. 
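
Note: SnapshotRepair above refuses to touch the snapshot chain unless the supplied IDs pass four checks, and with --dry-run defaulting to true it only prints the SnapshotInfo it would write. A condensed restatement of those checks, for readability (the helper below is not part of the patch):

    import java.util.Set;
    import java.util.UUID;

    /** Illustrative restatement of the SnapshotRepair pre-update checks. */
    final class SnapshotChainChecks {
      /** Returns null when the proposed previous-snapshot IDs are acceptable, otherwise an error message. */
      static String validate(UUID snapshotId, UUID globalPrevious, UUID pathPrevious, Set<UUID> existingIds) {
        if (snapshotId.equals(globalPrevious)) {
          return "globalPreviousSnapshotId equals the snapshot's own ID";
        }
        if (snapshotId.equals(pathPrevious)) {
          return "pathPreviousSnapshotId equals the snapshot's own ID";
        }
        if (!existingIds.contains(globalPrevious)) {
          return "globalPreviousSnapshotId does not exist in snapshotInfoTable";
        }
        if (!existingIds.contains(pathPrevious)) {
          return "pathPreviousSnapshotId does not exist in snapshotInfoTable";
        }
        return null; // safe to update the pointers (or just report them when --dry-run is left at its default)
      }
    }
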
@@ -68,6 +70,12 @@ protected void execute(OzoneClient client, OzoneAddress address) return; } + if (bucket.getBucketLayout().isLegacy() && keyName.endsWith(OZONE_URI_DELIMITER) + && (getConf().getBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT))) { + out().printf("Use FS(ofs/o3fs) interface to delete legacy bucket directory %n"); + return; + } + if (bucket.getBucketLayout().isFileSystemOptimized()) { // Handle FSO delete key which supports trash also deleteFSOKey(bucket, keyName); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/RenameSnapshotHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/RenameSnapshotHandler.java new file mode 100644 index 000000000000..63b61b1ec662 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/RenameSnapshotHandler.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.shell.snapshot; + +import java.io.IOException; +import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientException; +import org.apache.hadoop.ozone.shell.Handler; +import org.apache.hadoop.ozone.shell.OzoneAddress; +import org.apache.hadoop.ozone.shell.bucket.BucketUri; +import picocli.CommandLine; + +/** + * ozone sh snapshot rename. 
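
Note: the new guard in DeleteKeyHandler above amounts to a single predicate: in a LEGACY-layout bucket with ozone.om.enable.filesystem.paths enabled, a key name ending in the URI delimiter is treated as a directory and its removal is deferred to the ofs/o3fs interfaces. A sketch of that predicate (the helper class and method name are hypothetical):

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.client.OzoneBucket;

    import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
    import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS;
    import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT;

    /** Hypothetical helper restating the legacy-directory delete guard. */
    final class LegacyDirectoryDeleteGuard {
      // True when the key should be removed through ofs/o3fs rather than "ozone sh key delete".
      static boolean shouldUseFsInterface(OzoneBucket bucket, String keyName, OzoneConfiguration conf) {
        return bucket.getBucketLayout().isLegacy()
            && keyName.endsWith(OZONE_URI_DELIMITER)
            && conf.getBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT);
      }
    }
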
+ */ +@CommandLine.Command(name = "rename", + description = "Rename a snapshot") +public class RenameSnapshotHandler extends Handler { + + @CommandLine.Mixin + private BucketUri snapshotPath; + + @CommandLine.Parameters(description = "Current snapshot name", + index = "1", arity = "1") + private String snapshotOldName; + + @CommandLine.Parameters(description = "New snapshot name", + index = "2", arity = "1") + private String snapshotNewName; + + @Override + protected OzoneAddress getAddress() { + return snapshotPath.getValue(); + } + + @Override + protected void execute(OzoneClient client, OzoneAddress address) throws IOException, OzoneClientException { + String volumeName = snapshotPath.getValue().getVolumeName(); + String bucketName = snapshotPath.getValue().getBucketName(); + OmUtils.validateSnapshotName(snapshotNewName); + client.getObjectStore() + .renameSnapshot(volumeName, bucketName, snapshotOldName, snapshotNewName); + if (isVerbose()) { + out().format("Renamed snapshot from'%s' to %s under '%s/%s'.%n", + snapshotOldName, snapshotNewName, volumeName, bucketName); + } + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java index cf513b9e913f..25a3c1c66fe9 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java @@ -43,7 +43,8 @@ ListSnapshotHandler.class, SnapshotDiffHandler.class, ListSnapshotDiffHandler.class, - InfoSnapshotHandler.class + InfoSnapshotHandler.class, + RenameSnapshotHandler.class }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/GetUserInfoHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/GetUserInfoHandler.java index c9b58064fb56..be8b4ceed173 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/GetUserInfoHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/GetUserInfoHandler.java @@ -17,11 +17,10 @@ */ package org.apache.hadoop.ozone.shell.tenant; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonArray; -import com.google.gson.JsonObject; +import com.fasterxml.jackson.databind.node.ArrayNode; +import com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.hadoop.hdds.cli.GenericCli; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.helpers.TenantUserInfoValue; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExtendedUserAccessIdInfo; @@ -71,39 +70,32 @@ protected void execute(OzoneClient client, OzoneAddress address) if (!printJson) { out().println("User '" + userPrincipal + "' is assigned to:"); accessIdInfoList.forEach(accessIdInfo -> { - // Get admin info - final String adminInfoString; - if (accessIdInfo.getIsAdmin()) { - adminInfoString = accessIdInfo.getIsDelegatedAdmin() ? - " delegated admin" : " admin"; - } else { - adminInfoString = ""; - } + final String adminInfoString = accessIdInfo.getIsAdmin() ? + (accessIdInfo.getIsDelegatedAdmin() ? 
" delegated admin" : + " admin") : ""; out().format("- Tenant '%s'%s with accessId '%s'%n", accessIdInfo.getTenantId(), adminInfoString, accessIdInfo.getAccessId()); }); } else { + ObjectNode resObj = JsonUtils.createObjectNode(null); + resObj.put("user", userPrincipal); - final JsonObject resObj = new JsonObject(); - resObj.addProperty("user", userPrincipal); - - final JsonArray arr = new JsonArray(); + ArrayNode arr = JsonUtils.createArrayNode(); accessIdInfoList.forEach(accessIdInfo -> { - final JsonObject tenantObj = new JsonObject(); - tenantObj.addProperty("accessId", accessIdInfo.getAccessId()); - tenantObj.addProperty("tenantId", accessIdInfo.getTenantId()); - tenantObj.addProperty("isAdmin", accessIdInfo.getIsAdmin()); - tenantObj.addProperty("isDelegatedAdmin", - accessIdInfo.getIsDelegatedAdmin()); + ObjectNode tenantObj = JsonUtils.createObjectNode(null); + tenantObj.put("accessId", accessIdInfo.getAccessId()); + tenantObj.put("tenantId", accessIdInfo.getTenantId()); + tenantObj.put("isAdmin", accessIdInfo.getIsAdmin()); + tenantObj.put("isDelegatedAdmin", accessIdInfo.getIsDelegatedAdmin()); arr.add(tenantObj); }); - resObj.add("tenants", arr); - - final Gson gson = new GsonBuilder().setPrettyPrinting().create(); - out().println(gson.toJson(resObj)); + resObj.set("tenants", arr); + String prettyJson = + JsonUtils.toJsonStringWithDefaultPrettyPrinter(resObj); + out().println(prettyJson); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantAssignAdminHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantAssignAdminHandler.java index 364fd21233b0..041b559608e2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantAssignAdminHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantAssignAdminHandler.java @@ -17,9 +17,8 @@ */ package org.apache.hadoop.ozone.shell.tenant; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonObject; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.shell.OzoneAddress; import picocli.CommandLine; @@ -55,14 +54,14 @@ protected void execute(OzoneClient client, OzoneAddress address) client.getObjectStore().tenantAssignAdmin(accessId, tenantId, delegated); if (isVerbose()) { - final JsonObject obj = new JsonObject(); - obj.addProperty("accessId", accessId); - obj.addProperty("tenantId", tenantId); - obj.addProperty("isAdmin", true); - obj.addProperty("isDelegatedAdmin", delegated); - final Gson gson = new GsonBuilder().setPrettyPrinting().create(); - out().println(gson.toJson(obj)); - } + ObjectNode obj = JsonUtils.createObjectNode(null); + obj.put("accessId", accessId); + obj.put("tenantId", tenantId); + obj.put("isAdmin", true); + obj.put("isDelegatedAdmin", delegated); + String jsonString = JsonUtils.toJsonStringWithDefaultPrettyPrinter(obj); + out().println(jsonString); + } } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantCreateHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantCreateHandler.java index fd6c4109604c..1eac7685be76 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantCreateHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantCreateHandler.java @@ -17,9 
+17,8 @@ */ package org.apache.hadoop.ozone.shell.tenant; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonObject; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.TenantArgs; import org.apache.hadoop.ozone.shell.OzoneAddress; @@ -58,11 +57,11 @@ protected void execute(OzoneClient client, OzoneAddress address) // RpcClient#createTenant prints INFO level log of tenant and volume name if (isVerbose()) { - final JsonObject obj = new JsonObject(); - obj.addProperty("tenantId", tenantId); - final Gson gson = new GsonBuilder().setPrettyPrinting().create(); - out().println(gson.toJson(obj)); - } + ObjectNode obj = JsonUtils.createObjectNode(null); + obj.put("tenantId", tenantId); + String jsonString = JsonUtils.toJsonStringWithDefaultPrettyPrinter(obj); + out().println(jsonString); + } } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantDeleteHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantDeleteHandler.java index 9924ac827ae2..c5e43e27a42c 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantDeleteHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantDeleteHandler.java @@ -17,9 +17,8 @@ */ package org.apache.hadoop.ozone.shell.tenant; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonObject; +import org.apache.hadoop.hdds.server.JsonUtils; +import com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.helpers.DeleteTenantState; import org.apache.hadoop.ozone.shell.OzoneAddress; @@ -60,13 +59,13 @@ protected void execute(OzoneClient client, OzoneAddress address) } if (isVerbose()) { - final JsonObject obj = new JsonObject(); - obj.addProperty("tenantId", tenantId); - obj.addProperty("volumeName", resp.getVolumeName()); - obj.addProperty("volumeRefCount", resp.getVolRefCount()); - final Gson gson = new GsonBuilder().setPrettyPrinting().create(); + ObjectNode obj = JsonUtils.createObjectNode(null); + obj.put("tenantId", tenantId); + obj.put("volumeName", resp.getVolumeName()); + obj.put("volumeRefCount", resp.getVolRefCount()); // Print raw response to stderr if verbose - out().println(gson.toJson(obj)); + String jsonString = JsonUtils.toJsonStringWithDefaultPrettyPrinter(obj); + out().println(jsonString); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantListHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantListHandler.java index 6f0428bd7b8c..3201eb456396 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantListHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantListHandler.java @@ -17,10 +17,9 @@ */ package org.apache.hadoop.ozone.shell.tenant; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonArray; -import com.google.gson.JsonObject; +import org.apache.hadoop.hdds.server.JsonUtils; +import com.fasterxml.jackson.databind.node.ArrayNode; +import com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.helpers.TenantStateList; import 
org.apache.hadoop.ozone.shell.OzoneAddress; @@ -50,23 +49,21 @@ protected void execute(OzoneClient client, OzoneAddress address) tenantStateList.getTenantStateList().forEach(tenantState -> out().println(tenantState.getTenantId())); } else { - final JsonArray resArray = new JsonArray(); + ArrayNode resArray = JsonUtils.createArrayNode(); tenantStateList.getTenantStateList().forEach(tenantState -> { - final JsonObject obj = new JsonObject(); - obj.addProperty("tenantId", tenantState.getTenantId()); - obj.addProperty("bucketNamespaceName", - tenantState.getBucketNamespaceName()); - obj.addProperty("userRoleName", tenantState.getUserRoleName()); - obj.addProperty("adminRoleName", tenantState.getAdminRoleName()); - obj.addProperty("bucketNamespacePolicyName", + ObjectNode obj = JsonUtils.createObjectNode(null); + obj.put("tenantId", tenantState.getTenantId()); + obj.put("bucketNamespaceName", tenantState.getBucketNamespaceName()); + obj.put("userRoleName", tenantState.getUserRoleName()); + obj.put("adminRoleName", tenantState.getAdminRoleName()); + obj.put("bucketNamespacePolicyName", tenantState.getBucketNamespacePolicyName()); - obj.addProperty("bucketPolicyName", - tenantState.getBucketPolicyName()); + obj.put("bucketPolicyName", tenantState.getBucketPolicyName()); resArray.add(obj); }); - final Gson gson = new GsonBuilder().setPrettyPrinting().create(); - out().println(gson.toJson(resArray)); + // Serialize and print the JSON string with pretty printing + String jsonString = JsonUtils.toJsonStringWithDefaultPrettyPrinter(resArray); + out().println(jsonString); } - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantListUsersHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantListUsersHandler.java index e27a8cecd861..ae56f0ba16f7 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantListUsersHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantListUsersHandler.java @@ -20,10 +20,9 @@ import java.io.IOException; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonArray; -import com.google.gson.JsonObject; +import org.apache.hadoop.hdds.server.JsonUtils; +import com.fasterxml.jackson.databind.node.ArrayNode; +import com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.helpers.TenantUserList; import org.apache.hadoop.ozone.shell.OzoneAddress; @@ -66,15 +65,15 @@ protected void execute(OzoneClient client, OzoneAddress address) "' with accessId '" + accessIdInfo.getAccessId() + "'"); }); } else { - final JsonArray resArray = new JsonArray(); + ArrayNode resArray = JsonUtils.createArrayNode(); usersInTenant.getUserAccessIds().forEach(accessIdInfo -> { - final JsonObject obj = new JsonObject(); - obj.addProperty("user", accessIdInfo.getUserPrincipal()); - obj.addProperty("accessId", accessIdInfo.getAccessId()); + ObjectNode obj = JsonUtils.createObjectNode(null); + obj.put("user", accessIdInfo.getUserPrincipal()); + obj.put("accessId", accessIdInfo.getAccessId()); resArray.add(obj); }); - final Gson gson = new GsonBuilder().setPrettyPrinting().create(); - out().println(gson.toJson(resArray)); + String prettyJsonString = JsonUtils.toJsonStringWithDefaultPrettyPrinter(resArray); + out().println(prettyJsonString); } } diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantRevokeAdminHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantRevokeAdminHandler.java index 419628246fe6..671864931a6f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantRevokeAdminHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantRevokeAdminHandler.java @@ -17,9 +17,8 @@ */ package org.apache.hadoop.ozone.shell.tenant; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonObject; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.shell.OzoneAddress; import picocli.CommandLine; @@ -48,14 +47,14 @@ protected void execute(OzoneClient client, OzoneAddress address) client.getObjectStore().tenantRevokeAdmin(accessId, tenantId); if (isVerbose()) { - final JsonObject obj = new JsonObject(); - obj.addProperty("accessId", accessId); - obj.addProperty("tenantId", tenantId); - obj.addProperty("isAdmin", false); - obj.addProperty("isDelegatedAdmin", false); - final Gson gson = new GsonBuilder().setPrettyPrinting().create(); - out().println(gson.toJson(obj)); + ObjectNode obj = JsonUtils.createObjectNode(null); + obj.put("accessId", accessId); + obj.put("tenantId", tenantId); + obj.put("isAdmin", false); + obj.put("isDelegatedAdmin", false); + + String jsonString = JsonUtils.toJsonStringWithDefaultPrettyPrinter(obj); + out().println(jsonString); } - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java index e380e98561b0..8cc80502386f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java @@ -121,6 +121,11 @@ private void deleteVolumeRecursive() totalBucketCount++; } doCleanBuckets(); + // Reset counters and bucket list + numberOfBucketsCleaned.set(0); + totalBucketCount = 0; + cleanedBucketCounter.set(0); + bucketIdList.clear(); } /** @@ -201,6 +206,7 @@ public void run() { if (!cleanOBSBucket(bucket)) { throw new RuntimeException("Failed to clean bucket"); } + break; default: throw new RuntimeException("Invalid bucket layout"); } diff --git a/pom.xml b/pom.xml index 37dfb139e2cc..90f5667ae2ea 100644 --- a/pom.xml +++ b/pom.xml @@ -94,7 +94,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs UTF-8 UTF-8 - 1.5 + 3.2.2 bash @@ -106,8 +106,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 9.4.53.v20231009 5.2.0 4.2.0 - _ - _ 4 @@ -116,17 +114,16 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.6.0 1.15 3.2.2 - 1.25.0 - 2.8.0 - 1.5.2-5 + 1.26.0 + 2.10.1 + 1.5.6-2 1.0.13 - 2.11.0 + 2.16.0 3.14.0 1.2 1.1 3.6.1 3.10.0 - 2.6.0 1.11.0 1.6 1.5 @@ -139,14 +136,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.3.2 3.12.2 - 5.0.4 0.8.0.RELEASE 1.77 - 3.3.0 10.14.2.0 3.0.2 3.2.4 - 0.8.5 + 0.8.12 3.21.0-GA 1.2.2 2.3.3 @@ -160,12 +155,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.19.4 - 2.41 + 2.42 1.9.13 1.9.13 - 2.13.4.20221013 + 2.16.1 5.4.0 @@ -175,20 
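
Note: the tenant shell handlers converted above (GetUserInfoHandler, TenantAssignAdminHandler, TenantCreateHandler, TenantDeleteHandler, TenantListHandler, TenantListUsersHandler, TenantRevokeAdminHandler) all follow the same Gson-to-Jackson shape: build ObjectNode/ArrayNode trees through the shared JsonUtils and pretty-print them once. A standalone example of that pattern (the class and sample values are illustrative, not part of the patch):

    import com.fasterxml.jackson.databind.node.ArrayNode;
    import com.fasterxml.jackson.databind.node.ObjectNode;
    import org.apache.hadoop.hdds.server.JsonUtils;

    /** Standalone example of the ObjectNode/ArrayNode pattern used by the tenant handlers. */
    public final class TenantJsonExample {
      public static void main(String[] args) throws Exception {
        ObjectNode resObj = JsonUtils.createObjectNode(null); // the handlers pass null here as well
        resObj.put("user", "alice");

        ArrayNode tenants = JsonUtils.createArrayNode();
        ObjectNode tenant = JsonUtils.createObjectNode(null);
        tenant.put("accessId", "tenant1$alice");
        tenant.put("tenantId", "tenant1");
        tenant.put("isAdmin", false);
        tenants.add(tenant);

        resObj.set("tenants", tenants);
        System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(resObj));
      }
    }
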
+170,20 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 2.6.1 - 4.5.13 + 4.5.14 4.4.16 - 2.0.10 + 2.0.12 2.17.1 - 3.4.2 + 3.4.4 1.2.25 1.9.22 - 1.8 + 1.11 4.7.5 0.16.0 - 0.9.11 + 0.10.2 1.7 @@ -199,32 +194,29 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 3.7.1 1.1.1 - 3.0.0 - 3.1.12 + 3.1.12.2 2.1.7 4.12.0 4.2.2 2.6.1 2.1.1 - 2.12.5 + 2.12.7 0.19 2.2.0 32.0.0-jre - 5.1.0 - 2.9.0 + 6.0.0 + 2.10.1 - 1.0 2.7.5 3.6.0 4.11.0 2.2 - 1.24 - 5.10.1 - 3.7.2 + 5.10.2 + 3.8.4 - 0.5.1 + 0.6.1 3.19.6 1.7.1 @@ -251,8 +243,8 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs -Xmx4096m -XX:+HeapDumpOnOutOfMemoryError - flaky | slow | unhealthy - 3.0.0-M5 + flaky | native | slow | unhealthy + 3.0.0-M4 ${maven-surefire-plugin.version} ${maven-surefire-plugin.version} @@ -260,7 +252,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 3.9.0 3.1.1 3.1.0 - 3.5.1 + 3.5.2 3.3.0 3.4.0 3.3.0 @@ -268,14 +260,14 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.5 3.4.1 3.0.1 - 3.6.0 + 3.7.1 0.16.1 - 2.8.1 - 1.9 + 3.1.1 + 3.5.0 3.6.1 4.2.2 - 0.29.0 - 1.3.1 + 0.44.0 + 3.1.1 2.3.0 1.0-beta-1 1.0-alpha-11 @@ -284,7 +276,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 3.1.0 9.3 1200 - 1.12.632 + 1.12.661 1.15.0 @@ -297,22 +289,21 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.9.7 1.14.0 - 2.4.0 - 1.0.8 - 1.2.13 - 1.9.3 - 1.1.8 - 1.4.9 - 1.0.1 + 2.5.0 + 1.4.0 5.3.27 3.11.10 5.1.0 + 1.2.1 + 3.9.6 + 1.1.10.5 + 1.2.0 + 9.37.2 - @@ -345,44 +336,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs hadoop-annotations ${hadoop.version} - - org.apache.hadoop - hadoop-client-modules - ${hadoop.version} - pom - - - org.apache.hadoop - hadoop-client-api - ${hadoop.version} - - - org.apache.hadoop - hadoop-client-check-invariants - ${hadoop.version} - pom - - - org.apache.hadoop - hadoop-client-check-test-invariants - ${hadoop.version} - pom - - - org.apache.hadoop - hadoop-client-integration-tests - ${hadoop.version} - - - org.apache.hadoop - hadoop-client-runtime - ${hadoop.version} - - - org.apache.hadoop - hadoop-client-minicluster - ${hadoop.version} - org.apache.hadoop hadoop-common @@ -405,11 +358,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${hadoop.version} test-jar - - org.apache.hadoop - hadoop-nfs - ${hadoop.version} - org.apache.hadoop hadoop-hdfs @@ -420,237 +368,11 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs hadoop-hdfs-client ${hadoop.version} - - org.apache.hadoop - hadoop-hdfs-rbf - ${hadoop.version} - - - org.apache.hadoop - hadoop-mapreduce-client-app - ${hadoop.version} - - - org.apache.hadoop - hadoop-mapreduce-client-app - ${hadoop.version} - test-jar - - - org.apache.hadoop - hadoop-mapreduce-client-common - ${hadoop.version} - - - org.apache.hadoop - hadoop-yarn-api - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-client - ${hadoop.version} - - - - org.apache.hadoop - hadoop-mapreduce-client-core - ${hadoop.version} - - org.apache.hadoop hadoop-mapreduce-client-jobclient ${hadoop.version} - - - - org.apache.hadoop - hadoop-mapreduce-client-shuffle - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn - ${hadoop.version} - pom - - - - org.apache.hadoop - hadoop-yarn-server - ${hadoop.version} - - - - org.apache.hadoop - 
hadoop-yarn-server-web-proxy - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-common - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-common - ${hadoop.version} - test-jar - - - - org.apache.hadoop - hadoop-yarn-server-tests - ${hadoop.version} - test-jar - - - - org.apache.hadoop - hadoop-yarn-common - ${hadoop.version} - - - org.apache.hadoop - hadoop-yarn-common - ${hadoop.version} - test-jar - - - - org.apache.hadoop - hadoop-yarn-registry - ${hadoop.version} - - - org.apache.hadoop - hadoop-yarn-server-nodemanager - ${hadoop.version} - - - org.apache.hadoop - hadoop-yarn-server-resourcemanager - ${hadoop.version} - - - org.apache.hadoop - hadoop-yarn-server-resourcemanager - ${hadoop.version} - test-jar - - - - org.apache.hadoop - hadoop-yarn-server-applicationhistoryservice - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-timelineservice - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-timelineservice - ${hadoop.version} - test-jar - - - - org.apache.hadoop - hadoop-yarn-server-timelineservice-hbase-client - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-timelineservice-hbase-common - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-applications-distributedshell - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-timeline-pluginstorage - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-timeline-pluginstorage - test-jar - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-router - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-services-core - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-services-core - ${hadoop.version} - test-jar - - - - org.apache.hadoop - hadoop-mapreduce-client-jobclient - ${hadoop.version} - test-jar - - - - org.apache.hadoop - hadoop-mapreduce-client-hs - ${hadoop.version} - - - - org.apache.hadoop - hadoop-mapreduce-examples - ${hadoop.version} - - - org.apache.hadoop - hadoop-gridmix - ${hadoop.version} - - - - org.apache.hadoop - hadoop-streaming - ${hadoop.version} - - - org.apache.hadoop - hadoop-archives - ${hadoop.version} - - - org.apache.hadoop - hadoop-archive-logs - ${hadoop.version} + test org.apache.hadoop @@ -663,70 +385,16 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${hadoop.version} test-jar - - org.apache.hadoop - hadoop-datajoin - ${hadoop.version} - - - org.apache.hadoop - hadoop-rumen - ${hadoop.version} - - - org.apache.hadoop - hadoop-extras - ${hadoop.version} - - org.apache.hadoop hadoop-client ${hadoop.version} - - - org.apache.hadoop - hadoop-minicluster - ${hadoop.version} - - org.apache.hadoop hadoop-minikdc ${hadoop.version} - - - org.apache.hadoop - hadoop-openstack - ${hadoop.version} - - - - org.apache.hadoop - hadoop-azure - ${hadoop.version} - - - - org.apache.hadoop - hadoop-azure-datalake - ${hadoop.version} - - - - org.apache.hadoop - hadoop-aws - ${hadoop.version} - - - - org.apache.hadoop - hadoop-aliyun - ${hadoop.version} - - org.apache.hadoop hadoop-kms @@ -738,11 +406,16 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${hadoop.version} test-jar - com.google.guava guava ${guava.version} + + + com.google.code.findbugs + jsr305 + + com.google.code.gson @@ -789,11 +462,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs commons-net ${commons-net.version} - - org.apache.commons - commons-pool2 - ${commons-pool2.version} - commons-validator 
commons-validator @@ -946,12 +614,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs jersey-media-jaxb ${jersey2.version} - - - org.ow2.asm - asm - ${asm.version} - com.sun.jersey jersey-core @@ -1006,13 +668,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs bonecp ${bonecp.version} - - - cglib - cglib - ${cglib.version} - - com.sun.jersey.contribs jersey-guice @@ -1234,6 +889,16 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.apache.zookeeper zookeeper ${zookeeper.version} + + + ch.qos.logback + logback-core + + + ch.qos.logback + logback-classic + + org.slf4j @@ -1273,11 +938,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs pom import - - org.jmockit - jmockit - ${jmockit.version} - org.mockito mockito-core @@ -1293,11 +953,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs compile-testing ${compile-testing.version} - - org.objenesis - objenesis - ${objenesis.version} - com.google.re2j re2j @@ -1403,11 +1058,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs hadoop-cloud-storage ${hadoop.version} - - com.google.code.findbugs - jsr305 - ${findbugs.version} - jakarta.xml.bind jakarta.xml.bind-api @@ -1471,6 +1121,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.reflections reflections ${reflections.version} + + + com.google.code.findbugs + jsr305 + + org.rocksdb @@ -1552,6 +1208,21 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs mockito-inline ${mockito.version} + + org.xerial.snappy + snappy-java + ${snappy-java.version} + + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + ${hadoop-shaded-guava.version} + + + com.nimbusds + nimbus-jose-jwt + ${com.nimbusds.nimbus-jose-jwt.version} + @@ -2026,11 +1697,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs **/Test*.java - - **/${test.exclude}.java - ${test.exclude.pattern} - **/Test*$*.java - @@ -2091,6 +1757,25 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs + + org.codehaus.mojo + properties-maven-plugin + ${properties.maven.plugin.version} + + + org.apache.maven + maven-core + ${maven.core.version} + + + + + org.apache.rat + apache-rat-plugin + + dev-support/rat/rat-exclusions.txt + + @@ -2243,6 +1928,17 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs + + go-offline + + void + true + true + true + true + true + + client @@ -2260,6 +1956,24 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs + + container + + + + org.apache.maven.plugins + maven-surefire-plugin + + + org.apache.hadoop.hdds.scm.container.** + org.apache.hadoop.ozone.container.** + + ${unstable-test-groups} + + + + + om @@ -2271,6 +1985,9 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.apache.hadoop.ozone.om.** + + org.apache.hadoop.ozone.om.snapshot.** + ${unstable-test-groups} @@ -2278,7 +1995,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs - contract + snapshot @@ -2286,7 +2003,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs maven-surefire-plugin - org.apache.hadoop.fs.ozone.contract.** + org.apache.hadoop.ozone.om.snapshot.** ${unstable-test-groups} @@ -2303,11 +2020,8 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 
http://maven.apache.org/xs maven-surefire-plugin - org.apache.hadoop.fs.ozone.** + org.apache.hadoop.fs.** - - org.apache.hadoop.fs.ozone.contract.** - ${unstable-test-groups} @@ -2325,6 +2039,9 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.apache.hadoop.hdds.** + + org.apache.hadoop.hdds.scm.container.** + ${unstable-test-groups} @@ -2340,13 +2057,16 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs maven-surefire-plugin + org.apache.ozone.** org.apache.hadoop.ozone.** org.apache.hadoop.ozone.client.** + org.apache.hadoop.ozone.container.** org.apache.hadoop.ozone.debug.** org.apache.hadoop.ozone.freon.** org.apache.hadoop.ozone.om.** + org.apache.hadoop.ozone.recon.** org.apache.hadoop.ozone.shell.** ${unstable-test-groups} @@ -2355,6 +2075,23 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs + + recon + + + + org.apache.maven.plugins + maven-surefire-plugin + + + org.apache.hadoop.ozone.recon.** + + ${unstable-test-groups} + + + + + shell @@ -2397,7 +2134,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs maven-surefire-plugin flaky - slow | unhealthy + native | slow | unhealthy @@ -2412,6 +2149,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs maven-surefire-plugin native + slow | unhealthy
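
Note: the pom changes above add "native" to ${unstable-test-groups}, so the other test profiles skip natively-tagged tests and they run only under the profile that selects <groups>native</groups>. Assuming these groups map to JUnit 5 tags, which is how Surefire interprets groups/excludedGroups on the JUnit Platform, an integration test would opt into that partition as sketched below (the test class is hypothetical):

    import org.junit.jupiter.api.Tag;
    import org.junit.jupiter.api.Test;

    /** Hypothetical test showing how a case joins the new "native" partition. */
    class ExampleNativeLibraryTest {

      @Test
      @Tag("native") // skipped by profiles excluding ${unstable-test-groups}; run where <groups> is "native"
      void usesNativeCodec() {
        // body would exercise code that requires the native library
      }
    }
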