diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b1df2254c587..e4c9431caa9a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -434,12 +434,14 @@ jobs: matrix: profile: - client - - contract + - container - filesystem - hdds - om - ozone + - recon - shell + - snapshot - flaky fail-fast: false steps: diff --git a/LICENSE.txt b/LICENSE.txt index 021266844b82..8a367a318628 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -213,8 +213,6 @@ Apache License 2.0 hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js.map hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.css.map hadoop-hdds/framework/src/main/resources/webapps/static/nvd3-1.8.5.min.js -hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java -hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java BSD 3-Clause diff --git a/dev-support/ci/selective_ci_checks.bats b/dev-support/ci/selective_ci_checks.bats index a21a4c387b88..69ca1d9f99f6 100644 --- a/dev-support/ci/selective_ci_checks.bats +++ b/dev-support/ci/selective_ci_checks.bats @@ -108,7 +108,7 @@ load bats-assert/load.bash @test "integration and unit: java change" { run dev-support/ci/selective_ci_checks.sh 9aebf6e25 - assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false @@ -120,7 +120,7 @@ load bats-assert/load.bash @test "integration and unit: script change" { run dev-support/ci/selective_ci_checks.sh c6850484f - assert_output -p 'basic-checks=["rat","bats","unit"]' + assert_output -p 'basic-checks=["rat","bats"]' assert_output -p needs-build=false assert_output -p needs-compile=false assert_output -p needs-compose-tests=false @@ -132,7 +132,7 @@ load bats-assert/load.bash @test "script change including junit.sh" { run dev-support/ci/selective_ci_checks.sh 66093e52c6 - assert_output -p 'basic-checks=["rat","bats","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","bats","checkstyle","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false @@ -144,19 +144,19 @@ load bats-assert/load.bash @test "unit only" { run dev-support/ci/selective_ci_checks.sh 1dd1d0ba3 - assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false assert_output -p needs-dependency-check=false - assert_output -p needs-integration-tests=false + assert_output -p needs-integration-tests=true assert_output -p needs-kubernetes-tests=false } @test "unit helper" { run dev-support/ci/selective_ci_checks.sh 88383d1d5 - assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=false @@ -189,20 +189,17 @@ load bats-assert/load.bash assert_output -p needs-kubernetes-tests=false } -# disabled, because this test fails if -# 
hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestManagedSstFileReader.java -# is not present in the current tree (i.e. if file is renamed, moved or deleted) -#@test "native test in other module" { -# run dev-support/ci/selective_ci_checks.sh 7d01cc14a6 -# -# assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","native","unit"]' -# assert_output -p needs-build=true -# assert_output -p needs-compile=true -# assert_output -p needs-compose-tests=false -# assert_output -p needs-dependency-check=false -# assert_output -p needs-integration-tests=false -# assert_output -p needs-kubernetes-tests=false -#} +@test "native test in other module" { + run dev-support/ci/selective_ci_checks.sh 822c0dee1a + + assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","native"]' + assert_output -p needs-build=true + assert_output -p needs-compile=true + assert_output -p needs-compose-tests=false + assert_output -p needs-dependency-check=false + assert_output -p needs-integration-tests=false + assert_output -p needs-kubernetes-tests=false +} @test "kubernetes only" { run dev-support/ci/selective_ci_checks.sh 5336bb9bd @@ -231,7 +228,7 @@ load bats-assert/load.bash @test "main/java change" { run dev-support/ci/selective_ci_checks.sh 86a771dfe - assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true @@ -243,7 +240,7 @@ load bats-assert/load.bash @test "..../java change" { run dev-support/ci/selective_ci_checks.sh 01c616536 - assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true @@ -255,7 +252,7 @@ load bats-assert/load.bash @test "java and compose change" { run dev-support/ci/selective_ci_checks.sh d0f0f806e - assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true @@ -267,7 +264,7 @@ load bats-assert/load.bash @test "java and docs change" { run dev-support/ci/selective_ci_checks.sh 2c0adac26 - assert_output -p 'basic-checks=["rat","author","checkstyle","docs","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","author","checkstyle","docs","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true @@ -279,7 +276,7 @@ load bats-assert/load.bash @test "pom change" { run dev-support/ci/selective_ci_checks.sh 9129424a9 - assert_output -p 'basic-checks=["rat","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","checkstyle","findbugs"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true @@ -291,7 +288,7 @@ load bats-assert/load.bash @test "CI lib change" { run dev-support/ci/selective_ci_checks.sh ceb79acaa - assert_output -p 'basic-checks=["author","bats","checkstyle","docs","findbugs","native","rat","unit"]' + assert_output -p 'basic-checks=["author","bats","checkstyle","docs","findbugs","native","rat"]' assert_output -p needs-build=true assert_output -p 
needs-compile=true assert_output -p needs-compose-tests=true @@ -303,7 +300,7 @@ load bats-assert/load.bash @test "CI workflow change" { run dev-support/ci/selective_ci_checks.sh 90a8d7c01 - assert_output -p 'basic-checks=["author","bats","checkstyle","docs","findbugs","native","rat","unit"]' + assert_output -p 'basic-checks=["author","bats","checkstyle","docs","findbugs","native","rat"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true @@ -328,7 +325,7 @@ load bats-assert/load.bash @test "CI workflow change (ci.yaml)" { run dev-support/ci/selective_ci_checks.sh 90fd5f2adc - assert_output -p 'basic-checks=["author","bats","checkstyle","docs","findbugs","native","rat","unit"]' + assert_output -p 'basic-checks=["author","bats","checkstyle","docs","findbugs","native","rat"]' assert_output -p needs-build=true assert_output -p needs-compile=true assert_output -p needs-compose-tests=true diff --git a/dev-support/ci/selective_ci_checks.sh b/dev-support/ci/selective_ci_checks.sh index 3cfeaa4a6ece..bb0faa962e46 100755 --- a/dev-support/ci/selective_ci_checks.sh +++ b/dev-support/ci/selective_ci_checks.sh @@ -263,18 +263,10 @@ function get_count_integration_files() { "^hadoop-ozone/integration-test" "^hadoop-ozone/fault-injection-test/mini-chaos-tests" "src/test/java" + "src/test/resources" ) - # Ozone's unit test naming convention: Test*.java - # The following makes this filter ignore all tests except those in - # integration-test and fault-injection-test. - # Directories starting with `i` under hadoop-ozone need to be listed - # explicitly, other subdirectories are captured by the second item. local ignore_array=( - "^hadoop-hdds/.*/src/test/java/.*/Test.*.java" - "^hadoop-ozone/[a-eghj-z].*/src/test/java/.*/Test.*.java" - "^hadoop-ozone/insight/src/test/java/.*/Test.*.java" - "^hadoop-ozone/interface-client/src/test/java/.*/Test.*.java" - "^hadoop-ozone/interface-storage/src/test/java/.*/Test.*.java" + $(grep -Flr 'org.apache.ozone.test.tag.Native' hadoop-ozone/integration-test) ) filter_changed_files true COUNT_INTEGRATION_CHANGED_FILES=${match_count} @@ -456,29 +448,6 @@ function check_needs_native() { start_end::group_end } -function check_needs_unit_test() { - start_end::group_start "Check if unit test is needed" - local pattern_array=( - "^hadoop-ozone/dev-support/checks/_mvn_unit_report.sh" - "^hadoop-ozone/dev-support/checks/unit.sh" - "^hadoop-ozone/dev-support/checks/junit.sh" - "src/test/java" - "src/test/resources" - ) - local ignore_array=( - "^hadoop-ozone/dist" - "^hadoop-ozone/fault-injection-test/mini-chaos-tests" - "^hadoop-ozone/integration-test" - ) - filter_changed_files true - - if [[ ${match_count} != "0" ]]; then - add_basic_check unit - fi - - start_end::group_end -} - # Counts other files which do not need to trigger any functional test # (i.e. no compose/integration/kubernetes) function get_count_misc_files() { @@ -502,6 +471,7 @@ function get_count_misc_files() { "^hadoop-ozone/dev-support/checks/_mvn_unit_report.sh" "^hadoop-ozone/dev-support/checks/acceptance.sh" "^hadoop-ozone/dev-support/checks/integration.sh" + "^hadoop-ozone/dev-support/checks/junit.sh" "^hadoop-ozone/dev-support/checks/kubernetes.sh" ) filter_changed_files true @@ -534,7 +504,6 @@ function calculate_test_types_to_run() { compose_tests_needed=true integration_tests_needed=true kubernetes_tests_needed=true - add_basic_check unit else echo "All ${COUNT_ALL_CHANGED_FILES} changed files are known to be handled by specific checks." 
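For context on the grep-based ignore_array entry above: it lists every integration-test source that references the Native test tag, so changes to those files keep the dedicated native check instead of counting toward the integration suite (matching the reworked "native test in other module" bats case). A minimal sketch of a test the grep would pick up, assuming the org.apache.ozone.test.tag.Native annotation accepts a library name and is combined with JUnit 5's @Test (the class name and library name below are hypothetical):

    import org.apache.ozone.test.tag.Native;
    import org.junit.jupiter.api.Test;

    class TestSomethingWithNativeLib {      // hypothetical test class
      @Native("some-native-lib")            // assumed usage; the import above is the
      @Test                                 // literal string that grep -Flr keys on
      void readsThroughNativeCode() {
        // test body elided
      }
    }
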
echo @@ -614,6 +583,5 @@ check_needs_dependency check_needs_docs check_needs_findbugs check_needs_native -check_needs_unit_test calculate_test_types_to_run set_outputs diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java index d51dfa416313..422943fff042 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java @@ -27,6 +27,7 @@ import org.apache.hadoop.metrics2.lib.Interns; import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.metrics2.lib.MutableCounterLong; +import org.apache.hadoop.metrics2.lib.MutableQuantiles; import org.apache.hadoop.ozone.OzoneConsts; import java.util.Map; @@ -51,6 +52,11 @@ public final class ContainerClientMetrics { private MutableCounterLong totalWriteChunkCalls; @Metric private MutableCounterLong totalWriteChunkBytes; + private MutableQuantiles[] listBlockLatency; + private MutableQuantiles[] getBlockLatency; + private MutableQuantiles[] getCommittedBlockLengthLatency; + private MutableQuantiles[] readChunkLatency; + private MutableQuantiles[] getSmallFileLatency; private final Map writeChunkCallsByPipeline; private final Map writeChunkBytesByPipeline; private final Map writeChunksCallsByLeaders; @@ -84,6 +90,36 @@ private ContainerClientMetrics() { writeChunkCallsByPipeline = new ConcurrentHashMap<>(); writeChunkBytesByPipeline = new ConcurrentHashMap<>(); writeChunksCallsByLeaders = new ConcurrentHashMap<>(); + + listBlockLatency = new MutableQuantiles[3]; + getBlockLatency = new MutableQuantiles[3]; + getCommittedBlockLengthLatency = new MutableQuantiles[3]; + readChunkLatency = new MutableQuantiles[3]; + getSmallFileLatency = new MutableQuantiles[3]; + int[] intervals = {60, 300, 900}; + for (int i = 0; i < intervals.length; i++) { + int interval = intervals[i]; + listBlockLatency[i] = registry + .newQuantiles("listBlockLatency" + interval + + "s", "ListBlock latency in microseconds", "ops", + "latency", interval); + getBlockLatency[i] = registry + .newQuantiles("getBlockLatency" + interval + + "s", "GetBlock latency in microseconds", "ops", + "latency", interval); + getCommittedBlockLengthLatency[i] = registry + .newQuantiles("getCommittedBlockLengthLatency" + interval + + "s", "GetCommittedBlockLength latency in microseconds", + "ops", "latency", interval); + readChunkLatency[i] = registry + .newQuantiles("readChunkLatency" + interval + + "s", "ReadChunk latency in microseconds", "ops", + "latency", interval); + getSmallFileLatency[i] = registry + .newQuantiles("getSmallFileLatency" + interval + + "s", "GetSmallFile latency in microseconds", "ops", + "latency", interval); + } } public void recordWriteChunk(Pipeline pipeline, long chunkSizeBytes) { @@ -111,7 +147,48 @@ public void recordWriteChunk(Pipeline pipeline, long chunkSizeBytes) { totalWriteChunkBytes.incr(chunkSizeBytes); } - MutableCounterLong getTotalWriteChunkBytes() { + public void addListBlockLatency(long latency) { + for (MutableQuantiles q : listBlockLatency) { + if (q != null) { + q.add(latency); + } + } + } + + public void addGetBlockLatency(long latency) { + for (MutableQuantiles q : getBlockLatency) { + if (q != null) { + q.add(latency); + } + } + } + + public void addGetCommittedBlockLengthLatency(long latency) { + for (MutableQuantiles q : getCommittedBlockLengthLatency) { + if (q != null) 
{ + q.add(latency); + } + } + } + + public void addReadChunkLatency(long latency) { + for (MutableQuantiles q : readChunkLatency) { + if (q != null) { + q.add(latency); + } + } + } + + public void addGetSmallFileLatency(long latency) { + for (MutableQuantiles q : getSmallFileLatency) { + if (q != null) { + q.add(latency); + } + } + } + + @VisibleForTesting + public MutableCounterLong getTotalWriteChunkBytes() { return totalWriteChunkBytes; } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java index 4af9009e16a1..d1992ac931e5 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java @@ -256,6 +256,14 @@ public enum ChecksumCombineMode { tags = ConfigTag.CLIENT) private boolean incrementalChunkList = true; + @Config(key = "stream.putblock.piggybacking", + defaultValue = "false", + type = ConfigType.BOOLEAN, + description = "Allow PutBlock to be piggybacked in WriteChunk " + + "requests if the chunk is small.", + tags = ConfigTag.CLIENT) + private boolean enablePutblockPiggybacking = false; + @PostConstruct public void validate() { Preconditions.checkState(streamBufferSize > 0); @@ -454,6 +462,14 @@ public String getFsDefaultBucketLayout() { return fsDefaultBucketLayout; } + public void setEnablePutblockPiggybacking(boolean enablePutblockPiggybacking) { + this.enablePutblockPiggybacking = enablePutblockPiggybacking; + } + + public boolean getEnablePutblockPiggybacking() { + return enablePutblockPiggybacking; + } + public boolean isDatastreamPipelineMode() { return datastreamPipelineMode; } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java index 52f435dc826d..cb2b85ef1e29 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java @@ -20,7 +20,9 @@ import java.io.IOException; import java.io.InterruptedIOException; +import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -42,6 +44,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc; import org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc.XceiverClientProtocolServiceStub; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; import org.apache.hadoop.hdds.scm.client.ClientTrustManager; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -384,6 +387,12 @@ private XceiverClientReply sendCommandWithRetry( } } + boolean allInService = datanodeList.stream() + .allMatch(dn -> dn.getPersistedOpState() == NodeOperationalState.IN_SERVICE); + if (!allInService) { + datanodeList = sortDatanodeByOperationalState(datanodeList); + } + for (DatanodeDetails dn : datanodeList) { try { if (LOG.isDebugEnabled()) { @@ -440,13 +449,37 @@ private XceiverClientReply sendCommandWithRetry( LOG.debug(message + " on the pipeline {}.", processForDebug(request), pipeline); } else { - LOG.error(message + " on the pipeline {}.", + LOG.warn(message + " on the 
pipeline {}.", request.getCmdType(), pipeline); } throw ioException; } } + private static List sortDatanodeByOperationalState( + List datanodeList) { + List sortedDatanodeList = new ArrayList<>(datanodeList); + // Make IN_SERVICE's Datanode precede all other State's Datanodes. + // This is a stable sort that does not change the order of the + // IN_SERVICE's Datanode. + Comparator byOpStateStable = (first, second) -> { + boolean firstInService = first.getPersistedOpState() == + NodeOperationalState.IN_SERVICE; + boolean secondInService = second.getPersistedOpState() == + NodeOperationalState.IN_SERVICE; + + if (firstInService == secondInService) { + return 0; + } else if (firstInService) { + return -1; + } else { + return 1; + } + }; + sortedDatanodeList.sort(byOpStateStable); + return sortedDatanodeList; + } + @Override public XceiverClientReply sendCommandAsync( ContainerCommandRequestProto request) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java index b66999de1fb2..374e90a24c76 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java @@ -21,6 +21,7 @@ import java.io.EOFException; import java.io.IOException; import java.io.InputStream; +import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -224,18 +225,25 @@ private boolean isConnectivityIssue(IOException ex) { } private void refreshBlockInfo(IOException cause) throws IOException { - LOG.info("Unable to read information for block {} from pipeline {}: {}", + LOG.info("Attempting to update pipeline and block token for block {} from pipeline {}: {}", blockID, pipelineRef.get().getId(), cause.getMessage()); if (refreshFunction != null) { LOG.debug("Re-fetching pipeline and block token for block {}", blockID); BlockLocationInfo blockLocationInfo = refreshFunction.apply(blockID); if (blockLocationInfo == null) { - LOG.debug("No new block location info for block {}", blockID); + LOG.warn("No new block location info for block {}", blockID); } else { - LOG.debug("New pipeline for block {}: {}", blockID, - blockLocationInfo.getPipeline()); setPipeline(blockLocationInfo.getPipeline()); + LOG.info("New pipeline for block {}: {}", blockID, + blockLocationInfo.getPipeline()); + tokenRef.set(blockLocationInfo.getToken()); + if (blockLocationInfo.getToken() != null) { + OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier(); + tokenId.readFromByteArray(tokenRef.get().getIdentifier()); + LOG.info("A new token is added for block {}. 
Expiry: {}", + blockID, Instant.ofEpochMilli(tokenId.getExpiryDate())); + } } } else { throw cause; diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java index 239800746c8b..f29bf490382f 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java @@ -55,6 +55,8 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; + +import static org.apache.hadoop.hdds.DatanodeVersion.COMBINED_PUTBLOCK_WRITECHUNK_RPC; import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putBlockAsync; import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.writeChunkAsync; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; @@ -140,6 +142,7 @@ public class BlockOutputStream extends OutputStream { private int replicationIndex; private Pipeline pipeline; private final ContainerClientMetrics clientMetrics; + private boolean allowPutBlockPiggybacking; /** * Creates a new BlockOutputStream. @@ -211,6 +214,20 @@ public BlockOutputStream( this.clientMetrics = clientMetrics; this.pipeline = pipeline; this.streamBufferArgs = streamBufferArgs; + this.allowPutBlockPiggybacking = config.getEnablePutblockPiggybacking() && + allDataNodesSupportPiggybacking(); + } + + private boolean allDataNodesSupportPiggybacking() { + // return true only if all DataNodes in the pipeline are on a version + // that supports PutBlock piggybacking. + for (DatanodeDetails dn : pipeline.getNodes()) { + if (dn.getCurrentVersion() < + COMBINED_PUTBLOCK_WRITECHUNK_RPC.toProtoValue()) { + return false; + } + } + return true; } void refreshCurrentBuffer() { @@ -499,22 +516,8 @@ ContainerCommandResponseProto> executePutBlock(boolean close, } // if the ioException is not set, putBlock is successful if (getIoException() == null && !force) { - BlockID responseBlockID = BlockID.getFromProtobuf( - e.getPutBlock().getCommittedBlockLength().getBlockID()); - Preconditions.checkState(blockID.get().getContainerBlockID() - .equals(responseBlockID.getContainerBlockID())); - // updates the bcsId of the block - blockID.set(responseBlockID); - if (LOG.isDebugEnabled()) { - LOG.debug( - "Adding index " + asyncReply.getLogIndex() + " flushLength " - + flushPos + " numBuffers " + byteBufferList.size() - + " blockID " + blockID + " bufferPool size" + bufferPool - .getSize() + " currentBufferIndex " + bufferPool - .getCurrentBufferIndex()); - } - // for standalone protocol, logIndex will always be 0. - updateCommitInfo(asyncReply, byteBufferList); + handleSuccessfulPutBlock(e.getPutBlock().getCommittedBlockLength(), + asyncReply, flushPos, byteBufferList); } return e; }, responseExecutor).exceptionally(e -> { @@ -551,7 +554,7 @@ public void flush() throws IOException { } } - private void writeChunk(ChunkBuffer buffer) + private void writeChunkCommon(ChunkBuffer buffer) throws IOException { // This data in the buffer will be pushed to datanode and a reference will // be added to the bufferList. 
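A quick sketch of how a client could opt in to the piggybacking path that BlockOutputStream gains here, assuming OzoneClientConfig is bound to the usual ozone.client config prefix (so the full key would be ozone.client.stream.putblock.piggybacking) and is resolved via OzoneConfiguration.getObject; neither the key prefix nor this call site is part of the patch:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.scm.OzoneClientConfig;

    OzoneConfiguration conf = new OzoneConfiguration();
    // assumed full key: config group prefix + "stream.putblock.piggybacking"
    conf.setBoolean("ozone.client.stream.putblock.piggybacking", true);
    OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
    // Even with the flag on, BlockOutputStream only piggybacks when every datanode
    // in the pipeline reports at least COMBINED_PUTBLOCK_WRITECHUNK_RPC.
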
Once putBlock gets executed, this list will @@ -562,7 +565,18 @@ private void writeChunk(ChunkBuffer buffer) bufferList = new ArrayList<>(); } bufferList.add(buffer); - writeChunkToContainer(buffer.duplicate(0, buffer.position())); + } + + private void writeChunk(ChunkBuffer buffer) + throws IOException { + writeChunkCommon(buffer); + writeChunkToContainer(buffer.duplicate(0, buffer.position()), false); + } + + private void writeChunkAndPutBlock(ChunkBuffer buffer) + throws IOException { + writeChunkCommon(buffer); + writeChunkToContainer(buffer.duplicate(0, buffer.position()), true); } /** @@ -594,14 +608,23 @@ private void handleFlushInternal(boolean close) if (totalDataFlushedLength < writtenDataLength) { refreshCurrentBuffer(); Preconditions.checkArgument(currentBuffer.position() > 0); - if (currentBuffer.hasRemaining()) { - writeChunk(currentBuffer); - } + // This can be a partially filled chunk. Since we are flushing the buffer // here, we just limit this buffer to the current position. So that next // write will happen in new buffer - updateFlushLength(); - executePutBlock(close, false); + if (currentBuffer.hasRemaining()) { + if (allowPutBlockPiggybacking) { + updateFlushLength(); + writeChunkAndPutBlock(currentBuffer); + } else { + writeChunk(currentBuffer); + updateFlushLength(); + executePutBlock(close, false); + } + } else { + updateFlushLength(); + executePutBlock(close, false); + } } else if (close) { // forcing an "empty" putBlock if stream is being closed without new // data since latest flush - we need to send the "EOF" flag @@ -713,7 +736,7 @@ public boolean isClosed() { * @return */ CompletableFuture writeChunkToContainer( - ChunkBuffer chunk) throws IOException { + ChunkBuffer chunk, boolean putBlockPiggybacking) throws IOException { int effectiveChunkSize = chunk.remaining(); final long offset = chunkOffset.getAndAdd(effectiveChunkSize); final ByteString data = chunk.toByteString( @@ -726,6 +749,8 @@ CompletableFuture writeChunkToContainer( .setChecksumData(checksumData.getProtoBufMessage()) .build(); + long flushPos = totalDataFlushedLength; + if (LOG.isDebugEnabled()) { LOG.debug("Writing chunk {} length {} at offset {}", chunkInfo.getChunkName(), effectiveChunkSize, offset); @@ -743,42 +768,93 @@ CompletableFuture writeChunkToContainer( + ", previous = " + previous); } + final List byteBufferList; + CompletableFuture + validateFuture = null; try { - XceiverClientReply asyncReply = writeChunkAsync(xceiverClient, chunkInfo, - blockID.get(), data, tokenString, replicationIndex); - CompletableFuture - respFuture = asyncReply.getResponse(); - CompletableFuture - validateFuture = respFuture.thenApplyAsync(e -> { - try { - validateResponse(e); - } catch (IOException sce) { - respFuture.completeExceptionally(sce); - } - return e; - }, responseExecutor).exceptionally(e -> { - String msg = "Failed to write chunk " + chunkInfo.getChunkName() + - " into block " + blockID; - LOG.debug("{}, exception: {}", msg, e.getLocalizedMessage()); - CompletionException ce = new CompletionException(msg, e); - setIoException(ce); - throw ce; - }); + BlockData blockData = null; + if (config.getIncrementalChunkList()) { updateBlockDataForWriteChunk(chunk); } else { containerBlockData.addChunks(chunkInfo); } + if (putBlockPiggybacking) { + Preconditions.checkNotNull(bufferList); + byteBufferList = bufferList; + bufferList = null; + Preconditions.checkNotNull(byteBufferList); + + blockData = containerBlockData.build(); + LOG.debug("piggyback chunk list {}", blockData); + + if 
(config.getIncrementalChunkList()) { + // remove any chunks in the containerBlockData list. + // since they are sent. + containerBlockData.clearChunks(); + } + } else { + byteBufferList = null; + } + XceiverClientReply asyncReply = writeChunkAsync(xceiverClient, chunkInfo, + blockID.get(), data, tokenString, replicationIndex, blockData); + CompletableFuture + respFuture = asyncReply.getResponse(); + validateFuture = respFuture.thenApplyAsync(e -> { + try { + validateResponse(e); + } catch (IOException sce) { + respFuture.completeExceptionally(sce); + } + // if the ioException is not set, putBlock is successful + if (getIoException() == null && putBlockPiggybacking) { + handleSuccessfulPutBlock(e.getWriteChunk().getCommittedBlockLength(), + asyncReply, flushPos, byteBufferList); + } + return e; + }, responseExecutor).exceptionally(e -> { + String msg = "Failed to write chunk " + chunkInfo.getChunkName() + + " into block " + blockID; + LOG.debug("{}, exception: {}", msg, e.getLocalizedMessage()); + CompletionException ce = new CompletionException(msg, e); + setIoException(ce); + throw ce; + }); clientMetrics.recordWriteChunk(pipeline, chunkInfo.getLen()); - return validateFuture; + } catch (IOException | ExecutionException e) { throw new IOException(EXCEPTION_MSG + e.toString(), e); } catch (InterruptedException ex) { Thread.currentThread().interrupt(); handleInterruptedException(ex, false); } - return null; + if (putBlockPiggybacking) { + putFlushFuture(flushPos, validateFuture); + } + return validateFuture; + } + + private void handleSuccessfulPutBlock( + ContainerProtos.GetCommittedBlockLengthResponseProto e, + XceiverClientReply asyncReply, long flushPos, + List byteBufferList) { + BlockID responseBlockID = BlockID.getFromProtobuf( + e.getBlockID()); + Preconditions.checkState(blockID.get().getContainerBlockID() + .equals(responseBlockID.getContainerBlockID())); + // updates the bcsId of the block + blockID.set(responseBlockID); + if (LOG.isDebugEnabled()) { + LOG.debug( + "Adding index " + asyncReply.getLogIndex() + " flushLength " + + flushPos + " numBuffers " + byteBufferList.size() + + " blockID " + blockID + " bufferPool size" + bufferPool + .getSize() + " currentBufferIndex " + bufferPool + .getCurrentBufferIndex()); + } + // for standalone protocol, logIndex will always be 0. + updateCommitInfo(asyncReply, byteBufferList); } /** diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java index adecc3e4c1e2..c8bfaf3e1bce 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java @@ -89,13 +89,14 @@ public ECBlockOutputStream( @Override public void write(byte[] b, int off, int len) throws IOException { this.currentChunkRspFuture = - writeChunkToContainer(ChunkBuffer.wrap(ByteBuffer.wrap(b, off, len))); + writeChunkToContainer( + ChunkBuffer.wrap(ByteBuffer.wrap(b, off, len)), false); updateWrittenDataLength(len); } public CompletableFuture write( ByteBuffer buff) throws IOException { - return writeChunkToContainer(ChunkBuffer.wrap(buff)); + return writeChunkToContainer(ChunkBuffer.wrap(buff), false); } public CompletableFuture queryNode(HddsProtos.NodeOperationalState opState, * Allows a list of hosts to be decommissioned. 
The hosts are identified * by their hostname and optionally port in the format foo.com:port. * @param hosts A list of hostnames, optionally with port + * @param force true to forcefully decommission Datanodes * @throws IOException * @return A list of DatanodeAdminError for any hosts which failed to * decommission */ - List decommissionNodes(List hosts) + List decommissionNodes(List hosts, boolean force) throws IOException; /** @@ -356,13 +357,20 @@ Map> getSafeModeRuleStatuses() /** * Start ContainerBalancer. */ + @SuppressWarnings("checkstyle:parameternumber") StartContainerBalancerResponseProto startContainerBalancer( Optional threshold, Optional iterations, Optional maxDatanodesPercentageToInvolvePerIteration, Optional maxSizeToMovePerIterationInGB, Optional maxSizeEnteringTargetInGB, - Optional maxSizeLeavingSourceInGB) throws IOException; + Optional maxSizeLeavingSourceInGB, + Optional balancingInterval, + Optional moveTimeout, + Optional moveReplicationTimeout, + Optional networkTopologyEnable, + Optional includeNodes, + Optional excludeNodes) throws IOException; /** * Stop ContainerBalancer. diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java index f6f013259c59..1f3d0f02e6de 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java @@ -30,6 +30,7 @@ import java.util.LinkedHashSet; import java.util.List; import java.util.NavigableMap; +import java.util.Objects; import java.util.TreeMap; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.locks.ReadWriteLock; @@ -232,10 +233,10 @@ public boolean contains(Node node) { private boolean containsNode(Node node) { Node parent = node.getParent(); - while (parent != null && parent != clusterTree) { + while (parent != null && !Objects.equals(parent, clusterTree)) { parent = parent.getParent(); } - return parent == clusterTree; + return Objects.equals(parent, clusterTree); } /** @@ -249,7 +250,9 @@ public boolean isSameAncestor(Node node1, Node node2, int ancestorGen) { } netlock.readLock().lock(); try { - return node1.getAncestor(ancestorGen) == node2.getAncestor(ancestorGen); + Node ancestor1 = node1.getAncestor(ancestorGen); + Node ancestor2 = node2.getAncestor(ancestorGen); + return Objects.equals(ancestor1, ancestor2); } finally { netlock.readLock().unlock(); } @@ -268,7 +271,7 @@ public boolean isSameParent(Node node1, Node node2) { try { node1 = node1.getParent(); node2 = node2.getParent(); - return node1 == node2; + return Objects.equals(node1, node2); } finally { netlock.readLock().unlock(); } @@ -713,8 +716,7 @@ private Node chooseNodeInternal(String scope, int leafIndex, */ @Override public int getDistanceCost(Node node1, Node node2) { - if ((node1 != null && node1.equals(node2)) || - (node1 == null && node2 == null)) { + if (Objects.equals(node1, node2)) { return 0; } if (node1 == null || node2 == null) { @@ -736,12 +738,9 @@ public int getDistanceCost(Node node1, Node node2) { netlock.readLock().lock(); try { Node ancestor1 = node1.getAncestor(level1 - 1); - boolean node1Topology = (ancestor1 != null && clusterTree != null && - !ancestor1.equals(clusterTree)) || (ancestor1 != clusterTree); Node ancestor2 = node2.getAncestor(level2 - 1); - boolean node2Topology = (ancestor2 != null && clusterTree != null && - 
!ancestor2.equals(clusterTree)) || (ancestor2 != clusterTree); - if (node1Topology || node2Topology) { + if (!Objects.equals(ancestor1, clusterTree) || + !Objects.equals(ancestor2, clusterTree)) { LOG.debug("One of the nodes is outside of network topology"); return Integer.MAX_VALUE; } @@ -755,7 +754,7 @@ public int getDistanceCost(Node node1, Node node2) { level2--; cost += node2 == null ? 0 : node2.getCost(); } - while (node1 != null && node2 != null && !node1.equals(node2)) { + while (node1 != null && node2 != null && !Objects.equals(node1, node2)) { node1 = node1.getParent(); node2 = node2.getParent(); cost += node1 == null ? 0 : node1.getCost(); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java index 663f317a3b3b..90838366317f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java @@ -245,7 +245,7 @@ List queryNode(HddsProtos.NodeOperationalState opState, HddsProtos.Node queryNode(UUID uuid) throws IOException; - List decommissionNodes(List nodes) + List decommissionNodes(List nodes, boolean force) throws IOException; List recommissionNodes(List nodes) @@ -402,13 +402,20 @@ Map> getSafeModeRuleStatuses() * @return {@link StartContainerBalancerResponseProto} that contains the * start status and an optional message. */ + @SuppressWarnings("checkstyle:parameternumber") StartContainerBalancerResponseProto startContainerBalancer( Optional threshold, Optional iterations, Optional maxDatanodesPercentageToInvolvePerIteration, Optional maxSizeToMovePerIterationInGB, Optional maxSizeEnteringTargetInGB, - Optional maxSizeLeavingSourceInGB) throws IOException; + Optional maxSizeLeavingSourceInGB, + Optional balancingInterval, + Optional moveTimeout, + Optional moveReplicationTimeout, + Optional networkTopologyEnable, + Optional includeNodes, + Optional excludeNodes) throws IOException; /** * Stop ContainerBalancer. diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java index 86336e9bc7b6..d3f39c023b73 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadChunkResponseProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadContainerResponseProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ListBlockResponseProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkResponseProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; import org.apache.hadoop.ozone.common.ChunkBuffer; @@ -213,6 +214,28 @@ public static ContainerCommandResponseProto getPutFileResponseSuccess( .build(); } + /** + * Gets a response for the WriteChunk RPC. 
+ * @param msg - ContainerCommandRequestProto + * @return - ContainerCommandResponseProto + */ + public static ContainerCommandResponseProto getWriteChunkResponseSuccess( + ContainerCommandRequestProto msg, BlockData blockData) { + + WriteChunkResponseProto.Builder writeChunk = + WriteChunkResponseProto.newBuilder(); + if (blockData != null) { + writeChunk.setCommittedBlockLength( + getCommittedBlockLengthResponseBuilder( + blockData.getSize(), blockData.getBlockID())); + + } + return getSuccessResponseBuilder(msg) + .setCmdType(Type.WriteChunk) + .setWriteChunk(writeChunk) + .build(); + } + /** * Gets a response to the read small file call. * @param request - Msg diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java index 1453ae56b4f5..5f94f6d08474 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java @@ -29,6 +29,9 @@ import java.util.concurrent.ExecutionException; import java.util.function.Function; +import io.opentracing.Scope; +import io.opentracing.Span; +import io.opentracing.util.GlobalTracer; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -132,6 +135,10 @@ public static ListBlockResponseProto listBlock(XceiverClientSpi xceiverClient, if (token != null) { builder.setEncodedToken(token.encodeToUrlString()); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + builder.setTraceID(traceId); + } ContainerCommandRequestProto request = builder.build(); ContainerCommandResponseProto response = @@ -150,14 +157,17 @@ static T tryEachDatanode(Pipeline pipeline, try { return op.apply(d); } catch (IOException e) { + Span span = GlobalTracer.get().activeSpan(); if (e instanceof StorageContainerException) { StorageContainerException sce = (StorageContainerException)e; // Block token expired. There's no point retrying other DN. // Throw the exception to request a new block token right away. 
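A small illustration of the span-event pattern the tryEachDatanode changes rely on; GlobalTracer and Span come from io.opentracing, and the null check is only a defensive addition for callers with no active span (it is not part of the patch):

    import io.opentracing.Span;
    import io.opentracing.util.GlobalTracer;

    Span span = GlobalTracer.get().activeSpan();
    if (span != null) {
      // records a timestamped event on whichever span the caller activated,
      // e.g. the readChunk span created further down in this class
      span.log("retrying request on next datanode in the pipeline");
    }
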
if (sce.getResult() == BLOCK_TOKEN_VERIFICATION_FAILED) { + span.log("block token verification failed at DN " + d); throw e; } } + span.log("failed to connect to DN " + d); excluded.add(d); if (excluded.size() < pipeline.size()) { LOG.warn(toErrorMessage.apply(d) @@ -215,6 +225,10 @@ private static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient, List validators, ContainerCommandRequestProto.Builder builder, DatanodeDetails datanode) throws IOException { + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + builder.setTraceID(traceId); + } final ContainerCommandRequestProto request = builder .setDatanodeUuid(datanode.getUuidString()).build(); ContainerCommandResponseProto response = @@ -250,6 +264,10 @@ private static GetBlockResponseProto getBlock(XceiverClientSpi xceiverClient, if (token != null) { builder.setEncodedToken(token.encodeToUrlString()); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + builder.setTraceID(traceId); + } ContainerCommandRequestProto request = builder.build(); ContainerCommandResponseProto response = xceiverClient.sendCommand(request, getValidatorList()); @@ -353,10 +371,19 @@ public static ContainerProtos.ReadChunkResponseProto readChunk( builder.setEncodedToken(token.encodeToUrlString()); } - return tryEachDatanode(xceiverClient.getPipeline(), - d -> readChunk(xceiverClient, chunk, blockID, - validators, builder, d), - d -> toErrorMessage(chunk, blockID, d)); + Span span = GlobalTracer.get() + .buildSpan("readChunk").start(); + try (Scope ignored = GlobalTracer.get().activateSpan(span)) { + span.setTag("offset", chunk.getOffset()) + .setTag("length", chunk.getLen()) + .setTag("block", blockID.toString()); + return tryEachDatanode(xceiverClient.getPipeline(), + d -> readChunk(xceiverClient, chunk, blockID, + validators, builder, d), + d -> toErrorMessage(chunk, blockID, d)); + } finally { + span.finish(); + } } private static ContainerProtos.ReadChunkResponseProto readChunk( @@ -364,10 +391,15 @@ private static ContainerProtos.ReadChunkResponseProto readChunk( List validators, ContainerCommandRequestProto.Builder builder, DatanodeDetails d) throws IOException { - final ContainerCommandRequestProto request = builder - .setDatanodeUuid(d.getUuidString()).build(); + ContainerCommandRequestProto.Builder requestBuilder = builder + .setDatanodeUuid(d.getUuidString()); + Span span = GlobalTracer.get().activeSpan(); + String traceId = TracingUtil.exportSpan(span); + if (traceId != null) { + requestBuilder = requestBuilder.setTraceID(traceId); + } ContainerCommandResponseProto reply = - xceiverClient.sendCommand(request, validators); + xceiverClient.sendCommand(requestBuilder.build(), validators); final ReadChunkResponseProto response = reply.getReadChunk(); final long readLen = getLen(response); if (readLen != chunk.getLen()) { @@ -406,8 +438,10 @@ static long getLen(ReadChunkResponseProto response) { */ public static XceiverClientReply writeChunkAsync( XceiverClientSpi xceiverClient, ChunkInfo chunk, BlockID blockID, - ByteString data, String tokenString, int replicationIndex) + ByteString data, String tokenString, + int replicationIndex, BlockData blockData) throws IOException, ExecutionException, InterruptedException { + WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto.newBuilder() .setBlockID(DatanodeBlockID.newBuilder() @@ -418,6 +452,12 @@ public static XceiverClientReply writeChunkAsync( .build()) .setChunkData(chunk) .setData(data); + if (blockData != null) { + 
PutBlockRequestProto.Builder createBlockRequest = + PutBlockRequestProto.newBuilder() + .setBlockData(blockData); + writeChunkRequest.setBlock(createBlockRequest); + } String id = xceiverClient.getPipeline().getFirstNode().getUuidString(); ContainerCommandRequestProto.Builder builder = ContainerCommandRequestProto.newBuilder() @@ -549,6 +589,11 @@ public static void createContainer(XceiverClientSpi client, if (encodedToken != null) { request.setEncodedToken(encodedToken); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + request.setTraceID(traceId); + } + request.setCmdType(ContainerProtos.Type.CreateContainer); request.setContainerID(containerID); request.setCreateContainer(createRequest.build()); @@ -578,6 +623,10 @@ public static void deleteContainer(XceiverClientSpi client, long containerID, if (encodedToken != null) { request.setEncodedToken(encodedToken); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + request.setTraceID(traceId); + } client.sendCommand(request.build(), getValidatorList()); } @@ -600,6 +649,10 @@ public static void closeContainer(XceiverClientSpi client, if (encodedToken != null) { request.setEncodedToken(encodedToken); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + request.setTraceID(traceId); + } client.sendCommand(request.build(), getValidatorList()); } @@ -623,6 +676,10 @@ public static ReadContainerResponseProto readContainer( if (encodedToken != null) { request.setEncodedToken(encodedToken); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + request.setTraceID(traceId); + } ContainerCommandResponseProto response = client.sendCommand(request.build(), getValidatorList()); @@ -658,6 +715,10 @@ public static GetSmallFileResponseProto readSmallFile(XceiverClientSpi client, if (token != null) { builder.setEncodedToken(token.encodeToUrlString()); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + builder.setTraceID(traceId); + } ContainerCommandRequestProto request = builder.build(); ContainerCommandResponseProto response = client.sendCommand(request, getValidatorList()); @@ -763,6 +824,10 @@ public static List toValidatorList(Validator validator) { if (token != null) { builder.setEncodedToken(token.encodeToUrlString()); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + builder.setTraceID(traceId); + } ContainerCommandRequestProto request = builder.build(); Map responses = xceiverClient.sendCommandOnAllNodes(request); @@ -788,6 +853,10 @@ public static List toValidatorList(Validator validator) { if (encodedToken != null) { request.setEncodedToken(encodedToken); } + String traceId = TracingUtil.exportCurrentSpan(); + if (traceId != null) { + request.setTraceID(traceId); + } Map responses = client.sendCommandOnAllNodes(request.build()); for (Map.Entry entry : diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java index 538dd522d019..489cf3c41ce0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java @@ -20,9 +20,11 @@ import java.io.File; import java.io.IOException; +import java.util.HashMap; import java.util.List; import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.core.type.TypeReference; import 
com.fasterxml.jackson.databind.MappingIterator; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; @@ -81,6 +83,14 @@ public static JsonNode readTree(String content) throws IOException { return MAPPER.readTree(content); } + public static List> readTreeAsListOfMaps(String json) + throws IOException { + return MAPPER.readValue(json, + new TypeReference>>() { + }); + } + + /** * Utility to sequentially write a large collection of items to a file. */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java index b968d407232c..29bd847319ea 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java @@ -139,6 +139,16 @@ public static boolean isTracingEnabled( ScmConfigKeys.HDDS_TRACING_ENABLED_DEFAULT); } + /** + * Execute {@code runnable} inside an activated new span. + */ + public static void executeInNewSpan(String spanName, + CheckedRunnable runnable) throws E { + Span span = GlobalTracer.get() + .buildSpan(spanName).start(); + executeInSpan(span, runnable); + } + /** * Execute {@code supplier} inside an activated new span. */ diff --git a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml index 0791ffb9eab0..f68fa91db864 100644 --- a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml @@ -15,18 +15,6 @@ limitations under the License. --> - - - - - - - - - - - - diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index f4ffc4ef278e..9eb5b909ccea 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -432,11 +432,10 @@ public TransactionContext startTransaction(RaftClientRequest request) if (!blockAlreadyFinalized) { // create the log entry proto final WriteChunkRequestProto commitWriteChunkProto = - WriteChunkRequestProto.newBuilder() - .setBlockID(write.getBlockID()) - .setChunkData(write.getChunkData()) + WriteChunkRequestProto.newBuilder(write) // skipping the data field as it is // already set in statemachine data proto + .clearData() .build(); ContainerCommandRequestProto commitContainerCommandProto = ContainerCommandRequestProto diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java deleted file mode 100644 index 0a2375b4f44e..000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AbstractFuture.java +++ /dev/null @@ -1,1295 +0,0 @@ -/* - * Copyright (C) 2007 The Guava Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * Some portions of this class have been modified to make it functional in this - * package. - */ -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.annotations.Beta; -import com.google.common.annotations.GwtCompatible; -import com.google.common.base.Preconditions; -import static com.google.common.base.Preconditions.checkNotNull; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListeningExecutorService; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.SettableFuture; -import com.google.common.util.concurrent.Uninterruptibles; -import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater - .newUpdater; - -import jakarta.annotation.Nullable; -import java.security.AccessController; -import java.security.PrivilegedActionException; -import java.security.PrivilegedExceptionAction; -import java.util.concurrent.CancellationException; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executor; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; -import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy; -import java.util.concurrent.locks.LockSupport; -import java.util.logging.Level; -import java.util.logging.Logger; - -/** - * An abstract implementation of {@link ListenableFuture}, intended for - * advanced users only. More common ways to create a {@code ListenableFuture} - * include instantiating a {@link SettableFuture}, submitting a task to a - * {@link ListeningExecutorService}, and deriving a {@code Future} from an - * existing one, typically using methods like {@link Futures#transform - * (ListenableFuture, com.google.common.base.Function) Futures.transform} - * and its overloaded versions. - *

<p>
- * <p>
This class implements all methods in {@code ListenableFuture}. - * Subclasses should provide a way to set the result of the computation - * through the protected methods {@link #set(Object)}, - * {@link #setFuture(ListenableFuture)} and {@link #setException(Throwable)}. - * Subclasses may also override {@link #interruptTask()}, which will be - * invoked automatically if a call to {@link #cancel(boolean) cancel(true)} - * succeeds in canceling the future. Subclasses should rarely override other - * methods. - */ - -@GwtCompatible(emulated = true) -public abstract class AbstractFuture implements ListenableFuture { - // NOTE: Whenever both tests are cheap and functional, it's faster to use &, - // | instead of &&, || - - private static final boolean GENERATE_CANCELLATION_CAUSES = - Boolean.parseBoolean( - System.getProperty("guava.concurrent.generate_cancellation_cause", - "false")); - - /** - * A less abstract subclass of AbstractFuture. This can be used to optimize - * setFuture by ensuring that {@link #get} calls exactly the implementation - * of {@link AbstractFuture#get}. - */ - abstract static class TrustedFuture extends AbstractFuture { - @Override - public final V get() throws InterruptedException, ExecutionException { - return super.get(); - } - - @Override - public final V get(long timeout, TimeUnit unit) - throws InterruptedException, ExecutionException, TimeoutException { - return super.get(timeout, unit); - } - - @Override - public final boolean isDone() { - return super.isDone(); - } - - @Override - public final boolean isCancelled() { - return super.isCancelled(); - } - - @Override - public final void addListener(Runnable listener, Executor executor) { - super.addListener(listener, executor); - } - - @Override - public final boolean cancel(boolean mayInterruptIfRunning) { - return super.cancel(mayInterruptIfRunning); - } - } - - // Logger to log exceptions caught when running listeners. - private static final Logger LOG = Logger - .getLogger(AbstractFuture.class.getName()); - - // A heuristic for timed gets. If the remaining timeout is less than this, - // spin instead of - // blocking. This value is what AbstractQueuedSynchronizer uses. - private static final long SPIN_THRESHOLD_NANOS = 1000L; - - private static final AtomicHelper ATOMIC_HELPER; - - static { - AtomicHelper helper; - - try { - helper = new UnsafeAtomicHelper(); - } catch (Throwable unsafeFailure) { - // catch absolutely everything and fall through to our 'SafeAtomicHelper' - // The access control checks that ARFU does means the caller class has - // to be AbstractFuture - // instead of SafeAtomicHelper, so we annoyingly define these here - try { - helper = - new SafeAtomicHelper( - newUpdater(Waiter.class, Thread.class, "thread"), - newUpdater(Waiter.class, Waiter.class, "next"), - newUpdater(AbstractFuture.class, Waiter.class, "waiters"), - newUpdater(AbstractFuture.class, Listener.class, "listeners"), - newUpdater(AbstractFuture.class, Object.class, "value")); - } catch (Throwable atomicReferenceFieldUpdaterFailure) { - // Some Android 5.0.x Samsung devices have bugs in JDK reflection APIs - // that cause getDeclaredField to throw a NoSuchFieldException when - // the field is definitely there. - // For these users fallback to a suboptimal implementation, based on - // synchronized. This will be a definite performance hit to those users. 
- LOG.log(Level.SEVERE, "UnsafeAtomicHelper is broken!", unsafeFailure); - LOG.log( - Level.SEVERE, "SafeAtomicHelper is broken!", - atomicReferenceFieldUpdaterFailure); - helper = new SynchronizedHelper(); - } - } - ATOMIC_HELPER = helper; - - // Prevent rare disastrous classloading in first call to LockSupport.park. - // See: https://bugs.openjdk.java.net/browse/JDK-8074773 - @SuppressWarnings("unused") - Class ensureLoaded = LockSupport.class; - } - - /** - * Waiter links form a Treiber stack, in the {@link #waiters} field. - */ - @SuppressWarnings("visibilitymodifier") - private static final class Waiter { - static final Waiter TOMBSTONE = new Waiter(false /* ignored param */); - - @Nullable volatile Thread thread; - @Nullable volatile Waiter next; - - /** - * Constructor for the TOMBSTONE, avoids use of ATOMIC_HELPER in case this - * class is loaded before the ATOMIC_HELPER. Apparently this is possible - * on some android platforms. - */ - Waiter(boolean unused) { - } - - Waiter() { - // avoid volatile write, write is made visible by subsequent CAS on - // waiters field - ATOMIC_HELPER.putThread(this, Thread.currentThread()); - } - - // non-volatile write to the next field. Should be made visible by - // subsequent CAS on waiters field. - void setNext(Waiter next) { - ATOMIC_HELPER.putNext(this, next); - } - - void unpark() { - // This is racy with removeWaiter. The consequence of the race is that - // we may spuriously call unpark even though the thread has already - // removed itself from the list. But even if we did use a CAS, that - // race would still exist (it would just be ever so slightly smaller). - Thread w = thread; - if (w != null) { - thread = null; - LockSupport.unpark(w); - } - } - } - - /** - * Marks the given node as 'deleted' (null waiter) and then scans the list - * to unlink all deleted nodes. This is an O(n) operation in the common - * case (and O(n^2) in the worst), but we are saved by two things. - *

- * <ul>
- * <li>This is only called when a waiting thread times out or is
- * interrupted. Both of which should be rare.
- * <li>The waiters list should be very short.
- * </ul>
- */ - private void removeWaiter(Waiter node) { - node.thread = null; // mark as 'deleted' - restart: - while (true) { - Waiter pred = null; - Waiter curr = waiters; - if (curr == Waiter.TOMBSTONE) { - return; // give up if someone is calling complete - } - Waiter succ; - while (curr != null) { - succ = curr.next; - if (curr.thread != null) { // we aren't unlinking this node, update - // pred. - pred = curr; - } else if (pred != null) { // We are unlinking this node and it has a - // predecessor. - pred.next = succ; - if (pred.thread == null) { // We raced with another node that - // unlinked pred. Restart. - continue restart; - } - } else if (!ATOMIC_HELPER - .casWaiters(this, curr, succ)) { // We are unlinking head - continue restart; // We raced with an add or complete - } - curr = succ; - } - break; - } - } - - /** - * Listeners also form a stack through the {@link #listeners} field. - */ - @SuppressWarnings("visibilitymodifier") - private static final class Listener { - static final Listener TOMBSTONE = new Listener(null, null); - final Runnable task; - final Executor executor; - - // writes to next are made visible by subsequent CAS's on the listeners - // field - @Nullable Listener next; - - Listener(Runnable task, Executor executor) { - this.task = task; - this.executor = executor; - } - } - - /** - * A special value to represent {@code null}. - */ - private static final Object NULL = new Object(); - - /** - * A special value to represent failure, when {@link #setException} is - * called successfully. - */ - @SuppressWarnings("visibilitymodifier") - private static final class Failure { - static final Failure FALLBACK_INSTANCE = - new Failure( - new Throwable("Failure occurred while trying to finish a future.") { - @Override - public synchronized Throwable fillInStackTrace() { - return this; // no stack trace - } - }); - final Throwable exception; - - Failure(Throwable exception) { - this.exception = checkNotNull(exception); - } - } - - /** - * A special value to represent cancellation and the 'wasInterrupted' bit. - */ - @SuppressWarnings("visibilitymodifier") - private static final class Cancellation { - final boolean wasInterrupted; - @Nullable final Throwable cause; - - Cancellation(boolean wasInterrupted, @Nullable Throwable cause) { - this.wasInterrupted = wasInterrupted; - this.cause = cause; - } - } - - /** - * A special value that encodes the 'setFuture' state. - */ - @SuppressWarnings("visibilitymodifier") - private static final class SetFuture implements Runnable { - final AbstractFuture owner; - final ListenableFuture future; - - SetFuture(AbstractFuture owner, ListenableFuture future) { - this.owner = owner; - this.future = future; - } - - @Override - public void run() { - if (owner.value != this) { - // nothing to do, we must have been cancelled, don't bother inspecting - // the future. - return; - } - Object valueToSet = getFutureValue(future); - if (ATOMIC_HELPER.casValue(owner, this, valueToSet)) { - complete(owner); - } - } - } - - /** - * This field encodes the current state of the future. - *

- * <p>The valid values are:
- * <ul>
- * <li>{@code null} initial state, nothing has happened.
- * <li>{@link Cancellation} terminal state, {@code cancel} was called.
- * <li>{@link Failure} terminal state, {@code setException} was called.
- * <li>{@link SetFuture} intermediate state, {@code setFuture} was called.
- * <li>{@link #NULL} terminal state, {@code set(null)} was called.
- * <li>Any other non-null value, terminal state, {@code set} was called with
- * a non-null argument.
- * </ul>
- */ - private volatile Object value; - - /** - * All listeners. - */ - private volatile Listener listeners; - - /** - * All waiting threads. - */ - private volatile Waiter waiters; - - /** - * Constructor for use by subclasses. - */ - protected AbstractFuture() { - } - - // Gets and Timed Gets - // - // * Be responsive to interruption - // * Don't create Waiter nodes if you aren't going to park, this helps - // reduce contention on the waiters field. - // * Future completion is defined by when #value becomes non-null/non - // SetFuture - // * Future completion can be observed if the waiters field contains a - // TOMBSTONE - - // Timed Get - // There are a few design constraints to consider - // * We want to be responsive to small timeouts, unpark() has non trivial - // latency overheads (I have observed 12 micros on 64 bit linux systems to - // wake up a parked thread). So if the timeout is small we shouldn't park(). - // This needs to be traded off with the cpu overhead of spinning, so we use - // SPIN_THRESHOLD_NANOS which is what AbstractQueuedSynchronizer uses for - // similar purposes. - // * We want to behave reasonably for timeouts of 0 - // * We are more responsive to completion than timeouts. This is because - // parkNanos depends on system scheduling and as such we could either miss - // our deadline, or unpark() could be delayed so that it looks like we - // timed out even though we didn't. For comparison FutureTask respects - // completion preferably and AQS is non-deterministic (depends on where in - // the queue the waiter is). If we wanted to be strict about it, we could - // store the unpark() time in the Waiter node and we could use that to make - // a decision about whether or not we timed out prior to being unparked. - - /* - * Improve the documentation of when InterruptedException is thrown. Our - * behavior matches the JDK's, but the JDK's documentation is misleading. - */ - - /** - * {@inheritDoc} - *

- * <p>
The default {@link AbstractFuture} implementation throws {@code - * InterruptedException} if the current thread is interrupted before or - * during the call, even if the value is already available. - * - * @throws InterruptedException if the current thread was interrupted - * before or during the call - * (optional but recommended). - * @throws CancellationException {@inheritDoc} - */ - @Override - public V get(long timeout, TimeUnit unit) - throws InterruptedException, TimeoutException, ExecutionException { - // NOTE: if timeout < 0, remainingNanos will be < 0 and we will fall into - // the while(true) loop at the bottom and throw a timeoutexception. - long remainingNanos = unit - .toNanos(timeout); // we rely on the implicit null check on unit. - if (Thread.interrupted()) { - throw new InterruptedException(); - } - Object localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - // we delay calling nanoTime until we know we will need to either park or - // spin - final long endNanos = remainingNanos > 0 ? System - .nanoTime() + remainingNanos : 0; - long_wait_loop: - if (remainingNanos >= SPIN_THRESHOLD_NANOS) { - Waiter oldHead = waiters; - if (oldHead != Waiter.TOMBSTONE) { - Waiter node = new Waiter(); - do { - node.setNext(oldHead); - if (ATOMIC_HELPER.casWaiters(this, oldHead, node)) { - while (true) { - LockSupport.parkNanos(this, remainingNanos); - // Check interruption first, if we woke up due to interruption - // we need to honor that. - if (Thread.interrupted()) { - removeWaiter(node); - throw new InterruptedException(); - } - - // Otherwise re-read and check doneness. If we loop then it must - // have been a spurious wakeup - localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - - // timed out? - remainingNanos = endNanos - System.nanoTime(); - if (remainingNanos < SPIN_THRESHOLD_NANOS) { - // Remove the waiter, one way or another we are done parking - // this thread. - removeWaiter(node); - break long_wait_loop; // jump down to the busy wait loop - } - } - } - oldHead = waiters; // re-read and loop. - } while (oldHead != Waiter.TOMBSTONE); - } - // re-read value, if we get here then we must have observed a TOMBSTONE - // while trying to add a waiter. - return getDoneValue(value); - } - // If we get here then we have remainingNanos < SPIN_THRESHOLD_NANOS and - // there is no node on the waiters list - while (remainingNanos > 0) { - localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - if (Thread.interrupted()) { - throw new InterruptedException(); - } - remainingNanos = endNanos - System.nanoTime(); - } - throw new TimeoutException(); - } - - /* - * Improve the documentation of when InterruptedException is thrown. Our - * behavior matches the JDK's, but the JDK's documentation is misleading. - */ - - /** - * {@inheritDoc} - *

- * <p>
The default {@link AbstractFuture} implementation throws {@code - * InterruptedException} if the current thread is interrupted before or - * during the call, even if the value is already available. - * - * @throws InterruptedException if the current thread was interrupted - * before or during the call - * (optional but recommended). - * @throws CancellationException {@inheritDoc} - */ - @Override - public V get() throws InterruptedException, ExecutionException { - if (Thread.interrupted()) { - throw new InterruptedException(); - } - Object localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - Waiter oldHead = waiters; - if (oldHead != Waiter.TOMBSTONE) { - Waiter node = new Waiter(); - do { - node.setNext(oldHead); - if (ATOMIC_HELPER.casWaiters(this, oldHead, node)) { - // we are on the stack, now wait for completion. - while (true) { - LockSupport.park(this); - // Check interruption first, if we woke up due to interruption we - // need to honor that. - if (Thread.interrupted()) { - removeWaiter(node); - throw new InterruptedException(); - } - // Otherwise re-read and check doneness. If we loop then it must - // have been a spurious wakeup - localValue = value; - if (localValue != null & !(localValue instanceof SetFuture)) { - return getDoneValue(localValue); - } - } - } - oldHead = waiters; // re-read and loop. - } while (oldHead != Waiter.TOMBSTONE); - } - // re-read value, if we get here then we must have observed a TOMBSTONE - // while trying to add a waiter. - return getDoneValue(value); - } - - /** - * Unboxes {@code obj}. Assumes that obj is not {@code null} or a - * {@link SetFuture}. - */ - private V getDoneValue(Object obj) throws ExecutionException { - // While this seems like it might be too branch-y, simple benchmarking - // proves it to be unmeasurable (comparing done AbstractFutures with - // immediateFuture) - if (obj instanceof Cancellation) { - throw cancellationExceptionWithCause( - "Task was cancelled.", ((Cancellation) obj).cause); - } else if (obj instanceof Failure) { - throw new ExecutionException(((Failure) obj).exception); - } else if (obj == NULL) { - return null; - } else { - @SuppressWarnings("unchecked") // this is the only other option - V asV = (V) obj; - return asV; - } - } - - @Override - public boolean isDone() { - final Object localValue = value; - return localValue != null & !(localValue instanceof SetFuture); - } - - @Override - public boolean isCancelled() { - final Object localValue = value; - return localValue instanceof Cancellation; - } - - /** - * {@inheritDoc} - *

- * <p>
If a cancellation attempt succeeds on a {@code Future} that had - * previously been {@linkplain#setFuture set asynchronously}, then the - * cancellation will also be propagated to the delegate {@code Future} that - * was supplied in the {@code setFuture} call. - */ - @Override - public boolean cancel(boolean mayInterruptIfRunning) { - Object localValue = value; - boolean rValue = false; - if (localValue == null | localValue instanceof SetFuture) { - // Try to delay allocating the exception. At this point we may still - // lose the CAS, but it is certainly less likely. - Throwable cause = - GENERATE_CANCELLATION_CAUSES - ? new CancellationException("Future.cancel() was called.") - : null; - Object valueToSet = new Cancellation(mayInterruptIfRunning, cause); - AbstractFuture abstractFuture = this; - while (true) { - if (ATOMIC_HELPER.casValue(abstractFuture, localValue, valueToSet)) { - rValue = true; - // We call interuptTask before calling complete(), which is - // consistent with FutureTask - if (mayInterruptIfRunning) { - abstractFuture.interruptTask(); - } - complete(abstractFuture); - if (localValue instanceof SetFuture) { - // propagate cancellation to the future set in setfuture, this is - // racy, and we don't care if we are successful or not. - ListenableFuture futureToPropagateTo = ((SetFuture) localValue) - .future; - if (futureToPropagateTo instanceof TrustedFuture) { - // If the future is a TrustedFuture then we specifically avoid - // calling cancel() this has 2 benefits - // 1. for long chains of futures strung together with setFuture - // we consume less stack - // 2. we avoid allocating Cancellation objects at every level of - // the cancellation chain - // We can only do this for TrustedFuture, because - // TrustedFuture.cancel is final and does nothing but delegate - // to this method. - AbstractFuture trusted = (AbstractFuture) - futureToPropagateTo; - localValue = trusted.value; - if (localValue == null | localValue instanceof SetFuture) { - abstractFuture = trusted; - continue; // loop back up and try to complete the new future - } - } else { - // not a TrustedFuture, call cancel directly. - futureToPropagateTo.cancel(mayInterruptIfRunning); - } - } - break; - } - // obj changed, reread - localValue = abstractFuture.value; - if (!(localValue instanceof SetFuture)) { - // obj cannot be null at this point, because value can only change - // from null to non-null. So if value changed (and it did since we - // lost the CAS), then it cannot be null and since it isn't a - // SetFuture, then the future must be done and we should exit the loop - break; - } - } - } - return rValue; - } - - /** - * Subclasses can override this method to implement interruption of the - * future's computation. The method is invoked automatically by a - * successful call to {@link #cancel(boolean) cancel(true)}. - *

- * <p>
The default implementation does nothing. - * - * @since 10.0 - */ - protected void interruptTask() { - } - - /** - * Returns true if this future was cancelled with {@code - * mayInterruptIfRunning} set to {@code true}. - * - * @since 14.0 - */ - protected final boolean wasInterrupted() { - final Object localValue = value; - return (localValue instanceof Cancellation) && ((Cancellation) localValue) - .wasInterrupted; - } - - /** - * {@inheritDoc} - * - * @since 10.0 - */ - @Override - public void addListener(Runnable listener, Executor executor) { - checkNotNull(listener, "Runnable was null."); - checkNotNull(executor, "Executor was null."); - Listener oldHead = listeners; - if (oldHead != Listener.TOMBSTONE) { - Listener newNode = new Listener(listener, executor); - do { - newNode.next = oldHead; - if (ATOMIC_HELPER.casListeners(this, oldHead, newNode)) { - return; - } - oldHead = listeners; // re-read - } while (oldHead != Listener.TOMBSTONE); - } - // If we get here then the Listener TOMBSTONE was set, which means the - // future is done, call the listener. - executeListener(listener, executor); - } - - /** - * Sets the result of this {@code Future} unless this {@code Future} has - * already been cancelled or set (including - * {@linkplain #setFuture set asynchronously}). When a call to this method - * returns, the {@code Future} is guaranteed to be - * {@linkplain #isDone done} only if the call was accepted (in which - * case it returns {@code true}). If it returns {@code false}, the {@code - * Future} may have previously been set asynchronously, in which case its - * result may not be known yet. That result, though not yet known, cannot - * be overridden by a call to a {@code set*} method, only by a call to - * {@link #cancel}. - * - * @param value the value to be used as the result - * @return true if the attempt was accepted, completing the {@code Future} - */ - protected boolean set(@Nullable V val) { - Object valueToSet = value == null ? NULL : val; - if (ATOMIC_HELPER.casValue(this, null, valueToSet)) { - complete(this); - return true; - } - return false; - } - - /** - * Sets the failed result of this {@code Future} unless this {@code Future} - * has already been cancelled or set (including - * {@linkplain #setFuture set asynchronously}). When a call to this method - * returns, the {@code Future} is guaranteed to be - * {@linkplain #isDone done} only if the call was accepted (in which - * case it returns {@code true}). If it returns {@code false}, the - * {@code Future} may have previously been set asynchronously, in which case - * its result may not be known yet. That result, though not yet known, - * cannot be overridden by a call to a {@code set*} method, only by a call - * to {@link #cancel}. - * - * @param throwable the exception to be used as the failed result - * @return true if the attempt was accepted, completing the {@code Future} - */ - protected boolean setException(Throwable throwable) { - Object valueToSet = new Failure(checkNotNull(throwable)); - if (ATOMIC_HELPER.casValue(this, null, valueToSet)) { - complete(this); - return true; - } - return false; - } - - /** - * Sets the result of this {@code Future} to match the supplied input - * {@code Future} once the supplied {@code Future} is done, unless this - * {@code Future} has already been cancelled or set (including "set - * asynchronously," defined below). - *

- * <p>
If the supplied future is {@linkplain #isDone done} when this method - * is called and the call is accepted, then this future is guaranteed to - * have been completed with the supplied future by the time this method - * returns. If the supplied future is not done and the call is accepted, then - * the future will be set asynchronously. Note that such a result, - * though not yet known, cannot be overridden by a call to a {@code set*} - * method, only by a call to {@link #cancel}. - *

- * <p>
If the call {@code setFuture(delegate)} is accepted and this {@code - * Future} is later cancelled, cancellation will be propagated to {@code - * delegate}. Additionally, any call to {@code setFuture} after any - * cancellation will propagate cancellation to the supplied {@code Future}. - * - * @param future the future to delegate to - * @return true if the attempt was accepted, indicating that the {@code - * Future} was not previously cancelled or set. - * @since 19.0 - */ - @Beta - @SuppressWarnings("deadstore") - protected boolean setFuture(ListenableFuture future) { - checkNotNull(future); - Object localValue = value; - if (localValue == null) { - if (future.isDone()) { - Object val = getFutureValue(future); - if (ATOMIC_HELPER.casValue(this, null, val)) { - complete(this); - return true; - } - return false; - } - SetFuture valueToSet = new SetFuture(this, future); - if (ATOMIC_HELPER.casValue(this, null, valueToSet)) { - // the listener is responsible for calling completeWithFuture, - // directExecutor is appropriate since all we are doing is unpacking - // a completed future which should be fast. - try { - future.addListener(valueToSet, directExecutor()); - } catch (Throwable t) { - // addListener has thrown an exception! SetFuture.run can't throw - // any exceptions so this must have been caused by addListener - // itself. The most likely explanation is a misconfigured mock. Try - // to switch to Failure. - Failure failure; - try { - failure = new Failure(t); - } catch (Throwable oomMostLikely) { - failure = Failure.FALLBACK_INSTANCE; - } - // Note: The only way this CAS could fail is if cancel() has raced - // with us. That is ok. - boolean unused = ATOMIC_HELPER.casValue(this, valueToSet, failure); - } - return true; - } - localValue = value; // we lost the cas, fall through and maybe cancel - } - // The future has already been set to something. If it is cancellation we - // should cancel the incoming future. - if (localValue instanceof Cancellation) { - // we don't care if it fails, this is best-effort. - future.cancel(((Cancellation) localValue).wasInterrupted); - } - return false; - } - - /** - * Returns a value, suitable for storing in the {@link #value} field. From - * the given future, which is assumed to be done. - *

- * <p>
This is approximately the inverse of {@link #getDoneValue(Object)} - */ - private static Object getFutureValue(ListenableFuture future) { - Object valueToSet; - if (future instanceof TrustedFuture) { - // Break encapsulation for TrustedFuture instances since we know that - // subclasses cannot override .get() (since it is final) and therefore - // this is equivalent to calling .get() and unpacking the exceptions - // like we do below (just much faster because it is a single field read - // instead of a read, several branches and possibly creating exceptions). - return ((AbstractFuture) future).value; - } else { - // Otherwise calculate valueToSet by calling .get() - try { - Object v = getDone(future); - valueToSet = v == null ? NULL : v; - } catch (ExecutionException exception) { - valueToSet = new Failure(exception.getCause()); - } catch (CancellationException cancellation) { - valueToSet = new Cancellation(false, cancellation); - } catch (Throwable t) { - valueToSet = new Failure(t); - } - } - return valueToSet; - } - - /** - * Unblocks all threads and runs all listeners. - */ - private static void complete(AbstractFuture future) { - Listener next = null; - outer: - while (true) { - future.releaseWaiters(); - // We call this before the listeners in order to avoid needing to manage - // a separate stack data structure for them. afterDone() should be - // generally fast and only used for cleanup work... but in theory can - // also be recursive and create StackOverflowErrors - future.afterDone(); - // push the current set of listeners onto next - next = future.clearListeners(next); - future = null; - while (next != null) { - Listener curr = next; - next = next.next; - Runnable task = curr.task; - if (task instanceof SetFuture) { - SetFuture setFuture = (SetFuture) task; - // We unwind setFuture specifically to avoid StackOverflowErrors in - // the case of long chains of SetFutures - // Handling this special case is important because there is no way - // to pass an executor to setFuture, so a user couldn't break the - // chain by doing this themselves. It is also potentially common - // if someone writes a recursive Futures.transformAsync transformer. - future = setFuture.owner; - if (future.value == setFuture) { - Object valueToSet = getFutureValue(setFuture.future); - if (ATOMIC_HELPER.casValue(future, setFuture, valueToSet)) { - continue outer; - } - } - // other wise the future we were trying to set is already done. - } else { - executeListener(task, curr.executor); - } - } - break; - } - } - - public static V getDone(Future future) throws ExecutionException { - /* - * We throw IllegalStateException, since the call could succeed later. - * Perhaps we "should" throw IllegalArgumentException, since the call - * could succeed with a different argument. Those exceptions' docs - * suggest that either is acceptable. Google's Java Practices page - * recommends IllegalArgumentException here, in part to keep its - * recommendation simple: Static methods should throw - * IllegalStateException only when they use static state. - * - * - * Why do we deviate here? The answer: We want for fluentFuture.getDone() - * to throw the same exception as Futures.getDone(fluentFuture). - */ - Preconditions.checkState(future.isDone(), "Future was expected to be " + - "done:" + - " %s", future); - return Uninterruptibles.getUninterruptibly(future); - } - - /** - * Callback method that is called exactly once after the future is completed. - *

- * <p>
If {@link #interruptTask} is also run during completion, - * {@link #afterDone} runs after it. - *

- * <p>
The default implementation of this method in {@code AbstractFuture} - * does nothing. This is intended for very lightweight cleanup work, for - * example, timing statistics or clearing fields. - * If your task does anything heavier consider, just using a listener with - * an executor. - * - * @since 20.0 - */ - @Beta - protected void afterDone() { - } - - /** - * If this future has been cancelled (and possibly interrupted), cancels - * (and possibly interrupts) the given future (if available). - *

- * <p>
This method should be used only when this future is completed. It is - * designed to be called from {@code done}. - */ - final void maybePropagateCancellation(@Nullable Future related) { - if (related != null & isCancelled()) { - related.cancel(wasInterrupted()); - } - } - - /** - * Releases all threads in the {@link #waiters} list, and clears the list. - */ - private void releaseWaiters() { - Waiter head; - do { - head = waiters; - } while (!ATOMIC_HELPER.casWaiters(this, head, Waiter.TOMBSTONE)); - for (Waiter currentWaiter = head; - currentWaiter != null; currentWaiter = currentWaiter.next) { - currentWaiter.unpark(); - } - } - - /** - * Clears the {@link #listeners} list and prepends its contents to {@code - * onto}, least recently added first. - */ - private Listener clearListeners(Listener onto) { - // We need to - // 1. atomically swap the listeners with TOMBSTONE, this is because - // addListener uses that to to synchronize with us - // 2. reverse the linked list, because despite our rather clear contract, - // people depend on us executing listeners in the order they were added - // 3. push all the items onto 'onto' and return the new head of the stack - Listener head; - do { - head = listeners; - } while (!ATOMIC_HELPER.casListeners(this, head, Listener.TOMBSTONE)); - Listener reversedList = onto; - while (head != null) { - Listener tmp = head; - head = head.next; - tmp.next = reversedList; - reversedList = tmp; - } - return reversedList; - } - - /** - * Submits the given runnable to the given {@link Executor} catching and - * logging all {@linkplain RuntimeException runtime exceptions} thrown by - * the executor. - */ - private static void executeListener(Runnable runnable, Executor executor) { - try { - executor.execute(runnable); - } catch (RuntimeException e) { - // Log it and keep going -- bad runnable and/or executor. Don't punish - // the other runnables if we're given a bad one. We only catch - // RuntimeException because we want Errors to propagate up. - LOG.log( - Level.SEVERE, - "RuntimeException while executing runnable " + runnable + " with " + - "executor " + executor, - e); - } - } - - private abstract static class AtomicHelper { - /** - * Non volatile write of the thread to the {@link Waiter#thread} field. - */ - abstract void putThread(Waiter waiter, Thread newValue); - - /** - * Non volatile write of the waiter to the {@link Waiter#next} field. - */ - abstract void putNext(Waiter waiter, Waiter newValue); - - /** - * Performs a CAS operation on the {@link #waiters} field. - */ - abstract boolean casWaiters( - AbstractFuture future, Waiter expect, - Waiter update); - - /** - * Performs a CAS operation on the {@link #listeners} field. - */ - abstract boolean casListeners( - AbstractFuture future, Listener expect, - Listener update); - - /** - * Performs a CAS operation on the {@link #value} field. - */ - abstract boolean casValue( - AbstractFuture future, Object expect, Object update); - } - - /** - * {@link AtomicHelper} based on {@link sun.misc.Unsafe}. - *

- * <p>
Static initialization of this class will fail if the - * {@link sun.misc.Unsafe} object cannot be accessed. - */ - private static final class UnsafeAtomicHelper extends AtomicHelper { - static final sun.misc.Unsafe UNSAFE; - static final long LISTENERS_OFFSET; - static final long WAITERS_OFFSET; - static final long VALUE_OFFSET; - static final long WAITER_THREAD_OFFSET; - static final long WAITER_NEXT_OFFSET; - - static { - sun.misc.Unsafe unsafe = null; - try { - unsafe = sun.misc.Unsafe.getUnsafe(); - } catch (SecurityException tryReflectionInstead) { - try { - unsafe = - AccessController.doPrivileged( - new PrivilegedExceptionAction() { - @Override - public sun.misc.Unsafe run() throws Exception { - Class k = sun.misc.Unsafe.class; - for (java.lang.reflect.Field f : k.getDeclaredFields()) { - f.setAccessible(true); - Object x = f.get(null); - if (k.isInstance(x)) { - return k.cast(x); - } - } - throw new NoSuchFieldError("the Unsafe"); - } - }); - } catch (PrivilegedActionException e) { - throw new RuntimeException( - "Could not initialize intrinsics", e.getCause()); - } - } - try { - Class abstractFuture = AbstractFuture.class; - WAITERS_OFFSET = unsafe - .objectFieldOffset(abstractFuture.getDeclaredField("waiters")); - LISTENERS_OFFSET = unsafe - .objectFieldOffset(abstractFuture.getDeclaredField("listeners")); - VALUE_OFFSET = unsafe - .objectFieldOffset(abstractFuture.getDeclaredField("value")); - WAITER_THREAD_OFFSET = unsafe - .objectFieldOffset(Waiter.class.getDeclaredField("thread")); - WAITER_NEXT_OFFSET = unsafe - .objectFieldOffset(Waiter.class.getDeclaredField("next")); - UNSAFE = unsafe; - } catch (Exception e) { - throwIfUnchecked(e); - throw new RuntimeException(e); - } - } - - public static void throwIfUnchecked(Throwable throwable) { - checkNotNull(throwable); - if (throwable instanceof RuntimeException) { - throw (RuntimeException) throwable; - } - if (throwable instanceof Error) { - throw (Error) throwable; - } - } - - @Override - void putThread(Waiter waiter, Thread newValue) { - UNSAFE.putObject(waiter, WAITER_THREAD_OFFSET, newValue); - } - - @Override - void putNext(Waiter waiter, Waiter newValue) { - UNSAFE.putObject(waiter, WAITER_NEXT_OFFSET, newValue); - } - - /** - * Performs a CAS operation on the {@link #waiters} field. - */ - @Override - boolean casWaiters(AbstractFuture future, Waiter expect, Waiter - update) { - return UNSAFE - .compareAndSwapObject(future, WAITERS_OFFSET, expect, update); - } - - /** - * Performs a CAS operation on the {@link #listeners} field. - */ - @Override - boolean casListeners( - AbstractFuture future, Listener expect, Listener update) { - return UNSAFE - .compareAndSwapObject(future, LISTENERS_OFFSET, expect, update); - } - - /** - * Performs a CAS operation on the {@link #value} field. - */ - @Override - boolean casValue(AbstractFuture future, Object expect, Object update) { - return UNSAFE.compareAndSwapObject(future, VALUE_OFFSET, expect, update); - } - } - - /** - * {@link AtomicHelper} based on {@link AtomicReferenceFieldUpdater}. 
- */ - @SuppressWarnings("visibilitymodifier") - private static final class SafeAtomicHelper extends AtomicHelper { - final AtomicReferenceFieldUpdater waiterThreadUpdater; - final AtomicReferenceFieldUpdater waiterNextUpdater; - final AtomicReferenceFieldUpdater waitersUpdater; - final AtomicReferenceFieldUpdater - listenersUpdater; - final AtomicReferenceFieldUpdater valueUpdater; - - SafeAtomicHelper( - AtomicReferenceFieldUpdater waiterThreadUpdater, - AtomicReferenceFieldUpdater waiterNextUpdater, - AtomicReferenceFieldUpdater waitersUpdater, - AtomicReferenceFieldUpdater listenersUpdater, - AtomicReferenceFieldUpdater valueUpdater) { - this.waiterThreadUpdater = waiterThreadUpdater; - this.waiterNextUpdater = waiterNextUpdater; - this.waitersUpdater = waitersUpdater; - this.listenersUpdater = listenersUpdater; - this.valueUpdater = valueUpdater; - } - - @Override - void putThread(Waiter waiter, Thread newValue) { - waiterThreadUpdater.lazySet(waiter, newValue); - } - - @Override - void putNext(Waiter waiter, Waiter newValue) { - waiterNextUpdater.lazySet(waiter, newValue); - } - - @Override - boolean casWaiters(AbstractFuture future, Waiter expect, Waiter - update) { - return waitersUpdater.compareAndSet(future, expect, update); - } - - @Override - boolean casListeners( - AbstractFuture future, Listener expect, Listener update) { - return listenersUpdater.compareAndSet(future, expect, update); - } - - @Override - boolean casValue(AbstractFuture future, Object expect, Object update) { - return valueUpdater.compareAndSet(future, expect, update); - } - } - - /** - * {@link AtomicHelper} based on {@code synchronized} and volatile writes. - *

- * <p>
This is an implementation of last resort for when certain basic VM - * features are broken (like AtomicReferenceFieldUpdater). - */ - private static final class SynchronizedHelper extends AtomicHelper { - @Override - void putThread(Waiter waiter, Thread newValue) { - waiter.thread = newValue; - } - - @Override - void putNext(Waiter waiter, Waiter newValue) { - waiter.next = newValue; - } - - @Override - boolean casWaiters(AbstractFuture future, Waiter expect, Waiter - update) { - synchronized (future) { - if (future.waiters == expect) { - future.waiters = update; - return true; - } - return false; - } - } - - @Override - boolean casListeners( - AbstractFuture future, Listener expect, Listener update) { - synchronized (future) { - if (future.listeners == expect) { - future.listeners = update; - return true; - } - return false; - } - } - - @Override - boolean casValue(AbstractFuture future, Object expect, Object update) { - synchronized (future) { - if (future.value == expect) { - future.value = update; - return true; - } - return false; - } - } - } - - private static CancellationException cancellationExceptionWithCause( - @Nullable String message, @Nullable Throwable cause) { - CancellationException exception = new CancellationException(message); - exception.initCause(cause); - return exception; - } - - /** - * Returns an {@link Executor} that runs each task in the thread that invokes - * {@link Executor#execute execute}, as in {@link CallerRunsPolicy}. - *

-   * <p>This instance is equivalent to:
-   * <pre>   {@code
-   *   final class DirectExecutor implements Executor {
-   *     public void execute(Runnable r) {
-   *       r.run();
-   *     }
-   *   }}</pre>
- */ - public static Executor directExecutor() { - return DirectExecutor.INSTANCE; - } - - /** - * See {@link #directExecutor} for behavioral notes. - */ - private enum DirectExecutor implements Executor { - INSTANCE; - - @Override - public void execute(Runnable command) { - command.run(); - } - - @Override - public String toString() { - return "MoreExecutors.directExecutor()"; - } - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index d8ba919cefb5..d4cdaf2cfe41 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -92,6 +92,7 @@ public class HddsVolume extends StorageVolume { private File dbParentDir; private File deletedContainerDir; private AtomicBoolean dbLoaded = new AtomicBoolean(false); + private final AtomicBoolean dbLoadFailure = new AtomicBoolean(false); /** * Builder for HddsVolume. @@ -257,6 +258,11 @@ public synchronized VolumeCheckResult check(@Nullable Boolean unused) VolumeCheckResult result = super.check(unused); DatanodeConfiguration df = getConf().getObject(DatanodeConfiguration.class); + if (isDbLoadFailure()) { + LOG.warn("Volume {} failed to access RocksDB: RocksDB parent directory is null, " + + "the volume might not have been loaded properly.", getStorageDir()); + return VolumeCheckResult.FAILED; + } if (result != VolumeCheckResult.HEALTHY || !df.getContainerSchemaV3Enabled() || !isDbLoaded()) { return result; @@ -313,6 +319,11 @@ public File getDbParentDir() { return this.dbParentDir; } + @VisibleForTesting + public void setDbParentDir(File dbParentDir) { + this.dbParentDir = dbParentDir; + } + public File getDeletedContainerDir() { return this.deletedContainerDir; } @@ -326,6 +337,10 @@ public boolean isDbLoaded() { return dbLoaded.get(); } + public boolean isDbLoadFailure() { + return dbLoadFailure.get(); + } + public void loadDbStore(boolean readOnly) throws IOException { // DN startup for the first time, not registered yet, // so the DbVolume is not formatted. 
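Note on the hunks above: the new dbLoadFailure flag makes HddsVolume#check() report a volume as FAILED as soon as its per-disk RocksDB could not be opened, and the hunks below set the flag when loadDbStore()/createDbStore() fail and clear it when the store is closed. A minimal, self-contained sketch of that fail-fast pattern follows; it is illustrative only and does not use the real HddsVolume API (the class name and callback shape are assumptions).

import java.util.concurrent.atomic.AtomicBoolean;

/** Simplified model of the fail-fast flag added to HddsVolume; illustrative only. */
class DbBackedVolumeModel {
  enum CheckResult { HEALTHY, FAILED }

  private final AtomicBoolean dbLoaded = new AtomicBoolean(false);
  private final AtomicBoolean dbLoadFailure = new AtomicBoolean(false);

  /** Stands in for loadDbStore(): any failure to open the DB marks the volume. */
  void loadDbStore(Runnable openRocksDb) {
    try {
      openRocksDb.run();
      dbLoaded.set(true);
      dbLoadFailure.set(false);
    } catch (Throwable t) {   // the patch widens the catch from IOException to Throwable
      dbLoadFailure.set(true);
      throw new IllegalStateException("Can't init db instance", t);
    }
  }

  /** Stands in for check(): a volume whose DB failed to load is FAILED up front. */
  CheckResult check() {
    return dbLoadFailure.get() ? CheckResult.FAILED : CheckResult.HEALTHY;
  }
}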
@@ -363,7 +378,8 @@ public void loadDbStore(boolean readOnly) throws IOException { String containerDBPath = containerDBFile.getAbsolutePath(); try { initPerDiskDBStore(containerDBPath, getConf(), readOnly); - } catch (IOException e) { + } catch (Throwable e) { + dbLoadFailure.set(true); throw new IOException("Can't init db instance under path " + containerDBPath + " for volume " + getStorageID(), e); } @@ -417,9 +433,11 @@ public void createDbStore(MutableVolumeSet dbVolumeSet) throws IOException { try { HddsVolumeUtil.initPerDiskDBStore(containerDBPath, getConf(), false); dbLoaded.set(true); + dbLoadFailure.set(false); LOG.info("SchemaV3 db is created and loaded at {} for volume {}", containerDBPath, getStorageID()); } catch (IOException e) { + dbLoadFailure.set(true); String errMsg = "Can't create db instance under path " + containerDBPath + " for volume " + getStorageID(); LOG.error(errMsg, e); @@ -448,6 +466,7 @@ private void closeDbStore() { .getAbsolutePath(); DatanodeStoreCache.getInstance().removeDB(containerDBPath); dbLoaded.set(false); + dbLoadFailure.set(false); LOG.info("SchemaV3 db is stopped at {} for volume {}", containerDBPath, getStorageID()); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index 3c0b6e618ee1..e195b127d499 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -442,12 +442,20 @@ public Map> getVolumeStateMap() { public boolean hasEnoughVolumes() { // Max number of bad volumes allowed, should have at least // 1 good volume + boolean hasEnoughVolumes; if (maxVolumeFailuresTolerated == StorageVolumeChecker.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) { - return getVolumesList().size() >= 1; + hasEnoughVolumes = getVolumesList().size() >= 1; } else { - return getFailedVolumesList().size() <= maxVolumeFailuresTolerated; + hasEnoughVolumes = getFailedVolumesList().size() <= maxVolumeFailuresTolerated; } + if (!hasEnoughVolumes) { + LOG.error("Not enough volumes in MutableVolumeSet. 
DatanodeUUID: {}, VolumeType: {}, " + + "MaxVolumeFailuresTolerated: {}, ActiveVolumes: {}, FailedVolumes: {}", + datanodeUuid, volumeType, maxVolumeFailuresTolerated, + getVolumesList().size(), getFailedVolumesList().size()); + } + return hasEnoughVolumes; } public StorageLocationReport[] getStorageReport() { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolumeChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolumeChecker.java index 4917810bd97c..e81fd1008ff6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolumeChecker.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolumeChecker.java @@ -18,6 +18,11 @@ package org.apache.hadoop.ozone.container.common.volume; +import com.google.common.util.concurrent.FutureCallback; +import com.google.common.util.concurrent.Futures; +import com.google.common.util.concurrent.ListenableFuture; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.common.util.concurrent.ThreadFactoryBuilder; import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.ArrayList; @@ -38,7 +43,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import com.google.common.util.concurrent.MoreExecutors; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; @@ -46,10 +50,6 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Sets; -import com.google.common.util.concurrent.FutureCallback; -import com.google.common.util.concurrent.Futures; -import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.ThreadFactoryBuilder; import jakarta.annotation.Nullable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java index 991f105d15b2..1548b30c9fb6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/ThrottledAsyncChecker.java @@ -144,8 +144,7 @@ public Optional> schedule( final ListenableFuture lf; if (diskCheckTimeout > 0) { - lf = TimeoutFuture - .create(lfWithoutTimeout, diskCheckTimeout, TimeUnit.MILLISECONDS, + lf = Futures.withTimeout(lfWithoutTimeout, diskCheckTimeout, TimeUnit.MILLISECONDS, scheduledExecutorService); } else { lf = lfWithoutTimeout; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java deleted file mode 100644 index 42e2ed5758eb..000000000000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/TimeoutFuture.java +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright (C) 2007 The Guava Authors - * - * Licensed under the Apache License, Version 
2.0 (the "License"); you may - * not use this file except in compliance with the License. You may obtain a - * copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * Some portions of this class have been modified to make it functional in this - * package. - */ -package org.apache.hadoop.ozone.container.common.volume; - -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ListenableFuture; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import jakarta.annotation.Nullable; -import java.util.concurrent.Future; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** - * Implementation of {@code Futures#withTimeout}. - *

- * <p>
Future that delegates to another but will finish early (via a - * {@link TimeoutException} wrapped in an {@link ExecutionException}) if the - * specified duration expires. The delegate future is interrupted and - * cancelled if it times out. - */ -final class TimeoutFuture extends AbstractFuture.TrustedFuture { - public static final Logger LOG = LoggerFactory.getLogger( - TimeoutFuture.class); - - static ListenableFuture create( - ListenableFuture delegate, - long time, - TimeUnit unit, - ScheduledExecutorService scheduledExecutor) { - TimeoutFuture result = new TimeoutFuture(delegate); - TimeoutFuture.Fire fire = new TimeoutFuture.Fire(result); - result.timer = scheduledExecutor.schedule(fire, time, unit); - delegate.addListener(fire, directExecutor()); - return result; - } - - /* - * Memory visibility of these fields. There are two cases to consider. - * - * 1. visibility of the writes to these fields to Fire.run: - * - * The initial write to delegateRef is made definitely visible via the - * semantics of addListener/SES.schedule. The later racy write in cancel() - * is not guaranteed to be observed, however that is fine since the - * correctness is based on the atomic state in our base class. The initial - * write to timer is never definitely visible to Fire.run since it is - * assigned after SES.schedule is called. Therefore Fire.run has to check - * for null. However, it should be visible if Fire.run is called by - * delegate.addListener since addListener is called after the assignment - * to timer, and importantly this is the main situation in which we need to - * be able to see the write. - * - * 2. visibility of the writes to an afterDone() call triggered by cancel(): - * - * Since these fields are non-final that means that TimeoutFuture is not - * being 'safely published', thus a motivated caller may be able to expose - * the reference to another thread that would then call cancel() and be - * unable to cancel the delegate. There are a number of ways to solve this, - * none of which are very pretty, and it is currently believed to be a - * purely theoretical problem (since the other actions should supply - * sufficient write-barriers). - */ - - @Nullable private ListenableFuture delegateRef; - @Nullable private Future timer; - - private TimeoutFuture(ListenableFuture delegate) { - this.delegateRef = Preconditions.checkNotNull(delegate); - } - - /** - * A runnable that is called when the delegate or the timer completes. - */ - private static final class Fire implements Runnable { - @Nullable - private TimeoutFuture timeoutFutureRef; - - Fire( - TimeoutFuture timeoutFuture) { - this.timeoutFutureRef = timeoutFuture; - } - - @Override - public void run() { - // If either of these reads return null then we must be after a - // successful cancel or another call to this method. - TimeoutFuture timeoutFuture = timeoutFutureRef; - if (timeoutFuture == null) { - return; - } - ListenableFuture delegate = timeoutFuture.delegateRef; - if (delegate == null) { - return; - } - - /* - * If we're about to complete the TimeoutFuture, we want to release our - * reference to it. Otherwise, we'll pin it (and its result) in memory - * until the timeout task is GCed. (The need to clear our reference to - * the TimeoutFuture is the reason we use a *static* nested class with - * a manual reference back to the "containing" class.) - * - * This has the nice-ish side effect of limiting reentrancy: run() calls - * timeoutFuture.setException() calls run(). 
That reentrancy would - * already be harmless, since timeoutFuture can be set (and delegate - * cancelled) only once. (And "set only once" is important for other - * reasons: run() can still be invoked concurrently in different threads, - * even with the above null checks.) - */ - timeoutFutureRef = null; - if (delegate.isDone()) { - timeoutFuture.setFuture(delegate); - } else { - try { - timeoutFuture.setException( - new TimeoutException("Future timed out: " + delegate)); - } finally { - delegate.cancel(true); - } - } - } - } - - @Override - protected void afterDone() { - maybePropagateCancellation(delegateRef); - - Future localTimer = timer; - // Try to cancel the timer as an optimization. - // timer may be null if this call to run was by the timer task since there - // is no happens-before edge between the assignment to timer and an - // execution of the timer task. - if (localTimer != null) { - localTimer.cancel(false); - } - - delegateRef = null; - timer = null; - } -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 594500b77b36..ed13ebc93b99 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -112,6 +112,7 @@ import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getReadContainerResponse; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getSuccessResponse; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getSuccessResponseBuilder; +import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getWriteChunkResponseSuccess; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.malformedRequest; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.putBlockResponseSuccess; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.unsupportedRequest; @@ -780,6 +781,7 @@ ContainerCommandResponseProto handleReadChunk( data = chunkManager.readChunk(kvContainer, blockID, chunkInfo, dispatcherContext); + LOG.debug("read chunk from block {} chunk {}", blockID, chunkInfo); // Validate data only if the read chunk is issued by Ratis for its // internal logic. // For client reads, the client is expected to validate. 
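The TimeoutFuture deleted above (together with the AbstractFuture it extended) is superseded by Guava's Futures.withTimeout, which the ThrottledAsyncChecker hunk earlier in this patch now calls directly. A minimal sketch of that call follows, assuming a Guava version that provides the (future, long, TimeUnit, ScheduledExecutorService) overload used by the patch; the executors and the dummy check are illustrative, not code from the patch.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public final class WithTimeoutSketch {
  public static void main(String[] args) throws Exception {
    ListeningExecutorService checker =
        MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
    ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

    // Stands in for a slow volume check submitted by ThrottledAsyncChecker.
    ListenableFuture<Boolean> diskCheck = checker.submit(() -> {
      Thread.sleep(100);
      return true;
    });

    // Fails with a TimeoutException (wrapped in an ExecutionException) if the delegate
    // does not finish in time; the delegate future is then cancelled, which is the
    // behaviour the deleted TimeoutFuture used to provide.
    ListenableFuture<Boolean> bounded =
        Futures.withTimeout(diskCheck, 10, TimeUnit.MILLISECONDS, scheduler);

    try {
      System.out.println("check result: " + bounded.get());
    } catch (ExecutionException e) {
      System.out.println("check timed out: " + e.getCause());
    } finally {
      checker.shutdownNow();
      scheduler.shutdownNow();
    }
  }
}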
@@ -819,7 +821,7 @@ private void validateChunkChecksumData(ChunkBuffer data, ChunkInfo info) throws StorageContainerException { if (validateChunkChecksumData) { try { - Checksum.verifyChecksum(data, info.getChecksumData(), 0); + Checksum.verifyChecksum(data.duplicate(data.position(), data.limit()), info.getChecksumData(), 0); } catch (OzoneChecksumException ex) { throw ChunkUtils.wrapInStorageContainerException(ex); } @@ -841,6 +843,7 @@ ContainerCommandResponseProto handleWriteChunk( return malformedRequest(request); } + ContainerProtos.BlockData blockDataProto = null; try { checkContainerOpen(kvContainer); @@ -864,6 +867,28 @@ ContainerCommandResponseProto handleWriteChunk( chunkManager .writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext); + final boolean isCommit = dispatcherContext.getStage().isCommit(); + if (isCommit && writeChunk.hasBlock()) { + metrics.incContainerOpsMetrics(Type.PutBlock); + BlockData blockData = BlockData.getFromProtoBuf( + writeChunk.getBlock().getBlockData()); + // optimization for hsync when WriteChunk is in commit phase: + // + // block metadata is piggybacked in the same message. + // there will not be an additional PutBlock request. + // + // End of block will always be sent as a standalone PutBlock. + // the PutBlock piggybacked in WriteChunk is never end of block. + // + // do not do this in WRITE_DATA phase otherwise PutBlock will be out + // of order. + blockData.setBlockCommitSequenceId(dispatcherContext.getLogIndex()); + blockManager.putBlock(kvContainer, blockData, false); + blockDataProto = blockData.getProtoBufMessage(); + final long numBytes = blockDataProto.getSerializedSize(); + metrics.incContainerBytesStats(Type.PutBlock, numBytes); + } + // We should increment stats after writeChunk if (isWrite) { metrics.incContainerBytesStats(Type.WriteChunk, writeChunk @@ -877,7 +902,7 @@ ContainerCommandResponseProto handleWriteChunk( request); } - return getSuccessResponse(request); + return getWriteChunkResponseSuccess(request, blockDataProto); } /** @@ -920,9 +945,9 @@ ContainerCommandResponseProto handlePutSmallFile( // chunks will be committed as a part of handling putSmallFile // here. There is no need to maintain this info in openContainerBlockMap. 
+ validateChunkChecksumData(data, chunkInfo); chunkManager .writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext); - validateChunkChecksumData(data, chunkInfo); chunkManager.finishWriteChunks(kvContainer, blockData); List chunks = new LinkedList<>(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java index a45055821a41..e966a0bed862 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java @@ -124,15 +124,13 @@ private static String getContainerSubDirectory(long containerId) { */ public static File getContainerDBFile(KeyValueContainerData containerData) { if (containerData.hasSchema(OzoneConsts.SCHEMA_V3)) { + Preconditions.checkNotNull(containerData.getVolume().getDbParentDir(), "Base Directory cannot be null"); return new File(containerData.getVolume().getDbParentDir(), OzoneConsts.CONTAINER_DB_NAME); } - return getContainerDBFile(containerData.getMetadataPath(), containerData); - } - - public static File getContainerDBFile(String baseDir, - KeyValueContainerData containerData) { - return new File(baseDir, containerData.getContainerID() + + Preconditions.checkNotNull(containerData.getMetadataPath(), "Metadata Directory cannot be null"); + return new File(containerData.getMetadataPath(), containerData.getContainerID() + OzoneConsts.DN_CONTAINER_DB); } + } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java index 4690565b0bb1..413f36a7616b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java @@ -54,7 +54,8 @@ */ public class BlockManagerImpl implements BlockManager { - static final Logger LOG = LoggerFactory.getLogger(BlockManagerImpl.class); + public static final Logger LOG = + LoggerFactory.getLogger(BlockManagerImpl.class); private ConfigurationSource config; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java index 8d55bfad782f..26719d7f035a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java @@ -66,7 +66,7 @@ public abstract class AbstractDatanodeStore implements DatanodeStore { private Table finalizeBlocksTableWithIterator; - static final Logger LOG = + public static final Logger LOG = LoggerFactory.getLogger(AbstractDatanodeStore.class); private volatile DBStore store; private final AbstractDatanodeDBDefinition dbDef; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java index ee51463309b8..5ceea125e814 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java @@ -106,7 +106,6 @@ public static class Builder { private Clock clock; private IntConsumer executorThreadUpdater = threadCount -> { }; - private String threadNamePrefix; public Builder clock(Clock newClock) { clock = newClock; @@ -138,11 +137,6 @@ public Builder executorThreadUpdater(IntConsumer newUpdater) { return this; } - public Builder threadNamePrefix(String threadPrefix) { - this.threadNamePrefix = threadPrefix; - return this; - } - public ReplicationSupervisor build() { if (replicationConfig == null || datanodeConfig == null) { ConfigurationSource conf = new OzoneConfiguration(); @@ -162,6 +156,7 @@ public ReplicationSupervisor build() { if (executor == null) { LOG.info("Initializing replication supervisor with thread count = {}", replicationConfig.getReplicationMaxStreams()); + String threadNamePrefix = context != null ? context.getThreadNamePrefix() : ""; ThreadFactory threadFactory = new ThreadFactoryBuilder() .setDaemon(true) .setNameFormat(threadNamePrefix + "ContainerReplicationThread-%d") diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index 14015202310b..e5f6dc7edefd 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -20,6 +20,7 @@ import com.google.common.collect.Maps; import org.apache.commons.codec.digest.DigestUtils; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.BlockID; @@ -40,6 +41,8 @@ import org.apache.hadoop.hdds.security.token.TokenVerifier; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.common.Checksum; +import org.apache.hadoop.ozone.common.ChecksumData; +import org.apache.hadoop.ozone.common.OzoneChecksumException; import org.apache.hadoop.ozone.common.utils.BufferUtils; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; @@ -47,10 +50,12 @@ import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.Handler; import org.apache.hadoop.ozone.container.common.report.IncrementalReportSender; +import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.Op; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.WriteChunkStage; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import 
org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; @@ -69,6 +74,7 @@ import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.time.Duration; import java.util.Collections; import java.util.HashMap; @@ -99,6 +105,9 @@ * Test-cases to verify the functionality of HddsDispatcher. */ public class TestHddsDispatcher { + @TempDir + private Path tempDir; + private static final Logger LOG = LoggerFactory.getLogger( TestHddsDispatcher.class); @TempDir @@ -129,6 +138,8 @@ public void testContainerCloseActionWhenFull( (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), dd.getUuidString()); Container container = new KeyValueContainer(containerData, conf); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId.toString()); containerSet.addContainer(container); @@ -164,6 +175,72 @@ public void testContainerCloseActionWhenFull( } } + @Test + public void testSmallFileChecksum() throws IOException { + String testDirPath = testDir.getPath(); + try { + UUID scmId = UUID.randomUUID(); + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(HDDS_DATANODE_DIR_KEY, testDirPath); + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath); + DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class); + dnConf.setChunkDataValidationCheck(true); + conf.setFromObject(dnConf); + DatanodeDetails dd = randomDatanodeDetails(); + HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf); + + ContainerCommandResponseProto smallFileResponse = + hddsDispatcher.dispatch(newPutSmallFile(1L, 1L), null); + + assertEquals(ContainerProtos.Result.SUCCESS, smallFileResponse.getResult()); + } finally { + ContainerMetrics.remove(); + } + } + + @Test + public void testWriteChunkChecksum() throws IOException { + String testDirPath = testDir.getPath(); + try { + UUID scmId = UUID.randomUUID(); + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(HDDS_DATANODE_DIR_KEY, testDirPath); + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath); + DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class); + dnConf.setChunkDataValidationCheck(true); + conf.setFromObject(dnConf); + DatanodeDetails dd = randomDatanodeDetails(); + HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf); + //Send a few WriteChunkRequests + ContainerCommandResponseProto response; + ContainerCommandRequestProto writeChunkRequest0 = getWriteChunkRequest0(dd.getUuidString(), 1L, 1L, 0); + hddsDispatcher.dispatch(writeChunkRequest0, null); + hddsDispatcher.dispatch(getWriteChunkRequest0(dd.getUuidString(), 1L, 1L, 1), null); + response = hddsDispatcher.dispatch(getWriteChunkRequest0(dd.getUuidString(), 1L, 1L, 2), null); + + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + // Send Read Chunk request for written chunk. 
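              // Illustrative sketch, not part of this patch: with chunk data validation enabled
              // above via setChunkDataValidationCheck(true), the datanode is expected to verify
              // that the written payload matches the checksums declared in the ChunkInfo sent by
              // the client. The equivalent client-side comparison, using the checksum() helper
              // defined further down in this class:
              ContainerProtos.ChunkInfo sentChunk = writeChunkRequest0.getWriteChunk().getChunkData();
              assertEquals(sentChunk.getChecksumData(),
                  checksum(writeChunkRequest0.getWriteChunk().getData()).getProtoBufMessage());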
+ response = + hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest0), null); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + + ByteString responseData = BufferUtils.concatByteStrings( + response.getReadChunk().getDataBuffers().getBuffersList()); + assertEquals(writeChunkRequest0.getWriteChunk().getData(), + responseData); + + // Test checksum on Read: + final DispatcherContext context = DispatcherContext + .newBuilder(DispatcherContext.Op.READ_STATE_MACHINE_DATA) + .build(); + response = + hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest0), context); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + } finally { + ContainerMetrics.remove(); + } + } + @ContainerLayoutTestInfo.ContainerTest public void testContainerCloseActionWhenVolumeFull( ContainerLayoutVersion layoutVersion) throws Exception { @@ -197,6 +274,8 @@ public void testContainerCloseActionWhenVolumeFull( 50, UUID.randomUUID().toString(), dd.getUuidString()); Container container = new KeyValueContainer(containerData, conf); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId.toString()); containerSet.addContainer(container); @@ -514,6 +593,84 @@ private ContainerCommandRequestProto getWriteChunkRequest( .build(); } + static ChecksumData checksum(ByteString data) { + try { + return new Checksum(ContainerProtos.ChecksumType.CRC32, 256) + .computeChecksum(data.asReadOnlyByteBuffer()); + } catch (OzoneChecksumException e) { + throw new IllegalStateException(e); + } + } + + private ContainerCommandRequestProto getWriteChunkRequest0( + String datanodeId, Long containerId, Long localId, int chunkNum) { + final int lenOfBytes = 32; + ByteString chunkData = ByteString.copyFrom(RandomUtils.nextBytes(32)); + + ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo + .newBuilder() + .setChunkName( + DigestUtils.md5Hex("dummy-key") + "_stream_" + + containerId + "_chunk_" + localId) + .setOffset((long) chunkNum * lenOfBytes) + .setLen(lenOfBytes) + .setChecksumData(checksum(chunkData).getProtoBufMessage()) + .build(); + + WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto + .newBuilder() + .setBlockID(new BlockID(containerId, localId) + .getDatanodeBlockIDProtobuf()) + .setChunkData(chunk) + .setData(chunkData); + + return ContainerCommandRequestProto + .newBuilder() + .setContainerID(containerId) + .setCmdType(ContainerProtos.Type.WriteChunk) + .setDatanodeUuid(datanodeId) + .setWriteChunk(writeChunkRequest) + .build(); + } + + static ContainerCommandRequestProto newPutSmallFile(Long containerId, Long localId) { + ByteString chunkData = ByteString.copyFrom(RandomUtils.nextBytes(32)); + return newPutSmallFile(new BlockID(containerId, localId), chunkData); + } + + static ContainerCommandRequestProto newPutSmallFile( + BlockID blockID, ByteString data) { + final ContainerProtos.BlockData.Builder blockData + = ContainerProtos.BlockData.newBuilder() + .setBlockID(blockID.getDatanodeBlockIDProtobuf()); + final ContainerProtos.PutBlockRequestProto.Builder putBlockRequest + = ContainerProtos.PutBlockRequestProto.newBuilder() + .setBlockData(blockData); + final ContainerProtos.KeyValue keyValue = ContainerProtos.KeyValue.newBuilder() + .setKey("OverWriteRequested") + .setValue("true") + .build(); + final ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo.newBuilder() + 
.setChunkName(blockID.getLocalID() + "_chunk") + .setOffset(0) + .setLen(data.size()) + .addMetadata(keyValue) + .setChecksumData(checksum(data).getProtoBufMessage()) + .build(); + final ContainerProtos.PutSmallFileRequestProto putSmallFileRequest + = ContainerProtos.PutSmallFileRequestProto.newBuilder() + .setChunkInfo(chunk) + .setBlock(putBlockRequest) + .setData(data) + .build(); + return ContainerCommandRequestProto.newBuilder() + .setCmdType(ContainerProtos.Type.PutSmallFile) + .setContainerID(blockID.getContainerID()) + .setDatanodeUuid(UUID.randomUUID().toString()) + .setPutSmallFile(putSmallFileRequest) + .build(); + } + /** * Creates container read chunk request using input container write chunk * request. diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java index d05c127838f1..387997db736d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/utils/TestHddsVolumeUtil.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; import org.apache.hadoop.ozone.container.common.volume.DbVolume; @@ -43,7 +44,13 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrowsExactly; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mockStatic; + +import org.mockito.MockedStatic; +import org.mockito.Mockito; + /** * Test for {@link HddsVolumeUtil}. @@ -95,6 +102,34 @@ public void teardown() { dbVolumeSet.shutdown(); } + @Test + public void testLoadHDDVolumeWithInitDBException() + throws Exception { + // Create db instances for all HDDsVolumes. 
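      // Descriptive note, not part of this patch: this test formats each HddsVolume and creates
      // its working directory, then stubs HddsVolumeUtil.initPerDiskDBStore() to throw, and
      // finally asserts that a volume whose per-disk RocksDB cannot be initialized surfaces the
      // IOException from loadDbStore(), reports VolumeCheckResult.FAILED from check(false), and is
      // flagged through isDbLoadFailure()/isDbLoaded(), so it is handled as a failed volume rather
      // than being used without a usable DB.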
+ for (HddsVolume hddsVolume : StorageVolumeUtil.getHddsVolumesList( + hddsVolumeSet.getVolumesList())) { + hddsVolume.format(clusterId); + hddsVolume.createWorkingDir(clusterId, null); + } + + try (MockedStatic mocked = mockStatic(HddsVolumeUtil.class, Mockito.CALLS_REAL_METHODS)) { + // Simulating the init DB Exception + mocked.when(() -> HddsVolumeUtil.initPerDiskDBStore(Mockito.anyString(), Mockito.any(), Mockito.anyBoolean())) + .thenThrow(new IOException("Mocked Exception")); + + reinitVolumes(); + for (HddsVolume hddsVolume : StorageVolumeUtil.getHddsVolumesList( + hddsVolumeSet.getVolumesList())) { + assertThrowsExactly(IOException.class, () -> hddsVolume.loadDbStore(true)); + // If the Volume init DB is abnormal, the Volume should be recognized as a failed Volume + assertEquals(VolumeCheckResult.FAILED, hddsVolume.check(false)); + assertTrue(hddsVolume.isDbLoadFailure()); + assertFalse(hddsVolume.isDbLoaded()); + } + } + + } + @Test public void testLoadAllHddsVolumeDbStoreWithoutDbVolumes() throws IOException { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java index eb1f7979f8b9..55df5f43b6b8 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java @@ -21,6 +21,7 @@ import java.io.File; import java.io.IOException; import java.net.InetSocketAddress; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; @@ -45,6 +46,7 @@ import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; @@ -76,6 +78,9 @@ */ @Timeout(30) public class TestVolumeSetDiskChecks { + @TempDir + private Path tempDir; + public static final Logger LOG = LoggerFactory.getLogger( TestVolumeSetDiskChecks.class); @TempDir @@ -302,11 +307,15 @@ public void testVolumeFailure() throws IOException { dummyChecker); KeyValueContainer container = new KeyValueContainer(data, conf); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID.randomUUID().toString()); conSet.addContainer(container); KeyValueContainer container1 = new KeyValueContainer(data1, conf); + StorageVolumeUtil.getHddsVolumesList(volumeSet1.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); container1.create(volumeSet1, new RoundRobinVolumeChoosingPolicy(), UUID.randomUUID().toString()); conSet.addContainer(container1); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java index bad3e7ee81db..03901b99be3b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java @@ -29,6 +29,7 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.Handler; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; @@ -143,6 +144,8 @@ public void init(boolean isZeroCopy) throws Exception { ContainerLayoutVersion.FILE_PER_BLOCK, GB, UUID.randomUUID().toString(), datanode.getUuidString()); KeyValueContainer container = new KeyValueContainer(data, conf); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), "test-replication"); containerSet.addContainer(container); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java index 137214aa1cd6..59b88bcbea46 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java @@ -38,6 +38,7 @@ import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; import org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask; import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.replication.ContainerImporter; import org.apache.hadoop.ozone.container.replication.ContainerReplicationSource; @@ -532,6 +533,8 @@ public void restartDatanode(int expectedMlv, boolean exactMatch) // Start new datanode with the same configuration. 
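    // Descriptive note, not part of this patch: the setDbParentDir() calls added below, like the
    // matching ones in the other tests touched by this change, mirror the new
    // Preconditions.checkNotNull on getDbParentDir() in
    // KeyValueContainerLocationUtil.getContainerDBFile() for schema V3 containers; pointing
    // dbParentDir at a @TempDir gives the container DB lookup a valid base directory during the test.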
dsm = new DatanodeStateMachine(dd, conf); + StorageVolumeUtil.getHddsVolumesList(dsm.getContainer().getVolumeSet().getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempFolder.toFile())); int mlv = dsm.getLayoutVersionManager().getMetadataLayoutVersion(); if (exactMatch) { assertEquals(expectedMlv, mlv); diff --git a/hadoop-hdds/docs/content/feature/ErasureCoding.md b/hadoop-hdds/docs/content/feature/ErasureCoding.md index 77866762f6d3..c4d3739f1dcd 100644 --- a/hadoop-hdds/docs/content/feature/ErasureCoding.md +++ b/hadoop-hdds/docs/content/feature/ErasureCoding.md @@ -174,7 +174,9 @@ the configuration keys `ozone.server.default.replication.type` and `ozone.server ozone.server.default.replication.type EC +``` +```XML ozone.server.default.replication RS-6-3-1024k @@ -208,6 +210,22 @@ We can pass the EC Replication Config while creating the keys irrespective of bu ozone sh key put --type EC --replication rs-6-3-1024k ``` +When using ofs/o3fs, we can pass the EC Replication Config by setting the configuration keys `ozone.replication.type` and `ozone.replication`. + +```XML + + ozone.replication.type + EC + +``` + +```XML + + ozone.replication + rs-3-2-1024k + +``` + In the case bucket already has default EC Replication Config, there is no need of passing EC Replication Config while creating key. ### Enable Intel ISA-L diff --git a/hadoop-hdds/docs/content/feature/Snapshot.md b/hadoop-hdds/docs/content/feature/Snapshot.md index 880176ec669e..143a1a5f918f 100644 --- a/hadoop-hdds/docs/content/feature/Snapshot.md +++ b/hadoop-hdds/docs/content/feature/Snapshot.md @@ -73,5 +73,5 @@ Ozone also provides SnapshotDiff API. Whenever a user issues a SnapshotDiff betw Snapshot feature places additional demands on the cluster in terms of CPU, memory and storage. Cluster nodes running Ozone Managers and Ozone Datanodes should be configured with extra storage capacity depending on the number of active snapshots that the user wants to keep. Ozone Snapshots consume incremental amount of space per snapshot. e.g. if the active object store has 100 GB data (before replication) and a snapshot is taken, then the 100 GB of space will be locked in that snapshot. If the active object store consumes another 10 GB of space (before replication) subsequently then overall space requirement would be 100 GB + 10 GB = 110 GB in total (before replication). This is because common keys between Ozone snapshots and the active object store will share the storage space. -Similarly, nodes running Ozone Manager should be configured with extra memory depending on how many snapshots are concurrently read from. This also depepnds on how many concurrent SnapshotDiff jobs are expected in the cluster. By default, an Ozone Manager allows 10 concurrent SnapshotDiff jobs at a time, which can be increased in config. +Similarly, nodes running Ozone Manager should be configured with extra memory depending on how many snapshots are concurrently read from. This also depends on how many concurrent SnapshotDiff jobs are expected in the cluster. By default, an Ozone Manager allows 10 concurrent SnapshotDiff jobs at a time, which can be increased in config. 
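The ErasureCoding.md addition above documents the client-side `ozone.replication.type` and `ozone.replication` keys for ofs/o3fs. As an illustration only, the same defaults can be set programmatically before opening an `ofs://` path; the sketch below assumes the standard Hadoop `Configuration`/`FileSystem` API, and the OM host and key path are placeholders rather than values from this patch.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class EcDefaultReplicationExample {
  public static void main(String[] args) throws Exception {
    // Assumes fs.ofs.impl and the ozone-filesystem jar are configured as described
    // in the Ofs documentation; "om-host.example.com" is a placeholder OM address.
    Configuration conf = new Configuration();
    // Client-side defaults documented in the ErasureCoding.md change above.
    conf.set("ozone.replication.type", "EC");
    conf.set("ozone.replication", "rs-3-2-1024k");
    try (FileSystem fs = FileSystem.get(URI.create("ofs://om-host.example.com/"), conf);
         FSDataOutputStream out = fs.create(new Path("/vol1/bucket1/ec-key"))) {
      out.writeBytes("written with the client-side EC replication defaults\n");
    }
  }
}
```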
diff --git a/hadoop-hdds/docs/content/interface/Cli.zh.md b/hadoop-hdds/docs/content/interface/Cli.zh.md new file mode 100644 index 000000000000..aa34a9245710 --- /dev/null +++ b/hadoop-hdds/docs/content/interface/Cli.zh.md @@ -0,0 +1,311 @@ +--- +title: 命令行接口 +weight: 4 +menu: + main: + parent: "客户端接口" +--- + + + +Ozone shell 是从命令行与 Ozone 交互的主要接口。在后台,它调用 [Java API]({{< ref "interface/JavaApi.md">}}). + +有些功能只能通过使用 `ozone sh` 命令才能访问。例如: + +1. 创建带有配额的卷 +2. 管理内部 ACL +3. 创建带有加密的键的桶 + +所有这些命令都是一次性的管理任务。应用程序也可以使用其他接口,如 Hadoop 兼容文件系统(o3fs 或 ofs)或 S3 接口来实现相同功能而无需使用 Ozone 命令行接口。 + + +Ozone shell 的帮助菜单可以在 _对象_ 级别 或者 _动作_ 级别被调出. + +示例命令: + +```bash +ozone sh volume --help +``` + +这条命令展示了卷的所有可用的 _动作_ 命令 + +或者也可以用来解释具体的某个 _动作_ ,例如: + +```bash +ozone sh volume create --help +``` + +这条命令输出卷的`create`动作的所有命令行选项 + +## 通用命令格式 + +Ozone shell 命令采取以下形式: + +> _ozone sh object action url_ + +**ozone** 脚本用于调用所有 Ozone 子命令。通过 ```sh``` 命令调用 ozone shell 命令。 + +对象可以是卷、桶或键。动作可以是创建、列出、删除等。 + +根据动作,Ozone URL 可以指向以下格式的卷、桶或键: + +_\[schema\]\[server:port\]/volume/bucket/key_ + + +其中, + +1. **Schema** - 应为 `o3`,这是访问 Ozone API 的原生 RPC 协议。是否指定 schema 是可选的。 + +2. **Server:Port** - 应为 Ozone Manager 的地址。如果不指定端口,则将使用 ozone-site.xml 中的默认端口。 + +请查看卷命令、桶命令和键命令部分了解更多详情。 + +## 卷操作 + +卷位于层次结构的顶层,仅由管理员管理。也可以指定所有者用户和配额。 + +示例命令: + +```shell +$ ozone sh volume create /vol1 +``` + +```shell +$ ozone sh volume info /vol1 +{ + "metadata" : { }, + "name" : "vol1", + "admin" : "hadoop", + "owner" : "hadoop", + "creationTime" : "2020-07-28T12:31:50.112Z", + "modificationTime" : "2020-07-28T12:31:50.112Z", + "acls" : [ { + "type" : "USER", + "name" : "hadoop", + "aclScope" : "ACCESS", + "aclList" : [ "ALL" ] + }, { + "type" : "GROUP", + "name" : "users", + "aclScope" : "ACCESS", + "aclList" : [ "ALL" ] + } ], + "quota" : 1152921504606846976 +} +``` + +```shell +$ ozone sh volume list / +[ { + "metadata" : { }, + "name" : "s3v", + "admin" : "hadoop", + "owner" : "hadoop", + "creationTime" : "2020-07-27T11:32:22.314Z", + "modificationTime" : "2020-07-27T11:32:22.314Z", + "acls" : [ { + "type" : "USER", + "name" : "hadoop", + "aclScope" : "ACCESS", + "aclList" : [ "ALL" ] + }, { + "type" : "GROUP", + "name" : "users", + "aclScope" : "ACCESS", + "aclList" : [ "ALL" ] + } ], + "quota" : 1152921504606846976 +}, { + .... +} ] +``` + +如果卷为空,我们可以使用以下命令删除卷。 + +```shell +$ ozone sh volume delete /vol1 +Volume vol1 is deleted +``` +如果卷包含任意桶或键,我们可以递归地删除卷。这将删除卷中所有的桶和键,然后删除卷本身。在运行这个命令后,将无法恢复已删除的内容。 + +```shell +$ ozone sh volume delete -r /vol1 +This command will delete volume recursively. +There is no recovery option after using this command, and no trash for FSO buckets. +Delay is expected running this command. +Enter 'yes' to proceed': yes +Volume vol1 is deleted +``` + +## 桶操作 + +桶是层次结构的第二层级,与 AWS S3 桶相似。如果用户有必要的权限,可以在卷中创建桶。 + +示例命令: + +```shell +$ ozone sh bucket create /vol1/bucket1 +``` + +```shell +$ ozone sh bucket info /vol1/bucket1 +{ + "metadata" : { }, + "volumeName" : "vol1", + "name" : "bucket1", + "storageType" : "DISK", + "versioning" : false, + "creationTime" : "2020-07-28T13:14:45.091Z", + "modificationTime" : "2020-07-28T13:14:45.091Z", + "encryptionKeyName" : null, + "sourceVolume" : null, + "sourceBucket" : null +} +``` + +如果桶是空的,我们可以用以下命令来删除桶。 + +```shell +$ ozone sh bucket delete /vol1/bucket1 +Bucket bucket1 is deleted +``` + +如果桶包含任意键,我们可以递归地删除桶。这将删除桶中的所有键,然后删除桶本身。在运行这个命令后,将无法恢复已删除的内容。 + +```shell +$ ozone sh bucket delete -r /vol1/bucket1 +This command will delete bucket recursively. 
+There is no recovery option after using this command, and deleted keys won't move to trash. +Enter 'yes' to proceed': yes +Bucket bucket1 is deleted +``` +[透明数据加密]({{< ref "security/SecuringTDE.md" >}}) 可以在桶层级被启用。 + +## 键操作 + +键是可以存储数据的对象。 + +```shell +$ ozone sh key put /vol1/bucket1/README.md README.md +``` + +
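As the introduction of this CLI document notes, `ozone sh` delegates to the Java API under the hood. For illustration only, a rough Java counterpart of the `ozone sh key put /vol1/bucket1/README.md README.md` example follows; the class and method names come from the public Ozone client API as commonly documented, and the sketch assumes an `ozone-site.xml` on the classpath pointing at the target Ozone Manager.

```java
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

public final class PutKeyExample {
  public static void main(String[] args) throws Exception {
    byte[] content = "hello ozone".getBytes(StandardCharsets.UTF_8);
    // Connects using the cluster settings found on the classpath.
    try (OzoneClient client = OzoneClientFactory.getRpcClient(new OzoneConfiguration())) {
      ObjectStore store = client.getObjectStore();
      OzoneVolume volume = store.getVolume("vol1");
      OzoneBucket bucket = volume.getBucket("bucket1");
      // Rough equivalent of: ozone sh key put /vol1/bucket1/README.md README.md
      try (OzoneOutputStream out = bucket.createKey("README.md", content.length)) {
        out.write(content);
      }
    }
  }
}
```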

+ + + +```shell +$ ozone sh key info /vol1/bucket1/README.md +{ + "volumeName" : "vol1", + "bucketName" : "bucket1", + "name" : "README.md", + "dataSize" : 3841, + "creationTime" : "2020-07-28T13:17:20.749Z", + "modificationTime" : "2020-07-28T13:17:21.979Z", + "replicationType" : "RATIS", + "replicationFactor" : 1, + "ozoneKeyLocations" : [ { + "containerID" : 1, + "localID" : 104591670688743424, + "length" : 3841, + "offset" : 0 + } ], + "metadata" : { }, + "fileEncryptionInfo" : null +} +``` + +```shell +$ ozone sh key get /vol1/bucket1/README.md /tmp/ +``` + +```shell +$ ozone sh key delete /vol1/bucket1/key1 +``` + + +如果键是在 [FSO]({{< ref "feature/PrefixFSO.zh.md">}}) 桶中,当删除键时它会被移动到回收站,回收站的位置是: +```shell +$ ///.Trash/ +``` +如果键是在OBS桶中,它将被永久删除。 + +## 查询命令行结果 + +Ozone命令行返回JSON响应。[jq](https://stedolan.github.io/jq/manual/) 是一个命令行JSON处理器,可以用来过滤CLI结果以获取所需信息. + +示例命令: + +* 列出不是链接的 FSO 桶。 +```shell +$ ozone sh bucket list /s3v | jq '.[] | select(.link==false and .bucketLayout=="FILE_SYSTEM_OPTIMIZED")' +{ + "metadata": {}, + "volumeName": "s3v", + "name": "fso-bucket", + "storageType": "DISK", + "versioning": false, + "usedBytes": 0, + "usedNamespace": 0, + "creationTime": "2023-02-01T05:18:46.974Z", + "modificationTime": "2023-02-01T05:18:46.974Z", + "quotaInBytes": -1, + "quotaInNamespace": -1, + "bucketLayout": "FILE_SYSTEM_OPTIMIZED", + "owner": "om", + "link": false +} +``` + +* 列出 EC 桶以及它们的复制策略配置。 +```shell +$ ozone sh bucket list /vol1 | jq -r '.[] | select(.replicationConfig.replicationType == "EC") | {"name": .name, "replicationConfig": .replicationConfig}' +{ + "name": "ec5", + "replicationConfig": { + "data": 3, + "parity": 2, + "ecChunkSize": 1048576, + "codec": "RS", + "replicationType": "EC", + "requiredNodes": 5 + } +} +{ + "name": "ec9", + "replicationConfig": { + "data": 6, + "parity": 3, + "ecChunkSize": 1048576, + "codec": "RS", + "replicationType": "EC", + "requiredNodes": 9 + } +} +``` + +* 以制表符分隔的格式列出加密桶的名字以及它们的加密的键名。 +```shell + +$ ozone sh bucket list /vol1 | jq -r '.[] | select(.encryptionKeyName != null) | [.name, .encryptionKeyName] | @tsv' +ec5 key1 +encrypted-bucket key1 +``` diff --git a/hadoop-hdds/docs/content/interface/Ofs.zh.md b/hadoop-hdds/docs/content/interface/Ofs.zh.md new file mode 100644 index 000000000000..25d7039f49ac --- /dev/null +++ b/hadoop-hdds/docs/content/interface/Ofs.zh.md @@ -0,0 +1,249 @@ +--- +title: Ofs (兼容 Hadoop 的文件系统) +date: 2017-09-14 +weight: 1 +menu: + main: + parent: "编程接口" +summary: Hadoop Compatible file system allows any application that expects an HDFS like interface to work against Ozone with zero changes. Frameworks like Apache Spark, YARN and Hive work against Ozone without needing any change. 
**Global level view.** +--- + + +兼容 Hadoop 的文件系统 (HCFS) 接口允许像 Ozone 这样的存储后端轻松集成到 Hadoop 生态系统中。Ozone 文件系统 (OFS) 是一个兼容 Hadoop 的文件系统。 + + + + +## 基础知识 + +有效的 OFS 路径示例: + +``` +ofs://om1/ +ofs://om3:9862/ +ofs://omservice/ +ofs://omservice/volume1/ +ofs://omservice/volume1/bucket1/ +ofs://omservice/volume1/bucket1/dir1 +ofs://omservice/volume1/bucket1/dir1/key1 + +ofs://omservice/tmp/ +ofs://omservice/tmp/key1 +``` + +在 OFS 文件系统中,卷和挂载点位于根目录级别。卷的下一级是桶。每个桶下面是键和目录。 + +请注意,对于挂载点,目前仅支持临时挂载 /tmp。 + +## 配置 + +请在 `core-site.xml` 添加下列配置。 + +{{< highlight xml >}} + + fs.ofs.impl + org.apache.hadoop.fs.ozone.RootedOzoneFileSystem + + + fs.defaultFS + ofs://om-host.example.com/ + +{{< /highlight >}} + +这将使所有的卷和桶成为默认的 Hadoop 兼容文件系统,并注册 ofs 文件系统类型。 + +您还需要将 ozone-filesystem-hadoop3.jar 文件添加到 classpath 中: + +{{< highlight bash >}} +export HADOOP_CLASSPATH=/opt/ozone/share/ozone/lib/ozone-filesystem-hadoop3-*.jar:$HADOOP_CLASSPATH +{{< /highlight >}} + +(请注意: 在 Hadoop 2.x 中, 请使用 `ozone-filesystem-hadoop2-*.jar`) + +当默认的文件系统被建立,用户可以运行命令例如ls,put,mkdir等。 +例如: + +{{< highlight bash >}} +hdfs dfs -ls / +{{< /highlight >}} + +请注意,ofs 对所有桶和卷都有效。用户可以使用 mkdir 创建桶和卷,例如创建名为 volume1 的卷和名为 bucket1 的桶 + +{{< highlight bash >}} +hdfs dfs -mkdir /volume1 +hdfs dfs -mkdir /volume1/bucket1 +{{< /highlight >}} + +或者使用 put 命令向桶中写入一个文件 + +{{< highlight bash >}} +hdfs dfs -put /etc/hosts /volume1/bucket1/test +{{< /highlight >}} + +有关更多用法,请参见: https://issues.apache.org/jira/secure/attachment/12987636/Design%20ofs%20v1.pdf + +## 与 [o3fs]({{< ref "interface/O3fs.md" >}}) 的区别 + +### 创建文件 + +OFS 不允许直接在根目录或卷下创建键(文件)。 +当用户尝试这样做时,他们将收到一个错误消息: + +```bash +$ ozone fs -touch /volume1/key1 +touch: Cannot create file under root or volume. +``` + +### 简化 fs.defaultFS + +使用 OFS 时,fs.defaultFS(在 core-site.xml 中)不再需要像 o3fs 那样在其路径中具有特定的卷和桶。 +只需设置 OM 主机名或 service ID(在 HA 的情况下): + + +```xml + + fs.defaultFS + ofs://omservice + +``` + +客户端将能够访问集群上的所有卷和桶,而无需指定主机名或 service ID。 + +```bash +$ ozone fs -mkdir -p /volume1/bucket1 +``` + +### 通过 FileSystem shell 直接管理卷和桶 + +管理员可以通过 Hadoop FS shell 轻松创建和删除卷和桶。卷和桶被视为类似于目录,因此如果它们不存在,可以使用 `-p` 创建: + +```bash +$ ozone fs -mkdir -p ofs://omservice/volume1/bucket1/dir1/ +``` +请注意,卷和桶名称字符集规则仍然适用。例如,桶和卷名称不接受下划线(`_`): + +```bash +$ ozone fs -mkdir -p /volume_1 +mkdir: Bucket or Volume name has an unsupported character : _ +``` + +## 挂载点和设置 /tmp + +为了与使用 /tmp/ 的传统 Hadoop 应用程序兼容,我们在 FS 的根目录有一个特殊的临时目录挂载点。 +这个功能将来可能会扩展,以支持自定义挂载路径。 + +目前 Ozone 支持两种 /tmp 的配置。第一种(默认)是每个用户的临时目录, +由一个挂载卷和一个用户特定的临时桶组成。第二种(通过 ozone-site.xml 配置) +是一个类似粘滞位的临时目录,对所有用户共用,由一个挂载卷和一个共用的临时桶组成。 + +重要提示:要使用它,首先,**管理员** 需要创建名为 tmp 的卷(卷名目前是硬编码的)并将其 ACL 设置为 world ALL 访问权限。 + +具体来说: + +```bash +$ ozone sh volume create tmp +$ ozone sh volume setacl tmp -al world::a +``` + +每个集群中这些命令**仅需要执行一次** + +### 对于每个用户的 /tmp 目录 (默认) + +**每个用户** 都需要先创建并初始化他们自己的 temp 桶一次 + +```bash +$ ozone fs -mkdir /tmp +2020-06-04 00:00:00,050 [main] INFO rpc.RpcClient: Creating Bucket: tmp/0238 ... 
+``` + +在此之后用户可以向该目录写入,就和向其他常规目录写入一样。例如: + +```bash +$ ozone fs -touch /tmp/key1 +``` + +### 对于所有用户共享的 /tmp 目录 + +要启用类似粘滞位的共享 /tmp 目录,请在 ozone-site.xml 中更新以下配置: + +```xml + + ozone.om.enable.ofs.shared.tmp.dir + true + +``` + +然后,在以**管理员**身份设置好 tmp 卷之后,还需要配置一个 tmp 桶,作为所有用户的共享 /tmp 目录,例如: + +```bash +$ ozone sh bucket create /tmp/tmp +$ ozone sh volume setacl tmp -a user:anyuser:rwlc \ + user:adminuser:a,group:anyuser:rwlc,group:adminuser:a tmp/tmp +``` + +在这里,anyuser 是管理员希望授予访问权限的用户名,而 adminuser 是管理员的用户名。 + +然后用户可以访问 tmp 目录: + +```bash +$ ozone fs -put ./NOTICE.txt ofs://om/tmp/key1 +``` + +## 启用回收站的删除操作 + +为了在 Ozone 中启用回收站,请将这些配置添加到 core-site.xml: + +{{< highlight xml >}} + + fs.trash.interval + 10 + + + fs.trash.classname + org.apache.hadoop.ozone.om.TrashPolicyOzone + +{{< /highlight >}} + +当启用回收站功能后删除键时,这些键会被移动到每个桶下的一个回收站目录中,因为在 Ozone 中不允许将键在桶之间移动(重命名)。 + +```bash +$ ozone fs -rm /volume1/bucket1/key1 +2020-06-04 00:00:00,100 [main] INFO fs.TrashPolicyDefault: Moved: 'ofs://id1/volume1/bucket1/key1' to trash at: ofs://id1/volume1/bucket1/.Trash/hadoop/Current/volume1/bucket1/key1 +``` + +这与 HDFS encryption zone 处理回收站位置的方式非常相似。 + +**请注意** + +1. 可以使用标志 `-skipTrash` 来永久删除文件,而不将其移动到回收站。 +2. 启用回收站时,不允许在桶或卷级别进行删除操作。在这种情况下,必须使用 skipTrash。 +即,不使用 skipTrash 的情况下,不允许使用 `ozone fs -rm -R ofs://vol1/bucket1` 或 `ozone fs -rm -R o3fs://bucket1.vol1` 进行操作。 + +## 递归地列出 + +OFS 支持递归地列出卷、桶和键。 + +例如,如果启用了 ACL 的话, 命令 `ozone fs -ls -R ofs://omservice/` 会递归地列出用户有 LIST 权限的所有卷、桶和键。 +如果禁用了 ACL,这个命令会列出该集群上的所有内容。 + +这个功能不会降低服务器性能,因为循环操作是在客户端上进行的。可以将其视为客户端向服务器发出多个请求以获取所有信息的过程。 diff --git a/hadoop-hdds/docs/content/interface/_index.zh.md b/hadoop-hdds/docs/content/interface/_index.zh.md index fd435aad5dce..82c5e1fb9c97 100644 --- a/hadoop-hdds/docs/content/interface/_index.zh.md +++ b/hadoop-hdds/docs/content/interface/_index.zh.md @@ -1,5 +1,5 @@ --- -title: "编程接口" +title: "客户端接口" menu: main: weight: 5 diff --git a/hadoop-hdds/docs/content/recipe/BotoClient.zh.md b/hadoop-hdds/docs/content/recipe/BotoClient.zh.md new file mode 100644 index 000000000000..64a1d8748a64 --- /dev/null +++ b/hadoop-hdds/docs/content/recipe/BotoClient.zh.md @@ -0,0 +1,188 @@ +--- +title: 使用 Boto3 客户端访问 Ozone 对象存储 +linktitle: Boto3 +summary: 如何使用 Boto3 客户端访问 Ozone 对象存储? +--- + + +这个指南展示了如何从 Boto3 客户端访问 Ozone 对象存储。以下 API 已经过验证: + +- Create bucket +- List bucket +- Head bucket +- Delete bucket +- Upload file +- Download file +- Delete objects(keys) +- Head object +- Multipart upload + + +## 要求 + +您将需要较高版本的 Python3 来运行 Boto3 客户端,请参考 Boto3 的安装需求: + +https://boto3.amazonaws.com/v1/documentation/api/latest/index.html + +## 获取对 Ozone 的资源访问 +您可以参考 Amazon Boto3 文档,关于创建 `s3` 资源的内容在此处: +https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html + + s3 = boto3.resource('s3', + endpoint_url='http://localhost:9878', + aws_access_key_id='testuser/scm@EXAMPLE.COM', + aws_secret_access_key='c261b6ecabf7d37d5f9ded654b1c724adac9bd9f13e247a235e567e8296d2999' + ) + 'endpoint_url' is pointing to Ozone s3 endpoint. 
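The snippet above targets Ozone's S3 gateway with Boto3; the same endpoint can be reached from other S3-compatible SDKs as well. Purely as an illustration, a Java sketch using the AWS SDK for Java v1 follows; the SDK classes are that library's API rather than anything added by this patch, and the endpoint and credentials are the same placeholders used above.

```java
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public final class OzoneS3JavaExample {
  public static void main(String[] args) {
    BasicAWSCredentials creds = new BasicAWSCredentials(
        "testuser/scm@EXAMPLE.COM",
        "c261b6ecabf7d37d5f9ded654b1c724adac9bd9f13e247a235e567e8296d2999");
    AmazonS3 s3 = AmazonS3ClientBuilder.standard()
        // Same S3 gateway endpoint as the Boto3 example; the region is required by the
        // builder but not interpreted by Ozone.
        .withEndpointConfiguration(
            new AwsClientBuilder.EndpointConfiguration("http://localhost:9878", "us-east-1"))
        .withCredentials(new AWSStaticCredentialsProvider(creds))
        // Ozone's S3 gateway is typically addressed with path-style URLs.
        .withPathStyleAccessEnabled(true)
        .build();
    s3.createBucket("bucket1");
    s3.listBuckets().forEach(bucket -> System.out.println(bucket.getName()));
  }
}
```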
+ + +## 通过 session 获取对 Ozone 的客户端访问 +您可以参考 Amazon Boto3 文档,关于 session 的内容在此处: +https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html + + Create a session + session = boto3.session.Session() + + Obtain s3 client to Ozone via session: + + s3_client = session.client( + service_name='s3', + aws_access_key_id='testuser/scm@EXAMPLE.COM', + aws_secret_access_key='c261b6ecabf7d37d5f9ded654b1c724adac9bd9f13e247a235e567e8296d2999', + endpoint_url='http://localhost:9878', + ) + 'endpoint_url' is pointing to Ozone s3 endpoint. + + In our code sample below, we're demonstrating the usage of both s3 and s3_client. + +如果您连接到一个安全的集群,有多种方式配置 Boto3 客户端凭证。在这些情况下,创建 Ozone s3 客户端时传递 `aws_access_key_id` 和 `aws_secret_access_key` 的上述步骤应该被跳过。 + +请参考 Boto3 文档以获取详细信息,在此处: +https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html + + +### 创建桶 + response = s3_client.create_bucket(Bucket='bucket1') + print(response) + +这将在一个名为 `s3v` 的卷中创建一个名为 `bucket1` 的桶 + +### 列出所有桶 + response = s3_client.list_buckets() + print('Existing buckets:') + for bucket in response['Buckets']: + print(f' {bucket["Name"]}') + +这将列出 Ozone `s3v` 卷中的所有桶 + +### 查看桶信息 + response = s3_client.head_bucket(Bucket='bucket1') + print(response) + +这将在 Ozone 卷 `s3v` 中查看桶 `bucket1` 的信息。 + +### 删除桶 + response = s3_client.delete_bucket(Bucket='bucket1') + print(response) + +这将从 Ozone 卷 `s3v` 中删除一个桶 `bucket1`。 + +### 上传文件 + response = s3.Bucket('bucket1').upload_file('./README.md','README.md') + print(response) + +这将从向 Ozone 卷 `s3v` 和桶 `bucket1` 中上传 `README.md` 文件并创建一个 `README.md` 键。 + +### 下载文件 + response = s3.Bucket('bucket1').download_file('README.md', 'download.md') + print(response) + +这将从从 Ozone 卷 `s3v` 和桶 `bucket1` 中下载 `README.md` 并创建一个 `README.md` 文件到本地。 + +### 查看对象信息 + response = s3_client.head_object(Bucket='bucket1', Key='README.md') + print(response) + +这将查看一个位于 Ozone 卷 `s3v` 和桶 `bucket1` 中的 `README.md` 文件的信息。 + +### 删除多个对象 + response = s3_client.delete_objects( + Bucket='bucket1', + Delete={ + 'Objects': [ + { + 'Key': 'README4.md', + }, + { + 'Key': 'README3.md', + }, + ], + 'Quiet': False, + }, + ) + +这将从 Ozone 卷 `s3v` 和桶 `bucket1` 中删除多个对象 `README3.md` 和 `README4.md` + +### 分片上传 + response = s3_client.create_multipart_upload(Bucket='bucket1', Key='key1') + print(response) + uid=response['UploadId'] + print(uid) + + response = s3_client.upload_part_copy( + Bucket='bucket1', + CopySource='/bucket1/maven.gz', + Key='key1', + PartNumber=1, + UploadId=str(uid) + ) + print(response) + etag1=response.get('CopyPartResult').get('ETag') + print(etag1) + + response = s3_client.upload_part_copy( + Bucket='bucket1', + CopySource='/bucket1/maven1.gz', + Key='key1', + PartNumber=2, + UploadId=str(uid) + ) + print(response) + etag2=response.get('CopyPartResult').get('ETag') + print(etag2) + + response = s3_client.complete_multipart_upload( + Bucket='bucket1', + Key='key1', + MultipartUpload={ + 'Parts': [ + { + 'ETag': str(etag1), + 'PartNumber': 1, + }, + { + 'ETag': str(etag2), + 'PartNumber': 2, + }, + ], + }, + UploadId=str(uid), + ) + print(response) + +这将使用来自 Ozone 卷 `s3v` 的 `maven.gz` 和 `maven1.gz` 作为复制源,以创建 Ozone 卷 `s3v` 中的新对象 `key1`。请注意,`ETag` 是必需的且对于使用分片上传 API 非常重要。 diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/ScmTopologyClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/ScmTopologyClient.java index 2e42df957346..5e33eefde6c5 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/ScmTopologyClient.java 
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/ScmTopologyClient.java @@ -19,7 +19,10 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.slf4j.Logger; @@ -36,6 +39,7 @@ import java.util.concurrent.atomic.AtomicReference; import static java.util.Objects.requireNonNull; +import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_NETWORK_TOPOLOGY_REFRESH_DURATION; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_NETWORK_TOPOLOGY_REFRESH_DURATION_DEFAULT; @@ -48,7 +52,8 @@ public class ScmTopologyClient { LoggerFactory.getLogger(ScmTopologyClient.class); private final ScmBlockLocationProtocol scmBlockLocationProtocol; - private final AtomicReference cache = new AtomicReference<>(); + private final AtomicReference cache = + new AtomicReference<>(); private ScheduledExecutorService executorService; public ScmTopologyClient( @@ -56,7 +61,7 @@ public ScmTopologyClient( this.scmBlockLocationProtocol = scmBlockLocationProtocol; } - public InnerNode getClusterTree() { + public NetworkTopology getClusterMap() { return requireNonNull(cache.get(), "ScmBlockLocationClient must have been initialized already."); } @@ -66,7 +71,10 @@ public void start(ConfigurationSource conf) throws IOException { scmBlockLocationProtocol.getNetworkTopology(); LOG.info("Initial network topology fetched from SCM: {}.", initialTopology); - cache.set(initialTopology); + cache.set(new NetworkTopologyImpl(conf.get( + ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, + ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT), + initialTopology)); scheduleNetworkTopologyPoller(conf, Instant.now()); } @@ -97,7 +105,7 @@ private void scheduleNetworkTopologyPoller(ConfigurationSource conf, LOG.debug("Scheduling NetworkTopologyPoller with an initial delay of {}.", initialDelay); - executorService.scheduleAtFixedRate(() -> checkAndRefresh(), + executorService.scheduleAtFixedRate(() -> checkAndRefresh(conf), initialDelay.toMillis(), refreshDuration.toMillis(), TimeUnit.MILLISECONDS); } @@ -110,18 +118,20 @@ public static Duration parseRefreshDuration(ConfigurationSource conf) { return Duration.ofMillis(refreshDurationInMs); } - private synchronized void checkAndRefresh() { - InnerNode current = cache.get(); + private synchronized void checkAndRefresh(ConfigurationSource conf) { + InnerNode current = (InnerNode) cache.get().getNode(ROOT); try { InnerNode newTopology = scmBlockLocationProtocol.getNetworkTopology(); if (!newTopology.equals(current)) { - cache.set(newTopology); - LOG.info("Updated network topology cluster tree fetched from " + - "SCM: {}.", newTopology); + cache.set(new NetworkTopologyImpl(conf.get( + ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, + ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT), + newTopology)); + LOG.info("Updated network topology fetched from SCM: {}.", newTopology); } } catch (IOException e) { throw new UncheckedIOException( - "Error fetching updated network topology cluster tree from SCM", e); + "Error fetching updated network topology from SCM", e); } } } diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index 109358c67bf6..b573ee0d040c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -528,15 +528,16 @@ public HddsProtos.Node queryNode(UUID uuid) throws IOException { /** * Attempts to decommission the list of nodes. * @param nodes The list of hostnames or hostname:ports to decommission + * @param force true to skip fail-early checks and try to decommission nodes * @throws IOException */ @Override - public List decommissionNodes(List nodes) + public List decommissionNodes(List nodes, boolean force) throws IOException { Preconditions.checkNotNull(nodes); DecommissionNodesRequestProto request = DecommissionNodesRequestProto.newBuilder() - .addAllHosts(nodes) + .addAllHosts(nodes).setForce(force) .build(); DecommissionNodesResponseProto response = submitRequest(Type.DecommissionNodes, @@ -902,7 +903,13 @@ public StartContainerBalancerResponseProto startContainerBalancer( Optional maxDatanodesPercentageToInvolvePerIteration, Optional maxSizeToMovePerIterationInGB, Optional maxSizeEnteringTargetInGB, - Optional maxSizeLeavingSourceInGB) throws IOException { + Optional maxSizeLeavingSourceInGB, + Optional balancingInterval, + Optional moveTimeout, + Optional moveReplicationTimeout, + Optional networkTopologyEnable, + Optional includeNodes, + Optional excludeNodes) throws IOException { StartContainerBalancerRequestProto.Builder builder = StartContainerBalancerRequestProto.newBuilder(); builder.setTraceID(TracingUtil.exportCurrentSpan()); @@ -911,29 +918,29 @@ public StartContainerBalancerResponseProto startContainerBalancer( if (threshold.isPresent()) { double tsd = threshold.get(); Preconditions.checkState(tsd >= 0.0D && tsd < 100D, - "threshold should be specified in range [0.0, 100.0)."); + "Threshold should be specified in the range [0.0, 100.0)."); builder.setThreshold(tsd); } if (maxSizeToMovePerIterationInGB.isPresent()) { long mstm = maxSizeToMovePerIterationInGB.get(); Preconditions.checkState(mstm > 0, - "maxSizeToMovePerIterationInGB must be positive."); + "Max Size To Move Per Iteration In GB must be positive."); builder.setMaxSizeToMovePerIterationInGB(mstm); } if (maxDatanodesPercentageToInvolvePerIteration.isPresent()) { int mdti = maxDatanodesPercentageToInvolvePerIteration.get(); Preconditions.checkState(mdti >= 0, - "maxDatanodesPercentageToInvolvePerIteration must be " + + "Max Datanodes Percentage To Involve Per Iteration must be " + "greater than equal to zero."); Preconditions.checkState(mdti <= 100, - "maxDatanodesPercentageToInvolvePerIteration must be " + + "Max Datanodes Percentage To Involve Per Iteration must be " + "lesser than equal to hundred."); builder.setMaxDatanodesPercentageToInvolvePerIteration(mdti); } if (iterations.isPresent()) { int i = iterations.get(); Preconditions.checkState(i > 0 || i == -1, - "number of iterations must be positive or" + + "Number of Iterations must be positive or" + " -1 (for running container balancer infinitely)."); builder.setIterations(i); } @@ -941,17 +948,53 @@ public StartContainerBalancerResponseProto 
startContainerBalancer( if (maxSizeEnteringTargetInGB.isPresent()) { long mset = maxSizeEnteringTargetInGB.get(); Preconditions.checkState(mset > 0, - "maxSizeEnteringTargetInGB must be positive."); + "Max Size Entering Target In GB must be positive."); builder.setMaxSizeEnteringTargetInGB(mset); } if (maxSizeLeavingSourceInGB.isPresent()) { long msls = maxSizeLeavingSourceInGB.get(); Preconditions.checkState(msls > 0, - "maxSizeLeavingSourceInGB must be positive."); + "Max Size Leaving Source In GB must be positive."); builder.setMaxSizeLeavingSourceInGB(msls); } + if (balancingInterval.isPresent()) { + int bi = balancingInterval.get(); + Preconditions.checkState(bi > 0, + "Balancing Interval must be greater than zero."); + builder.setBalancingInterval(bi); + } + + if (moveTimeout.isPresent()) { + int mt = moveTimeout.get(); + Preconditions.checkState(mt > 0, + "Move Timeout must be greater than zero."); + builder.setMoveTimeout(mt); + } + + if (moveReplicationTimeout.isPresent()) { + int mrt = moveReplicationTimeout.get(); + Preconditions.checkState(mrt > 0, + "Move Replication Timeout must be greater than zero."); + builder.setMoveReplicationTimeout(mrt); + } + + if (networkTopologyEnable.isPresent()) { + Boolean nt = networkTopologyEnable.get(); + builder.setNetworkTopologyEnable(nt); + } + + if (includeNodes.isPresent()) { + String in = includeNodes.get(); + builder.setIncludeNodes(in); + } + + if (excludeNodes.isPresent()) { + String ex = excludeNodes.get(); + builder.setExcludeNodes(ex); + } + StartContainerBalancerRequestProto request = builder.build(); return submitRequest(Type.StartContainerBalancer, builder1 -> builder1.setStartContainerBalancerRequest(request)) diff --git a/hadoop-hdds/hadoop-dependency-client/pom.xml b/hadoop-hdds/hadoop-dependency-client/pom.xml index f29232090fdf..5d0ca946aeed 100644 --- a/hadoop-hdds/hadoop-dependency-client/pom.xml +++ b/hadoop-hdds/hadoop-dependency-client/pom.xml @@ -43,10 +43,18 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-common ${hadoop.version} + + com.nimbusds + nimbus-jose-jwt + org.xerial.snappy snappy-java + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + org.apache.hadoop hadoop-annotations @@ -202,12 +210,24 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + + + com.nimbusds + nimbus-jose-jwt + org.apache.hadoop hadoop-hdfs ${hadoop.version} compile + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + com.google.guava guava diff --git a/hadoop-hdds/hadoop-dependency-server/pom.xml b/hadoop-hdds/hadoop-dependency-server/pom.xml index 82e4c33325e2..f50048a0182f 100644 --- a/hadoop-hdds/hadoop-dependency-server/pom.xml +++ b/hadoop-hdds/hadoop-dependency-server/pom.xml @@ -43,10 +43,18 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-common ${hadoop.version} + + com.nimbusds + nimbus-jose-jwt + org.xerial.snappy snappy-java + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + org.apache.curator * @@ -109,6 +117,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + + + com.nimbusds + nimbus-jose-jwt + commons-cli @@ -120,6 +136,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ${hadoop.version} compile + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + io.netty * diff --git a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto index e8b8d623942a..eff95099371c 100644 --- 
a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto +++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto @@ -362,6 +362,7 @@ message DatanodeUsageInfoResponseProto { */ message DecommissionNodesRequestProto { repeated string hosts = 1; + optional bool force = 2; } @@ -577,6 +578,12 @@ message StartContainerBalancerRequestProto { optional int64 maxSizeLeavingSourceInGB = 7; optional int32 maxDatanodesPercentageToInvolvePerIteration = 8; optional int32 iterations = 9; + optional int32 balancingInterval = 10; + optional int32 moveTimeout = 11; + optional int32 moveReplicationTimeout = 12; + optional bool networkTopologyEnable = 13; + optional string includeNodes = 14; + optional string excludeNodes = 15; } message StartContainerBalancerResponseProto { diff --git a/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto b/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto index ccde261de024..f5cac299238d 100644 --- a/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto +++ b/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto @@ -436,9 +436,11 @@ message WriteChunkRequestProto { required DatanodeBlockID blockID = 1; optional ChunkInfo chunkData = 2; optional bytes data = 3; + optional PutBlockRequestProto block = 4; } message WriteChunkResponseProto { + optional GetCommittedBlockLengthResponseProto committedBlockLength = 1; } enum ReadChunkVersion { diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/JniLibNamePropertyWriter.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/JniLibNamePropertyWriter.java index 46fbeb412a84..0d79a1c833d0 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/JniLibNamePropertyWriter.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/JniLibNamePropertyWriter.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdds.utils.db.managed; -import org.rocksdb.util.Environment; import java.io.IOException; import java.io.OutputStreamWriter; @@ -39,8 +38,8 @@ public static void main(String[] args) { String filePath = args[0]; try (Writer writer = new OutputStreamWriter( Files.newOutputStream(Paths.get(filePath)), StandardCharsets.UTF_8)) { - writer.write("rocksdbLibName=" + - Environment.getJniLibraryFileName("rocksdb")); + String libName = ManagedRocksObjectUtils.getRocksDBLibFileName(); + writer.write("rocksdbLibName=" + libName); } catch (IOException e) { e.printStackTrace(); } diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java index 9c86a47d7401..148abee7fc0e 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.utils.LeakDetector; import org.apache.ratis.util.UncheckedAutoCloseable; import org.rocksdb.RocksDB; +import org.rocksdb.util.Environment; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -94,4 +95,11 @@ public static void waitForFileDelete(File file, Duration maxDuration) public static void loadRocksDBLibrary() { RocksDB.loadLibrary(); } + + /** + * Returns 
RocksDB library file name. + */ + public static String getRocksDBLibFileName() { + return Environment.getJniLibraryFileName("rocksdb"); + } } diff --git a/hadoop-hdds/rocks-native/pom.xml b/hadoop-hdds/rocks-native/pom.xml index 180ae936998d..de8c68a4801e 100644 --- a/hadoop-hdds/rocks-native/pom.xml +++ b/hadoop-hdds/rocks-native/pom.xml @@ -253,6 +253,8 @@ + @@ -263,7 +265,7 @@ - + dependentFiles) { if (isLibraryLoaded(libraryName)) { return true; } @@ -116,9 +119,9 @@ public synchronized boolean loadLibrary(final String libraryName) { } if (!loaded) { - Optional file = copyResourceFromJarToTemp(libraryName); - if (file.isPresent()) { - System.load(file.get().getAbsolutePath()); + Pair, List> files = copyResourceFromJarToTemp(libraryName, dependentFiles); + if (files.getKey().isPresent()) { + System.load(files.getKey().get().getAbsolutePath()); loaded = true; } } @@ -137,19 +140,20 @@ static String getSystemProperty(String property) { // Added function to make this testable @VisibleForTesting - static InputStream getResourceStream(String libraryFileName) { + static InputStream getResourceStream(String libraryFileName) throws IOException { return NativeLibraryLoader.class.getClassLoader() .getResourceAsStream(libraryFileName); } - private Optional copyResourceFromJarToTemp(final String libraryName) + private Pair, List> copyResourceFromJarToTemp(final String libraryName, + final List dependentFileNames) throws IOException { final String libraryFileName = getJniLibraryFileName(libraryName); InputStream is = null; try { is = getResourceStream(libraryFileName); if (is == null) { - return Optional.empty(); + return Pair.of(Optional.empty(), null); } final String nativeLibDir = @@ -160,15 +164,28 @@ private Optional copyResourceFromJarToTemp(final String libraryName) // create a temporary file to copy the library to final File temp = File.createTempFile(libraryName, getLibOsSuffix(), dir); if (!temp.exists()) { - return Optional.empty(); + return Pair.of(Optional.empty(), null); } else { temp.deleteOnExit(); } Files.copy(is, temp.toPath(), StandardCopyOption.REPLACE_EXISTING); + List dependentFiles = new ArrayList<>(); + for (String fileName : dependentFileNames) { + if (is != null) { + is.close(); + } + is = getResourceStream(fileName); + File file = new File(dir, fileName); + Files.copy(is, file.toPath(), StandardCopyOption.REPLACE_EXISTING); + if (file.exists()) { + file.deleteOnExit(); + } + dependentFiles.add(file); + } ShutdownHookManager.get().addShutdownHook(temp::delete, LIBRARY_SHUTDOWN_HOOK_PRIORITY); - return Optional.of(temp); + return Pair.of(Optional.of(temp), dependentFiles); } finally { if (is != null) { is.close(); diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java index 7c8783b43948..2a58dfce4c4c 100644 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java +++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java @@ -23,6 +23,7 @@ import org.slf4j.LoggerFactory; import java.io.Closeable; +import java.util.Arrays; import java.util.function.Function; import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; @@ -34,7 +35,8 @@ public class ManagedRawSSTFileReader implements Closeable { public static boolean loadLibrary() throws 
NativeLibraryNotLoadedException { ManagedRocksObjectUtils.loadRocksDBLibrary(); - if (!NativeLibraryLoader.getInstance().loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)) { + if (!NativeLibraryLoader.getInstance().loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME, Arrays.asList( + ManagedRocksObjectUtils.getRocksDBLibFileName()))) { throw new NativeLibraryNotLoadedException(ROCKS_TOOLS_NATIVE_LIBRARY_NAME); } return true; diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java index 8f18f8d1e423..f0074e0a1ac9 100644 --- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java +++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java @@ -28,6 +28,7 @@ import java.io.ByteArrayInputStream; import java.io.File; import java.nio.file.Path; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.stream.Stream; @@ -72,7 +73,7 @@ public void testNativeLibraryLoader(String nativeLibraryDirectoryLocation) throw mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getResourceStream(anyString())) .thenReturn(new ByteArrayInputStream(new byte[]{0, 1, 2, 3})); String dummyLibraryName = "dummy_lib"; - NativeLibraryLoader.getInstance().loadLibrary(dummyLibraryName); + NativeLibraryLoader.getInstance().loadLibrary(dummyLibraryName, Collections.emptyList()); NativeLibraryLoader.isLibraryLoaded(dummyLibraryName); // Checking if the resource with random was copied to a temp file. File[] libPath = new File(nativeLibraryDirectoryLocation == null ? "" : nativeLibraryDirectoryLocation) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java index 0e45b131363d..f47abe65befd 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java @@ -386,6 +386,11 @@ public void saveConfiguration(ContainerBalancerConfiguration configuration, .build()); } + @VisibleForTesting + public ContainerBalancerConfiguration getConfig() { + return this.config; + } + private void validateConfiguration(ContainerBalancerConfiguration conf) throws InvalidContainerBalancerConfigurationException { // maxSizeEnteringTarget and maxSizeLeavingSource should by default be diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java index 7e2ba2fd0125..e275d345a5a7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java @@ -338,6 +338,10 @@ public Duration getMoveReplicationTimeout() { return Duration.ofMillis(moveReplicationTimeout); } + public void setMoveReplicationTimeout(Duration duration) { + this.moveReplicationTimeout = duration.toMillis(); + } + public void setMoveReplicationTimeout(long millis) { this.moveReplicationTimeout = millis; } diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java index 6350c3c76194..684df784c279 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java @@ -152,11 +152,16 @@ public boolean canSizeLeaveSource(DatanodeDetails source, long size) { if (sizeLeavingNode.containsKey(source)) { long sizeLeavingAfterMove = sizeLeavingNode.get(source) + size; //size can be moved out of source datanode only when the following - //two condition are met. - //1 sizeLeavingAfterMove does not succeed the configured + //three conditions are met. + //1 size should be greater than zero bytes + //2 sizeLeavingAfterMove does not succeed the configured // MaxSizeLeavingTarget - //2 after subtracting sizeLeavingAfterMove, the usage is bigger + //3 after subtracting sizeLeavingAfterMove, the usage is bigger // than or equal to lowerLimit + if (size <= 0) { + LOG.debug("{} bytes container cannot leave datanode {}", size, source.getUuidString()); + return false; + } if (sizeLeavingAfterMove > config.getMaxSizeLeavingSource()) { LOG.debug("{} bytes cannot leave datanode {} because 'size.leaving" + ".source.max' limit is {} and {} bytes have already left.", diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java index a5583b48b107..9d9bf07fda3b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java @@ -440,7 +440,7 @@ public void close() throws IOException { transactionBuffer.close(); HadoopExecutors. 
          shutdown(installSnapshotExecutor, LOG, 5, TimeUnit.SECONDS);
-    } else {
+    } else if (!scm.isStopped()) {
       scm.shutDown("scm statemachine is closed by ratis, terminate SCM");
     }
   }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
index 4ace6d22d51c..42a43ad589d8 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java
@@ -21,10 +21,14 @@
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
 import org.apache.hadoop.hdds.scm.DatanodeAdminError;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
 import org.apache.hadoop.hdds.scm.ha.SCMContext;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
@@ -42,6 +46,7 @@
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
@@ -56,6 +61,7 @@ public class NodeDecommissionManager {
   private final DatanodeAdminMonitor monitor;
   private final NodeManager nodeManager;
+  private ContainerManager containerManager;
   private final SCMContext scmContext;
   private final boolean useHostnames;
@@ -252,10 +258,11 @@ private boolean validateDNPortMatch(int port, DatanodeDetails dn) {
     return false;
   }
-  public NodeDecommissionManager(OzoneConfiguration config, NodeManager nm,
+  public NodeDecommissionManager(OzoneConfiguration config, NodeManager nm, ContainerManager cm,
      SCMContext scmContext, EventPublisher eventQueue,
      ReplicationManager rm) {
     this.nodeManager = nm;
+    this.containerManager = cm;
     this.scmContext = scmContext;
     executor = Executors.newScheduledThreadPool(1,
@@ -305,9 +312,21 @@ public DatanodeAdminMonitor getMonitor() {
   }
   public synchronized List<DatanodeAdminError> decommissionNodes(
-      List<String> nodes) {
+      List<String> nodes, boolean force) {
     List<DatanodeAdminError> errors = new ArrayList<>();
     List<DatanodeDetails> dns = mapHostnamesToDatanodes(nodes, errors);
+    // add check to fail early if force flag is not set
+    if (!force) {
+      LOG.info("Force flag = {}. Checking if decommission is possible for dns: {}", force, dns);
+      boolean decommissionPossible = checkIfDecommissionPossible(dns, errors);
+      if (!decommissionPossible) {
+        LOG.error("Cannot decommission nodes as sufficient nodes are not available.");
+        errors.add(new DatanodeAdminError("AllHosts", "Sufficient nodes are not available."));
+        return errors;
+      }
+    } else {
+      LOG.info("Force flag = {}.
Skip checking if decommission is possible for dns: {}", force, dns); + } for (DatanodeDetails dn : dns) { try { startDecommission(dn); @@ -368,6 +387,61 @@ public synchronized void startDecommission(DatanodeDetails dn) } } + private synchronized boolean checkIfDecommissionPossible(List dns, List errors) { + int numDecom = dns.size(); + List validDns = new ArrayList<>(dns); + int inServiceTotal = nodeManager.getNodeCount(NodeStatus.inServiceHealthy()); + for (DatanodeDetails dn : dns) { + try { + NodeStatus nodeStatus = getNodeStatus(dn); + NodeOperationalState opState = nodeStatus.getOperationalState(); + if (opState != NodeOperationalState.IN_SERVICE) { + numDecom--; + validDns.remove(dn); + } + } catch (NodeNotFoundException ex) { + numDecom--; + validDns.remove(dn); + } + } + + for (DatanodeDetails dn : validDns) { + Set containers; + try { + containers = nodeManager.getContainers(dn); + } catch (NodeNotFoundException ex) { + LOG.warn("The host {} was not found in SCM. Ignoring the request to " + + "decommission it", dn.getHostName()); + continue; // ignore the DN and continue to next one + } + + for (ContainerID cid : containers) { + ContainerInfo cif; + try { + cif = containerManager.getContainer(cid); + } catch (ContainerNotFoundException ex) { + LOG.warn("Could not find container info for container {}.", cid); + continue; // ignore the container and continue to next one + } + synchronized (cif) { + if (cif.getState().equals(HddsProtos.LifeCycleState.DELETED) || + cif.getState().equals(HddsProtos.LifeCycleState.DELETING)) { + continue; + } + int reqNodes = cif.getReplicationConfig().getRequiredNodes(); + if ((inServiceTotal - numDecom) < reqNodes) { + LOG.info("Cannot decommission nodes. Tried to decommission {} nodes of which valid nodes = {}. " + + "Cluster state: In-service nodes = {}, nodes required for replication = {}. 
" + + "Failing due to datanode : {}, container : {}", + dns.size(), numDecom, inServiceTotal, reqNodes, dn, cid); + return false; + } + } + } + } + return true; + } + public synchronized List recommissionNodes( List nodes) { List errors = new ArrayList<>(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java index a44536bf4463..16a8cbd5a4f5 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -1107,6 +1107,12 @@ public StartContainerBalancerResponseProto startContainerBalancer( Optional maxSizeToMovePerIterationInGB = Optional.empty(); Optional maxSizeEnteringTargetInGB = Optional.empty(); Optional maxSizeLeavingSourceInGB = Optional.empty(); + Optional balancingInterval = Optional.empty(); + Optional moveTimeout = Optional.empty(); + Optional moveReplicationTimeout = Optional.empty(); + Optional networkTopologyEnable = Optional.empty(); + Optional includeNodes = Optional.empty(); + Optional excludeNodes = Optional.empty(); if (request.hasThreshold()) { threshold = Optional.of(request.getThreshold()); @@ -1132,19 +1138,47 @@ public StartContainerBalancerResponseProto startContainerBalancer( maxSizeToMovePerIterationInGB = Optional.of(request.getMaxSizeToMovePerIterationInGB()); } + if (request.hasMaxSizeEnteringTargetInGB()) { maxSizeEnteringTargetInGB = Optional.of(request.getMaxSizeEnteringTargetInGB()); } + if (request.hasMaxSizeLeavingSourceInGB()) { maxSizeLeavingSourceInGB = Optional.of(request.getMaxSizeLeavingSourceInGB()); } + if (request.hasBalancingInterval()) { + balancingInterval = Optional.of(request.getBalancingInterval()); + } + + if (request.hasMoveTimeout()) { + moveTimeout = Optional.of(request.getMoveTimeout()); + } + + if (request.hasMoveReplicationTimeout()) { + moveReplicationTimeout = Optional.of(request.getMoveReplicationTimeout()); + } + + if (request.hasNetworkTopologyEnable()) { + networkTopologyEnable = Optional.of(request.getNetworkTopologyEnable()); + } + + if (request.hasIncludeNodes()) { + includeNodes = Optional.of(request.getIncludeNodes()); + } + + if (request.hasExcludeNodes()) { + excludeNodes = Optional.of(request.getExcludeNodes()); + } + return impl.startContainerBalancer(threshold, iterations, maxDatanodesPercentageToInvolvePerIteration, maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, - maxSizeLeavingSourceInGB); + maxSizeLeavingSourceInGB, balancingInterval, moveTimeout, + moveReplicationTimeout, networkTopologyEnable, includeNodes, + excludeNodes); } public StopContainerBalancerResponseProto stopContainerBalancer( @@ -1165,7 +1199,7 @@ public ContainerBalancerStatusResponseProto getContainerBalancerStatus( public DecommissionNodesResponseProto decommissionNodes( DecommissionNodesRequestProto request) throws IOException { List errors = - impl.decommissionNodes(request.getHostsList()); + impl.decommissionNodes(request.getHostsList(), request.getForce()); DecommissionNodesResponseProto.Builder response = DecommissionNodesResponseProto.newBuilder(); for (DatanodeAdminError e : errors) { diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java index e1a7d2dca531..02bc10ba6e40 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java @@ -22,6 +22,7 @@ import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MutableCounterLong; +import org.apache.hadoop.metrics2.lib.MutableGaugeLong; /** * This class is used for maintaining SafeMode metric information, which can @@ -33,16 +34,16 @@ public class SafeModeMetrics { // These all values will be set to some values when safemode is enabled. - private @Metric MutableCounterLong + private @Metric MutableGaugeLong numContainerWithOneReplicaReportedThreshold; private @Metric MutableCounterLong currentContainersWithOneReplicaReportedCount; // When hdds.scm.safemode.pipeline-availability.check is set then only // below metrics will have some values, otherwise they will be zero. - private @Metric MutableCounterLong numHealthyPipelinesThreshold; + private @Metric MutableGaugeLong numHealthyPipelinesThreshold; private @Metric MutableCounterLong currentHealthyPipelinesCount; - private @Metric MutableCounterLong + private @Metric MutableGaugeLong numPipelinesWithAtleastOneReplicaReportedThreshold; private @Metric MutableCounterLong currentPipelinesWithAtleastOneReplicaReportedCount; @@ -55,7 +56,7 @@ public static SafeModeMetrics create() { } public void setNumHealthyPipelinesThreshold(long val) { - this.numHealthyPipelinesThreshold.incr(val); + this.numHealthyPipelinesThreshold.set(val); } public void incCurrentHealthyPipelinesCount() { @@ -63,7 +64,7 @@ public void incCurrentHealthyPipelinesCount() { } public void setNumPipelinesWithAtleastOneReplicaReportedThreshold(long val) { - this.numPipelinesWithAtleastOneReplicaReportedThreshold.incr(val); + this.numPipelinesWithAtleastOneReplicaReportedThreshold.set(val); } public void incCurrentHealthyPipelinesWithAtleastOneReplicaReportedCount() { @@ -71,14 +72,14 @@ public void incCurrentHealthyPipelinesWithAtleastOneReplicaReportedCount() { } public void setNumContainerWithOneReplicaReportedThreshold(long val) { - this.numContainerWithOneReplicaReportedThreshold.incr(val); + this.numContainerWithOneReplicaReportedThreshold.set(val); } public void incCurrentContainersWithOneReplicaReportedCount() { this.currentContainersWithOneReplicaReportedCount.incr(); } - MutableCounterLong getNumHealthyPipelinesThreshold() { + MutableGaugeLong getNumHealthyPipelinesThreshold() { return numHealthyPipelinesThreshold; } @@ -86,7 +87,7 @@ MutableCounterLong getCurrentHealthyPipelinesCount() { return currentHealthyPipelinesCount; } - MutableCounterLong + MutableGaugeLong getNumPipelinesWithAtleastOneReplicaReportedThreshold() { return numPipelinesWithAtleastOneReplicaReportedThreshold; } @@ -95,7 +96,7 @@ MutableCounterLong getCurrentPipelinesWithAtleastOneReplicaCount() { return currentPipelinesWithAtleastOneReplicaReportedCount; } - MutableCounterLong getNumContainerWithOneReplicaReportedThreshold() { + MutableGaugeLong getNumContainerWithOneReplicaReportedThreshold() { return numContainerWithOneReplicaReportedThreshold; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index 2df2a4847e36..ecfb92104da2 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -22,7 +22,6 @@ package org.apache.hadoop.hdds.scm.server; import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; import com.google.common.base.Strings; import com.google.common.collect.Maps; import com.google.protobuf.BlockingService; @@ -100,6 +99,7 @@ import java.io.IOException; import java.net.InetSocketAddress; +import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -646,11 +646,11 @@ public HddsProtos.Node queryNode(UUID uuid) } @Override - public List decommissionNodes(List nodes) + public List decommissionNodes(List nodes, boolean force) throws IOException { try { getScm().checkAdminAccess(getRemoteUser(), false); - return scm.getScmDecommissionManager().decommissionNodes(nodes); + return scm.getScmDecommissionManager().decommissionNodes(nodes, force); } catch (Exception ex) { LOG.error("Failed to decommission nodes", ex); throw ex; @@ -1047,67 +1047,130 @@ public StartContainerBalancerResponseProto startContainerBalancer( Optional maxDatanodesPercentageToInvolvePerIteration, Optional maxSizeToMovePerIterationInGB, Optional maxSizeEnteringTarget, - Optional maxSizeLeavingSource) throws IOException { + Optional maxSizeLeavingSource, + Optional balancingInterval, + Optional moveTimeout, + Optional moveReplicationTimeout, + Optional networkTopologyEnable, + Optional includeNodes, + Optional excludeNodes) throws IOException { getScm().checkAdminAccess(getRemoteUser(), false); ContainerBalancerConfiguration cbc = scm.getConfiguration().getObject(ContainerBalancerConfiguration.class); Map auditMap = Maps.newHashMap(); - if (threshold.isPresent()) { - double tsd = threshold.get(); - auditMap.put("threshold", String.valueOf(tsd)); - Preconditions.checkState(tsd >= 0.0D && tsd < 100.0D, - "threshold should be specified in range [0.0, 100.0)."); - cbc.setThreshold(tsd); - } - if (maxSizeToMovePerIterationInGB.isPresent()) { - long mstm = maxSizeToMovePerIterationInGB.get(); - auditMap.put("maxSizeToMovePerIterationInGB", String.valueOf(mstm)); - Preconditions.checkState(mstm > 0, - "maxSizeToMovePerIterationInGB must be positive."); - cbc.setMaxSizeToMovePerIteration(mstm * OzoneConsts.GB); - } - if (maxDatanodesPercentageToInvolvePerIteration.isPresent()) { - int mdti = maxDatanodesPercentageToInvolvePerIteration.get(); - auditMap.put("maxDatanodesPercentageToInvolvePerIteration", - String.valueOf(mdti)); - Preconditions.checkState(mdti >= 0, - "maxDatanodesPercentageToInvolvePerIteration must be " + - "greater than equal to zero."); - Preconditions.checkState(mdti <= 100, - "maxDatanodesPercentageToInvolvePerIteration must be " + - "lesser than or equal to 100."); - cbc.setMaxDatanodesPercentageToInvolvePerIteration(mdti); - } - if (iterations.isPresent()) { - int i = iterations.get(); - auditMap.put("iterations", String.valueOf(i)); - Preconditions.checkState(i > 0 || i == -1, - "number of iterations must be positive or" + + try { + if (threshold.isPresent()) { + double tsd = threshold.get(); + auditMap.put("threshold", String.valueOf(tsd)); + if (tsd < 0.0D || tsd >= 100.0D) { + throw new IOException("Threshold should be specified in 
the range [0.0, 100.0).");
+        }
+        cbc.setThreshold(tsd);
+      }
+
+      if (maxSizeToMovePerIterationInGB.isPresent()) {
+        long mstm = maxSizeToMovePerIterationInGB.get();
+        auditMap.put("maxSizeToMovePerIterationInGB", String.valueOf(mstm));
+        if (mstm <= 0) {
+          throw new IOException("Max Size To Move Per Iteration In GB must be positive.");
+        }
+        cbc.setMaxSizeToMovePerIteration(mstm * OzoneConsts.GB);
+      }
+
+      if (maxDatanodesPercentageToInvolvePerIteration.isPresent()) {
+        int mdti = maxDatanodesPercentageToInvolvePerIteration.get();
+        auditMap.put("maxDatanodesPercentageToInvolvePerIteration",
+            String.valueOf(mdti));
+        if (mdti < 0 || mdti > 100) {
+          throw new IOException("Max Datanodes Percentage To Involve Per Iteration" +
+              " should be specified in the range [0, 100]");
+        }
+        cbc.setMaxDatanodesPercentageToInvolvePerIteration(mdti);
+      }
+
+      if (iterations.isPresent()) {
+        int i = iterations.get();
+        auditMap.put("iterations", String.valueOf(i));
+        if (i < -1 || i == 0) {
+          throw new IOException("Number of Iterations must be positive or" +
              " -1 (for running container balancer infinitely).");
-      cbc.setIterations(i);
-    }
+        }
+        cbc.setIterations(i);
+      }
-    if (maxSizeEnteringTarget.isPresent()) {
-      long mset = maxSizeEnteringTarget.get();
-      auditMap.put("maxSizeEnteringTarget", String.valueOf(mset));
-      Preconditions.checkState(mset > 0,
-          "maxSizeEnteringTarget must be " +
+      if (maxSizeEnteringTarget.isPresent()) {
+        long mset = maxSizeEnteringTarget.get();
+        auditMap.put("maxSizeEnteringTarget", String.valueOf(mset));
+        if (mset <= 0) {
+          throw new IOException("Max Size Entering Target must be " +
              "greater than zero.");
-      cbc.setMaxSizeEnteringTarget(mset * OzoneConsts.GB);
-    }
+        }
+        cbc.setMaxSizeEnteringTarget(mset * OzoneConsts.GB);
+      }
-    if (maxSizeLeavingSource.isPresent()) {
-      long msls = maxSizeLeavingSource.get();
-      auditMap.put("maxSizeLeavingSource", String.valueOf(msls));
-      Preconditions.checkState(msls > 0,
-          "maxSizeLeavingSource must be " +
+      if (maxSizeLeavingSource.isPresent()) {
+        long msls = maxSizeLeavingSource.get();
+        auditMap.put("maxSizeLeavingSource", String.valueOf(msls));
+        if (msls <= 0) {
+          throw new IOException("Max Size Leaving Source must be " +
              "greater than zero.");
-      cbc.setMaxSizeLeavingSource(msls * OzoneConsts.GB);
-    }
+        }
+        cbc.setMaxSizeLeavingSource(msls * OzoneConsts.GB);
+      }
-    ContainerBalancer containerBalancer = scm.getContainerBalancer();
-    try {
+      if (balancingInterval.isPresent()) {
+        int bi = balancingInterval.get();
+        auditMap.put("balancingInterval", String.valueOf(bi));
+        if (bi <= 0) {
+          throw new IOException("Balancing Interval must be greater than zero.");
+        }
+        cbc.setBalancingInterval(Duration.ofMinutes(bi));
+      }
+
+      if (moveTimeout.isPresent()) {
+        int mt = moveTimeout.get();
+        auditMap.put("moveTimeout", String.valueOf(mt));
+        if (mt <= 0) {
+          throw new IOException("Move Timeout must be greater than zero.");
+        }
+        cbc.setMoveTimeout(Duration.ofMinutes(mt));
+      }
+
+      if (moveReplicationTimeout.isPresent()) {
+        int mrt = moveReplicationTimeout.get();
+        auditMap.put("moveReplicationTimeout", String.valueOf(mrt));
+        if (mrt <= 0) {
+          throw new IOException("Move Replication Timeout must be greater than zero.");
+        }
+        cbc.setMoveReplicationTimeout(Duration.ofMinutes(mrt));
+      }
+
+      if (networkTopologyEnable.isPresent()) {
+        Boolean nt = networkTopologyEnable.get();
+        auditMap.put("networkTopologyEnable", String.valueOf(nt));
+        cbc.setNetworkTopologyEnable(nt);
+      }
+
+      if (includeNodes.isPresent()) {
+        String in = includeNodes.get();
+
auditMap.put("includeNodes", (in)); + cbc.setIncludeNodes(in); + } + + if (excludeNodes.isPresent()) { + String ex = excludeNodes.get(); + auditMap.put("excludeNodes", (ex)); + cbc.setExcludeNodes(ex); + } + + ContainerBalancer containerBalancer = scm.getContainerBalancer(); containerBalancer.startBalancer(cbc); + + AUDIT.logWriteSuccess(buildAuditMessageForSuccess( + SCMAction.START_CONTAINER_BALANCER, auditMap)); + return StartContainerBalancerResponseProto.newBuilder() + .setStart(true) + .build(); } catch (IllegalContainerBalancerStateException | IOException | InvalidContainerBalancerConfigurationException e) { AUDIT.logWriteFailure(buildAuditMessageForFailure( @@ -1117,11 +1180,6 @@ public StartContainerBalancerResponseProto startContainerBalancer( .setMessage(e.getMessage()) .build(); } - AUDIT.logWriteSuccess(buildAuditMessageForSuccess( - SCMAction.START_CONTAINER_BALANCER, auditMap)); - return StartContainerBalancerResponseProto.newBuilder() - .setStart(true) - .build(); } @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 11fdc0d16d79..fa67dd68dedc 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -845,7 +845,7 @@ private void initializeSystemManagers(OzoneConfiguration conf, pipelineManager, eventQueue, serviceManager, scmContext); } - scmDecommissionManager = new NodeDecommissionManager(conf, scmNodeManager, + scmDecommissionManager = new NodeDecommissionManager(conf, scmNodeManager, containerManager, scmContext, eventQueue, replicationManager); statefulServiceStateManager = StatefulServiceStateManagerImpl.newBuilder() @@ -1796,6 +1796,10 @@ public void shutDown(String message) { ExitUtils.terminate(0, message, LOG); } + public boolean isStopped() { + return isStopped.get(); + } + /** * Wait until service has completed shutdown. */ diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java index 3bed3878123d..a4d7f3761202 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java @@ -55,6 +55,7 @@ import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -132,7 +133,7 @@ public class TestContainerBalancerTask { * Sets up configuration values and creates a mock cluster. 
*/ @BeforeEach - public void setup() throws IOException, NodeNotFoundException, + public void setup(TestInfo testInfo) throws IOException, NodeNotFoundException, TimeoutException { conf = new OzoneConfiguration(); rmConf = new ReplicationManagerConfiguration(); @@ -164,7 +165,11 @@ public void setup() throws IOException, NodeNotFoundException, conf.setFromObject(balancerConfiguration); GenericTestUtils.setLogLevel(ContainerBalancerTask.LOG, Level.DEBUG); - averageUtilization = createCluster(); + int[] sizeArray = testInfo.getTestMethod() + .filter(method -> method.getName().equals("balancerShouldMoveOnlyPositiveSizeContainers")) + .map(method -> new int[]{0, 0, 0, 0, 0, 1, 2, 3, 4, 5}) + .orElse(null); + averageUtilization = createCluster(sizeArray); mockNodeManager = new MockNodeManager(datanodeToContainersMap); NetworkTopology clusterMap = mockNodeManager.getClusterNetworkTopologyMap(); @@ -1114,6 +1119,34 @@ public void balancerShouldExcludeECContainersWhenLegacyRmIsEnabled() } } + /** + * Test to check if balancer picks up only positive size + * containers to move from source to destination. + */ + @Test + public void balancerShouldMoveOnlyPositiveSizeContainers() + throws IllegalContainerBalancerStateException, IOException, + InvalidContainerBalancerConfigurationException, TimeoutException { + + startBalancer(balancerConfiguration); + /* + Get all containers that were selected by balancer and assert none of + them is a zero or negative size container. + */ + Map containerToSource = + containerBalancerTask.getContainerToSourceMap(); + assertFalse(containerToSource.isEmpty()); + boolean zeroOrNegSizeContainerMoved = false; + for (Map.Entry entry : + containerToSource.entrySet()) { + ContainerInfo containerInfo = cidToInfoMap.get(entry.getKey()); + if (containerInfo.getUsedBytes() <= 0) { + zeroOrNegSizeContainerMoved = true; + } + } + assertFalse(zeroOrNegSizeContainerMoved); + } + /** * Determines unBalanced nodes, that is, over and under utilized nodes, * according to the generated utilization values for nodes and the threshold. @@ -1169,8 +1202,8 @@ private void generateUtilizations(int count) throws IllegalArgumentException { * cluster have utilization values determined by generateUtilizations method. * @return average utilization (used space / capacity) of the cluster */ - private double createCluster() { - generateData(); + private double createCluster(int[] sizeArray) { + generateData(sizeArray); createReplicasForContainers(); long clusterCapacity = 0, clusterUsedSpace = 0; @@ -1204,7 +1237,7 @@ private double createCluster() { /** * Create some datanodes and containers for each node. 
*/ - private void generateData() { + private void generateData(int[] sizeArray) { this.numberOfNodes = 10; generateUtilizations(numberOfNodes); nodesInCluster = new ArrayList<>(nodeUtilizations.size()); @@ -1216,13 +1249,19 @@ private void generateData() { new DatanodeUsageInfo(MockDatanodeDetails.randomDatanodeDetails(), new SCMNodeStat()); - // create containers with varying used space int sizeMultiple = 0; + if (sizeArray == null) { + sizeArray = new int[10]; + for (int j = 0; j < numberOfNodes; j++) { + sizeArray[j] = sizeMultiple; + sizeMultiple %= 5; + sizeMultiple++; + } + } + // create containers with varying used space for (int j = 0; j < i; j++) { - sizeMultiple %= 5; - sizeMultiple++; ContainerInfo container = - createContainer((long) i * i + j, sizeMultiple); + createContainer((long) i * i + j, sizeArray[j]); cidToInfoMap.put(container.containerID(), container); containerIDSet.add(container.containerID()); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java index 09f0dd59b9f9..a0c0280d4083 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java @@ -18,20 +18,27 @@ package org.apache.hadoop.hdds.scm.node; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.DatanodeAdminError; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; +import org.mockito.Mockito; import java.io.File; import java.io.IOException; @@ -39,13 +46,21 @@ import java.util.UUID; import java.util.Arrays; import java.util.ArrayList; +import java.util.Set; +import java.util.HashSet; import static java.util.Collections.singletonList; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static 
org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** * Unit tests for the decommission manager. @@ -56,15 +71,42 @@ public class TestNodeDecommissionManager { private NodeDecommissionManager decom; private StorageContainerManager scm; private NodeManager nodeManager; + private ContainerManager containerManager; private OzoneConfiguration conf; + private static int id = 1; @BeforeEach void setup(@TempDir File dir) throws Exception { conf = new OzoneConfiguration(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.getAbsolutePath()); - nodeManager = createNodeManager(conf); - decom = new NodeDecommissionManager(conf, nodeManager, + scm = HddsTestUtils.getScm(conf); + nodeManager = scm.getScmNodeManager(); + containerManager = mock(ContainerManager.class); + decom = new NodeDecommissionManager(conf, nodeManager, containerManager, SCMContext.emptyContext(), new EventQueue(), null); + when(containerManager.allocateContainer(any(ReplicationConfig.class), anyString())) + .thenAnswer(invocation -> createMockContainer((ReplicationConfig)invocation.getArguments()[0], + (String) invocation.getArguments()[1])); + } + + private ContainerInfo createMockContainer(ReplicationConfig rep, String owner) { + ContainerInfo.Builder builder = new ContainerInfo.Builder() + .setReplicationConfig(rep) + .setContainerID(id) + .setPipelineID(PipelineID.randomId()) + .setState(OPEN) + .setOwner(owner); + id++; + return builder.build(); + } + private ContainerInfo getMockContainer(ReplicationConfig rep, ContainerID conId) { + ContainerInfo.Builder builder = new ContainerInfo.Builder() + .setReplicationConfig(rep) + .setContainerID(conId.getId()) + .setPipelineID(PipelineID.randomId()) + .setState(OPEN) + .setOwner("admin"); + return builder.build(); } @Test @@ -99,37 +141,37 @@ public void testAnyInvalidHostThrowsException() { // Try to decommission a host that does exist, but give incorrect port List error = decom.decommissionNodes( - singletonList(dns.get(1).getIpAddress() + ":10")); + singletonList(dns.get(1).getIpAddress() + ":10"), false); assertEquals(1, error.size()); assertThat(error.get(0).getHostname()).contains(dns.get(1).getIpAddress()); // Try to decommission a host that does not exist - error = decom.decommissionNodes(singletonList("123.123.123.123")); + error = decom.decommissionNodes(singletonList("123.123.123.123"), false); assertEquals(1, error.size()); assertThat(error.get(0).getHostname()).contains("123.123.123.123"); // Try to decommission a host that does exist and a host that does not error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), - "123,123,123,123")); + "123,123,123,123"), false); assertEquals(1, error.size()); assertThat(error.get(0).getHostname()).contains("123,123,123,123"); // Try to decommission a host with many DNs on the address with no port - error = decom.decommissionNodes(singletonList(dns.get(0).getIpAddress())); + error = decom.decommissionNodes(singletonList(dns.get(0).getIpAddress()), false); assertEquals(1, error.size()); assertThat(error.get(0).getHostname()).contains(dns.get(0).getIpAddress()); // Try to decommission a host with many DNs on the address with a port // that does not exist error = decom.decommissionNodes(singletonList(dns.get(0).getIpAddress() - + ":10")); + + ":10"), false); assertEquals(1, error.size()); assertThat(error.get(0).getHostname()).contains(dns.get(0).getIpAddress() + ":10"); // Try to decommission 2 hosts with address that does not exist // Both should return error error = 
decom.decommissionNodes(Arrays.asList( - "123.123.123.123", "234.234.234.234")); + "123.123.123.123", "234.234.234.234"), false); assertEquals(2, error.size()); assertTrue(error.get(0).getHostname().contains("123.123.123.123") && error.get(1).getHostname().contains("234.234.234.234")); @@ -142,7 +184,7 @@ public void testNodesCanBeDecommissionedAndRecommissioned() // Decommission 2 valid nodes decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), - dns.get(2).getIpAddress())); + dns.get(2).getIpAddress()), false); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, @@ -151,14 +193,14 @@ public void testNodesCanBeDecommissionedAndRecommissioned() // Running the command again gives no error - nodes already decommissioning // are silently ignored. decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), - dns.get(2).getIpAddress())); + dns.get(2).getIpAddress()), false); // Attempt to decommission dn(10) which has multiple hosts on the same IP // and we hardcoded ports to 3456, 4567, 5678 DatanodeDetails multiDn = dns.get(10); String multiAddr = multiDn.getIpAddress() + ":" + multiDn.getPorts().get(0).getValue(); - decom.decommissionNodes(singletonList(multiAddr)); + decom.decommissionNodes(singletonList(multiAddr), false); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(multiDn).getOperationalState()); @@ -166,7 +208,7 @@ public void testNodesCanBeDecommissionedAndRecommissioned() // dn(11) with identical ports. nodeManager.processHeartbeat(dns.get(9)); DatanodeDetails duplicatePorts = dns.get(9); - decom.decommissionNodes(singletonList(duplicatePorts.getIpAddress())); + decom.decommissionNodes(singletonList(duplicatePorts.getIpAddress()), false); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(duplicatePorts).getOperationalState()); @@ -217,13 +259,13 @@ public void testNodesCanBeDecommissionedAndRecommissionedMixedPorts() // Attempt to decommission with just the IP, which should fail. 
List error = - decom.decommissionNodes(singletonList(extraDN.getIpAddress())); + decom.decommissionNodes(singletonList(extraDN.getIpAddress()), false); assertEquals(1, error.size()); assertThat(error.get(0).getHostname()).contains(extraDN.getIpAddress()); // Now try the one with the unique port decom.decommissionNodes( - singletonList(extraDN.getIpAddress() + ":" + ratisPort + 1)); + singletonList(extraDN.getIpAddress() + ":" + ratisPort + 1), false); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(extraDN).getOperationalState()); @@ -239,7 +281,7 @@ public void testNodesCanBeDecommissionedAndRecommissionedMixedPorts() nodeManager.processHeartbeat(expectedDN); decom.decommissionNodes(singletonList( - expectedDN.getIpAddress() + ":" + ratisPort)); + expectedDN.getIpAddress() + ":" + ratisPort), false); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(expectedDN).getOperationalState()); // The other duplicate is still in service @@ -323,7 +365,7 @@ public void testNodesCannotTransitionFromDecomToMaint() throws Exception { // Try to go from maint to decom: List dn = new ArrayList<>(); dn.add(dns.get(1).getIpAddress()); - List errors = decom.decommissionNodes(dn); + List errors = decom.decommissionNodes(dn, false); assertEquals(1, errors.size()); assertEquals(dns.get(1).getHostName(), errors.get(0).getHostname()); @@ -369,10 +411,268 @@ public void testNodeDecommissionManagerOnBecomeLeader() throws Exception { assertEquals(decom.getMonitor().getTrackedNodes().size(), 3); } - private SCMNodeManager createNodeManager(OzoneConfiguration config) - throws IOException, AuthenticationException { - scm = HddsTestUtils.getScm(config); - return (SCMNodeManager) scm.getScmNodeManager(); + @Test + public void testInsufficientNodeDecommissionThrowsExceptionForRatis() throws + NodeNotFoundException, IOException { + when(containerManager.getContainer(any(ContainerID.class))) + .thenAnswer(invocation -> getMockContainer(RatisReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.THREE), (ContainerID)invocation.getArguments()[0])); + List error; + List dns = new ArrayList<>(); + + for (int i = 0; i < 5; i++) { + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dns.add(dn); + nodeManager.register(dn, null, null); + } + + Set idsRatis = new HashSet<>(); + for (int i = 0; i < 5; i++) { + ContainerInfo container = containerManager.allocateContainer( + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), "admin"); + idsRatis.add(container.containerID()); + } + + for (DatanodeDetails dn : nodeManager.getAllNodes().subList(0, 3)) { + nodeManager.setContainers(dn, idsRatis); + } + + error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), + dns.get(2).getIpAddress(), dns.get(3).getIpAddress(), dns.get(4).getIpAddress()), false); + assertTrue(error.get(0).getHostname().contains("AllHosts")); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(3)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(4)).getOperationalState()); + + error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), + dns.get(2).getIpAddress(), 
dns.get(3).getIpAddress(), dns.get(4).getIpAddress()), true); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(3)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(4)).getOperationalState()); + } + + @Test + public void testInsufficientNodeDecommissionThrowsExceptionForEc() throws + NodeNotFoundException, IOException { + when(containerManager.getContainer(any(ContainerID.class))) + .thenAnswer(invocation -> getMockContainer(new ECReplicationConfig(3, 2), + (ContainerID)invocation.getArguments()[0])); + List error; + List dns = new ArrayList<>(); + + for (int i = 0; i < 5; i++) { + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dns.add(dn); + nodeManager.register(dn, null, null); + } + + Set idsEC = new HashSet<>(); + for (int i = 0; i < 5; i++) { + ContainerInfo container = containerManager.allocateContainer(new ECReplicationConfig(3, 2), "admin"); + idsEC.add(container.containerID()); + } + + for (DatanodeDetails dn : nodeManager.getAllNodes()) { + nodeManager.setContainers(dn, idsEC); + } + + error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), false); + assertTrue(error.get(0).getHostname().contains("AllHosts")); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), true); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + } + + @Test + public void testInsufficientNodeDecommissionThrowsExceptionRatisAndEc() throws + NodeNotFoundException, IOException { + List error; + List dns = new ArrayList<>(); + + for (int i = 0; i < 5; i++) { + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dns.add(dn); + nodeManager.register(dn, null, null); + } + + Set idsRatis = new HashSet<>(); + ContainerInfo containerRatis = containerManager.allocateContainer( + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), "admin"); + idsRatis.add(containerRatis.containerID()); + Set idsEC = new HashSet<>(); + ContainerInfo containerEC = containerManager.allocateContainer(new ECReplicationConfig(3, 2), "admin"); + idsEC.add(containerEC.containerID()); + + when(containerManager.getContainer(any(ContainerID.class))) + .thenAnswer(invocation -> { + ContainerID containerID = (ContainerID)invocation.getArguments()[0]; + if (idsEC.contains(containerID)) { + return getMockContainer(new ECReplicationConfig(3, 2), + (ContainerID)invocation.getArguments()[0]); + } + return getMockContainer(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), + (ContainerID)invocation.getArguments()[0]); + }); + + for (DatanodeDetails dn : nodeManager.getAllNodes().subList(0, 3)) { + nodeManager.setContainers(dn, idsRatis); + } + for (DatanodeDetails dn : nodeManager.getAllNodes()) { + nodeManager.setContainers(dn, idsEC); + } + + error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), false); + assertTrue(error.get(0).getHostname().contains("AllHosts")); + 
assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()), true); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + } + + @Test + public void testInsufficientNodeDecommissionChecksNotInService() throws + NodeNotFoundException, IOException { + when(containerManager.getContainer(any(ContainerID.class))) + .thenAnswer(invocation -> getMockContainer(RatisReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.THREE), (ContainerID)invocation.getArguments()[0])); + + List error; + List dns = new ArrayList<>(); + + for (int i = 0; i < 5; i++) { + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dns.add(dn); + nodeManager.register(dn, null, null); + } + + Set idsRatis = new HashSet<>(); + for (int i = 0; i < 5; i++) { + ContainerInfo container = containerManager.allocateContainer( + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), "admin"); + idsRatis.add(container.containerID()); + } + + for (DatanodeDetails dn : nodeManager.getAllNodes().subList(0, 3)) { + nodeManager.setContainers(dn, idsRatis); + } + + // decommission one node successfully + error = decom.decommissionNodes(Arrays.asList(dns.get(0).getIpAddress()), false); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(0)).getOperationalState()); + // try to decommission 2 nodes, one in service and one in decommissioning state, should be successful. + error = decom.decommissionNodes(Arrays.asList(dns.get(0).getIpAddress(), + dns.get(1).getIpAddress()), false); + assertEquals(0, error.size()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(0)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + } + + @Test + public void testInsufficientNodeDecommissionChecksForNNF() throws + NodeNotFoundException, IOException { + List error; + List dns = new ArrayList<>(); + + for (int i = 0; i < 5; i++) { + DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); + dns.add(dn); + } + Set idsRatis = new HashSet<>(); + for (int i = 0; i < 3; i++) { + ContainerInfo container = containerManager.allocateContainer( + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), "admin"); + idsRatis.add(container.containerID()); + } + + nodeManager = mock(NodeManager.class); + decom = new NodeDecommissionManager(conf, nodeManager, containerManager, + SCMContext.emptyContext(), new EventQueue(), null); + when(containerManager.getContainer(any(ContainerID.class))) + .thenAnswer(invocation -> getMockContainer(RatisReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.THREE), (ContainerID)invocation.getArguments()[0])); + when(nodeManager.getNodesByAddress(any())).thenAnswer(invocation -> + getDatanodeDetailsList((String)invocation.getArguments()[0], dns)); + when(nodeManager.getContainers(any())).thenReturn(idsRatis); + when(nodeManager.getNodeCount(any())).thenReturn(5); + + when(nodeManager.getNodeStatus(any())).thenAnswer(invocation -> + getNodeOpState((DatanodeDetails) invocation.getArguments()[0], dns)); + Mockito.doAnswer(invocation -> { + setNodeOpState((DatanodeDetails)invocation.getArguments()[0], + 
(HddsProtos.NodeOperationalState)invocation.getArguments()[1], dns); + return null; + }).when(nodeManager).setNodeOperationalState(any(DatanodeDetails.class), any( + HddsProtos.NodeOperationalState.class)); + + error = decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(), + dns.get(2).getIpAddress(), dns.get(3).getIpAddress()), false); + assertTrue(error.get(0).getHostname().contains("AllHosts")); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE, + nodeManager.getNodeStatus(dns.get(3)).getOperationalState()); + + error = decom.decommissionNodes(Arrays.asList(dns.get(0).getIpAddress(), + dns.get(1).getIpAddress(), dns.get(2).getIpAddress()), false); + assertFalse(error.get(0).getHostname().contains("AllHosts")); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(1)).getOperationalState()); + assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, + nodeManager.getNodeStatus(dns.get(2)).getOperationalState()); + } + + private List getDatanodeDetailsList(String ipaddress, List dns) { + List datanodeDetails = new ArrayList<>(); + for (DatanodeDetails dn : dns) { + if (dn.getIpAddress().equals(ipaddress)) { + datanodeDetails.add(dn); + break; + } + } + return datanodeDetails; + } + + private void setNodeOpState(DatanodeDetails dn, HddsProtos.NodeOperationalState newState, List dns) { + for (DatanodeDetails datanode : dns) { + if (datanode.equals(dn)) { + datanode.setPersistedOpState(newState); + break; + } + } + } + + private NodeStatus getNodeOpState(DatanodeDetails dn, List dns) throws NodeNotFoundException { + if (dn.equals(dns.get(0))) { + throw new NodeNotFoundException(); + } + for (DatanodeDetails datanode : dns) { + if (datanode.equals(dn)) { + return new NodeStatus(datanode.getPersistedOpState(), HddsProtos.NodeState.HEALTHY); + } + } + return null; } /** diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java index c74e274d3d72..6a4cebe9c7a9 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java @@ -184,9 +184,9 @@ public void testDeletedContainersClearedOnStartup() throws Exception { ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); ozoneConf.setFromObject(new ReplicationConfig().setPort(0)); + OzoneContainer ozoneContainer = createVolume(ozoneConf); try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, serverAddress, 1000)) { - OzoneContainer ozoneContainer = createVolume(ozoneConf); HddsVolume hddsVolume = (HddsVolume) ozoneContainer.getVolumeSet() .getVolumesList().get(0); KeyValueContainer kvContainer = addContainer(ozoneConf, hddsVolume); @@ -212,6 +212,8 @@ public void testDeletedContainersClearedOnStartup() throws Exception { hddsVolume.getDeletedContainerDir().listFiles(); assertNotNull(leftoverContainers); assertEquals(0, leftoverContainers.length); + } finally { + ozoneContainer.stop(); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStartSubcommand.java 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStartSubcommand.java index 158bc6da7b89..17885eecc975 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStartSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStartSubcommand.java @@ -38,49 +38,97 @@ public class ContainerBalancerStartSubcommand extends ScmSubcommand { @Option(names = {"-t", "--threshold"}, description = "Percentage deviation from average utilization of " + - "the cluster after which a datanode will be rebalanced (for " + - "example, '10' for 10%%).") + "the cluster after which a datanode will be rebalanced. The value " + + "should be in the range [0.0, 100.0), with a default of 10 " + + "(specify '10' for 10%%).") private Optional threshold; @Option(names = {"-i", "--iterations"}, - description = "Maximum consecutive iterations that" + - " balancer will run for.") + description = "Maximum consecutive iterations that " + + "balancer will run for. The value should be positive " + + "or -1, with a default of 10 (specify '10' for 10 iterations).") private Optional iterations; @Option(names = {"-d", "--max-datanodes-percentage-to-involve-per-iteration", "--maxDatanodesPercentageToInvolvePerIteration"}, description = "Max percentage of healthy, in service datanodes " + - "that can be involved in balancing in one iteration (for example, " + + "that can be involved in balancing in one iteration. The value " + + "should be in the range [0,100], with a default of 20 (specify " + "'20' for 20%%).") private Optional maxDatanodesPercentageToInvolvePerIteration; @Option(names = {"-s", "--max-size-to-move-per-iteration-in-gb", "--maxSizeToMovePerIterationInGB"}, description = "Maximum size that can be moved per iteration of " + - "balancing (for example, '500' for 500GB).") + "balancing. The value should be positive, with a default of 500 " + + "(specify '500' for 500GB).") private Optional maxSizeToMovePerIterationInGB; @Option(names = {"-e", "--max-size-entering-target-in-gb", "--maxSizeEnteringTargetInGB"}, description = "Maximum size that can enter a target datanode while " + - "balancing. This is the sum of data from multiple sources (for " + - "example, '26' for 26GB).") + "balancing. This is the sum of data from multiple sources. The value " + + "should be positive, with a default of 26 (specify '26' for 26GB).") private Optional maxSizeEnteringTargetInGB; @Option(names = {"-l", "--max-size-leaving-source-in-gb", "--maxSizeLeavingSourceInGB"}, description = "Maximum size that can leave a source datanode while " + - "balancing. This is the sum of data moving to multiple targets " + - "(for example, '26' for 26GB).") + "balancing. This is the sum of data moving to multiple targets. " + + "The value should be positive, with a default of 26 " + + "(specify '26' for 26GB).") private Optional maxSizeLeavingSourceInGB; + @Option(names = {"--balancing-iteration-interval-minutes"}, + description = "The interval period in minutes between each iteration of Container Balancer. " + + "The value should be positive, with a default of 70 (specify '70' for 70 minutes).") + private Optional balancingInterval; + + @Option(names = {"--move-timeout-minutes"}, + description = "The amount of time in minutes to allow a single container to move " + + "from source to target. 
The value should be positive, with a default of 65 " + + "(specify '65' for 65 minutes).") + private Optional moveTimeout; + + @Option(names = {"--move-replication-timeout-minutes"}, + description = "The " + + "amount of time in minutes to allow a single container's replication from source " + + "to target as part of container move. The value should be positive, with " + + "a default of 50. For example, if \"hdds.container" + + ".balancer.move.timeout\" is 65 minutes, then out of those 65 minutes " + + "50 minutes will be the deadline for replication to complete (specify " + + "'50' for 50 minutes).") + private Optional moveReplicationTimeout; + + @Option(names = {"--move-network-topology-enable"}, + description = "Whether to take network topology into account when " + + "selecting a target for a source. " + + "This configuration is false by default.") + private Optional networkTopologyEnable; + + @Option(names = {"--include-datanodes"}, + description = "A list of Datanode " + + "hostnames or ip addresses separated by commas. Only the Datanodes " + + "specified in this list are balanced. This configuration is empty by " + + "default and is applicable only if it is non-empty (specify \"hostname1,hostname2,hostname3\").") + private Optional includeNodes; + + @Option(names = {"--exclude-datanodes"}, + description = "A list of Datanode " + + "hostnames or ip addresses separated by commas. The Datanodes specified " + + "in this list are excluded from balancing. This configuration is empty " + + "by default (specify \"hostname1,hostname2,hostname3\").") + private Optional excludeNodes; + @Override public void execute(ScmClient scmClient) throws IOException { StartContainerBalancerResponseProto response = scmClient. startContainerBalancer(threshold, iterations, maxDatanodesPercentageToInvolvePerIteration, maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, - maxSizeLeavingSourceInGB); + maxSizeLeavingSourceInGB, balancingInterval, moveTimeout, + moveReplicationTimeout, networkTopologyEnable, includeNodes, + excludeNodes); if (response.getStart()) { System.out.println("Container Balancer started successfully."); } else { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStopSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStopSubcommand.java index 89e7680f31c5..c15109a32784 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStopSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStopSubcommand.java @@ -34,7 +34,8 @@ public class ContainerBalancerStopSubcommand extends ScmSubcommand { @Override public void execute(ScmClient scmClient) throws IOException { + System.out.println("Sending stop command. 
Waiting for Container Balancer to stop..."); scmClient.stopContainerBalancer(); - System.out.println("Stopping ContainerBalancer..."); + System.out.println("Container Balancer stopped."); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index 7898ed76b1cd..f334f1a03e90 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -237,9 +237,9 @@ public HddsProtos.Node queryNode(UUID uuid) throws IOException { } @Override - public List decommissionNodes(List hosts) + public List decommissionNodes(List hosts, boolean force) throws IOException { - return storageContainerLocationClient.decommissionNodes(hosts); + return storageContainerLocationClient.decommissionNodes(hosts, force); } @Override @@ -483,12 +483,19 @@ public StartContainerBalancerResponseProto startContainerBalancer( Optional maxDatanodesPercentageToInvolvePerIteration, Optional maxSizeToMovePerIterationInGB, Optional maxSizeEnteringTargetInGB, - Optional maxSizeLeavingSourceInGB) - throws IOException { + Optional maxSizeLeavingSourceInGB, + Optional balancingInterval, + Optional moveTimeout, + Optional moveReplicationTimeout, + Optional networkTopologyEnable, + Optional includeNodes, + Optional excludeNodes) throws IOException { return storageContainerLocationClient.startContainerBalancer(threshold, iterations, maxDatanodesPercentageToInvolvePerIteration, maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, - maxSizeLeavingSourceInGB); + maxSizeLeavingSourceInGB, balancingInterval, moveTimeout, + moveReplicationTimeout, networkTopologyEnable, includeNodes, + excludeNodes); } @Override diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java index e7d3a4443831..31123ae81b51 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java @@ -48,6 +48,11 @@ public class DecommissionSubCommand extends ScmSubcommand { paramLabel = "") private List parameters = new ArrayList<>(); + @CommandLine.Option(names = { "--force" }, + defaultValue = "false", + description = "Forcefully try to decommission the datanode(s)") + private boolean force; + @Override public void execute(ScmClient scmClient) throws IOException { if (parameters.size() > 0) { @@ -62,7 +67,7 @@ public void execute(ScmClient scmClient) throws IOException { } else { hosts = parameters; } - List errors = scmClient.decommissionNodes(hosts); + List errors = scmClient.decommissionNodes(hosts, force); System.out.println("Started decommissioning datanode(s):\n" + String.join("\n", hosts)); if (errors.size() > 0) { diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java index e271cdfe0298..27c360e72743 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java +++ 
b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java @@ -104,7 +104,10 @@ public void testContainerBalancerStopSubcommand() throws IOException { ScmClient scmClient = mock(ScmClient.class); stopCmd.execute(scmClient); - Pattern p = Pattern.compile("^Stopping\\sContainerBalancer..."); + Pattern p = Pattern.compile("^Sending\\sstop\\scommand." + + "\\sWaiting\\sfor\\sContainer\\sBalancer\\sto\\sstop...\\n" + + "Container\\sBalancer\\sstopped."); + Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); } @@ -114,7 +117,7 @@ public void testContainerBalancerStartSubcommandWhenBalancerIsNotRunning() throws IOException { ScmClient scmClient = mock(ScmClient.class); when(scmClient.startContainerBalancer( - null, null, null, null, null, null)) + null, null, null, null, null, null, null, null, null, null, null, null)) .thenReturn( StorageContainerLocationProtocolProtos .StartContainerBalancerResponseProto.newBuilder() @@ -133,7 +136,7 @@ public void testContainerBalancerStartSubcommandWhenBalancerIsRunning() throws IOException { ScmClient scmClient = mock(ScmClient.class); when(scmClient.startContainerBalancer( - null, null, null, null, null, null)) + null, null, null, null, null, null, null, null, null, null, null, null)) .thenReturn(StorageContainerLocationProtocolProtos .StartContainerBalancerResponseProto.newBuilder() .setStart(false) diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java index e7e01ffaa1af..d6f0f8ae8267 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionSubCommand.java @@ -37,6 +37,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -71,7 +72,7 @@ public void tearDown() { @Test public void testMultipleHostnamesCanBeReadFromStdin() throws Exception { - when(scmClient.decommissionNodes(anyList())) + when(scmClient.decommissionNodes(anyList(), anyBoolean())) .thenAnswer(invocation -> new ArrayList()); String input = "host1\nhost2\nhost3\n"; @@ -100,7 +101,7 @@ public void testMultipleHostnamesCanBeReadFromStdin() throws Exception { @Test public void testNoErrorsWhenDecommissioning() throws IOException { - when(scmClient.decommissionNodes(anyList())) + when(scmClient.decommissionNodes(anyList(), anyBoolean())) .thenAnswer(invocation -> new ArrayList()); CommandLine c = new CommandLine(cmd); @@ -123,7 +124,7 @@ public void testNoErrorsWhenDecommissioning() throws IOException { @Test public void testErrorsReportedWhenDecommissioning() throws IOException { - when(scmClient.decommissionNodes(anyList())) + when(scmClient.decommissionNodes(anyList(), anyBoolean())) .thenAnswer(invocation -> { ArrayList e = new ArrayList<>(); e.add(new DatanodeAdminError("host1", "host1 error")); diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java index d2a4c54b8bf2..a6225d1b5da6 
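Tying the decommission hunks above together: the new `--force` CLI flag is passed straight through as the second argument of `ScmClient.decommissionNodes`, and any `DatanodeAdminError` entries returned are reported back to the user. A minimal sketch of a programmatic caller following the same pattern; the import locations and the `scmClient` setup are assumptions, not part of this change.

```java
import java.io.IOException;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hdds.scm.DatanodeAdminError;  // package assumed
import org.apache.hadoop.hdds.scm.client.ScmClient;    // package assumed

final class ForcedDecommissionSketch {
  private ForcedDecommissionSketch() { }

  /** Starts decommissioning the given hosts; 'force' maps to the new --force CLI option. */
  static void decommission(ScmClient scmClient, boolean force) throws IOException {
    List<String> hosts = Arrays.asList("host1", "host2");
    // The boolean is the new 'force' parameter introduced by this change.
    List<DatanodeAdminError> errors = scmClient.decommissionNodes(hosts, force);
    if (!errors.isEmpty()) {
      System.err.println("Some nodes could not start decommissioning: " + errors.size());
    }
  }
}
```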
100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestMaintenanceSubCommand.java @@ -37,6 +37,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.mock; @@ -72,7 +73,7 @@ public void tearDown() { @Test public void testMultipleHostnamesCanBeReadFromStdin() throws Exception { - when(scmClient.decommissionNodes(anyList())) + when(scmClient.decommissionNodes(anyList(), anyBoolean())) .thenAnswer(invocation -> new ArrayList()); String input = "host1\nhost2\nhost3\n"; diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java index e274cd4fd544..083ada8a4207 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestRecommissionSubCommand.java @@ -37,6 +37,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -71,7 +72,7 @@ public void tearDown() { @Test public void testMultipleHostnamesCanBeReadFromStdin() throws Exception { - when(scmClient.decommissionNodes(anyList())) + when(scmClient.decommissionNodes(anyList(), anyBoolean())) .thenAnswer(invocation -> new ArrayList()); String input = "host1\nhost2\nhost3\n"; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index 3b76daeba4e5..112c76f8c0a8 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -154,7 +154,7 @@ public class OzoneBucket extends WithMetadata { private String owner; protected OzoneBucket(Builder builder) { - setMetadata(builder.metadata); + super(builder); this.proxy = builder.proxy; this.volumeName = builder.volumeName; this.name = builder.name; // bucket name @@ -954,8 +954,7 @@ public static Builder newBuilder(ConfigurationSource conf, /** * Inner builder for OzoneBucket. 
*/ - public static class Builder { - private Map metadata; + public static class Builder extends WithMetadata.Builder { private ConfigurationSource conf; private ClientProtocol proxy; private String volumeName; @@ -983,8 +982,9 @@ private Builder(ConfigurationSource conf, ClientProtocol proxy) { this.proxy = proxy; } + @Override public Builder setMetadata(Map metadata) { - this.metadata = metadata; + super.setMetadata(metadata); return this; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java index 9c489943720c..9ab110aa2b55 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java @@ -106,7 +106,7 @@ public class OzoneVolume extends WithMetadata { private long refCount; protected OzoneVolume(Builder builder) { - setMetadata(builder.metadata); + super(builder); this.proxy = builder.proxy; this.name = builder.name; this.admin = builder.admin; @@ -409,8 +409,7 @@ public static Builder newBuilder(ConfigurationSource conf, /** * Inner builder for OzoneVolume. */ - public static class Builder { - private Map metadata; + public static class Builder extends WithMetadata.Builder { private ConfigurationSource conf; private ClientProtocol proxy; private String name; @@ -482,8 +481,9 @@ public Builder setRefCount(long refCount) { return this; } + @Override public Builder setMetadata(Map metadata) { - this.metadata = metadata; + super.setMetadata(metadata); return this; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 3a4d391b006a..0806ffb84725 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -321,6 +321,8 @@ public void onRemoval( this.blockInputStreamFactory = BlockInputStreamFactoryImpl .getInstance(byteBufferPool, ecReconstructExecutor); this.clientMetrics = ContainerClientMetrics.acquire(); + + TracingUtil.initTracing("client", conf); } public XceiverClientFactory getXceiverClientManager() { @@ -2530,7 +2532,7 @@ private static ExecutorService createThreadPoolExecutor( int corePoolSize, int maximumPoolSize, String threadNameFormat) { return new ThreadPoolExecutor(corePoolSize, maximumPoolSize, 60, TimeUnit.SECONDS, new SynchronousQueue<>(), - new ThreadFactoryBuilder().setNameFormat(threadNameFormat).build(), + new ThreadFactoryBuilder().setNameFormat(threadNameFormat).setDaemon(true).build(), new ThreadPoolExecutor.CallerRunsPolicy()); } } diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockXceiverClientSpi.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockXceiverClientSpi.java index 7e5de329d129..0d82f0f8bbb2 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockXceiverClientSpi.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/MockXceiverClientSpi.java @@ -129,21 +129,26 @@ private ContainerProtos.ListBlockResponseProto listBlock(long containerID) { } private PutBlockResponseProto putBlock(PutBlockRequestProto putBlock) { + return PutBlockResponseProto.newBuilder() + .setCommittedBlockLength( + doPutBlock(putBlock.getBlockData())) + .build(); + } + + private 
GetCommittedBlockLengthResponseProto doPutBlock( + ContainerProtos.BlockData blockData) { long length = 0; - for (ChunkInfo chunk : putBlock.getBlockData().getChunksList()) { + for (ChunkInfo chunk : blockData.getChunksList()) { length += chunk.getLen(); } - datanodeStorage.putBlock(putBlock.getBlockData().getBlockID(), - putBlock.getBlockData()); + datanodeStorage.putBlock(blockData.getBlockID(), + blockData); - return PutBlockResponseProto.newBuilder() - .setCommittedBlockLength( - GetCommittedBlockLengthResponseProto.newBuilder() - .setBlockID(putBlock.getBlockData().getBlockID()) + return GetCommittedBlockLengthResponseProto.newBuilder() + .setBlockID(blockData.getBlockID()) .setBlockLength(length) - .build()) - .build(); + .build(); } private XceiverClientReply result( @@ -166,8 +171,15 @@ private WriteChunkResponseProto writeChunk( datanodeStorage .writeChunk(writeChunk.getBlockID(), writeChunk.getChunkData(), writeChunk.getData()); - return WriteChunkResponseProto.newBuilder() - .build(); + + WriteChunkResponseProto.Builder builder = + WriteChunkResponseProto.newBuilder(); + if (writeChunk.hasBlock()) { + ContainerProtos.BlockData + blockData = writeChunk.getBlock().getBlockData(); + builder.setCommittedBlockLength(doPutBlock(blockData)); + } + return builder.build(); } @Override diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 42e0e3b940d2..d2f68f1e4d81 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -744,6 +744,47 @@ public static String normalizeKey(String keyName, return keyName; } + /** + * Normalizes a given path up to the bucket level. + * + * This method takes a path as input and normalizes it up to the bucket level. + * It handles empty paths, removes leading slashes, and splits the path into + * segments. It then extracts the volume and bucket names, forming a + * normalized path with a single slash. Finally, any remaining segments are + * joined as the key name, returning the complete standardized path. + * + * @param path The path string to be normalized. + * @return The normalized path string. + */ + public static String normalizePathUptoBucket(String path) { + if (path == null || path.isEmpty()) { + return OM_KEY_PREFIX; // Handle empty path + } + + // Remove leading slashes + path = path.replaceAll("^/*", ""); + + String[] segments = path.split(OM_KEY_PREFIX, -1); + + String volumeName = segments[0]; + String bucketName = segments.length > 1 ? segments[1] : ""; + + // Combine volume and bucket. + StringBuilder normalizedPath = new StringBuilder(volumeName); + if (!bucketName.isEmpty()) { + normalizedPath.append(OM_KEY_PREFIX).append(bucketName); + } + + // Add remaining segments as the key + if (segments.length > 2) { + normalizedPath.append(OM_KEY_PREFIX).append( + String.join(OM_KEY_PREFIX, + Arrays.copyOfRange(segments, 2, segments.length))); + } + + return normalizedPath.toString(); + } + /** * For a given service ID, return list of configured OM hosts.
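A small illustration of what the new `OmUtils.normalizePathUptoBucket` helper returns, assuming `OM_KEY_PREFIX` is the usual "/" separator; the expected values follow directly from the implementation above, and the test class itself is hypothetical, not part of this patch.

```java
import static org.junit.jupiter.api.Assertions.assertEquals;

import org.apache.hadoop.ozone.OmUtils;
import org.junit.jupiter.api.Test;

/** Illustrative check only. */
class TestNormalizePathUptoBucketSketch {

  @Test
  void normalizesLeadingSlashesAndKeepsKeyPart() {
    // Empty input falls back to the key prefix ("/").
    assertEquals("/", OmUtils.normalizePathUptoBucket(""));

    // Leading slashes are stripped; volume and bucket are joined with a single slash.
    assertEquals("vol1/bucket1", OmUtils.normalizePathUptoBucket("/vol1/bucket1"));

    // Anything after the bucket is appended back as the key name.
    assertEquals("vol1/bucket1/dir1/file1",
        OmUtils.normalizePathUptoBucket("/vol1/bucket1/dir1/file1"));
  }
}
```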
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java index 168e9e952881..40c28ed5adee 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.ozone.om.helpers; -import java.util.HashMap; import java.util.LinkedHashMap; import java.util.Map; @@ -67,7 +66,7 @@ public final class OmBucketArgs extends WithMetadata implements Auditable { private final String ownerName; private OmBucketArgs(Builder b) { - setMetadata(b.metadata); + super(b); this.volumeName = b.volumeName; this.bucketName = b.bucketName; this.isVersionEnabled = b.isVersionEnabled; @@ -214,12 +213,11 @@ public Map toAuditMap() { /** * Builder for OmBucketArgs. */ - public static class Builder { + public static class Builder extends WithMetadata.Builder { private String volumeName; private String bucketName; private Boolean isVersionEnabled; private StorageType storageType; - private final Map metadata = new HashMap<>(); private boolean quotaInBytesSet = false; private long quotaInBytes; private boolean quotaInNamespaceSet = false; @@ -258,8 +256,9 @@ public Builder setBucketEncryptionKey(BucketEncryptionKeyInfo info) { return this; } + @Override public Builder addAllMetadata(Map map) { - metadata.putAll(map); + super.addAllMetadata(map); return this; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java index dd6406a0e59e..8dfb2f88f98b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java @@ -19,7 +19,6 @@ import java.util.ArrayList; -import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -110,9 +109,7 @@ public static Codec getCodec() { private String owner; private OmBucketInfo(Builder b) { - setMetadata(b.metadata); - setObjectID(b.objectID); - setUpdateID(b.updateID); + super(b); this.volumeName = b.volumeName; this.bucketName = b.bucketName; this.acls = b.acls; @@ -361,20 +358,17 @@ public OmBucketInfo copyObject() { } public Builder toBuilder() { - return new Builder() + return new Builder(this) .setVolumeName(volumeName) .setBucketName(bucketName) .setStorageType(storageType) .setIsVersionEnabled(isVersionEnabled) .setCreationTime(creationTime) .setModificationTime(modificationTime) - .setObjectID(getObjectID()) - .setUpdateID(getUpdateID()) .setBucketEncryptionKey(bekInfo) .setSourceVolume(sourceVolume) .setSourceBucket(sourceBucket) .setAcls(acls) - .addAllMetadata(getMetadata()) .setUsedBytes(usedBytes) .setUsedNamespace(usedNamespace) .setQuotaInBytes(quotaInBytes) @@ -387,37 +381,30 @@ public Builder toBuilder() { /** * Builder for OmBucketInfo. 
*/ - public static class Builder { + public static class Builder extends WithObjectID.Builder { private String volumeName; private String bucketName; - private List acls; - private Boolean isVersionEnabled; - private StorageType storageType; + private final List acls = new ArrayList<>(); + private boolean isVersionEnabled; + private StorageType storageType = StorageType.DISK; private long creationTime; private long modificationTime; - private long objectID; - private long updateID; - private Map metadata; private BucketEncryptionKeyInfo bekInfo; private String sourceVolume; private String sourceBucket; private long usedBytes; private long usedNamespace; - private long quotaInBytes; - private long quotaInNamespace; - private BucketLayout bucketLayout; + private long quotaInBytes = OzoneConsts.QUOTA_RESET; + private long quotaInNamespace = OzoneConsts.QUOTA_RESET; + private BucketLayout bucketLayout = BucketLayout.DEFAULT; private String owner; private DefaultReplicationConfig defaultReplicationConfig; public Builder() { - //Default values - this.acls = new ArrayList<>(); - this.isVersionEnabled = false; - this.storageType = StorageType.DISK; - this.metadata = new HashMap<>(); - this.quotaInBytes = OzoneConsts.QUOTA_RESET; - this.quotaInNamespace = OzoneConsts.QUOTA_RESET; - this.bucketLayout = BucketLayout.DEFAULT; + } + + private Builder(OmBucketInfo obj) { + super(obj); } public Builder setVolumeName(String volume) { @@ -448,7 +435,7 @@ public Builder addAcl(OzoneAcl ozoneAcl) { return this; } - public Builder setIsVersionEnabled(Boolean versionFlag) { + public Builder setIsVersionEnabled(boolean versionFlag) { this.isVersionEnabled = versionFlag; return this; } @@ -468,25 +455,27 @@ public Builder setModificationTime(long modifiedOn) { return this; } + @Override public Builder setObjectID(long obId) { - this.objectID = obId; + super.setObjectID(obId); return this; } + @Override public Builder setUpdateID(long id) { - this.updateID = id; + super.setUpdateID(id); return this; } + @Override public Builder addMetadata(String key, String value) { - metadata.put(key, value); + super.addMetadata(key, value); return this; } + @Override public Builder addAllMetadata(Map additionalMetadata) { - if (additionalMetadata != null) { - metadata.putAll(additionalMetadata); - } + super.addAllMetadata(additionalMetadata); return this; } @@ -556,7 +545,6 @@ public OmBucketInfo build() { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); Preconditions.checkNotNull(acls); - Preconditions.checkNotNull(isVersionEnabled); Preconditions.checkNotNull(storageType); return new OmBucketInfo(this); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java index b505166bd0fd..1c4a37631e3b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java @@ -25,7 +25,6 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DirectoryInfo; -import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -55,12 +54,9 @@ public static Codec getCodec() { private final List acls; public OmDirectoryInfo(Builder builder) { + super(builder); this.name = builder.name; this.acls = builder.acls; - setMetadata(builder.metadata); - 
setObjectID(builder.objectID); - setUpdateID(builder.updateID); - setParentObjectID(builder.parentObjectID); this.creationTime = builder.creationTime; this.modificationTime = builder.modificationTime; } @@ -77,38 +73,34 @@ public static OmDirectoryInfo.Builder newBuilder() { /** * Builder for Directory Info. */ - public static class Builder { - private long parentObjectID; // pointer to parent directory - - private long objectID; - private long updateID; - + public static class Builder extends WithParentObjectId.Builder { private String name; private long creationTime; private long modificationTime; private final List acls; - private final Map metadata; public Builder() { //Default values this.acls = new LinkedList<>(); - this.metadata = new HashMap<>(); } + @Override public Builder setParentObjectID(long parentObjectId) { - this.parentObjectID = parentObjectId; + super.setParentObjectID(parentObjectId); return this; } + @Override public Builder setObjectID(long objectId) { - this.objectID = objectId; + super.setObjectID(objectId); return this; } + @Override public Builder setUpdateID(long updateId) { - this.updateID = updateId; + super.setUpdateID(updateId); return this; } @@ -141,15 +133,15 @@ public Builder addAcl(OzoneAcl ozoneAcl) { return this; } + @Override public Builder addMetadata(String key, String value) { - metadata.put(key, value); + super.addMetadata(key, value); return this; } + @Override public Builder addAllMetadata(Map additionalMetadata) { - if (additionalMetadata != null) { - metadata.putAll(additionalMetadata); - } + super.addAllMetadata(additionalMetadata); return this; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index e1b7ce1f27af..5186dd65fd3b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -101,10 +101,7 @@ public static Codec getCodec(boolean ignorePipeline) { private final List acls; private OmKeyInfo(Builder b) { - setMetadata(b.metadata); - setObjectID(b.objectID); - setUpdateID(b.updateID); - setParentObjectID(b.parentObjectID); + super(b); this.volumeName = b.volumeName; this.bucketName = b.bucketName; this.keyName = b.keyName; @@ -413,7 +410,7 @@ public String toString() { /** * Builder of OmKeyInfo. */ - public static class Builder { + public static class Builder extends WithParentObjectId.Builder { private String volumeName; private String bucketName; private String keyName; @@ -423,21 +420,19 @@ public static class Builder { private long creationTime; private long modificationTime; private ReplicationConfig replicationConfig; - private final Map metadata; private FileEncryptionInfo encInfo; - private final List acls; - private long objectID; - private long updateID; + private final List acls = new ArrayList<>(); // not persisted to DB. FileName will be the last element in path keyName. 
private String fileName; - private long parentObjectID; private FileChecksum fileChecksum; private boolean isFile; public Builder() { - this.metadata = new HashMap<>(); - acls = new ArrayList<>(); + } + + public Builder(OmKeyInfo obj) { + super(obj); } public Builder setVolumeName(String volume) { @@ -491,13 +486,15 @@ public Builder setReplicationConfig(ReplicationConfig replConfig) { return this; } + @Override public Builder addMetadata(String key, String value) { - metadata.put(key, value); + super.addMetadata(key, value); return this; } + @Override public Builder addAllMetadata(Map newMetadata) { - metadata.putAll(newMetadata); + super.addAllMetadata(newMetadata); return this; } @@ -520,13 +517,15 @@ public Builder addAcl(OzoneAcl ozoneAcl) { return this; } + @Override public Builder setObjectID(long obId) { - this.objectID = obId; + super.setObjectID(obId); return this; } + @Override public Builder setUpdateID(long id) { - this.updateID = id; + super.setUpdateID(id); return this; } @@ -535,8 +534,9 @@ public Builder setFileName(String keyFileName) { return this; } + @Override public Builder setParentObjectID(long parentID) { - this.parentObjectID = parentID; + super.setParentObjectID(parentID); return this; } @@ -777,7 +777,7 @@ public int hashCode() { */ @Override public OmKeyInfo copyObject() { - OmKeyInfo.Builder builder = new OmKeyInfo.Builder() + OmKeyInfo.Builder builder = new OmKeyInfo.Builder(this) .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) @@ -787,9 +787,6 @@ public OmKeyInfo copyObject() { .setReplicationConfig(replicationConfig) .setFileEncryptionInfo(encInfo) .setAcls(acls) - .setObjectID(getObjectID()) - .setUpdateID(getUpdateID()) - .setParentObjectID(getParentObjectID()) .setFileName(fileName) .setFile(isFile); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java index 76bbc5546bd8..d5bf7fa596f7 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java @@ -162,12 +162,11 @@ public PartKeyInfo lastEntry() { * information for a key. */ private OmMultipartKeyInfo(Builder b) { + super(b); this.uploadID = b.uploadID; this.creationTime = b.creationTime; this.replicationConfig = b.replicationConfig; this.partKeyInfoMap = new PartKeyInfoMap(b.partKeyInfoList); - setObjectID(b.objectID); - setUpdateID(b.updateID); this.parentID = b.parentID; } @@ -225,13 +224,11 @@ public ReplicationConfig getReplicationConfig() { /** * Builder of OmMultipartKeyInfo. 
*/ - public static class Builder { + public static class Builder extends WithObjectID.Builder { private String uploadID; private long creationTime; private ReplicationConfig replicationConfig; private final TreeMap partKeyInfoList; - private long objectID; - private long updateID; private long parentID; public Builder() { @@ -268,12 +265,12 @@ public Builder addPartKeyInfoList(int partNum, PartKeyInfo partKeyInfo) { } public Builder setObjectID(long obId) { - this.objectID = obId; + super.setObjectID(obId); return this; } public Builder setUpdateID(long id) { - this.updateID = id; + super.setUpdateID(id); return this; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java index d818df12298e..8eb931410eff 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java @@ -111,6 +111,20 @@ private OmVolumeArgs(String adminName, String ownerName, String volume, this.refCount = refCount; } + private OmVolumeArgs(Builder b) { + super(b); + this.adminName = b.adminName; + this.ownerName = b.ownerName; + this.volume = b.volume; + this.quotaInBytes = b.quotaInBytes; + this.quotaInNamespace = b.quotaInNamespace; + this.usedNamespace = b.usedNamespace; + this.acls = b.acls; + this.creationTime = b.creationTime; + this.modificationTime = b.modificationTime; + this.refCount = b.refCount; + } + public long getRefCount() { Preconditions.checkState(refCount >= 0L, "refCount should not be negative"); return refCount; @@ -297,7 +311,7 @@ public int hashCode() { /** * Builder for OmVolumeArgs. */ - public static class Builder { + public static class Builder extends WithObjectID.Builder { private String adminName; private String ownerName; private String volume; @@ -306,30 +320,18 @@ public static class Builder { private long quotaInBytes; private long quotaInNamespace; private long usedNamespace; - private Map metadata; private List acls; - private long objectID; - private long updateID; private long refCount; - /** - * Sets the Object ID for this Object. - * Object ID are unique and immutable identifier for each object in the - * System. - * @param id - long - */ + @Override public Builder setObjectID(long id) { - this.objectID = id; + super.setObjectID(id); return this; } - /** - * Sets the update ID for this Object. Update IDs are monotonically - * increasing values which are updated each time there is an update. - * @param id - long - */ + @Override public Builder setUpdateID(long id) { - this.updateID = id; + super.setUpdateID(id); return this; } @@ -337,8 +339,7 @@ public Builder setUpdateID(long id) { * Constructs a builder. */ public Builder() { - metadata = new HashMap<>(); - acls = new ArrayList(); + acls = new ArrayList<>(); quotaInBytes = OzoneConsts.QUOTA_RESET; quotaInNamespace = OzoneConsts.QUOTA_RESET; } @@ -383,15 +384,15 @@ public Builder setUsedNamespace(long namespaceUsage) { return this; } + @Override public Builder addMetadata(String key, String value) { - metadata.put(key, value); // overwrite if present. 
+ super.addMetadata(key, value); return this; } + @Override public Builder addAllMetadata(Map additionalMetaData) { - if (additionalMetaData != null) { - metadata.putAll(additionalMetaData); - } + super.addAllMetadata(additionalMetaData); return this; } @@ -406,17 +407,11 @@ public void setRefCount(long refCount) { this.refCount = refCount; } - /** - * Constructs a CreateVolumeArgument. - * @return CreateVolumeArgs. - */ public OmVolumeArgs build() { Preconditions.checkNotNull(adminName); Preconditions.checkNotNull(ownerName); Preconditions.checkNotNull(volume); - return new OmVolumeArgs(adminName, ownerName, volume, quotaInBytes, - quotaInNamespace, usedNamespace, metadata, acls, creationTime, - modificationTime, objectID, updateID, refCount); + return new OmVolumeArgs(this); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java index fd84ffe06605..47a48c37e8e0 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java @@ -718,8 +718,8 @@ public SnapshotInfo copyObject() { @Override public String toString() { return "SnapshotInfo{" + - ", snapshotId: '" + snapshotId + '\'' + - ", name: '" + name + "'," + + "snapshotId: '" + snapshotId + '\'' + + ", name: '" + name + '\'' + ", volumeName: '" + volumeName + '\'' + ", bucketName: '" + bucketName + '\'' + ", snapshotStatus: '" + snapshotStatus + '\'' + diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java index cc190399a7da..c0481c212e5f 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java @@ -25,7 +25,15 @@ */ public abstract class WithMetadata { - private Map metadata = new HashMap<>(); + private Map metadata; + + protected WithMetadata() { + metadata = new HashMap<>(); + } + + protected WithMetadata(Builder b) { + metadata = b.metadata; + } /** * Custom key value metadata. @@ -41,4 +49,39 @@ public final void setMetadata(Map metadata) { this.metadata = metadata; } + /** Builder for {@link WithMetadata}. 
*/ + public static class Builder { + private final Map metadata; + + protected Builder() { + metadata = new HashMap<>(); + } + + protected Builder(WithObjectID obj) { + metadata = new HashMap<>(obj.getMetadata()); + } + + public Builder addMetadata(String key, String value) { + metadata.put(key, value); + return this; + } + + public Builder addAllMetadata(Map additionalMetadata) { + if (additionalMetadata != null) { + metadata.putAll(additionalMetadata); + } + return this; + } + + public Builder setMetadata(Map map) { + metadata.clear(); + addAllMetadata(map); + return this; + } + + protected Map getMetadata() { + return metadata; + } + } + } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java index fb677871fab6..af9508196260 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java @@ -27,6 +27,16 @@ public abstract class WithObjectID extends WithMetadata { private long objectID; private long updateID; + protected WithObjectID() { + super(); + } + + protected WithObjectID(Builder b) { + super(b); + objectID = b.objectID; + updateID = b.updateID; + } + /** * ObjectIDs are unique and immutable identifier for each object in the * System. @@ -111,4 +121,47 @@ public String getObjectInfo() { public final void setUpdateID(long updateID) { this.updateID = updateID; } + + /** Builder for {@link WithObjectID}. */ + public static class Builder extends WithMetadata.Builder { + private long objectID; + private long updateID; + + protected Builder() { + super(); + } + + protected Builder(WithObjectID obj) { + super(obj); + objectID = obj.getObjectID(); + updateID = obj.getUpdateID(); + } + + /** + * Sets the Object ID for this Object. + * Object ID are unique and immutable identifier for each object in the + * System. + */ + public Builder setObjectID(long obId) { + this.objectID = obId; + return this; + } + + /** + * Sets the update ID for this Object. Update IDs are monotonically + * increasing values which are updated each time there is an update. + */ + public Builder setUpdateID(long id) { + this.updateID = id; + return this; + } + + public long getObjectID() { + return objectID; + } + + public long getUpdateID() { + return updateID; + } + } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java index b92b34e8e3bd..3e228e790405 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java @@ -24,6 +24,14 @@ public class WithParentObjectId extends WithObjectID { private long parentObjectID; + public WithParentObjectId() { + } + + public WithParentObjectId(Builder builder) { + super(builder); + parentObjectID = builder.getParentObjectID(); + } + /** * Object ID with additional parent ID field. * @@ -54,4 +62,27 @@ public final long getParentObjectID() { public final void setParentObjectID(long parentObjectID) { this.parentObjectID = parentObjectID; } + + /** Builder for {@link WithParentObjectId}. 
*/ + public static class Builder extends WithObjectID.Builder { + private long parentObjectID; + + protected Builder() { + super(); + } + + protected Builder(WithParentObjectId obj) { + super(obj); + parentObjectID = obj.getParentObjectID(); + } + + public Builder setParentObjectID(long parentObjectId) { + this.parentObjectID = parentObjectId; + return this; + } + + protected long getParentObjectID() { + return parentObjectID; + } + } } diff --git a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh index 5139dddcd8c1..4fca7bb6aaee 100755 --- a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh +++ b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh @@ -35,7 +35,7 @@ find "." -not -path '*/iteration*' -name 'TEST*.xml' -print0 \ > "${tempfile}" if [[ "${CHECK:-unit}" == "integration" ]]; then - find "." -not -path '*/iteration*' -name '*-output.txt' -print0 \ + find hadoop-ozone/integration-test -not -path '*/iteration*' -name '*-output.txt' -print0 \ | xargs -n1 -0 "grep" -l -E "not closed properly|was not shutdown properly" \ | awk -F/ '{sub("-output.txt",""); print $NF}' \ >> "${tempfile}" diff --git a/hadoop-ozone/dev-support/checks/checkstyle.sh b/hadoop-ozone/dev-support/checks/checkstyle.sh index cb8b6f8f9151..18ae39059755 100755 --- a/hadoop-ozone/dev-support/checks/checkstyle.sh +++ b/hadoop-ozone/dev-support/checks/checkstyle.sh @@ -42,7 +42,7 @@ cat "${REPORT_DIR}/output.log" find "." -name checkstyle-errors.xml -print0 \ | xargs -0 sed '$!N; //d' \ + -e '//d' \ -e '//dev/null 2>&1 && pwd )" CHECK=integration -source "${DIR}/junit.sh" -pl :ozone-integration-test,:mini-chaos-tests "$@" +source "${DIR}/junit.sh" "$@" diff --git a/hadoop-ozone/dev-support/checks/junit.sh b/hadoop-ozone/dev-support/checks/junit.sh index 768a1f32a38b..9d2efd8ac641 100755 --- a/hadoop-ozone/dev-support/checks/junit.sh +++ b/hadoop-ozone/dev-support/checks/junit.sh @@ -50,10 +50,8 @@ if [[ -f hadoop-ozone/dist/src/shell/ozone/ozone-functions.sh ]]; then ozone_java_setup fi -if [[ "${CHECK}" == "integration" ]] || [[ ${ITERATIONS} -gt 1 ]]; then - if [[ ${OZONE_REPO_CACHED} == "false" ]]; then - mvn ${MAVEN_OPTIONS} -DskipTests clean install - fi +if [[ ${ITERATIONS} -gt 1 ]] && [[ ${OZONE_REPO_CACHED} == "false" ]]; then + mvn ${MAVEN_OPTIONS} -DskipTests clean install fi REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/${CHECK}"} diff --git a/hadoop-ozone/dev-support/checks/unit.sh b/hadoop-ozone/dev-support/checks/unit.sh index d2d50c5ff03f..8e540fa9e141 100755 --- a/hadoop-ozone/dev-support/checks/unit.sh +++ b/hadoop-ozone/dev-support/checks/unit.sh @@ -14,8 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
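To summarize the helper refactor running through the hunks above: `WithMetadata`, `WithObjectID` and `WithParentObjectId` now ship builder base classes, the concrete builders (OmBucketInfo, OmKeyInfo, OmVolumeArgs, ...) extend them and only narrow the return type of the inherited setters, and the entity constructors read the shared state via `super(builder)`. A condensed sketch of the pattern; `ExampleEntity` is a made-up class assumed to live in the same `org.apache.hadoop.ozone.om.helpers` package as the real helpers (the base builders' constructors are protected).

```java
package org.apache.hadoop.ozone.om.helpers;

import java.util.Map;

/** Hypothetical entity showing the builder pattern introduced by this change. */
public class ExampleEntity extends WithObjectID {
  private final String name;

  private ExampleEntity(Builder b) {
    super(b);          // metadata, objectID and updateID are copied from the shared builder
    this.name = b.name;
  }

  public String getName() {
    return name;
  }

  /** Subclass builder: state lives in the base builders, overrides only narrow the type. */
  public static class Builder extends WithObjectID.Builder {
    private String name;

    public Builder setName(String name) {
      this.name = name;
      return this;
    }

    @Override
    public Builder setObjectID(long id) {
      super.setObjectID(id);
      return this;
    }

    @Override
    public Builder addAllMetadata(Map<String, String> metadata) {
      super.addAllMetadata(metadata);
      return this;
    }

    public ExampleEntity build() {
      return new ExampleEntity(this);
    }
  }
}
```

Copy-style paths such as `OmBucketInfo.toBuilder()` and `OmKeyInfo.copyObject()` lean on the new `Builder(this)` constructors, which is why the explicit `setObjectID`/`setUpdateID`/`addAllMetadata` calls disappear from those methods in the hunks above.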
-#checks:unit - DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" CHECK=unit source "${DIR}/junit.sh" \ diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index 3ec4b83b037f..64fc2cfa3728 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -28,7 +28,7 @@ UTF-8 true - 20230615-1 + 20240316-jdk17-1 apache/ozone-testkrb5:20230318-1 true diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ListKey Metrics.json b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ListKey Metrics.json index 7fc43b286cb1..0e8ed806f166 100644 --- a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ListKey Metrics.json +++ b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ListKey Metrics.json @@ -121,7 +121,7 @@ "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, - "legendFormat": "{{hostname}}, {{__name__}}", + "legendFormat": "{{hostname}}", "range": true, "refId": "A", "useBackend": false @@ -136,7 +136,7 @@ "h": 1, "w": 24, "x": 0, - "y": 0 + "y": 9 }, "id": 14, "panels": [], @@ -198,37 +198,13 @@ ] } }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] + "overrides": [] }, "gridPos": { "h": 8, "w": 12, "x": 0, - "y": 1 + "y": 10 }, "id": 13, "options": { @@ -260,7 +236,7 @@ "useBackend": false } ], - "title": "s3_gateway_metrics_list_key_count", + "title": "List Keys Count", "type": "timeseries" }, { @@ -269,7 +245,7 @@ "h": 1, "w": 24, "x": 0, - "y": 9 + "y": 18 }, "id": 12, "panels": [], @@ -334,10 +310,10 @@ "overrides": [] }, "gridPos": { - "h": 10, - "w": 24, + "h": 8, + "w": 12, "x": 0, - "y": 10 + "y": 19 }, "id": 15, "options": { @@ -385,7 +361,7 @@ "useBackend": false } ], - "title": "Acl check and Resolve Bucket latency Avg. Time (ns)", + "title": "ACL Check and Resolve Bucket Latency Avg. Time (ns)", "type": "timeseries" }, { @@ -449,8 +425,8 @@ "gridPos": { "h": 8, "w": 12, - "x": 0, - "y": 20 + "x": 12, + "y": 19 }, "id": 16, "options": { @@ -482,7 +458,7 @@ "useBackend": false } ], - "title": "Read from RocksDb ", + "title": "Read from RocksDB Avg. 
Time (ns)", "type": "timeseries" }, { @@ -540,37 +516,13 @@ ] } }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] + "overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 20 + "x": 0, + "y": 27 }, "id": 8, "options": { @@ -602,7 +554,7 @@ "useBackend": false } ], - "title": "Ops Per Sec", + "title": "List Keys Num Ops Per Sec", "type": "timeseries" }, { @@ -660,37 +612,13 @@ ] } }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] + "overrides": [] }, "gridPos": { "h": 8, "w": 12, - "x": 0, - "y": 28 + "x": 12, + "y": 27 }, "id": 7, "options": { diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/.ssh/environment b/hadoop-ozone/dist/src/main/compose/ozone-balancer/.env similarity index 85% rename from hadoop-ozone/dist/src/main/compose/ozone-om-ha/.ssh/environment rename to hadoop-ozone/dist/src/main/compose/ozone-balancer/.env index cbde0f2078df..0e99fab82fd0 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/.ssh/environment +++ b/hadoop-ozone/dist/src/main/compose/ozone-balancer/.env @@ -13,4 +13,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -JAVA_HOME=/usr/lib/jvm/jre + +HDDS_VERSION=${hdds.version} +OZONE_RUNNER_VERSION=${docker.ozone-runner.version} +OZONE_RUNNER_IMAGE=apache/ozone-runner +OZONE_OPTS= \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-compose.yaml new file mode 100644 index 000000000000..dc6bae7822e5 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-compose.yaml @@ -0,0 +1,179 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +version: "3.8" + +# reusable fragments (see https://docs.docker.com/compose/compose-file/#extension-fields) +x-common-config: + &common-config + image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION} + volumes: + - ../..:/opt/hadoop + env_file: + - docker-config + +x-replication: + &replication + OZONE-SITE.XML_ozone.server.default.replication: ${OZONE_REPLICATION_FACTOR:-3} + +services: + datanode1: + <<: *common-config + ports: + - 19864 + - 9882 + environment: + <<: *replication + command: ["ozone","datanode"] + volumes: + - tmpfs1:/data + - ../..:/opt/hadoop + datanode2: + <<: *common-config + ports: + - 19864 + - 9882 + environment: + <<: *replication + command: [ "ozone","datanode" ] + volumes: + - tmpfs2:/data + - ../..:/opt/hadoop + datanode3: + <<: *common-config + ports: + - 19864 + - 9882 + environment: + <<: *replication + command: [ "ozone","datanode" ] + volumes: + - tmpfs3:/data + - ../..:/opt/hadoop + datanode4: + <<: *common-config + ports: + - 19864 + - 9882 + environment: + <<: *replication + command: [ "ozone","datanode" ] + volumes: + - tmpfs4:/data + - ../..:/opt/hadoop + om1: + <<: *common-config + environment: + WAITFOR: scm3:9894 + ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION + <<: *replication + ports: + - 9874:9874 + - 9862 + hostname: om1 + command: ["ozone","om"] + om2: + <<: *common-config + environment: + WAITFOR: scm3:9894 + ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION + <<: *replication + ports: + - 9874 + - 9862 + hostname: om2 + command: ["ozone","om"] + om3: + <<: *common-config + environment: + WAITFOR: scm3:9894 + ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION + <<: *replication + ports: + - 9874 + - 9862 + hostname: om3 + command: ["ozone","om"] + scm1: + <<: *common-config + ports: + - 9876:9876 + environment: + ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: ${OZONE_SAFEMODE_MIN_DATANODES:-1} + <<: *replication + command: ["ozone","scm"] + scm2: + <<: *common-config + ports: + - 9876 + environment: + WAITFOR: scm1:9894 + ENSURE_SCM_BOOTSTRAPPED: /data/metadata/scm/current/VERSION + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: ${OZONE_SAFEMODE_MIN_DATANODES:-1} + <<: *replication + command: ["ozone","scm"] + scm3: + <<: *common-config + ports: + - 9876 + environment: + WAITFOR: scm2:9894 + ENSURE_SCM_BOOTSTRAPPED: /data/metadata/scm/current/VERSION + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: ${OZONE_SAFEMODE_MIN_DATANODES:-1} + <<: *replication + command: ["ozone","scm"] + httpfs: + <<: *common-config + environment: + OZONE-SITE.XML_hdds.scm.safemode.min.datanode: ${OZONE_SAFEMODE_MIN_DATANODES:-1} + <<: *replication + ports: + - 14000:14000 + command: [ "ozone","httpfs" ] + s3g: + <<: *common-config + environment: + OZONE_OPTS: + <<: *replication + ports: + - 9878:9878 + command: ["ozone","s3g"] +volumes: + tmpfs1: + driver: local + driver_opts: + o: "size=1g,uid=1000" + device: tmpfs + type: tmpfs + tmpfs2: + driver: local + driver_opts: + o: "size=1g,uid=2000" + device: tmpfs + type: tmpfs + tmpfs3: + driver: local + driver_opts: + o: "size=1g,uid=3000" + device: tmpfs + type: tmpfs + tmpfs4: + driver: local + driver_opts: + o: "size=1g,uid=4000" + device: tmpfs + type: tmpfs diff --git a/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config new file mode 100644 index 000000000000..5e715e5a563a --- /dev/null +++ 
b/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config @@ -0,0 +1,59 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# For HttpFS service it is required to enable proxying users. +CORE-SITE.XML_hadoop.proxyuser.hadoop.hosts=* +CORE-SITE.XML_hadoop.proxyuser.hadoop.groups=* + +CORE-SITE.XML_fs.defaultFS=ofs://om/ +CORE-SITE.XML_fs.trash.interval=1 + +OZONE-SITE.XML_ozone.om.service.ids=om +OZONE-SITE.XML_ozone.om.nodes.om=om1,om2,om3 +OZONE-SITE.XML_ozone.om.address.om.om1=om1 +OZONE-SITE.XML_ozone.om.address.om.om2=om2 +OZONE-SITE.XML_ozone.om.address.om.om3=om3 +OZONE-SITE.XML_ozone.om.ratis.enable=true + +OZONE-SITE.XML_ozone.scm.service.ids=scmservice +OZONE-SITE.XML_ozone.scm.nodes.scmservice=scm1,scm2,scm3 +OZONE-SITE.XML_ozone.scm.address.scmservice.scm1=scm1 +OZONE-SITE.XML_ozone.scm.address.scmservice.scm2=scm2 +OZONE-SITE.XML_ozone.scm.address.scmservice.scm3=scm3 +OZONE-SITE.XML_ozone.scm.ratis.enable=true +OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data +OZONE-SITE.XML_ozone.scm.container.size=100MB +OZONE-SITE.XML_ozone.scm.block.size=20MB +OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB +OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata +OZONE-SITE.XML_hdds.node.report.interval=20s +OZONE-SITE.XML_hdds.heartbeat.interval=20s +OZONE-SITE.XML_hdds.datanode.du.refresh.period=20s +OZONE-SITE.XML_hdds.datanode.dir=/data/hdds +OZONE-SITE.XML_hdds.datanode.volume.min.free.space=100MB +OZONE-SITE.XML_ozone.scm.pipeline.creation.auto.factor.one=false +OZONE-SITE.XML_ozone.datanode.pipeline.limit=1 +OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s +OZONE-SITE.XML_ozone.scm.primordial.node.id=scm1 +OZONE-SITE.XML_hdds.container.report.interval=30s +OZONE-SITE.XML_ozone.om.s3.grpc.server_enabled=true +OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon +OZONE-SITE.XML_dfs.container.ratis.datastream.enabled=true + +OZONE_CONF_DIR=/etc/hadoop +OZONE_LOG_DIR=/var/log/hadoop + +no_proxy=om1,om2,om3,scm,s3g,recon,kdc,localhost,127.0.0.1 diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/environment b/hadoop-ozone/dist/src/main/compose/ozone-balancer/test.sh similarity index 65% rename from hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/environment rename to hadoop-ozone/dist/src/main/compose/ozone-balancer/test.sh index cbde0f2078df..e79979877ba3 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/.ssh/environment +++ b/hadoop-ozone/dist/src/main/compose/ozone-balancer/test.sh @@ -1,3 +1,4 @@ +#!/usr/bin/env bash # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -13,4 +14,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. -JAVA_HOME=/usr/lib/jvm/jre + +#suite:balancer + +COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +export COMPOSE_DIR +export OM_SERVICE_ID="om" +export OM=om1 +export SCM=scm1 +export OZONE_REPLICATION_FACTOR=3 + +# shellcheck source=/dev/null +source "$COMPOSE_DIR/../testlib.sh" + +# We need 4 dataNodes in this tests +start_docker_env 4 + +execute_robot_test ${OM} balancer/testBalancer.robot diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh index 1361a4c0c335..976e490d32ca 100755 --- a/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/test.sh @@ -38,9 +38,11 @@ execute_robot_test ${SCM} -v SCHEME:ofs -v BUCKET_TYPE:link -N ozonefs-ofs-link ## Exclude virtual-host tests. This is tested separately as it requires additional config. exclude="--exclude virtual-host" for bucket in generated; do - execute_robot_test ${SCM} -v BUCKET:${bucket} -N s3-${bucket} ${exclude} s3 - # some tests are independent of the bucket type, only need to be run once - exclude="--exclude virtual-host --exclude no-bucket-type" + for layout in OBJECT_STORE LEGACY FILE_SYSTEM_OPTIMIZED; do + execute_robot_test ${SCM} -v BUCKET:${bucket} -v BUCKET_LAYOUT:${layout} -N s3-${layout}-${bucket} ${exclude} s3 + # some tests are independent of the bucket type, only need to be run once + exclude="--exclude virtual-host --exclude no-bucket-type" + done done execute_robot_test ${SCM} freon diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/Dockerfile b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/Dockerfile index 79aeec488586..714a6f56d66c 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/Dockerfile +++ b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/Dockerfile @@ -36,7 +36,8 @@ RUN sudo chown hadoop /opt RUN sudo chmod 600 /opt/.ssh/* RUN sudo chmod 700 /opt/.ssh -RUN sudo sh -c 'echo "export JAVA_HOME=/usr/lib/jvm/jre/" >> /etc/profile' +RUN echo "export JAVA_HOME=$JAVA_HOME" | sudo sh -c 'cat >> /etc/profile' +RUN echo "JAVA_HOME=$JAVA_HOME" | sh -c 'cat >> /opt/.ssh/environment' # Install required robot framework libraries RUN sudo pip3 install robotframework-sshlibrary diff --git a/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile b/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile index fd9993b56e2e..35c6ccabd375 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile +++ b/hadoop-ozone/dist/src/main/compose/ozonescripts/Dockerfile @@ -35,5 +35,7 @@ RUN sudo chown hadoop /opt RUN sudo chmod 600 /opt/.ssh/* RUN sudo chmod 700 /opt/.ssh -RUN sudo sh -c 'echo "export JAVA_HOME=/usr/lib/jvm/jre/" >> /etc/profile' +RUN echo "export JAVA_HOME=$JAVA_HOME" | sudo sh -c 'cat >> /etc/profile' +RUN echo "JAVA_HOME=$JAVA_HOME" | sh -c 'cat >> /opt/.ssh/environment' + CMD ["sudo","/usr/sbin/sshd","-D"] diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config index d8b82ff22013..a194fbc22653 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config @@ -157,7 +157,6 @@ KMS-SITE.XML_hadoop.kms.proxyuser.s3g.hosts=* #BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm OZONE_DATANODE_SECURE_USER=root 
-JAVA_HOME=/usr/lib/jvm/jre JSVC_HOME=/usr/bin OZONE_CONF_DIR=/etc/hadoop diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config index c4ddfdbafecc..0ffba0c96f48 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config @@ -75,7 +75,6 @@ CORE-SITE.XML_hadoop.security.authorization=false #BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm OZONE_DATANODE_SECURE_USER=root -JAVA_HOME=/usr/lib/jvm/jre JSVC_HOME=/usr/bin OZONE_CLASSPATH= diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config index d09e2db8e3d2..b13eecf68e84 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config @@ -149,7 +149,6 @@ KMS-SITE.XML_hadoop.kms.proxyuser.s3g.hosts=* #BYTEMAN_SCRIPT_URL=https://raw.githubusercontent.com/apache/hadoop/trunk/dev-support/byteman/hadooprpc.btm OZONE_DATANODE_SECURE_USER=root -JAVA_HOME=/usr/lib/jvm/jre JSVC_HOME=/usr/bin OZONE_CONF_DIR=/etc/hadoop diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt index a705411438d1..d118c92e29c4 100644 --- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt +++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt @@ -498,8 +498,6 @@ Apache Software Foundation License 2.0 nvd3-1.8.5.min.js.map nvd3-1.8.5.min.css.map nvd3-1.8.5.min.js -AbstractFuture.java -TimeoutFuture.java BSD 3-Clause diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot index ea10fb98d874..c50daa724dad 100644 --- a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot @@ -71,6 +71,19 @@ Verbose container info ${output} = Execute ozone admin --verbose container info "${CONTAINER}" Should contain ${output} Pipeline Info +List containers as JSON + ${output} = Execute ozone admin container info "${CONTAINER}" --json | jq -r '.' + Should contain ${output} containerInfo + Should contain ${output} pipeline + Should contain ${output} replicas + Should contain ${output} writePipelineID + +Report containers as JSON + ${output} = Execute ozone admin container report --json | jq -r '.' 
+ Should contain ${output} reportTimeStamp + Should contain ${output} stats + Should contain ${output} samples + Close container ${container} = Execute ozone admin container list --state OPEN | jq -r 'select(.replicationConfig.replicationFactor == "THREE") | .containerID' | head -1 Execute ozone admin container close "${container}" @@ -85,6 +98,8 @@ Incomplete command Should contain ${output} info Should contain ${output} create Should contain ${output} close + Should contain ${output} report + Should contain ${output} upgrade #List containers on unknown host # ${output} = Execute And Ignore Error ozone admin --verbose container list --scm unknown-host diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot index b4ee5b952906..5b6c2fe97e25 100644 --- a/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot @@ -99,3 +99,17 @@ List datanodes as JSON Should contain ${output} datanodeDetails Should contain ${output} healthState Should contain ${output} opState + +Get usage info as JSON + ${output} = Execute ozone admin datanode usageinfo -m --json | jq -r '.' + Should contain ${output} capacity + Should contain ${output} committed + Should contain ${output} containerCount + Should contain ${output} datanodeDetails + Should contain ${output} freeSpaceToSpare + Should contain ${output} ozoneUsed + Should contain ${output} ozoneUsedPercent + Should contain ${output} remaining + Should contain ${output} remainingPercent + Should contain ${output} totalUsed + Should contain ${output} totalUsedPercent diff --git a/hadoop-ozone/dist/src/main/smoketest/balancer/testBalancer.robot b/hadoop-ozone/dist/src/main/smoketest/balancer/testBalancer.robot new file mode 100644 index 000000000000..49679587be94 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/balancer/testBalancer.robot @@ -0,0 +1,144 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+*** Settings ***
+Documentation       Smoketest for Ozone container balancer
+Library             OperatingSystem
+Library             Collections
+Resource            ../commonlib.robot
+Resource            ../ozone-lib/shell.robot
+
+Test Timeout        20 minutes
+
+*** Variables ***
+${SECURITY_ENABLED}     false
+${HOST}                 datanode1
+${VOLUME}               volume1
+${BUCKET}               bucket1
+${SIZE}                 104857600
+
+
+** Keywords ***
+Prepare For Tests
+    Execute                 dd if=/dev/urandom of=/tmp/100mb bs=1048576 count=100
+    Run Keyword if          '${SECURITY_ENABLED}' == 'true'    Kinit test user    testuser    testuser.keytab
+    Execute                 ozone sh volume create /${VOLUME}
+    Execute                 ozone sh bucket create /${VOLUME}/${BUCKET}
+
+
+Datanode In Maintenance Mode
+    ${result} =             Execute    ozone admin datanode maintenance ${HOST}
+    Should Contain          ${result}    Entering maintenance mode on datanode
+    ${result} =             Execute    ozone admin datanode list | grep "Operational State:*"
+    Wait Until Keyword Succeeds    30sec    5sec    Should contain    ${result}    ENTERING_MAINTENANCE
+    Wait Until Keyword Succeeds    3min    10sec    Related pipelines are closed
+    Sleep                   60000ms
+
+Related pipelines are closed
+    ${result} =             Execute    ozone admin datanode list | awk -v RS= '{$1=$1}1'|grep MAINT | sed -e 's/^.*pipelines: \\(.*\\)$/\\1/'
+    Should Contain Any      ${result}    CLOSED    No related pipelines or the node is not in Healthy state.
+
+Datanode Recommission
+    ${result} =             Execute    ozone admin datanode recommission ${HOST}
+    Should Contain          ${result}    Started recommissioning datanode
+    Wait Until Keyword Succeeds    1min    10sec    Datanode Recommission is Finished
+    Sleep                   300000ms
+
+Datanode Recommission is Finished
+    ${result} =             Execute    ozone admin datanode list | grep "Operational State:*"
+    Should Not Contain      ${result}    ENTERING_MAINTENANCE
+
+Run Container Balancer
+    ${result} =             Execute    ozone admin containerbalancer start -t 1 -d 100 -i 1
+    Should Contain          ${result}    Container Balancer started successfully.
+    ${result} =             Execute    ozone admin containerbalancer status
+    Should Contain          ${result}    ContainerBalancer is Running.
+    Wait Until Keyword Succeeds    3min    10sec    ContainerBalancer is Not Running
+    Sleep                   60000ms
+
+ContainerBalancer is Not Running
+    ${result} =             Execute    ozone admin containerbalancer status
+    Should contain          ${result}    ContainerBalancer is Not Running.
+ +Create Multiple Keys + [arguments] ${NUM_KEYS} + ${file} = Set Variable /tmp/100mb + FOR ${INDEX} IN RANGE ${NUM_KEYS} + ${fileName} = Set Variable file-${INDEX}.txt + ${key} = Set Variable /${VOLUME}/${BUCKET}/${fileName} + LOG ${fileName} + Create Key ${key} ${file} + Key Should Match Local File ${key} ${file} + END + +Datanode Usageinfo + [arguments] ${uuid} + ${result} = Execute ozone admin datanode usageinfo --uuid=${uuid} + Should Contain ${result} Ozone Used + +Get Uuid + ${result} = Execute ozone admin datanode list | awk -v RS= '{$1=$1}1'| grep ${HOST} | sed -e 's/Datanode: //'|sed -e 's/ .*$//' + [return] ${result} + +Close All Containers + FOR ${INDEX} IN RANGE 15 + ${container} = Execute ozone admin container list --state OPEN | jq -r 'select(.replicationConfig.replicationFactor == "THREE") | .containerID' | head -1 + EXIT FOR LOOP IF "${container}" == "${EMPTY}" + ${message} = Execute And Ignore Error ozone admin container close "${container}" + Run Keyword If '${message}' != '${EMPTY}' Should Contain ${message} is in closing state + ${output} = Execute ozone admin container info "${container}" + Should contain ${output} CLOS + END + Wait until keyword succeeds 3min 10sec All container is closed + +All container is closed + ${output} = Execute ozone admin container list --state OPEN + Should Be Empty ${output} + +Get Datanode Ozone Used Bytes Info + [arguments] ${uuid} + ${output} = Execute export DATANODES=$(ozone admin datanode list --json) && for datanode in $(echo "$\{DATANODES\}" | jq -r '.[].datanodeDetails.uuid'); do ozone admin datanode usageinfo --uuid=$\{datanode\} --json | jq '{(.[0].datanodeDetails.uuid) : .[0].ozoneUsed}'; done | jq -s add + ${result} = Execute echo '${output}' | jq '. | to_entries | .[] | select(.key == "${uuid}") | .value' + [return] ${result} + +** Test Cases *** +Verify Container Balancer for RATIS containers + Prepare For Tests + + Datanode In Maintenance Mode + + ${uuid} = Get Uuid + Datanode Usageinfo ${uuid} + + Create Multiple Keys 3 + + Close All Containers + + ${datanodeOzoneUsedBytesInfo} = Get Datanode Ozone Used Bytes Info ${uuid} + Should Be True ${datanodeOzoneUsedBytesInfo} < ${SIZE} + + Datanode Recommission + + Run Container Balancer + + ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} = Get Datanode Ozone Used Bytes Info ${uuid} + Should Not Be Equal As Integers ${datanodeOzoneUsedBytesInfo} ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} + Should Be True ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} < ${SIZE} * 3.5 + Should Be True ${datanodeOzoneUsedBytesInfoAfterContainerBalancing} > ${SIZE} * 3 + + + + + diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot index 840fb963d8d1..b20537014dd1 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot @@ -23,6 +23,7 @@ ${ENDPOINT_URL} http://s3g:9878 ${OZONE_S3_HEADER_VERSION} v4 ${OZONE_S3_SET_CREDENTIALS} true ${BUCKET} generated +${BUCKET_LAYOUT} OBJECT_STORE ${KEY_NAME} key1 ${OZONE_S3_TESTS_SET_UP} ${FALSE} ${OZONE_AWS_ACCESS_KEY_ID} ${EMPTY} @@ -127,16 +128,12 @@ Create bucket with name ${result} = Execute AWSS3APICli create-bucket --bucket ${bucket} Should contain ${result} Location Should contain ${result} ${bucket} -Create legacy bucket - ${postfix} = Generate Ozone String - ${legacy_bucket} = Set Variable legacy-bucket-${postfix} - ${result} = Execute and checkrc ozone sh bucket create -l LEGACY 
s3v/${legacy_bucket} 0 - [Return] ${legacy_bucket} -Create obs bucket +Create bucket with layout + [Arguments] ${layout} ${postfix} = Generate Ozone String - ${bucket} = Set Variable obs-bucket-${postfix} - ${result} = Execute and checkrc ozone sh bucket create -l OBJECT_STORE s3v/${bucket} 0 + ${bucket} = Set Variable bucket-${postfix} + ${result} = Execute ozone sh bucket create --layout ${layout} s3v/${bucket} [Return] ${bucket} Setup s3 tests @@ -144,7 +141,7 @@ Setup s3 tests Run Keyword Generate random prefix Run Keyword Install aws cli Run Keyword if '${OZONE_S3_SET_CREDENTIALS}' == 'true' Setup v4 headers - Run Keyword if '${BUCKET}' == 'generated' Create generated bucket + Run Keyword if '${BUCKET}' == 'generated' Create generated bucket ${BUCKET_LAYOUT} Run Keyword if '${BUCKET}' == 'link' Setup links for S3 tests Run Keyword if '${BUCKET}' == 'encrypted' Create encrypted bucket Run Keyword if '${BUCKET}' == 'erasure' Create EC bucket @@ -154,18 +151,19 @@ Setup links for S3 tests ${exists} = Bucket Exists o3://${OM_SERVICE_ID}/s3v/link Return From Keyword If ${exists} Execute ozone sh volume create o3://${OM_SERVICE_ID}/legacy - Execute ozone sh bucket create o3://${OM_SERVICE_ID}/legacy/source-bucket + Execute ozone sh bucket create --layout ${BUCKET_LAYOUT} o3://${OM_SERVICE_ID}/legacy/source-bucket Create link link Create generated bucket - ${BUCKET} = Create bucket + [Arguments] ${layout}=OBJECT_STORE + ${BUCKET} = Create bucket with layout ${layout} Set Global Variable ${BUCKET} Create encrypted bucket Return From Keyword if '${SECURITY_ENABLED}' == 'false' ${exists} = Bucket Exists o3://${OM_SERVICE_ID}/s3v/encrypted Return From Keyword If ${exists} - Execute ozone sh bucket create -k ${KEY_NAME} o3://${OM_SERVICE_ID}/s3v/encrypted + Execute ozone sh bucket create -k ${KEY_NAME} --layout ${BUCKET_LAYOUT} o3://${OM_SERVICE_ID}/s3v/encrypted Create link [arguments] ${bucket} @@ -175,7 +173,7 @@ Create link Create EC bucket ${exists} = Bucket Exists o3://${OM_SERVICE_ID}/s3v/erasure Return From Keyword If ${exists} - Execute ozone sh bucket create --replication rs-3-2-1024k --type EC o3://${OM_SERVICE_ID}/s3v/erasure + Execute ozone sh bucket create --replication rs-3-2-1024k --type EC --layout ${BUCKET_LAYOUT} o3://${OM_SERVICE_ID}/s3v/erasure Generate random prefix ${random} = Generate Ozone String diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot index af7571d35b8d..e2bca772bcd9 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot @@ -39,26 +39,42 @@ Copy Object Happy Scenario Execute date > /tmp/copyfile ${file_checksum} = Execute md5sum /tmp/copyfile | awk '{print $1}' - ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key ${PREFIX}/copyobject/key=value/f1 --body /tmp/copyfile + ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key ${PREFIX}/copyobject/key=value/f1 --body /tmp/copyfile --metadata="custom-key1=custom-value1,custom-key2=custom-value2,gdprEnabled=true" ${eTag} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 Should Be Equal ${eTag} \"${file_checksum}\" ${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix ${PREFIX}/copyobject/key=value/ Should contain ${result} f1 - ${result} = Execute AWSS3ApiCli copy-object --bucket ${DESTBUCKET} --key ${PREFIX}/copyobject/key=value/f1 --copy-source ${BUCKET}/${PREFIX}/copyobject/key=value/f1 + ${result} = 
Execute AWSS3ApiCli copy-object --bucket ${DESTBUCKET} --key ${PREFIX}/copyobject/key=value/f1 --copy-source ${BUCKET}/${PREFIX}/copyobject/key=value/f1 --metadata="custom-key3=custom-value3,custom-key4=custom-value4" ${eTag} = Execute and checkrc echo '${result}' | jq -r '.CopyObjectResult.ETag' 0 Should Be Equal ${eTag} \"${file_checksum}\" ${result} = Execute AWSS3ApiCli list-objects --bucket ${DESTBUCKET} --prefix ${PREFIX}/copyobject/key=value/ Should contain ${result} f1 + + #check that the custom metadata of the source key has been copied to the destination key (default copy directive is COPY) + ${result} = Execute AWSS3ApiCli head-object --bucket ${BUCKET} --key ${PREFIX}/copyobject/key=value/f1 + Should contain ${result} \"custom-key1\": \"custom-value1\" + Should contain ${result} \"custom-key2\": \"custom-value2\" + # COPY directive ignores any metadata specified in the copy object request + Should Not contain ${result} \"custom-key3\": \"custom-value3\" + Should Not contain ${result} \"custom-key4\": \"custom-value4\" + #copying again will not throw error - ${result} = Execute AWSS3ApiCli copy-object --bucket ${DESTBUCKET} --key ${PREFIX}/copyobject/key=value/f1 --copy-source ${BUCKET}/${PREFIX}/copyobject/key=value/f1 + #also uses the REPLACE copy directive + ${result} = Execute AWSS3ApiCli copy-object --bucket ${DESTBUCKET} --key ${PREFIX}/copyobject/key=value/f1 --copy-source ${BUCKET}/${PREFIX}/copyobject/key=value/f1 --metadata="custom-key3=custom-value3,custom-key4=custom-value4" --metadata-directive REPLACE ${eTag} = Execute and checkrc echo '${result}' | jq -r '.CopyObjectResult.ETag' 0 Should Be Equal ${eTag} \"${file_checksum}\" ${result} = Execute AWSS3ApiCli list-objects --bucket ${DESTBUCKET} --prefix ${PREFIX}/copyobject/key=value/ Should contain ${result} f1 + ${result} = Execute AWSS3ApiCli head-object --bucket ${DESTBUCKET} --key ${PREFIX}/copyobject/key=value/f1 + Should contain ${result} \"custom-key3\": \"custom-value3\" + Should contain ${result} \"custom-key4\": \"custom-value4\" + # REPLACE directive uses the custom metadata specified in the request instead of the source key's custom metadata + Should Not contain ${result} \"custom-key1\": \"custom-value1\" + Should Not contain ${result} \"custom-key2\": \"custom-value2\" Copy Object Where Bucket is not available ${result} = Execute AWSS3APICli and checkrc copy-object --bucket dfdfdfdfdfnonexistent --key ${PREFIX}/copyobject/key=value/f1 --copy-source ${BUCKET}/${PREFIX}/copyobject/key=value/f1 255 @@ -76,3 +92,13 @@ Copy Object Where both source and dest are same with change to storageclass Copy Object Where Key not available ${result} = Execute AWSS3APICli and checkrc copy-object --bucket ${DESTBUCKET} --key ${PREFIX}/copyobject/key=value/f1 --copy-source ${BUCKET}/nonnonexistentkey 255 Should contain ${result} NoSuchKey + +Copy Object using an invalid copy directive + ${result} = Execute AWSS3ApiCli and checkrc copy-object --bucket ${DESTBUCKET} --key ${PREFIX}/copyobject/key=value/f1 --copy-source ${BUCKET}/${PREFIX}/copyobject/key=value/f1 --metadata-directive INVALID 255 + Should contain ${result} InvalidArgument + +Copy Object with user defined metadata size larger than 2 KB + Execute echo "Randomtext" > /tmp/testfile2 + ${custom_metadata_value} = Execute printf 'v%.0s' {1..3000} + ${result} = Execute AWSS3ApiCli and checkrc copy-object --bucket ${DESTBUCKET} --key ${PREFIX}/copyobject/key=value/f1 --copy-source ${BUCKET}/${PREFIX}/copyobject/key=value/f1 
--metadata="custom-key1=${custom_metadata_value}" --metadata-directive REPLACE 255 + Should contain ${result} MetadataTooLarge \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objecthead.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objecthead.robot index be0582edd1f2..66f3461b01dd 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/objecthead.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/objecthead.robot @@ -40,22 +40,23 @@ Head object in non existing bucket ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET}-non-existent --key ${PREFIX}/headobject/key=value/f1 255 Should contain ${result} 404 Should contain ${result} Not Found + Head object where path is a directory - ${legacy-bucket} = Create legacy bucket - ${result} = Execute AWSS3APICli and checkrc put-object --bucket ${legacy-bucket} --key ${PREFIX}/headobject/keyvalue/f1 --body /tmp/testfile 0 - ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${legacy-bucket} --key ${PREFIX}/headobject/keyvalue/ 255 + Pass Execution If '${BUCKET_LAYOUT}' == 'FILE_SYSTEM_OPTIMIZED' does not apply to FSO buckets + ${result} = Execute AWSS3APICli and checkrc put-object --bucket ${BUCKET} --key ${PREFIX}/headobject/keyvalue/f1 --body /tmp/testfile 0 + ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/headobject/keyvalue/ 255 Should contain ${result} 404 Should contain ${result} Not Found Head directory objects - ${obs-bucket} = Create obs bucket - ${result} = Execute AWSS3APICli and checkrc put-object --bucket ${obs-bucket} --key ${PREFIX}/mydir/ --body /tmp/testfile 0 - ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${obs-bucket} --key ${PREFIX}/mydir 255 + Pass Execution If '${BUCKET_LAYOUT}' == 'FILE_SYSTEM_OPTIMIZED' does not apply to FSO buckets + ${result} = Execute AWSS3APICli and checkrc put-object --bucket ${BUCKET} --key ${PREFIX}/mydir/ --body /tmp/testfile 0 + ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/mydir 255 Should contain ${result} 404 Should contain ${result} Not Found - ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${obs-bucket} --key ${PREFIX}/mydir/ 0 + ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/mydir/ 0 Head non existing key ${result} = Execute AWSS3APICli and checkrc head-object --bucket ${BUCKET} --key ${PREFIX}/non-existent 255 Should contain ${result} 404 - Should contain ${result} Not Found \ No newline at end of file + Should contain ${result} Not Found diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot index 1b44360d6bed..05348fbcba4b 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot @@ -44,6 +44,8 @@ Put object to s3 Get object from s3 ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/f1 /tmp/testfile.result Compare files /tmp/testfile /tmp/testfile.result + ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/zerobyte /tmp/zerobyte.result + Compare files /tmp/zerobyte /tmp/zerobyte.result #This test depends on the previous test case. 
Can't be executed alone Get object with wrong signature @@ -151,34 +153,14 @@ Incorrect values for end and start offset Should Be Equal ${expectedData} ${actualData} Zero byte file - ${result} = Execute ozone sh bucket info /s3v/${BUCKET} - ${linked} = Execute echo '${result}' | jq -j '.sourceVolume,"/",.sourceBucket' - ${eval} = Evaluate "source" in """${linked}""" - IF ${eval} == ${True} - ${result} = Execute ozone sh bucket info ${linked} - END - ${fsolayout} = Evaluate "OPTIMIZED" in """${result}""" - ${result} = Execute AWSS3APICli and checkrc get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/zerobyte --range bytes=0-0 /tmp/testfile2.result 255 - IF ${fsolayout} == ${True} - Should contain ${result} NoSuchKey - ELSE Should contain ${result} InvalidRange - END ${result} = Execute AWSS3APICli and checkrc get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/zerobyte --range bytes=0-1 /tmp/testfile2.result 255 - IF ${fsolayout} == ${True} - Should contain ${result} NoSuchKey - ELSE Should contain ${result} InvalidRange - END ${result} = Execute AWSS3APICli and checkrc get-object --bucket ${BUCKET} --key ${PREFIX}/putobject/key=value/zerobyte --range bytes=0-10000 /tmp/testfile2.result 255 - IF ${fsolayout} == ${True} - Should contain ${result} NoSuchKey - ELSE Should contain ${result} InvalidRange - END Create file with user defined metadata Execute echo "Randomtext" > /tmp/testfile2 @@ -257,4 +239,4 @@ Create key twice with different content and expect different ETags # clean up Execute AWSS3Cli rm s3://${BUCKET}/test_key_to_check_etag_differences Execute rm -rf /tmp/file1 - Execute rm -rf /tmp/file2 \ No newline at end of file + Execute rm -rf /tmp/file2 diff --git a/hadoop-ozone/httpfsgateway/pom.xml b/hadoop-ozone/httpfsgateway/pom.xml index 6ff30c433cda..582e6c1034a7 100644 --- a/hadoop-ozone/httpfsgateway/pom.xml +++ b/hadoop-ozone/httpfsgateway/pom.xml @@ -201,7 +201,7 @@ org.apache.maven.plugins maven-eclipse-plugin - 2.6 + 2.10 org.apache.maven.plugins diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java index a9fc2710ce31..430ec4e03fd2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java @@ -110,6 +110,7 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.assertHasPathCapabilities; import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE; import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER; +import static org.apache.hadoop.fs.ozone.OzoneFileSystemTests.createKeyWithECReplicationConfiguration; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; @@ -427,6 +428,19 @@ public void testCreateDoesNotAddParentDirKeys() throws Exception { assertTrue(fs.getFileStatus(parent).isDirectory(), "Parent directory does not appear to be a directory"); } + @Test + public void testCreateKeyWithECReplicationConfig() throws Exception { + Path root = new Path("/" + volumeName + "/" + bucketName); + Path testKeyPath = new Path(root, "testKey"); + createKeyWithECReplicationConfiguration(cluster.getConf(), 
testKeyPath); + + OzoneKeyDetails key = getKey(testKeyPath, false); + assertEquals(HddsProtos.ReplicationType.EC, + key.getReplicationConfig().getReplicationType()); + assertEquals("rs-3-2-1024k", + key.getReplicationConfig().getReplication()); + } + @Test public void testDeleteCreatesFakeParentDir() throws Exception { Path grandparent = new Path("/testDeleteCreatesFakeParentDir"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java index 80188e052afc..a092890ae2a4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java @@ -115,6 +115,7 @@ import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX; import static org.apache.hadoop.fs.contract.ContractTestUtils.assertHasPathCapabilities; import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE; +import static org.apache.hadoop.fs.ozone.OzoneFileSystemTests.createKeyWithECReplicationConfiguration; import static org.apache.hadoop.hdds.client.ECReplicationConfig.EcCodec.RS; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; @@ -326,6 +327,19 @@ void testCreateDoesNotAddParentDirKeys() throws Exception { fs.delete(grandparent, true); } + @Test + public void testCreateKeyWithECReplicationConfig() throws Exception { + String testKeyName = "testKey"; + Path testKeyPath = new Path(bucketPath, testKeyName); + createKeyWithECReplicationConfiguration(cluster.getConf(), testKeyPath); + + OzoneKeyDetails key = getKey(testKeyPath, false); + assertEquals(HddsProtos.ReplicationType.EC, + key.getReplicationConfig().getReplicationType()); + assertEquals("rs-3-2-1024k", + key.getReplicationConfig().getReplication()); + } + @Test void testListStatusWithIntermediateDirWithECEnabled() throws Exception { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java index d729251267ea..47c584e048a6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/OzoneFileSystemTests.java @@ -21,6 +21,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.contract.ContractTestUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import java.io.IOException; @@ -30,6 +31,8 @@ import java.util.TreeSet; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_LISTING_PAGE_SIZE; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION_TYPE; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -95,4 +98,17 @@ private static void listStatusIterator(FileSystem subject, assertEquals(total, iCount); } + + static void createKeyWithECReplicationConfiguration(OzoneConfiguration inputConf, Path keyPath) + throws IOException { + OzoneConfiguration conf = new 
OzoneConfiguration(inputConf); + conf.set(OZONE_REPLICATION, "rs-3-2-1024k"); + conf.set(OZONE_REPLICATION_TYPE, "EC"); + URI uri = FileSystem.getDefaultUri(conf); + conf.setBoolean( + String.format("fs.%s.impl.disable.cache", uri.getScheme()), true); + try (FileSystem fileSystem = FileSystem.get(uri, conf)) { + ContractTestUtils.touch(fileSystem, keyPath); + } + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java index 015eaa2916a6..daa433f68f8a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java @@ -33,27 +33,27 @@ import java.util.stream.Stream; import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.crypto.CryptoCodec; import org.apache.hadoop.crypto.CryptoOutputStream; import org.apache.hadoop.crypto.Encryptor; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.hdds.conf.StorageUnit; +import org.apache.hadoop.hdds.scm.storage.BlockOutputStream; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.storage.BlockInputStream; -import org.apache.hadoop.hdds.scm.storage.BlockOutputStream; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -73,6 +73,9 @@ import org.apache.hadoop.ozone.client.io.KeyOutputStream; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; +import org.apache.hadoop.ozone.container.keyvalue.impl.BlockManagerImpl; +import org.apache.hadoop.ozone.container.metadata.AbstractDatanodeStore; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; @@ -81,9 +84,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequest; -import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequestWithFSO; -import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; import org.apache.ozone.test.GenericTestUtils; @@ -156,6 +156,7 @@ public static void init() throws Exception { // Reduce KeyDeletingService interval 
CONF.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); CONF.setBoolean("ozone.client.incremental.chunk.list", true); + CONF.setBoolean("ozone.client.stream.putblock.piggybacking", true); CONF.setBoolean(OZONE_CHUNK_LIST_INCREMENTAL, true); ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(BLOCK_SIZE) @@ -177,11 +178,11 @@ public static void init() throws Exception { bucket = TestDataUtil.createVolumeAndBucket(client, layout); // Enable DEBUG level logging for relevant classes - GenericTestUtils.setLogLevel(OMKeyRequest.LOG, Level.DEBUG); - GenericTestUtils.setLogLevel(OMKeyCommitRequest.LOG, Level.DEBUG); - GenericTestUtils.setLogLevel(OMKeyCommitRequestWithFSO.LOG, Level.DEBUG); + GenericTestUtils.setLogLevel(BlockManagerImpl.LOG, Level.DEBUG); + GenericTestUtils.setLogLevel(AbstractDatanodeStore.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(BlockOutputStream.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(BlockInputStream.LOG, Level.DEBUG); + GenericTestUtils.setLogLevel(KeyValueHandler.LOG, Level.DEBUG); } @AfterAll @@ -549,7 +550,8 @@ static void runTestHSync(FileSystem fs, Path file, break; } for (int i = 0; i < n; i++) { - assertEquals(data[offset + i], buffer[i]); + assertEquals(data[offset + i], buffer[i], + "expected at offset " + offset + " i=" + i); } offset += n; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java index 79c937ceb58b..99095f55b008 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java @@ -20,13 +20,17 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; + +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; @@ -174,6 +178,39 @@ public XceiverClientReply sendCommandAsync( assertEquals(1, seenDNs.size()); } + @Test + public void testPrimaryReadFromNormalDatanode() + throws IOException { + final List seenDNs = new ArrayList<>(); + for (int i = 0; i < 100; i++) { + Pipeline randomPipeline = MockPipeline.createRatisPipeline(); + int nodeCount = randomPipeline.getNodes().size(); + assertThat(nodeCount).isGreaterThan(1); + randomPipeline.getNodes().forEach( + node -> assertEquals(NodeOperationalState.IN_SERVICE, node.getPersistedOpState())); + + randomPipeline.getNodes().get( + RandomUtils.nextInt(0, nodeCount)).setPersistedOpState(NodeOperationalState.IN_MAINTENANCE); + randomPipeline.getNodes().get( + RandomUtils.nextInt(0, 
nodeCount)).setPersistedOpState(NodeOperationalState.IN_MAINTENANCE); + try (XceiverClientGrpc client = new XceiverClientGrpc(randomPipeline, conf) { + @Override + public XceiverClientReply sendCommandAsync( + ContainerProtos.ContainerCommandRequestProto request, + DatanodeDetails dn) { + seenDNs.add(dn); + return buildValidResponse(); + } + }) { + invokeXceiverClientGetBlock(client); + } catch (IOException e) { + e.printStackTrace(); + } + // Always the IN_SERVICE datanode will be read first + assertEquals(NodeOperationalState.IN_SERVICE, seenDNs.get(0).getPersistedOpState()); + } + } + @Test public void testConnectionReusedAfterGetBlock() throws IOException { // With a new Client, make 100 calls. On each call, ensure that only one diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java index 3f7267b4fd9c..fb4cb3ba4cdd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestDecommissionAndMaintenance.java @@ -211,7 +211,7 @@ public void testNodeWithOpenPipelineCanBeDecommissionedAndRecommissioned() final DatanodeDetails toDecommission = nm.getNodeByUuid(dnID.toString()); scmClient.decommissionNodes(Arrays.asList( - getDNHostAndPort(toDecommission))); + getDNHostAndPort(toDecommission)), false); waitForDnToReachOpState(nm, toDecommission, DECOMMISSIONED); // Ensure one node transitioned to DECOMMISSIONING @@ -265,7 +265,7 @@ public void testDecommissioningNodesCompleteDecommissionOnSCMRestart() waitForAndReturnContainer(ratisRepConfig, 3); final DatanodeDetails dn = getOneDNHostingReplica(getContainerReplicas(container)); - scmClient.decommissionNodes(Arrays.asList(getDNHostAndPort(dn))); + scmClient.decommissionNodes(Arrays.asList(getDNHostAndPort(dn)), false); // Wait for the state to be persisted on the DN so it can report it on // restart of SCM. 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index 161bf3c3b97d..9c76c0ec0c79 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -275,7 +275,7 @@ abstract class Builder { protected boolean includeRecon = false; protected int dnInitialVersion = DatanodeVersion.FUTURE_VERSION.toProtoValue(); - protected int dnCurrentVersion = DatanodeVersion.FUTURE_VERSION.toProtoValue(); + protected int dnCurrentVersion = DatanodeVersion.COMBINED_PUTBLOCK_WRITECHUNK_RPC.toProtoValue(); protected int numOfDatanodes = 3; protected boolean startDataNodes = true; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java index 79ea4c593c40..75264d2e7a67 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.container.balancer.ContainerBalancerConfiguration; import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity; import org.apache.ozone.test.tag.Unhealthy; @@ -31,16 +32,16 @@ import org.junit.jupiter.api.Timeout; import java.util.Optional; -import java.util.concurrent.TimeUnit; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * This class tests container balancer operations * from cblock clients. 
*/ -@Timeout(value = 300, unit = TimeUnit.MILLISECONDS) +@Timeout(value = 300) public class TestContainerBalancerOperations { private static ScmClient containerBalancerClient; @@ -83,11 +84,18 @@ public void testContainerBalancerCLIOperations() throws Exception { Optional maxSizeToMovePerIterationInGB = Optional.of(1L); Optional maxSizeEnteringTargetInGB = Optional.of(1L); Optional maxSizeLeavingSourceInGB = Optional.of(1L); - + Optional balancingInterval = Optional.of(1); + Optional moveTimeout = Optional.of(1); + Optional moveReplicationTimeout = Optional.of(1); + Optional networkTopologyEnable = Optional.of(false); + Optional includeNodes = Optional.of(""); + Optional excludeNodes = Optional.of(""); containerBalancerClient.startContainerBalancer(threshold, iterations, maxDatanodesPercentageToInvolvePerIteration, maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, - maxSizeLeavingSourceInGB); + maxSizeLeavingSourceInGB, balancingInterval, moveTimeout, + moveReplicationTimeout, networkTopologyEnable, includeNodes, + excludeNodes); running = containerBalancerClient.getContainerBalancerStatus(); assertTrue(running); @@ -105,7 +113,9 @@ public void testContainerBalancerCLIOperations() throws Exception { containerBalancerClient.startContainerBalancer(threshold, iterations, maxDatanodesPercentageToInvolvePerIteration, maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, - maxSizeLeavingSourceInGB); + maxSizeLeavingSourceInGB, balancingInterval, moveTimeout, + moveReplicationTimeout, networkTopologyEnable, includeNodes, + excludeNodes); running = containerBalancerClient.getContainerBalancerStatus(); assertTrue(running); @@ -115,4 +125,61 @@ public void testContainerBalancerCLIOperations() throws Exception { } //TODO: add more acceptance after container balancer is fully completed + + /** + * Test if Container Balancer CLI overrides default configs and + * options specified in the configs. 
+ */ + @Test + public void testIfCBCLIOverridesConfigs() throws Exception { + //Configurations added in ozone-site.xml + ozoneConf.setInt("hdds.container.balancer.iterations", 40); + ozoneConf.setInt("hdds.container.balancer.datanodes.involved.max.percentage.per.iteration", 30); + + boolean running = containerBalancerClient.getContainerBalancerStatus(); + assertFalse(running); + + //CLI option for iterations and balancing interval is not passed + Optional iterations = Optional.empty(); + Optional balancingInterval = Optional.empty(); + + //CLI options are passed + Optional threshold = Optional.of(0.1); + Optional maxDatanodesPercentageToInvolvePerIteration = + Optional.of(100); + Optional maxSizeToMovePerIterationInGB = Optional.of(1L); + Optional maxSizeEnteringTargetInGB = Optional.of(6L); + Optional maxSizeLeavingSourceInGB = Optional.of(6L); + Optional moveTimeout = Optional.of(65); + Optional moveReplicationTimeout = Optional.of(55); + Optional networkTopologyEnable = Optional.of(true); + Optional includeNodes = Optional.of(""); + Optional excludeNodes = Optional.of(""); + containerBalancerClient.startContainerBalancer(threshold, iterations, + maxDatanodesPercentageToInvolvePerIteration, + maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, + maxSizeLeavingSourceInGB, balancingInterval, moveTimeout, + moveReplicationTimeout, networkTopologyEnable, includeNodes, + excludeNodes); + running = containerBalancerClient.getContainerBalancerStatus(); + assertTrue(running); + + ContainerBalancerConfiguration config = cluster.getStorageContainerManager().getContainerBalancer().getConfig(); + + //If config value is not added in ozone-site.xml and CLI option is not passed + //then it takes the default configuration + assertEquals(70, config.getBalancingInterval().toMinutes()); + + //If config value is added in ozone-site.xml and CLI option is not passed + //then it takes the value from ozone-site.xml + assertEquals(40, config.getIterations()); + + //If config value is added in ozone-site.xml and CLI option is passed + //then it takes the CLI option. 
+ assertEquals(100, config.getMaxDatanodesPercentageToInvolvePerIteration()); + + containerBalancerClient.stopContainerBalancer(); + running = containerBalancerClient.getContainerBalancerStatus(); + assertFalse(running); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java index e15e1e4d63ba..ce5432739cbd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java @@ -80,8 +80,13 @@ class TestBlockOutputStream { static MiniOzoneCluster createCluster() throws IOException, InterruptedException, TimeoutException { - OzoneConfiguration conf = new OzoneConfiguration(); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumType(ChecksumType.NONE); + clientConfig.setStreamBufferFlushDelay(false); + clientConfig.setEnablePutblockPiggybacking(true); + conf.setFromObject(clientConfig); + conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, TimeUnit.SECONDS); conf.setQuietMode(false); @@ -397,7 +402,7 @@ void testWriteMoreThanChunkSize(boolean flushDelay) throws Exception { key.flush(); assertEquals(writeChunkCount + 2, metrics.getContainerOpCountMetrics(WriteChunk)); - assertEquals(putBlockCount + 1, + assertEquals(putBlockCount, metrics.getContainerOpCountMetrics(PutBlock)); assertEquals(pendingWriteChunkCount, metrics.getPendingContainerOpCountMetrics(WriteChunk)); @@ -426,9 +431,9 @@ void testWriteMoreThanChunkSize(boolean flushDelay) throws Exception { metrics.getPendingContainerOpCountMetrics(PutBlock)); assertEquals(writeChunkCount + 2, metrics.getContainerOpCountMetrics(WriteChunk)); - assertEquals(putBlockCount + 2, + assertEquals(putBlockCount + 1, metrics.getContainerOpCountMetrics(PutBlock)); - assertEquals(totalOpCount + 4, metrics.getTotalOpCount()); + assertEquals(totalOpCount + 3, metrics.getTotalOpCount()); assertEquals(0, keyOutputStream.getStreamEntries().size()); validateData(keyName, data1, client.getObjectStore(), VOLUME, BUCKET); @@ -493,9 +498,9 @@ void testWriteMoreThanFlushSize(boolean flushDelay) throws Exception { metrics.getPendingContainerOpCountMetrics(PutBlock)); assertEquals(writeChunkCount + 3, metrics.getContainerOpCountMetrics(WriteChunk)); - assertEquals(putBlockCount + 2, + assertEquals(putBlockCount + 1, metrics.getContainerOpCountMetrics(PutBlock)); - assertEquals(totalOpCount + 5, metrics.getTotalOpCount()); + assertEquals(totalOpCount + 4, metrics.getTotalOpCount()); assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); // make sure the bufferPool is empty assertEquals(0, blockOutputStream.getBufferPool().computeBufferData()); @@ -686,9 +691,9 @@ void testWriteMoreThanMaxFlushSize(boolean flushDelay) throws Exception { assertEquals(writeChunkCount + 5, metrics.getContainerOpCountMetrics(WriteChunk)); // The previous flush did not trigger any action with flushDelay enabled - assertEquals(putBlockCount + (flushDelay ? 3 : 4), + assertEquals(putBlockCount + (flushDelay ? 2 : 3), metrics.getContainerOpCountMetrics(PutBlock)); - assertEquals(totalOpCount + (flushDelay ? 8 : 9), + assertEquals(totalOpCount + (flushDelay ? 
7 : 8), metrics.getTotalOpCount()); assertEquals(dataLength, blockOutputStream.getTotalAckDataLength()); assertEquals(0, blockOutputStream.getCommitIndex2flushedDataMap().size()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java index 44303ed2ff23..5288bcb3cf21 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java @@ -284,6 +284,17 @@ static void createAndVerifyKeyData(OzoneBucket bucket) throws Exception { out.write(value.getBytes(StandardCharsets.UTF_8)); } verifyKeyData(bucket, keyName, value, testStartTime); + OzoneKeyDetails key1 = bucket.getKey(keyName); + + // Overwrite the key + try (OzoneOutputStream out = bucket.createKey(keyName, + value.getBytes(StandardCharsets.UTF_8).length, + ReplicationConfig.fromTypeAndFactor(RATIS, ONE), + new HashMap<>())) { + out.write(value.getBytes(StandardCharsets.UTF_8)); + } + OzoneKeyDetails key2 = bucket.getKey(keyName); + assertNotEquals(key1.getFileEncryptionInfo().toString(), key2.getFileEncryptionInfo().toString()); } static void createAndVerifyFileSystemData( @@ -325,7 +336,6 @@ static void verifyKeyData(OzoneBucket bucket, String keyName, String value, len = is.read(fileContent); } - assertEquals(len, value.length()); assertTrue(verifyRatisReplication(bucket.getVolumeName(), bucket.getName(), keyName, RATIS, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java index febb6fd41c2a..4ecbd08a41b0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java @@ -56,6 +56,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; import org.apache.hadoop.ozone.om.ratis.OzoneManagerStateMachine; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; @@ -323,7 +324,11 @@ public void testParallelDeleteBucketAndCreateKey() throws IOException, omSM.getHandler().setInjector(injector); thread1.start(); thread2.start(); - Thread.sleep(2000); + // Wait long enough for createKey's preExecute to finish executing + GenericTestUtils.waitFor(() -> { + return getCluster().getOzoneManager().getOmServerProtocol().getLastRequestToSubmit().getCmdType().equals( + Type.CreateKey); + }, 100, 10000); injector.resume(); try { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java index 51943a2e8d23..a4a5701f5491 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java @@ -18,6 
+18,7 @@ package org.apache.hadoop.ozone.container.metrics; import java.io.File; +import java.nio.file.Path; import java.util.List; import java.util.Map; import java.util.UUID; @@ -59,12 +60,15 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.io.TempDir; /** * Test for metrics published by storage containers. */ @Timeout(300) public class TestContainerMetrics { + @TempDir + private Path tempDir; @Test public void testContainerMetrics() throws Exception { @@ -105,6 +109,8 @@ public void testContainerMetrics() throws Exception { } HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); dispatcher.setClusterId(UUID.randomUUID().toString()); server = new XceiverServerGrpc(datanodeDetails, conf, dispatcher, null); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 1b8bae0d03a8..1c5da04c0a3e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -30,11 +30,13 @@ import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; import java.io.File; +import java.nio.file.Path; import java.util.HashMap; import java.util.LinkedList; import java.util.List; @@ -55,6 +57,8 @@ */ @Timeout(300) public class TestOzoneContainer { + @TempDir + private Path tempDir; @Test public void testCreateOzoneContainer( @@ -75,6 +79,8 @@ public void testCreateOzoneContainer( DatanodeDetails datanodeDetails = randomDatanodeDetails(); container = ContainerTestUtils .getOzoneContainer(datanodeDetails, conf); + StorageVolumeUtil.getHddsVolumesList(container.getVolumeSet().getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); //Set clusterId and manually start ozone container. 
container.start(UUID.randomUUID().toString()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java index 73910ef00ff1..b05c547b625d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java @@ -38,6 +38,8 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; +import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -303,6 +305,9 @@ private OzoneContainer createAndStartOzoneContainerInstance() { StateContext stateContext = ContainerTestUtils.getMockContext(dn, conf); container = new OzoneContainer( dn, conf, stateContext, caClient, keyClient); + MutableVolumeSet volumeSet = container.getVolumeSet(); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempFolder.toFile())); container.start(clusterID); } catch (Throwable e) { if (container != null) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java index 4f24f8e6c320..5585696dfc31 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java @@ -38,6 +38,8 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.SecretKeyTestClient; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; +import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.ozone.test.GenericTestUtils; @@ -137,6 +139,9 @@ void testCreateOzoneContainer(boolean requireToken, boolean hasToken, DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); container = new OzoneContainer(dn, conf, ContainerTestUtils .getMockContext(dn, conf), caClient, secretKeyClient); + MutableVolumeSet volumeSet = container.getVolumeSet(); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempFolder.toFile())); //Set scmId and manually start ozone container. 
container.start(UUID.randomUUID().toString()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java index c05f55bd4a74..630c4d314959 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java @@ -20,6 +20,7 @@ import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; @@ -57,6 +58,7 @@ import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; @@ -69,6 +71,7 @@ import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; @@ -84,6 +87,8 @@ public class TestContainerServer { .getAbsolutePath() + File.separator; private static final OzoneConfiguration CONF = new OzoneConfiguration(); private static CertificateClient caClient; + @TempDir + private Path tempDir; @BeforeAll public static void setup() { @@ -182,7 +187,7 @@ static void runTestClientServer( } } - private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, + private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException { ContainerSet containerSet = new ContainerSet(1000); @@ -192,6 +197,8 @@ private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, conf.set(OZONE_METADATA_DIRS, TEST_DIR); VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); StateContext context = ContainerTestUtils.getMockContext(dd, conf); ContainerMetrics metrics = ContainerMetrics.create(conf); Map handlers = Maps.newHashMap(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java index e0522ac6e91d..8044685bb747 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java @@ -20,6 +20,7 @@ import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.EnumSet; @@ -65,6 +66,7 @@ import 
org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; @@ -103,6 +105,7 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; import static org.apache.ratis.rpc.SupportedRpcType.GRPC; import static org.assertj.core.api.Assertions.assertThat; @@ -115,6 +118,8 @@ * Test Container servers when security is enabled. */ public class TestSecureContainerServer { + @TempDir + private Path tempDir; private static final String TEST_DIR = GenericTestUtils.getTestDir("dfs").getAbsolutePath() + File.separator; private static final OzoneConfiguration CONF = new OzoneConfiguration(); @@ -166,7 +171,7 @@ public void testClientServer() throws Exception { hddsDispatcher, caClient), (dn, p) -> { }, (p) -> { }); } - private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, + private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException { ContainerSet containerSet = new ContainerSet(1000); conf.set(HDDS_DATANODE_DIR_KEY, @@ -175,6 +180,8 @@ private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, conf.set(OZONE_METADATA_DIRS, TEST_DIR); VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); + StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList()) + .forEach(hddsVolume -> hddsVolume.setDbParentDir(tempDir.toFile())); StateContext context = ContainerTestUtils.getMockContext(dd, conf); ContainerMetrics metrics = ContainerMetrics.create(conf); Map handlers = Maps.newHashMap(); @@ -199,7 +206,7 @@ public void testClientServerRatisGrpc() throws Exception { runTestClientServerRatis(GRPC, 3); } - static XceiverServerRatis newXceiverServerRatis( + XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); @@ -216,12 +223,12 @@ static XceiverServerRatis newXceiverServerRatis( caClient, null); } - private static void runTestClientServerRatis(RpcType rpc, int numNodes) + private void runTestClientServerRatis(RpcType rpc, int numNodes) throws Exception { runTestClientServer(numNodes, (pipeline, conf) -> RatisTestHelper.initRatisConf(rpc, conf), XceiverClientRatis::newXceiverClientRatis, - TestSecureContainerServer::newXceiverServerRatis, + this::newXceiverServerRatis, (dn, p) -> RatisTestHelper.initXceiverServerRatis(rpc, dn, p), (p) -> { }); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java index 2ae69dc3c96f..e773bf7ed7f2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java @@ -607,18 +607,40 @@ private ContainerProtos.DatanodeBlockID createBlockId(long containerId, private void mockWriteChunkResponse(XceiverClientSpi mockDnProtocol) throws IOException, ExecutionException, InterruptedException { - ContainerCommandResponseProto writeResponse = - ContainerCommandResponseProto.newBuilder() - .setWriteChunk(WriteChunkResponseProto.newBuilder().build()) - .setResult(Result.SUCCESS) - .setCmdType(Type.WriteChunk) - .build(); doAnswer(invocation -> - new XceiverClientReply(completedFuture(writeResponse))) + new XceiverClientReply( + completedFuture( + createWriteChunkResponse( + (ContainerCommandRequestProto)invocation.getArgument(0))))) .when(mockDnProtocol) .sendCommandAsync(argThat(matchCmd(Type.WriteChunk))); } + ContainerCommandResponseProto createWriteChunkResponse( + ContainerCommandRequestProto request) { + ContainerProtos.WriteChunkRequestProto writeChunk = request.getWriteChunk(); + + WriteChunkResponseProto.Builder builder = + WriteChunkResponseProto.newBuilder(); + if (writeChunk.hasBlock()) { + ContainerProtos.BlockData + blockData = writeChunk.getBlock().getBlockData(); + + GetCommittedBlockLengthResponseProto response = + GetCommittedBlockLengthResponseProto.newBuilder() + .setBlockID(blockData.getBlockID()) + .setBlockLength(blockData.getSize()) + .build(); + + builder.setCommittedBlockLength(response); + } + return ContainerCommandResponseProto.newBuilder() + .setWriteChunk(builder.build()) + .setResult(Result.SUCCESS) + .setCmdType(Type.WriteChunk) + .build(); + } + private ArgumentMatcher matchCmd(Type type) { return argument -> argument != null && argument.getCmdType() == type; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotDisabled.java similarity index 97% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotDisabled.java index cb49d273e78d..95a24b8ca99c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotDisabled.java @@ -15,7 +15,7 @@ * the License. 
*/ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.junit.jupiter.api.AfterAll; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotDisabledRestart.java similarity index 97% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotDisabledRestart.java index 7f325f6c3e0d..91ad9eb8fe55 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotDisabledRestart.java @@ -15,7 +15,7 @@ * the License. */ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -25,6 +25,7 @@ import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java similarity index 99% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java index 055ddeb20c9a..42b43fa03a3b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java @@ -15,7 +15,7 @@ * the License. 
*/ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; @@ -41,12 +41,15 @@ import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.om.KeyManagerImpl; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.ozone.om.snapshot.TestOmSnapshot; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystemFso.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFso.java similarity index 95% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystemFso.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFso.java index 66d395160201..47bdd8f3bd52 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystemFso.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemFso.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.junit.jupiter.api.Timeout; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystemLegacy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacy.java similarity index 95% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystemLegacy.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacy.java index 86682b2cbc19..b8d81c31cf5f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystemLegacy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystemLegacy.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.junit.jupiter.api.Timeout; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHASnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java similarity index 97% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHASnapshot.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java index 14826a18616f..341b5b78c603 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHASnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -30,6 +30,9 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; import org.junit.jupiter.api.AfterAll; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java similarity index 99% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java index 54ee0ed53796..2f7e1bd5a9d6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java @@ -14,7 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. 
*/ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; @@ -34,13 +34,16 @@ import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.client.io.OzoneInputStream; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OmFailoverProxyUtil; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMLeaderNotReadyException; import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServerConfig; -import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; import org.apache.ozone.compaction.log.CompactionLogEntry; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java similarity index 98% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java index d4bf911676ab..1c98ce89af59 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingService.java @@ -17,7 +17,7 @@ * */ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; @@ -32,13 +32,17 @@ import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; -import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java similarity index 99% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java rename to 
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java index 893e248d88c5..fac6764767f6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDirectoryCleaningService.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataOutputStream; @@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java index 7691704d924c..9fcb82fd4b6a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAndAdminContainerCLI.java @@ -246,7 +246,7 @@ void testNodesInDecommissionOrMaintenance( TestNodeUtil.getDNHostAndPort(nodeToGoOffline1)), 0); } else { scmClient.decommissionNodes(Collections.singletonList( - TestNodeUtil.getDNHostAndPort(nodeToGoOffline1))); + TestNodeUtil.getDNHostAndPort(nodeToGoOffline1)), false); } TestNodeUtil.waitForDnToReachOpState(scmNodeManager, @@ -273,7 +273,7 @@ void testNodesInDecommissionOrMaintenance( TestNodeUtil.getDNHostAndPort(nodeToGoOffline2)), 0); } else { scmClient.decommissionNodes(Collections.singletonList( - TestNodeUtil.getDNHostAndPort(nodeToGoOffline2))); + TestNodeUtil.getDNHostAndPort(nodeToGoOffline2)), false); } TestNodeUtil.waitForDnToReachOpState(scmNodeManager, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java index c0e5acc20e7a..d52b0e99b2fb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java @@ -38,6 +38,7 @@ import java.util.Collections; import java.util.List; import java.util.Optional; +import java.util.HashMap; import java.util.concurrent.TimeUnit; import org.apache.hadoop.hdds.client.BlockID; @@ -46,6 +47,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; @@ -65,8 +67,6 @@ import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.util.EntityUtils; -import com.google.gson.Gson; -import 
com.google.gson.internal.LinkedTreeMap; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; @@ -381,16 +381,23 @@ private static OmKeyLocationInfoGroup getOmKeyLocationInfoGroup() { private long getReconTaskAttributeFromJson(String taskStatusResponse, String taskName, - String entityAttribute) { - ArrayList taskStatusList = new Gson() - .fromJson(taskStatusResponse, ArrayList.class); - Optional taskEntity = - taskStatusList - .stream() - .filter(task -> task.get("taskName").equals(taskName)) - .findFirst(); - assertTrue(taskEntity.isPresent()); - return (long) (double) taskEntity.get().get(entityAttribute); + String entityAttribute) + throws IOException { + List> taskStatusList = + JsonUtils.readTreeAsListOfMaps(taskStatusResponse); + + // Stream through the list to find the task entity matching the taskName + Optional> taskEntity = taskStatusList.stream() + .filter(task -> taskName.equals(task.get("taskName"))) + .findFirst(); + + if (taskEntity.isPresent()) { + Number number = (Number) taskEntity.get().get(entityAttribute); + return number.longValue(); + } else { + throw new IOException( + "Task entity for task name " + taskName + " not found"); + } } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index 27945ccb96f1..085858f71179 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.io.PrintStream; import java.io.UnsupportedEncodingException; +import java.net.URI; import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; @@ -78,9 +79,9 @@ import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY; import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_LISTING_PAGE_SIZE; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; - import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_EMPTY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; @@ -890,6 +891,34 @@ public void testLinkBucketOrphan() throws Exception { } } + @Test + @Timeout(10) + public void testListBucket() throws Exception { + final String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + omServiceId; + OzoneConfiguration clientConf = + getClientConfForOFS(hostPrefix, cluster.getConf()); + int pageSize = 20; + clientConf.setInt(OZONE_FS_LISTING_PAGE_SIZE, pageSize); + URI uri = FileSystem.getDefaultUri(clientConf); + clientConf.setBoolean(String.format("fs.%s.impl.disable.cache", uri.getScheme()), true); + OzoneFsShell shell = new OzoneFsShell(clientConf); + + String volName = "testlistbucket"; + int numBuckets = pageSize; + + try { + generateBuckets("/" + volName, numBuckets); + out.reset(); + int res = ToolRunner.run(shell, new String[]{"-ls", "/" + volName}); + assertEquals(0, res); + String r = out.toString(DEFAULT_ENCODING); + assertThat(r).matches("(?s)^Found " + numBuckets + " 
items.*"); + + } finally { + shell.close(); + } + } + @Test public void testDeleteTrashNoSkipTrash() throws Exception { diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java index 452275aa69fc..604d550f7b84 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java @@ -26,7 +26,6 @@ import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.PersistedPrefixInfo; import java.util.ArrayList; -import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -47,16 +46,13 @@ public static Codec getCodec() { return CODEC; } - private String name; + private final String name; private final List acls; - public OmPrefixInfo(String name, List acls, - Map metadata, long objectId, long updateId) { - this.name = name; - this.acls = acls; - setMetadata(metadata); - setObjectID(objectId); - setUpdateID(updateId); + private OmPrefixInfo(Builder b) { + super(b); + name = b.name; + acls = new ArrayList<>(b.acls); } /** @@ -99,17 +95,19 @@ public static OmPrefixInfo.Builder newBuilder() { /** * Builder for OmPrefixInfo. */ - public static class Builder { + public static class Builder extends WithObjectID.Builder { private String name; - private List acls; - private Map metadata; - private long objectID; - private long updateID; + private final List acls; public Builder() { //Default values this.acls = new LinkedList<>(); - this.metadata = new HashMap<>(); + } + + public Builder(OmPrefixInfo obj) { + super(obj); + setName(obj.name); + acls = new ArrayList<>(obj.getAcls()); } public Builder setAcls(List listOfAcls) { @@ -124,26 +122,28 @@ public Builder setName(String n) { return this; } + @Override public OmPrefixInfo.Builder addMetadata(String key, String value) { - metadata.put(key, value); + super.addMetadata(key, value); return this; } + @Override public OmPrefixInfo.Builder addAllMetadata( Map additionalMetadata) { - if (additionalMetadata != null) { - metadata.putAll(additionalMetadata); - } + super.addAllMetadata(additionalMetadata); return this; } + @Override public Builder setObjectID(long obId) { - this.objectID = obId; + super.setObjectID(obId); return this; } + @Override public Builder setUpdateID(long id) { - this.updateID = id; + super.setUpdateID(id); return this; } @@ -153,7 +153,7 @@ public Builder setUpdateID(long id) { */ public OmPrefixInfo build() { Preconditions.checkNotNull(name); - return new OmPrefixInfo(name, acls, metadata, objectID, updateID); + return new OmPrefixInfo(this); } } @@ -234,11 +234,11 @@ public String toString() { * Return a new copy of the object. 
*/ public OmPrefixInfo copyObject() { - Map metadataList = new HashMap<>(); - if (getMetadata() != null) { - metadataList.putAll(getMetadata()); - } - return new OmPrefixInfo(name, new ArrayList<>(acls), metadataList, getObjectID(), getUpdateID()); + return toBuilder().build(); + } + + public Builder toBuilder() { + return new Builder(this); } } diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java index 8070c93cd654..07eed9a53997 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java @@ -27,7 +27,6 @@ import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -74,10 +73,14 @@ private OmPrefixInfo getOmPrefixInfoForTest(String path, String identityString, IAccessAuthorizer.ACLType aclType, OzoneAcl.AclScope scope) { - return new OmPrefixInfo(path, - new ArrayList<>(Collections.singletonList(new OzoneAcl( + return OmPrefixInfo.newBuilder() + .setName(path) + .setAcls(new ArrayList<>(Collections.singletonList(new OzoneAcl( identityType, identityString, - scope, aclType))), new HashMap<>(), 10, 100); + scope, aclType)))) + .setObjectID(10) + .setUpdateID(100) + .build(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java index 2fbbbe153040..1c0ec78cfb22 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java @@ -26,6 +26,7 @@ import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MutableCounterLong; +import org.apache.hadoop.metrics2.lib.MutableGaugeInt; /** * This class is for maintaining Ozone Manager statistics. 
@@ -74,7 +75,10 @@ public class OMMetrics implements OmMetadataReaderMetrics { private @Metric MutableCounterLong numSnapshotLists; private @Metric MutableCounterLong numSnapshotDiffJobs; private @Metric MutableCounterLong numSnapshotInfos; + private @Metric MutableCounterLong numSnapshotPurges; + private @Metric MutableCounterLong numSnapshotSetProperties; + private @Metric MutableGaugeInt numSnapshotCacheSize; private @Metric MutableCounterLong numGetFileStatus; private @Metric MutableCounterLong numCreateDirectory; private @Metric MutableCounterLong numCreateFile; @@ -137,6 +141,8 @@ public class OMMetrics implements OmMetadataReaderMetrics { private @Metric MutableCounterLong numSnapshotListFails; private @Metric MutableCounterLong numSnapshotDiffJobFails; private @Metric MutableCounterLong numSnapshotInfoFails; + private @Metric MutableCounterLong numSnapshotPurgeFails; + private @Metric MutableCounterLong numSnapshotSetPropertyFails; private @Metric MutableCounterLong numSnapshotActive; private @Metric MutableCounterLong numSnapshotDeleted; @@ -489,6 +495,14 @@ public void incNumSnapshotInfos() { numSnapshotInfos.incr(); } + public void incNumSnapshotPurges() { + numSnapshotPurges.incr(); + } + + public void incNumSnapshotSetProperties() { + numSnapshotSetProperties.incr(); + } + public void incNumSnapshotDiffJobs() { numSnapshotDiffJobs.incr(); } @@ -504,6 +518,15 @@ public void incNumSnapshotDiffJobFails() { public void incNumSnapshotInfoFails() { numSnapshotInfoFails.incr(); } + + public void incNumSnapshotPurgeFails() { + numSnapshotPurgeFails.incr(); + } + + public void incNumSnapshotSetPropertyFails() { + numSnapshotSetPropertyFails.incr(); + } + public void setNumSnapshotActive(long num) { long currVal = numSnapshotActive.value(); numSnapshotActive.incr(num - currVal); @@ -530,6 +553,17 @@ public void decNumSnapshotDeleted() { numSnapshotDeleted.incr(-1); } + public int getNumSnapshotCacheSize() { + return numSnapshotCacheSize.value(); + } + public void incNumSnapshotCacheSize() { + numSnapshotCacheSize.incr(); + } + + public void decNumSnapshotCacheSize() { + numSnapshotCacheSize.decr(); + } + public void incNumCompleteMultipartUploadFails() { numCompleteMultipartUploadFails.incr(); } @@ -1305,6 +1339,14 @@ public long getNumSnapshotDiffJobs() { return numSnapshotDiffJobs.value(); } + public long getNumSnapshotPurges() { + return numSnapshotPurges.value(); + } + + public long getNumSnapshotSetProperties() { + return numSnapshotSetProperties.value(); + } + public long getNumSnapshotCreateFails() { return numSnapshotCreateFails.value(); } @@ -1329,6 +1371,13 @@ public long getNumSnapshotDeleted() { return numSnapshotDeleted.value(); } + public long getNumSnapshotPurgeFails() { + return numSnapshotPurgeFails.value(); + } + + public long getNumSnapshotSetPropertyFails() { + return numSnapshotSetPropertyFails.value(); + } public void incNumTrashRenames() { numTrashRenames.incr(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 602620743b0b..a3799b389c51 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -272,7 +272,7 @@ public OmSnapshotManager(OzoneManager ozoneManager) { }; // Init snapshot cache - this.snapshotCache = new SnapshotCache(loader, softCacheSize); + 
this.snapshotCache = new SnapshotCache(loader, softCacheSize, ozoneManager.getMetrics()); this.snapshotDiffManager = new SnapshotDiffManager(snapshotDiffDb, differ, ozoneManager, snapDiffJobCf, snapDiffReportCf, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index dfafe3fc3e8e..5966d969de70 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -85,9 +85,7 @@ import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.client.ScmTopologyClient; import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; -import org.apache.hadoop.hdds.scm.net.InnerNode; import org.apache.hadoop.hdds.scm.net.NetworkTopology; -import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; import org.apache.hadoop.hdds.server.OzoneAdmins; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.Table.KeyValue; @@ -1161,11 +1159,7 @@ public void setScmTopologyClient( } public NetworkTopology getClusterMap() { - InnerNode currentTree = scmTopologyClient.getClusterTree(); - return new NetworkTopologyImpl(configuration.get( - ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, - ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT), - currentTree); + return scmTopologyClient.getClusterMap(); } /** @@ -1663,13 +1657,12 @@ public void start() throws IOException { metadataManager.start(configuration); + startSecretManagerIfNecessary(); // Start Ratis services if (omRatisServer != null) { omRatisServer.start(); } - startSecretManagerIfNecessary(); - upgradeFinalizer.runPrefinalizeStateActions(omStorage, this); Integer layoutVersionInDB = getLayoutVersionInDB(); if (layoutVersionInDB == null || diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index 13d3af554683..ae3715be7bfd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -779,6 +779,8 @@ protected OmKeyInfo prepareFileInfo( dbKeyInfo.getMetadata().clear(); dbKeyInfo.getMetadata().putAll(KeyValueUtil.getFromProtobuf( keyArgs.getMetadataList())); + + dbKeyInfo.setFileEncryptionInfo(encInfo); return dbKeyInfo; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java index 3f4d746adb54..29c7628e3cca 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java @@ -19,6 +19,10 @@ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.commons.lang3.tuple.Triple; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import 
org.apache.hadoop.hdds.utils.db.cache.CacheValue; @@ -40,11 +44,15 @@ import java.io.IOException; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.NoSuchElementException; +import java.util.Set; import java.util.UUID; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.SNAPSHOT_LOCK; + /** * Handles OMSnapshotPurge Request. * This is an OM internal request. Does not need @RequireSnapshotFeatureState. @@ -59,7 +67,10 @@ public OMSnapshotPurgeRequest(OMRequest omRequest) { @Override public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + OMMetrics omMetrics = ozoneManager.getMetrics(); + final long trxnLogIndex = termIndex.getIndex(); + OmSnapshotManager omSnapshotManager = ozoneManager.getOmSnapshotManager(); OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); @@ -80,56 +91,118 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn Map updatedPathPreviousAndGlobalSnapshots = new HashMap<>(); - // Snapshots that are purged by the SnapshotDeletingService - // will update the next snapshot so that is can be deep cleaned - // by the KeyDeletingService in the next run. + // Each snapshot purge operation does three things: + // 1. Update the snapshot chain, + // 2. Update the deep clean flag for the next active snapshot (so that it can be + // deep cleaned by the KeyDeletingService in the next run), + // 3. Finally, purge the snapshot. + // All of these steps must be performed only after acquiring all the necessary + // locks (lock on the snapshot to be purged, lock on the next active snapshot, and + // lock on the next path and global previous snapshots). Ideally, snapshot purge would not + // need any locks and could rely on the OMStateMachine, because the OMStateMachine + // processes each request sequentially. + // + // But there is a problem with that. After filtering unnecessary SST files for a snapshot, + // SstFilteringService updates that snapshot's SstFilter flag. SstFilteringService cannot + // use the SetSnapshotProperty API because it runs on each OM independently, and one OM does + // not know whether the snapshot has been filtered on the other OMs in an HA environment. + // + // If locks are not taken, snapshot purge and SstFilteringService will race with each other + // and one update can overwrite the other. for (String snapTableKey : snapshotDbKeys) { - SnapshotInfo fromSnapshot = omMetadataManager.getSnapshotInfoTable() - .get(snapTableKey); - - if (fromSnapshot == null) { - // Snapshot may have been purged in the previous iteration of SnapshotDeletingService. - LOG.warn("The snapshot {} is not longer in snapshot table, It maybe removed in the previous " + - "Snapshot purge request.", snapTableKey); - continue; - } + // To acquire all the locks, a set keyed by snapshotTableKey is maintained. + // snapshotTableKey is nothing but /volumeName/bucketName/snapshotName. + // Once all the locks are acquired, the three steps mentioned above are performed and + // all the locks are released after that. + Set> lockSet = new HashSet<>(4, 1); + try { + if (omMetadataManager.getSnapshotInfoTable().get(snapTableKey) == null) { + // Snapshot may have been purged in the previous iteration of SnapshotDeletingService.
+ LOG.warn("The snapshot {} is not longer in snapshot table, It maybe removed in the previous " + + "Snapshot purge request.", snapTableKey); + continue; + } - SnapshotInfo nextSnapshot = SnapshotUtils - .getNextActiveSnapshot(fromSnapshot, - snapshotChainManager, omSnapshotManager); + acquireLock(lockSet, snapTableKey, omMetadataManager); + SnapshotInfo fromSnapshot = omMetadataManager.getSnapshotInfoTable().get(snapTableKey); - updateSnapshotInfoAndCache(nextSnapshot, omMetadataManager, - trxnLogIndex, updatedSnapInfos); - updateSnapshotChainAndCache(omMetadataManager, fromSnapshot, - trxnLogIndex, updatedPathPreviousAndGlobalSnapshots); - ozoneManager.getOmSnapshotManager().invalidateCacheEntry(fromSnapshot.getSnapshotId()); + SnapshotInfo nextSnapshot = + SnapshotUtils.getNextActiveSnapshot(fromSnapshot, snapshotChainManager, omSnapshotManager); + + if (nextSnapshot != null) { + acquireLock(lockSet, nextSnapshot.getTableKey(), omMetadataManager); + } + + // Update the chain first so that it has all the necessary locks before updating deep clean. + updateSnapshotChainAndCache(lockSet, omMetadataManager, fromSnapshot, trxnLogIndex, + updatedPathPreviousAndGlobalSnapshots); + updateSnapshotInfoAndCache(nextSnapshot, omMetadataManager, trxnLogIndex, updatedSnapInfos); + // Remove and close snapshot's RocksDB instance from SnapshotCache. + omSnapshotManager.invalidateCacheEntry(fromSnapshot.getSnapshotId()); + // Update SnapshotInfoTable cache. + omMetadataManager.getSnapshotInfoTable() + .addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()), CacheValue.get(trxnLogIndex)); + } finally { + for (Triple lockKey: lockSet) { + omMetadataManager.getLock() + .releaseWriteLock(SNAPSHOT_LOCK, lockKey.getLeft(), lockKey.getMiddle(), lockKey.getRight()); + } + } } omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(), snapshotDbKeys, updatedSnapInfos, updatedPathPreviousAndGlobalSnapshots); + + omMetrics.incNumSnapshotPurges(); + LOG.info("Successfully executed snapshotPurgeRequest: {{}} along with updating deep clean flags for " + + "snapshots: {} and global and previous for snapshots:{}.", + snapshotPurgeRequest, updatedSnapInfos.keySet(), updatedPathPreviousAndGlobalSnapshots.keySet()); } catch (IOException ex) { omClientResponse = new OMSnapshotPurgeResponse( createErrorOMResponse(omResponse, ex)); + omMetrics.incNumSnapshotPurgeFails(); + LOG.error("Failed to execute snapshotPurgeRequest:{{}}.", snapshotPurgeRequest, ex); } return omClientResponse; } + private void acquireLock(Set> lockSet, String snapshotTableKey, + OMMetadataManager omMetadataManager) throws IOException { + SnapshotInfo snapshotInfo = omMetadataManager.getSnapshotInfoTable().get(snapshotTableKey); + + // It should not be the case that lock is required for non-existing snapshot. 
+ if (snapshotInfo == null) { + LOG.error("Snapshot: '{}' does not exist in snapshot table.", snapshotTableKey); + throw new OMException("Snapshot: '{" + snapshotTableKey + "}' does not exist in snapshot table.", + OMException.ResultCodes.FILE_NOT_FOUND); + } + Triple lockKey = Triple.of(snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), + snapshotInfo.getName()); + if (!lockSet.contains(lockKey)) { + mergeOmLockDetails(omMetadataManager.getLock() + .acquireWriteLock(SNAPSHOT_LOCK, lockKey.getLeft(), lockKey.getMiddle(), lockKey.getRight())); + lockSet.add(lockKey); + } + } + private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, OmMetadataManagerImpl omMetadataManager, long trxnLogIndex, - Map updatedSnapInfos) { + Map updatedSnapInfos) throws IOException { if (snapInfo != null) { + // Fetch the latest value again after acquiring lock. + SnapshotInfo updatedSnapshotInfo = omMetadataManager.getSnapshotInfoTable().get(snapInfo.getTableKey()); + // Setting next snapshot deep clean to false, Since the // current snapshot is deleted. We can potentially // reclaim more keys in the next snapshot. - snapInfo.setDeepClean(false); + updatedSnapshotInfo.setDeepClean(false); // Update table cache first - omMetadataManager.getSnapshotInfoTable().addCacheEntry( - new CacheKey<>(snapInfo.getTableKey()), - CacheValue.get(trxnLogIndex, snapInfo)); - updatedSnapInfos.put(snapInfo.getTableKey(), snapInfo); + omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(updatedSnapshotInfo.getTableKey()), + CacheValue.get(trxnLogIndex, updatedSnapshotInfo)); + updatedSnapInfos.put(updatedSnapshotInfo.getTableKey(), updatedSnapshotInfo); } } @@ -140,6 +213,7 @@ private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, * update in DB.
*/ private void updateSnapshotChainAndCache( + Set> lockSet, OmMetadataManagerImpl metadataManager, SnapshotInfo snapInfo, long trxnLogIndex, @@ -151,7 +225,6 @@ private void updateSnapshotChainAndCache( SnapshotChainManager snapshotChainManager = metadataManager .getSnapshotChainManager(); - SnapshotInfo nextPathSnapInfo = null; // If the snapshot is deleted in the previous run, then the in-memory // SnapshotChainManager might throw NoSuchElementException as the snapshot @@ -167,58 +240,63 @@ private void updateSnapshotChainAndCache( return; } - // Updates next path snapshot's previous snapshot ID + String nextPathSnapshotKey = null; + if (hasNextPathSnapshot) { UUID nextPathSnapshotId = snapshotChainManager.nextPathSnapshot( snapInfo.getSnapshotPath(), snapInfo.getSnapshotId()); - - String snapshotTableKey = snapshotChainManager + nextPathSnapshotKey = snapshotChainManager .getTableKey(nextPathSnapshotId); - nextPathSnapInfo = metadataManager.getSnapshotInfoTable() - .get(snapshotTableKey); - if (nextPathSnapInfo != null) { - nextPathSnapInfo.setPathPreviousSnapshotId( - snapInfo.getPathPreviousSnapshotId()); - metadataManager.getSnapshotInfoTable().addCacheEntry( - new CacheKey<>(nextPathSnapInfo.getTableKey()), - CacheValue.get(trxnLogIndex, nextPathSnapInfo)); - updatedPathPreviousAndGlobalSnapshots - .put(nextPathSnapInfo.getTableKey(), nextPathSnapInfo); - } + + // Acquire lock from the snapshot + acquireLock(lockSet, nextPathSnapshotKey, metadataManager); } - // Updates next global snapshot's previous snapshot ID + String nextGlobalSnapshotKey = null; if (hasNextGlobalSnapshot) { - UUID nextGlobalSnapshotId = - snapshotChainManager.nextGlobalSnapshot(snapInfo.getSnapshotId()); - - String snapshotTableKey = snapshotChainManager - .getTableKey(nextGlobalSnapshotId); - - SnapshotInfo nextGlobalSnapInfo = metadataManager.getSnapshotInfoTable() - .get(snapshotTableKey); - // If both next global and path snapshot are same, it may overwrite - // nextPathSnapInfo.setPathPreviousSnapshotID(), adding this check - // will prevent it. - if (nextGlobalSnapInfo != null && nextPathSnapInfo != null && - nextGlobalSnapInfo.getSnapshotId().equals( - nextPathSnapInfo.getSnapshotId())) { - nextPathSnapInfo.setGlobalPreviousSnapshotId( - snapInfo.getGlobalPreviousSnapshotId()); - metadataManager.getSnapshotInfoTable().addCacheEntry( - new CacheKey<>(nextPathSnapInfo.getTableKey()), - CacheValue.get(trxnLogIndex, nextPathSnapInfo)); - updatedPathPreviousAndGlobalSnapshots - .put(nextPathSnapInfo.getTableKey(), nextPathSnapInfo); - } else if (nextGlobalSnapInfo != null) { - nextGlobalSnapInfo.setGlobalPreviousSnapshotId( - snapInfo.getGlobalPreviousSnapshotId()); - metadataManager.getSnapshotInfoTable().addCacheEntry( - new CacheKey<>(nextGlobalSnapInfo.getTableKey()), - CacheValue.get(trxnLogIndex, nextGlobalSnapInfo)); - updatedPathPreviousAndGlobalSnapshots - .put(nextGlobalSnapInfo.getTableKey(), nextGlobalSnapInfo); - } + UUID nextGlobalSnapshotId = snapshotChainManager.nextGlobalSnapshot(snapInfo.getSnapshotId()); + nextGlobalSnapshotKey = snapshotChainManager.getTableKey(nextGlobalSnapshotId); + + // Acquire lock from the snapshot + acquireLock(lockSet, nextGlobalSnapshotKey, metadataManager); + } + + SnapshotInfo nextPathSnapInfo = + nextPathSnapshotKey != null ? metadataManager.getSnapshotInfoTable().get(nextPathSnapshotKey) : null; + + SnapshotInfo nextGlobalSnapInfo = + nextGlobalSnapshotKey != null ? 
metadataManager.getSnapshotInfoTable().get(nextGlobalSnapshotKey) : null; + + // Updates next path snapshot's previous snapshot ID + if (nextPathSnapInfo != null) { + nextPathSnapInfo.setPathPreviousSnapshotId(snapInfo.getPathPreviousSnapshotId()); + metadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(nextPathSnapInfo.getTableKey()), + CacheValue.get(trxnLogIndex, nextPathSnapInfo)); + updatedPathPreviousAndGlobalSnapshots + .put(nextPathSnapInfo.getTableKey(), nextPathSnapInfo); + } + + // Updates next global snapshot's previous snapshot ID + // If both next global and path snapshot are same, it may overwrite + // nextPathSnapInfo.setPathPreviousSnapshotID(), adding this check + // will prevent it. + if (nextGlobalSnapInfo != null && nextPathSnapInfo != null && + nextGlobalSnapInfo.getSnapshotId().equals(nextPathSnapInfo.getSnapshotId())) { + nextPathSnapInfo.setGlobalPreviousSnapshotId(snapInfo.getGlobalPreviousSnapshotId()); + metadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(nextPathSnapInfo.getTableKey()), + CacheValue.get(trxnLogIndex, nextPathSnapInfo)); + updatedPathPreviousAndGlobalSnapshots + .put(nextPathSnapInfo.getTableKey(), nextPathSnapInfo); + } else if (nextGlobalSnapInfo != null) { + nextGlobalSnapInfo.setGlobalPreviousSnapshotId( + snapInfo.getGlobalPreviousSnapshotId()); + metadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(nextGlobalSnapInfo.getTableKey()), + CacheValue.get(trxnLogIndex, nextGlobalSnapInfo)); + updatedPathPreviousAndGlobalSnapshots + .put(nextGlobalSnapInfo.getTableKey(), nextGlobalSnapInfo); } snapshotChainManager.deleteSnapshot(snapInfo); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java index b3dd5206c993..c4ca3dc99e3c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; @@ -36,7 +37,8 @@ import java.io.IOException; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_SNAPSHOT_ERROR; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.SNAPSHOT_LOCK; /** * Updates the exclusive size of the snapshot. 
@@ -51,6 +53,7 @@ public OMSnapshotSetPropertyRequest(OMRequest omRequest) { @Override public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + OMMetrics omMetrics = ozoneManager.getMetrics(); OMClientResponse omClientResponse = null; OMMetadataManager metadataManager = ozoneManager.getMetadataManager(); @@ -62,16 +65,31 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .getSetSnapshotPropertyRequest(); SnapshotInfo updatedSnapInfo = null; + String snapshotKey = setSnapshotPropertyRequest.getSnapshotKey(); + boolean acquiredSnapshotLock = false; + String volumeName = null; + String bucketName = null; + String snapshotName = null; + try { - String snapshotKey = setSnapshotPropertyRequest.getSnapshotKey(); + SnapshotInfo snapshotInfo = metadataManager.getSnapshotInfoTable().get(snapshotKey); + if (snapshotInfo == null) { + LOG.error("Snapshot: '{}' does not exist in snapshot table.", snapshotKey); + throw new OMException("Snapshot: '{" + snapshotKey + "}' does not exist in snapshot table.", FILE_NOT_FOUND); + } + + volumeName = snapshotInfo.getVolumeName(); + bucketName = snapshotInfo.getBucketName(); + snapshotName = snapshotInfo.getName(); + + mergeOmLockDetails(metadataManager.getLock() + .acquireWriteLock(SNAPSHOT_LOCK, volumeName, bucketName, snapshotName)); + + acquiredSnapshotLock = getOmLockDetails().isLockAcquired(); + updatedSnapInfo = metadataManager.getSnapshotInfoTable() .get(snapshotKey); - if (updatedSnapInfo == null) { - LOG.error("SnapshotInfo for Snapshot: {} is not found", snapshotKey); - throw new OMException("SnapshotInfo for Snapshot: " + snapshotKey + - " is not found", INVALID_SNAPSHOT_ERROR); - } if (setSnapshotPropertyRequest.hasDeepCleanedDeletedDir()) { updatedSnapInfo.setDeepCleanedDeletedDir(setSnapshotPropertyRequest @@ -101,9 +119,21 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn CacheValue.get(termIndex.getIndex(), updatedSnapInfo)); omClientResponse = new OMSnapshotSetPropertyResponse( omResponse.build(), updatedSnapInfo); + omMetrics.incNumSnapshotSetProperties(); + LOG.info("Successfully executed snapshotSetPropertyRequest: {{}}.", setSnapshotPropertyRequest); } catch (IOException ex) { omClientResponse = new OMSnapshotSetPropertyResponse( createErrorOMResponse(omResponse, ex)); + omMetrics.incNumSnapshotSetPropertyFails(); + LOG.error("Failed to execute snapshotSetPropertyRequest: {{}}.", setSnapshotPropertyRequest, ex); } finally { + if (acquiredSnapshotLock) { + mergeOmLockDetails(metadataManager.getLock() + .releaseWriteLock(SNAPSHOT_LOCK, volumeName, bucketName, snapshotName)); + } + if (omClientResponse != null) { + omClientResponse.setOmLockDetails(getOmLockDetails()); + } } return omClientResponse; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/ValidatorRegistry.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/ValidatorRegistry.java index 8eeb7bf0e4aa..610949e0f8a4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/ValidatorRegistry.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/ValidatorRegistry.java @@ -71,7 +71,7 @@ public class ValidatorRegistry { Reflections reflections = new Reflections(new ConfigurationBuilder() .setUrls(searchUrls) .setScanners(new MethodAnnotationsScanner()) - .useParallelExecutor() + .setParallel(true) ); Set
describedValidators = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java index b8db58d7fd9e..d300601b3858 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java @@ -80,12 +80,13 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) omMetadataManager; - updateSnapInfo(metadataManager, batchOperation, updatedSnapInfos); updateSnapInfo(metadataManager, batchOperation, updatedPreviousAndGlobalSnapInfos); + updateSnapInfo(metadataManager, batchOperation, updatedSnapInfos); for (String dbKey: snapshotDbKeys) { + // Skip the cache here because snapshot is purged from cache in OMSnapshotPurgeRequest. SnapshotInfo snapshotInfo = omMetadataManager - .getSnapshotInfoTable().get(dbKey); + .getSnapshotInfoTable().getSkipCache(dbKey); // Even though snapshot existed when SnapshotDeletingService // was running. It might be deleted in the previous run and // the DB might not have been updated yet. So snapshotInfo @@ -96,8 +97,7 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, // Delete Snapshot checkpoint directory. deleteCheckpointDirectory(omMetadataManager, snapshotInfo); - omMetadataManager.getSnapshotInfoTable().deleteWithBatch(batchOperation, - dbKey); + omMetadataManager.getSnapshotInfoTable().deleteWithBatch(batchOperation, dbKey); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java index 0b64d6d069b4..f14837462b0a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java @@ -19,6 +19,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.cache.CacheLoader; +import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.slf4j.Logger; @@ -51,10 +52,13 @@ public class SnapshotCache { // opened on the OM. 
private final int cacheSizeLimit; - public SnapshotCache(CacheLoader cacheLoader, int cacheSizeLimit) { + private final OMMetrics omMetrics; + + public SnapshotCache(CacheLoader cacheLoader, int cacheSizeLimit, OMMetrics omMetrics) { this.dbMap = new ConcurrentHashMap<>(); this.cacheLoader = cacheLoader; this.cacheSizeLimit = cacheSizeLimit; + this.omMetrics = omMetrics; } @VisibleForTesting @@ -83,6 +87,7 @@ public void invalidate(UUID key) throws IOException { } catch (IOException e) { throw new IllegalStateException("Failed to close snapshotId: " + key, e); } + omMetrics.decNumSnapshotCacheSize(); } return null; }); @@ -104,6 +109,7 @@ public void invalidateAll() { throw new IllegalStateException("Failed to close snapshot", e); } it.remove(); + omMetrics.decNumSnapshotCacheSize(); } } @@ -150,6 +156,7 @@ public ReferenceCounted get(UUID key) throws IOException { // Unexpected and unknown exception thrown from CacheLoader#load throw new IllegalStateException(ex); } + omMetrics.incNumSnapshotCacheSize(); } if (v != null) { // When RC OmSnapshot is successfully loaded @@ -157,7 +164,6 @@ public ReferenceCounted get(UUID key) throws IOException { } return v; }); - if (rcOmSnapshot == null) { // The only exception that would fall through the loader logic above // is OMException with FILE_NOT_FOUND. @@ -227,6 +233,7 @@ private void cleanupInternal() { } catch (IOException ex) { throw new IllegalStateException("Error while closing snapshot DB.", ex); } + omMetrics.decNumSnapshotCacheSize(); return null; } }); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java index d08a0009e36e..f1e9c819e709 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java @@ -96,7 +96,7 @@ protected void registerUpgradeActions(String packageName) { .forPackages(packageName) .setScanners(new TypeAnnotationsScanner(), new SubTypesScanner()) .setExpandSuperTypes(false) - .useParallelExecutor()); + .setParallel(true)); Set> typesAnnotatedWith = reflections.getTypesAnnotatedWith(UpgradeActionOm.class); typesAnnotatedWith.forEach(actionClass -> { @@ -132,7 +132,7 @@ public static Set> getRequestClasses( .setUrls(ClasspathHelper.forPackage(packageName)) .setScanners(new SubTypesScanner()) .setExpandSuperTypes(false) - .useParallelExecutor()); + .setParallel(true)); Set> validRequests = new HashSet<>(); Set> subTypes = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java index 11d27913ff82..03729aebb509 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java @@ -87,6 +87,9 @@ public class OzoneManagerProtocolServerSideTranslatorPB implements OzoneManagerP // always true, only used in tests private boolean shouldFlushCache = true; + private OMRequest lastRequestToSubmit; + + /** * Constructs an instance of the server handler. 
* @@ -212,6 +215,7 @@ private OMResponse internalProcessRequest(OMRequest request) throws assert (omClientRequest != null); OMClientRequest finalOmClientRequest = omClientRequest; requestToSubmit = preExecute(finalOmClientRequest); + this.lastRequestToSubmit = requestToSubmit; } catch (IOException ex) { if (omClientRequest != null) { omClientRequest.handleRequestFailure(ozoneManager); @@ -235,6 +239,11 @@ private OMRequest preExecute(OMClientRequest finalOmClientRequest) () -> finalOmClientRequest.preExecute(ozoneManager)); } + @VisibleForTesting + public OMRequest getLastRequestToSubmit() { + return lastRequestToSubmit; + } + /** * Submits request to OM's Ratis server. */ diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java index 71882c3423e0..8edd096e766c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.om.IOmMetadataReader; @@ -63,10 +64,12 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.INTERNAL_ERROR; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyString; @@ -77,8 +80,6 @@ * Tests OMSnapshotPurgeRequest class. */ public class TestOMSnapshotPurgeRequestAndResponse { - - private BatchOperation batchOperation; private List checkpointPaths = new ArrayList<>(); private OzoneManager ozoneManager; @@ -177,7 +178,6 @@ private void createSnapshotCheckpoint(String snapshotName) throws Exception { private void createSnapshotCheckpoint(String volume, String bucket, String snapshotName) throws Exception { - batchOperation = omMetadataManager.getStore().initBatchOperation(); OMRequest omRequest = OMRequestTestUtils .createSnapshotRequest(volume, bucket, snapshotName); // Pre-Execute OMSnapshotCreateRequest. @@ -188,9 +188,10 @@ private void createSnapshotCheckpoint(String volume, OMSnapshotCreateResponse omClientResponse = (OMSnapshotCreateResponse) omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); // Add to batch and commit to DB. 
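The translator change above records the last pre-executed OMRequest in a field and exposes it through a @VisibleForTesting getter, presumably so tests can assert on exactly what would be submitted to Ratis. A stripped-down sketch of that capture pattern follows; Request and preExecute() are placeholders, not the Ozone classes.

```java
import com.google.common.annotations.VisibleForTesting;

// Sketch of the "remember the last thing we were about to submit" test hook.
class RequestProcessorSketch {
  static final class Request {
    final String payload;
    Request(String payload) { this.payload = payload; }
  }

  private volatile Request lastRequestToSubmit;

  Request process(Request incoming) {
    Request prepared = preExecute(incoming);   // validation / rewriting step
    this.lastRequestToSubmit = prepared;       // captured purely for test assertions
    return submit(prepared);
  }

  private Request preExecute(Request r) { return new Request(r.payload.trim()); }

  private Request submit(Request r) { return r; } // stand-in for the real submission step

  @VisibleForTesting
  Request getLastRequestToSubmit() {
    return lastRequestToSubmit;
  }
}
```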
- omClientResponse.addToDBBatch(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); - batchOperation.close(); + try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { + omClientResponse.addToDBBatch(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); + } String key = SnapshotInfo.getTableKey(volume, bucket, snapshotName); SnapshotInfo snapshotInfo = @@ -226,19 +227,35 @@ private void purgeSnapshots(OMRequest snapshotPurgeRequest) omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L); // Commit to DB. - batchOperation = omMetadataManager.getStore().initBatchOperation(); - omSnapshotPurgeResponse.checkAndUpdateDB(omMetadataManager, batchOperation); - omMetadataManager.getStore().commitBatchOperation(batchOperation); + try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { + omSnapshotPurgeResponse.checkAndUpdateDB(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); + } } @Test public void testValidateAndUpdateCache() throws Exception { + long initialSnapshotPurgeCount = omMetrics.getNumSnapshotPurges(); + long initialSnapshotPurgeFailCount = omMetrics.getNumSnapshotPurgeFails(); List snapshotDbKeysToPurge = createSnapshots(10); assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); OMRequest snapshotPurgeRequest = createPurgeKeysRequest( snapshotDbKeysToPurge); - purgeSnapshots(snapshotPurgeRequest); + + OMSnapshotPurgeRequest omSnapshotPurgeRequest = preExecute(snapshotPurgeRequest); + + OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse) + omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L); + + for (String snapshotTableKey: snapshotDbKeysToPurge) { + assertNull(omMetadataManager.getSnapshotInfoTable().get(snapshotTableKey)); + } + + try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { + omSnapshotPurgeResponse.checkAndUpdateDB(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); + } // Check if the entries are deleted. assertTrue(omMetadataManager.getSnapshotInfoTable().isEmpty()); @@ -247,6 +264,36 @@ public void testValidateAndUpdateCache() throws Exception { for (Path checkpoint : checkpointPaths) { assertFalse(Files.exists(checkpoint)); } + assertEquals(initialSnapshotPurgeCount + 1, omMetrics.getNumSnapshotPurges()); + assertEquals(initialSnapshotPurgeFailCount, omMetrics.getNumSnapshotPurgeFails()); + } + + /** + * This test is mainly to validate metrics and error code. 
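The test refactor above replaces a long-lived batchOperation field with try-with-resources, so each batch is closed even if the commit throws. A minimal sketch of the same resource handling against a hypothetical store API; what makes it work is that Ozone's BatchOperation is AutoCloseable.

```java
// Illustrative store/batch interfaces; the real ones are DBStore and BatchOperation.
interface Batch extends AutoCloseable {
  void put(String key, String value);
  @Override void close();                 // no checked exception in this sketch
}

interface Store {
  Batch initBatchOperation();
  void commitBatchOperation(Batch batch);
}

final class BatchUsageSketch {
  static void writeAtomically(Store store, String key, String value) {
    // Before: batch = store.initBatchOperation(); ...; batch.close();  -- leaks if commit throws.
    // After: the batch is always closed, mirroring the try-with-resources in the tests.
    try (Batch batch = store.initBatchOperation()) {
      batch.put(key, value);
      store.commitBatchOperation(batch);
    }
  }
}
```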
+ */ + @Test + public void testValidateAndUpdateCacheFailure() throws Exception { + long initialSnapshotPurgeCount = omMetrics.getNumSnapshotPurges(); + long initialSnapshotPurgeFailCount = omMetrics.getNumSnapshotPurgeFails(); + + List snapshotDbKeysToPurge = createSnapshots(10); + + OmMetadataManagerImpl mockedMetadataManager = mock(OmMetadataManagerImpl.class); + Table mockedSnapshotInfoTable = mock(Table.class); + + when(mockedSnapshotInfoTable.get(anyString())).thenThrow(new IOException("Injected fault error.")); + when(mockedMetadataManager.getSnapshotInfoTable()).thenReturn(mockedSnapshotInfoTable); + when(ozoneManager.getMetadataManager()).thenReturn(mockedMetadataManager); + + OMRequest snapshotPurgeRequest = createPurgeKeysRequest(snapshotDbKeysToPurge); + OMSnapshotPurgeRequest omSnapshotPurgeRequest = preExecute(snapshotPurgeRequest); + + OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse) + omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L); + + assertEquals(INTERNAL_ERROR, omSnapshotPurgeResponse.getOMResponse().getStatus()); + assertEquals(initialSnapshotPurgeCount, omMetrics.getNumSnapshotPurges()); + assertEquals(initialSnapshotPurgeFailCount + 1, omMetrics.getNumSnapshotPurgeFails()); } // TODO: clean up: Do we this test after diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java index 3856a5b62f5f..b5bfc2714b0f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java @@ -27,6 +27,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -48,6 +49,7 @@ import java.util.List; import java.util.UUID; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.INTERNAL_ERROR; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.mockito.Mockito.anyString; @@ -62,7 +64,7 @@ public class TestOMSnapshotSetPropertyRequestAndResponse { private BatchOperation batchOperation; private OzoneManager ozoneManager; private OMMetadataManager omMetadataManager; - + private OMMetrics omMetrics; private String volumeName; private String bucketName; private String snapName; @@ -71,6 +73,7 @@ public class TestOMSnapshotSetPropertyRequestAndResponse { @BeforeEach void setup(@TempDir File testDir) throws Exception { + omMetrics = OMMetrics.create(); ozoneManager = mock(OzoneManager.class); OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); when(lvm.isAllowed(anyString())).thenReturn(true); @@ -84,6 +87,7 @@ void setup(@TempDir File testDir) throws Exception { omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + when(ozoneManager.getMetrics()).thenReturn(omMetrics); volumeName = 
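Both new failure tests follow the same recipe: mock the metadata manager, make getSnapshotInfoTable().get() throw, then assert that the response status is INTERNAL_ERROR and that only the failure counter moved. A self-contained sketch of that fault-injection shape with Mockito, using generic Lookup/Service stand-ins rather than the OM classes.

```java
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

// Fault-injection sketch: the lookup dependency is mocked to fail so the
// error path (and its failure counter) can be exercised deterministically.
final class FaultInjectionSketch {

  interface Lookup {                       // stand-in for the snapshot-info table
    String get(String key) throws IOException;
  }

  static final class Service {
    private final Lookup lookup;
    final AtomicLong successes = new AtomicLong();
    final AtomicLong failures = new AtomicLong();

    Service(Lookup lookup) { this.lookup = lookup; }

    String handle(String key) {
      try {
        String value = lookup.get(key);
        successes.incrementAndGet();
        return value;
      } catch (IOException e) {
        failures.incrementAndGet();        // mirrors incrementing the *Fails metric
        return "INTERNAL_ERROR";
      }
    }
  }

  public static void main(String[] args) throws IOException {
    Lookup broken = mock(Lookup.class);
    when(broken.get(anyString())).thenThrow(new IOException("Injected fault error."));

    Service service = new Service(broken);
    System.out.println(service.handle("snap-1"));   // INTERNAL_ERROR
    System.out.println(service.failures.get());     // 1
    System.out.println(service.successes.get());    // 0
  }
}
```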
UUID.randomUUID().toString(); bucketName = UUID.randomUUID().toString(); @@ -94,6 +98,9 @@ void setup(@TempDir File testDir) throws Exception { @Test public void testValidateAndUpdateCache() throws IOException { + long initialSnapshotSetPropertyCount = omMetrics.getNumSnapshotSetProperties(); + long initialSnapshotSetPropertyFailCount = omMetrics.getNumSnapshotSetPropertyFails(); + createSnapshotDataForTest(); assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); List snapshotUpdateSizeRequests = @@ -120,6 +127,9 @@ public void testValidateAndUpdateCache() throws IOException { omMetadataManager.getStore().commitBatchOperation(batchOperation); } + assertEquals(initialSnapshotSetPropertyCount + snapshotUpdateSizeRequests.size(), + omMetrics.getNumSnapshotSetProperties()); + assertEquals(initialSnapshotSetPropertyFailCount, omMetrics.getNumSnapshotSetPropertyFails()); // Check if the exclusive size is set. try (TableIterator> iterator = omMetadataManager.getSnapshotInfoTable().iterator()) { @@ -134,6 +144,42 @@ public void testValidateAndUpdateCache() throws IOException { } } + /** + * This test is mainly to validate metrics and error code. + */ + @Test + public void testValidateAndUpdateCacheFailure() throws IOException { + long initialSnapshotSetPropertyCount = omMetrics.getNumSnapshotSetProperties(); + long initialSnapshotSetPropertyFailCount = omMetrics.getNumSnapshotSetPropertyFails(); + + createSnapshotDataForTest(); + assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); + List snapshotUpdateSizeRequests = createSnapshotUpdateSizeRequest(); + + OmMetadataManagerImpl mockedMetadataManager = mock(OmMetadataManagerImpl.class); + Table mockedSnapshotInfoTable = mock(Table.class); + + when(mockedSnapshotInfoTable.get(anyString())).thenThrow(new IOException("Injected fault error.")); + when(mockedMetadataManager.getSnapshotInfoTable()).thenReturn(mockedSnapshotInfoTable); + when(ozoneManager.getMetadataManager()).thenReturn(mockedMetadataManager); + + for (OMRequest omRequest: snapshotUpdateSizeRequests) { + OMSnapshotSetPropertyRequest omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(omRequest); + OMRequest modifiedOmRequest = omSnapshotSetPropertyRequest.preExecute(ozoneManager); + omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(modifiedOmRequest); + + // Validate and Update Cache + OMSnapshotSetPropertyResponse omSnapshotSetPropertyResponse = (OMSnapshotSetPropertyResponse) + omSnapshotSetPropertyRequest.validateAndUpdateCache(ozoneManager, 200L); + + assertEquals(INTERNAL_ERROR, omSnapshotSetPropertyResponse.getOMResponse().getStatus()); + } + + assertEquals(initialSnapshotSetPropertyCount, omMetrics.getNumSnapshotSetProperties()); + assertEquals(initialSnapshotSetPropertyFailCount + snapshotUpdateSizeRequests.size(), + omMetrics.getNumSnapshotSetPropertyFails()); + } + private void assertCacheValues(String dbKey) { CacheValue cacheValue = omMetadataManager .getSnapshotInfoTable() diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java index 21b795216def..2a1e2ec99fca 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.snapshot; import 
com.google.common.cache.CacheLoader; +import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; @@ -52,6 +53,8 @@ class TestSnapshotCache { private static CacheLoader cacheLoader; private SnapshotCache snapshotCache; + private OMMetrics omMetrics; + @BeforeAll static void beforeAll() throws Exception { cacheLoader = mock(CacheLoader.class); @@ -74,7 +77,8 @@ static void beforeAll() throws Exception { @BeforeEach void setUp() { // Reset cache for each test case - snapshotCache = new SnapshotCache(cacheLoader, CACHE_SIZE_LIMIT); + omMetrics = OMMetrics.create(); + snapshotCache = new SnapshotCache(cacheLoader, CACHE_SIZE_LIMIT, omMetrics); } @AfterEach @@ -87,11 +91,13 @@ void tearDown() { @DisplayName("get()") void testGet() throws IOException { final UUID dbKey1 = UUID.randomUUID(); + assertEquals(0, omMetrics.getNumSnapshotCacheSize()); ReferenceCounted omSnapshot = snapshotCache.get(dbKey1); assertNotNull(omSnapshot); assertNotNull(omSnapshot.get()); assertInstanceOf(OmSnapshot.class, omSnapshot.get()); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); } @Test @@ -101,12 +107,14 @@ void testGetTwice() throws IOException { ReferenceCounted omSnapshot1 = snapshotCache.get(dbKey1); assertNotNull(omSnapshot1); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); ReferenceCounted omSnapshot1again = snapshotCache.get(dbKey1); // Should be the same instance assertEquals(omSnapshot1, omSnapshot1again); assertEquals(omSnapshot1.get(), omSnapshot1again.get()); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); } @Test @@ -117,10 +125,12 @@ void testReleaseByDbKey() throws IOException { assertNotNull(omSnapshot1); assertNotNull(omSnapshot1.get()); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); snapshotCache.release(dbKey1); // Entry will not be immediately evicted assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); } @Test @@ -130,13 +140,16 @@ void testInvalidate() throws IOException { ReferenceCounted omSnapshot = snapshotCache.get(dbKey1); assertNotNull(omSnapshot); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); snapshotCache.release(dbKey1); // Entry will not be immediately evicted assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); snapshotCache.invalidate(dbKey1); assertEquals(0, snapshotCache.size()); + assertEquals(0, omMetrics.getNumSnapshotCacheSize()); } @Test @@ -146,11 +159,13 @@ void testInvalidateAll() throws IOException { ReferenceCounted omSnapshot1 = snapshotCache.get(dbKey1); assertNotNull(omSnapshot1); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); final UUID dbKey2 = UUID.randomUUID(); ReferenceCounted omSnapshot2 = snapshotCache.get(dbKey2); assertNotNull(omSnapshot2); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); // Should be difference omSnapshot instances assertNotEquals(omSnapshot1, omSnapshot2); @@ -158,16 +173,20 @@ void testInvalidateAll() throws IOException { ReferenceCounted omSnapshot3 = snapshotCache.get(dbKey3); assertNotNull(omSnapshot3); assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); 
snapshotCache.release(dbKey1); // Entry will not be immediately evicted assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); snapshotCache.invalidate(dbKey1); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); snapshotCache.invalidateAll(); assertEquals(0, snapshotCache.size()); + assertEquals(0, omMetrics.getNumSnapshotCacheSize()); } private void assertEntryExistence(UUID key, boolean shouldExist) { @@ -191,26 +210,33 @@ void testEviction1() throws IOException { final UUID dbKey1 = UUID.randomUUID(); snapshotCache.get(dbKey1); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); snapshotCache.release(dbKey1); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); final UUID dbKey2 = UUID.randomUUID(); snapshotCache.get(dbKey2); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); snapshotCache.release(dbKey2); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); final UUID dbKey3 = UUID.randomUUID(); snapshotCache.get(dbKey3); assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); snapshotCache.release(dbKey3); assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); final UUID dbKey4 = UUID.randomUUID(); snapshotCache.get(dbKey4); // dbKey1, dbKey2 and dbKey3 would have been evicted by the end of the last get() because // those were release()d. assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); assertEntryExistence(dbKey1, false); } @@ -221,25 +247,30 @@ void testEviction2() throws IOException { final UUID dbKey1 = UUID.randomUUID(); snapshotCache.get(dbKey1); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); final UUID dbKey2 = UUID.randomUUID(); snapshotCache.get(dbKey2); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); final UUID dbKey3 = UUID.randomUUID(); snapshotCache.get(dbKey3); assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); final UUID dbKey4 = UUID.randomUUID(); snapshotCache.get(dbKey4); // dbKey1 would not have been evicted because it is not release()d assertEquals(4, snapshotCache.size()); + assertEquals(4, omMetrics.getNumSnapshotCacheSize()); assertEntryExistence(dbKey1, true); // Releasing dbKey2 at this point should immediately trigger its eviction // because the cache size exceeded the soft limit snapshotCache.release(dbKey2); assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); assertEntryExistence(dbKey2, false); assertEntryExistence(dbKey1, true); } @@ -252,41 +283,50 @@ void testEviction3WithClose() throws IOException { try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey1)) { assertEquals(1L, rcOmSnapshot.getTotalRefCount()); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); } // ref count should have been decreased because it would be close()d // upon exiting try-with-resources. 
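testEviction3WithClose relies on the cache handle implementing AutoCloseable, so the reference count drops automatically when the try-with-resources block exits and the entry only becomes evictable once the count reaches zero. A compact sketch of that idea, with hypothetical names rather than Ozone's ReferenceCounted.

```java
import java.util.concurrent.atomic.AtomicLong;

// A handle whose close() releases one reference; the owning cache would only
// evict the underlying resource once the count reaches zero.
final class RefCountedHandle<T> implements AutoCloseable {
  private final T value;
  private final AtomicLong refCount = new AtomicLong();

  RefCountedHandle(T value) { this.value = value; }

  RefCountedHandle<T> retain() {
    refCount.incrementAndGet();
    return this;
  }

  T get() { return value; }

  long getTotalRefCount() { return refCount.get(); }

  @Override
  public void close() {                    // runs when a try-with-resources block exits
    refCount.decrementAndGet();
  }

  public static void main(String[] args) {
    RefCountedHandle<String> handle = new RefCountedHandle<>("snapshot-db");
    try (RefCountedHandle<String> h = handle.retain()) {
      System.out.println(h.getTotalRefCount());    // 1 while in use
    }
    System.out.println(handle.getTotalRefCount()); // 0 -> now safe to evict
  }
}
```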
assertEquals(0L, snapshotCache.getDbMap().get(dbKey1).getTotalRefCount()); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); final UUID dbKey2 = UUID.randomUUID(); try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey2)) { assertEquals(1L, rcOmSnapshot.getTotalRefCount()); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); // Get dbKey2 entry a second time try (ReferenceCounted rcOmSnapshot2 = snapshotCache.get(dbKey2)) { assertEquals(2L, rcOmSnapshot.getTotalRefCount()); assertEquals(2L, rcOmSnapshot2.getTotalRefCount()); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); } assertEquals(1L, rcOmSnapshot.getTotalRefCount()); } assertEquals(0L, snapshotCache.getDbMap().get(dbKey2).getTotalRefCount()); assertEquals(2, snapshotCache.size()); + assertEquals(2, omMetrics.getNumSnapshotCacheSize()); final UUID dbKey3 = UUID.randomUUID(); try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey3)) { assertEquals(1L, rcOmSnapshot.getTotalRefCount()); assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); } assertEquals(0L, snapshotCache.getDbMap().get(dbKey3).getTotalRefCount()); assertEquals(3, snapshotCache.size()); + assertEquals(3, omMetrics.getNumSnapshotCacheSize()); final UUID dbKey4 = UUID.randomUUID(); try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey4)) { assertEquals(1L, rcOmSnapshot.getTotalRefCount()); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); } assertEquals(0L, snapshotCache.getDbMap().get(dbKey4).getTotalRefCount()); assertEquals(1, snapshotCache.size()); + assertEquals(1, omMetrics.getNumSnapshotCacheSize()); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotChain.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java similarity index 98% rename from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotChain.java rename to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java index 1821b6f9af32..c5ae809718e7 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotChain.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java @@ -15,11 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import com.google.common.collect.ImmutableMap; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.util.Time; import org.junit.jupiter.api.BeforeEach; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index c0a85c787d5b..a9e67b00cc9e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -364,7 +364,7 @@ public void init() throws RocksDBException, IOException, ExecutionException { omSnapshotManager = mock(OmSnapshotManager.class); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); when(omSnapshotManager.isSnapshotStatus(any(), any())).thenReturn(true); - SnapshotCache snapshotCache = new SnapshotCache(mockCacheLoader(), 10); + SnapshotCache snapshotCache = new SnapshotCache(mockCacheLoader(), 10, omMetrics); when(omSnapshotManager.getActiveSnapshot(anyString(), anyString(), anyString())) .thenAnswer(invocationOnMock -> { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java similarity index 96% rename from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotInfo.java rename to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java index 48f366371adf..dc00433e179b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotInfo.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java @@ -16,10 +16,12 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus; import org.apache.hadoop.util.Time; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSstFilteringService.java similarity index 97% rename from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java rename to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSstFilteringService.java index 25fdaa908230..72bca07557b6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSstFilteringService.java @@ -16,7 +16,7 @@ * limitations under the License. * */ -package org.apache.hadoop.ozone.om; +package org.apache.hadoop.ozone.om.snapshot; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; @@ -26,6 +26,14 @@ import org.apache.hadoop.hdds.utils.db.DBProfile; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.lock.BootstrapStateHandler; +import org.apache.hadoop.ozone.om.KeyManager; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OmTestManagers; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SstFilteringService; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -34,7 +42,6 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; -import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.ratis.util.ExitUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java index ab52950cf924..de9603c475f7 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java @@ -894,9 +894,9 @@ public List listStatus(String pathStr, boolean recursive, } OFSPath ofsStartPath = new OFSPath(startPath, config); if (ofsPath.isVolume()) { - String startBucket = ofsStartPath.getBucketName(); + String startBucketPath = ofsStartPath.getNonKeyPath(); return listStatusVolume(ofsPath.getVolumeName(), - recursive, startBucket, numEntries, uri, workingDir, username); + recursive, startBucketPath, numEntries, uri, workingDir, username); } if 
(ofsPath.isSnapshotPath()) { diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java index 1fcb1554b6c3..3ba291ae0fd0 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java @@ -18,6 +18,8 @@ package org.apache.hadoop.fs.ozone; import com.google.common.base.Preconditions; +import io.opentracing.Span; +import io.opentracing.util.GlobalTracer; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.CreateFlag; @@ -41,6 +43,7 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; +import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.ozone.OFSPath; @@ -239,7 +242,12 @@ public FSDataInputStream open(Path path, int bufferSize) throws IOException { statistics.incrementReadOps(1); LOG.trace("open() path: {}", path); final String key = pathToKey(path); - return new FSDataInputStream(createFSInputStream(adapter.readFile(key))); + return TracingUtil.executeInNewSpan("ofs open", + () -> { + Span span = GlobalTracer.get().activeSpan(); + span.setTag("path", key); + return new FSDataInputStream(createFSInputStream(adapter.readFile(key))); + }); } protected InputStream createFSInputStream(InputStream inputStream) { @@ -263,7 +271,8 @@ public FSDataOutputStream create(Path f, FsPermission permission, incrementCounter(Statistic.INVOCATION_CREATE, 1); statistics.incrementWriteOps(1); final String key = pathToKey(f); - return createOutputStream(key, replication, overwrite, true); + return TracingUtil.executeInNewSpan("ofs create", + () -> createOutputStream(key, replication, overwrite, true)); } @Override @@ -277,8 +286,10 @@ public FSDataOutputStream createNonRecursive(Path path, incrementCounter(Statistic.INVOCATION_CREATE_NON_RECURSIVE, 1); statistics.incrementWriteOps(1); final String key = pathToKey(path); - return createOutputStream(key, - replication, flags.contains(CreateFlag.OVERWRITE), false); + return TracingUtil.executeInNewSpan("ofs createNonRecursive", + () -> + createOutputStream(key, + replication, flags.contains(CreateFlag.OVERWRITE), false)); } private OutputStream selectOutputStream(String key, short replication, @@ -374,6 +385,14 @@ boolean processKeyPath(List keyPathList) throws IOException { */ @Override public boolean rename(Path src, Path dst) throws IOException { + return TracingUtil.executeInNewSpan("ofs rename", + () -> renameInSpan(src, dst)); + } + + private boolean renameInSpan(Path src, Path dst) throws IOException { + Span span = GlobalTracer.get().activeSpan(); + span.setTag("src", src.toString()) + .setTag("dst", dst.toString()); incrementCounter(Statistic.INVOCATION_RENAME, 1); statistics.incrementWriteOps(1); if (src.equals(dst)) { @@ -526,8 +545,8 @@ protected void rename(final Path src, final Path dst, @Override public Path createSnapshot(Path path, String snapshotName) throws IOException { - String snapshot = getAdapter() - .createSnapshot(pathToKey(path), snapshotName); + String snapshot = TracingUtil.executeInNewSpan("ofs 
createSnapshot", + () -> getAdapter().createSnapshot(pathToKey(path), snapshotName)); return new Path(OzoneFSUtils.trimPathToDepth(path, PATH_DEPTH_TO_BUCKET), OM_SNAPSHOT_INDICATOR + OZONE_URI_DELIMITER + snapshot); } @@ -541,7 +560,8 @@ public void renameSnapshot(Path path, String snapshotOldName, String snapshotNew @Override public void deleteSnapshot(Path path, String snapshotName) throws IOException { - adapter.deleteSnapshot(pathToKey(path), snapshotName); + TracingUtil.executeInNewSpan("ofs deleteSnapshot", + () -> adapter.deleteSnapshot(pathToKey(path), snapshotName)); } private class DeleteIterator extends OzoneListingIterator { @@ -672,6 +692,11 @@ private boolean innerDelete(Path f, boolean recursive) throws IOException { */ @Override public boolean delete(Path f, boolean recursive) throws IOException { + return TracingUtil.executeInNewSpan("ofs delete", + () -> deleteInSpan(f, recursive)); + } + + private boolean deleteInSpan(Path f, boolean recursive) throws IOException { incrementCounter(Statistic.INVOCATION_DELETE, 1); statistics.incrementWriteOps(1); LOG.debug("Delete path {} - recursive {}", f, recursive); @@ -889,7 +914,8 @@ private boolean o3Exists(final Path f) throws IOException { @Override public FileStatus[] listStatus(Path f) throws IOException { - return convertFileStatusArr(listStatusAdapter(f)); + return TracingUtil.executeInNewSpan("ofs listStatus", + () -> convertFileStatusArr(listStatusAdapter(f))); } private FileStatus[] convertFileStatusArr( @@ -946,7 +972,8 @@ public Path getWorkingDirectory() { @Override public Token getDelegationToken(String renewer) throws IOException { - return adapter.getDelegationToken(renewer); + return TracingUtil.executeInNewSpan("ofs getDelegationToken", + () -> adapter.getDelegationToken(renewer)); } /** @@ -1014,7 +1041,8 @@ public boolean mkdirs(Path f, FsPermission permission) throws IOException { if (isEmpty(key)) { return false; } - return mkdir(f); + return TracingUtil.executeInNewSpan("ofs mkdirs", + () -> mkdir(f)); } @Override @@ -1025,7 +1053,8 @@ public long getDefaultBlockSize() { @Override public FileStatus getFileStatus(Path f) throws IOException { - return convertFileStatus(getFileStatusAdapter(f)); + return TracingUtil.executeInNewSpan("ofs getFileStatus", + () -> convertFileStatus(getFileStatusAdapter(f))); } public FileStatusAdapter getFileStatusAdapter(Path f) throws IOException { @@ -1096,7 +1125,8 @@ public boolean exists(Path f) throws IOException { public FileChecksum getFileChecksum(Path f, long length) throws IOException { incrementCounter(Statistic.INVOCATION_GET_FILE_CHECKSUM); String key = pathToKey(f); - return adapter.getFileChecksum(key, length); + return TracingUtil.executeInNewSpan("ofs getFileChecksum", + () -> adapter.getFileChecksum(key, length)); } @Override @@ -1508,6 +1538,11 @@ FileStatus convertFileStatus(FileStatusAdapter fileStatusAdapter) { @Override public ContentSummary getContentSummary(Path f) throws IOException { + return TracingUtil.executeInNewSpan("ofs getContentSummary", + () -> getContentSummaryInSpan(f)); + } + + private ContentSummary getContentSummaryInSpan(Path f) throws IOException { FileStatusAdapter status = getFileStatusAdapter(f); if (status.isFile()) { @@ -1583,7 +1618,8 @@ public void setTimes(Path f, long mtime, long atime) throws IOException { if (key.equals("NONE")) { throw new FileNotFoundException("File not found. 
path /NONE."); } - adapter.setTimes(key, mtime, atime); + TracingUtil.executeInNewSpan("ofs setTimes", + () -> adapter.setTimes(key, mtime, atime)); } protected boolean setSafeModeUtil(SafeModeAction action, @@ -1595,6 +1631,7 @@ protected boolean setSafeModeUtil(SafeModeAction action, statistics.incrementWriteOps(1); } LOG.trace("setSafeMode() action:{}", action); - return getAdapter().setSafeMode(action, isChecked); + return TracingUtil.executeInNewSpan("ofs setSafeMode", + () -> getAdapter().setSafeMode(action, isChecked)); } } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java index b1d7d92e9f2e..4dc70bfa569d 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java @@ -24,6 +24,9 @@ import java.nio.ByteBuffer; import java.nio.ReadOnlyBufferException; +import io.opentracing.Scope; +import io.opentracing.Span; +import io.opentracing.util.GlobalTracer; import org.apache.hadoop.fs.CanUnbuffer; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; @@ -32,6 +35,7 @@ import org.apache.hadoop.fs.FSInputStream; import org.apache.hadoop.fs.FileSystem.Statistics; import org.apache.hadoop.fs.Seekable; +import org.apache.hadoop.hdds.tracing.TracingUtil; /** * The input stream for Ozone file system. @@ -54,25 +58,40 @@ public OzoneFSInputStream(InputStream inputStream, Statistics statistics) { @Override public int read() throws IOException { - int byteRead = inputStream.read(); - if (statistics != null && byteRead >= 0) { - statistics.incrementBytesRead(1); + Span span = GlobalTracer.get() + .buildSpan("OzoneFSInputStream.read").start(); + try (Scope scope = GlobalTracer.get().activateSpan(span)) { + int byteRead = inputStream.read(); + if (statistics != null && byteRead >= 0) { + statistics.incrementBytesRead(1); + } + return byteRead; + } finally { + span.finish(); } - return byteRead; } @Override public int read(byte[] b, int off, int len) throws IOException { - int bytesRead = inputStream.read(b, off, len); - if (statistics != null && bytesRead >= 0) { - statistics.incrementBytesRead(bytesRead); + Span span = GlobalTracer.get() + .buildSpan("OzoneFSInputStream.read").start(); + try (Scope scope = GlobalTracer.get().activateSpan(span)) { + span.setTag("offset", off) + .setTag("length", len); + int bytesRead = inputStream.read(b, off, len); + if (statistics != null && bytesRead >= 0) { + statistics.incrementBytesRead(bytesRead); + } + return bytesRead; + } finally { + span.finish(); } - return bytesRead; } @Override public synchronized void close() throws IOException { - inputStream.close(); + TracingUtil.executeInNewSpan("OzoneFSInputStream.close", + inputStream::close); } @Override @@ -103,6 +122,11 @@ public int available() throws IOException { */ @Override public int read(ByteBuffer buf) throws IOException { + return TracingUtil.executeInNewSpan("OzoneFSInputStream.read(ByteBuffer)", + () -> readInTrace(buf)); + } + + private int readInTrace(ByteBuffer buf) throws IOException { if (buf.isReadOnly()) { throw new ReadOnlyBufferException(); } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java 
b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java index 141a40469419..c5f62d6f68ba 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSOutputStream.java @@ -18,7 +18,10 @@ package org.apache.hadoop.fs.ozone; +import io.opentracing.Span; +import io.opentracing.util.GlobalTracer; import org.apache.hadoop.fs.Syncable; +import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import java.io.IOException; @@ -42,17 +45,24 @@ public OzoneFSOutputStream(OzoneOutputStream outputStream) { @Override public void write(int b) throws IOException { - outputStream.write(b); + TracingUtil.executeInNewSpan("OzoneFSOutputStream.write", + () -> outputStream.write(b)); } @Override public void write(byte[] b, int off, int len) throws IOException { - outputStream.write(b, off, len); + TracingUtil.executeInNewSpan("OzoneFSOutputStream.write", + () -> { + Span span = GlobalTracer.get().activeSpan(); + span.setTag("length", len); + outputStream.write(b, off, len); + }); } @Override public synchronized void flush() throws IOException { - outputStream.flush(); + TracingUtil.executeInNewSpan("OzoneFSOutputStream.flush", + outputStream::flush); } @Override @@ -67,7 +77,8 @@ public void hflush() throws IOException { @Override public void hsync() throws IOException { - outputStream.hsync(); + TracingUtil.executeInNewSpan("OzoneFSOutputStream.hsync", + outputStream::hsync); } protected OzoneOutputStream getWrappedOutputStream() { diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java index cb7c8f16ea12..c06a6b7644e8 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java @@ -19,18 +19,20 @@ package org.apache.hadoop.fs.ozone; import com.google.common.base.Strings; +import io.opentracing.util.GlobalTracer; +import org.apache.hadoop.crypto.key.KeyProvider; +import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LeaseRecoverable; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.SafeMode; import org.apache.hadoop.fs.SafeModeAction; +import org.apache.hadoop.fs.StorageStatistics; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.crypto.key.KeyProvider; -import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.StorageStatistics; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.LeaseKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; @@ -138,6 +140,11 @@ public boolean hasPathCapability(final Path path, final String capability) @Override public boolean recoverLease(final Path f) throws IOException { + return TracingUtil.executeInNewSpan("ofs recoverLease", + () -> recoverLeaseTraced(f)); + } + private boolean 
recoverLeaseTraced(final Path f) throws IOException { + GlobalTracer.get().activeSpan().setTag("path", f.toString()); statistics.incrementWriteOps(1); LOG.trace("recoverLease() path:{}", f); Path qualifiedPath = makeQualified(f); @@ -197,6 +204,12 @@ public boolean recoverLease(final Path f) throws IOException { @Override public boolean isFileClosed(Path f) throws IOException { + return TracingUtil.executeInNewSpan("ofs isFileClosed", + () -> isFileClosedTraced(f)); + } + + private boolean isFileClosedTraced(Path f) throws IOException { + GlobalTracer.get().activeSpan().setTag("path", f.toString()); statistics.incrementWriteOps(1); LOG.trace("isFileClosed() path:{}", f); Path qualifiedPath = makeQualified(f); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java index 06f175c0dc4e..afc9c8a3239a 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java @@ -397,10 +397,12 @@ public Response getUnhealthyContainers( summary = containerHealthSchemaManager.getUnhealthyContainersSummary(); List containers = containerHealthSchemaManager .getUnhealthyContainers(internalState, offset, limit); - containers.stream() + List emptyMissingFiltered = containers.stream() .filter( - container -> !container.getContainerState().equals(UnHealthyContainerStates.EMPTY_MISSING.toString())); - for (UnhealthyContainers c : containers) { + container -> !container.getContainerState().equals(UnHealthyContainerStates.EMPTY_MISSING.toString())) + .collect( + Collectors.toList()); + for (UnhealthyContainers c : emptyMissingFiltered) { long containerID = c.getContainerId(); ContainerInfo containerInfo = containerManager.getContainer(ContainerID.valueOf(containerID)); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java index 34dcba40f81b..266caaa2d8e2 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java @@ -17,9 +17,11 @@ */ package org.apache.hadoop.ozone.recon.api.handlers; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -163,6 +165,8 @@ public static BucketHandler getBucketHandler( ReconOMMetadataManager omMetadataManager, OzoneStorageContainerManager reconSCM, OmBucketInfo bucketInfo) throws IOException { + // Check if enableFileSystemPaths flag is set to true. 
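The ozonefs changes above wrap each filesystem entry point either in TracingUtil.executeInNewSpan(...) or in an explicit buildSpan/activateSpan/finish block, tagging the span with the path or the offset and length. A minimal standalone example of the manual pattern with the OpenTracing API; with no tracer registered, GlobalTracer.get() is a no-op, so the sketch is safe to run as-is.

```java
import io.opentracing.Scope;
import io.opentracing.Span;
import io.opentracing.util.GlobalTracer;

// Manual span management around an operation, in the style of OzoneFSInputStream.read().
final class TracedReadSketch {

  static int tracedRead(byte[] buffer, int off, int len) {
    Span span = GlobalTracer.get().buildSpan("sketch.read").start();
    try (Scope scope = GlobalTracer.get().activateSpan(span)) {
      span.setTag("offset", off)
          .setTag("length", len);
      return doRead(buffer, off, len);        // the actual work happens inside the span
    } finally {
      span.finish();                           // always end the span, even on exceptions
    }
  }

  private static int doRead(byte[] buffer, int off, int len) {
    return Math.min(len, buffer.length - off); // placeholder for the real I/O
  }

  public static void main(String[] args) {
    System.out.println(tracedRead(new byte[16], 4, 8));
  }
}
```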
+ boolean enableFileSystemPaths = isEnableFileSystemPaths(omMetadataManager); // If bucketInfo is null then entity type is UNKNOWN if (Objects.isNull(bucketInfo)) { @@ -172,10 +176,17 @@ public static BucketHandler getBucketHandler( .equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) { return new FSOBucketHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketInfo); - } else if (bucketInfo.getBucketLayout() - .equals(BucketLayout.LEGACY)) { - return new LegacyBucketHandler(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, bucketInfo); + } else if (bucketInfo.getBucketLayout().equals(BucketLayout.LEGACY)) { + // Choose handler based on enableFileSystemPaths flag for legacy layout. + // If enableFileSystemPaths is false, then the legacy bucket is treated + // as an OBS bucket. + if (enableFileSystemPaths) { + return new LegacyBucketHandler(reconNamespaceSummaryManager, + omMetadataManager, reconSCM, bucketInfo); + } else { + return new OBSBucketHandler(reconNamespaceSummaryManager, + omMetadataManager, reconSCM, bucketInfo); + } } else if (bucketInfo.getBucketLayout() .equals(BucketLayout.OBJECT_STORE)) { return new OBSBucketHandler(reconNamespaceSummaryManager, @@ -188,6 +199,22 @@ public static BucketHandler getBucketHandler( } } + /** + * Determines whether FileSystemPaths are enabled for Legacy Buckets + * based on the Ozone configuration. + * + * @param ReconOMMetadataManager Instance + * @return True if FileSystemPaths are enabled, false otherwise. + */ + private static boolean isEnableFileSystemPaths(ReconOMMetadataManager omMetadataManager) { + OzoneConfiguration configuration = omMetadataManager.getOzoneConfiguration(); + if (configuration == null) { + configuration = new OzoneConfiguration(); + } + return configuration.getBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, + OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT); + } + public static BucketHandler getBucketHandler( ReconNamespaceSummaryManager reconNamespaceSummaryManager, ReconOMMetadataManager omMetadataManager, diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java index d12c7b6545ac..4f9e68ddff95 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/EntityHandler.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.recon.ReconConstants; import org.apache.hadoop.ozone.recon.api.types.NamespaceSummaryResponse; import org.apache.hadoop.ozone.recon.api.types.DUResponse; @@ -60,9 +61,18 @@ public EntityHandler( this.omMetadataManager = omMetadataManager; this.reconSCM = reconSCM; this.bucketHandler = bucketHandler; - normalizedPath = normalizePath(path); - names = parseRequestPath(normalizedPath); + // Defaulting to FILE_SYSTEM_OPTIMIZED if bucketHandler is null + BucketLayout layout = + (bucketHandler != null) ? bucketHandler.getBucketLayout() : + BucketLayout.FILE_SYSTEM_OPTIMIZED; + + // Normalize the path based on the determined layout + normalizedPath = normalizePath(path, layout); + + // Choose the parsing method based on the bucket layout + names = (layout == BucketLayout.OBJECT_STORE) ? 
+ parseObjectStorePath(normalizedPath) : parseRequestPath(normalizedPath); } public abstract NamespaceSummaryResponse getSummaryResponse() @@ -118,7 +128,8 @@ public static EntityHandler getEntityHandler( String path) throws IOException { BucketHandler bucketHandler; - String normalizedPath = normalizePath(path); + String normalizedPath = + normalizePath(path, BucketLayout.FILE_SYSTEM_OPTIMIZED); String[] names = parseRequestPath(normalizedPath); if (path.equals(OM_KEY_PREFIX)) { return EntityType.ROOT.create(reconNamespaceSummaryManager, @@ -156,23 +167,36 @@ public static EntityHandler getEntityHandler( String volName = names[0]; String bucketName = names[1]; - String keyName = BucketHandler.getKeyName(names); - + // Assuming getBucketHandler already validates volume and bucket existence bucketHandler = BucketHandler.getBucketHandler( - reconNamespaceSummaryManager, - omMetadataManager, reconSCM, - volName, bucketName); + reconNamespaceSummaryManager, omMetadataManager, reconSCM, volName, + bucketName); - // check if either volume or bucket doesn't exist - if (bucketHandler == null - || !omMetadataManager.volumeExists(volName) - || !bucketHandler.bucketExists(volName, bucketName)) { + if (bucketHandler == null) { return EntityType.UNKNOWN.create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, null, path); + omMetadataManager, reconSCM, null, path); + } + + // Directly handle path normalization and parsing based on the layout + if (bucketHandler.getBucketLayout() == BucketLayout.OBJECT_STORE) { + String[] parsedObjectLayoutPath = parseObjectStorePath( + normalizePath(path, bucketHandler.getBucketLayout())); + if (parsedObjectLayoutPath == null) { + return EntityType.UNKNOWN.create(reconNamespaceSummaryManager, + omMetadataManager, reconSCM, null, path); + } + // Use the key part directly from the parsed path + return bucketHandler.determineKeyPath(parsedObjectLayoutPath[2]) + .create(reconNamespaceSummaryManager, omMetadataManager, reconSCM, + bucketHandler, path); + } else { + // Use the existing names array for non-OBJECT_STORE layouts to derive + // the keyName + String keyName = BucketHandler.getKeyName(names); + return bucketHandler.determineKeyPath(keyName) + .create(reconNamespaceSummaryManager, omMetadataManager, reconSCM, + bucketHandler, path); } - return bucketHandler.determineKeyPath(keyName) - .create(reconNamespaceSummaryManager, - omMetadataManager, reconSCM, bucketHandler, path); } } @@ -256,7 +280,52 @@ public static String[] parseRequestPath(String path) { return names; } - private static String normalizePath(String path) { + /** + * Splits an object store path into volume, bucket, and key name components. + * + * This method parses a path of the format "/volumeName/bucketName/keyName", + * including paths with additional '/' characters within the key name. It's + * designed for object store paths where the first three '/' characters + * separate the root, volume and bucket names from the key name. + * + * @param path The object store path to parse, starting with a slash. + * @return A String array with three elements: volume name, bucket name, and + * key name, or {null} if the path format is invalid. 
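The parseObjectStorePath implementation that follows relies on String.split with a limit of 3, so everything after the bucket, including further '/' characters, stays in the key component. A tiny illustrative check of that behaviour (not the Recon class itself):

```java
import java.util.Arrays;

// Demonstrates the split("/", 3) behaviour the parser builds on:
// the limit keeps any extra '/' characters inside the key part.
final class ObjectStorePathSplitSketch {
  public static void main(String[] args) {
    String path = "/vol1/bucket1/dir1/dir2/key1";
    String[] parts = path.substring(1).split("/", 3);   // drop leading slash, then split
    System.out.println(Arrays.toString(parts));
    // -> [vol1, bucket1, dir1/dir2/key1]  : volume, bucket, and the remaining key
  }
}
```

Note that a path containing only a volume and bucket yields fewer than three elements, so callers of the real method have to handle the shorter array.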
+ */ + public static String[] parseObjectStorePath(String path) { + // Removing the leading slash for correct splitting + path = path.substring(1); + + // Splitting the modified path by "/", limiting to 3 parts + String[] parts = path.split("/", 3); + + // Checking if we correctly obtained 3 parts after removing the leading slash + if (parts.length <= 3) { + return parts; + } else { + return null; + } + } + + /** + * Normalizes a given path based on the specified bucket layout. + * + * This method adjusts the path according to the bucket layout. + * For {OBJECT_STORE Layout}, it normalizes the path up to the bucket level + * using OmUtils.normalizePathUptoBucket. For other layouts, it + * normalizes the entire path, including the key, using + * OmUtils.normalizeKey, and does not preserve any trailing slashes. + * The normalized path will always be prefixed with OM_KEY_PREFIX to ensure it + * is consistent with the expected format for object storage paths in Ozone. + * + * @param path + * @param bucketLayout + * @return A normalized path + */ + private static String normalizePath(String path, BucketLayout bucketLayout) { + if (bucketLayout == BucketLayout.OBJECT_STORE) { + return OM_KEY_PREFIX + OmUtils.normalizePathUptoBucket(path); + } return OM_KEY_PREFIX + OmUtils.normalizeKey(path, false); } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java index 2040b7b343d9..1fc114eabd75 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOMMetadataManager.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.util.List; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; @@ -105,4 +106,11 @@ List listBucketsUnderVolume(String volumeName, */ List listBucketsUnderVolume( String volumeName) throws IOException; + + /** + * Return the OzoneConfiguration instance used by Recon. 
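Recon's handler selection above keys off ozone.om.enable.filesystem.paths, read from the OzoneConfiguration that ReconOMMetadataManager now exposes, falling back to the default when no configuration is available. A minimal sketch of that lookup using Hadoop's Configuration directly; the literal key string and the false default are assumptions standing in for the OMConfigKeys constants.

```java
import org.apache.hadoop.conf.Configuration;

// Sketch of the flag lookup behind isEnableFileSystemPaths(): a boolean config
// read with an explicit default. Key string and default value are assumed here.
final class FileSystemPathsFlagSketch {
  private static final String KEY = "ozone.om.enable.filesystem.paths"; // assumed literal
  private static final boolean DEFAULT = false;                          // assumed default

  static boolean isEnableFileSystemPaths(Configuration conf) {
    if (conf == null) {
      conf = new Configuration();          // fall back to defaults, as the Recon code does
    }
    return conf.getBoolean(KEY, DEFAULT);
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setBoolean(KEY, true);
    System.out.println(isEnableFileSystemPaths(conf));  // true  -> legacy handled as FS paths
    System.out.println(isEnableFileSystemPaths(null));  // false -> legacy handled as OBS
  }
}
```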
+ * @return + */ + OzoneConfiguration getOzoneConfiguration(); + } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java index ad0526363df0..4b041f6511f6 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java @@ -291,6 +291,11 @@ public List listBucketsUnderVolume(final String volumeName) Integer.MAX_VALUE); } + @Override + public OzoneConfiguration getOzoneConfiguration() { + return ozoneConfiguration; + } + private List listAllBuckets(final int maxNumberOfBuckets) throws IOException { List result = new ArrayList<>(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java index 5c3395084464..30fdb7c1292e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java @@ -63,7 +63,7 @@ */ public class NSSummaryTask implements ReconOmTask { private static final Logger LOG = - LoggerFactory.getLogger(NSSummaryTask.class); + LoggerFactory.getLogger(NSSummaryTask.class); private final ReconNamespaceSummaryManager reconNamespaceSummaryManager; private final ReconOMMetadataManager reconOMMetadataManager; @@ -173,4 +173,3 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { } } - diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java index ec1ccd0542fc..4555b976ffed 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithLegacy.java @@ -47,7 +47,7 @@ */ public class NSSummaryTaskWithLegacy extends NSSummaryTaskDbEventHandler { - private static final BucketLayout BUCKET_LAYOUT = BucketLayout.LEGACY; + private static final BucketLayout LEGACY_BUCKET_LAYOUT = BucketLayout.LEGACY; private static final Logger LOG = LoggerFactory.getLogger(NSSummaryTaskWithLegacy.class); @@ -71,16 +71,17 @@ public NSSummaryTaskWithLegacy(ReconNamespaceSummaryManager public boolean processWithLegacy(OMUpdateEventBatch events) { Iterator eventIterator = events.getIterator(); Map nsSummaryMap = new HashMap<>(); + ReconOMMetadataManager metadataManager = getReconOMMetadataManager(); while (eventIterator.hasNext()) { - OMDBUpdateEvent omdbUpdateEvent = eventIterator.next(); + OMDBUpdateEvent omdbUpdateEvent = + eventIterator.next(); OMDBUpdateEvent.OMDBUpdateAction action = omdbUpdateEvent.getAction(); // we only process updates on OM's KeyTable String table = omdbUpdateEvent.getTable(); - boolean updateOnKeyTable = table.equals(KEY_TABLE); - if (!updateOnKeyTable) { + + if (!table.equals(KEY_TABLE)) { continue; } @@ -90,102 +91,26 @@ public boolean processWithLegacy(OMUpdateEventBatch events) { OMDBUpdateEvent keyTableUpdateEvent = omdbUpdateEvent; Object value = keyTableUpdateEvent.getValue(); Object oldValue = keyTableUpdateEvent.getOldValue(); + if (!(value instanceof OmKeyInfo)) { LOG.warn("Unexpected value type {} 
for key {}. Skipping processing.", value.getClass().getName(), updatedKey); continue; } + OmKeyInfo updatedKeyInfo = (OmKeyInfo) value; OmKeyInfo oldKeyInfo = (OmKeyInfo) oldValue; - // KeyTable entries belong to both Legacy and OBS buckets. - // Check bucket layout and if it's OBS - // continue to the next iteration. - // Check just for the current KeyInfo. - String volumeName = updatedKeyInfo.getVolumeName(); - String bucketName = updatedKeyInfo.getBucketName(); - String bucketDBKey = getReconOMMetadataManager() - .getBucketKey(volumeName, bucketName); - // Get bucket info from bucket table - OmBucketInfo omBucketInfo = getReconOMMetadataManager() - .getBucketTable().getSkipCache(bucketDBKey); - - if (omBucketInfo.getBucketLayout() - .isObjectStore(enableFileSystemPaths)) { + if (!isBucketLayoutValid(metadataManager, updatedKeyInfo)) { continue; } - setKeyParentID(updatedKeyInfo); - - if (!updatedKeyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { - switch (action) { - case PUT: - handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); - break; - - case DELETE: - handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap); - break; - - case UPDATE: - if (oldKeyInfo != null) { - // delete first, then put - setKeyParentID(oldKeyInfo); - handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap); - } else { - LOG.warn("Update event does not have the old keyInfo for {}.", - updatedKey); - } - handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); - break; - - default: - LOG.debug("Skipping DB update event : {}", - omdbUpdateEvent.getAction()); - } + if (enableFileSystemPaths) { + processWithFileSystemLayout(updatedKeyInfo, oldKeyInfo, action, + nsSummaryMap); } else { - OmDirectoryInfo updatedDirectoryInfo = - new OmDirectoryInfo.Builder() - .setName(updatedKeyInfo.getKeyName()) - .setObjectID(updatedKeyInfo.getObjectID()) - .setParentObjectID(updatedKeyInfo.getParentObjectID()) - .build(); - - OmDirectoryInfo oldDirectoryInfo = null; - - if (oldKeyInfo != null) { - oldDirectoryInfo = - new OmDirectoryInfo.Builder() - .setName(oldKeyInfo.getKeyName()) - .setObjectID(oldKeyInfo.getObjectID()) - .setParentObjectID(oldKeyInfo.getParentObjectID()) - .build(); - } - - switch (action) { - case PUT: - handlePutDirEvent(updatedDirectoryInfo, nsSummaryMap); - break; - - case DELETE: - handleDeleteDirEvent(updatedDirectoryInfo, nsSummaryMap); - break; - - case UPDATE: - if (oldDirectoryInfo != null) { - // delete first, then put - handleDeleteDirEvent(oldDirectoryInfo, nsSummaryMap); - } else { - LOG.warn("Update event does not have the old dirInfo for {}.", - updatedKey); - } - handlePutDirEvent(updatedDirectoryInfo, nsSummaryMap); - break; - - default: - LOG.debug("Skipping DB update event : {}", - omdbUpdateEvent.getAction()); - } + processWithObjectStoreLayout(updatedKeyInfo, oldKeyInfo, action, + nsSummaryMap); } } catch (IOException ioEx) { LOG.error("Unable to process Namespace Summary data in Recon DB. 
", @@ -206,12 +131,118 @@ public boolean processWithLegacy(OMUpdateEventBatch events) { return true; } + private void processWithFileSystemLayout(OmKeyInfo updatedKeyInfo, + OmKeyInfo oldKeyInfo, + OMDBUpdateEvent.OMDBUpdateAction action, + Map nsSummaryMap) + throws IOException { + setKeyParentID(updatedKeyInfo); + + if (!updatedKeyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { + switch (action) { + case PUT: + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + + case DELETE: + handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + + case UPDATE: + if (oldKeyInfo != null) { + setKeyParentID(oldKeyInfo); + handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap); + } else { + LOG.warn("Update event does not have the old keyInfo for {}.", + updatedKeyInfo.getKeyName()); + } + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + + default: + LOG.debug("Skipping DB update event for Key: {}", action); + } + } else { + OmDirectoryInfo updatedDirectoryInfo = new OmDirectoryInfo.Builder() + .setName(updatedKeyInfo.getKeyName()) + .setObjectID(updatedKeyInfo.getObjectID()) + .setParentObjectID(updatedKeyInfo.getParentObjectID()) + .build(); + + OmDirectoryInfo oldDirectoryInfo = null; + + if (oldKeyInfo != null) { + oldDirectoryInfo = + new OmDirectoryInfo.Builder() + .setName(oldKeyInfo.getKeyName()) + .setObjectID(oldKeyInfo.getObjectID()) + .setParentObjectID(oldKeyInfo.getParentObjectID()) + .build(); + } + + switch (action) { + case PUT: + handlePutDirEvent(updatedDirectoryInfo, nsSummaryMap); + break; + + case DELETE: + handleDeleteDirEvent(updatedDirectoryInfo, nsSummaryMap); + break; + + case UPDATE: + if (oldDirectoryInfo != null) { + handleDeleteDirEvent(oldDirectoryInfo, nsSummaryMap); + } else { + LOG.warn("Update event does not have the old dirInfo for {}.", + updatedKeyInfo.getKeyName()); + } + handlePutDirEvent(updatedDirectoryInfo, nsSummaryMap); + break; + + default: + LOG.debug("Skipping DB update event for Directory: {}", action); + } + } + } + + private void processWithObjectStoreLayout(OmKeyInfo updatedKeyInfo, + OmKeyInfo oldKeyInfo, + OMDBUpdateEvent.OMDBUpdateAction action, + Map nsSummaryMap) + throws IOException { + setParentBucketId(updatedKeyInfo); + + switch (action) { + case PUT: + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + + case DELETE: + handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + + case UPDATE: + if (oldKeyInfo != null) { + setParentBucketId(oldKeyInfo); + handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap); + } else { + LOG.warn("Update event does not have the old keyInfo for {}.", + updatedKeyInfo.getKeyName()); + } + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + + default: + LOG.debug("Skipping DB update event for Key: {}", action); + } + } + public boolean reprocessWithLegacy(OMMetadataManager omMetadataManager) { Map nsSummaryMap = new HashMap<>(); try { Table keyTable = - omMetadataManager.getKeyTable(BUCKET_LAYOUT); + omMetadataManager.getKeyTable(LEGACY_BUCKET_LAYOUT); try (TableIterator> keyTableIter = keyTable.iterator()) { @@ -223,30 +254,29 @@ public boolean reprocessWithLegacy(OMMetadataManager omMetadataManager) { // KeyTable entries belong to both Legacy and OBS buckets. // Check bucket layout and if it's OBS // continue to the next iteration. 
- String volumeName = keyInfo.getVolumeName(); - String bucketName = keyInfo.getBucketName(); - String bucketDBKey = omMetadataManager - .getBucketKey(volumeName, bucketName); - // Get bucket info from bucket table - OmBucketInfo omBucketInfo = omMetadataManager - .getBucketTable().getSkipCache(bucketDBKey); - - if (omBucketInfo.getBucketLayout() - .isObjectStore(enableFileSystemPaths)) { + if (!isBucketLayoutValid((ReconOMMetadataManager) omMetadataManager, + keyInfo)) { continue; } - setKeyParentID(keyInfo); - - if (keyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { - OmDirectoryInfo directoryInfo = - new OmDirectoryInfo.Builder() - .setName(keyInfo.getKeyName()) - .setObjectID(keyInfo.getObjectID()) - .setParentObjectID(keyInfo.getParentObjectID()) - .build(); - handlePutDirEvent(directoryInfo, nsSummaryMap); + if (enableFileSystemPaths) { + // The LEGACY bucket is a file system bucket. + setKeyParentID(keyInfo); + + if (keyInfo.getKeyName().endsWith(OM_KEY_PREFIX)) { + OmDirectoryInfo directoryInfo = + new OmDirectoryInfo.Builder() + .setName(keyInfo.getKeyName()) + .setObjectID(keyInfo.getObjectID()) + .setParentObjectID(keyInfo.getParentObjectID()) + .build(); + handlePutDirEvent(directoryInfo, nsSummaryMap); + } else { + handlePutKeyEvent(keyInfo, nsSummaryMap); + } } else { + // The LEGACY bucket is an object store bucket. + setParentBucketId(keyInfo); handlePutKeyEvent(keyInfo, nsSummaryMap); } if (!checkAndCallFlushToDB(nsSummaryMap)) { @@ -290,7 +320,7 @@ private void setKeyParentID(OmKeyInfo keyInfo) throws IOException { getReconOMMetadataManager().getOzoneKey(keyInfo.getVolumeName(), keyInfo.getBucketName(), parentKeyName); OmKeyInfo parentKeyInfo = getReconOMMetadataManager() - .getKeyTable(BUCKET_LAYOUT) + .getKeyTable(LEGACY_BUCKET_LAYOUT) .getSkipCache(fullParentKeyName); if (parentKeyInfo != null) { @@ -300,17 +330,53 @@ private void setKeyParentID(OmKeyInfo keyInfo) throws IOException { "NSSummaryTaskWithLegacy is null"); } } else { - String bucketKey = getReconOMMetadataManager() - .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName()); - OmBucketInfo parentBucketInfo = - getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey); + setParentBucketId(keyInfo); + } + } - if (parentBucketInfo != null) { - keyInfo.setParentObjectID(parentBucketInfo.getObjectID()); - } else { - throw new IOException("ParentKeyInfo for " + - "NSSummaryTaskWithLegacy is null"); - } + /** + * Set the parent object ID for a bucket. + * @param keyInfo + * @throws IOException + */ + private void setParentBucketId(OmKeyInfo keyInfo) + throws IOException { + String bucketKey = getReconOMMetadataManager() + .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName()); + OmBucketInfo parentBucketInfo = + getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey); + + if (parentBucketInfo != null) { + keyInfo.setParentObjectID(parentBucketInfo.getObjectID()); + } else { + throw new IOException("ParentKeyInfo for " + + "NSSummaryTaskWithLegacy is null"); + } } } + + /** + * Check if the bucket layout is LEGACY. 
+ * @param metadataManager + * @param keyInfo + * @return + */ + private boolean isBucketLayoutValid(ReconOMMetadataManager metadataManager, + OmKeyInfo keyInfo) + throws IOException { + String volumeName = keyInfo.getVolumeName(); + String bucketName = keyInfo.getBucketName(); + String bucketDBKey = metadataManager.getBucketKey(volumeName, bucketName); + OmBucketInfo omBucketInfo = + metadataManager.getBucketTable().getSkipCache(bucketDBKey); + + if (omBucketInfo.getBucketLayout() != LEGACY_BUCKET_LAYOUT) { + LOG.debug( + "Skipping processing for bucket {} as bucket layout is not LEGACY", + bucketName); + return false; + } + + return true; + } + } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json index 3d1528fccb3e..41987c00ef35 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json @@ -16,7 +16,7 @@ "ag-charts-community": "^7.3.0", "ag-charts-react": "^7.3.0", "antd": "^3.26.20", - "axios": "^0.27.2", + "axios": "^0.28.0", "babel-jest": "^24.9.0", "babel-plugin-import": "^1.13.8", "classnames": "^2.3.2", @@ -25,7 +25,7 @@ "less": "^3.13.1", "less-loader": "^5.0.0", "moment": "^2.29.4", - "plotly.js": "^1.58.5", + "plotly.js": "^2.25.2", "pretty-ms": "^5.1.0", "react": "^16.8.6", "react-app-rewired": "^2.2.1", diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml index 1dab3d583519..957a0ed5d152 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml @@ -17,7 +17,7 @@ specifiers: ag-charts-community: ^7.3.0 ag-charts-react: ^7.3.0 antd: ^3.26.20 - axios: ^0.27.2 + axios: ^0.28.0 babel-jest: ^24.9.0 babel-plugin-import: ^1.13.8 classnames: ^2.3.2 @@ -35,7 +35,7 @@ specifiers: less-loader: ^5.0.0 moment: ^2.29.4 npm-run-all: ^4.1.5 - plotly.js: ^1.58.5 + plotly.js: ^2.25.2 pretty-ms: ^5.1.0 react: ^16.8.6 react-app-rewired: ^2.2.1 @@ -61,7 +61,7 @@ dependencies: ag-charts-community: 7.3.0 ag-charts-react: 7.3.0_4uflhkpzmxcxyxkuqg2ofty3gq antd: 3.26.20_wcqkhtmu7mswc6yz4uyexck3ty - axios: 0.27.2 + axios: 0.28.0 babel-jest: 24.9.0_@babel+core@7.22.11 babel-plugin-import: 1.13.8 classnames: 2.3.2 @@ -70,12 +70,12 @@ dependencies: less: 3.13.1 less-loader: 5.0.0_less@3.13.1 moment: 2.29.4 - plotly.js: 1.58.5 + plotly.js: 2.25.2 pretty-ms: 5.1.0 react: 16.14.0 react-app-rewired: 2.2.1_react-scripts@3.4.4 react-dom: 16.14.0_react@16.14.0 - react-plotly.js: 2.6.0_f6dluzp62qf57yw3gl4ocsg3e4 + react-plotly.js: 2.6.0_qtjenpcawcnnxnr626ndcvhi4u react-router: 5.3.4_react@16.14.0 react-router-dom: 5.3.4_react@16.14.0 react-scripts: 3.4.4_bo7u2dcgnntwwyyxmecoaqdaee @@ -100,14 +100,6 @@ devDependencies: packages: - /3d-view/2.0.1: - resolution: {integrity: sha512-YSLRHXNpSziaaiK2R0pI5+JKguoJVbtWmIv9YyBFtl0+q42kQwJB/JUulbFR/1zYFm58ifjKQ6kVdgZ6tyKtCA==} - dependencies: - matrix-camera-controller: 2.1.4 - orbit-camera-controller: 4.0.0 - turntable-camera-controller: 3.0.1 - dev: false - /@ampproject/remapping/2.2.1: resolution: {integrity: sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==} engines: {node: '>=6.0.0'} @@ -239,7 +231,7 @@ packages: gensync: 
1.0.0-beta.2 json5: 2.2.3 lodash: 4.17.21 - resolve: 1.15.0 + resolve: 1.22.4 semver: 5.7.2 source-map: 0.5.7 transitivePeerDependencies: @@ -351,7 +343,7 @@ packages: '@babel/helper-plugin-utils': 7.22.5 debug: 4.3.4 lodash.debounce: 4.0.8 - resolve: 1.15.0 + resolve: 1.22.4 transitivePeerDependencies: - supports-color dev: false @@ -1842,7 +1834,7 @@ packages: '@babel/core': 7.9.0 '@babel/helper-module-imports': 7.22.5 '@babel/helper-plugin-utils': 7.22.5 - resolve: 1.15.0 + resolve: 1.22.4 semver: 5.7.2 dev: false @@ -2712,6 +2704,10 @@ packages: d3-shape: 1.3.7 dev: false + /@plotly/d3/3.8.1: + resolution: {integrity: sha512-x49ThEu1FRA00kTso4Jdfyf2byaCPLBGmLjAYQz5OzaPyLUhHesX3/Nfv2OHEhynhdy2UB39DLXq6thYe2L2kg==} + dev: false + /@plotly/point-cluster/3.1.9: resolution: {integrity: sha512-MwaI6g9scKf68Orpr1pHZ597pYx9uP8UEFXLPbsCmuw3a84obwz6pnMXGc90VhgDNeNiLEdlmuK7CPo+5PIxXw==} dependencies: @@ -2727,6 +2723,10 @@ packages: pick-by-alias: 1.2.0 dev: false + /@plotly/regl/2.1.2: + resolution: {integrity: sha512-Mdk+vUACbQvjd0m/1JJjOOafmkp/EpmHjISsopEz5Av44CBq7rPC05HHNbYGKVyNUF2zmEoBS/TT0pd0SPFFyw==} + dev: false + /@sinclair/typebox/0.27.8: resolution: {integrity: sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==} dev: false @@ -3290,14 +3290,6 @@ packages: resolution: {integrity: sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==} dev: false - /a-big-triangle/1.0.3: - resolution: {integrity: sha512-AboEtoSPueZisde3Vr+7VRSfUIWBSGZUOtW3bJrOZXgIyK7dNNDdpDmOKJjg5GmJLlRKUONWV8lMgTK8MBhQWw==} - dependencies: - gl-buffer: 2.1.2 - gl-vao: 1.3.0 - weak-map: 1.0.8 - dev: false - /abab/2.0.6: resolution: {integrity: sha512-j2afSsaIENvHZN2B8GOpF566vZ5WVk5opAiMTvWgaQT8DkbOqsTfvNAvHoRGU2zzP8cPoqys+xHTRDWW8L+/BA==} dev: false @@ -3361,12 +3353,6 @@ packages: object-assign: 4.1.1 dev: false - /add-line-numbers/1.0.1: - resolution: {integrity: sha512-w+2a1malCvWwACQFBpZ5/uwmHGaGYT+aGIxA8ONF5vlhe6X/gD3eR8qVoLWa+5nnWAOq2LuPbrqDYqj1pn0WMg==} - dependencies: - pad-left: 1.0.2 - dev: false - /address/1.1.2: resolution: {integrity: sha512-aT6camzM4xEA54YVJYSqxz1kv4IHnQZRtThJJHhUMRExaU5spC7jX5ugSwTaTgJliIgs4VhZOk7htClvQ/LmRA==} engines: {node: '>= 0.12.0'} @@ -3380,12 +3366,6 @@ packages: regex-parser: 2.2.11 dev: false - /affine-hull/1.0.0: - resolution: {integrity: sha512-3QNG6+vFAwJvSZHsJYDJ/mt1Cxx9n5ffA+1Ohmj7udw0JuRgUVIXK0P9N9pCMuEdS3jCNt8GFX5q2fChq+GO3Q==} - dependencies: - robust-orientation: 1.2.1 - dev: false - /ag-charts-community/7.3.0: resolution: {integrity: sha512-118U6YsCMia6iZHaN06zT19rr2SYa92WB73pMVCKQlp2H3c19uKQ6Y6DfKG/nIfNUzFXZLHBwKIdZXsMWJdZww==} dev: false @@ -3439,20 +3419,6 @@ packages: resolution: {integrity: sha512-0V/PkoculFl5+0Lp47JoxUcO0xSxhIBvm+BxHdD/OgXNmdRpRHCFnKVuUoWyS9EzQP+otSGv0m9Lb4yVkQBn2A==} dev: false - /alpha-complex/1.0.0: - resolution: {integrity: sha512-rhsjKfc9tMF5QZc0NhKz/zFzMu2rvHxCP/PyJtEmMkV7M848YjIoQGDlNGp+vTqxXjA8wAY2OxgR1K54C2Awkg==} - dependencies: - circumradius: 1.0.0 - delaunay-triangulate: 1.1.6 - dev: false - - /alpha-shape/1.0.0: - resolution: {integrity: sha512-/V+fmmjtSA2yfQNq8iEqBxnPbjcOMXpM9Ny+yE/O7aLR7Q1oPzUc9bHH0fPHS3hUugUL/dHzTis6l3JirYOS/w==} - dependencies: - alpha-complex: 1.0.0 - simplicial-complex-boundary: 1.0.1 - dev: false - /alphanum-sort/1.0.2: resolution: {integrity: sha512-0FcBfdcmaumGPQ0qPn7Q5qTgz/ooXgIyp1rf8ik5bGX8mpE2YHjC0P/eyQvxu1GURYQgq9ozf2mteQ5ZD9YiyQ==} dev: false @@ -3792,6 +3758,13 @@ packages: resolution: {integrity: 
sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==} dev: false + /asn1.js/4.10.1: + resolution: {integrity: sha512-p32cOF5q0Zqs9uBiONKYLm6BClCoBCM5O9JfeUSlnQLBTxYdTK+pW+nXflm8UkKd2UYlEbYz5qEi0JuZR9ckSw==} + dependencies: + bn.js: 4.12.0 + inherits: 2.0.4 + minimalistic-assert: 1.0.1 + /asn1.js/5.4.1: resolution: {integrity: sha512-+I//4cYPccV8LdmBLiX8CYvf9Sp3vQsrqu2QNXRcrbiWvcx/UdlFiqUJJzxRQxgsZmvhXhn4cSKeSmoFjVdupA==} dependencies: @@ -3859,10 +3832,6 @@ packages: engines: {node: '>= 4.0.0'} dev: true - /atob-lite/1.0.0: - resolution: {integrity: sha512-ArXcmHR/vwSN37HLVap/Y5SKpz12CuEybxe1sIYl7th/S6SQPrVMNFt6rblJzCOAxn0SHbXpknUtqbAIeo3Aow==} - dev: false - /atob/2.1.2: resolution: {integrity: sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==} engines: {node: '>= 4.5.0'} @@ -3891,11 +3860,12 @@ packages: /aws4/1.12.0: resolution: {integrity: sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg==} - /axios/0.27.2: - resolution: {integrity: sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==} + /axios/0.28.0: + resolution: {integrity: sha512-Tu7NYoGY4Yoc7I+Npf9HhUMtEEpV7ZiLH9yndTCoNhcpBH0kwcvFbzYN9/u5QKI5A6uefjsNNWaz5olJVYS62Q==} dependencies: follow-redirects: 1.15.6 form-data: 4.0.0 + proxy-from-env: 1.1.0 transitivePeerDependencies: - debug dev: false @@ -3925,7 +3895,7 @@ packages: '@babel/types': 7.22.11 eslint: 6.8.0 eslint-visitor-keys: 1.3.0 - resolve: 1.15.0 + resolve: 1.22.4 transitivePeerDependencies: - supports-color dev: false @@ -4034,7 +4004,7 @@ packages: dependencies: '@babel/runtime': 7.9.0 cosmiconfig: 6.0.0 - resolve: 1.15.0 + resolve: 1.22.4 dev: false /babel-plugin-named-asset-import/0.3.8_@babel+core@7.9.0: @@ -4159,12 +4129,6 @@ packages: /balanced-match/1.0.2: resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} - /barycentric/1.0.1: - resolution: {integrity: sha512-47BuWXsenBbox4q1zqJrUoxq1oM1ysrYc5mdBACAwaP+CL+tcNauC3ybA0lzbIWzJCLZYMqebAx46EauTI2Nrg==} - dependencies: - robust-linear-solve: 1.0.0 - dev: false - /base/0.11.2: resolution: {integrity: sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==} engines: {node: '>=0.10.0'} @@ -4196,14 +4160,6 @@ packages: dependencies: tweetnacl: 0.14.5 - /big-rat/1.0.4: - resolution: {integrity: sha512-AubEohDDrak6urvKkFMIlwPWyQbJ/eq04YsK/SNipH7NNiPCYchjQNvWYK5vyyMmtGXAmNmsAjIcfkaDuTtd8g==} - dependencies: - bit-twiddle: 1.0.2 - bn.js: 4.12.0 - double-bits: 1.1.1 - dev: false - /big.js/5.2.2: resolution: {integrity: sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==} dev: false @@ -4230,10 +4186,6 @@ packages: dev: false optional: true - /bit-twiddle/0.0.2: - resolution: {integrity: sha512-76iFAOrkcuw5UPA30Pt32XaytMHXz/04JembgIwsQAp7ImHYSWNq1shBbrlWf6CUvh1+amQ81LI8hNhqQgsBEw==} - dev: false - /bit-twiddle/1.0.2: resolution: {integrity: sha512-B9UhK0DKFZhoTFcfvAzhqsjStvGJp9vYWf3+6SNTtdSQnvIgfkHbgHrg/e4+TH71N2GDu8tpmCVoyfrL1d7ntA==} dev: false @@ -4259,8 +4211,8 @@ packages: /bn.js/5.2.1: resolution: {integrity: sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ==} - /body-parser/1.20.1: - resolution: {integrity: sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==} + /body-parser/1.20.2: + resolution: {integrity: 
sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==} engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} dependencies: bytes: 3.1.2 @@ -4272,40 +4224,20 @@ packages: iconv-lite: 0.4.24 on-finished: 2.4.1 qs: 6.11.0 - raw-body: 2.5.1 + raw-body: 2.5.2 type-is: 1.6.18 unpipe: 1.0.0 transitivePeerDependencies: - supports-color dev: true - /body-parser/1.20.1_supports-color@6.1.0: - resolution: {integrity: sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==} - engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} - dependencies: - bytes: 3.1.2 - content-type: 1.0.5 - debug: 2.6.9_supports-color@6.1.0 - depd: 2.0.0 - destroy: 1.2.0 - http-errors: 2.0.0 - iconv-lite: 0.4.24 - on-finished: 2.4.1 - qs: 6.11.0 - raw-body: 2.5.1 - type-is: 1.6.18 - unpipe: 1.0.0 - transitivePeerDependencies: - - supports-color - dev: false - - /body-parser/1.20.2: + /body-parser/1.20.2_supports-color@6.1.0: resolution: {integrity: sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==} engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} dependencies: bytes: 3.1.2 content-type: 1.0.5 - debug: 2.6.9 + debug: 2.6.9_supports-color@6.1.0 depd: 2.0.0 destroy: 1.2.0 http-errors: 2.0.0 @@ -4317,7 +4249,7 @@ packages: unpipe: 1.0.0 transitivePeerDependencies: - supports-color - dev: true + dev: false /bonjour/3.5.0: resolution: {integrity: sha512-RaVTblr+OnEli0r/ud8InrU7D+G0y6aJhlxaLa6Pwty4+xoxboF1BsUI45tujvRpbj9dQVoglChqonGAsjEBYg==} @@ -4334,17 +4266,6 @@ packages: resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==} dev: false - /boundary-cells/2.0.2: - resolution: {integrity: sha512-/S48oUFYEgZMNvdqC87iYRbLBAPHYijPRNrNpm/sS8u7ijIViKm/hrV3YD4sx/W68AsG5zLMyBEditVHApHU5w==} - dev: false - - /box-intersect/1.0.2: - resolution: {integrity: sha512-yJeMwlmFPG1gIa7Rs/cGXeI6iOj6Qz5MG5PE61xLKpElUGzmJ4abm+qsLpzxKJFpsSDq742BQEocr8dI2t8Nxw==} - dependencies: - bit-twiddle: 1.0.2 - typedarray-pool: 1.2.0 - dev: false - /boxen/3.2.0: resolution: {integrity: sha512-cU4J/+NodM3IHdSL2yN8bqYqnmlBTidDR4RC7nJs61ZmtGz8VZzM3HLQX0zY5mrSmPtR3xWwsq2jOUQqFZN8+A==} engines: {node: '>=6'} @@ -4464,17 +4385,19 @@ packages: bn.js: 5.2.1 randombytes: 2.1.0 - /browserify-sign/4.2.1: - resolution: {integrity: sha512-/vrA5fguVAKKAVTNJjgSm1tRQDHUU6DbwO9IROu/0WAzC8PKhucDSh18J0RMvVeHAn5puMd+QHC2erPRNf8lmg==} + /browserify-sign/4.2.3: + resolution: {integrity: sha512-JWCZW6SKhfhjJxO8Tyiiy+XYB7cqd2S5/+WeYHsKdNKFlCBhKbblba1A/HN/90YwtxKc8tCErjffZl++UNmGiw==} + engines: {node: '>= 0.12'} dependencies: bn.js: 5.2.1 browserify-rsa: 4.1.0 create-hash: 1.2.0 create-hmac: 1.1.7 - elliptic: 6.5.4 + elliptic: 6.5.5 + hash-base: 3.0.4 inherits: 2.0.4 - parse-asn1: 5.1.6 - readable-stream: 3.6.2 + parse-asn1: 5.1.7 + readable-stream: 2.3.8 safe-buffer: 5.2.1 /browserify-zlib/0.2.0: @@ -4707,18 +4630,6 @@ packages: /caseless/0.12.0: resolution: {integrity: sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw==} - /cdt2d/1.0.0: - resolution: {integrity: sha512-pFKb7gVhpsI6onS5HUXRoqbBIJB4CJ+KPk8kgaIVcm0zFgOxIyBT5vzifZ4j1aoGVJS0U1A+S4oFDshuLAitlA==} - dependencies: - binary-search-bounds: 2.0.5 - robust-in-sphere: 1.2.1 - robust-orientation: 1.2.1 - dev: false - - /cell-orientation/1.0.1: - resolution: {integrity: sha512-DtEsrgP+donmPxpEZm7hK8zCPYDXAQ977ecJiE7G0gbTfnS6TZVBlief3IdRP/TZS1PVnJRGJTDdjSdV8mRDug==} - dev: false - /chalk/1.1.3: 
resolution: {integrity: sha512-U3lRVLMSlsCfjqYPbLyVv11M9CPW4I728d6TCKMAOJueEeB9/8o+eSsMnxPJD+Q+K909sdESg7C+tIkoH6on1A==} engines: {node: '>=0.10.0'} @@ -4832,19 +4743,6 @@ packages: inherits: 2.0.4 safe-buffer: 5.2.1 - /circumcenter/1.0.0: - resolution: {integrity: sha512-YRw0mvttcISviaOtSmaHb2G3ZVbkxzYPQeAEd57/CFFtmOkwfRTw9XuxYZ7PCi2BYa0NajjHV6bq4nbY1VCC8g==} - dependencies: - dup: 1.0.0 - robust-linear-solve: 1.0.0 - dev: false - - /circumradius/1.0.0: - resolution: {integrity: sha512-5ltoQvWQzJiZjCVX9PBKgKt+nsuzOLKayqXMNllfRSqIp2L5jFpdanv1V6j27Ue7ACxlzmamlR+jnLy+NTTVTw==} - dependencies: - circumcenter: 1.0.0 - dev: false - /clamp/1.0.1: resolution: {integrity: sha512-kgMuFyE78OC6Dyu3Dy7vcx4uy97EIbVxJB/B0eJ3bUNAkwdNcxYzgKltnyADiYwsR7SEqkkUPsEUT//OVS6XMA==} dev: false @@ -4873,18 +4771,6 @@ packages: source-map: 0.6.1 dev: false - /clean-pslg/1.1.2: - resolution: {integrity: sha512-bJnEUR6gRiiNi2n4WSC6yrc0Hhn/oQDOTzs6evZfPwEF/VKVXM6xu0F4n/WSBz7TjTt/ZK6I5snRM9gVKMVAxA==} - dependencies: - big-rat: 1.0.4 - box-intersect: 1.0.2 - nextafter: 1.0.0 - rat-vec: 1.1.1 - robust-segment-intersect: 1.0.1 - union-find: 1.0.2 - uniq: 1.0.1 - dev: false - /clean-regexp/1.0.0: resolution: {integrity: sha512-GfisEZEJvzKrmGWkvfhgzcz/BllN1USeqD2V6tg14OAOgaCD2Z/PUEuxnAZ/nPvmaHRG7a8y77p1T/IRQ4D1Hw==} engines: {node: '>=4'} @@ -5049,12 +4935,6 @@ packages: color-string: 1.9.1 dev: false - /colormap/2.3.2: - resolution: {integrity: sha512-jDOjaoEEmA9AgA11B/jCSAvYE95r3wRoAyTf3LEHGiUVlNHJaL1mRkf5AyLSpQBVGfTEPwGEqCIzL+kgr2WgNA==} - dependencies: - lerp: 1.0.3 - dev: false - /combined-stream/1.0.8: resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} engines: {node: '>= 0.8'} @@ -5078,27 +4958,6 @@ packages: /commondir/1.0.1: resolution: {integrity: sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==} - /compare-angle/1.0.1: - resolution: {integrity: sha512-adM1/bpLFQFquh0/Qr5aiOPuztoga/lCf2Z45s+Oydgzf18F3wBSkdHmcHMeig0bD+dDKlz52u1rLOAOqiyE5A==} - dependencies: - robust-orientation: 1.2.1 - robust-product: 1.0.0 - robust-sum: 1.0.0 - signum: 0.0.0 - two-sum: 1.0.0 - dev: false - - /compare-cell/1.0.0: - resolution: {integrity: sha512-uNIkjiNLZLhdCgouF39J+W04R7oP1vwrNME4vP2b2/bAa6PHOj+h8yXu52uPjPTKs5RatvqNsDVwEN7Yp19vNA==} - dev: false - - /compare-oriented-cell/1.0.1: - resolution: {integrity: sha512-9D7R2MQfsGGRskZAZF0TkJHt9eFNbFkZyVdVps+WUYxtRHgG77BLbieKgSkj7iEAb9PNDSU9QNa9MtigjQ3ktQ==} - dependencies: - cell-orientation: 1.0.1 - compare-cell: 1.0.0 - dev: false - /component-classes/1.2.6: resolution: {integrity: sha512-hPFGULxdwugu1QWW3SvVOCUHLzO34+a2J6Wqy0c5ASQkfi9/8nZcBB0ZohaEbXOQlCflMAEMmEWk7u7BVs4koA==} dependencies: @@ -5243,19 +5102,11 @@ packages: resolution: {integrity: sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==} dev: false - /convex-hull/1.0.3: - resolution: {integrity: sha512-24rZAoh81t41GHPLAxcsokgjH9XNoVqU2OiSi8iMHUn6HUURfiefcEWAPt1AfwZjBBWTKadOm1xUcUMnfFukhQ==} - dependencies: - affine-hull: 1.0.0 - incremental-convex-hull: 1.0.1 - monotone-convex-hull-2d: 1.0.1 - dev: false - /cookie-signature/1.0.6: resolution: {integrity: sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==} - /cookie/0.5.0: - resolution: {integrity: sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==} + /cookie/0.6.0: + resolution: {integrity: 
sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==} engines: {node: '>= 0.6'} /copy-anything/2.0.6: @@ -5421,7 +5272,7 @@ packages: resolution: {integrity: sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==} dependencies: browserify-cipher: 1.0.1 - browserify-sign: 4.2.1 + browserify-sign: 4.2.3 create-ecdh: 4.0.4 create-hash: 1.2.0 create-hmac: 1.1.7 @@ -5715,10 +5566,6 @@ packages: resolution: {integrity: sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==} dev: false - /cubic-hermite/1.0.0: - resolution: {integrity: sha512-DKZ6yLcJiJJgl54mGA4n0uueYB4qdPfOJrQ1HSEZqdKp6D25AAAWVDwpoAxLflOku5a/ALBO77oEIyWcVa+UYg==} - dev: false - /currently-unhandled/0.4.1: resolution: {integrity: sha512-/fITjgjGU50vjQ4FH6eUoYu+iUoUKIXws2hL15JJpIR+BbTxaXQsMuuyjtNh2WqsSBS5nsaZHFsFecyw5CCAng==} engines: {node: '>=0.10.0'} @@ -5732,12 +5579,6 @@ packages: lodash.flow: 3.5.0 dev: false - /cwise-compiler/1.1.3: - resolution: {integrity: sha512-WXlK/m+Di8DMMcCjcWr4i+XzcQra9eCdXIJrgh4TUgh0pIS/yJduLxS9JgefsHJ/YVLdgPtXm9r62W92MvanEQ==} - dependencies: - uniq: 1.0.1 - dev: false - /cyclist/1.0.2: resolution: {integrity: sha512-0sVXIohTfLqVIW3kb/0n6IiWF3Ifj5nm2XaSrLq2DI6fKIGa2fYAZdk917rUneaeLVpYfFcyXE2ft0fe3remsA==} dev: false @@ -5745,7 +5586,7 @@ packages: /d/1.0.1: resolution: {integrity: sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==} dependencies: - es5-ext: 0.10.62 + es5-ext: 0.10.64 type: 1.2.0 dev: false @@ -5774,12 +5615,33 @@ packages: d3-timer: 1.0.10 dev: false + /d3-format/1.4.5: + resolution: {integrity: sha512-J0piedu6Z8iB6TbIGfZgDzfXxUFN3qQRMofy2oPdXzQibYGqPB/9iMcxr/TGalU+2RsyDO+U4f33id8tbnSRMQ==} + dev: false + + /d3-geo-projection/2.9.0: + resolution: {integrity: sha512-ZULvK/zBn87of5rWAfFMc9mJOipeSo57O+BBitsKIXmU4rTVAnX1kSsJkE0R+TxY8pGNoM1nbyRRE7GYHhdOEQ==} + hasBin: true + dependencies: + commander: 2.20.3 + d3-array: 1.2.4 + d3-geo: 1.12.1 + resolve: 1.22.4 + dev: false + + /d3-geo/1.12.1: + resolution: {integrity: sha512-XG4d1c/UJSEX9NfU02KwBL6BYPj8YKHxgBEw5om2ZnTRSbIcego6dhHwcxuSR3clxh0EpE38os1DVPOmnYtTPg==} + dependencies: + d3-array: 1.2.4 + dev: false + /d3-hierarchy/1.1.9: resolution: {integrity: sha512-j8tPxlqh1srJHAtxfvOUwKNYJkQuBFdM1+JAUfq6xqH5eAqf93L7oG1NVqDa4CpFZNvnNKtCYEUC8KY9yEn9lQ==} dev: false - /d3-interpolate/1.4.0: - resolution: {integrity: sha512-V9znK0zc3jOPV4VD2zZn0sDhZU3WAE2bmlxdIwwQPPzPjvyLkd8B3JUVdS1IDUFDkWZ72c9qnv1GK2ZagTZ8EA==} + /d3-interpolate/3.0.1: + resolution: {integrity: sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==} + engines: {node: '>=12'} dependencies: d3-color: 1.4.1 dev: false @@ -5812,10 +5674,6 @@ packages: resolution: {integrity: sha512-B1JDm0XDaQC+uvo4DT79H0XmBskgS3l6Ve+1SBCfxgmtIb1AVrPIoqd+nPSv+loMX8szQ0sVUhGngL7D5QPiXw==} dev: false - /d3/3.5.17: - resolution: {integrity: sha512-yFk/2idb8OHPKkbAL8QaOaqENNoMhIaSHZerk3oQsECwkObkCpJyjYwCe+OHiq6UEdhe1m8ZGARRRO3ljFjlKg==} - dev: false - /damerau-levenshtein/1.0.8: resolution: {integrity: sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==} dev: false @@ -6017,13 +5875,6 @@ packages: rimraf: 2.7.1 dev: false - /delaunay-triangulate/1.1.6: - resolution: {integrity: sha512-mhAclqFCgLoiBIDQDIz2K+puZq6OhYxunXrG2wtTcZS+S1xuzl+H3h0MIOajpES+Z+jfY/rz0wVt3o5iipt1wg==} - dependencies: - incremental-convex-hull: 1.0.1 - uniq: 1.0.1 - dev: false - 
/delayed-stream/1.0.0: resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} engines: {node: '>=0.4.0'} @@ -6110,7 +5961,7 @@ packages: /dns-packet/1.3.4: resolution: {integrity: sha512-BQ6F4vycLXBvdrJZ6S3gZewt6rcrks9KBgM9vrhW+knGRqc8uEdT7fuCwloc7nny5xNoMJ17HGH0R/6fpo8ECA==} dependencies: - ip: 1.1.8 + ip: 1.1.9 safe-buffer: 5.2.1 dev: false @@ -6255,10 +6106,6 @@ packages: engines: {node: '>=8'} dev: false - /double-bits/1.1.1: - resolution: {integrity: sha512-BCLEIBq0O/DWoA7BsCu/R+RP0ZXiowP8BhtJT3qeuuQEBpnS8LK/Wo6UTJQv6v8mK1fj8n90YziHLwGdM5whSg==} - dev: false - /draft-js/0.10.5_wcqkhtmu7mswc6yz4uyexck3ty: resolution: {integrity: sha512-LE6jSCV9nkPhfVX2ggcRLA4FKs6zWq9ceuO/88BpXdNCS7mjRTgs0NsV6piUCJX9YxMsB9An33wnkMmU2sD2Zg==} peerDependencies: @@ -6315,12 +6162,6 @@ packages: jsbn: 0.1.1 safer-buffer: 2.1.2 - /edges-to-adjacency-list/1.0.0: - resolution: {integrity: sha512-0n0Z+xTLfg96eYXm91PEY4rO4WGxohLWjJ9qD1RI3fzxKU6GHez+6KPajpobR4zeZxp7rSiHjHG5dZPj8Kj58Q==} - dependencies: - uniq: 1.0.1 - dev: false - /ee-first/1.1.1: resolution: {integrity: sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==} @@ -6349,6 +6190,17 @@ packages: minimalistic-assert: 1.0.1 minimalistic-crypto-utils: 1.0.1 + /elliptic/6.5.5: + resolution: {integrity: sha512-7EjbcmUm17NQFu4Pmgmq2olYMj8nwMnpcddByChSUjArp8F5DQWcIcpriwO4ZToLNAJig0yiyjswfyGNje/ixw==} + dependencies: + bn.js: 4.12.0 + brorand: 1.1.0 + hash.js: 1.1.7 + hmac-drbg: 1.0.1 + inherits: 2.0.4 + minimalistic-assert: 1.0.1 + minimalistic-crypto-utils: 1.0.1 + /emoji-regex/7.0.3: resolution: {integrity: sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==} @@ -6526,13 +6378,14 @@ packages: is-date-object: 1.0.5 is-symbol: 1.0.4 - /es5-ext/0.10.62: - resolution: {integrity: sha512-BHLqn0klhEpnOKSrzn/Xsz2UIW8j+cGmo9JLzr8BiUapV8hPL9+FliFqjwr9ngW7jWdnxv6eO+/LqyhJVqgrjA==} + /es5-ext/0.10.64: + resolution: {integrity: sha512-p2snDhiLaXe6dahss1LddxqEm+SkuDvV8dnIQG0MWjyHpcMNfXKPE+/Cc0y+PhxJX3A4xGNeFCj5oc0BUh6deg==} engines: {node: '>=0.10'} requiresBuild: true dependencies: es6-iterator: 2.0.3 es6-symbol: 3.1.3 + esniff: 2.0.1 next-tick: 1.1.0 dev: false @@ -6540,14 +6393,10 @@ packages: resolution: {integrity: sha512-zw4SRzoUkd+cl+ZoE15A9o1oQd920Bb0iOJMQkQhl3jNc03YqVjAhG7scf9C5KWRU/R13Orf588uCC6525o02g==} dependencies: d: 1.0.1 - es5-ext: 0.10.62 + es5-ext: 0.10.64 es6-symbol: 3.1.3 dev: false - /es6-promise/4.2.8: - resolution: {integrity: sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==} - dev: false - /es6-symbol/3.1.3: resolution: {integrity: sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==} dependencies: @@ -6559,7 +6408,7 @@ packages: resolution: {integrity: sha512-p5um32HOTO1kP+w7PRnB+5lQ43Z6muuMuIMffvDN8ZB4GcnjLBV6zGStpbASIMk4DCAvEaamhe2zhyCb/QXXsA==} dependencies: d: 1.0.1 - es5-ext: 0.10.62 + es5-ext: 0.10.64 es6-iterator: 2.0.3 es6-symbol: 3.1.3 dev: false @@ -6872,7 +6721,7 @@ packages: minimatch: 3.1.2 object.values: 1.1.6 read-pkg-up: 2.0.0 - resolve: 1.15.0 + resolve: 1.22.4 transitivePeerDependencies: - eslint-import-resolver-typescript - eslint-import-resolver-webpack @@ -7181,6 +7030,16 @@ packages: transitivePeerDependencies: - supports-color + /esniff/2.0.1: + resolution: {integrity: sha512-kTUIGKQ/mDPFoJ0oVfcmyJn4iBDRptjNVIzwIFR7tqWXdVI9xfA2RMwY/gbSpJG3lkdWNEjLap/NqVHZiJsdfg==} + engines: {node: 
'>=0.10'} + dependencies: + d: 1.0.1 + es5-ext: 0.10.64 + event-emitter: 0.3.5 + type: 2.7.2 + dev: false + /espree/6.2.1: resolution: {integrity: sha512-ysCxRQY3WaXJz9tdbWOwuWr5Y/XrPTGX9Kiz3yoUXwW0VZ4w30HTkQLaGx/+ttFjF8i+ACbArnB4ce68a9m5hw==} engines: {node: '>=6.0.0'} @@ -7235,6 +7094,13 @@ packages: resolution: {integrity: sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==} engines: {node: '>= 0.6'} + /event-emitter/0.3.5: + resolution: {integrity: sha512-D9rRn9y7kLPnJ+hMq7S/nhvoKwwvVJahBi2BPmx3bvbsEdK3W9ii8cBSGjP+72/LnM4n6fo3+dkCX5FeTQruXA==} + dependencies: + d: 1.0.1 + es5-ext: 0.10.64 + dev: false + /eventemitter3/4.0.7: resolution: {integrity: sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==} dev: false @@ -7345,16 +7211,16 @@ packages: - supports-color dev: true - /express/4.18.2: - resolution: {integrity: sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==} + /express/4.19.2: + resolution: {integrity: sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==} engines: {node: '>= 0.10.0'} dependencies: accepts: 1.3.8 array-flatten: 1.1.1 - body-parser: 1.20.1 + body-parser: 1.20.2 content-disposition: 0.5.4 content-type: 1.0.5 - cookie: 0.5.0 + cookie: 0.6.0 cookie-signature: 1.0.6 debug: 2.6.9 depd: 2.0.0 @@ -7384,16 +7250,16 @@ packages: - supports-color dev: true - /express/4.18.2_supports-color@6.1.0: - resolution: {integrity: sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==} + /express/4.19.2_supports-color@6.1.0: + resolution: {integrity: sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==} engines: {node: '>= 0.10.0'} dependencies: accepts: 1.3.8 array-flatten: 1.1.1 - body-parser: 1.20.1_supports-color@6.1.0 + body-parser: 1.20.2_supports-color@6.1.0 content-disposition: 0.5.4 content-type: 1.0.5 - cookie: 0.5.0 + cookie: 0.6.0 cookie-signature: 1.0.6 debug: 2.6.9_supports-color@6.1.0 depd: 2.0.0 @@ -7484,10 +7350,6 @@ packages: - supports-color dev: false - /extract-frustum-planes/1.0.0: - resolution: {integrity: sha512-GivvxEMgjSNnB3e1mIMBlB5ogPB6XyEjOQRGG0SfYVVLtu1ntLGHLT1ly8+mE819dKBHBwnm9+UBCScjiMgppA==} - dev: false - /extsprintf/1.3.0: resolution: {integrity: sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g==} engines: {'0': node >=0.6.0} @@ -7622,13 +7484,6 @@ packages: dependencies: to-regex-range: 5.0.1 - /filtered-vector/1.2.5: - resolution: {integrity: sha512-5Vu6wdtQJ1O2nRmz39dIr9m3hEDq1skYby5k1cJQdNWK4dMgvYcUEiA/9j7NcKfNZ5LGxn8w2LSLiigyH7pTAw==} - dependencies: - binary-search-bounds: 2.0.5 - cubic-hermite: 1.0.0 - dev: false - /finalhandler/1.2.0: resolution: {integrity: sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==} engines: {node: '>= 0.8'} @@ -7971,10 +7826,6 @@ packages: /functions-have-names/1.2.3: resolution: {integrity: sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==} - /gamma/0.1.0: - resolution: {integrity: sha512-IgHc/jnzNTA2KjXmRSx/CVd1ONp7HTAV81SLI+n3G6PyyHkakkE+2d3hteJYFm7aoe01NEl4m7ziUAsoWCc5AA==} - dev: false - /gensync/1.0.0-beta.2: resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} engines: {node: '>=6.9.0'} @@ -8058,120 +7909,6 @@ packages: dependencies: assert-plus: 
1.0.0 - /gl-axes3d/1.5.3: - resolution: {integrity: sha512-KRYbguKQcDQ6PcB9g1pgqB8Ly4TY1DQODpPKiDTasyWJ8PxQk0t2Q7XoQQijNqvsguITCpVVCzNb5GVtIWiVlQ==} - dependencies: - bit-twiddle: 1.0.2 - dup: 1.0.0 - extract-frustum-planes: 1.0.0 - gl-buffer: 2.1.2 - gl-mat4: 1.2.0 - gl-shader: 4.3.1 - gl-state: 1.0.0 - gl-vao: 1.3.0 - gl-vec4: 1.0.1 - glslify: 7.1.1 - robust-orientation: 1.2.1 - split-polygon: 1.0.0 - vectorize-text: 3.2.2 - dev: false - - /gl-buffer/2.1.2: - resolution: {integrity: sha512-uVvLxxhEbQGl43xtDeKu75ApnrGyNHoPmOcvvuJNyP04HkK0/sX5Dll6OFffQiwSV4j0nlAZsgznvO3CPT3dFg==} - dependencies: - ndarray: 1.0.19 - ndarray-ops: 1.2.2 - typedarray-pool: 1.2.0 - dev: false - - /gl-cone3d/1.5.2: - resolution: {integrity: sha512-1JNeHH4sUtUmDA4ZK7Om8/kShwb8IZVAsnxaaB7IPRJsNGciLj1sTpODrJGeMl41RNkex5kXD2SQFrzyEAR2Rw==} - dependencies: - colormap: 2.3.2 - gl-buffer: 2.1.2 - gl-mat4: 1.2.0 - gl-shader: 4.3.1 - gl-texture2d: 2.1.0 - gl-vao: 1.3.0 - gl-vec3: 1.1.3 - glsl-inverse: 1.0.0 - glsl-out-of-range: 1.0.4 - glsl-specular-cook-torrance: 2.0.1 - glslify: 7.1.1 - ndarray: 1.0.19 - dev: false - - /gl-constants/1.0.0: - resolution: {integrity: sha512-3DNyoAUdb1c+o7jNk5Nm7eh6RSQFi9ZmMQIQb2xxsO27rUopE+IUhoh4xlUvZYBn1YPgUC8BlCnrVjXq/d2dQA==} - dev: false - - /gl-contour2d/1.1.7: - resolution: {integrity: sha512-GdebvJ9DtT3pJDpoE+eU2q+Wo9S3MijPpPz5arZbhK85w2bARmpFpVfPaDlZqWkB644W3BlH8TVyvAo1KE4Bhw==} - dependencies: - binary-search-bounds: 2.0.5 - cdt2d: 1.0.0 - clean-pslg: 1.1.2 - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - glslify: 7.1.1 - iota-array: 1.0.0 - ndarray: 1.0.19 - surface-nets: 1.0.2 - dev: false - - /gl-error3d/1.0.16: - resolution: {integrity: sha512-TGJewnKSp7ZnqGgG3XCF9ldrDbxZrO+OWlx6oIet4OdOM//n8xJ5isArnIV/sdPJnFbhfoLxWrW9f5fxHFRQ1A==} - dependencies: - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - gl-vao: 1.3.0 - glsl-out-of-range: 1.0.4 - glslify: 7.1.1 - dev: false - - /gl-fbo/2.0.5: - resolution: {integrity: sha512-tDq6zQSQzvvK2QwPV7ln7cf3rs0jV1rQXqKOEuB145LdN+xhADPBtXHDJ3Ftk80RAJimJU0AaQBgP/X6yYGNhQ==} - dependencies: - gl-texture2d: 2.1.0 - dev: false - - /gl-format-compiler-error/1.0.3: - resolution: {integrity: sha512-FtQaBYlsM/rnz7YhLkxG9dLcNDB+ExErIsFV2DXl0nk+YgIZ2i0jMob4BrhT9dNa179zFb0gZMWpNAokytK+Ug==} - dependencies: - add-line-numbers: 1.0.1 - gl-constants: 1.0.0 - glsl-shader-name: 1.0.0 - sprintf-js: 1.1.2 - dev: false - - /gl-heatmap2d/1.1.1: - resolution: {integrity: sha512-6Vo1fPIB1vQFWBA/MR6JAA16XuQuhwvZRbSjYEq++m4QV33iqjGS2HcVIRfJGX+fomd5eiz6bwkVZcKm69zQPw==} - dependencies: - binary-search-bounds: 2.0.5 - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - glslify: 7.1.1 - iota-array: 1.0.0 - typedarray-pool: 1.2.0 - dev: false - - /gl-line3d/1.2.1: - resolution: {integrity: sha512-eeb0+RI2ZBRqMYJK85SgsRiJK7c4aiOjcnirxv0830A3jmOc99snY3AbPcV8KvKmW0Yaf3KA4e+qNCbHiTOTnA==} - dependencies: - binary-search-bounds: 2.0.5 - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - gl-texture2d: 2.1.0 - gl-vao: 1.3.0 - glsl-out-of-range: 1.0.4 - glslify: 7.1.1 - ndarray: 1.0.19 - dev: false - - /gl-mat3/1.0.0: - resolution: {integrity: sha512-obeEq9y7xaDoVkwMGJNL1upwpYlPJiXJFhREaNytMqUdfHKHNna9HvImmLV8F8Ys6QOYwPPddptZNoiiec/XOg==} - dev: false - /gl-mat4/1.2.0: resolution: {integrity: sha512-sT5C0pwB1/e9G9AvAoLsoaJtbMGjfd/jfxo8jMCKqYYEnjZuFvqV5rehqar0538EmssjdDeiEWnKyBSTw7quoA==} dev: false @@ -8180,169 +7917,6 @@ packages: resolution: {integrity: sha512-wcCp8vu8FT22BnvKVPjXa/ICBWRq/zjFfdofZy1WSpQZpphblv12/bOQLBC1rMM7SGOFS9ltVmKOHil5+Ml7gA==} dev: false - /gl-mesh3d/2.3.1: - resolution: {integrity: 
sha512-pXECamyGgu4/9HeAQSE5OEUuLBGS1aq9V4BCsTcxsND4fNLaajEkYKUz/WY2QSYElqKdsMBVsldGiKRKwlybqA==} - dependencies: - barycentric: 1.0.1 - colormap: 2.3.2 - gl-buffer: 2.1.2 - gl-mat4: 1.2.0 - gl-shader: 4.3.1 - gl-texture2d: 2.1.0 - gl-vao: 1.3.0 - glsl-out-of-range: 1.0.4 - glsl-specular-cook-torrance: 2.0.1 - glslify: 7.1.1 - ndarray: 1.0.19 - normals: 1.1.0 - polytope-closest-point: 1.0.0 - simplicial-complex-contour: 1.0.2 - typedarray-pool: 1.2.0 - dev: false - - /gl-plot2d/1.4.5: - resolution: {integrity: sha512-6GmCN10SWtV+qHFQ1gjdnVubeHFVsm6P4zmo0HrPIl9TcdePCUHDlBKWAuE6XtFhiMKMj7R8rApOX8O8uXUYog==} - dependencies: - binary-search-bounds: 2.0.5 - gl-buffer: 2.1.2 - gl-select-static: 2.0.7 - gl-shader: 4.3.1 - glsl-inverse: 1.0.0 - glslify: 7.1.1 - text-cache: 4.2.2 - dev: false - - /gl-plot3d/2.4.7: - resolution: {integrity: sha512-mLDVWrl4Dj0O0druWyHUK5l7cBQrRIJRn2oROEgrRuOgbbrLAzsREKefwMO0bA0YqkiZMFMnV5VvPA9j57X5Xg==} - dependencies: - 3d-view: 2.0.1 - a-big-triangle: 1.0.3 - gl-axes3d: 1.5.3 - gl-fbo: 2.0.5 - gl-mat4: 1.2.0 - gl-select-static: 2.0.7 - gl-shader: 4.3.1 - gl-spikes3d: 1.0.10 - glslify: 7.1.1 - has-passive-events: 1.0.0 - is-mobile: 2.2.2 - mouse-change: 1.4.0 - mouse-event-offset: 3.0.2 - mouse-wheel: 1.2.0 - ndarray: 1.0.19 - right-now: 1.0.0 - dev: false - - /gl-pointcloud2d/1.0.3: - resolution: {integrity: sha512-OS2e1irvJXVRpg/GziXj10xrFJm9kkRfFoB6BLUvkjCQV7ZRNNcs2CD+YSK1r0gvMwTg2T3lfLM3UPwNtz+4Xw==} - dependencies: - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - glslify: 7.1.1 - typedarray-pool: 1.2.0 - dev: false - - /gl-quat/1.0.0: - resolution: {integrity: sha512-Pv9yvjJgQN85EbE79S+DF50ujxDkyjfYHIyXJcCRiimU1UxMY7vEHbVkj0IWLFaDndhfZT9vVOyfdMobLlrJsQ==} - dependencies: - gl-mat3: 1.0.0 - gl-vec3: 1.1.3 - gl-vec4: 1.0.1 - dev: false - - /gl-scatter3d/1.2.3: - resolution: {integrity: sha512-nXqPlT1w5Qt51dTksj+DUqrZqwWAEWg0PocsKcoDnVNv0X8sGA+LBZ0Y+zrA+KNXUL0PPCX9WR9cF2uJAZl1Sw==} - dependencies: - gl-buffer: 2.1.2 - gl-mat4: 1.2.0 - gl-shader: 4.3.1 - gl-vao: 1.3.0 - glsl-out-of-range: 1.0.4 - glslify: 7.1.1 - is-string-blank: 1.0.1 - typedarray-pool: 1.2.0 - vectorize-text: 3.2.2 - dev: false - - /gl-select-box/1.0.4: - resolution: {integrity: sha512-mKsCnglraSKyBbQiGq0Ila0WF+m6Tr+EWT2yfaMn/Sh9aMHq5Wt0F/l6Cf/Ed3CdERq5jHWAY5yxLviZteYu2w==} - dependencies: - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - glslify: 7.1.1 - dev: false - - /gl-select-static/2.0.7: - resolution: {integrity: sha512-OvpYprd+ngl3liEatBTdXhSyNBjwvjMSvV2rN0KHpTU+BTi4viEETXNZXFgGXY37qARs0L28ybk3UQEW6C5Nnw==} - dependencies: - bit-twiddle: 1.0.2 - gl-fbo: 2.0.5 - ndarray: 1.0.19 - typedarray-pool: 1.2.0 - dev: false - - /gl-shader/4.3.1: - resolution: {integrity: sha512-xLoN6XtRLlg97SEqtuzfKc+pVWpVkQ3YjDI1kuCale8tF7+zMhiKlMfmG4IMQPMdKJZQbIc/Ny8ZusEpfh5U+w==} - dependencies: - gl-format-compiler-error: 1.0.3 - weakmap-shim: 1.1.1 - dev: false - - /gl-spikes2d/1.0.2: - resolution: {integrity: sha512-QVeOZsi9nQuJJl7NB3132CCv5KA10BWxAY2QgJNsKqbLsG53B/TrGJpjIAohnJftdZ4fT6b3ZojWgeaXk8bOOA==} - dev: false - - /gl-spikes3d/1.0.10: - resolution: {integrity: sha512-lT3xroowOFxMvlhT5Mof76B2TE02l5zt/NIWljhczV2FFHgIVhA4jMrd5dIv1so1RXMBDJIKu0uJI3QKliDVLg==} - dependencies: - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - gl-vao: 1.3.0 - glslify: 7.1.1 - dev: false - - /gl-state/1.0.0: - resolution: {integrity: sha512-Od836PpgCuTC0W7uHYnEEPRdQPL1FakWlznz3hRvlO6tD5sdLfBKX9qNRGy1DjfMCDTudhyYWxiWjhql1B8N4Q==} - dependencies: - uniq: 1.0.1 - dev: false - - /gl-streamtube3d/1.4.1: - resolution: {integrity: 
sha512-rH02v00kgwgdpkXVo7KsSoPp38bIAYR9TE1iONjcQ4cQAlDhrGRauqT/P5sUaOIzs17A2DxWGcXM+EpNQs9pUA==} - dependencies: - gl-cone3d: 1.5.2 - gl-vec3: 1.1.3 - gl-vec4: 1.0.1 - glsl-inverse: 1.0.0 - glsl-out-of-range: 1.0.4 - glsl-specular-cook-torrance: 2.0.1 - glslify: 7.1.1 - dev: false - - /gl-surface3d/1.6.0: - resolution: {integrity: sha512-x15+u4712ysnB85G55RLJEml6mOB4VaDn0VTlXCc9JcjRl5Es10Tk7lhGGyiPtkCfHwvhnkxzYA1/rHHYN7Y0A==} - dependencies: - binary-search-bounds: 2.0.5 - bit-twiddle: 1.0.2 - colormap: 2.3.2 - dup: 1.0.0 - gl-buffer: 2.1.2 - gl-mat4: 1.2.0 - gl-shader: 4.3.1 - gl-texture2d: 2.1.0 - gl-vao: 1.3.0 - glsl-out-of-range: 1.0.4 - glsl-specular-beckmann: 1.1.2 - glslify: 7.1.1 - ndarray: 1.0.19 - ndarray-gradient: 1.0.1 - ndarray-ops: 1.2.2 - ndarray-pack: 1.2.1 - ndarray-scratch: 1.2.0 - surface-nets: 1.0.2 - typedarray-pool: 1.2.0 - dev: false - /gl-text/1.3.1: resolution: {integrity: sha512-/f5gcEMiZd+UTBJLTl3D+CkCB/0UFGTx3nflH8ZmyWcLkZhsZ1+Xx5YYkw2rgWAzgPeE35xCqBuHSoMKQVsR+w==} dependencies: @@ -8365,14 +7939,6 @@ packages: typedarray-pool: 1.2.0 dev: false - /gl-texture2d/2.1.0: - resolution: {integrity: sha512-W0tzEjtlGSsCKq5FFwFVhH+fONFUTUeqM4HhA/BleygKaX39IwNTVOiqkwfu9szQZ4dQEq8ZDl7w1ud/eKLaZA==} - dependencies: - ndarray: 1.0.19 - ndarray-ops: 1.2.2 - typedarray-pool: 1.2.0 - dev: false - /gl-util/3.1.3: resolution: {integrity: sha512-dvRTggw5MSkJnCbh74jZzSoTOGnVYK+Bt+Ckqm39CVcl6+zSsxqWk4lr5NKhkqXHL6qvZAU9h17ZF8mIskY9mA==} dependencies: @@ -8385,18 +7951,6 @@ packages: weak-map: 1.0.8 dev: false - /gl-vao/1.3.0: - resolution: {integrity: sha512-stSOZ+n0fnAxgDfipwKK/73AwzCNL+AFEc/v2Xm76nyFnUZGmQtD2FEC3lt1icoOHAzMgHBAjCue7dBIDeOTcw==} - dev: false - - /gl-vec3/1.1.3: - resolution: {integrity: sha512-jduKUqT0SGH02l8Yl+mV1yVsDfYgQAJyXGxkJQGyxPLHRiW25DwVIRPt6uvhrEMHftJfqhqKthRcyZqNEl9Xdw==} - dev: false - - /gl-vec4/1.0.1: - resolution: {integrity: sha512-/gx5zzIy75JXzke4yuwcbvK+COWf8UJbVCUPvhfsYVw1GVey4Eextk/0H0ctXnOICruNK7+GS4ILQzEQcHcPEg==} - dev: false - /glob-parent/3.1.0: resolution: {integrity: sha512-E8Ak/2+dZY6fnzlR7+ueWvhsH1SjHr4jjss4YS/h4py44jY9MhK/VFdaZJAWDz6BbL21KeteKxFSFpq8OS5gVA==} dependencies: @@ -8519,14 +8073,6 @@ packages: glsl-tokenizer: 2.1.5 dev: false - /glsl-inverse/1.0.0: - resolution: {integrity: sha512-+BsseNlgqzd4IFX1dMqg+S0XuIXzH0acvTtW7svwhJESM1jb2BZFwdO+tOWdCXD5Zse6b9bOmzp5sCNA7GQ2QA==} - dev: false - - /glsl-out-of-range/1.0.4: - resolution: {integrity: sha512-fCcDu2LCQ39VBvfe1FbhuazXEf0CqMZI9OYXrYlL6uUARG48CTAbL04+tZBtVM0zo1Ljx4OLu2AxNquq++lxWQ==} - dev: false - /glsl-resolve/0.0.1: resolution: {integrity: sha512-xxFNsfnhZTK9NBhzJjSBGX6IOqYpvBHxxmo+4vapiljyGNCY0Bekzn0firQkQrazK59c1hYxMDxYS8MDlhw4gA==} dependencies: @@ -8534,23 +8080,6 @@ packages: xtend: 2.2.0 dev: false - /glsl-shader-name/1.0.0: - resolution: {integrity: sha512-OtHon0dPCbJD+IrVA1vw9QDlp2cS/f9z8X/0y+W7Qy1oZ3U1iFAQUEco2v30V0SAlVLDG5rEfhjEfc3DKdGbFQ==} - dependencies: - atob-lite: 1.0.0 - glsl-tokenizer: 2.1.5 - dev: false - - /glsl-specular-beckmann/1.1.2: - resolution: {integrity: sha512-INvd7szO1twNPLGwE0Kf2xXIEy5wpOPl/LYoiw3+3nbAe6Rfn5rjdK9xvfnwoWksTCs3RejuLeAiZkLTkdFtwg==} - dev: false - - /glsl-specular-cook-torrance/2.0.1: - resolution: {integrity: sha512-bFtTfbgLXIbto/U6gM7h0IxoPMU+5zpMK5HoAaA2LnPuGk3JSzKAnsoyh5QGTT8ioIEQrjk6jcQNrgujPsP7rw==} - dependencies: - glsl-specular-beckmann: 1.1.2 - dev: false - /glsl-token-assignments/2.0.2: resolution: {integrity: sha512-OwXrxixCyHzzA0U2g4btSNAyB2Dx8XrztY5aVUCjRSh4/D0WoJn8Qdps7Xub3sz6zE73W3szLrmWtQ7QMpeHEQ==} dev: false @@ 
-8806,7 +8335,14 @@ packages: resolution: {integrity: sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==} engines: {node: '>= 0.4.0'} dependencies: - function-bind: 1.1.1 + function-bind: 1.1.1 + + /hash-base/3.0.4: + resolution: {integrity: sha512-EeeoJKjTyt868liAlVmcv2ZsUfGHlE3Q+BICOXcZiwN3osr5Q/zFGYmTJpoIzuaSTAwndFy+GqhEwlU4L3j4Ow==} + engines: {node: '>=4'} + dependencies: + inherits: 2.0.4 + safe-buffer: 5.2.1 /hash-base/3.1.0: resolution: {integrity: sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==} @@ -9064,12 +8600,6 @@ packages: dev: false optional: true - /image-size/0.7.5: - resolution: {integrity: sha512-Hiyv+mXHfFEP7LzUL/llg9RwFxxY+o9N3JVLIeG5E7iFIFAalxvRU9UZthBdYDEVnzHMgjnKJPPpay5BWf1g9g==} - engines: {node: '>=6.9.0'} - hasBin: true - dev: false - /immer/1.10.0: resolution: {integrity: sha512-O3sR1/opvCDGLEVcvrGTMtLac8GJ5IwZC4puPrLuRj3l7ICKvkmA0vGuU9OW8mV9WIBRnaxp5GJh9IEAaNOoYg==} dev: false @@ -9131,13 +8661,6 @@ packages: resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} engines: {node: '>=0.8.19'} - /incremental-convex-hull/1.0.1: - resolution: {integrity: sha512-mKRJDXtzo1R9LxCuB1TdwZXHaPaIEldoGPsXy2jrJc/kufyqp8y/VAQQxThSxM2aroLoh6uObexPk1ASJ7FB7Q==} - dependencies: - robust-orientation: 1.2.1 - simplicial-complex: 1.0.0 - dev: false - /indent-string/3.2.0: resolution: {integrity: sha512-BYqTHXTGUIvg7t1r4sJNKcbDZkL92nkXA8YtRpbjFHRHGDL/NtUeiBJMeE60kIFN/Mg8ESaWQvftaYMGJzQZCQ==} engines: {node: '>=4'} @@ -9236,33 +8759,19 @@ packages: engines: {node: '>= 0.10'} dev: true - /interval-tree-1d/1.0.4: - resolution: {integrity: sha512-wY8QJH+6wNI0uh4pDQzMvl+478Qh7Rl4qLmqiluxALlNvl+I+o5x38Pw3/z7mDPTPS1dQalZJXsmbvxx5gclhQ==} - dependencies: - binary-search-bounds: 2.0.5 - dev: false - /invariant/2.2.4: resolution: {integrity: sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==} dependencies: loose-envify: 1.4.0 dev: false - /invert-permutation/1.0.0: - resolution: {integrity: sha512-8f473/KSrnvyBd7Khr4PC5wPkAOehwkGc+AH5Q7D+U/fE+cdDob2FJ3naXAs4mspR9JIaEwbDI3me8H0KlVzSQ==} - dev: false - - /iota-array/1.0.0: - resolution: {integrity: sha512-pZ2xT+LOHckCatGQ3DcG/a+QuEqvoxqkiL7tvE8nn3uuu+f6i1TtpB5/FtWFbxUuVr5PZCx8KskuGatbJDXOWA==} - dev: false - /ip-regex/2.1.0: resolution: {integrity: sha512-58yWmlHpp7VYfcdTwMTvwMmqx/Elfxjd9RXTDyMsbL7lLWmhMylLEqiYVLKuLzOZqVgiWXD9MfR62Vv89VRxkw==} engines: {node: '>=4'} dev: false - /ip/1.1.8: - resolution: {integrity: sha512-PuExPYUiu6qMBQb4l06ecm6T6ujzhmh+MeJcW9wa89PoAz5pvd4zPgN5WJV104mb6S2T1AwNIAaB70JNrLQWhg==} + /ip/1.1.9: + resolution: {integrity: sha512-cyRxvOEpNHNtchU3Ln9KC/auJgup87llfQpQ+t5ghoC/UhL16SWzbueiCsdTnWmqAWl7LadfuwhlqmtOaqMHdQ==} dev: false /ipaddr.js/1.9.1: @@ -9545,6 +9054,10 @@ packages: resolution: {integrity: sha512-wW/SXnYJkTjs++tVK5b6kVITZpAZPtUrt9SF80vvxGiF/Oywal+COk1jlRkiVq15RFNEQKQY31TkV24/1T5cVg==} dev: false + /is-mobile/4.0.0: + resolution: {integrity: sha512-mlcHZA84t1qLSuWkt2v0I2l61PYdyQDt4aG1mLIXF5FDMm4+haBCxCPYSr/uwqQNRk1MiTizn0ypEuRAOLRAew==} + dev: false + /is-negated-glob/1.0.0: resolution: {integrity: sha512-czXVVn/QEmgvej1f50BZ648vUI+em0xqMq2Sn+QncCLN4zj1UAxlT+kw/6ggQTOaZPd1HqKQGEqbpQVtJucWug==} engines: {node: '>=0.10.0'} @@ -10451,7 +9964,7 @@ packages: connect-pause: 0.1.1 cors: 2.8.5 errorhandler: 1.5.1 - express: 4.18.2 + express: 4.19.2 express-urlrewrite: 1.4.0 json-parse-helpfulerror: 1.0.3 lodash: 
4.17.21 @@ -10624,10 +10137,6 @@ packages: deprecated: use String.prototype.padStart() dev: false - /lerp/1.0.3: - resolution: {integrity: sha512-70Rh4rCkJDvwWiTsyZ1HmJGvnyfFah4m6iTux29XmasRiZPDBpT9Cfa4ai73+uLZxnlKruUS62jj2lb11wURiA==} - dev: false - /less-loader/5.0.0_less@3.13.1: resolution: {integrity: sha512-bquCU89mO/yWLaUq0Clk7qCsKhsF/TZpJUzETRvJa9KSVEL9SO3ovCvdEHISBhrC81OwC8QSVX7E0bzElZj9cg==} engines: {node: '>= 4.8.0'} @@ -10794,6 +10303,10 @@ packages: resolution: {integrity: sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==} dev: false + /lodash.merge/4.6.2: + resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + dev: false + /lodash.sortby/4.7.0: resolution: {integrity: sha512-HDWXG8isMntAyRF5vZ7xKuEvOhT4AhlRt/3czTSjvGUxjYCBVRQY48ViDHyfYz9VIoBkW4TMGQNapx+l3RUwdA==} dev: false @@ -10984,49 +10497,11 @@ packages: vt-pbf: 3.1.3 dev: false - /marching-simplex-table/1.0.0: - resolution: {integrity: sha512-PexXXVF4f5Bux3vGCNlRRBqF/GyTerNo77PbBz8g/MFFXv212b48IGVglj/VfaYBRY6vlFQffa9dFbCCN0+7LA==} - dependencies: - convex-hull: 1.0.3 - dev: false - - /mat4-decompose/1.0.4: - resolution: {integrity: sha512-M3x6GXrzRTt5Ok4/bcHFc869Pe8F3uWaSp3xkUpi+uaTRulPXIZ1GWD13Z3A8WK2bxTrcvX21mjp05gUy/Dwbw==} - dependencies: - gl-mat4: 1.2.0 - gl-vec3: 1.1.3 - dev: false - - /mat4-interpolate/1.0.4: - resolution: {integrity: sha512-+ulnoc6GUHq8eGZGbLyhQU61tx2oeNAFilV/xzCCzLV+F3nDk8jqERUqRmx8eNMMMvrdvoRSw0JXmnisfVPY9A==} - dependencies: - gl-mat4: 1.2.0 - gl-vec3: 1.1.3 - mat4-decompose: 1.0.4 - mat4-recompose: 1.0.4 - quat-slerp: 1.0.1 - dev: false - - /mat4-recompose/1.0.4: - resolution: {integrity: sha512-s1P2Yl4LQxq8dN0CgJE+mCO8y3IX/SmauSZ+H0zJsE1UKlgJ9loInfPC/OUxn2MzUW9bfBZf0Wcc2QKA3/e6FQ==} - dependencies: - gl-mat4: 1.2.0 - dev: false - /math-log2/1.0.1: resolution: {integrity: sha512-9W0yGtkaMAkf74XGYVy4Dqw3YUMnTNB2eeiw9aQbUl4A3KmuCEHTt2DgAB07ENzOYAjsYSAYufkAq0Zd+jU7zA==} engines: {node: '>=0.10.0'} dev: false - /matrix-camera-controller/2.1.4: - resolution: {integrity: sha512-zsPGPONclrKSImNpqqKDTcqFpWLAIwMXEJtCde4IFPOw1dA9udzFg4HOFytOTosOFanchrx7+Hqq6glLATIxBA==} - dependencies: - binary-search-bounds: 2.0.5 - gl-mat4: 1.2.0 - gl-vec3: 1.1.3 - mat4-interpolate: 1.0.4 - dev: false - /md5.js/1.3.5: resolution: {integrity: sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==} dependencies: @@ -11338,12 +10813,6 @@ packages: resolution: {integrity: sha512-5LC9SOxjSc2HF6vO2CyuTDNivEdoz2IvyJJGj6X8DJ0eFyfszE0QiEd+iXmBvUP3WHxSjFH/vIsA0EN00cgr8w==} dev: false - /monotone-convex-hull-2d/1.0.1: - resolution: {integrity: sha512-ixQ3qdXTVHvR7eAoOjKY8kGxl9YjOFtzi7qOjwmFFPfBqZHVOjUFOBy/Dk9dusamRSPJe9ggyfSypRbs0Bl8BA==} - dependencies: - robust-orientation: 1.2.1 - dev: false - /morgan/1.10.0: resolution: {integrity: sha512-AbegBVI4sh6El+1gNwvD5YIck7nSA36weD7xvIxG4in80j/UoK8AEGaWnnz8v1GxonMCltmlNs5ZKbGvl9b1XQ==} engines: {node: '>= 0.8.0'} @@ -11480,6 +10949,10 @@ packages: - supports-color dev: false + /native-promise-only/0.8.1: + resolution: {integrity: sha512-zkVhZUA3y8mbz652WrL5x0fB0ehrBkulWT3TomAQ9iDtyXZvzKeEA6GPxAItBYeNYl5yngKRX612qHOhvMkDeg==} + dev: false + /native-request/1.1.0: resolution: {integrity: sha512-uZ5rQaeRn15XmpgE0xoPL8YWqcX90VtCFglYwAgkvKM5e8fog+vePLAhHxuuv/gRkrQxIeh5U3q9sMNUrENqWw==} requiresBuild: true @@ -11489,55 +10962,16 @@ packages: /natural-compare/1.4.0: resolution: {integrity: 
sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} - /ndarray-extract-contour/1.0.1: - resolution: {integrity: sha512-iDngNoFRqrqbXGLP8BzyGrybw/Jnkkn7jphzc3ZFfO7dfmpL1Ph74/6xCi3xSvJFyVW90XpMnd766jTaRPsTCg==} - dependencies: - typedarray-pool: 1.2.0 - dev: false - - /ndarray-gradient/1.0.1: - resolution: {integrity: sha512-+xONVi7xxTCGL6KOb11Yyoe0tPNqAUKF39CvFoRjL5pdOmPd2G2pckK9lD5bpLF3q45LLnYNyiUSJSdNmQ2MTg==} - dependencies: - cwise-compiler: 1.1.3 - dup: 1.0.0 - dev: false - - /ndarray-linear-interpolate/1.0.0: - resolution: {integrity: sha512-UN0f4+6XWsQzJ2pP5gVp+kKn5tJed6mA3K/L50uO619+7LKrjcSNdcerhpqxYaSkbxNJuEN76N05yBBJySnZDw==} - dev: false - - /ndarray-ops/1.2.2: - resolution: {integrity: sha512-BppWAFRjMYF7N/r6Ie51q6D4fs0iiGmeXIACKY66fLpnwIui3Wc3CXiD/30mgLbDjPpSLrsqcp3Z62+IcHZsDw==} - dependencies: - cwise-compiler: 1.1.3 - dev: false - - /ndarray-pack/1.2.1: - resolution: {integrity: sha512-51cECUJMT0rUZNQa09EoKsnFeDL4x2dHRT0VR5U2H5ZgEcm95ZDWcMA5JShroXjHOejmAD/fg8+H+OvUnVXz2g==} - dependencies: - cwise-compiler: 1.1.3 - ndarray: 1.0.19 - dev: false - - /ndarray-scratch/1.2.0: - resolution: {integrity: sha512-a4pASwB1jQyJcKLYrwrladVfDZDUGc78qLJZbHyb1Q4rhte0URhzc6ALQpBcauwgov0sXLwZz3vYH5jKAhSMIg==} - dependencies: - ndarray: 1.0.19 - ndarray-ops: 1.2.2 - typedarray-pool: 1.2.0 - dev: false - - /ndarray-sort/1.0.1: - resolution: {integrity: sha512-Gpyis5NvEPOQVadDOG+Dx8bhYCkaxn5IlA4Ig/jBJIlnW1caDiPneQLzT/+AIMeHEmqlGZfdqO/I1TXJS2neAw==} - dependencies: - typedarray-pool: 1.2.0 - dev: false - - /ndarray/1.0.19: - resolution: {integrity: sha512-B4JHA4vdyZU30ELBw3g7/p9bZupyew5a7tX1Y/gGeF2hafrPaQZhgrGQfsvgfYbgdFZjYwuEcnaobeM/WMW+HQ==} + /needle/2.9.1: + resolution: {integrity: sha512-6R9fqJ5Zcmf+uYaFgdIHmLwNldn5HbK8L5ybn7Uz+ylX/rnOsSp1AHcvQSrCaFN+qNM1wpymHqD7mVasEOlHGQ==} + engines: {node: '>= 4.4.x'} + hasBin: true dependencies: - iota-array: 1.0.0 - is-buffer: 1.1.6 + debug: 3.2.7 + iconv-lite: 0.4.24 + sax: 1.2.4 + transitivePeerDependencies: + - supports-color dev: false /negotiator/0.6.3: @@ -11552,12 +10986,6 @@ packages: resolution: {integrity: sha512-CXdUiJembsNjuToQvxayPZF9Vqht7hewsvy2sOWafLvi2awflj9mOC6bHIg50orX8IJvWKY9wYQ/zB2kogPslQ==} dev: false - /nextafter/1.0.0: - resolution: {integrity: sha512-7PO+A89Tll2rSEfyrjtqO0MaI37+nnxBdnQcPypfbEYYuGaJxWGCqaOwQX4a3GHNTS08l1kazuiLEWZniZjMUQ==} - dependencies: - double-bits: 1.1.1 - dev: false - /nice-try/1.0.5: resolution: {integrity: sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==} @@ -11684,10 +11112,6 @@ packages: engines: {node: '>=8'} dev: true - /normals/1.1.0: - resolution: {integrity: sha512-XWeliW48BLvbVJ+cjQAOE+tA0m1M7Yi1iTPphAS9tBmW1A/c/cOVnEUecPCCMH5lEAihAcG6IRle56ls9k3xug==} - dev: false - /npm-run-all/4.1.5: resolution: {integrity: sha512-Oo82gJDAVcaMdi3nuoKFavkIHBRVqQ1qvMb+9LHk/cF4P6B2m8aP04hGf7oL6wZ9BuGwX1onlLhpuoofSyoQDQ==} engines: {node: '>= 4'} @@ -11733,10 +11157,6 @@ packages: is-finite: 1.1.0 dev: false - /numeric/1.2.6: - resolution: {integrity: sha512-avBiDAP8siMa7AfJgYyuxw1oyII4z2sswS23+O+ZfV28KrtNzy0wxUFwi4f3RyM4eeeXNs1CThxR7pb5QQcMiw==} - dev: false - /nwsapi/2.2.7: resolution: {integrity: sha512-ub5E4+FBPKwAZx0UwIQOjYWGHTEq5sPqHQNRN8Z9e4A7u3Tj1weLJsL59yH9vmvqEtBHaOmT6cYQKIZOxp35FQ==} dev: false @@ -11949,13 +11369,6 @@ packages: type-check: 0.3.2 word-wrap: 1.2.5 - /orbit-camera-controller/4.0.0: - resolution: {integrity: 
sha512-/XTmpr6FUT6MuKPBGN2nv9cS8jhhVs8do71VagBQS5p4rxM04MhqSnI/Uu+gVNN5s6KPcS73o1dHzjuDThEJUA==} - dependencies: - filtered-vector: 1.2.5 - gl-mat4: 1.2.0 - dev: false - /os-browserify/0.3.0: resolution: {integrity: sha512-gjcpUc3clBf9+210TRaDWbf+rZZZEshZ+DlXMRCeAjp0xhTrnQsKHypIy1J3d5hKdUzj69t708EHtU8P6bUn0A==} @@ -12056,13 +11469,6 @@ packages: semver: 6.3.1 dev: true - /pad-left/1.0.2: - resolution: {integrity: sha512-saxSV1EYAytuZDtQYEwi0DPzooG6aN18xyHrnJtzwjVwmMauzkEecd7hynVJGolNGk1Pl9tltmZqfze4TZTCxg==} - engines: {node: '>=0.10.0'} - dependencies: - repeat-string: 1.6.1 - dev: false - /pako/1.0.11: resolution: {integrity: sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==} @@ -12100,6 +11506,17 @@ packages: pbkdf2: 3.1.2 safe-buffer: 5.2.1 + /parse-asn1/5.1.7: + resolution: {integrity: sha512-CTM5kuWR3sx9IFamcl5ErfPl6ea/N8IYwiJ+vpeB2g+1iknv7zBl5uPwbMbRVznRVbrNY6lGuDoE5b30grmbqg==} + engines: {node: '>= 0.10'} + dependencies: + asn1.js: 4.10.1 + browserify-aes: 1.2.0 + evp_bytestokey: 1.0.3 + hash-base: 3.0.4 + pbkdf2: 3.1.2 + safe-buffer: 5.2.1 + /parse-json/2.2.0: resolution: {integrity: sha512-QR/GGaKCkhwk1ePQNYDRKYZ3mwU9ypsKhB0XyFnLQdomyEqk3e8wpW3V5Jp88zbxK4n5ST1nqo+g9juTpownhQ==} engines: {node: '>=0.10.0'} @@ -12251,19 +11668,6 @@ packages: /performance-now/2.1.0: resolution: {integrity: sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow==} - /permutation-parity/1.0.0: - resolution: {integrity: sha512-mRaEvnnWolbZuErWD08StRUZP9YOWG3cURP5nYpRg1D2PENzPXCUrPv8/bOk0tfln0hISLZjOdOcQCbsVpL2nQ==} - dependencies: - typedarray-pool: 1.2.0 - dev: false - - /permutation-rank/1.0.0: - resolution: {integrity: sha512-kmXwlQcd4JlV8g61jz0xDyroFNlJ/mP+KbSBllMuQD7FvaQInRnnAStElcppkUXd8qVFLvemy6msUmBn7sDzHg==} - dependencies: - invert-permutation: 1.0.0 - typedarray-pool: 1.2.0 - dev: false - /pick-by-alias/1.2.0: resolution: {integrity: sha512-ESj2+eBxhGrcA1azgHs7lARG5+5iLakc/6nlfbpjcLl00HuuUOIuORhYXN4D1HfvMSKuVtFQjAlnwi1JHEeDIw==} dev: false @@ -12343,94 +11747,56 @@ packages: find-up: 3.0.0 dev: false - /planar-dual/1.0.2: - resolution: {integrity: sha512-jfQCbX1kXu53+enC+BPQlfoZI1u5m8IUhFVtFG+9tUj84wnuaYNheR69avYWCNXWnUCkwUajmYMqX9M2Ruh4ug==} - dependencies: - compare-angle: 1.0.1 - dup: 1.0.0 - dev: false - - /planar-graph-to-polyline/1.0.6: - resolution: {integrity: sha512-h8a9kdAjo7mRhC0X6HZ42xzFp7vKDZA+Hygyhsq/08Qi4vVAQYJaLLYLvKUUzRbVKvdYqq0reXHyV0EygyEBHA==} - dependencies: - edges-to-adjacency-list: 1.0.0 - planar-dual: 1.0.2 - point-in-big-polygon: 2.0.1 - robust-orientation: 1.2.1 - robust-sum: 1.0.0 - two-product: 1.0.2 - uniq: 1.0.1 - dev: false - /please-upgrade-node/3.2.0: resolution: {integrity: sha512-gQR3WpIgNIKwBMVLkpMUeR3e1/E1y42bqDQZfql+kDeXd8COYfM8PQA4X6y7a8u9Ua9FHmsrrmirW2vHs45hWg==} dependencies: semver-compare: 1.0.0 dev: true - /plotly.js/1.58.5: - resolution: {integrity: sha512-ChTlnFXB4tB0CzcG1mqgUKYnrJsZ8REDGox8BHAa/ltsd48MOAhOmFgjyDxwsXyjjgwOI296GeYDft8g4ftLHQ==} + /plotly.js/2.25.2: + resolution: {integrity: sha512-Pf6dPYGl21W7A3FTgLQ52fpgvrqGhCPDT3+612bxwg4QXlvxhnoFwvuhT1BRW/l2nbYGpRoUH79K54yf2vCMVQ==} dependencies: + '@plotly/d3': 3.8.1 '@plotly/d3-sankey': 0.7.2 '@plotly/d3-sankey-circular': 0.33.1 - '@plotly/point-cluster': 3.1.9 '@turf/area': 6.5.0 '@turf/bbox': 6.5.0 '@turf/centroid': 6.5.0 - alpha-shape: 1.0.0 canvas-fit: 1.5.0 color-alpha: 1.0.4 color-normalize: 1.5.0 color-parse: 1.3.8 color-rgba: 2.1.1 - convex-hull: 1.0.3 country-regex: 1.1.0 - d3: 3.5.17 d3-force: 
1.2.1 + d3-format: 1.4.5 + d3-geo: 1.12.1 + d3-geo-projection: 2.9.0 d3-hierarchy: 1.1.9 - d3-interpolate: 1.4.0 + d3-interpolate: 3.0.1 + d3-time: 1.1.0 d3-time-format: 2.3.0 - delaunay-triangulate: 1.1.6 - es6-promise: 4.2.8 fast-isnumeric: 1.1.4 - gl-cone3d: 1.5.2 - gl-contour2d: 1.1.7 - gl-error3d: 1.0.16 - gl-heatmap2d: 1.1.1 - gl-line3d: 1.2.1 gl-mat4: 1.2.0 - gl-mesh3d: 2.3.1 - gl-plot2d: 1.4.5 - gl-plot3d: 2.4.7 - gl-pointcloud2d: 1.0.3 - gl-scatter3d: 1.2.3 - gl-select-box: 1.0.4 - gl-spikes2d: 1.0.2 - gl-streamtube3d: 1.4.1 - gl-surface3d: 1.6.0 gl-text: 1.3.1 glslify: 7.1.1 has-hover: 1.0.1 has-passive-events: 1.0.0 - image-size: 0.7.5 - is-mobile: 2.2.2 + is-mobile: 4.0.0 mapbox-gl: 1.10.1 - matrix-camera-controller: 2.1.4 mouse-change: 1.4.0 mouse-event-offset: 3.0.2 mouse-wheel: 1.2.0 - ndarray: 1.0.19 - ndarray-linear-interpolate: 1.0.0 + native-promise-only: 0.8.1 parse-svg-path: 0.1.2 + point-in-polygon: 1.1.0 polybooljs: 1.2.0 - regl: 1.7.0 + probe-image-size: 7.2.3 + regl: /@plotly/regl/2.1.2 regl-error2d: 2.0.12 regl-line2d: 3.1.2 regl-scatter2d: 3.2.9 regl-splom: 1.0.14 - right-now: 1.0.0 - robust-orientation: 1.2.1 - sane-topojson: 4.0.0 strongly-connected-components: 1.0.1 superscript-text: 1.0.0 svg-path-sdf: 1.1.3 @@ -12439,6 +11805,8 @@ packages: topojson-client: 3.1.0 webgl-context: 2.2.0 world-calendars: 1.0.3 + transitivePeerDependencies: + - supports-color dev: false /plur/3.1.1: @@ -12466,25 +11834,14 @@ packages: - typescript dev: false - /point-in-big-polygon/2.0.1: - resolution: {integrity: sha512-DtrN8pa2VfMlvmWlCcypTFeBE4+OYz1ojDNJLKCWa4doiVAD6PRBbxFYAT71tsp5oKaRXT5sxEiHCAQKb1zr2Q==} - dependencies: - binary-search-bounds: 2.0.5 - interval-tree-1d: 1.0.4 - robust-orientation: 1.2.1 - slab-decomposition: 1.0.3 + /point-in-polygon/1.1.0: + resolution: {integrity: sha512-3ojrFwjnnw8Q9242TzgXuTD+eKiutbzyslcq1ydfu82Db2y+Ogbmyrkpv0Hgj31qwT3lbS9+QAAO/pIQM35XRw==} dev: false /polybooljs/1.2.0: resolution: {integrity: sha512-mKjR5nolISvF+q2BtC1fi/llpxBPTQ3wLWN8+ldzdw2Hocpc8C72ZqnamCM4Z6z+68GVVjkeM01WJegQmZ8MEQ==} dev: false - /polytope-closest-point/1.0.0: - resolution: {integrity: sha512-rvmt1e2ci9AUyWeHg+jsNuhGC4eBtxX4WjD9uDdvQzv2I1CVJSgbblJTslNXpGUu4KZSsUtSzvIdHKRKfRF3kw==} - dependencies: - numeric: 1.2.6 - dev: false - /portfinder/1.0.32_supports-color@6.1.0: resolution: {integrity: sha512-on2ZJVVDXRADWE6jnQaX0ioEylzgBpQk8r55NE4wjXW1ZxO+BgDlY6DXwj20i0V8eB4SenDQ00WEaxfiIQPcxg==} engines: {node: '>= 0.12.0'} @@ -13217,6 +12574,16 @@ packages: parse-ms: 2.1.0 dev: false + /probe-image-size/7.2.3: + resolution: {integrity: sha512-HubhG4Rb2UH8YtV4ba0Vp5bQ7L78RTONYu/ujmCu5nBI8wGv24s4E9xSKBi0N1MowRpxk76pFCpJtW0KPzOK0w==} + dependencies: + lodash.merge: 4.6.2 + needle: 2.9.1 + stream-parser: 0.3.1 + transitivePeerDependencies: + - supports-color + dev: false + /process-nextick-args/2.0.1: resolution: {integrity: sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==} @@ -13291,6 +12658,10 @@ packages: forwarded: 0.2.0 ipaddr.js: 1.9.1 + /proxy-from-env/1.1.0: + resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} + dev: false + /prr/1.0.1: resolution: {integrity: sha512-yPw4Sng1gWghHQWj0B3ZggWUm4qVbPwPFcRG8KyxiU7J2OHFSoEHKS+EZ3fv5l1t9CyCiop6l/ZYeWbrgoQejw==} dev: false @@ -13368,12 +12739,6 @@ packages: resolution: {integrity: sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA==} engines: {node: '>=0.6'} - 
/quat-slerp/1.0.1: - resolution: {integrity: sha512-OTozCDeP5sW7cloGR+aIycctZasBhblk1xdsSGP1Iz5pEwDqyChloTmc96xsDfusFD7GRxwDDu+tpJX0Wa1kJw==} - dependencies: - gl-quat: 1.0.0 - dev: false - /query-string/4.3.4: resolution: {integrity: sha512-O2XLNDBIg1DnTOa+2XrIwSiXEV8h2KImXUnjhhn2+UsvZ+Es2uyd5CCRTNQlDGbzUQOW3aYCBx9rVA6dzsiY7Q==} engines: {node: '>=0.10.0'} @@ -13420,21 +12785,6 @@ packages: resolution: {integrity: sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==} engines: {node: '>= 0.6'} - /rat-vec/1.1.1: - resolution: {integrity: sha512-FbxGwkQxmw4Jx41LR7yMOR+g8M9TWCEmf/SUBQVLuK2eh0nThnffF7IUualr3XE2x5F8AdLiCVeSGwXd4snfgg==} - dependencies: - big-rat: 1.0.4 - dev: false - - /raw-body/2.5.1: - resolution: {integrity: sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==} - engines: {node: '>= 0.8'} - dependencies: - bytes: 3.1.2 - http-errors: 2.0.0 - iconv-lite: 0.4.24 - unpipe: 1.0.0 - /raw-body/2.5.2: resolution: {integrity: sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==} engines: {node: '>= 0.8'} @@ -13443,7 +12793,6 @@ packages: http-errors: 2.0.0 iconv-lite: 0.4.24 unpipe: 1.0.0 - dev: true /rc-align/2.4.5: resolution: {integrity: sha512-nv9wYUYdfyfK+qskThf4BQUSIadeI/dCsfaMZfNEoxm9HwOIioQ+LyqmMK6jWHAZQgOzMLaqawhuBXlF63vgjw==} @@ -14072,13 +13421,13 @@ packages: resolution: {integrity: sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA==} dev: false - /react-plotly.js/2.6.0_f6dluzp62qf57yw3gl4ocsg3e4: + /react-plotly.js/2.6.0_qtjenpcawcnnxnr626ndcvhi4u: resolution: {integrity: sha512-g93xcyhAVCSt9kV1svqG1clAEdL6k3U+jjuSzfTV7owaSU9Go6Ph8bl25J+jKfKvIGAEYpe4qj++WHJuc9IaeA==} peerDependencies: plotly.js: '>1.34.0' react: '>0.13.0' dependencies: - plotly.js: 1.58.5 + plotly.js: 2.25.2 prop-types: 15.8.1 react: 16.14.0 dev: false @@ -14394,14 +13743,6 @@ packages: strip-indent: 2.0.0 dev: true - /reduce-simplicial-complex/1.0.0: - resolution: {integrity: sha512-t+nT7sHDtcxBx8TbglqfLsLKoFiSn9hp6GFojJEThHBAFv72wQeq/uRiPYZa4Xb8FR1Ye1foRcBV3Ki6bgm+pQ==} - dependencies: - cell-orientation: 1.0.1 - compare-cell: 1.0.0 - compare-oriented-cell: 1.0.1 - dev: false - /reflect.getprototypeof/1.0.3: resolution: {integrity: sha512-TTAOZpkJ2YLxl7mVHWrNo3iDMEkYlva/kgFcXndqMgbo/AZUmmavEkdXV+hXtE4P8xdyEKRzalaFqZVuwIk/Nw==} engines: {node: '>= 0.4'} @@ -14570,10 +13911,6 @@ packages: regl-scatter2d: 3.2.9 dev: false - /regl/1.7.0: - resolution: {integrity: sha512-bEAtp/qrtKucxXSJkD4ebopFZYP0q1+3Vb2WECWv/T8yQEgKxDxJ7ztO285tAMaYZVR6mM1GgI6CCn8FROtL1w==} - dev: false - /regl/2.1.0: resolution: {integrity: sha512-oWUce/aVoEvW5l2V0LK7O5KJMzUSKeiOwFuJehzpSFd43dO5spP9r+sSUfhKtsky4u6MCqWJaRL+abzExynfTg==} dev: false @@ -14833,78 +14170,6 @@ packages: classnames: 2.3.2 dev: false - /robust-compress/1.0.0: - resolution: {integrity: sha512-E8btSpQ6zZr7LvRLrLvb+N5rwQ0etUbsXFKv5NQj6TVK6RYT00Qg9iVFvIWR+GxXUvpes7FDN0WfXa3l7wtGOw==} - dev: false - - /robust-determinant/1.1.0: - resolution: {integrity: sha512-xva9bx/vyAv3pVYL2++vlnvM9q7oQOeCS5iscmlWtmaXHEgI4GFWeuYPUVVhvmYwx9N49EsQTonVJihYtcMo1Q==} - dependencies: - robust-compress: 1.0.0 - robust-scale: 1.0.2 - robust-sum: 1.0.0 - two-product: 1.0.2 - dev: false - - /robust-dot-product/1.0.0: - resolution: {integrity: sha512-Nu/wah8B8RotyZLRPdlEL0ZDh3b7wSwUBLdbTHwS/yw0qqjMJ943PSCkd6EsF5R5QFDWF2x77DGsbmnv9/7/ew==} - dependencies: - robust-sum: 1.0.0 - two-product: 1.0.2 - dev: false - - 
/robust-in-sphere/1.2.1: - resolution: {integrity: sha512-3zJdcMIOP1gdwux93MKTS0RiMYEGwQBoE5R1IW/9ZQmGeZzP7f7i4+xdcK8ujJvF/dEOS1WPuI9IB1WNFbj3Cg==} - dependencies: - robust-scale: 1.0.2 - robust-subtract: 1.0.0 - robust-sum: 1.0.0 - two-product: 1.0.2 - dev: false - - /robust-linear-solve/1.0.0: - resolution: {integrity: sha512-I1qW8Bl9+UYeGNh2Vt8cwkcD74xWMyjnU6lSVcZrf0eyfwPmreflY3v0SvqCZOj5ddxnSS1Xp31igbFNcg1TGQ==} - dependencies: - robust-determinant: 1.1.0 - dev: false - - /robust-orientation/1.2.1: - resolution: {integrity: sha512-FuTptgKwY6iNuU15nrIJDLjXzCChWB+T4AvksRtwPS/WZ3HuP1CElCm1t+OBfgQKfWbtZIawip+61k7+buRKAg==} - dependencies: - robust-scale: 1.0.2 - robust-subtract: 1.0.0 - robust-sum: 1.0.0 - two-product: 1.0.2 - dev: false - - /robust-product/1.0.0: - resolution: {integrity: sha512-7ww6m+ICW6Dt7ylHVy1aeeNwTfMXfh2BHqHVNE+CHvrU9sI97Vb6uHnid0MN3I9afTI5DXOB7q4SQa2fxuo2Gw==} - dependencies: - robust-scale: 1.0.2 - robust-sum: 1.0.0 - dev: false - - /robust-scale/1.0.2: - resolution: {integrity: sha512-jBR91a/vomMAzazwpsPTPeuTPPmWBacwA+WYGNKcRGSh6xweuQ2ZbjRZ4v792/bZOhRKXRiQH0F48AvuajY0tQ==} - dependencies: - two-product: 1.0.2 - two-sum: 1.0.0 - dev: false - - /robust-segment-intersect/1.0.1: - resolution: {integrity: sha512-QWngxcL7rCRLK7nTMcTNBPi/q+fecrOo6aOtTPnXjT/Dve5AK20DzUSq2fznUS+rCAxyir6OdPgDCzcUxFtJoQ==} - dependencies: - robust-orientation: 1.2.1 - dev: false - - /robust-subtract/1.0.0: - resolution: {integrity: sha512-xhKUno+Rl+trmxAIVwjQMiVdpF5llxytozXJOdoT4eTIqmqsndQqFb1A0oiW3sZGlhMRhOi6pAD4MF1YYW6o/A==} - dev: false - - /robust-sum/1.0.0: - resolution: {integrity: sha512-AvLExwpaqUqD1uwLU6MwzzfRdaI6VEZsyvQ3IAQ0ZJ08v1H+DTyqskrf2ZJyh0BDduFVLN7H04Zmc+qTiahhAw==} - dev: false - /rsvp/4.8.5: resolution: {integrity: sha512-nfMOlASu9OnRJo1mbEk2cz0D56a1MBNrJ7orjRZQG10XDyuvwksKbuXNp6qa+kbn839HwjwhBzhFmdsaEAfauA==} engines: {node: 6.* || >= 7.*} @@ -14966,10 +14231,6 @@ packages: /safer-buffer/2.1.2: resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} - /sane-topojson/4.0.0: - resolution: {integrity: sha512-bJILrpBboQfabG3BNnHI2hZl52pbt80BE09u4WhnrmzuF2JbMKZdl62G5glXskJ46p+gxE2IzOwGj/awR4g8AA==} - dev: false - /sane/4.1.0: resolution: {integrity: sha512-hhbzAgTIX8O7SHfp2c8/kREfEn4qO/9q8C9beyY6+tvZ87EpoZ3i1RIEvp27YBswnNbY9mWd6paKVmKbAgLfZA==} engines: {node: 6.* || 8.* || >= 10.*} @@ -15294,10 +14555,6 @@ packages: /signal-exit/3.0.7: resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==} - /signum/0.0.0: - resolution: {integrity: sha512-nct2ZUmwemVxeuPY5h+JLpHGJvLCXXNahGVI7IB3a6Fy5baX9AGSb854HceYH4FBw4eGjoZfEo9YRfkGfKdZQA==} - dev: false - /signum/1.0.0: resolution: {integrity: sha512-yodFGwcyt59XRh7w5W3jPcIQb3Bwi21suEfT7MAWnBX3iCdklJpgDgvGT9o04UonglZN5SNMfJFkHIR/jO8GHw==} dev: false @@ -15308,55 +14565,10 @@ packages: is-arrayish: 0.3.2 dev: false - /simplicial-complex-boundary/1.0.1: - resolution: {integrity: sha512-hz/AaVbs+s08EVoxlbCE68AlC6/mxFJLxJrGRMbDoTjz3030nhcOq+w5+f0/ZaU2EYjmwa8CdVKpiRVIrhaZjA==} - dependencies: - boundary-cells: 2.0.2 - reduce-simplicial-complex: 1.0.0 - dev: false - - /simplicial-complex-contour/1.0.2: - resolution: {integrity: sha512-Janyqvpa7jgr9MJbwR/XGyYz7bdhXNq7zgHxD0G54LCRNyn4bf3Hely2iWQeK/IGu3c5BaWFUh7ElxqXhKrq0g==} - dependencies: - marching-simplex-table: 1.0.0 - ndarray: 1.0.19 - ndarray-sort: 1.0.1 - typedarray-pool: 1.2.0 - dev: false - - /simplicial-complex/0.3.3: - resolution: {integrity: 
sha512-JFSxp7I5yORuKSuwGN96thhkqZVvYB4pkTMkk+PKP2QsOYYU1e84OBoHwOpFyFmjyvB9B3UDZKzHQI5S/CPUPA==} - dependencies: - bit-twiddle: 0.0.2 - union-find: 0.0.4 - dev: false - - /simplicial-complex/1.0.0: - resolution: {integrity: sha512-mHauIKSOy3GquM5VnYEiu7eP5y4A8BiaN9ezUUgyYFz1k68PqDYcyaH3kenp2cyvWZE96QKE3nrxYw65Allqiw==} - dependencies: - bit-twiddle: 1.0.2 - union-find: 1.0.2 - dev: false - - /simplify-planar-graph/2.0.1: - resolution: {integrity: sha512-KdC2ZPFvrGl9+lH/P3Yik7G0si2Zpk6Xiqjq8l9U1lOox5a/9dGLjevi9tvqoh4V7yQbs7fs6+rNCOAdrzUktw==} - dependencies: - robust-orientation: 1.2.1 - simplicial-complex: 0.3.3 - dev: false - /sisteransi/1.0.5: resolution: {integrity: sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==} dev: false - /slab-decomposition/1.0.3: - resolution: {integrity: sha512-1EfR304JHvX9vYQkUi4AKqN62mLsjk6W45xTk/TxwN8zd3HGwS7PVj9zj0I6fgCZqfGlimDEY+RzzASHn97ZmQ==} - dependencies: - binary-search-bounds: 2.0.5 - functional-red-black-tree: 1.0.1 - robust-orientation: 1.2.1 - dev: false - /slash/1.0.0: resolution: {integrity: sha512-3TYDR7xWt4dIqV2JauJr+EJeW356RXijHeUlO+8djJ+uBXPn8/2dpzBc8yQhh583sVvc9CvFAeQVgijsH+PNNg==} engines: {node: '>=0.10.0'} @@ -15529,13 +14741,6 @@ packages: - supports-color dev: false - /split-polygon/1.0.0: - resolution: {integrity: sha512-nBFcgQUVEE8dcOjuKaRdlM53k8RxUYpRxZ//n0pHJQGhbVscrsti+gllJI3pK3y7fgFwGWgt7NFhAX5sz0UoWQ==} - dependencies: - robust-dot-product: 1.0.0 - robust-sum: 1.0.0 - dev: false - /split-string/3.1.0: resolution: {integrity: sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==} engines: {node: '>=0.10.0'} @@ -15545,10 +14750,6 @@ packages: /sprintf-js/1.0.3: resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==} - /sprintf-js/1.1.2: - resolution: {integrity: sha512-VE0SOVEHCk7Qc8ulkWw3ntAzXuqf7S2lvwQaDLRnUeIEaKNQJzV6BwmLKhOqT61aGhfUMrXeaBk+oDGCzvhcug==} - dev: false - /sshpk/1.17.0: resolution: {integrity: sha512-/9HIEs1ZXGhSPE8X6Ccm7Nam1z8KcoCqPdI7ecm1N33EzAetWahvQWVqLZtaZQ+IDKX4IyA2o0gBzqIMkAagHQ==} engines: {node: '>=0.10.0'} @@ -15649,6 +14850,14 @@ packages: to-arraybuffer: 1.0.1 xtend: 4.0.2 + /stream-parser/0.3.1: + resolution: {integrity: sha512-bJ/HgKq41nlKvlhccD5kaCr/P+Hu0wPNKPJOH7en+YrJu/9EgqUF+88w5Jb6KNcjOFMhfX4B2asfeAtIGuHObQ==} + dependencies: + debug: 2.6.9 + transitivePeerDependencies: + - supports-color + dev: false + /stream-shift/1.0.1: resolution: {integrity: sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==} dev: false @@ -15903,14 +15112,6 @@ packages: resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} engines: {node: '>= 0.4'} - /surface-nets/1.0.2: - resolution: {integrity: sha512-Se+BaCb5yc8AV1IfT6TwTWEe/KuzzjzcMQQCbcIahzk9xRO5bIxxGM2MmKxE9nmq8+RD8DLBLXu0BjXoRs21iw==} - dependencies: - ndarray-extract-contour: 1.0.1 - triangulate-hypercube: 1.0.1 - zero-crossings: 1.0.1 - dev: false - /svg-arc-to-cubic-bezier/3.2.0: resolution: {integrity: sha512-djbJ/vZKZO+gPoSDThGNpKDO+o+bAeA4XQKovvkNCqnIS2t+S4qnLAGQhyyrulhCFRl1WWzAp0wUDV8PpTVU3g==} dev: false @@ -16053,12 +15254,6 @@ packages: require-main-filename: 2.0.0 dev: false - /text-cache/4.2.2: - resolution: {integrity: sha512-zky+UDYiX0a/aPw/YTBD+EzKMlCTu1chFuCMZeAkgoRiceySdROu1V2kJXhCbtEdBhiOviYnAdGiSYl58HW0ZQ==} - dependencies: - vectorize-text: 3.2.2 - dev: false - /text-table/0.2.0: 
resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==} @@ -16210,20 +15405,6 @@ packages: punycode: 2.3.0 dev: false - /triangulate-hypercube/1.0.1: - resolution: {integrity: sha512-SAIacSBfUNfgeCna8q2i+1taOtFJkYuOqpduaJ1KUeOJpqc0lLKMYzPnZb4CA6KCOiD8Pd4YbuVq41wa9dvWyw==} - dependencies: - gamma: 0.1.0 - permutation-parity: 1.0.0 - permutation-rank: 1.0.0 - dev: false - - /triangulate-polyline/1.0.3: - resolution: {integrity: sha512-crJcVFtVPFYQ8r9iIhe9JqkauDvNWDSZLot8ly3DniSCO+zyUfKbtfD3fEoBaA5uMrQU/zBi11NBuVQeSToToQ==} - dependencies: - cdt2d: 1.0.0 - dev: false - /trim-newlines/2.0.0: resolution: {integrity: sha512-MTBWv3jhVjTU7XR3IQHllbiJs8sc75a80OEhB6or/q7pLTWgQ0bMGQXXYQSrSuXe6WiKWDZ5txXY5P59a/coVA==} engines: {node: '>=4'} @@ -16274,25 +15455,9 @@ packages: dependencies: safe-buffer: 5.2.1 - /turntable-camera-controller/3.0.1: - resolution: {integrity: sha512-UOGu9W/Mx053pAaczi0BEPqvWJOqSgtpdigWG9C8dX8rQVdyl2hWmpdJW3m15QrGxJtJHIhhDTHVtTZzPkd/FA==} - dependencies: - filtered-vector: 1.2.5 - gl-mat4: 1.2.0 - gl-vec3: 1.1.3 - dev: false - /tweetnacl/0.14.5: resolution: {integrity: sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==} - /two-product/1.0.2: - resolution: {integrity: sha512-vOyrqmeYvzjToVM08iU52OFocWT6eB/I5LUWYnxeAPGXAhAxXYU/Yr/R2uY5/5n4bvJQL9AQulIuxpIsMoT8XQ==} - dev: false - - /two-sum/1.0.0: - resolution: {integrity: sha512-phP48e8AawgsNUjEY2WvoIWqdie8PoiDZGxTDv70LDr01uX5wLEQbOgSP7Z/B6+SW5oLtbe8qaYX2fKJs3CGTw==} - dev: false - /type-check/0.3.2: resolution: {integrity: sha512-ZCmOJdvOWDBYJlzAoFkC+Q0+bUyEOS1ltgp1MGU03fqHG+dbi9tBFU2Rd9QKiDZFAYrhPh2JUf7rZRIuHRKtOg==} engines: {node: '>= 0.8.0'} @@ -16433,14 +15598,6 @@ packages: engines: {node: '>=4'} dev: false - /union-find/0.0.4: - resolution: {integrity: sha512-207oken6EyGDCBK5l/LTPsWfgy8N8s6idwRK2TG0ssWhzPlxEDdBA8nIV+eLbkEMdA8pAwE8F7/xwv2sCESVjQ==} - dev: false - - /union-find/1.0.2: - resolution: {integrity: sha512-wFA9bMD/40k7ZcpKVXfu6X1qD3ri5ryO8HUsuA1RnxPCQl66Mu6DgkxyR+XNnd+osD0aLENixcJVFj+uf+O4gw==} - dev: false - /union-value/1.0.1: resolution: {integrity: sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==} engines: {node: '>=0.10.0'} @@ -16687,18 +15844,6 @@ packages: resolution: {integrity: sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==} engines: {node: '>= 0.8'} - /vectorize-text/3.2.2: - resolution: {integrity: sha512-34NVOCpMMQVXujU4vb/c6u98h6djI0jGdtC202H4Huvzn48B6ARsR7cmGh1xsAc0pHNQiUKGK/aHF05VtGv+eA==} - dependencies: - cdt2d: 1.0.0 - clean-pslg: 1.1.2 - ndarray: 1.0.19 - planar-graph-to-polyline: 1.0.6 - simplify-planar-graph: 2.0.1 - surface-nets: 1.0.2 - triangulate-polyline: 1.0.3 - dev: false - /vendors/1.0.4: resolution: {integrity: sha512-/juG65kTL4Cy2su4P8HjtkTxk6VmJDiOPBufWniqQ6wknac6jNiXS9vU+hO3wgusiyqWlzTbVHi0dyJqRONg3w==} dev: false @@ -16781,10 +15926,6 @@ packages: resolution: {integrity: sha512-lNR9aAefbGPpHO7AEnY0hCFjz1eTkWCXYvkTRrTHs9qv8zJp+SkVYpzfLIFXQQiG3tVvbNFQgVg2bQS8YGgxyw==} dev: false - /weakmap-shim/1.1.1: - resolution: {integrity: sha512-/wNyG+1FpiHhnfQo+TuA/XAUpvOOkKVl0A4qpT+oGcj5SlZCLmM+M1Py/3Sj8sy+YrEauCVITOxCsZKo6sPbQg==} - dev: false - /webgl-context/2.2.0: resolution: {integrity: sha512-q/fGIivtqTT7PEoF07axFIlHNk/XCPaYpq64btnepopSWvKNFkoORlQYgqDigBIuGA1ExnFd/GnSUnBNEPQY7Q==} dependencies: @@ -16827,12 +15968,12 @@ packages: connect-history-api-fallback: 1.6.0 debug: 
4.3.4_supports-color@6.1.0 del: 4.1.1 - express: 4.18.2_supports-color@6.1.0 + express: 4.19.2_supports-color@6.1.0 html-entities: 1.4.0 http-proxy-middleware: 0.19.1_tmpgdztspuwvsxzgjkhoqk7duq import-local: 2.0.0 internal-ip: 4.3.0 - ip: 1.1.8 + ip: 1.1.9 is-absolute-url: 3.0.3 killable: 1.0.1 loglevel: 1.8.1 @@ -17430,9 +16571,3 @@ packages: y18n: 4.0.3 yargs-parser: 15.0.3 dev: true - - /zero-crossings/1.0.1: - resolution: {integrity: sha512-iNIldMZaDtAyIJMJ8NnGVHeejH//y4eVmpXriM+q/B/BPNz+2E7oAgSnw9MXqCd3RbQ8W+hor7T2jEyRoc/s2A==} - dependencies: - cwise-compiler: 1.1.3 - dev: false diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java index b1aecc9a4f4e..a9ed342faad4 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java @@ -65,6 +65,7 @@ */ public final class OMMetadataManagerTestUtils { + private static OzoneConfiguration configuration; private OMMetadataManagerTestUtils() { } @@ -129,8 +130,9 @@ public static ReconOMMetadataManager getTestReconOmMetadataManager( DBCheckpoint checkpoint = omMetadataManager.getStore() .getCheckpoint(true); assertNotNull(checkpoint.getCheckpointLocation()); - - OzoneConfiguration configuration = new OzoneConfiguration(); + if (configuration == null) { + configuration = new OzoneConfiguration(); + } configuration.set(OZONE_RECON_OM_SNAPSHOT_DB_DIR, reconOmDbDir .getAbsolutePath()); @@ -501,4 +503,14 @@ public static OmKeyLocationInfo getOmKeyLocationInfo(BlockID blockID, public static BucketLayout getBucketLayout() { return BucketLayout.DEFAULT; } + + public static OzoneConfiguration getConfiguration() { + return configuration; + } + + public static void setConfiguration( + OzoneConfiguration configuration) { + OMMetadataManagerTestUtils.configuration = configuration; + } + } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java index 854ac74bd390..82c7c1b5bef0 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java @@ -885,6 +885,7 @@ public void testUnhealthyContainers() throws IOException, TimeoutException { public void testUnhealthyContainersFilteredResponse() throws IOException, TimeoutException { String missing = UnHealthyContainerStates.MISSING.toString(); + String emptyMissing = UnHealthyContainerStates.EMPTY_MISSING.toString(); Response response = containerEndpoint .getUnhealthyContainers(missing, 1000, 1); @@ -904,6 +905,7 @@ public void testUnhealthyContainersFilteredResponse() uuid3 = newDatanode("host3", "127.0.0.3"); uuid4 = newDatanode("host4", "127.0.0.4"); createUnhealthyRecords(5, 4, 3, 2); + createEmptyMissingUnhealthyRecords(2); response = containerEndpoint.getUnhealthyContainers(missing, 1000, 1); @@ -926,6 +928,13 @@ public void testUnhealthyContainersFilteredResponse() for (UnhealthyContainerMetadata r : records) { assertEquals(missing, r.getContainerState()); } + + Response filteredEmptyMissingResponse = containerEndpoint + .getUnhealthyContainers(emptyMissing, 1000, 1); + responseObject = (UnhealthyContainersResponse) 
filteredEmptyMissingResponse.getEntity(); + records = responseObject.getContainers(); + // Assert for zero empty missing containers. + assertEquals(0, records.size()); } @Test @@ -1026,6 +1035,14 @@ UUID newDatanode(String hostName, String ipAddress) throws IOException { return uuid; } + private void createEmptyMissingUnhealthyRecords(int emptyMissing) { + int cid = 0; + for (int i = 0; i < emptyMissing; i++) { + createUnhealthyRecord(++cid, UnHealthyContainerStates.EMPTY_MISSING.toString(), + 3, 3, 0, null); + } + } + private void createUnhealthyRecords(int missing, int overRep, int underRep, int misRep) { int cid = 0; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java index ba00f843f447..765399f71e3a 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithLegacy.java @@ -74,15 +74,17 @@ import java.util.Set; import java.util.HashSet; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.setConfiguration; + import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDirToOm; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -875,6 +877,7 @@ private static OMMetadataManager initializeNewOmMetadataManager( omDbDir.getAbsolutePath()); omConfiguration.set(OMConfigKeys .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true"); + setConfiguration(omConfiguration); OMMetadataManager omMetadataManager = new OmMetadataManagerImpl( omConfiguration, null); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java similarity index 71% rename from hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java rename to hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java index ac8dee5f0937..8d8299aefc18 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBSAndLegacy.java @@ -33,6 +33,7 @@ import 
org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; +import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -62,6 +63,7 @@ import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; +import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithLegacy; import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithOBS; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -91,28 +93,37 @@ import static org.mockito.Mockito.when; /** - * Test for NSSummary REST APIs with OBS. - * Testing is done on a simple object store model with a flat hierarchy: - * Testing the following case. - * ├── vol - * │ ├── bucket1 - * │ │ ├── file1 - * │ │ └── file2 - * │ │ └── file3 - * │ └── bucket2 - * │ ├── file4 - * │ └── file5 - * └── vol2 - * ├── bucket3 - * │ ├── file8 - * │ ├── file9 - * │ └── file10 - * └── bucket4 - * └── file11 - * This tests the Rest APIs for NSSummary in the context of OBS buckets, - * focusing on disk usage, quota usage, and file size distribution. + * Tests the NSSummary REST APIs within the context of an Object Store (OBS) layout, + * as well as Legacy layout buckets with FileSystemPaths disabled. The tests aim to + * validate API responses for buckets that follow the flat hierarchy model typical + * of OBS layouts. + *
+ * The test environment simulates a simple object storage structure with volumes + * containing buckets, which in turn contain files. Specifically, it includes: + * - Two OBS layout buckets (bucket1 and bucket2) under 'vol', each containing + * multiple files. + * - Two Legacy layout buckets (bucket3 and bucket4) under 'vol2', with the + * fileSystemEnabled flag set to false for these legacy buckets. + *
+ * The directory structure for testing is as follows: + * . + * └── vol + * ├── bucket1 (OBS) + * │ ├── file1 + * │ ├── file2 + * │ └── file3 + * └── bucket2 (OBS) + * ├── file4 + * └── file5 + * └── vol2 + * ├── bucket3 (Legacy) + * │ ├── file8 + * │ ├── file9 + * │ └── file10 + * └── bucket4 (Legacy) + * └── file11 */ -public class TestNSSummaryEndpointWithOBS { +public class TestNSSummaryEndpointWithOBSAndLegacy { @TempDir private Path temporaryFolder; @@ -136,14 +147,14 @@ public class TestNSSummaryEndpointWithOBS { private static final String BUCKET_THREE = "bucket3"; private static final String BUCKET_FOUR = "bucket4"; private static final String KEY_ONE = "file1"; - private static final String KEY_TWO = "file2"; - private static final String KEY_THREE = "file3"; + private static final String KEY_TWO = "////file2"; + private static final String KEY_THREE = "file3///"; private static final String KEY_FOUR = "file4"; - private static final String KEY_FIVE = "file5"; + private static final String KEY_FIVE = "_//////"; private static final String KEY_EIGHT = "file8"; - private static final String KEY_NINE = "file9"; - private static final String KEY_TEN = "file10"; - private static final String KEY_ELEVEN = "file11"; + private static final String KEY_NINE = "//////"; + private static final String KEY_TEN = "///__file10"; + private static final String KEY_ELEVEN = "////file11"; private static final String MULTI_BLOCK_FILE = KEY_THREE; private static final long PARENT_OBJECT_ID_ZERO = 0L; @@ -256,6 +267,13 @@ public class TestNSSummaryEndpointWithOBS { + FILE2_SIZE_WITH_REPLICA + FILE3_SIZE_WITH_REPLICA; + private static final long + MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET3 + = FILE8_SIZE_WITH_REPLICA + + FILE9_SIZE_WITH_REPLICA + + FILE10_SIZE_WITH_REPLICA; + + private static final long MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_KEY = FILE4_SIZE_WITH_REPLICA; @@ -278,7 +296,29 @@ public class TestNSSummaryEndpointWithOBS { ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE; private static final String BUCKET_TWO_PATH = ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO; - private static final String KEY_PATH = + private static final String BUCKET_THREE_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE; + private static final String BUCKET_FOUR_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_FOUR; + private static final String KEY_ONE_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_ONE; + private static final String KEY_TWO_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_TWO; + private static final String KEY_THREE_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_THREE; + private static final String KEY_FOUR_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FOUR; + private static final String KEY_FIVE_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FIVE; + private static final String KEY_EIGHT_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE + ROOT_PATH + KEY_EIGHT; + private static final String KEY_NINE_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE + ROOT_PATH + KEY_NINE; + private static final String KEY_TEN_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_THREE + ROOT_PATH + KEY_TEN; + private static final String KEY_ELEVEN_PATH = + ROOT_PATH + VOL_TWO + ROOT_PATH + BUCKET_FOUR + ROOT_PATH + KEY_ELEVEN; + private static final String KEY4_PATH = ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FOUR; private static final String MULTI_BLOCK_KEY_PATH = ROOT_PATH + VOL + 
ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_THREE; @@ -302,10 +342,17 @@ public class TestNSSummaryEndpointWithOBS { private static final long BUCKET_TWO_DATA_SIZE = FILE_FOUR_SIZE + FILE_FIVE_SIZE; + private static final long BUCKET_THREE_DATA_SIZE = + FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE; + + private static final long BUCKET_FOUR_DATA_SIZE = FILE_ELEVEN_SIZE; + @BeforeEach public void setUp() throws Exception { conf = new OzoneConfiguration(); + // By setting this config our Legacy buckets will behave like OBS buckets. + conf.set(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, "false"); OMMetadataManager omMetadataManager = initializeNewOmMetadataManager( Files.createDirectory(temporaryFolder.resolve( "JunitOmDBDir")).toFile(), conf); @@ -337,6 +384,10 @@ public void setUp() throws Exception { new NSSummaryTaskWithOBS(reconNamespaceSummaryManager, reconOMMetadataManager, conf); nsSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager); + NSSummaryTaskWithLegacy nsSummaryTaskWithLegacy = + new NSSummaryTaskWithLegacy(reconNamespaceSummaryManager, + reconOMMetadataManager, conf); + nsSummaryTaskWithLegacy.reprocessWithLegacy(reconOMMetadataManager); commonUtils = new CommonUtils(); } @@ -381,6 +432,26 @@ public void testGetBasicInfoVol() throws Exception { assertEquals(-1, volResponseObj.getObjectDBInfo().getQuotaInNamespace()); } + @Test + public void testGetBasicInfoVolTwo() throws Exception { + // Test volume 2's basics + Response volTwoResponse = nsSummaryEndpoint.getBasicInfo(VOL_TWO_PATH); + NamespaceSummaryResponse volTwoResponseObj = + (NamespaceSummaryResponse) volTwoResponse.getEntity(); + assertEquals(EntityType.VOLUME, + volTwoResponseObj.getEntityType()); + assertEquals(2, volTwoResponseObj.getCountStats().getNumBucket()); + assertEquals(4, volTwoResponseObj.getCountStats().getNumTotalKey()); + assertEquals(TEST_USER, ((VolumeObjectDBInfo) volTwoResponseObj. + getObjectDBInfo()).getAdmin()); + assertEquals(TEST_USER, ((VolumeObjectDBInfo) volTwoResponseObj. 
+ getObjectDBInfo()).getOwner()); + assertEquals(VOL_TWO, volTwoResponseObj.getObjectDBInfo().getName()); + assertEquals(2097152, + volTwoResponseObj.getObjectDBInfo().getQuotaInBytes()); + assertEquals(-1, volTwoResponseObj.getObjectDBInfo().getQuotaInNamespace()); + } + @Test public void testGetBasicInfoBucketOne() throws Exception { // Test bucket 1's basics @@ -395,7 +466,7 @@ public void testGetBasicInfoBucketOne() throws Exception { assertEquals(StorageType.DISK, ((BucketObjectDBInfo) bucketOneObj.getObjectDBInfo()).getStorageType()); - assertEquals(getBucketLayout(), + assertEquals(getOBSBucketLayout(), ((BucketObjectDBInfo) bucketOneObj.getObjectDBInfo()).getBucketLayout()); assertEquals(BUCKET_ONE, @@ -405,9 +476,64 @@ public void testGetBasicInfoBucketOne() throws Exception { @Test public void testGetBasicInfoBucketTwo() throws Exception { // Test bucket 2's basics - commonUtils.testNSSummaryBasicInfoBucketTwo( - BucketLayout.OBJECT_STORE, - nsSummaryEndpoint); + Response bucketTwoResponse = + nsSummaryEndpoint.getBasicInfo(BUCKET_TWO_PATH); + NamespaceSummaryResponse bucketTwoObj = + (NamespaceSummaryResponse) bucketTwoResponse.getEntity(); + assertEquals(EntityType.BUCKET, bucketTwoObj.getEntityType()); + assertEquals(2, bucketTwoObj.getCountStats().getNumTotalKey()); + assertEquals(VOL, + ((BucketObjectDBInfo) bucketTwoObj.getObjectDBInfo()).getVolumeName()); + assertEquals(StorageType.DISK, + ((BucketObjectDBInfo) + bucketTwoObj.getObjectDBInfo()).getStorageType()); + assertEquals(getOBSBucketLayout(), + ((BucketObjectDBInfo) + bucketTwoObj.getObjectDBInfo()).getBucketLayout()); + assertEquals(BUCKET_TWO, + ((BucketObjectDBInfo) bucketTwoObj.getObjectDBInfo()).getName()); + } + + @Test + public void testGetBasicInfoBucketThree() throws Exception { + // Test bucket 3's basics + Response bucketThreeResponse = + nsSummaryEndpoint.getBasicInfo(BUCKET_THREE_PATH); + NamespaceSummaryResponse bucketThreeObj = (NamespaceSummaryResponse) + bucketThreeResponse.getEntity(); + assertEquals(EntityType.BUCKET, bucketThreeObj.getEntityType()); + assertEquals(3, bucketThreeObj.getCountStats().getNumTotalKey()); + assertEquals(VOL_TWO, + ((BucketObjectDBInfo) bucketThreeObj.getObjectDBInfo()).getVolumeName()); + assertEquals(StorageType.DISK, + ((BucketObjectDBInfo) + bucketThreeObj.getObjectDBInfo()).getStorageType()); + assertEquals(getLegacyBucketLayout(), + ((BucketObjectDBInfo) + bucketThreeObj.getObjectDBInfo()).getBucketLayout()); + assertEquals(BUCKET_THREE, + ((BucketObjectDBInfo) bucketThreeObj.getObjectDBInfo()).getName()); + } + + @Test + public void testGetBasicInfoBucketFour() throws Exception { + // Test bucket 4's basics + Response bucketFourResponse = + nsSummaryEndpoint.getBasicInfo(BUCKET_FOUR_PATH); + NamespaceSummaryResponse bucketFourObj = + (NamespaceSummaryResponse) bucketFourResponse.getEntity(); + assertEquals(EntityType.BUCKET, bucketFourObj.getEntityType()); + assertEquals(1, bucketFourObj.getCountStats().getNumTotalKey()); + assertEquals(VOL_TWO, + ((BucketObjectDBInfo) bucketFourObj.getObjectDBInfo()).getVolumeName()); + assertEquals(StorageType.DISK, + ((BucketObjectDBInfo) + bucketFourObj.getObjectDBInfo()).getStorageType()); + assertEquals(getLegacyBucketLayout(), + ((BucketObjectDBInfo) + bucketFourObj.getObjectDBInfo()).getBucketLayout()); + assertEquals(BUCKET_FOUR, + ((BucketObjectDBInfo) bucketFourObj.getObjectDBInfo()).getName()); } @Test @@ -461,24 +587,135 @@ public void testDiskUsageVolume() throws Exception { } @Test - public void 
testDiskUsageBucket() throws Exception { + public void testDiskUsageVolTwo() throws Exception { + // volume level DU + Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_TWO_PATH, + false, false); + DUResponse duVolRes = (DUResponse) volResponse.getEntity(); + assertEquals(2, duVolRes.getCount()); + List duData = duVolRes.getDuData(); + // sort based on subpath + Collections.sort(duData, + Comparator.comparing(DUResponse.DiskUsage::getSubpath)); + DUResponse.DiskUsage duBucket3 = duData.get(0); + DUResponse.DiskUsage duBucket4 = duData.get(1); + assertEquals(BUCKET_THREE_PATH, duBucket3.getSubpath()); + assertEquals(BUCKET_FOUR_PATH, duBucket4.getSubpath()); + assertEquals(VOL_TWO_DATA_SIZE, duVolRes.getSize()); + } + + @Test + public void testDiskUsageBucketOne() throws Exception { // bucket level DU Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH, false, false); DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); // There are no sub-paths under this OBS bucket. assertEquals(0, duBucketResponse.getCount()); + + Response bucketResponseWithSubpath = nsSummaryEndpoint.getDiskUsage( + BUCKET_ONE_PATH, true, false); + DUResponse duBucketResponseWithFiles = + (DUResponse) bucketResponseWithSubpath.getEntity(); + assertEquals(3, duBucketResponseWithFiles.getCount()); + assertEquals(BUCKET_ONE_DATA_SIZE, duBucketResponse.getSize()); } @Test - public void testDiskUsageKey() throws Exception { + public void testDiskUsageBucketTwo() throws Exception { + // bucket level DU + Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_TWO_PATH, + false, false); + DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); + // There are no sub-paths under this OBS bucket. + assertEquals(0, duBucketResponse.getCount()); + + Response bucketResponseWithSubpath = nsSummaryEndpoint.getDiskUsage( + BUCKET_TWO_PATH, true, false); + DUResponse duBucketResponseWithFiles = + (DUResponse) bucketResponseWithSubpath.getEntity(); + assertEquals(2, duBucketResponseWithFiles.getCount()); + + assertEquals(BUCKET_TWO_DATA_SIZE, duBucketResponse.getSize()); + } + + @Test + public void testDiskUsageBucketThree() throws Exception { + // bucket level DU + Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_THREE_PATH, + false, false); + DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); + // There are no sub-paths under this Legacy bucket. 
+ assertEquals(0, duBucketResponse.getCount()); + + Response bucketResponseWithSubpath = nsSummaryEndpoint.getDiskUsage( + BUCKET_THREE_PATH, true, false); + DUResponse duBucketResponseWithFiles = + (DUResponse) bucketResponseWithSubpath.getEntity(); + assertEquals(3, duBucketResponseWithFiles.getCount()); + + assertEquals(BUCKET_THREE_DATA_SIZE, duBucketResponse.getSize()); + } + + @Test + public void testDiskUsageKey1() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_ONE_PATH, + false, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_ONE_SIZE, duKeyResponse.getSize()); + } + + @Test + public void testDiskUsageKey2() throws Exception { // key level DU - Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_PATH, + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_TWO_PATH, false, false); - DUResponse keyObj = (DUResponse) keyResponse.getEntity(); - assertEquals(0, keyObj.getCount()); - assertEquals(FILE_FOUR_SIZE, keyObj.getSize()); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_TWO_SIZE, duKeyResponse.getSize()); + } + + @Test + public void testDiskUsageKey4() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY4_PATH, + true, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_FOUR_SIZE, duKeyResponse.getSize()); + } + + @Test + public void testDiskUsageKey5() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_FIVE_PATH, + false, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_FIVE_SIZE, duKeyResponse.getSize()); + } + + @Test + public void testDiskUsageKey8() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_EIGHT_PATH, + false, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_EIGHT_SIZE, duKeyResponse.getSize()); + } + + @Test + public void testDiskUsageKey11() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_ELEVEN_PATH, + false, false); + DUResponse duKeyResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(0, duKeyResponse.getCount()); + assertEquals(FILE_ELEVEN_SIZE, duKeyResponse.getSize()); } @Test @@ -531,7 +768,7 @@ public void testDataSizeUnderVolWithReplication() throws IOException { } @Test - public void testDataSizeUnderBucketWithReplication() throws IOException { + public void testDataSizeUnderBucketOneWithReplication() throws IOException { setUpMultiBlockReplicatedKeys(); Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH, false, true); @@ -541,10 +778,21 @@ public void testDataSizeUnderBucketWithReplication() throws IOException { replicaDUResponse.getSizeWithReplica()); } + @Test + public void testDataSizeUnderBucketThreeWithReplication() throws IOException { + setUpMultiBlockReplicatedKeys(); + Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_THREE_PATH, + false, true); + DUResponse replicaDUResponse = (DUResponse) bucketResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + 
assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET3, + replicaDUResponse.getSizeWithReplica()); + } + @Test public void testDataSizeUnderKeyWithReplication() throws IOException { setUpMultiBlockReplicatedKeys(); - Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_PATH, + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY4_PATH, false, true); DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity(); assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); @@ -579,8 +827,20 @@ public void testQuotaUsage() throws Exception { assertEquals(BUCKET_TWO_QUOTA, quBucketRes2.getQuota()); assertEquals(BUCKET_TWO_DATA_SIZE, quBucketRes2.getQuotaUsed()); + Response bucketRes3 = nsSummaryEndpoint.getQuotaUsage(BUCKET_THREE_PATH); + QuotaUsageResponse quBucketRes3 = + (QuotaUsageResponse) bucketRes3.getEntity(); + assertEquals(BUCKET_THREE_QUOTA, quBucketRes3.getQuota()); + assertEquals(BUCKET_THREE_DATA_SIZE, quBucketRes3.getQuotaUsed()); + + Response bucketRes4 = nsSummaryEndpoint.getQuotaUsage(BUCKET_FOUR_PATH); + QuotaUsageResponse quBucketRes4 = + (QuotaUsageResponse) bucketRes4.getEntity(); + assertEquals(BUCKET_FOUR_QUOTA, quBucketRes4.getQuota()); + assertEquals(BUCKET_FOUR_DATA_SIZE, quBucketRes4.getQuotaUsed()); + // other level not applicable - Response naResponse2 = nsSummaryEndpoint.getQuotaUsage(KEY_PATH); + Response naResponse2 = nsSummaryEndpoint.getQuotaUsage(KEY4_PATH); QuotaUsageResponse quotaUsageResponse2 = (QuotaUsageResponse) naResponse2.getEntity(); assertEquals(ResponseStatus.TYPE_NOT_APPLICABLE, @@ -617,26 +877,55 @@ public void checkFileSizeDist(String path, int bin0, } } + @Test + public void testNormalizePathUptoBucket() { + // Test null or empty path + assertEquals("/", OmUtils.normalizePathUptoBucket(null)); + assertEquals("/", OmUtils.normalizePathUptoBucket("")); + + // Test path with leading slashes + assertEquals("volume1/bucket1/key1/key2", + OmUtils.normalizePathUptoBucket("///volume1/bucket1/key1/key2")); + + // Test volume and bucket names + assertEquals("volume1/bucket1", + OmUtils.normalizePathUptoBucket("volume1/bucket1")); + + // Test with additional segments + assertEquals("volume1/bucket1/key1/key2", + OmUtils.normalizePathUptoBucket("volume1/bucket1/key1/key2")); + + // Test path with multiple slashes in key names. + assertEquals("volume1/bucket1/key1//key2", + OmUtils.normalizePathUptoBucket("volume1/bucket1/key1//key2")); + + // Test path with volume, bucket, and special characters in keys + assertEquals("volume/bucket/key$%#1/./////////key$%#2", + OmUtils.normalizePathUptoBucket("volume/bucket/key$%#1/./////////key$%#2")); + } + + /** * Testing the following case. - * ├── vol - * │ ├── bucket1 - * │ │ ├── file1 - * │ │ └── file2 - * │ │ └── file3 - * │ └── bucket2 - * │ ├── file4 - * │ └── file5 + * └── vol + * ├── bucket1 (OBS) + * │ ├── file1 + * │ ├── file2 + * │ └── file3 + * └── bucket2 (OBS) + * ├── file4 + * └── file5 * └── vol2 - * ├── bucket3 + * ├── bucket3 (Legacy) * │ ├── file8 * │ ├── file9 * │ └── file10 - * └── bucket4 + * └── bucket4 (Legacy) * └── file11 * * Write these keys to OM and * replicate them. 
+ * @throws Exception */ @SuppressWarnings("checkstyle:MethodLength") private void populateOMDB() throws Exception { @@ -652,7 +941,7 @@ private void populateOMDB() throws Exception { BUCKET_ONE_OBJECT_ID, VOL_OBJECT_ID, FILE_ONE_SIZE, - getBucketLayout()); + getOBSBucketLayout()); writeKeyToOm(reconOMMetadataManager, KEY_TWO, BUCKET_ONE, @@ -663,7 +952,7 @@ private void populateOMDB() throws Exception { BUCKET_ONE_OBJECT_ID, VOL_OBJECT_ID, FILE_TWO_SIZE, - getBucketLayout()); + getOBSBucketLayout()); writeKeyToOm(reconOMMetadataManager, KEY_THREE, BUCKET_ONE, @@ -674,7 +963,7 @@ private void populateOMDB() throws Exception { BUCKET_ONE_OBJECT_ID, VOL_OBJECT_ID, FILE_THREE_SIZE, - getBucketLayout()); + getOBSBucketLayout()); writeKeyToOm(reconOMMetadataManager, KEY_FOUR, BUCKET_TWO, @@ -685,7 +974,7 @@ private void populateOMDB() throws Exception { BUCKET_TWO_OBJECT_ID, VOL_OBJECT_ID, FILE_FOUR_SIZE, - getBucketLayout()); + getOBSBucketLayout()); writeKeyToOm(reconOMMetadataManager, KEY_FIVE, BUCKET_TWO, @@ -696,7 +985,7 @@ private void populateOMDB() throws Exception { BUCKET_TWO_OBJECT_ID, VOL_OBJECT_ID, FILE_FIVE_SIZE, - getBucketLayout()); + getOBSBucketLayout()); writeKeyToOm(reconOMMetadataManager, KEY_EIGHT, @@ -708,7 +997,7 @@ private void populateOMDB() throws Exception { BUCKET_THREE_OBJECT_ID, VOL_TWO_OBJECT_ID, FILE_EIGHT_SIZE, - getBucketLayout()); + getLegacyBucketLayout()); writeKeyToOm(reconOMMetadataManager, KEY_NINE, BUCKET_THREE, @@ -719,7 +1008,7 @@ private void populateOMDB() throws Exception { BUCKET_THREE_OBJECT_ID, VOL_TWO_OBJECT_ID, FILE_NINE_SIZE, - getBucketLayout()); + getLegacyBucketLayout()); writeKeyToOm(reconOMMetadataManager, KEY_TEN, BUCKET_THREE, @@ -730,7 +1019,7 @@ private void populateOMDB() throws Exception { BUCKET_THREE_OBJECT_ID, VOL_TWO_OBJECT_ID, FILE_TEN_SIZE, - getBucketLayout()); + getLegacyBucketLayout()); writeKeyToOm(reconOMMetadataManager, KEY_ELEVEN, BUCKET_FOUR, @@ -741,7 +1030,7 @@ private void populateOMDB() throws Exception { BUCKET_FOUR_OBJECT_ID, VOL_TWO_OBJECT_ID, FILE_ELEVEN_SIZE, - getBucketLayout()); + getLegacyBucketLayout()); } /** @@ -756,7 +1045,7 @@ private static OMMetadataManager initializeNewOmMetadataManager( omConfiguration.set(OZONE_OM_DB_DIRS, omDbDir.getAbsolutePath()); omConfiguration.set(OMConfigKeys - .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true"); + .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "false"); OMMetadataManager omMetadataManager = new OmMetadataManagerImpl( omConfiguration, null); @@ -788,7 +1077,7 @@ private static OMMetadataManager initializeNewOmMetadataManager( .setBucketName(BUCKET_ONE) .setObjectID(BUCKET_ONE_OBJECT_ID) .setQuotaInBytes(BUCKET_ONE_QUOTA) - .setBucketLayout(getBucketLayout()) + .setBucketLayout(getOBSBucketLayout()) .build(); OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder() @@ -796,7 +1085,7 @@ private static OMMetadataManager initializeNewOmMetadataManager( .setBucketName(BUCKET_TWO) .setObjectID(BUCKET_TWO_OBJECT_ID) .setQuotaInBytes(BUCKET_TWO_QUOTA) - .setBucketLayout(getBucketLayout()) + .setBucketLayout(getOBSBucketLayout()) .build(); OmBucketInfo bucketInfo3 = OmBucketInfo.newBuilder() @@ -804,7 +1093,7 @@ private static OMMetadataManager initializeNewOmMetadataManager( .setBucketName(BUCKET_THREE) .setObjectID(BUCKET_THREE_OBJECT_ID) .setQuotaInBytes(BUCKET_THREE_QUOTA) - .setBucketLayout(getBucketLayout()) + .setBucketLayout(getLegacyBucketLayout()) .build(); OmBucketInfo bucketInfo4 = OmBucketInfo.newBuilder() @@ -812,7 +1101,7 @@ private static OMMetadataManager 
initializeNewOmMetadataManager( .setBucketName(BUCKET_FOUR) .setObjectID(BUCKET_FOUR_OBJECT_ID) .setQuotaInBytes(BUCKET_FOUR_QUOTA) - .setBucketLayout(getBucketLayout()) + .setBucketLayout(getLegacyBucketLayout()) .build(); String bucketKey = omMetadataManager.getBucketKey( @@ -847,7 +1136,7 @@ private void setUpMultiBlockKey() throws IOException { BUCKET_ONE_OBJECT_ID, VOL_OBJECT_ID, Collections.singletonList(locationInfoGroup), - getBucketLayout(), + getOBSBucketLayout(), FILE_THREE_SIZE); } @@ -920,7 +1209,7 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { BUCKET_ONE_OBJECT_ID, VOL_OBJECT_ID, Collections.singletonList(locationInfoGroup1), - getBucketLayout(), + getOBSBucketLayout(), FILE_ONE_SIZE); //vol/bucket1/file2 @@ -934,7 +1223,7 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { BUCKET_ONE_OBJECT_ID, VOL_OBJECT_ID, Collections.singletonList(locationInfoGroup2), - getBucketLayout(), + getOBSBucketLayout(), FILE_TWO_SIZE); //vol/bucket1/file3 @@ -948,7 +1237,7 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { BUCKET_ONE_OBJECT_ID, VOL_OBJECT_ID, Collections.singletonList(locationInfoGroup1), - getBucketLayout(), + getOBSBucketLayout(), FILE_THREE_SIZE); //vol/bucket2/file4 @@ -962,7 +1251,7 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { BUCKET_TWO_OBJECT_ID, VOL_OBJECT_ID, Collections.singletonList(locationInfoGroup2), - getBucketLayout(), + getOBSBucketLayout(), FILE_FOUR_SIZE); //vol/bucket2/file5 @@ -976,7 +1265,7 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { BUCKET_TWO_OBJECT_ID, VOL_OBJECT_ID, Collections.singletonList(locationInfoGroup1), - getBucketLayout(), + getOBSBucketLayout(), FILE_FIVE_SIZE); //vol2/bucket3/file8 @@ -990,7 +1279,7 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { BUCKET_THREE_OBJECT_ID, VOL_TWO_OBJECT_ID, Collections.singletonList(locationInfoGroup2), - getBucketLayout(), + getLegacyBucketLayout(), FILE_EIGHT_SIZE); //vol2/bucket3/file9 @@ -1004,7 +1293,7 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { BUCKET_THREE_OBJECT_ID, VOL_TWO_OBJECT_ID, Collections.singletonList(locationInfoGroup1), - getBucketLayout(), + getLegacyBucketLayout(), FILE_NINE_SIZE); //vol2/bucket3/file10 @@ -1018,7 +1307,7 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { BUCKET_THREE_OBJECT_ID, VOL_TWO_OBJECT_ID, Collections.singletonList(locationInfoGroup2), - getBucketLayout(), + getLegacyBucketLayout(), FILE_TEN_SIZE); //vol2/bucket4/file11 @@ -1032,7 +1321,7 @@ private void setUpMultiBlockReplicatedKeys() throws IOException { BUCKET_FOUR_OBJECT_ID, VOL_TWO_OBJECT_ID, Collections.singletonList(locationInfoGroup1), - getBucketLayout(), + getLegacyBucketLayout(), FILE_ELEVEN_SIZE); } @@ -1115,10 +1404,14 @@ private static ReconStorageContainerManagerFacade getMockReconSCM() return reconSCM; } - private static BucketLayout getBucketLayout() { + private static BucketLayout getOBSBucketLayout() { return BucketLayout.OBJECT_STORE; } + private static BucketLayout getLegacyBucketLayout() { + return BucketLayout.LEGACY; + } + private static SCMNodeStat getMockSCMRootStat() { return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, ROOT_QUOTA - ROOT_DATA_SIZE, 0L, 0L); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java index 0414b8715c83..8b35bfdd4d2a 100644 --- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/common/CommonUtils.java @@ -40,7 +40,6 @@ import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -64,10 +63,14 @@ private OmPrefixInfo getOmPrefixInfoForTest( String identityString, IAccessAuthorizer.ACLType aclType, OzoneAcl.AclScope scope) { - return new OmPrefixInfo(path, - new ArrayList<>(Collections.singletonList(new OzoneAcl( + return OmPrefixInfo.newBuilder() + .setName(path) + .setAcls(new ArrayList<>(Collections.singletonList(new OzoneAcl( identityType, identityString, - scope, aclType))), new HashMap<>(), 10, 100); + scope, aclType)))) + .setObjectID(10) + .setUpdateID(100) + .build(); } public void testNSSummaryBasicInfoRoot( diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java new file mode 100644 index 000000000000..db4803676390 --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java @@ -0,0 +1,554 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.db.RDBBatchOperation; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.recon.ReconConstants; +import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.Set; + +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; + +/** + * Test for NSSummaryTaskWithLegacy focusing on the OBS (Object Store) layout. 
+ */ +public final class TestNSSummaryTaskWithLegacyOBSLayout { + + private static ReconNamespaceSummaryManager reconNamespaceSummaryManager; + private static ReconOMMetadataManager reconOMMetadataManager; + private static OzoneConfiguration ozoneConfiguration; + private static NSSummaryTaskWithLegacy nSSummaryTaskWithLegacy; + + private static OMMetadataManager omMetadataManager; + private static OzoneConfiguration omConfiguration; + + // Object names + private static final String VOL = "vol"; + private static final String BUCKET_ONE = "bucket1"; + private static final String BUCKET_TWO = "bucket2"; + private static final String KEY_ONE = "key1"; + private static final String KEY_TWO = "key2"; + private static final String KEY_THREE = "dir1/dir2/key3"; + private static final String KEY_FOUR = "key4///////////"; + private static final String KEY_FIVE = "//////////"; + private static final String KEY_SIX = "key6"; + private static final String KEY_SEVEN = "/////key7"; + + private static final String TEST_USER = "TestUser"; + + private static final long PARENT_OBJECT_ID_ZERO = 0L; + private static final long VOL_OBJECT_ID = 0L; + private static final long BUCKET_ONE_OBJECT_ID = 1L; + private static final long BUCKET_TWO_OBJECT_ID = 2L; + private static final long KEY_ONE_OBJECT_ID = 3L; + private static final long KEY_TWO_OBJECT_ID = 5L; + private static final long KEY_FOUR_OBJECT_ID = 6L; + private static final long KEY_THREE_OBJECT_ID = 8L; + private static final long KEY_FIVE_OBJECT_ID = 9L; + private static final long KEY_SIX_OBJECT_ID = 10L; + private static final long KEY_SEVEN_OBJECT_ID = 11L; + + + private static final long KEY_ONE_SIZE = 500L; + private static final long KEY_TWO_OLD_SIZE = 1025L; + private static final long KEY_TWO_UPDATE_SIZE = 1023L; + private static final long KEY_THREE_SIZE = + ReconConstants.MAX_FILE_SIZE_UPPER_BOUND - 100L; + private static final long KEY_FOUR_SIZE = 2050L; + private static final long KEY_FIVE_SIZE = 100L; + private static final long KEY_SIX_SIZE = 6000L; + private static final long KEY_SEVEN_SIZE = 7000L; + + private TestNSSummaryTaskWithLegacyOBSLayout() { + } + + @BeforeAll + public static void setUp(@TempDir File tmpDir) throws Exception { + initializeNewOmMetadataManager(new File(tmpDir, "om")); + OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = + getMockOzoneManagerServiceProviderWithFSO(); + reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, + new File(tmpDir, "recon")); + ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, + false); + + ReconTestInjector reconTestInjector = + new ReconTestInjector.Builder(tmpDir) + .withReconOm(reconOMMetadataManager) + .withOmServiceProvider(ozoneManagerServiceProvider) + .withReconSqlDb() + .withContainerDB() + .build(); + reconNamespaceSummaryManager = + reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); + + NSSummary nonExistentSummary = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + assertNull(nonExistentSummary); + + populateOMDB(); + + nSSummaryTaskWithLegacy = new NSSummaryTaskWithLegacy( + reconNamespaceSummaryManager, + reconOMMetadataManager, ozoneConfiguration); + } + + /** + * Nested class for testing NSSummaryTaskWithLegacy reprocess. 
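A note on what the "Legacy OBS layout" in this fixture means: the buckets are created with BucketLayout.LEGACY, but the Recon-side configuration switches ozone.om.enable.filesystem.paths off, so NSSummaryTaskWithLegacy treats slash-bearing names such as dir1/dir2/key3 or key4/////////// as flat object names charged directly to their bucket's NSSummary, with no intermediate directory nodes. A minimal sketch of that decision, assuming only the bucket layout and the flag matter (the method name is illustrative, not the task's actual code):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;

// Illustration only: when a bucket's keys are treated as flat object-store names
// rather than filesystem paths by the legacy namespace-summary handling.
final class LegacyAsObsSketch {
  private LegacyAsObsSketch() { }

  static boolean treatKeysAsFlatObjects(BucketLayout layout, OzoneConfiguration conf) {
    boolean fsPathsEnabled = conf.getBoolean(
        OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, false);
    // OBJECT_STORE is always flat; LEGACY is flat only while filesystem paths are disabled,
    // which is exactly how this test configures Recon.
    return layout == BucketLayout.OBJECT_STORE
        || (layout == BucketLayout.LEGACY && !fsPathsEnabled);
  }
}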
+ */ + @Nested + public class TestReprocess { + + private NSSummary nsSummaryForBucket1; + private NSSummary nsSummaryForBucket2; + + @BeforeEach + public void setUp() throws IOException { + // write a NSSummary prior to reprocess + // verify it got cleaned up after. + NSSummary staleNSSummary = new NSSummary(); + RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); + reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation, -1L, + staleNSSummary); + reconNamespaceSummaryManager.commitBatchOperation(rdbBatchOperation); + + // Verify commit + assertNotNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + + // reinit Recon RocksDB's namespace CF. + reconNamespaceSummaryManager.clearNSSummaryTable(); + + nSSummaryTaskWithLegacy.reprocessWithLegacy(reconOMMetadataManager); + assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + + nsSummaryForBucket1 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + nsSummaryForBucket2 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID); + assertNotNull(nsSummaryForBucket1); + assertNotNull(nsSummaryForBucket2); + } + + @Test + public void testReprocessNSSummaryNull() throws IOException { + assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + } + + @Test + public void testReprocessGetFiles() { + assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); + assertEquals(2, nsSummaryForBucket2.getNumOfFiles()); + + assertEquals(KEY_ONE_SIZE + KEY_TWO_OLD_SIZE + KEY_THREE_SIZE, + nsSummaryForBucket1.getSizeOfFiles()); + assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE, + nsSummaryForBucket2.getSizeOfFiles()); + } + + @Test + public void testReprocessFileBucketSize() { + int[] fileDistBucket1 = nsSummaryForBucket1.getFileSizeBucket(); + int[] fileDistBucket2 = nsSummaryForBucket2.getFileSizeBucket(); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket1.length); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket2.length); + + // Check for 1's and 0's in fileDistBucket1 + int[] expectedIndexes1 = {0, 1, 40}; + for (int index = 0; index < fileDistBucket1.length; index++) { + if (contains(expectedIndexes1, index)) { + assertEquals(1, fileDistBucket1[index]); + } else { + assertEquals(0, fileDistBucket1[index]); + } + } + + // Check for 1's and 0's in fileDistBucket2 + int[] expectedIndexes2 = {0, 2}; + for (int index = 0; index < fileDistBucket2.length; index++) { + if (contains(expectedIndexes2, index)) { + assertEquals(1, fileDistBucket2[index]); + } else { + assertEquals(0, fileDistBucket2[index]); + } + } + } + + } + + /** + * Nested class for testing NSSummaryTaskWithLegacy process. + */ + @Nested + public class TestProcess { + + private NSSummary nsSummaryForBucket1; + private NSSummary nsSummaryForBucket2; + + private OMDBUpdateEvent keyEvent1; + private OMDBUpdateEvent keyEvent2; + private OMDBUpdateEvent keyEvent3; + private OMDBUpdateEvent keyEvent4; + + @BeforeEach + public void setUp() throws IOException { + // reinit Recon RocksDB's namespace CF. 
+ reconNamespaceSummaryManager.clearNSSummaryTable(); + nSSummaryTaskWithLegacy.reprocessWithLegacy(reconOMMetadataManager); + nSSummaryTaskWithLegacy.processWithLegacy(processEventBatch()); + + nsSummaryForBucket1 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + assertNotNull(nsSummaryForBucket1); + nsSummaryForBucket2 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID); + assertNotNull(nsSummaryForBucket2); + } + + private OMUpdateEventBatch processEventBatch() throws IOException { + // Test PUT Event. + // PUT Key6 in Bucket2. + String omPutKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_TWO + + OM_KEY_PREFIX + KEY_SIX; + OmKeyInfo omPutKeyInfo = buildOmKeyInfo(VOL, BUCKET_TWO, KEY_SIX, + KEY_SIX, KEY_SIX_OBJECT_ID, BUCKET_TWO_OBJECT_ID, KEY_SIX_SIZE); + keyEvent1 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omPutKey) + .setValue(omPutKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT) + .build(); + // PUT Key7 in Bucket1. + omPutKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_SEVEN; + omPutKeyInfo = buildOmKeyInfo(VOL, BUCKET_ONE, KEY_SEVEN, + KEY_SEVEN, KEY_SEVEN_OBJECT_ID, BUCKET_ONE_OBJECT_ID, KEY_SEVEN_SIZE); + keyEvent2 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omPutKey) + .setValue(omPutKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT) + .build(); + + // Test DELETE Event. + // Delete Key1 in Bucket1. + String omDeleteKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_ONE; + OmKeyInfo omDeleteKeyInfo = buildOmKeyInfo(VOL, BUCKET_ONE, KEY_ONE, + KEY_ONE, KEY_ONE_OBJECT_ID, BUCKET_ONE_OBJECT_ID, KEY_ONE_SIZE); + keyEvent3 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omDeleteKey) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setValue(omDeleteKeyInfo) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE) + .build(); + + // Test UPDATE Event. + // Resize Key2 in Bucket1. + String omResizeKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_TWO; + OmKeyInfo oldOmResizeKeyInfo = + buildOmKeyInfo(VOL, BUCKET_ONE, KEY_TWO, KEY_TWO, KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, KEY_TWO_OLD_SIZE); + OmKeyInfo newOmResizeKeyInfo = + buildOmKeyInfo(VOL, BUCKET_ONE, KEY_TWO, KEY_TWO, KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, KEY_TWO_OLD_SIZE + 100); + keyEvent4 = new OMDBUpdateEvent. 
+ OMUpdateEventBuilder() + .setKey(omResizeKey) + .setOldValue(oldOmResizeKeyInfo) + .setValue(newOmResizeKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.UPDATE) + .build(); + + return new OMUpdateEventBatch( + Arrays.asList(keyEvent1, keyEvent2, keyEvent3, keyEvent4)); + } + + @Test + public void testProcessForCount() throws IOException { + assertNotNull(nsSummaryForBucket1); + assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); + assertNotNull(nsSummaryForBucket2); + assertEquals(3, nsSummaryForBucket2.getNumOfFiles()); + + Set childDirBucket1 = nsSummaryForBucket1.getChildDir(); + assertEquals(0, childDirBucket1.size()); + Set childDirBucket2 = nsSummaryForBucket2.getChildDir(); + assertEquals(0, childDirBucket2.size()); + } + + @Test + public void testProcessForSize() throws IOException { + assertNotNull(nsSummaryForBucket1); + assertEquals( + KEY_THREE_SIZE + KEY_SEVEN_SIZE + KEY_TWO_OLD_SIZE + 100, + nsSummaryForBucket1.getSizeOfFiles()); + assertNotNull(nsSummaryForBucket2); + assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE, + nsSummaryForBucket2.getSizeOfFiles()); + } + + + @Test + public void testProcessFileBucketSize() { + int[] fileDistBucket1 = nsSummaryForBucket1.getFileSizeBucket(); + int[] fileDistBucket2 = nsSummaryForBucket2.getFileSizeBucket(); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket1.length); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket2.length); + + // Check for 1's and 0's in fileDistBucket1 + int[] expectedIndexes1 = {1, 3, 40}; + for (int index = 0; index < fileDistBucket1.length; index++) { + if (contains(expectedIndexes1, index)) { + assertEquals(1, fileDistBucket1[index]); + } else { + assertEquals(0, fileDistBucket1[index]); + } + } + + // Check for 1's and 0's in fileDistBucket2 + int[] expectedIndexes2 = {0, 2, 3}; + for (int index = 0; index < fileDistBucket2.length; index++) { + if (contains(expectedIndexes2, index)) { + assertEquals(1, fileDistBucket2[index]); + } else { + assertEquals(0, fileDistBucket2[index]); + } + } + } + + } + + /** + * Populate OMDB with the following configs. + * vol + * / \ + * bucket1 bucket2 + * / \ \ \ \ + * key1 key2 key3 key4 key5 + * + * @throws IOException + */ + private static void populateOMDB() throws IOException { + writeKeyToOm(reconOMMetadataManager, + KEY_ONE, + BUCKET_ONE, + VOL, + KEY_ONE, + KEY_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_ONE_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_TWO, + BUCKET_ONE, + VOL, + KEY_TWO, + KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_TWO_OLD_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_THREE, + BUCKET_ONE, + VOL, + KEY_THREE, + KEY_THREE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_THREE_SIZE, + getBucketLayout()); + + writeKeyToOm(reconOMMetadataManager, + KEY_FOUR, + BUCKET_TWO, + VOL, + KEY_FOUR, + KEY_FOUR_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + KEY_FOUR_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_FIVE, + BUCKET_TWO, + VOL, + KEY_FIVE, + KEY_FIVE_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + KEY_FIVE_SIZE, + getBucketLayout()); + } + + /** + * Create a new OM Metadata manager instance with one user, one vol, and two + * buckets. 
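The expected bin indexes in the file-size distribution tests above ({0, 1, 40} and {0, 2} after reprocess, {1, 3, 40} and {0, 2, 3} after this event batch is applied) come straight from power-of-two binning of the key sizes. A rough, self-contained sketch of that arithmetic, assuming 41 bins whose upper bounds start at 1 KB and double per bin; it mirrors the expectations, it is not the Recon utility itself:

// Rough illustration of the binning implied by the expected indexes above.
final class FileSizeBinSketch {
  private FileSizeBinSketch() { }

  static int binIndex(long dataSize) {
    long upperBound = 1024L;  // first bin: sizes up to 1 KB
    int index = 0;
    while (dataSize > upperBound && index < 40) {
      upperBound <<= 1;       // each following bin doubles the bound
      index++;
    }
    return index;             // everything very large ends up in the last bin (40)
  }

  public static void main(String[] args) {
    System.out.println(binIndex(500L));              // 0  (KEY_ONE_SIZE, KEY_FIVE_SIZE=100 too)
    System.out.println(binIndex(1025L));             // 1  (KEY_TWO_OLD_SIZE)
    System.out.println(binIndex(1125L));             // 1  (KEY_TWO_OLD_SIZE + 100 after the resize)
    System.out.println(binIndex(2050L));             // 2  (KEY_FOUR_SIZE)
    System.out.println(binIndex(7000L));             // 3  (KEY_SEVEN_SIZE, KEY_SIX_SIZE=6000 too)
    System.out.println(binIndex(Long.MAX_VALUE / 4)); // 40 (very large sizes such as KEY_THREE_SIZE)
  }
}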
+ * + * @throws IOException ioEx + */ + private static void initializeNewOmMetadataManager( + File omDbDir) + throws IOException { + omConfiguration = new OzoneConfiguration(); + omConfiguration.set(OZONE_OM_DB_DIRS, + omDbDir.getAbsolutePath()); + omConfiguration.set(OMConfigKeys + .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true"); + omMetadataManager = new OmMetadataManagerImpl( + omConfiguration, null); + + String volumeKey = omMetadataManager.getVolumeKey(VOL); + OmVolumeArgs args = + OmVolumeArgs.newBuilder() + .setObjectID(VOL_OBJECT_ID) + .setVolume(VOL) + .setAdminName(TEST_USER) + .setOwnerName(TEST_USER) + .build(); + omMetadataManager.getVolumeTable().put(volumeKey, args); + + OmBucketInfo bucketInfo1 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(BUCKET_ONE_OBJECT_ID) + .setBucketLayout(getBucketLayout()) + .build(); + + OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_TWO) + .setObjectID(BUCKET_TWO_OBJECT_ID) + .setBucketLayout(getBucketLayout()) + .build(); + + String bucketKey = omMetadataManager.getBucketKey( + bucketInfo1.getVolumeName(), bucketInfo1.getBucketName()); + String bucketKey2 = omMetadataManager.getBucketKey( + bucketInfo2.getVolumeName(), bucketInfo2.getBucketName()); + + omMetadataManager.getBucketTable().put(bucketKey, bucketInfo1); + omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2); + } + + /** + * Build a key info for put/update action. + * + * @param volume volume name + * @param bucket bucket name + * @param key key name + * @param fileName file name + * @param objectID object ID + * @param parentObjectId parent object ID + * @param dataSize file size + * @return the KeyInfo + */ + private static OmKeyInfo buildOmKeyInfo(String volume, + String bucket, + String key, + String fileName, + long objectID, + long parentObjectId, + long dataSize) { + return new OmKeyInfo.Builder() + .setBucketName(bucket) + .setVolumeName(volume) + .setKeyName(key) + .setFileName(fileName) + .setReplicationConfig( + StandaloneReplicationConfig.getInstance( + HddsProtos.ReplicationFactor.ONE)) + .setObjectID(objectID) + .setParentObjectID(parentObjectId) + .setDataSize(dataSize) + .build(); + } + + // Helper method to check if an array contains a specific value + private boolean contains(int[] arr, int value) { + for (int num : arr) { + if (num == value) { + return true; + } + } + return false; + } + + private static BucketLayout getBucketLayout() { + return BucketLayout.LEGACY; + } +} diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml index 18bbd906a0b1..5956e92476a8 100644 --- a/hadoop-ozone/s3gateway/pom.xml +++ b/hadoop-ozone/s3gateway/pom.xml @@ -165,6 +165,11 @@ hdds-test-utils test + + org.mockito + mockito-inline + test + diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 81e49d64f7ca..26e51a6d6661 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -126,6 +126,7 @@ import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.PRECOND_FAILED; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError; import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCEPT_RANGE_HEADER; +import static 
org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_COPY_DIRECTIVE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.DECODED_CONTENT_LENGTH_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.CONTENT_RANGE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER; @@ -135,6 +136,7 @@ import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_HEADER_SUPPORTED_UNIT; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; +import static org.apache.hadoop.ozone.s3.util.S3Consts.CopyDirective; import static org.apache.hadoop.ozone.s3.util.S3Utils.urlDecode; /** @@ -217,13 +219,14 @@ public Response put( @HeaderParam("Content-Length") long length, @QueryParam("partNumber") int partNumber, @QueryParam("uploadId") @DefaultValue("") String uploadID, - InputStream body) throws IOException, OS3Exception { + final InputStream body) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); S3GAction s3GAction = S3GAction.CREATE_KEY; boolean auditSuccess = true; PerformanceStringBuilder perf = new PerformanceStringBuilder(); String copyHeader = null, storageType = null; + DigestInputStream digestInputStream = null; try { OzoneVolume volume = getVolume(); if (uploadID != null && !uploadID.equals("")) { @@ -273,7 +276,9 @@ public Response put( boolean hasAmzDecodedLengthZero = amzDecodedLength != null && Long.parseLong(amzDecodedLength) == 0; if (canCreateDirectory && - (length == 0 || hasAmzDecodedLengthZero)) { + (length == 0 || hasAmzDecodedLengthZero) && + StringUtils.endsWith(keyPath, "/") + ) { s3GAction = S3GAction.CREATE_DIRECTORY; getClientProtocol() .createDirectory(volume.getName(), bucketName, keyPath); @@ -297,11 +302,11 @@ public Response put( if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD" .equals(headers.getHeaderString("x-amz-content-sha256"))) { - body = new DigestInputStream(new SignedChunksInputStream(body), - E_TAG_PROVIDER.get()); + digestInputStream = new DigestInputStream(new SignedChunksInputStream(body), + getMessageDigestInstance()); length = Long.parseLong(amzDecodedLength); } else { - body = new DigestInputStream(body, E_TAG_PROVIDER.get()); + digestInputStream = new DigestInputStream(body, getMessageDigestInstance()); } long putLength; @@ -310,7 +315,7 @@ public Response put( perf.appendStreamMode(); Pair keyWriteResult = ObjectEndpointStreaming .put(bucket, keyPath, length, replicationConfig, chunkSize, - customMetadata, (DigestInputStream) body, perf); + customMetadata, digestInputStream, perf); eTag = keyWriteResult.getKey(); putLength = keyWriteResult.getValue(); } else { @@ -320,9 +325,9 @@ public Response put( long metadataLatencyNs = getMetrics().updatePutKeyMetadataStats(startNanos); perf.appendMetaLatencyNanos(metadataLatencyNs); - putLength = IOUtils.copyLarge(body, output); + putLength = IOUtils.copyLarge(digestInputStream, output); eTag = DatatypeConverter.printHexBinary( - ((DigestInputStream) body).getMessageDigest().digest()) + digestInputStream.getMessageDigest().digest()) .toLowerCase(); output.getMetadata().put(ETAG, eTag); } @@ -367,6 +372,11 @@ public Response put( } throw ex; } finally { + // Reset the thread-local message digest instance in case of exception + // and MessageDigest#digest is never called + if (digestInputStream != null) { + digestInputStream.getMessageDigest().reset(); + } if (auditSuccess) { long opLatencyNs = getMetrics().updateCreateKeySuccessStats(startNanos); 
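The finally block added to put() above exists because the ETag digest now comes from a thread-local MessageDigest (getMessageDigestInstance() hands back the shared E_TAG_PROVIDER instance) that later requests on the same thread will reuse; if an upload fails before MessageDigest#digest() is ever called, the partially updated digest must be reset by hand or its state would leak into the next request's ETag. The same pattern recurs below for multipart and copy. A stand-alone sketch of the reuse-and-reset idea, pure JDK with illustrative names:

import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

// Not the gateway's actual class: one MessageDigest per thread, reset in finally so a
// request that dies before digest() cannot contaminate the next request on that thread.
final class ThreadLocalDigestSketch {
  private static final ThreadLocal<MessageDigest> ETAG_DIGEST =
      ThreadLocal.withInitial(() -> {
        try {
          return MessageDigest.getInstance("MD5");
        } catch (NoSuchAlgorithmException e) {
          throw new IllegalStateException(e);
        }
      });

  static byte[] digestOrCleanUp(byte[] payload, boolean simulateFailure) {
    MessageDigest digest = ETAG_DIGEST.get();
    try {
      digest.update(payload);
      if (simulateFailure) {
        throw new IllegalStateException("client aborted before digest() was called");
      }
      return digest.digest();   // digest() also resets the instance internally
    } finally {
      digest.reset();           // redundant after digest(), essential on the failure path
    }
  }
}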
perf.appendOpLatencyNanos(opLatencyNs); @@ -879,20 +889,21 @@ public Response completeMultipartUpload(@PathParam("bucket") String bucket, @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:ParameterNumber"}) private Response createMultipartKey(OzoneVolume volume, String bucket, String key, long length, int partNumber, String uploadID, - InputStream body, PerformanceStringBuilder perf) + final InputStream body, PerformanceStringBuilder perf) throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); String copyHeader = null; + DigestInputStream digestInputStream = null; try { if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD" .equals(headers.getHeaderString("x-amz-content-sha256"))) { - body = new DigestInputStream(new SignedChunksInputStream(body), - E_TAG_PROVIDER.get()); + digestInputStream = new DigestInputStream(new SignedChunksInputStream(body), + getMessageDigestInstance()); length = Long.parseLong( headers.getHeaderString(DECODED_CONTENT_LENGTH_HEADER)); } else { - body = new DigestInputStream(body, E_TAG_PROVIDER.get()); + digestInputStream = new DigestInputStream(body, getMessageDigestInstance()); } copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER); @@ -912,7 +923,7 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, perf.appendStreamMode(); return ObjectEndpointStreaming .createMultipartKey(ozoneBucket, key, length, partNumber, - uploadID, chunkSize, (DigestInputStream) body, perf); + uploadID, chunkSize, digestInputStream, perf); } // OmMultipartCommitUploadPartInfo can only be gotten after the // OzoneOutputStream is closed, so we need to save the KeyOutputStream @@ -993,10 +1004,10 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, partNumber, uploadID)) { metadataLatencyNs = getMetrics().updatePutKeyMetadataStats(startNanos); - putLength = IOUtils.copyLarge(body, ozoneOutputStream); + putLength = IOUtils.copyLarge(digestInputStream, ozoneOutputStream); ((KeyMetadataAware)ozoneOutputStream.getOutputStream()) .getMetadata().put(ETAG, DatatypeConverter.printHexBinary( - ((DigestInputStream) body).getMessageDigest().digest()) + digestInputStream.getMessageDigest().digest()) .toLowerCase()); keyOutputStream = ozoneOutputStream.getKeyOutputStream(); @@ -1042,6 +1053,12 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, throw os3Exception; } throw ex; + } finally { + // Reset the thread-local message digest instance in case of exception + // and MessageDigest#digest is never called + if (digestInputStream != null) { + digestInputStream.getMessageDigest().reset(); + } } } @@ -1122,21 +1139,20 @@ public void setContext(ContainerRequestContext context) { } @SuppressWarnings("checkstyle:ParameterNumber") - void copy(OzoneVolume volume, InputStream src, long srcKeyLen, + void copy(OzoneVolume volume, DigestInputStream src, long srcKeyLen, String destKey, String destBucket, ReplicationConfig replication, Map metadata, PerformanceStringBuilder perf, long startNanos) throws IOException { long copyLength; - src = new DigestInputStream(src, E_TAG_PROVIDER.get()); if (datastreamEnabled && !(replication != null && replication.getReplicationType() == EC) && srcKeyLen > datastreamMinLength) { perf.appendStreamMode(); copyLength = ObjectEndpointStreaming .copyKeyWithStream(volume.getBucket(destBucket), destKey, srcKeyLen, - chunkSize, replication, metadata, (DigestInputStream) src, perf, startNanos); + chunkSize, replication, metadata, src, perf, startNanos); } else { try (OzoneOutputStream dest = 
getClientProtocol() .createKey(volume.getName(), destBucket, destKey, srcKeyLen, @@ -1145,9 +1161,7 @@ void copy(OzoneVolume volume, InputStream src, long srcKeyLen, getMetrics().updateCopyKeyMetadataStats(startNanos); perf.appendMetaLatencyNanos(metadataLatencyNs); copyLength = IOUtils.copyLarge(src, dest); - String eTag = DatatypeConverter.printHexBinary( - ((DigestInputStream) src).getMessageDigest().digest()) - .toLowerCase(); + String eTag = DatatypeConverter.printHexBinary(src.getMessageDigest().digest()).toLowerCase(); dest.getMetadata().put(ETAG, eTag); } } @@ -1166,6 +1180,7 @@ private CopyObjectResponse copyObject(OzoneVolume volume, String sourceBucket = result.getLeft(); String sourceKey = result.getRight(); + DigestInputStream sourceDigestInputStream = null; try { OzoneKeyDetails sourceKeyDetails = getClientProtocol().getKeyDetails( volume.getName(), sourceBucket, sourceKey); @@ -1196,11 +1211,29 @@ private CopyObjectResponse copyObject(OzoneVolume volume, } long sourceKeyLen = sourceKeyDetails.getDataSize(); + // Custom metadata in copyObject with metadata directive + Map customMetadata; + String metadataCopyDirective = headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER); + if (StringUtils.isEmpty(metadataCopyDirective) || metadataCopyDirective.equals(CopyDirective.COPY.name())) { + // The custom metadata will be copied from the source key + customMetadata = sourceKeyDetails.getMetadata(); + } else if (metadataCopyDirective.equals(CopyDirective.REPLACE.name())) { + // Replace the metadata with the metadata from the request headers + customMetadata = getCustomMetadataFromHeaders(headers.getRequestHeaders()); + } else { + OS3Exception ex = newError(INVALID_ARGUMENT, metadataCopyDirective); + ex.setErrorMessage("An error occurred (InvalidArgument) " + + "when calling the CopyObject operation: " + + "The metadata directive specified is invalid.
Valid values are COPY or REPLACE."); + throw ex; + } + try (OzoneInputStream src = getClientProtocol().getKey(volume.getName(), sourceBucket, sourceKey)) { getMetrics().updateCopyKeyMetadataStats(startNanos); - copy(volume, src, sourceKeyLen, destkey, destBucket, replicationConfig, - sourceKeyDetails.getMetadata(), perf, startNanos); + sourceDigestInputStream = new DigestInputStream(src, getMessageDigestInstance()); + copy(volume, sourceDigestInputStream, sourceKeyLen, destkey, destBucket, replicationConfig, + customMetadata, perf, startNanos); } final OzoneKeyDetails destKeyDetails = getClientProtocol().getKeyDetails( @@ -1221,6 +1254,12 @@ private CopyObjectResponse copyObject(OzoneVolume volume, destBucket + "/" + destkey, ex); } throw ex; + } finally { + // Reset the thread-local message digest instance in case of exception + // and MessageDigest#digest is never called + if (sourceDigestInputStream != null) { + sourceDigestInputStream.getMessageDigest().reset(); + } } } @@ -1321,4 +1360,9 @@ private String wrapInQuotes(String value) { return "\"" + value + "\""; } + @VisibleForTesting + public MessageDigest getMessageDigestInstance() { + return E_TAG_PROVIDER.get(); + } + } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java index df3d01936b18..3b38ff03c420 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3Consts.java @@ -62,10 +62,20 @@ private S3Consts() { public static final String S3_XML_NAMESPACE = "http://s3.amazonaws" + ".com/doc/2006-03-01/"; + // Constants related to custom metadata public static final String CUSTOM_METADATA_HEADER_PREFIX = "x-amz-meta-"; + public static final String CUSTOM_METADATA_COPY_DIRECTIVE_HEADER = "x-amz-metadata-directive"; public static final String DECODED_CONTENT_LENGTH_HEADER = "x-amz-decoded-content-length"; + /** + * Copy directive for metadata and tags. 
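The branch above wires up the S3 x-amz-metadata-directive contract for CopyObject: a missing header or COPY keeps the source key's custom metadata, REPLACE takes whatever x-amz-meta-* headers came with the request, and anything else is answered with InvalidArgument. A compact sketch of just that selection, in plain Java with illustrative names:

import java.util.Collections;
import java.util.Map;

// Sketch of the directive handling shown above, detached from the JAX-RS plumbing.
final class MetadataDirectiveSketch {
  private MetadataDirectiveSketch() { }

  static Map<String, String> resolveCustomMetadata(String directiveHeader,
      Map<String, String> sourceKeyMetadata, Map<String, String> requestHeaderMetadata) {
    if (directiveHeader == null || directiveHeader.isEmpty() || "COPY".equals(directiveHeader)) {
      return sourceKeyMetadata;        // default: carry the source key's metadata over
    }
    if ("REPLACE".equals(directiveHeader)) {
      return requestHeaderMetadata;    // take the x-amz-meta-* values from the request
    }
    throw new IllegalArgumentException(
        "The metadata directive specified is invalid. Valid values are COPY or REPLACE.");
  }

  public static void main(String[] args) {
    Map<String, String> source = Collections.singletonMap("custom-key-1", "custom-value-1");
    Map<String, String> request = Collections.singletonMap("custom-key-3", "custom-value-3");
    System.out.println(resolveCustomMetadata(null, source, request));      // source metadata
    System.out.println(resolveCustomMetadata("REPLACE", source, request)); // request metadata
  }
}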
+ */ + public enum CopyDirective { + COPY, // Default directive + REPLACE + } + } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java index d9b834c3186d..0400bc60500c 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java @@ -482,7 +482,7 @@ public OzoneFileStatus getOzoneFileStatus(String volumeName, @Override public void createDirectory(String volumeName, String bucketName, String keyName) throws IOException { - + getBucket(volumeName, bucketName).createDirectory(keyName); } @Override diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java index 0cbe0781c4ba..d272360fc3cd 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java @@ -36,12 +36,11 @@ import javax.xml.bind.DatatypeConverter; import org.apache.commons.codec.digest.DigestUtils; -import org.apache.hadoop.hdds.client.DefaultReplicationConfig; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput; import org.apache.hadoop.ozone.OzoneAcl; @@ -55,6 +54,8 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; import org.apache.hadoop.util.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import static org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH; @@ -63,7 +64,9 @@ /** * In-memory ozone bucket for testing. 
*/ -public class OzoneBucketStub extends OzoneBucket { +public final class OzoneBucketStub extends OzoneBucket { + + private static final Logger LOG = LoggerFactory.getLogger(OzoneBucketStub.class); private Map keyDetails = new HashMap<>(); @@ -80,7 +83,7 @@ public static Builder newBuilder() { return new Builder(); } - public OzoneBucketStub(Builder b) { + private OzoneBucketStub(Builder b) { super(b); this.replicationConfig = super.getReplicationConfig(); } @@ -93,43 +96,6 @@ public static final class Builder extends OzoneBucket.Builder { private Builder() { } - @Override - public Builder setVolumeName(String volumeName) { - super.setVolumeName(volumeName); - return this; - } - - @Override - public Builder setName(String name) { - super.setName(name); - return this; - } - - @Override - public Builder setDefaultReplicationConfig( - DefaultReplicationConfig defaultReplicationConfig) { - super.setDefaultReplicationConfig(defaultReplicationConfig); - return this; - } - - @Override - public Builder setStorageType(StorageType storageType) { - super.setStorageType(storageType); - return this; - } - - @Override - public Builder setVersioning(Boolean versioning) { - super.setVersioning(versioning); - return this; - } - - @Override - public Builder setCreationTime(long creationTime) { - super.setCreationTime(creationTime); - return this; - } - @Override public OzoneBucketStub build() { return new OzoneBucketStub(this); @@ -149,31 +115,16 @@ public OzoneOutputStream createKey(String key, long size, ReplicationFactor factor, Map metadata) throws IOException { - ByteArrayOutputStream byteArrayOutputStream = - new ByteArrayOutputStream((int) size) { - @Override - public void close() throws IOException { - keyContents.put(key, toByteArray()); - keyDetails.put(key, new OzoneKeyDetails( - getVolumeName(), - getName(), - key, - size, - System.currentTimeMillis(), - System.currentTimeMillis(), - new ArrayList<>(), replicationConfig, metadata, null, - () -> readKey(key), true - )); - super.close(); - } - }; - return new OzoneOutputStream(byteArrayOutputStream, null); + ReplicationConfig replication = ReplicationConfig.fromTypeAndFactor(type, factor); + return createKey(key, size, replication, metadata); } @Override public OzoneOutputStream createKey(String key, long size, ReplicationConfig rConfig, Map metadata) throws IOException { + assertDoesNotExist(key + "/"); + final ReplicationConfig repConfig; if (rConfig == null) { repConfig = getReplicationConfig(); @@ -208,6 +159,8 @@ public OzoneDataStreamOutput createStreamKey(String key, long size, ReplicationConfig rConfig, Map keyMetadata) throws IOException { + assertDoesNotExist(key + "/"); + ByteBufferStreamOutput byteBufferStreamOutput = new KeyMetadataAwareByteBufferStreamOutput(keyMetadata) { @@ -611,6 +564,9 @@ public ReplicationConfig getReplicationConfig() { @Override public void createDirectory(String keyName) throws IOException { + assertDoesNotExist(StringUtils.stripEnd(keyName, "/")); + + LOG.info("createDirectory({})", keyName); keyDetails.put(keyName, new OzoneKeyDetails( getVolumeName(), getName(), @@ -622,6 +578,12 @@ public void createDirectory(String keyName) throws IOException { () -> readKey(keyName), false)); } + private void assertDoesNotExist(String keyName) throws OMException { + if (keyDetails.get(keyName) != null) { + throw new OMException("already exists", ResultCodes.FILE_ALREADY_EXISTS); + } + } + /** * ByteArrayOutputStream stub with metadata. 
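The assertDoesNotExist checks added above give the in-memory stub a sliver of real-bucket behaviour: creating a key fails when a directory of the same name (key + "/") already exists, and creating a directory fails when a plain key already occupies the name once trailing slashes are stripped; the stub reports either collision as OMException with FILE_ALREADY_EXISTS. A condensed sketch of just that rule, separated from the rest of the stub and using a plain RuntimeException in place of OMException:

import java.util.HashMap;
import java.util.Map;

// Condensed illustration of the key/directory exclusion the stub now enforces.
final class KeyDirectoryExclusionSketch {
  private final Map<String, String> keyDetails = new HashMap<>();

  void createKey(String key) {
    assertDoesNotExist(key + "/");                      // blocked by an existing directory
    keyDetails.put(key, "file");
  }

  void createDirectory(String keyName) {
    assertDoesNotExist(stripTrailingSlashes(keyName));  // blocked by an existing plain key
    keyDetails.put(keyName, "dir");
  }

  private static String stripTrailingSlashes(String name) {
    int end = name.length();
    while (end > 0 && name.charAt(end - 1) == '/') {
      end--;
    }
    return name.substring(0, end);
  }

  private void assertDoesNotExist(String keyName) {
    if (keyDetails.containsKey(keyName)) {
      throw new IllegalStateException(keyName + " already exists");
    }
  }
}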
*/ diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java index 9fab5a181b56..4ce18b41f1cf 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneVolumeStub.java @@ -38,17 +38,17 @@ /** * Ozone volume with in-memory state for testing. */ -public class OzoneVolumeStub extends OzoneVolume { +public final class OzoneVolumeStub extends OzoneVolume { - private Map buckets = new HashMap<>(); + private final Map buckets = new HashMap<>(); - private ArrayList aclList = new ArrayList<>(); + private final ArrayList aclList = new ArrayList<>(); public static Builder newBuilder() { return new Builder(); } - public OzoneVolumeStub(Builder b) { + private OzoneVolumeStub(Builder b) { super(b); } @@ -124,6 +124,7 @@ public void createBucket(String bucketName, BucketArgs bucketArgs) { .setDefaultReplicationConfig(new DefaultReplicationConfig( RatisReplicationConfig.getInstance( HddsProtos.ReplicationFactor.THREE))) + .setBucketLayout(bucketArgs.getBucketLayout()) .setStorageType(bucketArgs.getStorageType()) .setVersioning(bucketArgs.getVersioning()) .setCreationTime(Time.now()) diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java index 0daa666ae4c7..abae489b4135 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java @@ -23,31 +23,43 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; +import java.util.stream.Stream; +import java.io.OutputStream; +import java.security.MessageDigest; import javax.ws.rs.core.HttpHeaders; +import javax.ws.rs.core.MultivaluedHashMap; +import javax.ws.rs.core.MultivaluedMap; import javax.ws.rs.core.Response; import org.apache.commons.io.IOUtils; -import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; -import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.io.OzoneInputStream; -import org.apache.hadoop.ozone.client.protocol.ClientProtocol; -import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.apache.http.HttpStatus; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; 
+import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.mockito.MockedStatic; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_COPY_DIRECTIVE_HEADER; +import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX; import static org.apache.hadoop.ozone.s3.util.S3Consts.DECODED_CONTENT_LENGTH_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; @@ -56,11 +68,11 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.any; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -68,110 +80,104 @@ /** * Test put object. */ -public class TestObjectPut { - public static final String CONTENT = "0123456789"; - private String bucketName = "b1"; - private String keyName = "key=value/1"; - private String destBucket = "b2"; - private String destkey = "key=value/2"; - private String nonexist = "nonexist"; +class TestObjectPut { + private static final String CONTENT = "0123456789"; + private static final String FSO_BUCKET_NAME = "fso-bucket"; + private static final String BUCKET_NAME = "b1"; + private static final String KEY_NAME = "key=value/1"; + private static final String DEST_BUCKET_NAME = "b2"; + private static final String DEST_KEY = "key=value/2"; + private static final String NO_SUCH_BUCKET = "nonexist"; + private OzoneClient clientStub; private ObjectEndpoint objectEndpoint; + private HttpHeaders headers; + private OzoneBucket bucket; + private OzoneBucket fsoBucket; + + static Stream argumentsForPutObject() { + ReplicationConfig ratis3 = RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE); + ECReplicationConfig ec = new ECReplicationConfig("rs-3-2-1024K"); + return Stream.of( + Arguments.of(0, ratis3), + Arguments.of(10, ratis3), + Arguments.of(0, ec), + Arguments.of(10, ec) + ); + } @BeforeEach - public void setup() throws IOException { + void setup() throws IOException { + OzoneConfiguration config = new OzoneConfiguration(); + //Create client stub and object store stub. 
clientStub = new OzoneClientStub(); // Create bucket - clientStub.getObjectStore().createS3Bucket(bucketName); - clientStub.getObjectStore().createS3Bucket(destBucket); + clientStub.getObjectStore().createS3Bucket(BUCKET_NAME); + bucket = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME); + clientStub.getObjectStore().createS3Bucket(DEST_BUCKET_NAME); // Create PutObject and setClient to OzoneClientStub - objectEndpoint = new ObjectEndpoint(); + objectEndpoint = spy(new ObjectEndpoint()); objectEndpoint.setClient(clientStub); - objectEndpoint.setOzoneConfiguration(new OzoneConfiguration()); + objectEndpoint.setOzoneConfiguration(config); + + headers = mock(HttpHeaders.class); + objectEndpoint.setHeaders(headers); + + String volumeName = config.get(OzoneConfigKeys.OZONE_S3_VOLUME_NAME, + OzoneConfigKeys.OZONE_S3_VOLUME_NAME_DEFAULT); + OzoneVolume volume = clientStub.getObjectStore().getVolume(volumeName); + BucketArgs fsoBucketArgs = BucketArgs.newBuilder() + .setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED) + .build(); + volume.createBucket(FSO_BUCKET_NAME, fsoBucketArgs); + fsoBucket = volume.getBucket(FSO_BUCKET_NAME); } - @Test - public void testPutObject() throws IOException, OS3Exception { + @ParameterizedTest + @MethodSource("argumentsForPutObject") + void testPutObject(int length, ReplicationConfig replication) throws IOException, OS3Exception { //GIVEN - HttpHeaders headers = mock(HttpHeaders.class); - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); + final String content = RandomStringUtils.randomAlphanumeric(length); + ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes(UTF_8)); + bucket.setReplicationConfig(replication); //WHEN - Response response = objectEndpoint.put(bucketName, keyName, CONTENT - .length(), 1, null, body); - + Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, length, 1, null, body); //THEN - OzoneInputStream ozoneInputStream = - clientStub.getObjectStore().getS3Bucket(bucketName) - .readKey(keyName); - String keyContent = - IOUtils.toString(ozoneInputStream, UTF_8); - OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName); - assertEquals(200, response.getStatus()); - assertEquals(CONTENT, keyContent); - assertNotNull(keyDetails.getMetadata()); - assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); - } - @Test - public void testPutObjectWithECReplicationConfig() - throws IOException, OS3Exception { - //GIVEN - HttpHeaders headers = mock(HttpHeaders.class); - ByteArrayInputStream body = - new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); - ECReplicationConfig ecReplicationConfig = - new ECReplicationConfig("rs-3-2-1024K"); - clientStub.getObjectStore().getS3Bucket(bucketName) - .setReplicationConfig(ecReplicationConfig); - Response response = objectEndpoint.put(bucketName, keyName, CONTENT - .length(), 1, null, body); - - assertEquals(ecReplicationConfig, - clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName) - .getReplicationConfig()); - OzoneInputStream ozoneInputStream = - clientStub.getObjectStore().getS3Bucket(bucketName) - .readKey(keyName); - String keyContent = - IOUtils.toString(ozoneInputStream, UTF_8); - OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName); + String keyContent; + try (InputStream input = bucket.readKey(KEY_NAME)) { + keyContent = IOUtils.toString(input, UTF_8); 
+ } + assertEquals(content, keyContent); - assertEquals(200, response.getStatus()); - assertEquals(CONTENT, keyContent); + OzoneKeyDetails keyDetails = bucket.getKey(KEY_NAME); + assertEquals(replication, keyDetails.getReplicationConfig()); assertNotNull(keyDetails.getMetadata()); - assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); + assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); } @Test - public void testPutObjectContentLength() throws IOException, OS3Exception { + void testPutObjectContentLength() throws IOException, OS3Exception { // The contentLength specified when creating the Key should be the same as // the Content-Length, the key Commit will compare the Content-Length with // the actual length of the data written. - HttpHeaders headers = mock(HttpHeaders.class); ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); long dataSize = CONTENT.length(); - objectEndpoint.put(bucketName, keyName, dataSize, 0, null, body); - assertEquals(dataSize, getKeyDataSize(keyName)); + objectEndpoint.put(BUCKET_NAME, KEY_NAME, dataSize, 0, null, body); + assertEquals(dataSize, getKeyDataSize()); } @Test - public void testPutObjectContentLengthForStreaming() + void testPutObjectContentLengthForStreaming() throws IOException, OS3Exception { - HttpHeaders headers = mock(HttpHeaders.class); - objectEndpoint.setHeaders(headers); - String chunkedContent = "0a;chunk-signature=signature\r\n" + "1234567890\r\n" + "05;chunk-signature=signature\r\n" @@ -182,22 +188,19 @@ public void testPutObjectContentLengthForStreaming() when(headers.getHeaderString(DECODED_CONTENT_LENGTH_HEADER)) .thenReturn("15"); - objectEndpoint.put(bucketName, keyName, chunkedContent.length(), 0, null, + objectEndpoint.put(BUCKET_NAME, KEY_NAME, chunkedContent.length(), 0, null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); - assertEquals(15, getKeyDataSize(keyName)); + assertEquals(15, getKeyDataSize()); } - private long getKeyDataSize(String key) throws IOException { - return clientStub.getObjectStore().getS3Bucket(bucketName) - .getKey(key).getDataSize(); + private long getKeyDataSize() throws IOException { + return clientStub.getObjectStore().getS3Bucket(BUCKET_NAME) + .getKey(KEY_NAME).getDataSize(); } @Test - public void testPutObjectWithSignedChunks() throws IOException, OS3Exception { + void testPutObjectWithSignedChunks() throws IOException, OS3Exception { //GIVEN - HttpHeaders headers = mock(HttpHeaders.class); - objectEndpoint.setHeaders(headers); - String chunkedContent = "0a;chunk-signature=signature\r\n" + "1234567890\r\n" + "05;chunk-signature=signature\r\n" @@ -209,219 +212,296 @@ public void testPutObjectWithSignedChunks() throws IOException, OS3Exception { .thenReturn("15"); //WHEN - Response response = objectEndpoint.put(bucketName, keyName, + Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, chunkedContent.length(), 1, null, new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); //THEN OzoneInputStream ozoneInputStream = - clientStub.getObjectStore().getS3Bucket(bucketName) - .readKey(keyName); + clientStub.getObjectStore().getS3Bucket(BUCKET_NAME) + .readKey(KEY_NAME); String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); - OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName); + OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); assertEquals(200, 
response.getStatus()); assertEquals("1234567890abcde", keyContent); assertNotNull(keyDetails.getMetadata()); - assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); + assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); } @Test - public void testCopyObject() throws IOException, OS3Exception { + public void testPutObjectMessageDigestResetDuringException() throws OS3Exception { + MessageDigest messageDigest = mock(MessageDigest.class); + try (MockedStatic mocked = mockStatic(IOUtils.class)) { + // For example, EOFException during put-object due to client cancelling the operation before it completes + mocked.when(() -> IOUtils.copyLarge(any(InputStream.class), any(OutputStream.class))) + .thenThrow(IOException.class); + when(objectEndpoint.getMessageDigestInstance()).thenReturn(messageDigest); + + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + try { + objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT + .length(), 1, null, body); + fail("Should throw IOException"); + } catch (IOException ignored) { + // Verify that the message digest is reset so that the instance can be reused for the + // next request in the same thread + verify(messageDigest, times(1)).reset(); + } + } + } + + @Test + void testCopyObject() throws IOException, OS3Exception { // Put object in to source bucket - HttpHeaders headers = mock(HttpHeaders.class); ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); - keyName = "sourceKey"; - Response response = objectEndpoint.put(bucketName, keyName, + // Add some custom metadata + MultivaluedMap metadataHeaders = new MultivaluedHashMap<>(); + metadataHeaders.putSingle(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-1", "custom-value-1"); + metadataHeaders.putSingle(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-2", "custom-value-2"); + when(headers.getRequestHeaders()).thenReturn(metadataHeaders); + // Add COPY metadata directive (default) + when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("COPY"); + + Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, body); OzoneInputStream ozoneInputStream = clientStub.getObjectStore() - .getS3Bucket(bucketName) - .readKey(keyName); + .getS3Bucket(BUCKET_NAME) + .readKey(KEY_NAME); String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); - OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName); + OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); assertEquals(200, response.getStatus()); assertEquals(CONTENT, keyContent); assertNotNull(keyDetails.getMetadata()); - assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); + assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); + assertThat(keyDetails.getMetadata().get("custom-key-1")).isEqualTo("custom-value-1"); + assertThat(keyDetails.getMetadata().get("custom-key-2")).isEqualTo("custom-value-2"); String sourceETag = keyDetails.getMetadata().get(OzoneConsts.ETAG); + // This will be ignored since the copy directive is COPY + metadataHeaders.putSingle(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-3", "custom-value-3"); + // Add copy header, and then call put when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - bucketName + "/" + urlEncode(keyName)); + BUCKET_NAME + "/" + urlEncode(KEY_NAME)); - response = objectEndpoint.put(destBucket, 
destkey, CONTENT.length(), 1, + response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, null, body); // Check destination key and response - ozoneInputStream = clientStub.getObjectStore().getS3Bucket(destBucket) - .readKey(destkey); + ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME) + .readKey(DEST_KEY); keyContent = IOUtils.toString(ozoneInputStream, UTF_8); OzoneKeyDetails sourceKeyDetails = clientStub.getObjectStore() - .getS3Bucket(bucketName).getKey(keyName); + .getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); OzoneKeyDetails destKeyDetails = clientStub.getObjectStore() - .getS3Bucket(destBucket).getKey(destkey); + .getS3Bucket(DEST_BUCKET_NAME).getKey(DEST_KEY); assertEquals(200, response.getStatus()); assertEquals(CONTENT, keyContent); assertNotNull(keyDetails.getMetadata()); - assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); + assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); // Source key eTag should remain unchanged and the dest key should have // the same Etag since the key content is the same assertEquals(sourceETag, sourceKeyDetails.getMetadata().get(OzoneConsts.ETAG)); assertEquals(sourceETag, destKeyDetails.getMetadata().get(OzoneConsts.ETAG)); + assertThat(destKeyDetails.getMetadata().get("custom-key-1")).isEqualTo("custom-value-1"); + assertThat(destKeyDetails.getMetadata().get("custom-key-2")).isEqualTo("custom-value-2"); + assertThat(destKeyDetails.getMetadata().containsKey("custom-key-3")).isFalse(); - // source and dest same + // Now use REPLACE metadata directive (default) and remove some custom metadata used in the source key + when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("REPLACE"); + metadataHeaders.remove(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-1"); + metadataHeaders.remove(CUSTOM_METADATA_HEADER_PREFIX + "custom-key-2"); + + response = objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, + null, body); + + ozoneInputStream = clientStub.getObjectStore().getS3Bucket(DEST_BUCKET_NAME) + .readKey(DEST_KEY); + + keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + sourceKeyDetails = clientStub.getObjectStore() + .getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); + destKeyDetails = clientStub.getObjectStore() + .getS3Bucket(DEST_BUCKET_NAME).getKey(DEST_KEY); + + assertEquals(200, response.getStatus()); + assertEquals(CONTENT, keyContent); + assertNotNull(keyDetails.getMetadata()); + assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); + // Source key eTag should remain unchanged and the dest key should have + // the same Etag since the key content is the same + assertEquals(sourceETag, sourceKeyDetails.getMetadata().get(OzoneConsts.ETAG)); + assertEquals(sourceETag, destKeyDetails.getMetadata().get(OzoneConsts.ETAG)); + assertThat(destKeyDetails.getMetadata().containsKey("custom-key-1")).isFalse(); + assertThat(destKeyDetails.getMetadata().containsKey("custom-key-2")).isFalse(); + assertThat(destKeyDetails.getMetadata().get("custom-key-3")).isEqualTo("custom-value-3"); + + + // wrong copy metadata directive + when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("INVALID"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - bucketName, keyName, CONTENT.length(), 1, null, body), + DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, null, body), + "test copy object failed"); + assertThat(e.getHttpCode()).isEqualTo(400); + 
assertThat(e.getCode()).isEqualTo("InvalidArgument"); + assertThat(e.getErrorMessage()).contains("The metadata directive specified is invalid"); + + when(headers.getHeaderString(CUSTOM_METADATA_COPY_DIRECTIVE_HEADER)).thenReturn("COPY"); + + // source and dest same + e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( + BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, body), "test copy object failed"); assertThat(e.getErrorMessage()).contains("This copy request is illegal"); // source bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - nonexist + "/" + urlEncode(keyName)); - e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(destBucket, - destkey, CONTENT.length(), 1, null, body), "test copy object failed"); + NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME)); + e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(DEST_BUCKET_NAME, + DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); // dest bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - bucketName + "/" + urlEncode(keyName)); - e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(nonexist, - destkey, CONTENT.length(), 1, null, body), "test copy object failed"); + BUCKET_NAME + "/" + urlEncode(KEY_NAME)); + e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET, + DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); //Both source and dest bucket not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - nonexist + "/" + urlEncode(keyName)); - e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(nonexist, - destkey, CONTENT.length(), 1, null, body), "test copy object failed"); + NO_SUCH_BUCKET + "/" + urlEncode(KEY_NAME)); + e = assertThrows(OS3Exception.class, () -> objectEndpoint.put(NO_SUCH_BUCKET, + DEST_KEY, CONTENT.length(), 1, null, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); // source key not found when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( - bucketName + "/" + urlEncode(nonexist)); + BUCKET_NAME + "/" + urlEncode(NO_SUCH_BUCKET)); e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - "nonexistent", keyName, CONTENT.length(), 1, null, body), + "nonexistent", KEY_NAME, CONTENT.length(), 1, null, body), "test copy object failed"); assertThat(e.getCode()).contains("NoSuchBucket"); } @Test - public void testInvalidStorageType() throws IOException { - HttpHeaders headers = mock(HttpHeaders.class); + public void testCopyObjectMessageDigestResetDuringException() throws IOException, OS3Exception { + // Put object in to source bucket + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + + Response response = objectEndpoint.put(BUCKET_NAME, KEY_NAME, + CONTENT.length(), 1, null, body); + + OzoneInputStream ozoneInputStream = clientStub.getObjectStore() + .getS3Bucket(BUCKET_NAME) + .readKey(KEY_NAME); + + String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(BUCKET_NAME).getKey(KEY_NAME); + + assertEquals(200, response.getStatus()); + assertEquals(CONTENT, keyContent); + assertNotNull(keyDetails.getMetadata()); + assertThat(keyDetails.getMetadata().get(OzoneConsts.ETAG)).isNotEmpty(); + + MessageDigest messageDigest = mock(MessageDigest.class); + try (MockedStatic mocked = 
mockStatic(IOUtils.class)) { + // Add the mocked methods only during the copy request + when(objectEndpoint.getMessageDigestInstance()).thenReturn(messageDigest); + mocked.when(() -> IOUtils.copyLarge(any(InputStream.class), any(OutputStream.class))) + .thenThrow(IOException.class); + + // Add copy header, and then call put + when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( + BUCKET_NAME + "/" + urlEncode(KEY_NAME)); + + try { + objectEndpoint.put(DEST_BUCKET_NAME, DEST_KEY, CONTENT.length(), 1, + null, body); + fail("Should throw IOException"); + } catch (IOException ignored) { + // Verify that the message digest is reset so that the instance can be reused for the + // next request in the same thread + verify(messageDigest, times(1)).reset(); + } + } + } + + @Test + void testInvalidStorageType() { ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); - keyName = "sourceKey"; when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("random"); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( - bucketName, keyName, CONTENT.length(), 1, null, body)); + BUCKET_NAME, KEY_NAME, CONTENT.length(), 1, null, body)); assertEquals(S3ErrorTable.INVALID_ARGUMENT.getErrorMessage(), e.getErrorMessage()); assertEquals("random", e.getResource()); } @Test - public void testEmptyStorageType() throws IOException, OS3Exception { - HttpHeaders headers = mock(HttpHeaders.class); + void testEmptyStorageType() throws IOException, OS3Exception { ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); - objectEndpoint.setHeaders(headers); - keyName = "sourceKey"; when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(""); - objectEndpoint.put(bucketName, keyName, CONTENT + objectEndpoint.put(BUCKET_NAME, KEY_NAME, CONTENT .length(), 1, null, body); OzoneKeyDetails key = - clientStub.getObjectStore().getS3Bucket(bucketName) - .getKey(keyName); - + clientStub.getObjectStore().getS3Bucket(BUCKET_NAME) + .getKey(KEY_NAME); //default type is set - assertEquals(ReplicationType.RATIS, key.getReplicationType()); + assertEquals( + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE), + key.getReplicationConfig()); } @Test - public void testDirectoryCreation() throws IOException, + void testDirectoryCreation() throws IOException, OS3Exception { // GIVEN - final String path = "dir"; - final long length = 0L; - final int partNumber = 0; - final String uploadId = ""; - final InputStream body = null; - final HttpHeaders headers = mock(HttpHeaders.class); - final ObjectEndpoint objEndpoint = new ObjectEndpoint(); - objEndpoint.setOzoneConfiguration(new OzoneConfiguration()); - objEndpoint.setHeaders(headers); - final OzoneClient client = mock(OzoneClient.class); - objEndpoint.setClient(client); - final ObjectStore objectStore = mock(ObjectStore.class); - final OzoneVolume volume = mock(OzoneVolume.class); - final OzoneBucket bucket = mock(OzoneBucket.class); - final ClientProtocol protocol = mock(ClientProtocol.class); + final String path = "dir/"; // WHEN - when(client.getObjectStore()).thenReturn(objectStore); - when(client.getObjectStore().getS3Volume()).thenReturn(volume); - when(volume.getBucket(bucketName)).thenReturn(bucket); - when(bucket.getBucketLayout()) - .thenReturn(BucketLayout.FILE_SYSTEM_OPTIMIZED); - when(client.getProxy()).thenReturn(protocol); - final Response response = objEndpoint.put(bucketName, path, length, - partNumber, uploadId, body); + try (Response 
response = objectEndpoint.put(fsoBucket.getName(), path, + 0L, 0, "", null)) { + assertEquals(HttpStatus.SC_OK, response.getStatus()); + } // THEN - assertEquals(HttpStatus.SC_OK, response.getStatus()); - verify(protocol).createDirectory(any(), eq(bucketName), eq(path)); + OzoneKeyDetails key = fsoBucket.getKey(path); + assertThat(key.isFile()).as("directory").isFalse(); } @Test - public void testDirectoryCreationOverFile() throws IOException { + void testDirectoryCreationOverFile() throws IOException, OS3Exception { // GIVEN final String path = "key"; - final long length = 0L; - final int partNumber = 0; - final String uploadId = ""; final ByteArrayInputStream body = - new ByteArrayInputStream("content".getBytes(UTF_8)); - final HttpHeaders headers = mock(HttpHeaders.class); - final ObjectEndpoint objEndpoint = new ObjectEndpoint(); - objEndpoint.setOzoneConfiguration(new OzoneConfiguration()); - objEndpoint.setHeaders(headers); - final OzoneClient client = mock(OzoneClient.class); - objEndpoint.setClient(client); - final ObjectStore objectStore = mock(ObjectStore.class); - final OzoneVolume volume = mock(OzoneVolume.class); - final OzoneBucket bucket = mock(OzoneBucket.class); - final ClientProtocol protocol = mock(ClientProtocol.class); + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); + objectEndpoint.put(FSO_BUCKET_NAME, path, CONTENT.length(), 0, "", body); // WHEN - when(client.getObjectStore()).thenReturn(objectStore); - when(client.getObjectStore().getS3Volume()).thenReturn(volume); - when(volume.getBucket(bucketName)).thenReturn(bucket); - when(bucket.getBucketLayout()) - .thenReturn(BucketLayout.FILE_SYSTEM_OPTIMIZED); - when(client.getProxy()).thenReturn(protocol); - doThrow(new OMException(OMException.ResultCodes.FILE_ALREADY_EXISTS)) - .when(protocol) - .createDirectory(any(), any(), any()); + final OS3Exception exception = assertThrows(OS3Exception.class, + () -> objectEndpoint + .put(FSO_BUCKET_NAME, path + "/", 0, 0, "", null) + .close()); // THEN - final OS3Exception exception = assertThrows(OS3Exception.class, - () -> objEndpoint - .put(bucketName, path, length, partNumber, uploadId, body)); - assertEquals("Conflict", exception.getCode()); - assertEquals(409, exception.getHttpCode()); - verify(protocol, times(1)).createDirectory(any(), any(), any()); + assertEquals(S3ErrorTable.NO_OVERWRITE.getCode(), exception.getCode()); + assertEquals(S3ErrorTable.NO_OVERWRITE.getHttpCode(), exception.getHttpCode()); } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java index bb1b7037bd9a..aecc56fe172b 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java @@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.s3.endpoint; +import org.apache.commons.io.IOUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; @@ -28,12 +29,16 @@ import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.mockito.MockedStatic; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import java.io.ByteArrayInputStream; import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import 
java.security.MessageDigest; import java.util.UUID; import static java.net.HttpURLConnection.HTTP_NOT_FOUND; @@ -44,7 +49,13 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; /** @@ -194,6 +205,53 @@ public void testPartUploadContentLength() throws IOException, OS3Exception { assertContentLength(uploadID, keyName, content.length()); } + @Test + public void testPartUploadMessageDigestResetDuringException() throws IOException, OS3Exception { + OzoneClient clientStub = new OzoneClientStub(); + clientStub.getObjectStore().createS3Bucket(OzoneConsts.S3_BUCKET); + + + HttpHeaders headers = mock(HttpHeaders.class); + when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn( + "STANDARD"); + + ObjectEndpoint objectEndpoint = spy(new ObjectEndpoint()); + + objectEndpoint.setHeaders(headers); + objectEndpoint.setClient(clientStub); + objectEndpoint.setOzoneConfiguration(new OzoneConfiguration()); + + Response response = objectEndpoint.initializeMultipartUpload(OzoneConsts.S3_BUCKET, + OzoneConsts.KEY); + MultipartUploadInitiateResponse multipartUploadInitiateResponse = + (MultipartUploadInitiateResponse) response.getEntity(); + assertNotNull(multipartUploadInitiateResponse.getUploadID()); + String uploadID = multipartUploadInitiateResponse.getUploadID(); + + assertEquals(200, response.getStatus()); + + MessageDigest messageDigest = mock(MessageDigest.class); + try (MockedStatic mocked = mockStatic(IOUtils.class)) { + // Add the mocked methods only during the copy request + when(objectEndpoint.getMessageDigestInstance()).thenReturn(messageDigest); + mocked.when(() -> IOUtils.copyLarge(any(InputStream.class), any(OutputStream.class))) + .thenThrow(IOException.class); + + String content = "Multipart Upload"; + ByteArrayInputStream body = + new ByteArrayInputStream(content.getBytes(UTF_8)); + try { + objectEndpoint.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, + content.length(), 1, uploadID, body); + fail("Should throw IOException"); + } catch (IOException ignored) { + // Verify that the message digest is reset so that the instance can be reused for the + // next request in the same thread + verify(messageDigest, times(1)).reset(); + } + } + } + private void assertContentLength(String uploadID, String key, long contentLength) throws IOException { OzoneMultipartUploadPartListParts parts = diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java index 0585fea000c9..3e7214ce988b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java @@ -17,18 +17,16 @@ */ package org.apache.hadoop.ozone.admin.nssummary; -import com.google.gson.internal.LinkedTreeMap; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.server.JsonUtils; import 
org.apache.hadoop.ozone.shell.ListOptions; import picocli.CommandLine; -import java.util.ArrayList; -import java.util.HashMap; import java.util.concurrent.Callable; - +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.ArrayNode; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.getResponseMap; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.makeHttpCall; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.parseInputPath; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printEmptyPathRequest; @@ -101,21 +99,20 @@ public Void call() throws Exception { return null; } - HashMap duResponse = getResponseMap(response); + JsonNode duResponse = JsonUtils.readTree(response); - if (duResponse.get("status").equals("PATH_NOT_FOUND")) { + if ("PATH_NOT_FOUND".equals(duResponse.path("status").asText(""))) { printPathNotFound(); } else { if (parent.isNotValidBucketOrOBSBucket(path)) { printBucketReminder(); } - long totalSize = (long)(double)duResponse.get("size"); - + long totalSize = duResponse.path("size").asLong(-1); if (!noHeader) { printWithUnderline("Path", false); printKVSeparator(); - System.out.println(duResponse.get("path")); + System.out.println(duResponse.path("path").asText("")); printWithUnderline("Total Size", false); printKVSeparator(); @@ -124,11 +121,11 @@ public Void call() throws Exception { if (withReplica) { printWithUnderline("Total Disk Usage", false); printKVSeparator(); - long du = (long)(double)duResponse.get("sizeWithReplica"); + long du = duResponse.path("sizeWithReplica").asLong(-1); System.out.println(FileUtils.byteCountToDisplaySize(du)); } - long sizeDirectKey = (long)(double)duResponse.get("sizeDirectKey"); + long sizeDirectKey = duResponse.path("sizeDirectKey").asLong(-1); if (!listFiles && sizeDirectKey != -1) { printWithUnderline("Size of Direct Keys", false); printKVSeparator(); @@ -137,7 +134,7 @@ public Void call() throws Exception { printNewLines(1); } - if ((double)duResponse.get("subPathCount") == 0) { + if (duResponse.path("subPathCount").asInt(-1) == 0) { if (totalSize == 0) { // the object is empty System.out.println("The object is empty.\n" + @@ -160,20 +157,19 @@ public Void call() throws Exception { seekStr = ""; } - ArrayList duData = (ArrayList)duResponse.get("subPaths"); + ArrayNode subPaths = (ArrayNode) duResponse.path("subPaths"); int cnt = 0; - for (int i = 0; i < duData.size(); ++i) { + for (JsonNode subPathDU : subPaths) { if (cnt >= limit) { break; } - LinkedTreeMap subPathDU = (LinkedTreeMap) duData.get(i); - String subPath = subPathDU.get("path").toString(); + String subPath = subPathDU.path("path").asText(""); // differentiate key from other types - if (!(boolean)subPathDU.get("isKey")) { + if (!subPathDU.path("isKey").asBoolean(false)) { subPath += OM_KEY_PREFIX; } - long size = (long)(double)subPathDU.get("size"); - long sizeWithReplica = (long)(double)subPathDU.get("sizeWithReplica"); + long size = subPathDU.path("size").asLong(-1); + long sizeWithReplica = subPathDU.path("sizeWithReplica").asLong(-1); if (subPath.startsWith(seekStr)) { printDURow(subPath, size, sizeWithReplica); ++cnt; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java index f74ee109504c..0af263dbe31d 100644 --- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java @@ -17,15 +17,14 @@ */ package org.apache.hadoop.ozone.admin.nssummary; +import com.fasterxml.jackson.databind.JsonNode; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.server.JsonUtils; import picocli.CommandLine; -import java.util.ArrayList; -import java.util.HashMap; import java.util.concurrent.Callable; -import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.getResponseMap; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.makeHttpCall; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printEmptyPathRequest; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printBucketReminder; @@ -73,11 +72,11 @@ public Void call() throws Exception { printNewLines(1); return null; } - HashMap distResponse = getResponseMap(response); + JsonNode distResponse = JsonUtils.readTree(response); - if (distResponse.get("status").equals("PATH_NOT_FOUND")) { + if ("PATH_NOT_FOUND".equals(distResponse.path("status").asText())) { printPathNotFound(); - } else if (distResponse.get("status").equals("TYPE_NOT_APPLICABLE")) { + } else if ("TYPE_NOT_APPLICABLE".equals(distResponse.path("status").asText())) { printTypeNA("File Size Distribution"); } else { if (parent.isNotValidBucketOrOBSBucket(path)) { @@ -85,11 +84,11 @@ public Void call() throws Exception { } printWithUnderline("File Size Distribution", true); - ArrayList fileSizeDist = (ArrayList) distResponse.get("dist"); + JsonNode fileSizeDist = distResponse.path("dist"); double sum = 0; for (int i = 0; i < fileSizeDist.size(); ++i) { - sum += (double) fileSizeDist.get(i); + sum += fileSizeDist.get(i).asDouble(); } if (sum == 0) { printSpaces(2); @@ -100,11 +99,11 @@ public Void call() throws Exception { } for (int i = 0; i < fileSizeDist.size(); ++i) { - if ((double)fileSizeDist.get(i) == 0) { + if (fileSizeDist.get(i).asDouble() == 0) { continue; } String label = convertBinIndexToReadableRange(i); - printDistRow(label, (double) fileSizeDist.get(i), sum); + printDistRow(label, fileSizeDist.get(i).asDouble(), sum); } } printNewLines(1); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryCLIUtils.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryCLIUtils.java index 729aa20c5ce3..9aff2e9999ad 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryCLIUtils.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryCLIUtils.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.admin.nssummary; -import com.google.gson.Gson; import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -31,7 +30,6 @@ import java.net.HttpURLConnection; import java.net.URL; import java.nio.charset.StandardCharsets; -import java.util.HashMap; import static java.net.HttpURLConnection.HTTP_CREATED; import static java.net.HttpURLConnection.HTTP_OK; @@ -107,10 +105,6 @@ public static String makeHttpCall(StringBuffer url, String path, } } - public static HashMap getResponseMap(String response) { - return new Gson().fromJson(response, HashMap.class); - } - public static void printNewLines(int cnt) 
{ for (int i = 0; i < cnt; ++i) { System.out.println(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java index 113193c929b4..1e4e719baf83 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java @@ -17,14 +17,14 @@ */ package org.apache.hadoop.ozone.admin.nssummary; +import com.fasterxml.jackson.databind.JsonNode; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.server.JsonUtils; import picocli.CommandLine; -import java.util.HashMap; import java.util.concurrent.Callable; -import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.getResponseMap; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.makeHttpCall; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printEmptyPathRequest; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printBucketReminder; @@ -73,11 +73,11 @@ public Void call() throws Exception { return null; } - HashMap quotaResponse = getResponseMap(response); + JsonNode quotaResponse = JsonUtils.readTree(response); - if (quotaResponse.get("status").equals("PATH_NOT_FOUND")) { + if ("PATH_NOT_FOUND".equals(quotaResponse.path("status").asText())) { printPathNotFound(); - } else if (quotaResponse.get("status").equals("TYPE_NOT_APPLICABLE")) { + } else if ("TYPE_NOT_APPLICABLE".equals(quotaResponse.path("status").asText())) { printTypeNA("Quota"); } else { if (parent.isNotValidBucketOrOBSBucket(path)) { @@ -85,8 +85,10 @@ public Void call() throws Exception { } printWithUnderline("Quota", true); - long quotaAllowed = (long)(double)quotaResponse.get("allowed"); - long quotaUsed = (long)(double)quotaResponse.get("used"); + + long quotaAllowed = quotaResponse.get("allowed").asLong(); + long quotaUsed = quotaResponse.get("used").asLong(); + printSpaces(2); System.out.print("Allowed"); printKVSeparator(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java index 9180274b9c70..d2060b8db526 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java @@ -17,13 +17,13 @@ */ package org.apache.hadoop.ozone.admin.nssummary; +import com.fasterxml.jackson.databind.JsonNode; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.server.JsonUtils; import picocli.CommandLine; -import java.util.HashMap; import java.util.concurrent.Callable; -import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.getResponseMap; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.makeHttpCall; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.parseInputPath; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printEmptyPathRequest; @@ -71,9 +71,9 @@ public Void call() throws Exception { printNewLines(1); return null; } - HashMap summaryResponse = getResponseMap(response); + JsonNode summaryResponse = JsonUtils.readTree(response); - if 
(summaryResponse.get("status").equals("PATH_NOT_FOUND")) { + if ("PATH_NOT_FOUND".equals(summaryResponse.path("status").asText())) { printPathNotFound(); } else { if (parent.isNotValidBucketOrOBSBucket(path)) { @@ -83,10 +83,11 @@ public Void call() throws Exception { printWithUnderline("Entity Type", false); printKVSeparator(); System.out.println(summaryResponse.get("type")); - int numVol = ((Double) summaryResponse.get("numVolume")).intValue(); - int numBucket = ((Double) summaryResponse.get("numBucket")).intValue(); - int numDir = ((Double) summaryResponse.get("numDir")).intValue(); - int numKey = ((Double) summaryResponse.get("numKey")).intValue(); + + int numVol = summaryResponse.path("numVolume").asInt(-1); + int numBucket = summaryResponse.path("numBucket").asInt(-1); + int numDir = summaryResponse.path("numDir").asInt(-1); + int numKey = summaryResponse.path("numKey").asInt(-1); if (numVol != -1) { printWithUnderline("Volumes", false); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java index 012ab989d522..5c311d49c93f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java @@ -24,11 +24,9 @@ import java.util.List; import java.util.Map; import java.util.HashSet; -import com.google.gson.GsonBuilder; -import com.google.gson.Gson; -import com.google.gson.JsonObject; -import com.google.gson.JsonArray; -import com.google.gson.JsonElement; + +import com.fasterxml.jackson.databind.node.ArrayNode; +import com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; @@ -40,6 +38,7 @@ import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientException; @@ -80,13 +79,12 @@ protected void execute(OzoneClient client, OzoneAddress address) XceiverClientManager xceiverClientManager = containerOperationClient.getXceiverClientManager()) { OzoneManagerProtocol ozoneManagerClient = client.getObjectStore().getClientProxy().getOzoneManagerClient(); address.ensureKeyAddress(); - JsonElement element; - JsonObject result = new JsonObject(); + ObjectNode result = JsonUtils.createObjectNode(null); String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); String keyName = address.getKeyName(); - List tempchunks = null; - List chunkDetailsList = new ArrayList(); + List tempchunks; + List chunkDetailsList = new ArrayList<>(); HashSet chunkPaths = new HashSet<>(); OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) .setBucketName(bucketName).setKeyName(keyName).build(); @@ -102,7 +100,7 @@ protected void execute(OzoneClient client, OzoneAddress address) } ContainerLayoutVersion containerLayoutVersion = ContainerLayoutVersion .getConfiguredVersion(getConf()); - JsonArray responseArrayList = new JsonArray(); + ArrayNode responseArrayList = JsonUtils.createArrayNode(); for (OmKeyLocationInfo keyLocation : locationInfos) { 
ContainerChunkInfo containerChunkInfoVerbose = new ContainerChunkInfo(); ContainerChunkInfo containerChunkInfo = new ContainerChunkInfo(); @@ -128,24 +126,17 @@ protected void execute(OzoneClient client, OzoneAddress address) keyLocation.getBlockID().getDatanodeBlockIDProtobuf(); // doing a getBlock on all nodes Map - responses = null; - Map - readContainerResponses = null; - try { - responses = ContainerProtocolCalls.getBlockFromAllNodes(xceiverClient, - datanodeBlockID, keyLocation.getToken()); - readContainerResponses = - containerOperationClient.readContainerFromAllNodes( - keyLocation.getContainerID(), pipeline); - } catch (InterruptedException e) { - LOG.error("Execution interrupted due to " + e); - Thread.currentThread().interrupt(); - } - JsonArray responseFromAllNodes = new JsonArray(); - for (Map.Entry - entry : responses.entrySet()) { + responses = + ContainerProtocolCalls.getBlockFromAllNodes(xceiverClient, + keyLocation.getBlockID().getDatanodeBlockIDProtobuf(), + keyLocation.getToken()); + Map readContainerResponses = + containerOperationClient.readContainerFromAllNodes( + keyLocation.getContainerID(), pipeline); + ArrayNode responseFromAllNodes = JsonUtils.createArrayNode(); + for (Map.Entry entry : responses.entrySet()) { chunkPaths.clear(); - JsonObject jsonObj = new JsonObject(); + ObjectNode jsonObj = JsonUtils.createObjectNode(null); if (entry.getValue() == null) { LOG.error("Cant execute getBlock on this node"); continue; @@ -177,29 +168,29 @@ protected void execute(OzoneClient client, OzoneAddress address) containerChunkInfoVerbose.setChunkType(blockChunksType); containerChunkInfo.setChunkType(blockChunksType); } - Gson gson = new GsonBuilder().create(); + if (isVerbose()) { - element = gson.toJsonTree(containerChunkInfoVerbose); + jsonObj.set("Locations", + JsonUtils.createObjectNode(containerChunkInfoVerbose)); } else { - element = gson.toJsonTree(containerChunkInfo); + jsonObj.set("Locations", + JsonUtils.createObjectNode(containerChunkInfo)); } - jsonObj.addProperty("Datanode-HostName", entry.getKey() - .getHostName()); - jsonObj.addProperty("Datanode-IP", entry.getKey() - .getIpAddress()); - jsonObj.addProperty("Container-ID", containerId); - jsonObj.addProperty("Block-ID", keyLocation.getLocalID()); - jsonObj.add("Locations", element); + jsonObj.put("Datanode-HostName", entry.getKey().getHostName()); + jsonObj.put("Datanode-IP", entry.getKey().getIpAddress()); + jsonObj.put("Container-ID", containerId); + jsonObj.put("Block-ID", keyLocation.getLocalID()); responseFromAllNodes.add(jsonObj); } responseArrayList.add(responseFromAllNodes); + } catch (InterruptedException e) { + throw new RuntimeException(e); } finally { xceiverClientManager.releaseClientForReadData(xceiverClient, false); } } - result.add("KeyLocations", responseArrayList); - Gson gson2 = new GsonBuilder().setPrettyPrinting().create(); - String prettyJson = gson2.toJson(result); + result.set("KeyLocations", responseArrayList); + String prettyJson = JsonUtils.toJsonStringWithDefaultPrettyPrinter(result); System.out.println(prettyJson); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java index f88e08413d4b..130c1bca0fc8 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ContainerChunkInfo.java @@ -32,6 +32,7 @@ public class ContainerChunkInfo { 
private String containerPath; private List chunkInfos; + private HashSet files; private UUID pipelineID; private Pipeline pipeline; @@ -65,6 +66,27 @@ public void setChunkType(ChunkType chunkType) { this.chunkType = chunkType; } + public String getContainerPath() { + return containerPath; + } + + public List getChunkInfos() { + return chunkInfos; + } + + public HashSet getFiles() { + return files; + } + + public UUID getPipelineID() { + return pipelineID; + } + + public ChunkType getChunkType() { + return chunkType; + } + + @Override public String toString() { return "Container{" diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java index ac9fc7854a7c..48ed7c74ae7d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java @@ -17,14 +17,11 @@ package org.apache.hadoop.ozone.debug; -import com.google.gson.Gson; -import com.google.gson.GsonBuilder; -import com.google.gson.JsonArray; -import com.google.gson.JsonObject; import org.apache.hadoop.hdds.cli.SubcommandWithParent; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientException; @@ -36,6 +33,8 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.shell.OzoneAddress; import org.apache.hadoop.ozone.shell.keys.KeyHandler; +import com.fasterxml.jackson.databind.node.ArrayNode; +import com.fasterxml.jackson.databind.node.ObjectNode; import jakarta.annotation.Nonnull; import org.kohsuke.MetaInfServices; import picocli.CommandLine; @@ -129,18 +128,17 @@ protected void execute(OzoneClient client, OzoneAddress address) replicasWithoutChecksum = noChecksumClient .getKeysEveryReplicas(volumeName, bucketName, keyName); - JsonObject result = new JsonObject(); - result.addProperty(JSON_PROPERTY_FILE_NAME, + ObjectNode result = JsonUtils.createObjectNode(null); + result.put(JSON_PROPERTY_FILE_NAME, volumeName + "/" + bucketName + "/" + keyName); - result.addProperty(JSON_PROPERTY_FILE_SIZE, keyInfoDetails.getDataSize()); + result.put(JSON_PROPERTY_FILE_SIZE, keyInfoDetails.getDataSize()); - JsonArray blocks = new JsonArray(); + ArrayNode blocks = JsonUtils.createArrayNode(); downloadReplicasAndCreateManifest(keyName, replicas, replicasWithoutChecksum, dir, blocks); - result.add(JSON_PROPERTY_FILE_BLOCKS, blocks); + result.set(JSON_PROPERTY_FILE_BLOCKS, blocks); - Gson gson = new GsonBuilder().setPrettyPrinting().create(); - String prettyJson = gson.toJson(result); + String prettyJson = JsonUtils.toJsonStringWithDefaultPrettyPrinter(result); String manifestFileName = keyName + "_manifest"; System.out.println("Writing manifest file : " + manifestFileName); @@ -158,25 +156,22 @@ private void downloadReplicasAndCreateManifest( Map> replicas, Map> replicasWithoutChecksum, - File dir, JsonArray blocks) throws IOException { + File dir, ArrayNode blocks) throws IOException { int blockIndex = 0; for (Map.Entry> block : replicas.entrySet()) { - JsonObject blockJson = new JsonObject(); - JsonArray replicasJson = new JsonArray(); + ObjectNode blockJson = JsonUtils.createObjectNode(null); 
+ ArrayNode replicasJson = JsonUtils.createArrayNode(); blockIndex += 1; - blockJson.addProperty(JSON_PROPERTY_BLOCK_INDEX, blockIndex); + blockJson.put(JSON_PROPERTY_BLOCK_INDEX, blockIndex); OmKeyLocationInfo locationInfo = block.getKey(); - blockJson.addProperty(JSON_PROPERTY_BLOCK_CONTAINERID, + blockJson.put(JSON_PROPERTY_BLOCK_CONTAINERID, locationInfo.getContainerID()); - blockJson.addProperty(JSON_PROPERTY_BLOCK_LOCALID, - locationInfo.getLocalID()); - blockJson.addProperty(JSON_PROPERTY_BLOCK_LENGTH, - locationInfo.getLength()); - blockJson.addProperty(JSON_PROPERTY_BLOCK_OFFSET, - locationInfo.getOffset()); + blockJson.put(JSON_PROPERTY_BLOCK_LOCALID, locationInfo.getLocalID()); + blockJson.put(JSON_PROPERTY_BLOCK_LENGTH, locationInfo.getLength()); + blockJson.put(JSON_PROPERTY_BLOCK_OFFSET, locationInfo.getOffset()); BlockID blockID = locationInfo.getBlockID(); Map blockReplicasWithoutChecksum = @@ -186,12 +181,10 @@ private void downloadReplicasAndCreateManifest( replica : block.getValue().entrySet()) { DatanodeDetails datanode = replica.getKey(); - JsonObject replicaJson = new JsonObject(); + ObjectNode replicaJson = JsonUtils.createObjectNode(null); - replicaJson.addProperty(JSON_PROPERTY_REPLICA_HOSTNAME, - datanode.getHostName()); - replicaJson.addProperty(JSON_PROPERTY_REPLICA_UUID, - datanode.getUuidString()); + replicaJson.put(JSON_PROPERTY_REPLICA_HOSTNAME, datanode.getHostName()); + replicaJson.put(JSON_PROPERTY_REPLICA_UUID, datanode.getUuidString()); String fileName = keyName + "_block" + blockIndex + "_" + datanode.getHostName(); @@ -202,8 +195,7 @@ private void downloadReplicasAndCreateManifest( Files.copy(is, path, StandardCopyOption.REPLACE_EXISTING); } catch (IOException e) { Throwable cause = e.getCause(); - replicaJson.addProperty(JSON_PROPERTY_REPLICA_EXCEPTION, - e.getMessage()); + replicaJson.put(JSON_PROPERTY_REPLICA_EXCEPTION, e.getMessage()); if (cause instanceof OzoneChecksumException) { try (InputStream is = getReplica( blockReplicasWithoutChecksum, datanode)) { @@ -213,7 +205,7 @@ private void downloadReplicasAndCreateManifest( } replicasJson.add(replicaJson); } - blockJson.add(JSON_PROPERTY_BLOCK_REPLICAS, replicasJson); + blockJson.set(JSON_PROPERTY_BLOCK_REPLICAS, replicasJson); blocks.add(blockJson); IOUtils.close(LOG, blockReplicasWithoutChecksum.values()); diff --git a/pom.xml b/pom.xml index c5ab1f5d96e6..90f5667ae2ea 100644 --- a/pom.xml +++ b/pom.xml @@ -94,7 +94,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs UTF-8 UTF-8 - 3.1.0 + 3.2.2 bash @@ -106,8 +106,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 9.4.53.v20231009 5.2.0 4.2.0 - _ - _ 4 @@ -117,10 +115,10 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.15 3.2.2 1.26.0 - 2.8.0 - 1.5.2-5 + 2.10.1 + 1.5.6-2 1.0.13 - 2.15.1 + 2.16.0 3.14.0 1.2 1.1 @@ -138,14 +136,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.3.2 3.12.2 - 5.0.4 0.8.0.RELEASE 1.77 - 3.3.0 10.14.2.0 3.0.2 3.2.4 - 0.8.11 + 0.8.12 3.21.0-GA 1.2.2 2.3.3 @@ -159,7 +155,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.19.4 - 2.41 + 2.42 1.9.13 @@ -187,7 +183,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.11 4.7.5 0.16.0 - 0.9.11 + 0.10.2 1.7 @@ -198,7 +194,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 3.7.1 1.1.1 - 3.1.12 + 3.1.12.2 2.1.7 4.12.0 4.2.2 @@ 
-210,7 +206,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 2.2.0 32.0.0-jre 6.0.0 - 2.9.0 + 2.10.1 2.7.5 3.6.0 @@ -247,7 +243,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs -Xmx4096m -XX:+HeapDumpOnOutOfMemoryError - flaky | slow | unhealthy + flaky | native | slow | unhealthy 3.0.0-M4 ${maven-surefire-plugin.version} ${maven-surefire-plugin.version} @@ -256,7 +252,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 3.9.0 3.1.1 3.1.0 - 3.5.1 + 3.5.2 3.3.0 3.4.0 3.3.0 @@ -264,10 +260,10 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.5 3.4.1 3.0.1 - 3.6.0 + 3.7.1 0.16.1 3.1.1 - 1.9 + 3.5.0 3.6.1 4.2.2 0.44.0 @@ -304,6 +300,8 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.2.1 3.9.6 1.1.10.5 + 1.2.0 + 9.37.2 @@ -338,44 +336,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs hadoop-annotations ${hadoop.version} - - org.apache.hadoop - hadoop-client-modules - ${hadoop.version} - pom - - - org.apache.hadoop - hadoop-client-api - ${hadoop.version} - - - org.apache.hadoop - hadoop-client-check-invariants - ${hadoop.version} - pom - - - org.apache.hadoop - hadoop-client-check-test-invariants - ${hadoop.version} - pom - - - org.apache.hadoop - hadoop-client-integration-tests - ${hadoop.version} - - - org.apache.hadoop - hadoop-client-runtime - ${hadoop.version} - - - org.apache.hadoop - hadoop-client-minicluster - ${hadoop.version} - org.apache.hadoop hadoop-common @@ -398,11 +358,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${hadoop.version} test-jar - - org.apache.hadoop - hadoop-nfs - ${hadoop.version} - org.apache.hadoop hadoop-hdfs @@ -413,237 +368,11 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs hadoop-hdfs-client ${hadoop.version} - - org.apache.hadoop - hadoop-hdfs-rbf - ${hadoop.version} - - - org.apache.hadoop - hadoop-mapreduce-client-app - ${hadoop.version} - - - org.apache.hadoop - hadoop-mapreduce-client-app - ${hadoop.version} - test-jar - - - org.apache.hadoop - hadoop-mapreduce-client-common - ${hadoop.version} - - - org.apache.hadoop - hadoop-yarn-api - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-client - ${hadoop.version} - - - - org.apache.hadoop - hadoop-mapreduce-client-core - ${hadoop.version} - - org.apache.hadoop hadoop-mapreduce-client-jobclient ${hadoop.version} - - - - org.apache.hadoop - hadoop-mapreduce-client-shuffle - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn - ${hadoop.version} - pom - - - - org.apache.hadoop - hadoop-yarn-server - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-web-proxy - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-common - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-common - ${hadoop.version} - test-jar - - - - org.apache.hadoop - hadoop-yarn-server-tests - ${hadoop.version} - test-jar - - - - org.apache.hadoop - hadoop-yarn-common - ${hadoop.version} - - - org.apache.hadoop - hadoop-yarn-common - ${hadoop.version} - test-jar - - - - org.apache.hadoop - hadoop-yarn-registry - ${hadoop.version} - - - org.apache.hadoop - hadoop-yarn-server-nodemanager - ${hadoop.version} - - - org.apache.hadoop - hadoop-yarn-server-resourcemanager - ${hadoop.version} - - - org.apache.hadoop - hadoop-yarn-server-resourcemanager - ${hadoop.version} - test-jar - - - - 
org.apache.hadoop - hadoop-yarn-server-applicationhistoryservice - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-timelineservice - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-timelineservice - ${hadoop.version} - test-jar - - - - org.apache.hadoop - hadoop-yarn-server-timelineservice-hbase-client - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-timelineservice-hbase-common - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-applications-distributedshell - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-timeline-pluginstorage - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-timeline-pluginstorage - test-jar - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-server-router - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-services-core - ${hadoop.version} - - - - org.apache.hadoop - hadoop-yarn-services-core - ${hadoop.version} - test-jar - - - - org.apache.hadoop - hadoop-mapreduce-client-jobclient - ${hadoop.version} - test-jar - - - - org.apache.hadoop - hadoop-mapreduce-client-hs - ${hadoop.version} - - - - org.apache.hadoop - hadoop-mapreduce-examples - ${hadoop.version} - - - org.apache.hadoop - hadoop-gridmix - ${hadoop.version} - - - - org.apache.hadoop - hadoop-streaming - ${hadoop.version} - - - org.apache.hadoop - hadoop-archives - ${hadoop.version} - - - org.apache.hadoop - hadoop-archive-logs - ${hadoop.version} + test org.apache.hadoop @@ -656,70 +385,16 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${hadoop.version} test-jar - - org.apache.hadoop - hadoop-datajoin - ${hadoop.version} - - - org.apache.hadoop - hadoop-rumen - ${hadoop.version} - - - org.apache.hadoop - hadoop-extras - ${hadoop.version} - - org.apache.hadoop hadoop-client ${hadoop.version} - - - org.apache.hadoop - hadoop-minicluster - ${hadoop.version} - - org.apache.hadoop hadoop-minikdc ${hadoop.version} - - - org.apache.hadoop - hadoop-openstack - ${hadoop.version} - - - - org.apache.hadoop - hadoop-azure - ${hadoop.version} - - - - org.apache.hadoop - hadoop-azure-datalake - ${hadoop.version} - - - - org.apache.hadoop - hadoop-aws - ${hadoop.version} - - - - org.apache.hadoop - hadoop-aliyun - ${hadoop.version} - - org.apache.hadoop hadoop-kms @@ -731,7 +406,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${hadoop.version} test-jar - com.google.guava guava @@ -940,12 +614,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs jersey-media-jaxb ${jersey2.version} - - - org.ow2.asm - asm - ${asm.version} - com.sun.jersey jersey-core @@ -1000,13 +668,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs bonecp ${bonecp.version} - - - cglib - cglib - ${cglib.version} - - com.sun.jersey.contribs jersey-guice @@ -1460,6 +1121,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.reflections reflections ${reflections.version} + + + com.google.code.findbugs + jsr305 + + org.rocksdb @@ -1546,6 +1213,16 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs snappy-java ${snappy-java.version} + + org.apache.hadoop.thirdparty + hadoop-shaded-guava + ${hadoop-shaded-guava.version} + + + com.nimbusds + nimbus-jose-jwt + ${com.nimbusds.nimbus-jose-jwt.version} + @@ -2020,11 +1697,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs **/Test*.java - - **/${test.exclude}.java 
- ${test.exclude.pattern} - **/Test*$*.java - @@ -2284,6 +1956,24 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs + + container + + + + org.apache.maven.plugins + maven-surefire-plugin + + + org.apache.hadoop.hdds.scm.container.** + org.apache.hadoop.ozone.container.** + + ${unstable-test-groups} + + + + + om @@ -2295,6 +1985,9 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.apache.hadoop.ozone.om.** + + org.apache.hadoop.ozone.om.snapshot.** + ${unstable-test-groups} @@ -2302,7 +1995,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs - contract + snapshot @@ -2310,7 +2003,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs maven-surefire-plugin - org.apache.hadoop.fs.ozone.contract.** + org.apache.hadoop.ozone.om.snapshot.** ${unstable-test-groups} @@ -2327,11 +2020,8 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs maven-surefire-plugin - org.apache.hadoop.fs.ozone.** + org.apache.hadoop.fs.** - - org.apache.hadoop.fs.ozone.contract.** - ${unstable-test-groups} @@ -2349,6 +2039,9 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.apache.hadoop.hdds.** + + org.apache.hadoop.hdds.scm.container.** + ${unstable-test-groups} @@ -2364,13 +2057,16 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs maven-surefire-plugin + org.apache.ozone.** org.apache.hadoop.ozone.** org.apache.hadoop.ozone.client.** + org.apache.hadoop.ozone.container.** org.apache.hadoop.ozone.debug.** org.apache.hadoop.ozone.freon.** org.apache.hadoop.ozone.om.** + org.apache.hadoop.ozone.recon.** org.apache.hadoop.ozone.shell.** ${unstable-test-groups} @@ -2379,6 +2075,23 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs + + recon + + + + org.apache.maven.plugins + maven-surefire-plugin + + + org.apache.hadoop.ozone.recon.** + + ${unstable-test-groups} + + + + + shell @@ -2421,7 +2134,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs maven-surefire-plugin flaky - slow | unhealthy + native | slow | unhealthy @@ -2436,6 +2149,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs maven-surefire-plugin native + slow | unhealthy