diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java index 25ea315af284..9469fee7e284 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java @@ -154,6 +154,14 @@ public String getReplication() { + chunkKB(); } + /** Similar to {@link #getReplication()}, but applies to proto structure, without any validation. */ + public static String toString(HddsProtos.ECReplicationConfig proto) { + return proto.getCodec() + EC_REPLICATION_PARAMS_DELIMITER + + proto.getData() + EC_REPLICATION_PARAMS_DELIMITER + + proto.getParity() + EC_REPLICATION_PARAMS_DELIMITER + + proto.getEcChunkSize(); + } + public HddsProtos.ECReplicationConfig toProto() { return HddsProtos.ECReplicationConfig.newBuilder() .setData(data) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java index 1dbbc7384321..a10846bd61d0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java @@ -74,7 +74,10 @@ public enum HealthState { "OpenUnhealthyContainers"), QUASI_CLOSED_STUCK( "Containers QuasiClosed with insufficient datanode origins", - "StuckQuasiClosedContainers"); + "StuckQuasiClosedContainers"), + OPEN_WITHOUT_PIPELINE( + "Containers in OPEN state without any healthy Pipeline", + "OpenContainersWithoutPipeline"); private String description; private String metricName; diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java index ada60397efe4..ab05b34b3fea 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java @@ -112,6 +112,7 @@ public void testJsonOutput() throws IOException { assertEquals(0, stats.get("EMPTY").longValue()); assertEquals(0, stats.get("OPEN_UNHEALTHY").longValue()); assertEquals(0, stats.get("QUASI_CLOSED_STUCK").longValue()); + assertEquals(0, stats.get("OPEN_WITHOUT_PIPELINE").longValue()); JsonNode samples = json.get("samples"); assertEquals(ARRAY, samples.get("UNDER_REPLICATED").getNodeType()); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 362c08c6a94b..a501b663d126 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -777,7 +777,7 @@ private void validateChunkChecksumData(ChunkBuffer data, ChunkInfo info) throws StorageContainerException { if (validateChunkChecksumData) { try { - Checksum.verifyChecksum(data, info.getChecksumData(), 0); + Checksum.verifyChecksum(data.duplicate(data.position(), data.limit()), 
info.getChecksumData(), 0); } catch (OzoneChecksumException ex) { throw ChunkUtils.wrapInStorageContainerException(ex); } @@ -877,9 +877,9 @@ ContainerCommandResponseProto handlePutSmallFile( // chunks will be committed as a part of handling putSmallFile // here. There is no need to maintain this info in openContainerBlockMap. + validateChunkChecksumData(data, chunkInfo); chunkManager .writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext); - validateChunkChecksumData(data, chunkInfo); chunkManager.finishWriteChunks(kvContainer, blockData); List<ContainerProtos.ChunkInfo> chunks = new LinkedList<>(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java index ee51463309b8..5ceea125e814 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java @@ -106,7 +106,6 @@ public static class Builder { private Clock clock; private IntConsumer executorThreadUpdater = threadCount -> { }; - private String threadNamePrefix; public Builder clock(Clock newClock) { clock = newClock; @@ -138,11 +137,6 @@ public Builder executorThreadUpdater(IntConsumer newUpdater) { return this; } - public Builder threadNamePrefix(String threadPrefix) { - this.threadNamePrefix = threadPrefix; - return this; - } - public ReplicationSupervisor build() { if (replicationConfig == null || datanodeConfig == null) { ConfigurationSource conf = new OzoneConfiguration(); @@ -162,6 +156,7 @@ public ReplicationSupervisor build() { if (executor == null) { LOG.info("Initializing replication supervisor with thread count = {}", replicationConfig.getReplicationMaxStreams()); + String threadNamePrefix = context != null ? 
context.getThreadNamePrefix() : ""; ThreadFactory threadFactory = new ThreadFactoryBuilder() .setDaemon(true) .setNameFormat(threadNamePrefix + "ContainerReplicationThread-%d") diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index 3d4495d1d5ba..d87f555b947a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -21,6 +21,7 @@ import com.google.common.collect.Maps; import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.BlockID; @@ -40,6 +41,8 @@ import org.apache.hadoop.hdds.security.token.TokenVerifier; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.common.Checksum; +import org.apache.hadoop.ozone.common.ChecksumData; +import org.apache.hadoop.ozone.common.OzoneChecksumException; import org.apache.hadoop.ozone.common.utils.BufferUtils; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; @@ -47,6 +50,7 @@ import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.Handler; import org.apache.hadoop.ozone.container.common.report.IncrementalReportSender; +import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.Op; @@ -170,6 +174,76 @@ public void testContainerCloseActionWhenFull() throws IOException { } } + @Test + public void testSmallFileChecksum() throws IOException { + String testDirPath = GenericTestUtils.getTempPath( + TestHddsDispatcher.class.getSimpleName()); + try { + UUID scmId = UUID.randomUUID(); + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(HDDS_DATANODE_DIR_KEY, testDirPath); + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath); + DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class); + dnConf.setChunkDataValidationCheck(true); + conf.setFromObject(dnConf); + DatanodeDetails dd = randomDatanodeDetails(); + HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf); + + ContainerCommandResponseProto smallFileResponse = + hddsDispatcher.dispatch(newPutSmallFile(1L, 1L), null); + + assertEquals(ContainerProtos.Result.SUCCESS, smallFileResponse.getResult()); + } finally { + ContainerMetrics.remove(); + FileUtils.deleteDirectory(new File(testDirPath)); + } + } + + @Test + public void testWriteChunkChecksum() throws IOException { + String testDirPath = GenericTestUtils.getTempPath( + TestHddsDispatcher.class.getSimpleName()); + try { + UUID scmId = UUID.randomUUID(); + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(HDDS_DATANODE_DIR_KEY, testDirPath); + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath); + DatanodeConfiguration dnConf = 
conf.getObject(DatanodeConfiguration.class); + dnConf.setChunkDataValidationCheck(true); + conf.setFromObject(dnConf); + DatanodeDetails dd = randomDatanodeDetails(); + HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf); + // Send a few WriteChunkRequests + ContainerCommandResponseProto response; + ContainerCommandRequestProto writeChunkRequest0 = getWriteChunkRequest0(dd.getUuidString(), 1L, 1L, 0); + hddsDispatcher.dispatch(writeChunkRequest0, null); + hddsDispatcher.dispatch(getWriteChunkRequest0(dd.getUuidString(), 1L, 1L, 1), null); + response = hddsDispatcher.dispatch(getWriteChunkRequest0(dd.getUuidString(), 1L, 1L, 2), null); + + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + // Send Read Chunk request for written chunk. + response = + hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest0), null); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + + ByteString responseData = BufferUtils.concatByteStrings( + response.getReadChunk().getDataBuffers().getBuffersList()); + assertEquals(writeChunkRequest0.getWriteChunk().getData(), + responseData); + + // Test checksum on Read: + final DispatcherContext context = DispatcherContext + .newBuilder(DispatcherContext.Op.READ_STATE_MACHINE_DATA) + .build(); + response = + hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest0), context); + assertEquals(ContainerProtos.Result.SUCCESS, response.getResult()); + } finally { + ContainerMetrics.remove(); + FileUtils.deleteDirectory(new File(testDirPath)); + } + } + @Test public void testContainerCloseActionWhenVolumeFull() throws Exception { String testDir = GenericTestUtils.getTempPath( @@ -527,6 +601,84 @@ private ContainerCommandRequestProto getWriteChunkRequest( .build(); } + static ChecksumData checksum(ByteString data) { + try { + return new Checksum(ContainerProtos.ChecksumType.CRC32, 256) + .computeChecksum(data.asReadOnlyByteBuffer()); + } catch (OzoneChecksumException e) { + throw new IllegalStateException(e); + } + } + + private ContainerCommandRequestProto getWriteChunkRequest0( + String datanodeId, Long containerId, Long localId, int chunkNum) { + final int lenOfBytes = 32; + ByteString chunkData = ByteString.copyFrom(RandomUtils.nextBytes(lenOfBytes)); + + ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo + .newBuilder() + .setChunkName( + DigestUtils.md5Hex("dummy-key") + "_stream_" + + containerId + "_chunk_" + localId) + .setOffset((long) chunkNum * lenOfBytes) + .setLen(lenOfBytes) + .setChecksumData(checksum(chunkData).getProtoBufMessage()) + .build(); + + WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto + .newBuilder() + .setBlockID(new BlockID(containerId, localId) + .getDatanodeBlockIDProtobuf()) + .setChunkData(chunk) + .setData(chunkData); + + return ContainerCommandRequestProto + .newBuilder() + .setContainerID(containerId) + .setCmdType(ContainerProtos.Type.WriteChunk) + .setDatanodeUuid(datanodeId) + .setWriteChunk(writeChunkRequest) + .build(); + } + + static ContainerCommandRequestProto newPutSmallFile(Long containerId, Long localId) { + ByteString chunkData = ByteString.copyFrom(RandomUtils.nextBytes(32)); + return newPutSmallFile(new BlockID(containerId, localId), chunkData); + } + + static ContainerCommandRequestProto newPutSmallFile( + BlockID blockID, ByteString data) { + final ContainerProtos.BlockData.Builder blockData + = ContainerProtos.BlockData.newBuilder() + .setBlockID(blockID.getDatanodeBlockIDProtobuf()); + final 
ContainerProtos.PutBlockRequestProto.Builder putBlockRequest + = ContainerProtos.PutBlockRequestProto.newBuilder() + .setBlockData(blockData); + final ContainerProtos.KeyValue keyValue = ContainerProtos.KeyValue.newBuilder() + .setKey("OverWriteRequested") + .setValue("true") + .build(); + final ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo.newBuilder() + .setChunkName(blockID.getLocalID() + "_chunk") + .setOffset(0) + .setLen(data.size()) + .addMetadata(keyValue) + .setChecksumData(checksum(data).getProtoBufMessage()) + .build(); + final ContainerProtos.PutSmallFileRequestProto putSmallFileRequest + = ContainerProtos.PutSmallFileRequestProto.newBuilder() + .setChunkInfo(chunk) + .setBlock(putBlockRequest) + .setData(data) + .build(); + return ContainerCommandRequestProto.newBuilder() + .setCmdType(ContainerProtos.Type.PutSmallFile) + .setContainerID(blockID.getContainerID()) + .setDatanodeUuid(UUID.randomUUID().toString()) + .setPutSmallFile(putSmallFileRequest) + .build(); + } + /** * Creates container read chunk request using input container write chunk * request. diff --git a/hadoop-hdds/hadoop-dependency-client/pom.xml b/hadoop-hdds/hadoop-dependency-client/pom.xml index 7ed20ab6acb3..e61772c40a75 100644 --- a/hadoop-hdds/hadoop-dependency-client/pom.xml +++ b/hadoop-hdds/hadoop-dependency-client/pom.xml @@ -39,10 +39,18 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> <artifactId>hadoop-common</artifactId> <version>${hadoop.version}</version> <exclusions> + <exclusion> + <groupId>com.nimbusds</groupId> + <artifactId>nimbus-jose-jwt</artifactId> + </exclusion> <exclusion> <groupId>org.xerial.snappy</groupId> <artifactId>snappy-java</artifactId> </exclusion> + <exclusion> + <groupId>org.apache.hadoop.thirdparty</groupId> + <artifactId>hadoop-shaded-guava</artifactId> + </exclusion> <exclusion> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-annotations</artifactId> @@ -194,12 +202,24 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + <exclusion> + <groupId>org.apache.hadoop.thirdparty</groupId> + <artifactId>hadoop-shaded-guava</artifactId> + </exclusion> + <exclusion> + <groupId>com.nimbusds</groupId> + <artifactId>nimbus-jose-jwt</artifactId> + </exclusion> </exclusions> </dependency> <dependency> <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-hdfs</artifactId> <version>${hadoop.version}</version> <scope>compile</scope> <exclusions> + <exclusion> + <groupId>org.apache.hadoop.thirdparty</groupId> + <artifactId>hadoop-shaded-guava</artifactId> + </exclusion> <exclusion> <groupId>com.google.guava</groupId> <artifactId>guava</artifactId> diff --git a/hadoop-hdds/hadoop-dependency-server/pom.xml b/hadoop-hdds/hadoop-dependency-server/pom.xml index 542dc2883a4c..1a014b467f8e 100644 --- a/hadoop-hdds/hadoop-dependency-server/pom.xml +++ b/hadoop-hdds/hadoop-dependency-server/pom.xml @@ -39,10 +39,18 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> <artifactId>hadoop-common</artifactId> <version>${hadoop.version}</version> <exclusions> + <exclusion> + <groupId>com.nimbusds</groupId> + <artifactId>nimbus-jose-jwt</artifactId> + </exclusion> <exclusion> <groupId>org.xerial.snappy</groupId> <artifactId>snappy-java</artifactId> </exclusion> + <exclusion> + <groupId>org.apache.hadoop.thirdparty</groupId> + <artifactId>hadoop-shaded-guava</artifactId> + </exclusion> <exclusion> <groupId>org.apache.curator</groupId> <artifactId>*</artifactId> @@ -101,6 +109,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + <exclusion> + <groupId>org.apache.hadoop.thirdparty</groupId> + <artifactId>hadoop-shaded-guava</artifactId> + </exclusion> + <exclusion> + <groupId>com.nimbusds</groupId> + <artifactId>nimbus-jose-jwt</artifactId> + </exclusion> <exclusion> <groupId>commons-cli</groupId> @@ -112,6 +128,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> <version>${hadoop.version}</version> <scope>compile</scope> <exclusions> + <exclusion> + <groupId>org.apache.hadoop.thirdparty</groupId> + <artifactId>hadoop-shaded-guava</artifactId> + </exclusion> <exclusion> <groupId>io.netty</groupId> <artifactId>*</artifactId> diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml index 3778bdca0420..ab6861c12f61 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml +++ b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml @@ -77,6 +77,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> <artifactId>mockito-core</artifactId> <scope>test</scope> </dependency> + <dependency> + <groupId>org.mockito</groupId> + <artifactId>mockito-junit-jupiter</artifactId> + <scope>test</scope> + </dependency> <dependency> <groupId>org.apache.ozone</groupId> <artifactId>hdds-rocks-native</artifactId> diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/graph/TestPrintableGraph.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/graph/TestPrintableGraph.java new file mode 100644 index 000000000000..8031eca7b0db --- /dev/null +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/graph/TestPrintableGraph.java @@ -0,0 +1,84 @@ 
+/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ozone.graph; + +import com.google.common.graph.MutableGraph; +import org.apache.ozone.rocksdiff.CompactionNode; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.when; + +/** + * This class is used for testing the PrintableGraph class. + * It contains methods to test the generation and printing of graphs with different graph types. + */ +@ExtendWith(MockitoExtension.class) +public class TestPrintableGraph { + @TempDir + private Path dir; + + @Mock + private MutableGraph<CompactionNode> mutableGraph; + + @ParameterizedTest + @EnumSource(PrintableGraph.GraphType.class) + void testPrintNoGraphMessage(PrintableGraph.GraphType graphType) { + PrintableGraph graph = new PrintableGraph(mutableGraph, graphType); + IOException e = assertThrows(IOException.class, + () -> graph.generateImage(dir.resolve(graphType.name()).toString())); + assertEquals("Graph is empty.", e.getMessage()); + } + + @ParameterizedTest + @EnumSource(PrintableGraph.GraphType.class) + void testPrintActualGraph(PrintableGraph.GraphType graphType) throws IOException { + Set<CompactionNode> nodes = Stream.of( + new CompactionNode("fileName1", + 100, 100, "startKey1", "endKey1", "columnFamily1"), + new CompactionNode("fileName2", + 200, 200, "startKey2", "endKey2", null), + new CompactionNode("fileName3", + 300, 300, null, "endKey3", "columnFamily3"), + new CompactionNode("fileName4", + 400, 400, "startKey4", null, "columnFamily4") + ).collect(Collectors.toSet()); + when(mutableGraph.nodes()).thenReturn(nodes); + + PrintableGraph graph = new PrintableGraph(mutableGraph, graphType); + graph.generateImage(dir.resolve(graphType.name()).toString()); + + assertTrue(Files.exists(dir.resolve(graphType.name())), "Graph hasn't been generated"); + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java index 72d90abe1f4f..62e1f0193561 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java +++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java @@ -251,16 +251,12 @@ private void initialize() throws IOException { pipelineManager.addContainerToPipelineSCMStart( container.getPipelineID(), container.containerID()); } catch (PipelineNotFoundException ex) { + // We are ignoring this here. The container will be moved to + // CLOSING state by ReplicationManager's OpenContainerHandler. + // For more info, see HDDS-10231. - LOG.warn("Found container {} which is in OPEN state with " + "pipeline {} that does not exist. Marking container for " + "closing.", container, container.getPipelineID()); + LOG.warn("Found container {} which is in OPEN state with " + "pipeline {} that does not exist. It will be closed by " + "ReplicationManager.", container, container.getPipelineID()); - try { - updateContainerState(container.containerID().getProtobuf(), - LifeCycleEvent.FINALIZE); - } catch (InvalidStateTransitionException e) { - // This cannot happen. - LOG.warn("Unable to finalize Container {}.", container); - } } } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java index 84263435cd29..32310ef9e7bf 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java @@ -61,6 +61,7 @@ import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; +import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; @@ -1546,5 +1547,14 @@ private int getRemainingMaintenanceRedundancy(boolean isEC) { private static boolean isEC(ReplicationConfig replicationConfig) { return replicationConfig.getReplicationType() == EC; } + + public boolean hasHealthyPipeline(ContainerInfo container) { + try { + return scmContext.getScm().getPipelineManager() + .getPipeline(container.getPipelineID()) != null; + } catch (PipelineNotFoundException e) { + return false; + } + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/OpenContainerHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/OpenContainerHandler.java index 2c0b405db972..21c3c76d3e97 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/OpenContainerHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/OpenContainerHandler.java @@ -53,20 +53,26 @@ public boolean handle(ContainerCheckRequest request) { if (containerInfo.getState() == HddsProtos.LifeCycleState.OPEN) { LOG.debug("Checking open container {} in OpenContainerHandler", containerInfo); - if (!isOpenContainerHealthy( - containerInfo, request.getContainerReplicas())) { - // This is an unhealthy open container, so we need to trigger the - // close process on it. - LOG.debug("Container {} is open but unhealthy. Triggering close.", - containerInfo); - request.getReport().incrementAndSample( - ReplicationManagerReport.HealthState.OPEN_UNHEALTHY, + final boolean noPipeline = !replicationManager.hasHealthyPipeline(containerInfo); + // Minor optimization. 
If noPipeline is true, isOpenContainerHealthy will not + // be called. + final boolean unhealthy = noPipeline || !isOpenContainerHealthy(containerInfo, + request.getContainerReplicas()); + if (unhealthy) { + // For an OPEN container, we close the container + // if the container has no Pipeline or if the container is unhealthy. + LOG.info("Container {} is open but {}. Triggering close.", + containerInfo, noPipeline ? "has no Pipeline" : "unhealthy"); + + request.getReport().incrementAndSample(noPipeline ? + ReplicationManagerReport.HealthState.OPEN_WITHOUT_PIPELINE : + ReplicationManagerReport.HealthState.OPEN_UNHEALTHY, containerInfo.containerID()); + if (!request.isReadOnly()) { replicationManager .sendCloseContainerEvent(containerInfo.containerID()); } - return true; } // For open containers we do not want to do any further processing in RM // so return true to stop the command chain. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java index 8e1fa7b33bbf..043cfcf3b6ca 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.PlacementPolicy; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -43,6 +44,9 @@ import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.security.token.ContainerTokenGenerator; import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand; import org.apache.hadoop.ozone.protocol.commands.ReconstructECContainersCommand; @@ -57,6 +61,7 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; import org.mockito.ArgumentCaptor; +import org.mockito.Matchers; import org.mockito.Mockito; import java.io.IOException; @@ -94,6 +99,7 @@ import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; /** * Tests for the ReplicationManager. @@ -167,6 +173,16 @@ public void setup() throws IOException { // Ensure that RM will run when asked. 
Mockito.when(scmContext.isLeaderReady()).thenReturn(true); Mockito.when(scmContext.isInSafeMode()).thenReturn(false); + + PipelineManager pipelineManager = mock(PipelineManager.class); + Mockito.when(pipelineManager.getPipeline(Matchers.any())) + .thenReturn(HddsTestUtils.getRandomPipeline()); + + StorageContainerManager scm = mock(StorageContainerManager.class); + Mockito.when(scm.getPipelineManager()).thenReturn(pipelineManager); + Mockito.when(scm.getContainerTokenGenerator()).thenReturn(ContainerTokenGenerator.DISABLED); + + Mockito.when(scmContext.getScm()).thenReturn(scm); } @AfterEach diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestOpenContainerHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestOpenContainerHandler.java index 32ef98e76a55..418d4e127a93 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestOpenContainerHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestOpenContainerHandler.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; +import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport.HealthState; import org.apache.hadoop.hdds.scm.container.replication.ContainerCheckRequest; import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; import org.apache.hadoop.hdds.scm.container.replication.ReplicationTestUtil; @@ -37,6 +38,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.times; /** @@ -55,6 +57,7 @@ public void setup() { ratisReplicationConfig = RatisReplicationConfig.getInstance( HddsProtos.ReplicationFactor.THREE); replicationManager = Mockito.mock(ReplicationManager.class); + Mockito.when(replicationManager.hasHealthyPipeline(any())).thenReturn(true); openContainerHandler = new OpenContainerHandler(replicationManager); } @@ -73,7 +76,7 @@ public void testClosedContainerReturnsFalse() { .build(); Assertions.assertFalse(openContainerHandler.handle(request)); Mockito.verify(replicationManager, times(0)) - .sendCloseContainerEvent(Mockito.any()); + .sendCloseContainerEvent(any()); } @Test @@ -91,7 +94,7 @@ public void testOpenContainerReturnsTrue() { .build(); Assertions.assertTrue(openContainerHandler.handle(request)); Mockito.verify(replicationManager, times(0)) - .sendCloseContainerEvent(Mockito.any()); + .sendCloseContainerEvent(any()); } @Test @@ -118,6 +121,35 @@ public void testOpenUnhealthyContainerIsClosed() { Assertions.assertTrue(openContainerHandler.handle(readRequest)); Mockito.verify(replicationManager, times(1)) .sendCloseContainerEvent(containerInfo.containerID()); + Assertions.assertEquals(1, request.getReport().getStat(HealthState.OPEN_UNHEALTHY)); + } + + @Test + public void testOpenContainerWithoutPipelineIsClosed() { + Mockito.when(replicationManager.hasHealthyPipeline(any())).thenReturn(false); + ContainerInfo containerInfo = ReplicationTestUtil.createContainerInfo( + ecReplicationConfig, 1, OPEN); + Set<ContainerReplica> containerReplicas = ReplicationTestUtil + .createReplicas(containerInfo.containerID(), 
ContainerReplicaProto.State.OPEN, 1, 2, 3, 4); + ContainerCheckRequest request = new ContainerCheckRequest.Builder() + .setPendingOps(Collections.emptyList()) + .setReport(new ReplicationManagerReport()) + .setContainerInfo(containerInfo) + .setContainerReplicas(containerReplicas) + .build(); + ContainerCheckRequest readRequest = new ContainerCheckRequest.Builder() + .setPendingOps(Collections.emptyList()) + .setReport(new ReplicationManagerReport()) + .setContainerInfo(containerInfo) + .setContainerReplicas(containerReplicas) + .setReadOnly(true) + .build(); + Assertions.assertTrue(openContainerHandler.handle(request)); + Assertions.assertTrue(openContainerHandler.handle(readRequest)); + Mockito.verify(replicationManager, times(1)) + .sendCloseContainerEvent(containerInfo.containerID()); + Assertions.assertEquals(1, request.getReport().getStat(HealthState.OPEN_WITHOUT_PIPELINE)); } @Test @@ -135,7 +167,7 @@ public void testClosedRatisContainerReturnsFalse() { .build(); Assertions.assertFalse(openContainerHandler.handle(request)); Mockito.verify(replicationManager, times(0)) - .sendCloseContainerEvent(Mockito.any()); + .sendCloseContainerEvent(any()); } @Test @@ -153,7 +185,7 @@ public void testOpenRatisContainerReturnsTrue() { .build(); Assertions.assertTrue(openContainerHandler.handle(request)); Mockito.verify(replicationManager, times(0)) - .sendCloseContainerEvent(Mockito.any()); + .sendCloseContainerEvent(any()); } @Test @@ -179,6 +211,33 @@ public void testOpenUnhealthyRatisContainerIsClosed() { Assertions.assertTrue(openContainerHandler.handle(request)); Assertions.assertTrue(openContainerHandler.handle(readRequest)); Mockito.verify(replicationManager, times(1)) - .sendCloseContainerEvent(Mockito.any()); + .sendCloseContainerEvent(any()); + } + + @Test + public void testOpenRatisContainerWithoutPipelineIsClosed() { + Mockito.when(replicationManager.hasHealthyPipeline(any())).thenReturn(false); + ContainerInfo containerInfo = ReplicationTestUtil.createContainerInfo( + ratisReplicationConfig, 1, OPEN); + Set<ContainerReplica> containerReplicas = ReplicationTestUtil + .createReplicas(containerInfo.containerID(), + ContainerReplicaProto.State.OPEN, 0, 0, 0); + ContainerCheckRequest request = new ContainerCheckRequest.Builder() + .setPendingOps(Collections.emptyList()) + .setReport(new ReplicationManagerReport()) + .setContainerInfo(containerInfo) + .setContainerReplicas(containerReplicas) + .build(); + ContainerCheckRequest readRequest = new ContainerCheckRequest.Builder() + .setPendingOps(Collections.emptyList()) + .setReport(new ReplicationManagerReport()) + .setContainerInfo(containerInfo) + .setContainerReplicas(containerReplicas) + .setReadOnly(true) + .build(); + Assertions.assertTrue(openContainerHandler.handle(request)); + Assertions.assertTrue(openContainerHandler.handle(readRequest)); + Mockito.verify(replicationManager, times(1)).sendCloseContainerEvent(any()); + Assertions.assertEquals(1, request.getReport().getStat(HealthState.OPEN_WITHOUT_PIPELINE)); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java index f8c752aab271..321c108af729 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java @@ -200,6 +200,23 @@ public Map<String, String> toAuditMap() { if (this.ownerName != null) { 
auditMap.put(OzoneConsts.OWNER, this.ownerName); } + if (this.quotaInBytesSet && quotaInBytes > 0 || + (this.quotaInBytes != OzoneConsts.QUOTA_RESET)) { + auditMap.put(OzoneConsts.QUOTA_IN_BYTES, + String.valueOf(this.quotaInBytes)); + } + if (this.quotaInNamespaceSet && quotaInNamespace > 0 || + (this.quotaInNamespace != OzoneConsts.QUOTA_RESET)) { + auditMap.put(OzoneConsts.QUOTA_IN_NAMESPACE, + String.valueOf(this.quotaInNamespace)); + } + if (this.defaultReplicationConfig != null) { + auditMap.put(OzoneConsts.REPLICATION_TYPE, String.valueOf( + this.defaultReplicationConfig.getType())); + auditMap.put(OzoneConsts.REPLICATION_CONFIG, + this.defaultReplicationConfig.getReplicationConfig() + .getReplication()); + } return auditMap; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java index cc811053eb27..3c8afc9b827a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java @@ -370,6 +370,17 @@ public Map<String, String> toAuditMap() { auditMap.put(OzoneConsts.USED_BYTES, String.valueOf(this.usedBytes)); auditMap.put(OzoneConsts.USED_NAMESPACE, String.valueOf(this.usedNamespace)); + auditMap.put(OzoneConsts.OWNER, this.owner); + auditMap.put(OzoneConsts.REPLICATION_TYPE, + (this.defaultReplicationConfig != null) ? + String.valueOf(this.defaultReplicationConfig.getType()) : null); + auditMap.put(OzoneConsts.REPLICATION_CONFIG, + (this.defaultReplicationConfig != null) ? + this.defaultReplicationConfig.getReplicationConfig() + .getReplication() : null); + auditMap.put(OzoneConsts.QUOTA_IN_BYTES, String.valueOf(this.quotaInBytes)); + auditMap.put(OzoneConsts.QUOTA_IN_NAMESPACE, + String.valueOf(this.quotaInNamespace)); return auditMap; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java index 8ee9c6ee1f52..f7a39ea10001 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java @@ -728,4 +728,28 @@ public SnapshotInfo copyObject() { .setExclusiveReplicatedSize(exclusiveReplicatedSize) .build(); } + + @Override + public String toString() { + return "SnapshotInfo{" + + "snapshotId: '" + snapshotId + '\'' + + ", name: '" + name + '\'' + + ", volumeName: '" + volumeName + '\'' + + ", bucketName: '" + bucketName + '\'' + + ", snapshotStatus: '" + snapshotStatus + '\'' + + ", creationTime: '" + creationTime + '\'' + + ", deletionTime: '" + deletionTime + '\'' + + ", pathPreviousSnapshotId: '" + pathPreviousSnapshotId + '\'' + + ", globalPreviousSnapshotId: '" + globalPreviousSnapshotId + '\'' + + ", snapshotPath: '" + snapshotPath + '\'' + + ", checkpointDir: '" + checkpointDir + '\'' + + ", dbTxSequenceNumber: '" + dbTxSequenceNumber + '\'' + + ", deepClean: '" + deepClean + '\'' + + ", sstFiltered: '" + sstFiltered + '\'' + + ", referencedSize: '" + referencedSize + '\'' + + ", referencedReplicatedSize: '" + referencedReplicatedSize + '\'' + + ", exclusiveSize: '" + exclusiveSize + '\'' + + ", exclusiveReplicatedSize: '" + exclusiveReplicatedSize + '\'' + + '}'; + } } diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index 840b3244a4b2..eae36091bdb4 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -253,7 +253,6 @@ public final class OzoneManagerProtocolClientSideTranslatorPB private OmTransport transport; private ThreadLocal<S3Auth> threadLocalS3Auth = new ThreadLocal<>(); - private boolean s3AuthCheck; public OzoneManagerProtocolClientSideTranslatorPB(OmTransport omTransport, String clientId) { @@ -1030,7 +1029,7 @@ public ListKeysLightResult listKeysLight(String volumeName, reqBuilder.setBucketName(bucketName); reqBuilder.setCount(maxKeys); - if (StringUtils.isNotEmpty(startKey)) { + if (startKey != null) { reqBuilder.setStartKey(startKey); } @@ -2227,16 +2226,9 @@ public List<OzoneFileStatus> listStatus(OmKeyArgs args, boolean recursive, .setSortDatanodes(args.getSortDatanodes()) .setLatestVersionLocation(args.getLatestVersionLocation()) .build(); - ListStatusRequest.Builder listStatusRequestBuilder = - ListStatusRequest.newBuilder() - .setKeyArgs(keyArgs) - .setRecursive(recursive) - .setStartKey(startKey) - .setNumEntries(numEntries); - if (allowPartialPrefixes) { - listStatusRequestBuilder.setAllowPartialPrefix(allowPartialPrefixes); - } + ListStatusRequest.Builder listStatusRequestBuilder = createListStatusRequestBuilder(keyArgs, recursive, startKey, + numEntries, allowPartialPrefixes); OMRequest omRequest = createOMRequest(Type.ListStatus) .setListStatusRequest(listStatusRequestBuilder.build()) @@ -2263,16 +2255,9 @@ public List<OzoneFileStatusLight> listStatusLight(OmKeyArgs args, .setSortDatanodes(false) .setLatestVersionLocation(true) .build(); - ListStatusRequest.Builder listStatusRequestBuilder = - ListStatusRequest.newBuilder() - .setKeyArgs(keyArgs) - .setRecursive(recursive) - .setStartKey(startKey) - .setNumEntries(numEntries); - if (allowPartialPrefixes) { - listStatusRequestBuilder.setAllowPartialPrefix(allowPartialPrefixes); - } + ListStatusRequest.Builder listStatusRequestBuilder = createListStatusRequestBuilder(keyArgs, recursive, startKey, + numEntries, allowPartialPrefixes); OMRequest omRequest = createOMRequest(Type.ListStatusLight) .setListStatusRequest(listStatusRequestBuilder.build()) @@ -2289,6 +2274,26 @@ public List<OzoneFileStatusLight> listStatusLight(OmKeyArgs args, return statusList; } + private ListStatusRequest.Builder createListStatusRequestBuilder(KeyArgs keyArgs, boolean recursive, String startKey, + long numEntries, boolean allowPartialPrefixes) { + ListStatusRequest.Builder listStatusRequestBuilder = + ListStatusRequest.newBuilder() + .setKeyArgs(keyArgs) + .setRecursive(recursive) + .setNumEntries(numEntries); + + if (startKey != null) { + listStatusRequestBuilder.setStartKey(startKey); + } else { + listStatusRequestBuilder.setStartKey(""); + } + + if (allowPartialPrefixes) { + listStatusRequestBuilder.setAllowPartialPrefix(allowPartialPrefixes); + } + return listStatusRequestBuilder; + } + @Override public List<OzoneFileStatus> listStatus(OmKeyArgs args, boolean recursive, String startKey, long numEntries) throws IOException { diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt index 7d224af80898..b4f83965f4a1 100644 --- 
a/hadoop-ozone/dist/src/main/license/jar-report.txt +++ b/hadoop-ozone/dist/src/main/license/jar-report.txt @@ -3,7 +3,6 @@ share/ozone/lib/animal-sniffer-annotations.jar share/ozone/lib/annotations.jar share/ozone/lib/annotations.jar share/ozone/lib/aopalliance.jar -share/ozone/lib/antlr4-runtime.jar share/ozone/lib/aopalliance-repackaged.jar share/ozone/lib/aspectjrt.jar share/ozone/lib/aspectjweaver.jar @@ -147,10 +146,10 @@ share/ozone/lib/jetty-util-ajax.jar share/ozone/lib/jetty-util.jar share/ozone/lib/jetty-webapp.jar share/ozone/lib/jetty-xml.jar -share/ozone/lib/jgraph.jar share/ozone/lib/jgrapht-core.jar share/ozone/lib/jgrapht-ext.jar share/ozone/lib/jgraphx.jar +share/ozone/lib/jheaps.jar share/ozone/lib/jmespath-java.jar share/ozone/lib/jna.jar share/ozone/lib/jna-platform.jar diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java index c40b46a79d77..b8b66c18f802 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java @@ -249,6 +249,17 @@ static void createAndVerifyKeyData(OzoneBucket bucket) throws Exception { out.write(value.getBytes(StandardCharsets.UTF_8)); } verifyKeyData(bucket, keyName, value, testStartTime); + OzoneKeyDetails key1 = bucket.getKey(keyName); + + // Overwrite the key + try (OzoneOutputStream out = bucket.createKey(keyName, + value.getBytes(StandardCharsets.UTF_8).length, + ReplicationConfig.fromTypeAndFactor(RATIS, ONE), + new HashMap<>())) { + out.write(value.getBytes(StandardCharsets.UTF_8)); + } + OzoneKeyDetails key2 = bucket.getKey(keyName); + assertNotEquals(key1.getFileEncryptionInfo().toString(), key2.getFileEncryptionInfo().toString()); } static void verifyKeyData(OzoneBucket bucket, String keyName, String value, @@ -269,7 +280,6 @@ static void verifyKeyData(OzoneBucket bucket, String keyName, String value, len = is.read(fileContent); } - assertEquals(len, value.length()); assertTrue(verifyRatisReplication(bucket.getVolumeName(), bucket.getName(), keyName, RATIS, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java index f5d6ed752968..c5ad5e42c65b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java @@ -67,6 +67,8 @@ public class TestListKeysWithFSO { private static OzoneBucket fsoOzoneBucket; private static OzoneBucket legacyOzoneBucket2; private static OzoneBucket fsoOzoneBucket2; + private static OzoneBucket emptyLegacyOzoneBucket; + private static OzoneBucket emptyFsoOzoneBucket; private static OzoneClient client; /** @@ -113,6 +115,10 @@ public static void init() throws Exception { ozoneVolume.createBucket(fsoBucketName, omBucketArgs); fsoOzoneBucket2 = ozoneVolume.getBucket(fsoBucketName); + fsoBucketName = "bucket" + RandomStringUtils.randomNumeric(5); + ozoneVolume.createBucket(fsoBucketName, omBucketArgs); + emptyFsoOzoneBucket = ozoneVolume.getBucket(fsoBucketName); + builder = BucketArgs.newBuilder(); builder.setStorageType(StorageType.DISK); 
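The empty FSO bucket created above, and the empty legacy bucket created just below, let the new shallow-listing cases assert that an empty bucket yields an empty result for any prefix. For orientation, shallow listing is driven through the listKeys overload that takes a shallow flag; the sketch below shows the intended call shape (it assumes that OzoneBucket overload, which the getExpectedKeyShallowList and checkKeyShallowList helpers in this test wrap):

    // A sketch, assuming OzoneBucket#listKeys(String keyPrefix, String prevKey,
    // boolean shallow): with shallow = true the iterator yields only the
    // immediate children under the prefix, not every key in the subtree.
    Iterator<? extends OzoneKey> it = fsoOzoneBucket.listKeys("a1/b1/c12", null, true);
    while (it.hasNext()) {
      System.out.println(it.next().getName()); // e.g. a1/b1/c1222.tx
    }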
builder.setBucketLayout(BucketLayout.LEGACY); @@ -121,6 +127,10 @@ public static void init() throws Exception { ozoneVolume.createBucket(legacyBucketName, omBucketArgs); legacyOzoneBucket2 = ozoneVolume.getBucket(legacyBucketName); + legacyBucketName = "bucket" + RandomStringUtils.randomNumeric(5); + ozoneVolume.createBucket(legacyBucketName, omBucketArgs); + emptyLegacyOzoneBucket = ozoneVolume.getBucket(legacyBucketName); + initFSNameSpace(); } @@ -482,6 +492,23 @@ public void testShallowListKeys() throws Exception { expectedKeys = getExpectedKeyShallowList(keyPrefix, startKey, legacyOzoneBucket); checkKeyShallowList(keyPrefix, startKey, expectedKeys, fsoOzoneBucket); + + // case-7: keyPrefix is set but the target buckets are empty and + // startKey is null, so the shallow listing should be empty + keyPrefix = "a1/b1/c12"; + startKey = null; + expectedKeys = + getExpectedKeyShallowList(keyPrefix, startKey, emptyLegacyOzoneBucket); + checkKeyShallowList(keyPrefix, startKey, expectedKeys, emptyFsoOzoneBucket); + + // case-8: keyPrefix corresponds to multiple existing keys and + // startKey is null + keyPrefix = "a1/b1/c12"; + // a1/b1/c1222.tx + expectedKeys = + getExpectedKeyShallowList(keyPrefix, startKey, legacyOzoneBucket); + checkKeyShallowList(keyPrefix, startKey, expectedKeys, fsoOzoneBucket); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java index 13e44402363e..20977f9d4834 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java @@ -16,10 +16,10 @@ */ package org.apache.hadoop.ozone.om; -import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -29,28 +29,30 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; -import java.util.UUID; import java.util.List; +import java.util.stream.Stream; + +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.apache.hadoop.ozone.OzoneConfigKeys. - OZONE_FS_ITERATE_BATCH_SIZE; +import static org.junit.jupiter.params.provider.Arguments.arguments; /** * A simple test that asserts that list status output is sorted. 
*/ @Timeout(1200) public class TestListStatus { + private static final Logger LOG = LoggerFactory.getLogger(TestListStatus.class); private static MiniOzoneCluster cluster = null; - private static OzoneConfiguration conf; - private static String clusterId; - private static String scmId; - private static String omId; private static OzoneBucket fsoOzoneBucket; private static OzoneClient client; @@ -58,18 +60,14 @@ public class TestListStatus { * Create a MiniOzoneCluster for testing. *

* - * @throws IOException + * @throws IOException in case of I/O error */ @BeforeAll public static void init() throws Exception { - conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); - clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - omId = UUID.randomUUID().toString(); - cluster = MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId) - .setScmId(scmId).setOmId(omId).build(); + cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); @@ -77,7 +75,7 @@ public static void init() throws Exception { fsoOzoneBucket = TestDataUtil .createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED); - // Set the number of keys to be processed during batch operate. + // Set the number of keys to be processed during batch operations. conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5); buildNameSpaceTree(fsoOzoneBucket); @@ -91,44 +89,30 @@ public static void teardownClass() { } } - @Test - public void testSortedListStatus() throws Exception { - // a) test if output is sorted - checkKeyList("", "", 1000, 10, false); - - // b) number of keys returns is expected - checkKeyList("", "", 2, 2, false); - - // c) check if full prefix works - checkKeyList("a1", "", 100, 3, false); - - // d) check if full prefix with numEntries work - checkKeyList("a1", "", 2, 2, false); - - // e) check if existing start key >>> - checkKeyList("a1", "a1/a12", 100, 2, false); - - // f) check with non-existing start key - checkKeyList("", "a7", 100, 6, false); - - // g) check if half prefix works - checkKeyList("b", "", 100, 4, true); - - // h) check half prefix with non-existing start key - checkKeyList("b", "b5", 100, 2, true); - - // i) check half prefix with non-existing parent in start key - checkKeyList("b", "c", 100, 0, true); - - // i) check half prefix with non-existing parent in start key - checkKeyList("b", "b/g5", 100, 4, true); - - // i) check half prefix with non-existing parent in start key - checkKeyList("b", "c/g5", 100, 0, true); + @MethodSource("sortedListStatusParametersSource") + @ParameterizedTest(name = "{index} {5}") + public void testSortedListStatus(String keyPrefix, String startKey, int numEntries, int expectedNumKeys, + boolean isPartialPrefix, String testName) throws Exception { + checkKeyList(keyPrefix, startKey, numEntries, expectedNumKeys, isPartialPrefix); + } - // j) check prefix with non-existing prefix key - // and non-existing parent in start key - checkKeyList("a1/a111", "a1/a111/a100", 100, 0, true); + private static Stream<Arguments> sortedListStatusParametersSource() { + return Stream.of( + arguments("", "", 1000, 10, false, "Test if output is sorted"), + arguments("", "", 2, 2, false, "Number of keys returned is as expected"), + arguments("a1", "", 100, 3, false, "Check if the full prefix works"), + arguments("a1", "", 2, 2, false, "Check if full prefix with numEntries works"), + arguments("a1", "a1/a12", 100, 2, false, "Check if an existing start key works"), + arguments("", "a7", 100, 6, false, "Check with a non-existing start key"), + arguments("b", "", 100, 4, true, "Check if half-prefix works"), + arguments("b", "b5", 100, 2, true, "Check half prefix with non-existing start key"), + arguments("b", "c", 100, 0, true, "Check half prefix with non-existing parent in a start key"), + arguments("b", "b/g5", 100, 4, true, "Check half prefix with non-existing parent in a start key"), + arguments("b", "c/g5", 100, 0, 
true, "Check half prefix with non-existing parent in a start key"), + arguments("a1/a111", "a1/a111/a100", 100, 0, true, "Check prefix with a non-existing prefix key\n" + + " and non-existing parent in a start key"), + arguments("a1/a111", null, 100, 0, true, "Check start key is null") + ); } private static void createFile(OzoneBucket bucket, String keyName) @@ -139,6 +123,7 @@ private static void createFile(OzoneBucket bucket, String keyName) oos.flush(); } } + private static void buildNameSpaceTree(OzoneBucket ozoneBucket) throws Exception { /* @@ -180,33 +165,29 @@ private static void buildNameSpaceTree(OzoneBucket ozoneBucket) createFile(ozoneBucket, "/b8"); } - private void checkKeyList(String keyPrefix, String startKey, - long numEntries, int expectedNumKeys, - boolean isPartialPrefix) - throws Exception { + private void checkKeyList(String keyPrefix, String startKey, long numEntries, int expectedNumKeys, + boolean isPartialPrefix) throws Exception { List statuses = fsoOzoneBucket.listStatus(keyPrefix, false, startKey, numEntries, isPartialPrefix); assertEquals(expectedNumKeys, statuses.size()); - System.out.println("BEGIN:::keyPrefix---> " + keyPrefix + ":::---> " + - startKey); + LOG.info("BEGIN:::keyPrefix---> {} :::---> {}", keyPrefix, startKey); for (int i = 0; i < statuses.size() - 1; i++) { OzoneFileStatus stCurr = statuses.get(i); OzoneFileStatus stNext = statuses.get(i + 1); - System.out.println("status:" + stCurr); - assertTrue(stCurr.getPath().compareTo(stNext.getPath()) < 0); + LOG.info("status: {}", stCurr); + assertThat(stCurr.getPath().compareTo(stNext.getPath())).isLessThan(0); } if (!statuses.isEmpty()) { OzoneFileStatus stNext = statuses.get(statuses.size() - 1); - System.out.println("status:" + stNext); + LOG.info("status: {}", stNext); } - System.out.println("END:::keyPrefix---> " + keyPrefix + ":::---> " + - startKey); + LOG.info("END:::keyPrefix---> {}:::---> {}", keyPrefix, startKey); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index 763991d87592..37a726beddb8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.io.PrintStream; import java.io.UnsupportedEncodingException; +import java.net.URI; import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; @@ -71,6 +72,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY; import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_LISTING_PAGE_SIZE; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; @@ -81,6 +83,7 @@ import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED; import static org.apache.hadoop.ozone.om.helpers.BucketLayout.LEGACY; import static org.apache.hadoop.ozone.om.helpers.BucketLayout.OBJECT_STORE; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static 
org.junit.jupiter.api.Assertions.assertNotNull; @@ -738,6 +741,34 @@ public void testLinkBucketOrphan() throws Exception { } } + @Test + @Timeout(10) + public void testListBucket() throws Exception { + final String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + omServiceId; + OzoneConfiguration clientConf = + getClientConfForOFS(hostPrefix, cluster.getConf()); + int pageSize = 20; + clientConf.setInt(OZONE_FS_LISTING_PAGE_SIZE, pageSize); + URI uri = FileSystem.getDefaultUri(clientConf); + clientConf.setBoolean(String.format("fs.%s.impl.disable.cache", uri.getScheme()), true); + OzoneFsShell shell = new OzoneFsShell(clientConf); + + String volName = "testlistbucket"; + int numBuckets = pageSize; + + try { + generateBuckets("/" + volName, numBuckets); + out.reset(); + int res = ToolRunner.run(shell, new String[]{"-ls", "/" + volName}); + assertEquals(0, res); + String r = out.toString(DEFAULT_ENCODING); + assertThat(r).matches("(?s)^Found " + numBuckets + " items.*"); + + } finally { + shell.close(); + } + } + @Test public void testDeleteTrashNoSkipTrash() throws Exception { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java index 93b7c92902b6..c0872db0fd61 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java @@ -22,6 +22,8 @@ import java.util.LinkedHashMap; import java.util.Map; +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditAction; import org.apache.hadoop.ozone.audit.AuditMessage; @@ -68,10 +70,16 @@ default Map<String, String> buildKeyArgsAuditMap(KeyArgs keyArgs) { auditMap.put(OzoneConsts.KEY, keyArgs.getKeyName()); auditMap.put(OzoneConsts.DATA_SIZE, String.valueOf(keyArgs.getDataSize())); - auditMap.put(OzoneConsts.REPLICATION_TYPE, - (keyArgs.getType() != null) ? keyArgs.getType().name() : null); - auditMap.put(OzoneConsts.REPLICATION_FACTOR, - (keyArgs.getFactor() != null) ? 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java
index 93b7c92902b6..c0872db0fd61 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java
@@ -22,6 +22,8 @@
 import java.util.LinkedHashMap;
 import java.util.Map;
 
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.AuditAction;
 import org.apache.hadoop.ozone.audit.AuditMessage;
@@ -68,10 +70,16 @@ default Map<String, String> buildKeyArgsAuditMap(KeyArgs keyArgs) {
       auditMap.put(OzoneConsts.KEY, keyArgs.getKeyName());
       auditMap.put(OzoneConsts.DATA_SIZE,
           String.valueOf(keyArgs.getDataSize()));
-      auditMap.put(OzoneConsts.REPLICATION_TYPE,
-          (keyArgs.getType() != null) ? keyArgs.getType().name() : null);
-      auditMap.put(OzoneConsts.REPLICATION_FACTOR,
-          (keyArgs.getFactor() != null) ? keyArgs.getFactor().name() : null);
+      if (keyArgs.hasType()) {
+        auditMap.put(OzoneConsts.REPLICATION_TYPE, keyArgs.getType().name());
+      }
+      if (keyArgs.hasFactor() && keyArgs.getFactor() != HddsProtos.ReplicationFactor.ZERO) {
+        auditMap.put(OzoneConsts.REPLICATION_FACTOR, keyArgs.getFactor().name());
+      }
+      if (keyArgs.hasEcReplicationConfig()) {
+        auditMap.put(OzoneConsts.REPLICATION_CONFIG,
+            ECReplicationConfig.toString(keyArgs.getEcReplicationConfig()));
+      }
       return auditMap;
     }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
index c09d87af1d56..b031c6106ef2 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
@@ -735,6 +735,8 @@ protected OmKeyInfo prepareFileInfo(
       dbKeyInfo.getMetadata().clear();
       dbKeyInfo.getMetadata().putAll(KeyValueUtil.getFromProtobuf(
           keyArgs.getMetadataList()));
+
+      dbKeyInfo.setFileEncryptionInfo(encInfo);
       return dbKeyInfo;
     }
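The OMKeyRequest hunk above makes a key overwrite carry the freshly generated FileEncryptionInfo onto the reused record, alongside the metadata refresh that was already there; without it, an overwritten encrypted key could end up with missing or stale encryption metadata. A simplified stand-in sketch of the fixed flow (KeyRecord is hypothetical; the real code mutates OmKeyInfo inside prepareFileInfo):

import java.util.HashMap;
import java.util.Map;

final class KeyRecord {
  final Map<String, String> metadata = new HashMap<>();
  Object fileEncryptionInfo; // stand-in for Hadoop's FileEncryptionInfo

  /** Reuses this record for an overwrite: new metadata AND new encryption info. */
  KeyRecord refreshForOverwrite(Map<String, String> newMetadata, Object newEncInfo) {
    metadata.clear();
    metadata.putAll(newMetadata);
    // The one-line fix: carry the freshly generated encryption info onto the reused record.
    fileEncryptionInfo = newEncInfo;
    return this;
  }
}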
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java
index 25b225eeba52..d3ded0a9ae58 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java
@@ -34,6 +34,8 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotPurgeRequest;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.HashMap;
@@ -48,6 +50,8 @@
  */
 public class OMSnapshotPurgeRequest extends OMClientRequest {
 
+  private static final Logger LOG = LoggerFactory.getLogger(OMSnapshotPurgeRequest.class);
+
   public OMSnapshotPurgeRequest(OMRequest omRequest) {
     super(omRequest);
   }
@@ -94,6 +98,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
         SnapshotInfo fromSnapshot = omMetadataManager.getSnapshotInfoTable()
             .get(snapTableKey);
+        if (fromSnapshot == null) {
+          // The snapshot may have been purged in a previous iteration of SnapshotDeletingService.
+          LOG.warn("The snapshot {} is no longer in the snapshot table. It may have been removed "
+              + "by a previous snapshot purge request.", snapTableKey);
+          continue;
+        }
+
         SnapshotInfo nextSnapshot = SnapshotUtils
             .getNextActiveSnapshot(fromSnapshot, snapshotChainManager, omSnapshotManager);
@@ -102,8 +113,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
             trxnLogIndex, updatedSnapInfos, true);
         updateSnapshotChainAndCache(omMetadataManager, fromSnapshot,
             trxnLogIndex, updatedPathPreviousAndGlobalSnapshots);
+        // Remove and close the snapshot's RocksDB instance from SnapshotCache.
         ozoneManager.getOmSnapshotManager().getSnapshotCache()
             .invalidate(snapTableKey);
+        // Update the SnapshotInfoTable cache.
+        ozoneManager.getMetadataManager().getSnapshotInfoTable()
+            .addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()), CacheValue.get(trxnLogIndex));
       }
 
       omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(),
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java
index 863bf5f62b8b..e77543b1548f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java
@@ -84,8 +84,9 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager,
     updateSnapInfo(metadataManager, batchOperation,
         updatedPreviousAndGlobalSnapInfos);
     for (String dbKey: snapshotDbKeys) {
+      // Skip the cache here because the snapshot was already purged from the cache in OMSnapshotPurgeRequest.
       SnapshotInfo snapshotInfo = omMetadataManager
-          .getSnapshotInfoTable().get(dbKey);
+          .getSnapshotInfoTable().getSkipCache(dbKey);
       // Even though snapshot existed when SnapshotDeletingService
       // was running. It might be deleted in the previous run and
       // the DB might not have been updated yet. So snapshotInfo
@@ -96,8 +97,7 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager,
       // Delete Snapshot checkpoint directory.
       deleteCheckpointDirectory(omMetadataManager, snapshotInfo);
-      omMetadataManager.getSnapshotInfoTable().deleteWithBatch(batchOperation,
-          dbKey);
+      omMetadataManager.getSnapshotInfoTable().deleteWithBatch(batchOperation, dbKey);
     }
   }
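A minimal stand-in sketch of why addToDBBatch above switches to getSkipCache() (the CachedTable class below is hypothetical; the real code uses Ozone's typed table with CacheKey/CacheValue): once validateAndUpdateCache records the purge in the table cache, a cache-aware get() would report the snapshot as already gone before the double buffer has flushed the delete to RocksDB, so the response must read past the cache to find the row it still has to delete in the batch.

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

final class CachedTable<K, V> {
  private final Map<K, V> db = new HashMap<>();
  private final Map<K, Optional<V>> cache = new HashMap<>(); // Optional.empty() == pending delete

  void markDeletedInCache(K key) {
    cache.put(key, Optional.empty());
  }

  /** Cache-aware read: honors pending deletes, as the request path expects. */
  V get(K key) {
    Optional<V> cached = cache.get(key);
    return cached != null ? cached.orElse(null) : db.get(key);
  }

  /** What the purge response uses: read straight from the DB, ignoring the cache. */
  V getSkipCache(K key) {
    return db.get(key);
  }
}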
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java
index 89823995d0cd..2041fa791a76 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java
@@ -40,6 +40,7 @@
 import java.util.UUID;
 
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_SNAPSHOT_ERROR;
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
 import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
@@ -148,6 +149,10 @@ public static SnapshotInfo getNextActiveSnapshot(SnapshotInfo snapInfo,
     // If the snapshot is deleted in the previous run, then the in-memory
     // SnapshotChainManager might throw NoSuchElementException as the snapshot
     // is removed in-memory but OMDoubleBuffer has not flushed yet.
+    if (snapInfo == null) {
+      throw new OMException("Snapshot Info is null. Cannot get the next snapshot",
+          INVALID_SNAPSHOT_ERROR);
+    }
+
     try {
       while (chainManager.hasNextPathSnapshot(snapInfo.getSnapshotPath(),
           snapInfo.getSnapshotId())) {
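The guard added above turns a latent NullPointerException into a typed OMException when the caller hands in a snapshot that has already been purged. A compact sketch of the same fail-fast pattern, reusing the OMException, INVALID_SNAPSHOT_ERROR, and SnapshotInfo types from the hunk (the helper name checkNotPurged is illustrative):

import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_SNAPSHOT_ERROR;

import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;

final class SnapshotGuards {

  private SnapshotGuards() { }

  /** Fails fast with a domain error instead of an NPE deeper in chain traversal. */
  static SnapshotInfo checkNotPurged(SnapshotInfo snapInfo) throws OMException {
    if (snapInfo == null) {
      throw new OMException("Snapshot Info is null. Cannot get the next snapshot",
          INVALID_SNAPSHOT_ERROR);
    }
    return snapInfo;
  }
}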
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java
index 380619d6b87c..54dd96e6c883 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java
@@ -69,6 +69,7 @@
 import static org.junit.jupiter.api.Assertions.assertFalse;
 import static org.junit.jupiter.api.Assertions.assertNotEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyString;
@@ -79,8 +80,6 @@
  * Tests OMSnapshotPurgeRequest class.
  */
 public class TestOMSnapshotPurgeRequestAndResponse {
-
-  private BatchOperation batchOperation;
   private List checkpointPaths = new ArrayList<>();
 
   private OzoneManager ozoneManager;
@@ -179,7 +178,6 @@ private void createSnapshotCheckpoint(String snapshotName) throws Exception {
   private void createSnapshotCheckpoint(String volume,
       String bucket,
       String snapshotName) throws Exception {
-    batchOperation = omMetadataManager.getStore().initBatchOperation();
     OMRequest omRequest = OMRequestTestUtils
         .createSnapshotRequest(volume, bucket, snapshotName);
     // Pre-Execute OMSnapshotCreateRequest.
@@ -190,9 +188,10 @@ private void createSnapshotCheckpoint(String volume,
     OMSnapshotCreateResponse omClientResponse = (OMSnapshotCreateResponse)
         omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1);
     // Add to batch and commit to DB.
-    omClientResponse.addToDBBatch(omMetadataManager, batchOperation);
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
-    batchOperation.close();
+    try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) {
+      omClientResponse.addToDBBatch(omMetadataManager, batchOperation);
+      omMetadataManager.getStore().commitBatchOperation(batchOperation);
+    }
 
     String key = SnapshotInfo.getTableKey(volume, bucket, snapshotName);
     SnapshotInfo snapshotInfo =
@@ -228,9 +227,10 @@ private void purgeSnapshots(OMRequest snapshotPurgeRequest)
         omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L);
 
     // Commit to DB.
-    batchOperation = omMetadataManager.getStore().initBatchOperation();
-    omSnapshotPurgeResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
-    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+    try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) {
+      omSnapshotPurgeResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
+      omMetadataManager.getStore().commitBatchOperation(batchOperation);
+    }
   }
 
   @Test
@@ -240,7 +240,20 @@ public void testValidateAndUpdateCache() throws Exception {
     assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty());
     OMRequest snapshotPurgeRequest = createPurgeKeysRequest(
         snapshotDbKeysToPurge);
-    purgeSnapshots(snapshotPurgeRequest);
+
+    OMSnapshotPurgeRequest omSnapshotPurgeRequest = preExecute(snapshotPurgeRequest);
+
+    OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse)
+        omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L);
+
+    for (String snapshotTableKey: snapshotDbKeysToPurge) {
+      assertNull(omMetadataManager.getSnapshotInfoTable().get(snapshotTableKey));
+    }
+
+    try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) {
+      omSnapshotPurgeResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
+      omMetadataManager.getStore().commitBatchOperation(batchOperation);
+    }
 
     // Check if the entries are deleted.
     assertTrue(omMetadataManager.getSnapshotInfoTable().isEmpty());
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index 2fa5f70444e7..506862f48862 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -876,9 +876,9 @@ public List listStatus(String pathStr, boolean recursive,
     }
     OFSPath ofsStartPath = new OFSPath(startPath, config);
     if (ofsPath.isVolume()) {
-      String startBucket = ofsStartPath.getBucketName();
+      String startBucketPath = ofsStartPath.getNonKeyPath();
       return listStatusVolume(ofsPath.getVolumeName(),
-          recursive, startBucket, numEntries, uri, workingDir, username);
+          recursive, startBucketPath, numEntries, uri, workingDir, username);
     }
 
     if (ofsPath.isSnapshotPath()) {
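A hedged sketch of what the one-line change above buys: when a volume is listed, the continuation start key handed to listStatusVolume should be the bucket's full non-key path rather than the bare bucket name, otherwise paging across many buckets can resume from the wrong position. OFSPath and its getBucketName()/getNonKeyPath() accessors appear in the hunk; the sample path and the printed values are assumptions:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OFSPath;

public final class StartKeyExample {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    OFSPath start = new OFSPath("/vol1/bucket2/some/key", conf);
    // Old start key: just the bucket name, ambiguous as a listing cursor.
    System.out.println(start.getBucketName());   // expected: "bucket2"
    // New start key: the full volume/bucket path, a proper continuation point.
    System.out.println(start.getNonKeyPath());   // expected: "/vol1/bucket2"
  }
}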
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml
index d54bc8663918..1dab3d583519 100644
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml
+++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml
@@ -3894,7 +3894,7 @@ packages:
   /axios/0.27.2:
     resolution: {integrity: sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==}
     dependencies:
-      follow-redirects: 1.15.2
+      follow-redirects: 1.15.6
       form-data: 4.0.0
     transitivePeerDependencies:
       - debug
@@ -4224,6 +4224,7 @@ packages:
   /bindings/1.5.0:
     resolution: {integrity: sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==}
+    requiresBuild: true
     dependencies:
       file-uri-to-path: 1.0.0
     dev: false
@@ -7592,6 +7593,7 @@ packages:
   /file-uri-to-path/1.0.0:
     resolution: {integrity: sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==}
+    requiresBuild: true
     dev: false
     optional: true
@@ -7742,8 +7744,8 @@ packages:
       readable-stream: 2.3.8
     dev: false
 
-  /follow-redirects/1.15.2:
-    resolution: {integrity: sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==}
+  /follow-redirects/1.15.6:
+    resolution: {integrity: sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==}
     engines: {node: '>=4.0'}
     peerDependencies:
       debug: '*'
@@ -7752,8 +7754,8 @@ packages:
       optional: true
     dev: false
 
-  /follow-redirects/1.15.2_debug@4.3.4:
-    resolution: {integrity: sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==}
+  /follow-redirects/1.15.6_debug@4.3.4:
+    resolution: {integrity: sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==}
     engines: {node: '>=4.0'}
     peerDependencies:
       debug: '*'
@@ -8990,7 +8992,7 @@ packages:
     engines: {node: '>=8.0.0'}
     dependencies:
       eventemitter3: 4.0.7
-      follow-redirects: 1.15.2_debug@4.3.4
+      follow-redirects: 1.15.6_debug@4.3.4
       requires-port: 1.0.0
     transitivePeerDependencies:
       - debug
@@ -11433,6 +11435,7 @@ packages:
   /nan/2.17.0:
     resolution: {integrity: sha512-2ZTgtl0nJsO0KQCjEpxcIr5D+Yv90plTitZt9JBfQvVJDS5seMl3FOvsh3+9CoYWXf/1l5OaZzzF6nDm4cagaQ==}
+    requiresBuild: true
     dev: false
     optional: true
diff --git a/pom.xml b/pom.xml
index 7bbd3eeddc44..a78927e42f27 100644
--- a/pom.xml
+++ b/pom.xml
@@ -117,7 +117,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     1.15
     3.2.2
     1.26.0
-    2.8.0
+    2.10.1
     1.5.2-5
     1.0.13
     2.11.0
@@ -226,7 +226,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     1.24
     4.13.1
     5.10.1
-    3.7.2
+    3.8.4
     0.5.1
@@ -308,11 +308,13 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     1.9.3
     1.1.8
     1.4.9
-    1.0.1
+    1.4.0
     5.1.0
     1.1.10.5
+    <hadoop-shaded-guava.version>1.2.0</hadoop-shaded-guava.version>
+    <com.nimbusds.nimbus-jose-jwt.version>9.37.2</com.nimbusds.nimbus-jose-jwt.version>
@@ -1222,6 +1224,16 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
         <groupId>org.apache.zookeeper</groupId>
         <artifactId>zookeeper</artifactId>
         <version>${zookeeper.version}</version>
+        <exclusions>
+          <exclusion>
+            <groupId>ch.qos.logback</groupId>
+            <artifactId>logback-core</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>ch.qos.logback</groupId>
+            <artifactId>logback-classic</artifactId>
+          </exclusion>
+        </exclusions>
       </dependency>
       <dependency>
         <groupId>org.slf4j</groupId>
@@ -1561,6 +1573,16 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
         <artifactId>snappy-java</artifactId>
         <version>${snappy-java.version}</version>
       </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop.thirdparty</groupId>
+        <artifactId>hadoop-shaded-guava</artifactId>
+        <version>${hadoop-shaded-guava.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>com.nimbusds</groupId>
+        <artifactId>nimbus-jose-jwt</artifactId>
+        <version>${com.nimbusds.nimbus-jose-jwt.version}</version>
+      </dependency>