> deletedBlocksMap = new HashMap<>();
- deletedBlocksMap.put(containerID, new LinkedList<>());
+ Random random = new Random();
+ long localId = random.nextLong();
+ deletedBlocksMap.put(containerID, new LinkedList<>(
+ Collections.singletonList(localId)));
addTransactions(deletedBlocksMap, true);
+ blocks = getTransactions(txNum * BLOCKS_PER_TXN * ONE);
+ // Only the newly added blocks will be sent, as previously sent transactions
+ // that have not yet timed out will not be resent.
+ Assertions.assertEquals(1, blocks.size());
+ Assertions.assertEquals(1, blocks.get(0).getLocalIDCount());
+ Assertions.assertEquals(blocks.get(0).getLocalID(0), localId);
+ // Let the delete transactions already sent by SCM time out waiting for
+ // the DN reply, thus allowing the transactions to be resent.
+ deletedBlockLog.setScmCommandTimeoutMs(-1L);
// get should return two transactions for the same container
blocks = getTransactions(txNum * BLOCKS_PER_TXN * ONE);
Assertions.assertEquals(2, blocks.size());
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestSCMBlockDeletingService.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestSCMBlockDeletingService.java
new file mode 100644
index 000000000000..3bd7ad00f6a8
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestSCMBlockDeletingService.java
@@ -0,0 +1,177 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.block;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.scm.ScmConfig;
+import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.ha.SCMContext;
+import org.apache.hadoop.hdds.scm.ha.SCMServiceManager;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.node.NodeStatus;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
+import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.mockito.ArgumentCaptor;
+
+import java.time.Clock;
+import java.time.ZoneOffset;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.UUID;
+import java.util.stream.Collectors;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyInt;
+import static org.mockito.ArgumentMatchers.anySet;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test SCMBlockDeletingService.
+ */
+public class TestSCMBlockDeletingService {
+ private SCMBlockDeletingService service;
+ private EventPublisher eventPublisher;
+ private List<DatanodeDetails> datanodeDetails;
+ private OzoneConfiguration conf;
+ private NodeManager nodeManager;
+ private ScmBlockDeletingServiceMetrics metrics;
+
+ @BeforeEach
+ public void setup() throws Exception {
+ nodeManager = mock(NodeManager.class);
+ eventPublisher = mock(EventPublisher.class);
+ conf = new OzoneConfiguration();
+ metrics = ScmBlockDeletingServiceMetrics.create();
+ when(nodeManager.getTotalDatanodeCommandCount(any(),
+ any())).thenReturn(0);
+ SCMServiceManager scmServiceManager = mock(SCMServiceManager.class);
+ SCMContext scmContext = mock(SCMContext.class);
+
+ DatanodeDeletedBlockTransactions ddbt =
+ new DatanodeDeletedBlockTransactions();
+ DatanodeDetails datanode1 = MockDatanodeDetails.randomDatanodeDetails();
+ DatanodeDetails datanode2 = MockDatanodeDetails.randomDatanodeDetails();
+ DatanodeDetails datanode3 = MockDatanodeDetails.randomDatanodeDetails();
+ datanodeDetails = Arrays.asList(datanode1, datanode2, datanode3);
+ when(nodeManager.getNodes(NodeStatus.inServiceHealthy())).thenReturn(
+ datanodeDetails);
+ DeletedBlocksTransaction tx1 = createTestDeleteTxn(1, Arrays.asList(1L), 1);
+ ddbt.addTransactionToDN(datanode1.getUuid(), tx1);
+ ddbt.addTransactionToDN(datanode2.getUuid(), tx1);
+ ddbt.addTransactionToDN(datanode3.getUuid(), tx1);
+ DeletedBlockLog mockDeletedBlockLog = mock(DeletedBlockLog.class);
+ when(mockDeletedBlockLog.getTransactions(
+ anyInt(), anySet())).thenReturn(ddbt);
+
+ service = spy(new SCMBlockDeletingService(
+ mockDeletedBlockLog, nodeManager, eventPublisher, scmContext,
+ scmServiceManager, conf, conf.getObject(ScmConfig.class), metrics, Clock.system(
+ ZoneOffset.UTC), mock(ReconfigurationHandler.class)));
+ when(service.shouldRun()).thenReturn(true);
+ }
+
+ @AfterEach
+ public void stop() {
+ service.stop();
+ ScmBlockDeletingServiceMetrics.unRegister();
+ }
+
+ @Test
+ public void testCall() throws Exception {
+ callDeletedBlockTransactionScanner();
+
+ ArgumentCaptor<CommandForDatanode> argumentCaptor =
+ ArgumentCaptor.forClass(CommandForDatanode.class);
+
+ // Three Datanodes are healthy and in-service, and the task queue is empty,
+ // so the transaction will be sent to all three Datanodes
+ verify(eventPublisher, times(3)).fireEvent(
+ eq(SCMEvents.DATANODE_COMMAND), argumentCaptor.capture());
+ List<CommandForDatanode> actualCommands = argumentCaptor.getAllValues();
+ List<UUID> actualDnIds = actualCommands.stream()
+ .map(CommandForDatanode::getDatanodeId)
+ .collect(Collectors.toList());
+ Set<UUID> expectedDnIdsSet = datanodeDetails.stream()
+ .map(DatanodeDetails::getUuid).collect(Collectors.toSet());
+
+ assertEquals(expectedDnIdsSet, new HashSet<>(actualDnIds));
+ assertEquals(datanodeDetails.size(),
+ metrics.getNumBlockDeletionCommandSent());
+ // Each Command has one Transaction
+ assertEquals(datanodeDetails.size() * 1,
+ metrics.getNumBlockDeletionTransactionSent());
+ }
+
+ private void callDeletedBlockTransactionScanner() throws Exception {
+ service.getTasks().poll().call();
+ }
+
+ @Test
+ public void testLimitCommandSending() throws Exception {
+ DatanodeConfiguration dnConf =
+ conf.getObject(DatanodeConfiguration.class);
+ int pendingCommandLimit = dnConf.getBlockDeleteQueueLimit();
+
+ // The number of commands pending on all Datanodes has reached the limit.
+ when(nodeManager.getTotalDatanodeCommandCount(any(),
+ any())).thenReturn(pendingCommandLimit);
+ assertEquals(0,
+ service.getDatanodesWithinCommandLimit(datanodeDetails).size());
+
+ // The number of commands pending on all Datanodes is 0
+ when(nodeManager.getTotalDatanodeCommandCount(any(),
+ any())).thenReturn(0);
+ assertEquals(datanodeDetails.size(),
+ service.getDatanodesWithinCommandLimit(datanodeDetails).size());
+
+ // The number of commands pending on the first Datanode has reached the limit.
+ DatanodeDetails fullDatanode = datanodeDetails.get(0);
+ when(nodeManager.getTotalDatanodeCommandCount(fullDatanode,
+ Type.deleteBlocksCommand)).thenReturn(pendingCommandLimit);
+ Set<DatanodeDetails> includeNodes =
+ service.getDatanodesWithinCommandLimit(datanodeDetails);
+ assertEquals(datanodeDetails.size() - 1,
+ includeNodes.size());
+ assertFalse(includeNodes.contains(fullDatanode));
+ }
+
+ private DeletedBlocksTransaction createTestDeleteTxn(
+ long txnID, List<Long> blocks, long containerID) {
+ return DeletedBlocksTransaction.newBuilder().setTxID(txnID)
+ .setContainerID(containerID).addAllLocalID(blocks).setCount(0).build();
+ }
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestSCMDeleteBlocksCommandStatusManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestSCMDeleteBlocksCommandStatusManager.java
new file mode 100644
index 000000000000..888cb42fd7de
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestSCMDeleteBlocksCommandStatusManager.java
@@ -0,0 +1,256 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.block;
+
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.BeforeEach;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+
+import static org.apache.hadoop.hdds.scm.block.SCMDeletedBlockTransactionStatusManager.SCMDeleteBlocksCommandStatusManager;
+import static org.apache.hadoop.hdds.scm.block.SCMDeletedBlockTransactionStatusManager.SCMDeleteBlocksCommandStatusManager.CmdStatus.SENT;
+import static org.apache.hadoop.hdds.scm.block.SCMDeletedBlockTransactionStatusManager.SCMDeleteBlocksCommandStatusManager.CmdStatus.TO_BE_SENT;
+import static org.apache.hadoop.hdds.scm.block.SCMDeletedBlockTransactionStatusManager.SCMDeleteBlocksCommandStatusManager.CmdStatusData;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+
+/**
+ * A test for SCMDeleteBlocksCommandStatusManager.
+ */
+public class TestSCMDeleteBlocksCommandStatusManager {
+
+ private SCMDeleteBlocksCommandStatusManager manager;
+ private UUID dnId1;
+ private UUID dnId2;
+ private long scmCmdId1;
+ private long scmCmdId2;
+ private long scmCmdId3;
+ private long scmCmdId4;
+ private Set<Long> deletedBlocksTxIds1;
+ private Set<Long> deletedBlocksTxIds2;
+ private Set<Long> deletedBlocksTxIds3;
+ private Set<Long> deletedBlocksTxIds4;
+
+ @BeforeEach
+ public void setup() throws Exception {
+ manager = new SCMDeleteBlocksCommandStatusManager();
+ // Create test data
+ dnId1 = UUID.randomUUID();
+ dnId2 = UUID.randomUUID();
+ scmCmdId1 = 1L;
+ scmCmdId2 = 2L;
+ scmCmdId3 = 3L;
+ scmCmdId4 = 4L;
+ deletedBlocksTxIds1 = new HashSet<>();
+ deletedBlocksTxIds1.add(100L);
+ deletedBlocksTxIds2 = new HashSet<>();
+ deletedBlocksTxIds2.add(200L);
+ deletedBlocksTxIds3 = new HashSet<>();
+ deletedBlocksTxIds3.add(300L);
+ deletedBlocksTxIds4 = new HashSet<>();
+ deletedBlocksTxIds4.add(400L);
+ }
+
+ @Test
+ public void testRecordScmCommand() {
+ CmdStatusData statusData =
+ SCMDeleteBlocksCommandStatusManager.createScmCmdStatusData(
+ dnId1, scmCmdId1, deletedBlocksTxIds1);
+
+ manager.recordScmCommand(statusData);
+
+ assertNotNull(manager.getScmCmdStatusRecord().get(dnId1));
+ assertEquals(1, manager.getScmCmdStatusRecord().get(dnId1).size());
+ CmdStatusData cmdStatusData =
+ manager.getScmCmdStatusRecord().get(dnId1).get(scmCmdId1);
+ assertNotNull(cmdStatusData);
+ assertEquals(dnId1, statusData.getDnId());
+ assertEquals(scmCmdId1, statusData.getScmCmdId());
+ assertEquals(deletedBlocksTxIds1, statusData.getDeletedBlocksTxIds());
+ // The default status is `CmdStatus.TO_BE_SENT`
+ assertEquals(TO_BE_SENT, statusData.getStatus());
+ }
+
+ @Test
+ public void testOnSent() {
+ CmdStatusData statusData =
+ SCMDeleteBlocksCommandStatusManager.createScmCmdStatusData(
+ dnId1, scmCmdId1, deletedBlocksTxIds1);
+ manager.recordScmCommand(statusData);
+
+ Map<Long, CmdStatusData> dnStatusRecord =
+ manager.getScmCmdStatusRecord().get(dnId1);
+ // After the Command is sent by SCM, the status of the Command
+ // will change from TO_BE_SENT to SENT
+ assertEquals(TO_BE_SENT, dnStatusRecord.get(scmCmdId1).getStatus());
+ manager.onSent(dnId1, scmCmdId1);
+ assertEquals(SENT, dnStatusRecord.get(scmCmdId1).getStatus());
+ }
+
+ @Test
+ public void testUpdateStatusByDNCommandStatus() {
+ // Test all status updates driven by the Datanode heartbeat report.
+ // SENT -> PENDING_EXECUTED: The DeleteBlocksCommand is sent and received
+ // by the Datanode, but has not been executed yet; the command is
+ // waiting to be executed.
+
+ // SENT -> NEED_RESEND: The DeleteBlocksCommand is sent and lost before
+ // it is received by the DN.
+ // SENT -> EXECUTED: The DeleteBlocksCommand has been sent to Datanode,
+ // executed by DN, and executed successfully.
+ //
+ // PENDING_EXECUTED -> PENDING_EXECUTED: The DeleteBlocksCommand continues
+ // to wait to be executed by Datanode.
+ // PENDING_EXECUTED -> NEED_RESEND: The DeleteBlocksCommand waited for a
+ // while and was executed, but the execution failed; or the
+ // DeleteBlocksCommand was lost while waiting (such as a Datanode restart).
+ //
+ // PENDING_EXECUTED -> EXECUTED: The Command waits for a period of
+ // time on the DN and is executed successfully.
+
+ recordAndSentCommand(manager, dnId1,
+ Arrays.asList(scmCmdId1, scmCmdId2, scmCmdId3, scmCmdId4),
+ Arrays.asList(deletedBlocksTxIds1, deletedBlocksTxIds2,
+ deletedBlocksTxIds3, deletedBlocksTxIds4));
+
+ Map<Long, CmdStatusData> dnStatusRecord =
+ manager.getScmCmdStatusRecord().get(dnId1);
+ assertEquals(SENT, dnStatusRecord.get(scmCmdId1).getStatus());
+ assertEquals(SENT, dnStatusRecord.get(scmCmdId2).getStatus());
+ assertEquals(SENT, dnStatusRecord.get(scmCmdId3).getStatus());
+ assertEquals(SENT, dnStatusRecord.get(scmCmdId4).getStatus());
+
+ // SENT -> PENDING_EXECUTED
+ manager.updateStatusByDNCommandStatus(dnId1, scmCmdId1,
+ StorageContainerDatanodeProtocolProtos.CommandStatus.Status.PENDING);
+ // SENT -> EXECUTED
+ manager.updateStatusByDNCommandStatus(dnId1, scmCmdId2,
+ StorageContainerDatanodeProtocolProtos.CommandStatus.Status.EXECUTED);
+ // SENT -> NEED_RESEND
+ manager.updateStatusByDNCommandStatus(dnId1, scmCmdId3,
+ StorageContainerDatanodeProtocolProtos.CommandStatus.Status.FAILED);
+ // SENT -> PENDING_EXECUTED
+ manager.updateStatusByDNCommandStatus(dnId1, scmCmdId4,
+ StorageContainerDatanodeProtocolProtos.CommandStatus.Status.PENDING);
+
+ assertEquals(SENT, dnStatusRecord.get(scmCmdId1).getStatus());
+ assertNull(dnStatusRecord.get(scmCmdId2));
+ assertNull(dnStatusRecord.get(scmCmdId3));
+ assertEquals(SENT, dnStatusRecord.get(scmCmdId4).getStatus());
+ }
+
+ @Test
+ public void testCleanSCMCommandForDn() {
+ // Transactions in states EXECUTED and NEED_RESEND will be cleaned up
+ // directly, while transactions in states PENDING_EXECUTED and SENT
+ // will be cleaned up after timeout
+ recordAndSentCommand(manager, dnId1,
+ Arrays.asList(scmCmdId1, scmCmdId2, scmCmdId3, scmCmdId4),
+ Arrays.asList(deletedBlocksTxIds1, deletedBlocksTxIds2,
+ deletedBlocksTxIds3, deletedBlocksTxIds4));
+
+ // SENT -> PENDING_EXECUTED
+ manager.updateStatusByDNCommandStatus(dnId1, scmCmdId1,
+ StorageContainerDatanodeProtocolProtos.CommandStatus.Status.PENDING);
+ // SENT -> EXECUTED
+ manager.updateStatusByDNCommandStatus(dnId1, scmCmdId2,
+ StorageContainerDatanodeProtocolProtos.CommandStatus.Status.EXECUTED);
+ // SENT -> NEED_RESEND
+ manager.updateStatusByDNCommandStatus(dnId1, scmCmdId3,
+ StorageContainerDatanodeProtocolProtos.CommandStatus.Status.FAILED);
+
+ Map<Long, CmdStatusData> dnStatusRecord =
+ manager.getScmCmdStatusRecord().get(dnId1);
+ assertNotNull(dnStatusRecord.get(scmCmdId1));
+ assertNull(dnStatusRecord.get(scmCmdId2));
+ assertNull(dnStatusRecord.get(scmCmdId3));
+ assertNotNull(dnStatusRecord.get(scmCmdId4));
+
+ manager.cleanTimeoutSCMCommand(dnId1, Long.MAX_VALUE);
+
+ // scmCmdId1 is PENDING_EXECUTED and will only be cleaned up after it times out
+ assertNotNull(dnStatusRecord.get(scmCmdId1));
+ assertNull(dnStatusRecord.get(scmCmdId3));
+ assertNull(dnStatusRecord.get(scmCmdId2));
+ // scmCmdId4 is SENT and will only be cleaned up after it times out
+ assertNotNull(dnStatusRecord.get(scmCmdId4));
+
+ manager.cleanTimeoutSCMCommand(dnId1, -1);
+ assertNull(dnStatusRecord.get(scmCmdId1));
+ assertNull(dnStatusRecord.get(scmCmdId4));
+ }
+
+ @Test
+ public void testCleanAllTimeoutSCMCommand() {
+ // Test that all EXECUTED and NEED_RESEND statuses in the DNs will be cleaned up
+
+ // Transactions in states EXECUTED and NEED_RESEND will be cleaned up
+ // directly, while transactions in states PENDING_EXECUTED and SENT
+ // will be cleaned up after timeout
+ recordAndSentCommand(manager, dnId1, Arrays.asList(scmCmdId1),
+ Arrays.asList(deletedBlocksTxIds1));
+ recordAndSentCommand(manager, dnId2, Arrays.asList(scmCmdId2),
+ Arrays.asList(deletedBlocksTxIds2));
+
+ Map<Long, CmdStatusData> dn1StatusRecord =
+ manager.getScmCmdStatusRecord().get(dnId1);
+ Map<Long, CmdStatusData> dn2StatusRecord =
+ manager.getScmCmdStatusRecord().get(dnId2);
+
+ // Only scmCmdId1 gets a heartbeat report, so its status is updated;
+ // scmCmdId2 remains in SENT status.
+ // SENT -> PENDING_EXECUTED
+ manager.updateStatusByDNCommandStatus(dnId1, scmCmdId1,
+ StorageContainerDatanodeProtocolProtos.CommandStatus.Status.PENDING);
+
+ manager.cleanAllTimeoutSCMCommand(Long.MAX_VALUE);
+ // scmCmdId1 is PENDING_EXECUTED and will only be cleaned up after it times out
+ assertNotNull(dn1StatusRecord.get(scmCmdId1));
+ assertNotNull(dn2StatusRecord.get(scmCmdId2));
+
+ // scmCmdId2 is SENT and will be cleaned up once it times out
+ manager.cleanAllTimeoutSCMCommand(-1);
+ assertNull(dn1StatusRecord.get(scmCmdId1));
+ assertNull(dn2StatusRecord.get(scmCmdId2));
+
+ }
+
+ private void recordAndSentCommand(
+ SCMDeleteBlocksCommandStatusManager statusManager,
+ UUID dnId, List<Long> scmCmdIds, List<Set<Long>> txIds) {
+ assertEquals(scmCmdIds.size(), txIds.size());
+ for (int i = 0; i < scmCmdIds.size(); i++) {
+ long scmCmdId = scmCmdIds.get(i);
+ Set deletedBlocksTxIds = txIds.get(i);
+ CmdStatusData statusData =
+ SCMDeleteBlocksCommandStatusManager.createScmCmdStatusData(
+ dnId, scmCmdId, deletedBlocksTxIds);
+ statusManager.recordScmCommand(statusData);
+ statusManager.onSent(dnId, scmCmdId);
+ }
+ }
+
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index 98638ebe009d..794dedceef06 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -227,7 +227,7 @@ private void populateNodeMetric(DatanodeDetails datanodeDetails, int x) {
NODES[x % NODES.length].capacity - NODES[x % NODES.length].used;
newStat.set(
(NODES[x % NODES.length].capacity),
- (NODES[x % NODES.length].used), remaining);
+ (NODES[x % NODES.length].used), remaining, 0, 100000);
this.nodeMetricMap.put(datanodeDetails, newStat);
aggregateStat.add(newStat);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java
index 4bc3cf43cf6e..56d02dabb5fa 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java
@@ -1207,7 +1207,8 @@ private double createCluster() {
datanodeCapacity = (long) (datanodeUsedSpace / nodeUtilizations.get(i));
}
SCMNodeStat stat = new SCMNodeStat(datanodeCapacity, datanodeUsedSpace,
- datanodeCapacity - datanodeUsedSpace);
+ datanodeCapacity - datanodeUsedSpace, 0,
+ datanodeCapacity - datanodeUsedSpace - 1);
nodesInCluster.get(i).setScmNodeStat(stat);
clusterUsedSpace += datanodeUsedSpace;
clusterCapacity += datanodeCapacity;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java
index 7e734042d883..bb6f17bcc105 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestFindTargetStrategy.java
@@ -56,11 +56,11 @@ public void testFindTargetGreedyByUsage() {
//create three datanodes with different usageinfo
DatanodeUsageInfo dui1 = new DatanodeUsageInfo(MockDatanodeDetails
- .randomDatanodeDetails(), new SCMNodeStat(100, 0, 40));
+ .randomDatanodeDetails(), new SCMNodeStat(100, 0, 40, 0, 30));
DatanodeUsageInfo dui2 = new DatanodeUsageInfo(MockDatanodeDetails
- .randomDatanodeDetails(), new SCMNodeStat(100, 0, 60));
+ .randomDatanodeDetails(), new SCMNodeStat(100, 0, 60, 0, 30));
DatanodeUsageInfo dui3 = new DatanodeUsageInfo(MockDatanodeDetails
- .randomDatanodeDetails(), new SCMNodeStat(100, 0, 80));
+ .randomDatanodeDetails(), new SCMNodeStat(100, 0, 80, 0, 30));
//insert in ascending order
overUtilizedDatanodes.add(dui1);
@@ -98,11 +98,11 @@ public void testFindTargetGreedyByUsage() {
public void testResetPotentialTargets() {
// create three datanodes with different usage infos
DatanodeUsageInfo dui1 = new DatanodeUsageInfo(MockDatanodeDetails
- .randomDatanodeDetails(), new SCMNodeStat(100, 30, 70));
+ .randomDatanodeDetails(), new SCMNodeStat(100, 30, 70, 0, 50));
DatanodeUsageInfo dui2 = new DatanodeUsageInfo(MockDatanodeDetails
- .randomDatanodeDetails(), new SCMNodeStat(100, 20, 80));
+ .randomDatanodeDetails(), new SCMNodeStat(100, 20, 80, 0, 60));
DatanodeUsageInfo dui3 = new DatanodeUsageInfo(MockDatanodeDetails
- .randomDatanodeDetails(), new SCMNodeStat(100, 10, 90));
+ .randomDatanodeDetails(), new SCMNodeStat(100, 10, 90, 0, 70));
List<DatanodeUsageInfo> potentialTargets = new ArrayList<>();
potentialTargets.add(dui1);
@@ -179,18 +179,18 @@ public void testFindTargetGreedyByNetworkTopology() {
List<DatanodeUsageInfo> overUtilizedDatanodes = new ArrayList<>();
//set the farthest target with the lowest usage info
overUtilizedDatanodes.add(
- new DatanodeUsageInfo(target5, new SCMNodeStat(100, 0, 90)));
+ new DatanodeUsageInfo(target5, new SCMNodeStat(100, 0, 90, 0, 80)));
//set the tree targets, which have the same network topology distance
//to source , with different usage info
overUtilizedDatanodes.add(
- new DatanodeUsageInfo(target2, new SCMNodeStat(100, 0, 20)));
+ new DatanodeUsageInfo(target2, new SCMNodeStat(100, 0, 20, 0, 10)));
overUtilizedDatanodes.add(
- new DatanodeUsageInfo(target3, new SCMNodeStat(100, 0, 40)));
+ new DatanodeUsageInfo(target3, new SCMNodeStat(100, 0, 40, 0, 30)));
overUtilizedDatanodes.add(
- new DatanodeUsageInfo(target4, new SCMNodeStat(100, 0, 60)));
+ new DatanodeUsageInfo(target4, new SCMNodeStat(100, 0, 60, 0, 50)));
//set the nearest target with the highest usage info
overUtilizedDatanodes.add(
- new DatanodeUsageInfo(target1, new SCMNodeStat(100, 0, 10)));
+ new DatanodeUsageInfo(target1, new SCMNodeStat(100, 0, 10, 0, 5)));
FindTargetGreedyByNetworkTopology findTargetGreedyByNetworkTopology =
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
index 910fe75ede6c..e51f9731ad4a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
@@ -103,13 +103,13 @@ public void chooseDatanodes() throws SCMException {
.thenReturn(new ArrayList<>(datanodes));
when(mockNodeManager.getNodeStat(any()))
- .thenReturn(new SCMNodeMetric(100L, 0L, 100L));
+ .thenReturn(new SCMNodeMetric(100L, 0L, 100L, 0, 90));
when(mockNodeManager.getNodeStat(datanodes.get(2)))
- .thenReturn(new SCMNodeMetric(100L, 90L, 10L));
+ .thenReturn(new SCMNodeMetric(100L, 90L, 10L, 0, 9));
when(mockNodeManager.getNodeStat(datanodes.get(3)))
- .thenReturn(new SCMNodeMetric(100L, 80L, 20L));
+ .thenReturn(new SCMNodeMetric(100L, 80L, 20L, 0, 19));
when(mockNodeManager.getNodeStat(datanodes.get(4)))
- .thenReturn(new SCMNodeMetric(100L, 70L, 30L));
+ .thenReturn(new SCMNodeMetric(100L, 70L, 30L, 0, 20));
when(mockNodeManager.getNodeByUuid(any(UUID.class))).thenAnswer(
invocation -> datanodes.stream()
.filter(dn -> dn.getUuid().equals(invocation.getArgument(0)))
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAManagerImpl.java
index b1c12cdf71c1..1ab60015efef 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAManagerImpl.java
@@ -17,6 +17,7 @@
package org.apache.hadoop.hdds.scm.ha;
+import java.net.InetSocketAddress;
import java.nio.file.Path;
import java.time.Clock;
import java.time.ZoneOffset;
@@ -40,21 +41,31 @@
import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
import org.apache.hadoop.hdds.utils.TransactionInfo;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ratis.server.DivisionInfo;
-import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.MethodOrderer;
import org.junit.jupiter.api.Assertions;
-import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Order;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
+import org.junit.jupiter.api.TestMethodOrder;
import org.junit.jupiter.api.io.TempDir;
import java.io.IOException;
+import java.util.List;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
+import static java.util.Collections.singletonList;
+import static org.assertj.core.api.Assumptions.assumeThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
@@ -64,27 +75,35 @@
/**
* Test cases to verify {@link org.apache.hadoop.hdds.scm.ha.SCMHAManagerImpl}.
*/
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
class TestSCMHAManagerImpl {
- @TempDir
+ private static final String LEADER_SCM_ID = "leader";
+ private static final int LEADER_PORT = 9894;
+ private static final String FOLLOWER_SCM_ID = "follower";
+
private Path storageBaseDir;
private String clusterID;
private SCMHAManager primarySCMHAManager;
+ private SCMRatisServer follower;
- @BeforeEach
- void setup() throws IOException, InterruptedException,
+ @BeforeAll
+ void setup(@TempDir Path tempDir) throws IOException, InterruptedException,
TimeoutException {
+ storageBaseDir = tempDir;
clusterID = UUID.randomUUID().toString();
- OzoneConfiguration conf = getConfig("scm1", 9894);
- final StorageContainerManager scm = getMockStorageContainerManager(conf);
- SCMRatisServerImpl.initialize(clusterID, scm.getScmId(),
- scm.getScmNodeDetails(), conf);
- scm.getScmHAManager().start();
+ final StorageContainerManager scm = getMockStorageContainerManager(LEADER_SCM_ID, LEADER_PORT);
+ SCMRatisServerImpl.initialize(clusterID, LEADER_SCM_ID, scm.getScmNodeDetails(), scm.getConfiguration());
primarySCMHAManager = scm.getScmHAManager();
+ primarySCMHAManager.start();
final DivisionInfo ratisDivision = primarySCMHAManager.getRatisServer()
.getDivision().getInfo();
// Wait for Ratis Server to be ready
waitForSCMToBeReady(ratisDivision);
+ StorageContainerManager followerSCM = getMockStorageContainerManager(FOLLOWER_SCM_ID, 9898);
+ follower = followerSCM.getScmHAManager()
+ .getRatisServer();
}
private OzoneConfiguration getConfig(String scmId, int ratisPort) {
@@ -97,42 +116,61 @@ private OzoneConfiguration getConfig(String scmId, int ratisPort) {
return conf;
}
- public void waitForSCMToBeReady(DivisionInfo ratisDivision)
+ private void waitForSCMToBeReady(DivisionInfo ratisDivision)
throws TimeoutException,
InterruptedException {
GenericTestUtils.waitFor(ratisDivision::isLeaderReady,
1000, 10000);
}
- @AfterEach
- public void cleanup() throws IOException {
+ @AfterAll
+ void cleanup() throws IOException {
+ follower.stop();
primarySCMHAManager.stop();
}
@Test
- public void testAddSCM() throws IOException, InterruptedException {
- Assertions.assertEquals(1, primarySCMHAManager.getRatisServer()
- .getDivision().getGroup().getPeers().size());
+ @Order(1)
+ void testAddSCM() throws IOException {
+ Assertions.assertEquals(1, getPeerCount());
- final StorageContainerManager scm2 = getMockStorageContainerManager(
- getConfig("scm2", 9898));
- try {
- scm2.getScmHAManager().getRatisServer().start();
- final AddSCMRequest request = new AddSCMRequest(
- clusterID, scm2.getScmId(),
- "localhost:" + scm2.getScmHAManager().getRatisServer()
- .getDivision().getRaftServer().getServerRpc()
- .getInetSocketAddress().getPort());
- primarySCMHAManager.addSCM(request);
- Assertions.assertEquals(2, primarySCMHAManager.getRatisServer()
- .getDivision().getGroup().getPeers().size());
- } finally {
- scm2.getScmHAManager().getRatisServer().stop();
- }
+ follower.start();
+ final AddSCMRequest request = new AddSCMRequest(
+ clusterID, FOLLOWER_SCM_ID, getFollowerAddress());
+ primarySCMHAManager.addSCM(request);
+ Assertions.assertEquals(2, getPeerCount());
}
@Test
- public void testHARingRemovalErrors() throws IOException,
+ @Order(2) // requires testAddSCM
+ void testRemoveSCM() throws IOException {
+ assumeThat(getPeerCount()).isEqualTo(2);
+
+ final RemoveSCMRequest removeSCMRequest = new RemoveSCMRequest(
+ clusterID, FOLLOWER_SCM_ID, getFollowerAddress());
+ primarySCMHAManager.removeSCM(removeSCMRequest);
+ assertEquals(1, getPeerCount());
+ }
+
+ private int getPeerCount() {
+ return primarySCMHAManager.getRatisServer()
+ .getDivision().getGroup().getPeers().size();
+ }
+
+ private String getRaftServerAddress(SCMRatisServer ratisServer) {
+ return "localhost:" + ratisServer.getDivision()
+ .getRaftServer()
+ .getServerRpc()
+ .getInetSocketAddress()
+ .getPort();
+ }
+
+ private String getFollowerAddress() {
+ return getRaftServerAddress(follower);
+ }
+
+ @Test
+ void testHARingRemovalErrors() throws IOException,
AuthenticationException {
OzoneConfiguration config = new OzoneConfiguration();
config.set(ScmConfigKeys.OZONE_SCM_PRIMORDIAL_NODE_ID_KEY, "scm1");
@@ -160,39 +198,9 @@ public void testHARingRemovalErrors() throws IOException,
scm2.getScmHAManager().getRatisServer().stop();
}
}
- @Test
- public void testRemoveSCM() throws IOException, InterruptedException {
- Assertions.assertEquals(1, primarySCMHAManager.getRatisServer()
- .getDivision().getGroup().getPeers().size());
-
- final StorageContainerManager scm2 = getMockStorageContainerManager(
- getConfig("scm2", 9898));
- try {
- scm2.getScmHAManager().getRatisServer().start();
- final AddSCMRequest addSCMRequest = new AddSCMRequest(
- clusterID, scm2.getScmId(),
- "localhost:" + scm2.getScmHAManager().getRatisServer()
- .getDivision().getRaftServer().getServerRpc()
- .getInetSocketAddress().getPort());
- primarySCMHAManager.addSCM(addSCMRequest);
- Assertions.assertEquals(2, primarySCMHAManager.getRatisServer()
- .getDivision().getGroup().getPeers().size());
-
- final RemoveSCMRequest removeSCMRequest = new RemoveSCMRequest(
- clusterID, scm2.getScmId(), "localhost:" +
- scm2.getScmHAManager().getRatisServer().getDivision()
- .getRaftServer().getServerRpc().getInetSocketAddress().getPort());
- primarySCMHAManager.removeSCM(removeSCMRequest);
- Assertions.assertEquals(1, primarySCMHAManager.getRatisServer()
- .getDivision().getGroup().getPeers().size());
- } finally {
- scm2.getScmHAManager().getRatisServer().stop();
- }
- }
- private StorageContainerManager getMockStorageContainerManager(
- OzoneConfiguration conf) throws IOException {
- final String scmID = UUID.randomUUID().toString();
+ private StorageContainerManager getMockStorageContainerManager(String scmID, int port) throws IOException {
+ OzoneConfiguration conf = getConfig(scmID, port);
final DBStore dbStore = mock(DBStore.class);
final SCMContext scmContext = mock(SCMContext.class);
@@ -217,6 +225,7 @@ private StorageContainerManager getMockStorageContainerManager(
mock(SCMDatanodeProtocolServer.class);
when(scm.getClusterId()).thenReturn(clusterID);
+ when(scm.getConfiguration()).thenReturn(conf);
when(scm.getScmId()).thenReturn(scmID);
when(scm.getScmMetadataStore()).thenReturn(metadataStore);
when(scm.getScmNodeDetails()).thenReturn(nodeDetails);
@@ -235,12 +244,33 @@ private StorageContainerManager getMockStorageContainerManager(
when(scmHANodeDetails.getLocalNodeDetails()).thenReturn(nodeDetails);
when(blockManager.getDeletedBlockLog()).thenReturn(deletedBlockLog);
when(dbStore.initBatchOperation()).thenReturn(batchOperation);
- when(nodeDetails.getRatisHostPortStr()).thenReturn("localhost:" +
- conf.get(ScmConfigKeys.OZONE_SCM_RATIS_PORT_KEY));
+ when(nodeDetails.getRatisHostPortStr()).thenReturn("localhost:" + port);
when(scm.getSystemClock()).thenReturn(Clock.system(ZoneOffset.UTC));
+ if (FOLLOWER_SCM_ID.equals(scmID)) {
+ final SCMNodeDetails leaderNodeDetails = mock(SCMNodeDetails.class);
+ final List<SCMNodeDetails> peerNodeDetails = singletonList(leaderNodeDetails);
+ when(scmHANodeDetails.getPeerNodeDetails()).thenReturn(peerNodeDetails);
+ when(leaderNodeDetails.getNodeId()).thenReturn(LEADER_SCM_ID);
+ when(leaderNodeDetails.getGrpcPort()).thenReturn(LEADER_PORT);
+ when(leaderNodeDetails.getRatisHostPortStr()).thenReturn("localhost:" + LEADER_PORT);
+ InetSocketAddress rpcAddress = NetUtils.createSocketAddr("localhost", LEADER_PORT);
+ when(leaderNodeDetails.getRpcAddress()).thenReturn(rpcAddress);
+ when(leaderNodeDetails.getInetAddress()).thenReturn(rpcAddress.getAddress());
+ }
+
+ DBCheckpoint checkpoint = mock(DBCheckpoint.class);
+ SCMSnapshotProvider scmSnapshotProvider = mock(SCMSnapshotProvider.class);
+ when(scmSnapshotProvider.getSCMDBSnapshot(LEADER_SCM_ID))
+ .thenReturn(checkpoint);
+
final SCMHAManager manager = new SCMHAManagerImpl(conf,
- new SecurityConfig(conf), scm);
+ new SecurityConfig(conf), scm) {
+ @Override
+ protected SCMSnapshotProvider newScmSnapshotProvider(StorageContainerManager storageContainerManager) {
+ return scmSnapshotProvider;
+ }
+ };
when(scm.getScmHAManager()).thenReturn(manager);
return scm;
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
index 168fdd11a57b..0f65cdb10870 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
@@ -47,6 +47,7 @@
.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.HddsTestUtils;
+import org.apache.hadoop.hdds.scm.block.DeletedBlockLog;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
@@ -90,6 +91,7 @@ public class TestDeadNodeHandler {
private EventQueue eventQueue;
private String storageDir;
private SCMContext scmContext;
+ private DeletedBlockLog deletedBlockLog;
@BeforeEach
public void setup() throws IOException, AuthenticationException {
@@ -117,8 +119,9 @@ public void setup() throws IOException, AuthenticationException {
pipelineManager.setPipelineProvider(RATIS,
mockRatisProvider);
containerManager = scm.getContainerManager();
+ deletedBlockLog = Mockito.mock(DeletedBlockLog.class);
deadNodeHandler = new DeadNodeHandler(nodeManager,
- Mockito.mock(PipelineManager.class), containerManager);
+ Mockito.mock(PipelineManager.class), containerManager, deletedBlockLog);
healthyReadOnlyNodeHandler =
new HealthyReadOnlyNodeHandler(nodeManager,
pipelineManager);
@@ -134,6 +137,7 @@ public void teardown() {
}
@Test
+ @SuppressWarnings("checkstyle:MethodLength")
public void testOnMessage() throws Exception {
//GIVEN
DatanodeDetails datanode1 = MockDatanodeDetails.randomDatanodeDetails();
@@ -233,6 +237,9 @@ public void testOnMessage() throws Exception {
Assertions.assertFalse(
nodeManager.getClusterNetworkTopologyMap().contains(datanode1));
+ Mockito.verify(deletedBlockLog, Mockito.times(0))
+ .onDatanodeDead(datanode1.getUuid());
+
Set<ContainerReplica> container1Replicas = containerManager
.getContainerReplicas(ContainerID.valueOf(container1.getContainerID()));
Assertions.assertEquals(2, container1Replicas.size());
@@ -260,6 +267,9 @@ public void testOnMessage() throws Exception {
Assertions.assertEquals(0,
nodeManager.getCommandQueueCount(datanode1.getUuid(), cmd.getType()));
+ Mockito.verify(deletedBlockLog, Mockito.times(1))
+ .onDatanodeDead(datanode1.getUuid());
+
container1Replicas = containerManager
.getContainerReplicas(ContainerID.valueOf(container1.getContainerID()));
Assertions.assertEquals(1, container1Replicas.size());
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
index e3da551c3edc..a3decb0efb54 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
@@ -954,16 +954,17 @@ scmStorageConfig, eventPublisher, new NetworkTopologyImpl(conf),
@Test
public void testProcessCommandQueueReport()
- throws IOException, NodeNotFoundException {
+ throws IOException, NodeNotFoundException, AuthenticationException {
OzoneConfiguration conf = new OzoneConfiguration();
SCMStorageConfig scmStorageConfig = mock(SCMStorageConfig.class);
when(scmStorageConfig.getClusterID()).thenReturn("xyz111");
EventPublisher eventPublisher = mock(EventPublisher.class);
HDDSLayoutVersionManager lvm =
new HDDSLayoutVersionManager(scmStorageConfig.getLayoutVersion());
+ createNodeManager(getConf());
SCMNodeManager nodeManager = new SCMNodeManager(conf,
scmStorageConfig, eventPublisher, new NetworkTopologyImpl(conf),
- SCMContext.emptyContext(), lvm);
+ scmContext, lvm);
LayoutVersionProto layoutInfo = toLayoutVersionProto(
lvm.getMetadataLayoutVersion(), lvm.getSoftwareLayoutVersion());
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableRatisContainerProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableRatisContainerProvider.java
new file mode 100644
index 000000000000..d5d9208adae4
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestWritableRatisContainerProvider.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.pipeline;
+
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.RandomPipelineChoosePolicy;
+import org.junit.jupiter.api.RepeatedTest;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.Mock;
+import org.mockito.junit.jupiter.MockitoExtension;
+import org.mockito.junit.jupiter.MockitoSettings;
+import org.mockito.quality.Strictness;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static java.util.Arrays.asList;
+import static java.util.Collections.emptyList;
+import static java.util.Collections.emptySet;
+import static java.util.Collections.singletonList;
+import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.OPEN;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+@ExtendWith(MockitoExtension.class)
+@MockitoSettings(strictness = Strictness.LENIENT)
+class TestWritableRatisContainerProvider {
+
+ private static final ReplicationConfig REPLICATION_CONFIG =
+ RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE);
+ private static final String OWNER = "owner";
+ private static final int CONTAINER_SIZE = 1234;
+ private static final ExcludeList NO_EXCLUSION = new ExcludeList();
+
+ private final OzoneConfiguration conf = new OzoneConfiguration();
+ private final PipelineChoosePolicy policy = new RandomPipelineChoosePolicy();
+ private final AtomicLong containerID = new AtomicLong(1);
+
+ @Mock
+ private PipelineManager pipelineManager;
+
+ @Mock
+ private ContainerManager containerManager;
+
+ @Test
+ void returnsExistingContainer() throws Exception {
+ Pipeline pipeline = MockPipeline.createPipeline(3);
+ ContainerInfo existingContainer = pipelineHasContainer(pipeline);
+
+ existingPipelines(pipeline);
+
+ ContainerInfo container = createSubject().getContainer(CONTAINER_SIZE, REPLICATION_CONFIG, OWNER, NO_EXCLUSION);
+
+ assertSame(existingContainer, container);
+ verifyPipelineNotCreated();
+ }
+
+ @RepeatedTest(100)
+ void skipsPipelineWithoutContainer() throws Exception {
+ Pipeline pipeline = MockPipeline.createPipeline(3);
+ ContainerInfo existingContainer = pipelineHasContainer(pipeline);
+
+ Pipeline pipelineWithoutContainer = MockPipeline.createPipeline(3);
+ existingPipelines(pipelineWithoutContainer, pipeline);
+
+ ContainerInfo container = createSubject().getContainer(CONTAINER_SIZE, REPLICATION_CONFIG, OWNER, NO_EXCLUSION);
+
+ assertSame(existingContainer, container);
+ verifyPipelineNotCreated();
+ }
+
+ @Test
+ void createsNewContainerIfNoneFound() throws Exception {
+ ContainerInfo newContainer = createNewContainerOnDemand();
+
+ ContainerInfo container = createSubject().getContainer(CONTAINER_SIZE, REPLICATION_CONFIG, OWNER, NO_EXCLUSION);
+
+ assertSame(newContainer, container);
+ verifyPipelineCreated();
+ }
+
+ @Test
+ void failsIfContainerCannotBeCreated() throws Exception {
+ throwWhenCreatePipeline();
+
+ assertThrows(IOException.class,
+ () -> createSubject().getContainer(CONTAINER_SIZE, REPLICATION_CONFIG, OWNER, NO_EXCLUSION));
+
+ verifyPipelineCreated();
+ }
+
+ private void existingPipelines(Pipeline... pipelines) {
+ existingPipelines(new ArrayList<>(asList(pipelines)));
+ }
+
+ private void existingPipelines(List<Pipeline> pipelines) {
+ when(pipelineManager.getPipelines(REPLICATION_CONFIG, OPEN, emptySet(), emptySet()))
+ .thenReturn(pipelines);
+ }
+
+ private ContainerInfo pipelineHasContainer(Pipeline pipeline) {
+ ContainerInfo container = new ContainerInfo.Builder()
+ .setContainerID(containerID.getAndIncrement())
+ .setPipelineID(pipeline.getId())
+ .build();
+
+ when(containerManager.getMatchingContainer(CONTAINER_SIZE, OWNER, pipeline, emptySet()))
+ .thenReturn(container);
+
+ return container;
+ }
+
+ private ContainerInfo createNewContainerOnDemand() throws IOException {
+ Pipeline newPipeline = MockPipeline.createPipeline(3);
+ when(pipelineManager.createPipeline(REPLICATION_CONFIG))
+ .thenReturn(newPipeline);
+
+ when(pipelineManager.getPipelines(REPLICATION_CONFIG, OPEN, emptySet(), emptySet()))
+ .thenReturn(emptyList())
+ .thenReturn(new ArrayList<>(singletonList(newPipeline)));
+
+ return pipelineHasContainer(newPipeline);
+ }
+
+ private void throwWhenCreatePipeline() throws IOException {
+ when(pipelineManager.createPipeline(REPLICATION_CONFIG))
+ .thenThrow(new SCMException(SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE));
+ }
+
+ private WritableRatisContainerProvider createSubject() {
+ return new WritableRatisContainerProvider(conf,
+ pipelineManager, containerManager, policy);
+ }
+
+ private void verifyPipelineCreated() throws IOException {
+ verify(pipelineManager, times(2))
+ .getPipelines(REPLICATION_CONFIG, OPEN, emptySet(), emptySet());
+ verify(pipelineManager)
+ .createPipeline(REPLICATION_CONFIG);
+ }
+
+ private void verifyPipelineNotCreated() throws IOException {
+ verify(pipelineManager, times(1))
+ .getPipelines(REPLICATION_CONFIG, OPEN, emptySet(), emptySet());
+ verify(pipelineManager, never())
+ .createPipeline(REPLICATION_CONFIG);
+ }
+
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
index 6ba2fc440a4f..9c9bfad582f7 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestDatanodeMetrics.java
@@ -31,13 +31,13 @@
public class TestDatanodeMetrics {
@Test
public void testSCMNodeMetric() {
- SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L);
+ SCMNodeStat stat = new SCMNodeStat(100L, 10L, 90L, 0, 80);
assertEquals((long) stat.getCapacity().get(), 100L);
assertEquals(10L, (long) stat.getScmUsed().get());
assertEquals(90L, (long) stat.getRemaining().get());
SCMNodeMetric metric = new SCMNodeMetric(stat);
- SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L);
+ SCMNodeStat newStat = new SCMNodeStat(100L, 10L, 90L, 0, 80);
assertEquals(100L, (long) stat.getCapacity().get());
assertEquals(10L, (long) stat.getScmUsed().get());
assertEquals(90L, (long) stat.getRemaining().get());
@@ -53,8 +53,8 @@ public void testSCMNodeMetric() {
assertTrue(metric.isGreater(zeroMetric.get()));
// Another case when nodes have similar weight
- SCMNodeStat stat1 = new SCMNodeStat(10000000L, 50L, 9999950L);
- SCMNodeStat stat2 = new SCMNodeStat(10000000L, 51L, 9999949L);
+ SCMNodeStat stat1 = new SCMNodeStat(10000000L, 50L, 9999950L, 0, 100000);
+ SCMNodeStat stat2 = new SCMNodeStat(10000000L, 51L, 9999949L, 0, 100000);
assertTrue(new SCMNodeMetric(stat2).isGreater(stat1));
}
}
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java
index d46513b24bbd..b967fa0658c0 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/UsageInfoSubcommand.java
@@ -155,8 +155,16 @@ private void printInfo(DatanodeUsage info) {
+ " B", StringUtils.byteDesc(info.getRemaining()));
System.out.printf("%-13s: %s %n", "Remaining %",
PERCENT_FORMAT.format(info.getRemainingRatio()));
- System.out.printf("%-13s: %d %n%n", "Container(s)",
+ System.out.printf("%-13s: %d %n", "Container(s)",
info.getContainerCount());
+ System.out.printf("%-24s: %s (%s) %n", "Container Pre-allocated",
+ info.getCommitted() + " B", StringUtils.byteDesc(info.getCommitted()));
+ System.out.printf("%-24s: %s (%s) %n", "Remaining Allocatable",
+ (info.getRemaining() - info.getCommitted()) + " B",
+ StringUtils.byteDesc((info.getRemaining() - info.getCommitted())));
+ System.out.printf("%-24s: %s (%s) %n%n", "Free Space To Spare",
+ info.getFreeSpaceToSpare() + " B",
+ StringUtils.byteDesc(info.getFreeSpaceToSpare()));
}
/**
@@ -181,6 +189,8 @@ private static class DatanodeUsage {
private long capacity = 0;
private long used = 0;
private long remaining = 0;
+ private long committed = 0;
+ private long freeSpaceToSpare = 0;
private long containerCount = 0;
DatanodeUsage(HddsProtos.DatanodeUsageInfoProto proto) {
@@ -196,9 +206,15 @@ private static class DatanodeUsage {
if (proto.hasRemaining()) {
remaining = proto.getRemaining();
}
+ if (proto.hasCommitted()) {
+ committed = proto.getCommitted();
+ }
if (proto.hasContainerCount()) {
containerCount = proto.getContainerCount();
}
+ if (proto.hasFreeSpaceToSpare()) {
+ freeSpaceToSpare = proto.getFreeSpaceToSpare();
+ }
}
public DatanodeDetails getDatanodeDetails() {
@@ -220,6 +236,12 @@ public long getOzoneUsed() {
public long getRemaining() {
return remaining;
}
+ public long getCommitted() {
+ return committed;
+ }
+ public long getFreeSpaceToSpare() {
+ return freeSpaceToSpare;
+ }
public long getContainerCount() {
return containerCount;
diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java
index 0cc8ed9be639..a52a0a7ed8f5 100644
--- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java
+++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java
@@ -19,6 +19,7 @@
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
+import org.apache.commons.codec.CharEncoding;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.client.ScmClient;
@@ -97,6 +98,38 @@ public void testCorrectJsonValuesInReport() throws IOException {
json.get(0).get("containerCount").longValue());
}
+ @Test
+ public void testOutputDataFieldsAligning() throws IOException {
+ // given
+ ScmClient scmClient = mock(ScmClient.class);
+ Mockito.when(scmClient.getDatanodeUsageInfo(
+ Mockito.anyBoolean(), Mockito.anyInt()))
+ .thenAnswer(invocation -> getUsageProto());
+
+ CommandLine c = new CommandLine(cmd);
+ c.parseArgs("-m");
+
+ // when
+ cmd.execute(scmClient);
+
+ // then
+ String output = outContent.toString(CharEncoding.UTF_8);
+ Assertions.assertTrue(output.contains("UUID :"));
+ Assertions.assertTrue(output.contains("IP Address :"));
+ Assertions.assertTrue(output.contains("Hostname :"));
+ Assertions.assertTrue(output.contains("Capacity :"));
+ Assertions.assertTrue(output.contains("Total Used :"));
+ Assertions.assertTrue(output.contains("Total Used % :"));
+ Assertions.assertTrue(output.contains("Ozone Used :"));
+ Assertions.assertTrue(output.contains("Ozone Used % :"));
+ Assertions.assertTrue(output.contains("Remaining :"));
+ Assertions.assertTrue(output.contains("Remaining % :"));
+ Assertions.assertTrue(output.contains("Container(s) :"));
+ Assertions.assertTrue(output.contains("Container Pre-allocated :"));
+ Assertions.assertTrue(output.contains("Remaining Allocatable :"));
+ Assertions.assertTrue(output.contains("Free Space To Spare :"));
+ }
+
private List<HddsProtos.DatanodeUsageInfoProto> getUsageProto() {
List<HddsProtos.DatanodeUsageInfoProto> result = new ArrayList<>();
result.add(HddsProtos.DatanodeUsageInfoProto.newBuilder()
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java
index 3da23688418f..13ba57169878 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java
@@ -186,13 +186,10 @@ private List getChunkInfos(OmKeyLocationInfo
LOG.debug("Initializing BlockInputStream for get key to access {}",
blockID.getContainerID());
}
- xceiverClientSpi =
- getXceiverClientFactory().acquireClientForReadData(pipeline);
+ xceiverClientSpi = getXceiverClientFactory().acquireClientForReadData(pipeline);
- ContainerProtos.DatanodeBlockID datanodeBlockID = blockID
- .getDatanodeBlockIDProtobuf();
ContainerProtos.GetBlockResponseProto response = ContainerProtocolCalls
- .getBlock(xceiverClientSpi, datanodeBlockID, token);
+ .getBlock(xceiverClientSpi, blockID, token, pipeline.getReplicaIndexes());
chunks = response.getBlockData().getChunksList();
} finally {
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java
index 09f9c7d037e9..016121ce1a9b 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java
@@ -149,13 +149,9 @@ protected List getChunkInfos(
LOG.debug("Initializing BlockInputStream for get key to access {}",
blockID.getContainerID());
}
- xceiverClientSpi =
- getXceiverClientFactory().acquireClientForReadData(pipeline);
-
- ContainerProtos.DatanodeBlockID datanodeBlockID = blockID
- .getDatanodeBlockIDProtobuf();
+ xceiverClientSpi = getXceiverClientFactory().acquireClientForReadData(pipeline);
ContainerProtos.GetBlockResponseProto response = ContainerProtocolCalls
- .getBlock(xceiverClientSpi, datanodeBlockID, token);
+ .getBlock(xceiverClientSpi, blockID, token, pipeline.getReplicaIndexes());
chunks = response.getBlockData().getChunksList();
} finally {
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntry.java
index 241754a57f19..2cf2ab0cf9c2 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntry.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntry.java
@@ -37,7 +37,6 @@
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
-import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -248,8 +247,7 @@ Collection getFailedServers() {
@VisibleForTesting
Pipeline createSingleECBlockPipeline(Pipeline ecPipeline,
DatanodeDetails node, int replicaIndex) {
- Map indiciesForSinglePipeline = new HashMap<>();
- indiciesForSinglePipeline.put(node, replicaIndex);
+ Map indiciesForSinglePipeline = Collections.singletonMap(node, replicaIndex);
return Pipeline.newBuilder()
.setId(ecPipeline.getId())
.setReplicationConfig(ecPipeline.getReplicationConfig())
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java
index 4843c1c45e6c..76fa1e394f6c 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java
@@ -25,6 +25,7 @@
import java.util.stream.Collectors;
import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
import org.apache.hadoop.hdds.scm.XceiverClientFactory;
import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream;
import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo;
@@ -58,9 +59,9 @@ private static List createStreams(
OmKeyInfo keyInfo,
List blockInfos,
XceiverClientFactory xceiverClientFactory,
- boolean verifyChecksum,
Function retryFunction,
- BlockInputStreamFactory blockStreamFactory) {
+ BlockInputStreamFactory blockStreamFactory,
+ OzoneClientConfig config) throws IOException {
List partStreams = new ArrayList<>();
for (OmKeyLocationInfo omKeyLocationInfo : blockInfos) {
if (LOG.isDebugEnabled()) {
@@ -91,9 +92,9 @@ private static List createStreams(
omKeyLocationInfo,
omKeyLocationInfo.getPipeline(),
omKeyLocationInfo.getToken(),
- verifyChecksum,
xceiverClientFactory,
- retry);
+ retry,
+ config);
partStreams.add(stream);
}
return partStreams;
@@ -117,13 +118,13 @@ private static BlockLocationInfo getBlockLocationInfo(OmKeyInfo newKeyInfo,
private static LengthInputStream getFromOmKeyInfo(
OmKeyInfo keyInfo,
XceiverClientFactory xceiverClientFactory,
- boolean verifyChecksum,
Function retryFunction,
BlockInputStreamFactory blockStreamFactory,
- List locationInfos) {
+ List locationInfos,
+ OzoneClientConfig config) throws IOException {
List streams = createStreams(keyInfo,
- locationInfos, xceiverClientFactory, verifyChecksum, retryFunction,
- blockStreamFactory);
+ locationInfos, xceiverClientFactory, retryFunction,
+ blockStreamFactory, config);
KeyInputStream keyInputStream =
new KeyInputStream(keyInfo.getKeyName(), streams);
return new LengthInputStream(keyInputStream, keyInputStream.getLength());
@@ -134,20 +135,22 @@ private static LengthInputStream getFromOmKeyInfo(
*/
public static LengthInputStream getFromOmKeyInfo(OmKeyInfo keyInfo,
XceiverClientFactory xceiverClientFactory,
- boolean verifyChecksum, Function retryFunction,
- BlockInputStreamFactory blockStreamFactory) {
+ Function retryFunction,
+ BlockInputStreamFactory blockStreamFactory,
+ OzoneClientConfig config) throws IOException {
List keyLocationInfos = keyInfo
.getLatestVersionLocations().getBlocksLatestVersionOnly();
- return getFromOmKeyInfo(keyInfo, xceiverClientFactory, verifyChecksum,
- retryFunction, blockStreamFactory, keyLocationInfos);
+ return getFromOmKeyInfo(keyInfo, xceiverClientFactory,
+ retryFunction, blockStreamFactory, keyLocationInfos, config);
}
public static List getStreamsFromKeyInfo(OmKeyInfo keyInfo,
- XceiverClientFactory xceiverClientFactory, boolean verifyChecksum,
+ XceiverClientFactory xceiverClientFactory,
Function retryFunction,
- BlockInputStreamFactory blockStreamFactory) {
+ BlockInputStreamFactory blockStreamFactory,
+ OzoneClientConfig config) throws IOException {
List keyLocationInfos = keyInfo
.getLatestVersionLocations().getBlocksLatestVersionOnly();
@@ -162,7 +165,8 @@ public static List getStreamsFromKeyInfo(OmKeyInfo keyInfo,
// Create a KeyInputStream for each part.
for (List locationInfo : partsToBlocksMap.values()) {
lengthInputStreams.add(getFromOmKeyInfo(keyInfo, xceiverClientFactory,
- verifyChecksum, retryFunction, blockStreamFactory, locationInfo));
+ retryFunction, blockStreamFactory, locationInfo,
+ config));
}
return lengthInputStreams;
}
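
Editorial note, not part of the patch: the checksum flag is no longer threaded through KeyInputStream as a boolean; callers now pass the whole OzoneClientConfig, and the stream factory reads isChecksumVerify() from it. A minimal hedged sketch of the new call shape, mirroring the updated test further down in this patch (names such as conf, keyInfo, xceiverClientFactory, retryFunction and streamFactory are assumed to be in scope):

    // Checksum verification is now governed by the client config object.
    OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
    clientConfig.setChecksumVerify(true);
    try (LengthInputStream in = KeyInputStream.getFromOmKeyInfo(
        keyInfo, xceiverClientFactory, retryFunction, streamFactory, clientConfig)) {
      // read from the stream as before
    }
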
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 8d61f8ef8609..222823c454a7 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -50,6 +50,7 @@
import org.apache.hadoop.hdds.scm.XceiverClientFactory;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.client.ClientTrustManager;
+import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput;
import org.apache.hadoop.hdds.security.x509.certificate.client.CACertificateProvider;
import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -1834,11 +1835,16 @@ public OzoneOutputStream createMultipartKey(
long size, int partNumber, String uploadID) throws IOException {
final OpenKeySession openKey = newMultipartOpenKey(
volumeName, bucketName, keyName, size, partNumber, uploadID, false);
+ return createMultipartOutputStream(openKey, uploadID, partNumber);
+ }
+
+ private OzoneOutputStream createMultipartOutputStream(
+ OpenKeySession openKey, String uploadID, int partNumber
+ ) throws IOException {
KeyOutputStream keyOutputStream = createKeyOutputStream(openKey)
.setMultipartNumber(partNumber)
.setMultipartUploadID(uploadID)
.setIsMultipartKey(true)
- .setAtomicKeyCreation(isS3GRequest.get())
.build();
return createOutputStream(openKey, keyOutputStream);
}
@@ -1854,29 +1860,25 @@ public OzoneDataStreamOutput createMultipartStreamKey(
throws IOException {
final OpenKeySession openKey = newMultipartOpenKey(
volumeName, bucketName, keyName, size, partNumber, uploadID, true);
- // Amazon S3 never adds partial objects, So for S3 requests we need to
- // set atomicKeyCreation to true
- // refer: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
- KeyDataStreamOutput keyOutputStream =
- new KeyDataStreamOutput.Builder()
- .setHandler(openKey)
- .setXceiverClientManager(xceiverClientManager)
- .setOmClient(ozoneManagerClient)
- .setReplicationConfig(openKey.getKeyInfo().getReplicationConfig())
- .setMultipartNumber(partNumber)
- .setMultipartUploadID(uploadID)
- .setIsMultipartKey(true)
- .enableUnsafeByteBufferConversion(unsafeByteBufferConversion)
- .setConfig(clientConfig)
- .setAtomicKeyCreation(isS3GRequest.get())
- .build();
- keyOutputStream
- .addPreallocateBlocks(
- openKey.getKeyInfo().getLatestVersionLocations(),
- openKey.getOpenVersion());
- final OzoneOutputStream out = createSecureOutputStream(
- openKey, keyOutputStream, null);
- return new OzoneDataStreamOutput(out != null ? out : keyOutputStream);
+ final ByteBufferStreamOutput out;
+ ReplicationConfig replicationConfig = openKey.getKeyInfo().getReplicationConfig();
+ if (replicationConfig.getReplicationType() == HddsProtos.ReplicationType.RATIS) {
+ KeyDataStreamOutput keyOutputStream = newKeyOutputStreamBuilder()
+ .setHandler(openKey)
+ .setReplicationConfig(replicationConfig)
+ .setMultipartNumber(partNumber)
+ .setMultipartUploadID(uploadID)
+ .setIsMultipartKey(true)
+ .build();
+ keyOutputStream.addPreallocateBlocks(
+ openKey.getKeyInfo().getLatestVersionLocations(),
+ openKey.getOpenVersion());
+ final OzoneOutputStream secureOut = createSecureOutputStream(openKey, keyOutputStream, null);
+ out = secureOut != null ? secureOut : keyOutputStream;
+ } else {
+ out = createMultipartOutputStream(openKey, uploadID, partNumber);
+ }
+ return new OzoneDataStreamOutput(out);
}
@Override
@@ -2221,9 +2223,8 @@ private OzoneInputStream createInputStream(
if (feInfo == null) {
LengthInputStream lengthInputStream = KeyInputStream
- .getFromOmKeyInfo(keyInfo, xceiverClientManager,
- clientConfig.isChecksumVerify(), retryFunction,
- blockInputStreamFactory);
+ .getFromOmKeyInfo(keyInfo, xceiverClientManager, retryFunction,
+ blockInputStreamFactory, clientConfig);
try {
final GDPRSymmetricKey gk = getGDPRSymmetricKey(
keyInfo.getMetadata(), Cipher.DECRYPT_MODE);
@@ -2238,9 +2239,8 @@ private OzoneInputStream createInputStream(
} else if (!keyInfo.getLatestVersionLocations().isMultipartKey()) {
// Regular Key with FileEncryptionInfo
LengthInputStream lengthInputStream = KeyInputStream
- .getFromOmKeyInfo(keyInfo, xceiverClientManager,
- clientConfig.isChecksumVerify(), retryFunction,
- blockInputStreamFactory);
+ .getFromOmKeyInfo(keyInfo, xceiverClientManager, retryFunction,
+ blockInputStreamFactory, clientConfig);
final KeyProvider.KeyVersion decrypted = getDEK(feInfo);
final CryptoInputStream cryptoIn =
new CryptoInputStream(lengthInputStream.getWrappedStream(),
@@ -2250,9 +2250,8 @@ private OzoneInputStream createInputStream(
} else {
// Multipart Key with FileEncryptionInfo
List lengthInputStreams = KeyInputStream
- .getStreamsFromKeyInfo(keyInfo, xceiverClientManager,
- clientConfig.isChecksumVerify(), retryFunction,
- blockInputStreamFactory);
+ .getStreamsFromKeyInfo(keyInfo, xceiverClientManager, retryFunction,
+ blockInputStreamFactory, clientConfig);
final KeyProvider.KeyVersion decrypted = getDEK(feInfo);
List cryptoInputStreams = new ArrayList<>();
@@ -2273,25 +2272,33 @@ private OzoneDataStreamOutput createDataStreamOutput(OpenKeySession openKey)
throws IOException {
final ReplicationConfig replicationConfig
= openKey.getKeyInfo().getReplicationConfig();
+ final ByteBufferStreamOutput out;
+ if (replicationConfig.getReplicationType() == HddsProtos.ReplicationType.RATIS) {
+ KeyDataStreamOutput keyOutputStream = newKeyOutputStreamBuilder()
+ .setHandler(openKey)
+ .setReplicationConfig(replicationConfig)
+ .build();
+ keyOutputStream.addPreallocateBlocks(
+ openKey.getKeyInfo().getLatestVersionLocations(),
+ openKey.getOpenVersion());
+ final OzoneOutputStream secureOut = createSecureOutputStream(openKey, keyOutputStream, null);
+ out = secureOut != null ? secureOut : keyOutputStream;
+ } else {
+ out = createOutputStream(openKey);
+ }
+ return new OzoneDataStreamOutput(out);
+ }
+
+ private KeyDataStreamOutput.Builder newKeyOutputStreamBuilder() {
// Amazon S3 never adds partial objects, So for S3 requests we need to
// set atomicKeyCreation to true
// refer: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
- KeyDataStreamOutput keyOutputStream =
- new KeyDataStreamOutput.Builder()
- .setHandler(openKey)
- .setXceiverClientManager(xceiverClientManager)
- .setOmClient(ozoneManagerClient)
- .setReplicationConfig(replicationConfig)
- .enableUnsafeByteBufferConversion(unsafeByteBufferConversion)
- .setConfig(clientConfig)
- .setAtomicKeyCreation(isS3GRequest.get())
- .build();
- keyOutputStream
- .addPreallocateBlocks(openKey.getKeyInfo().getLatestVersionLocations(),
- openKey.getOpenVersion());
- final OzoneOutputStream out = createSecureOutputStream(
- openKey, keyOutputStream, null);
- return new OzoneDataStreamOutput(out != null ? out : keyOutputStream);
+ return new KeyDataStreamOutput.Builder()
+ .setXceiverClientManager(xceiverClientManager)
+ .setOmClient(ozoneManagerClient)
+ .enableUnsafeByteBufferConversion(unsafeByteBufferConversion)
+ .setConfig(clientConfig)
+ .setAtomicKeyCreation(isS3GRequest.get());
}
private OzoneOutputStream createOutputStream(OpenKeySession openKey)
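
Editorial note, not part of the patch: the S3 constraint cited above (S3 never exposes partial objects, so atomicKeyCreation is forced on for S3 gateway requests) is now applied once in newKeyOutputStreamBuilder(), and both the regular and the multipart stream paths reuse that builder. A hedged sketch of how the two call sites differ, using only setters that appear in the hunks above:

    // Shared settings (client manager, OM client, unsafe-buffer flag, client
    // config, atomicKeyCreation for S3 gateway requests) come from the helper.
    KeyDataStreamOutput regular = newKeyOutputStreamBuilder()
        .setHandler(openKey)
        .setReplicationConfig(replicationConfig)
        .build();

    // The multipart path only adds the upload identifiers on top of that.
    KeyDataStreamOutput multipart = newKeyOutputStreamBuilder()
        .setHandler(openKey)
        .setReplicationConfig(replicationConfig)
        .setMultipartNumber(partNumber)
        .setMultipartUploadID(uploadID)
        .setIsMultipartKey(true)
        .build();
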
diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java
index 28e9b8ac3c61..df6ef9bc0506 100644
--- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java
+++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestKeyInputStreamEC.java
@@ -20,8 +20,10 @@
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream;
@@ -40,7 +42,6 @@
import static org.apache.hadoop.ozone.OzoneConsts.MB;
import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -49,6 +50,8 @@
*/
public class TestKeyInputStreamEC {
+ private OzoneConfiguration conf = new OzoneConfiguration();
+
@Test
public void testReadAgainstLargeBlockGroup() throws IOException {
int dataBlocks = 10;
@@ -68,10 +71,13 @@ public void testReadAgainstLargeBlockGroup() throws IOException {
BlockInputStreamFactory mockStreamFactory =
mock(BlockInputStreamFactory.class);
when(mockStreamFactory.create(any(), any(), any(), any(),
- anyBoolean(), any(), any())).thenReturn(blockInputStream);
+ any(), any(), any())).thenReturn(blockInputStream);
+ OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
+ clientConfig.setChecksumVerify(true);
try (LengthInputStream kis = KeyInputStream.getFromOmKeyInfo(keyInfo,
- null, true, null, mockStreamFactory)) {
+ null, null, mockStreamFactory,
+ clientConfig)) {
byte[] buf = new byte[100];
int readBytes = kis.read(buf, 0, 100);
Assertions.assertEquals(100, readBytes);
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
index babeb3054875..1767cdb6dc3a 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java
@@ -721,6 +721,47 @@ public static String normalizeKey(String keyName,
return keyName;
}
+ /**
+ * Normalizes a given path up to the bucket level.
+ *
+ * This method takes a path as input and normalizes it up to the bucket
+ * level. It handles empty paths, removes leading slashes, and splits the
+ * path into segments. It then extracts the volume and bucket names and
+ * joins them with a single slash. Finally, any remaining segments are
+ * joined as the key name, returning the complete standardized path.
+ *
+ * @param path The path string to be normalized.
+ * @return The normalized path string.
+ */
+ public static String normalizePathUptoBucket(String path) {
+ if (path == null || path.isEmpty()) {
+ return OM_KEY_PREFIX; // Handle empty path
+ }
+
+ // Remove leading slashes
+ path = path.replaceAll("^/*", "");
+
+ String[] segments = path.split(OM_KEY_PREFIX, -1);
+
+ String volumeName = segments[0];
+ String bucketName = segments.length > 1 ? segments[1] : "";
+
+ // Combine volume and bucket.
+ StringBuilder normalizedPath = new StringBuilder(volumeName);
+ if (!bucketName.isEmpty()) {
+ normalizedPath.append(OM_KEY_PREFIX).append(bucketName);
+ }
+
+ // Add remaining segments as the key
+ if (segments.length > 2) {
+ normalizedPath.append(OM_KEY_PREFIX).append(
+ String.join(OM_KEY_PREFIX,
+ Arrays.copyOfRange(segments, 2, segments.length)));
+ }
+
+ return normalizedPath.toString();
+ }
+
/**
* For a given service ID, return list of configured OM hosts.
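
Editorial note, not part of the patch: to make the normalization rules above concrete, a hedged sketch of the expected behaviour of normalizePathUptoBucket, written as JUnit-style assertions and assuming OM_KEY_PREFIX is "/":

    // Null or empty input falls back to the key prefix.
    assertEquals("/", OmUtils.normalizePathUptoBucket(""));
    // Leading slashes are stripped; volume and bucket are joined by one slash.
    assertEquals("vol1/bucket1", OmUtils.normalizePathUptoBucket("///vol1/bucket1"));
    // Anything after the bucket is kept as the key portion.
    assertEquals("vol1/bucket1/dir1/key1",
        OmUtils.normalizePathUptoBucket("/vol1/bucket1/dir1/key1"));
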
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index 5dd7579eb916..9b844cc74fdf 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -579,13 +579,20 @@ private OMConfigKeys() {
= TimeUnit.DAYS.toMillis(7);
public static final String OZONE_OM_SNAPSHOT_DIFF_CLEANUP_SERVICE_RUN_INTERVAL
- = "ozone.om.snapshot.diff.cleanup.service.run.internal";
+ = "ozone.om.snapshot.diff.cleanup.service.run.interval";
+ public static final String
+ OZONE_OM_SNAPSHOT_CACHE_CLEANUP_SERVICE_RUN_INTERVAL
+ = "ozone.om.snapshot.cache.cleanup.service.run.interval";
public static final long
OZONE_OM_SNAPSHOT_DIFF_CLEANUP_SERVICE_RUN_INTERVAL_DEFAULT
= TimeUnit.MINUTES.toMillis(1);
+ public static final long
+ OZONE_OM_SNAPSHOT_CACHE_CLEANUP_SERVICE_RUN_INTERVAL_DEFAULT
+ = TimeUnit.MINUTES.toMillis(1);
public static final String OZONE_OM_SNAPSHOT_DIFF_CLEANUP_SERVICE_TIMEOUT
= "ozone.om.snapshot.diff.cleanup.service.timeout";
+
public static final long
OZONE_OM_SNAPSHOT_DIFF_CLEANUP_SERVICE_TIMEOUT_DEFAULT
= TimeUnit.MINUTES.toMillis(5);
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReportOzone.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReportOzone.java
index 3d14e266daa4..29bf4deb2a08 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReportOzone.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReportOzone.java
@@ -101,16 +101,12 @@ public String toString() {
.append(" and snapshot: ")
.append(getLaterSnapshotName())
.append(LINE_SEPARATOR);
- if (!getDiffList().isEmpty()) {
- for (DiffReportEntry entry : getDiffList()) {
- str.append(entry.toString()).append(LINE_SEPARATOR);
- }
- if (StringUtils.isNotEmpty(token)) {
- str.append("Next token: ")
- .append(token);
- }
- } else {
- str.append("No diff or no more diff for the request parameters.");
+ for (DiffReportEntry entry : getDiffList()) {
+ str.append(entry.toString()).append(LINE_SEPARATOR);
+ }
+ if (StringUtils.isNotEmpty(token)) {
+ str.append("Next token: ")
+ .append(token);
}
return str.toString();
}
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
index 53f1a63d97ae..f5c32ff3aa2c 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
@@ -84,7 +84,8 @@ OZONE-SITE.XML_ozone.scm.dead.node.interval=45s
OZONE-SITE.XML_hdds.container.report.interval=60s
OZONE-SITE.XML_ozone.scm.close.container.wait.duration=5s
-OZONE-SITE.XML_dfs.container.ratis.datastream.enabled=true
+# Ratis streaming is disabled to ensure coverage for both cases
+OZONE-SITE.XML_dfs.container.ratis.datastream.enabled=false
HDFS-SITE.XML_dfs.datanode.kerberos.principal=dn/dn@EXAMPLE.COM
HDFS-SITE.XML_dfs.datanode.kerberos.keytab.file=/etc/security/keytabs/dn.keytab
@@ -114,7 +115,7 @@ OZONE-SITE.XML_ozone.s3g.http.auth.kerberos.keytab=/etc/security/keytabs/s3g.key
OZONE-SITE.XML_ozone.s3g.http.auth.kerberos.principal=HTTP/s3g@EXAMPLE.COM
OZONE-SITE.XML_ozone.httpfs.http.auth.kerberos.keytab=/etc/security/keytabs/httpfs.keytab
OZONE-SITE.XML_ozone.httpfs.http.auth.kerberos.principal=HTTP/httpfs@EXAMPLE.COM
-OZONE-SITE.XML_ozone.recon.http.auth.kerberos.principal=HTTP/recon@EXAMPLE.COM
+OZONE-SITE.XML_ozone.recon.http.auth.kerberos.principal=*
OZONE-SITE.XML_ozone.recon.http.auth.kerberos.keytab=/etc/security/keytabs/recon.keytab
CORE-SITE.XML_hadoop.http.authentication.simple.anonymous.allowed=false
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
index 41f4fa9a580f..6d213f9b5fbe 100755
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
@@ -39,6 +39,15 @@ execute_robot_test scm security
execute_robot_test scm -v SCHEME:ofs -v BUCKET_TYPE:bucket -N ozonefs-ofs-bucket ozonefs/ozonefs.robot
+## Exclude virtual-host tests. These are tested separately as they require additional config.
+exclude="--exclude virtual-host"
+for bucket in encrypted; do
+ execute_robot_test s3g -v BUCKET:${bucket} -N s3-${bucket} ${exclude} s3
+ # Some tests are independent of the bucket type and only need to be run once,
+ # so exclude them (along with virtual-host) on subsequent iterations.
+ exclude="--exclude virtual-host --exclude no-bucket-type"
+done
+
#expects 4 pipelines, should be run before
#admincli which creates STANDALONE pipeline
execute_robot_test scm recon
diff --git a/hadoop-ozone/dist/src/main/compose/testlib.sh b/hadoop-ozone/dist/src/main/compose/testlib.sh
index c13ca1bb80e0..505cb1ae77c9 100755
--- a/hadoop-ozone/dist/src/main/compose/testlib.sh
+++ b/hadoop-ozone/dist/src/main/compose/testlib.sh
@@ -304,9 +304,10 @@ get_output_name() {
save_container_logs() {
local output_name=$(get_output_name)
- local c
- for c in $(docker-compose ps "$@" | cut -f1 -d' ' | tail -n +3); do
- docker logs "${c}" >> "$RESULT_DIR/docker-${output_name}${c}.log" 2>&1
+ local i
+ for i in $(docker-compose ps -a -q "$@"); do
+ local c=$(docker ps -a --filter "id=${i}" --format "{{ .Names }}")
+ docker logs "${i}" >> "$RESULT_DIR/docker-${output_name}${c}.log" 2>&1
done
}
diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt
index b4f83965f4a1..49473afb73d1 100644
--- a/hadoop-ozone/dist/src/main/license/jar-report.txt
+++ b/hadoop-ozone/dist/src/main/license/jar-report.txt
@@ -163,7 +163,10 @@ share/ozone/lib/jsp-api.jar
share/ozone/lib/jsr305.jar
share/ozone/lib/jsr311-api.jar
share/ozone/lib/kerb-core.jar
+share/ozone/lib/kerb-crypto.jar
+share/ozone/lib/kerb-util.jar
share/ozone/lib/kerby-asn1.jar
+share/ozone/lib/kerby-config.jar
share/ozone/lib/kerby-pkix.jar
share/ozone/lib/kerby-util.jar
share/ozone/lib/kotlin-stdlib-common.jar
diff --git a/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml
index 098d27980c3a..3b7a676f28bf 100644
--- a/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml
@@ -36,10 +36,6 @@
-
-
-
-
@@ -125,10 +121,6 @@
-
-
-
-
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
similarity index 70%
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
index b5d83704833c..a83c7bf16a2e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
@@ -18,14 +18,17 @@
package org.apache.hadoop.fs.ozone;
+import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.InvalidPathException;
+import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
@@ -53,6 +56,8 @@
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OzonePrefixPathImpl;
import org.apache.hadoop.ozone.om.TrashPolicyOzone;
import org.apache.hadoop.ozone.om.exceptions.OMException;
@@ -62,17 +67,14 @@
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Time;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ozone.test.TestClock;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.apache.ozone.test.JUnit5AwareTimeout;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.event.Level;
@@ -88,38 +90,48 @@
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.UUID;
import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_CHECKPOINT_INTERVAL_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
import static org.apache.hadoop.fs.CommonPathCapabilities.FS_ACLS;
import static org.apache.hadoop.fs.CommonPathCapabilities.FS_CHECKSUMS;
import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX;
+import static org.apache.hadoop.fs.StorageStatistics.CommonStatisticNames.OP_CREATE;
+import static org.apache.hadoop.fs.StorageStatistics.CommonStatisticNames.OP_GET_FILE_STATUS;
+import static org.apache.hadoop.fs.StorageStatistics.CommonStatisticNames.OP_MKDIRS;
+import static org.apache.hadoop.fs.StorageStatistics.CommonStatisticNames.OP_OPEN;
import static org.apache.hadoop.fs.contract.ContractTestUtils.assertHasPathCapabilities;
import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE;
+import static org.apache.hadoop.fs.ozone.Constants.OZONE_DEFAULT_USER;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertThrows;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeFalse;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assumptions.assumeFalse;
/**
* Ozone file system tests that are not covered by contract tests.
*/
-@RunWith(Parameterized.class)
-public class TestOzoneFileSystem {
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+abstract class AbstractOzoneFileSystemTest {
private static final float TRASH_INTERVAL = 0.05f; // 3 seconds
@@ -131,59 +143,35 @@ public class TestOzoneFileSystem {
private static final PathFilter EXCLUDE_TRASH =
p -> !p.toUri().getPath().startsWith(TRASH_ROOT.toString());
+ private String fsRoot;
- @Parameterized.Parameters
- public static Collection