diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
index a373c21e89a..b03b7d7ad65 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
@@ -29,7 +29,6 @@
 import java.util.stream.Collectors;
 
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
@@ -41,9 +40,7 @@
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
 import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
@@ -293,29 +290,15 @@ public BackgroundTaskResult call() throws Exception {
           }
         }
 
-        // Once files are deleted... replace deleting entries with deleted
-        // entries
+        // Once blocks are deleted... remove the blockID from blockDataTable.
        try(BatchOperation batch = meta.getStore().getBatchHandler()
             .initBatchOperation()) {
-          Table<String, ChunkInfoList> deletedBlocksTable =
-              meta.getStore().getDeletedBlocksTable();
           for (String entry : succeedBlocks) {
-            List<ContainerProtos.ChunkInfo> chunkList =
-                blockDataTable.get(entry).getChunks();
-            String blockId = entry.substring(
-                OzoneConsts.DELETING_KEY_PREFIX.length());
-
-            deletedBlocksTable.putWithBatch(
-                batch, blockId,
-                new ChunkInfoList(chunkList));
             blockDataTable.deleteWithBatch(batch, entry);
           }
-
           int deleteBlockCount = succeedBlocks.size();
           containerData.updateAndCommitDBCounters(meta, batch,
               deleteBlockCount);
-
-
           // update count of pending deletion blocks and block count in
           // in-memory container status.
           containerData.decrPendingDeletionBlocks(deleteBlockCount);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index 2fb577c79f9..2eb6a394e06 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -20,8 +20,7 @@
 import java.io.File;
 import java.io.IOException;
-import java.time.Duration;
-import java.util.Iterator;
+import java.nio.ByteBuffer;
 import java.util.List;
 import java.util.Map;
 import java.util.UUID;
@@ -38,37 +37,40 @@
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.utils.BackgroundService;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
-import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.Checksum;
+import org.apache.hadoop.ozone.common.ChunkBuffer;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList;
+import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-import org.apache.hadoop.ozone.container.common.interfaces.Handler;
-import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
+import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
 import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerBlockStrategy;
+import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerChunkStrategy;
+import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
 import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.BlockDeletingService;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.container.testutils.BlockDeletingServiceTestImpl;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import static org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -76,12 +78,15 @@
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
-
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
+import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
+import static java.nio.charset.StandardCharsets.UTF_8;
 
 /**
  * Tests to test block deleting service.
@@ -92,9 +97,12 @@ public class TestBlockDeletingService {
   private static File testRoot;
   private static String scmId;
   private static String clusterID;
-  private Handler handler;
+  private static String datanodeUuid;
+  private static MutableConfigurationSource conf;
 
   private final ChunkLayOutVersion layout;
+  private int blockLimitPerTask;
+  private static VolumeSet volumeSet;
 
   public TestBlockDeletingService(ChunkLayOutVersion layout) {
     this.layout = layout;
@@ -114,6 +122,10 @@ public static void init() throws IOException {
     }
     scmId = UUID.randomUUID().toString();
     clusterID = UUID.randomUUID().toString();
+    conf = new OzoneConfiguration();
+    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
+    datanodeUuid = UUID.randomUUID().toString();
+    volumeSet = new MutableVolumeSet(datanodeUuid, conf);
   }
 
   @AfterClass
@@ -121,31 +133,45 @@ public static void cleanup() throws IOException {
     FileUtils.deleteDirectory(testRoot);
   }
 
+  private static final DispatcherContext WRITE_STAGE =
+      new DispatcherContext.Builder()
+          .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build();
+
+  private static final DispatcherContext COMMIT_STAGE =
+      new DispatcherContext.Builder()
+          .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build();
+
   /**
    * A helper method to create some blocks and put them under deletion
    * state for testing. This method directly updates container.db and
    * creates some fake chunk files for testing.
    */
   private void createToDeleteBlocks(ContainerSet containerSet,
-      MutableConfigurationSource conf, int numOfContainers,
+      int numOfContainers,
       int numOfBlocksPerContainer, int numOfChunksPerBlock)
       throws IOException {
+    ChunkManager chunkManager;
+    if (layout == FILE_PER_BLOCK) {
+      chunkManager = new FilePerBlockStrategy(true);
+    } else {
+      chunkManager = new FilePerChunkStrategy(true, null);
+    }
+    byte[] arr = randomAlphanumeric(1048576).getBytes(UTF_8);
+    ChunkBuffer buffer = ChunkBuffer.wrap(ByteBuffer.wrap(arr));
     for (int x = 0; x < numOfContainers; x++) {
-      conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
       long containerID = ContainerTestHelper.getTestContainerID();
-      KeyValueContainerData data = new KeyValueContainerData(containerID,
-          layout,
-          ContainerTestHelper.CONTAINER_MAX_SIZE, UUID.randomUUID().toString(),
-          UUID.randomUUID().toString());
+      KeyValueContainerData data =
+          new KeyValueContainerData(containerID, layout,
+              ContainerTestHelper.CONTAINER_MAX_SIZE,
+              UUID.randomUUID().toString(), datanodeUuid);
       data.closeContainer();
       KeyValueContainer container = new KeyValueContainer(data, conf);
-      container.create(new MutableVolumeSet(scmId, clusterID, conf),
+      container.create(volumeSet,
          new RoundRobinVolumeChoosingPolicy(), scmId);
       containerSet.addContainer(container);
       data = (KeyValueContainerData) containerSet.getContainer(
           containerID).getContainerData();
-
-      long blockLength = 100;
+      long chunkLength = 100;
       try(ReferenceCountedDB metadata = BlockUtils.getDB(data, conf)) {
         for (int j = 0; j < numOfBlocksPerContainer; j++) {
           BlockID blockID =
@@ -155,30 +181,35 @@ private void createToDeleteBlocks(ContainerSet containerSet,
           BlockData kd = new BlockData(blockID);
           List chunks = Lists.newArrayList();
           for (int k = 0; k < numOfChunksPerBlock; k++) {
+            final String chunkName = String.format("block.%d.chunk.%d", j, k);
+            final long offset = k * chunkLength;
             ContainerProtos.ChunkInfo info =
                 ContainerProtos.ChunkInfo.newBuilder()
-                    .setChunkName(blockID.getLocalID() + "_chunk_" + k)
-                    .setLen(blockLength)
-                    .setOffset(0)
+                    .setChunkName(chunkName)
+                    .setLen(chunkLength)
+                    .setOffset(offset)
                     .setChecksumData(Checksum.getNoChecksumDataProto())
                     .build();
             chunks.add(info);
+            ChunkInfo chunkInfo = new ChunkInfo(chunkName, offset, chunkLength);
+            ChunkBuffer chunkData = buffer.duplicate(0, (int) chunkLength);
+            chunkManager.writeChunk(container, blockID, chunkInfo, chunkData,
+                WRITE_STAGE);
+            chunkManager.writeChunk(container, blockID, chunkInfo, chunkData,
+                COMMIT_STAGE);
           }
           kd.setChunks(chunks);
           metadata.getStore().getBlockDataTable().put(
               deleteStateName, kd);
           container.getContainerData().incrPendingDeletionBlocks(1);
         }
-        container.getContainerData().setKeyCount(numOfBlocksPerContainer);
-        container.getContainerData().setBytesUsed(
-            blockLength * numOfBlocksPerContainer);
         // Set block count, bytes used and pending delete block count.
         metadata.getStore().getMetadataTable().put(
             OzoneConsts.BLOCK_COUNT, (long)numOfBlocksPerContainer);
         metadata.getStore().getMetadataTable().put(
             OzoneConsts.CONTAINER_BYTES_USED,
-            blockLength * numOfBlocksPerContainer);
+            chunkLength * numOfChunksPerBlock * numOfBlocksPerContainer);
         metadata.getStore().getMetadataTable().put(
             OzoneConsts.PENDING_DELETE_BLOCK_COUNT,
             (long)numOfBlocksPerContainer);
@@ -207,21 +238,23 @@ private int getUnderDeletionBlocksCount(ReferenceCountedDB meta)
         MetadataKeyFilters.getDeletingKeyFilter()).size();
   }
 
-  private int getDeletedBlocksCount(ReferenceCountedDB db) throws IOException {
-    return db.getStore().getDeletedBlocksTable()
-        .getRangeKVs(null, 100).size();
-  }
 
   @Test
   public void testBlockDeletion() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
     conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
     conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2);
+    this.blockLimitPerTask =
+        conf.getInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER,
+            OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT);
     ContainerSet containerSet = new ContainerSet();
-    createToDeleteBlocks(containerSet, conf, 1, 3, 1);
-
+    createToDeleteBlocks(containerSet, 1, 3, 1);
+    ContainerMetrics metrics = ContainerMetrics.create(conf);
+    KeyValueHandler keyValueHandler =
+        new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet,
+            metrics, c -> {
+        });
     BlockDeletingServiceTestImpl svc =
-        getBlockDeletingService(containerSet, conf);
+        getBlockDeletingService(containerSet, conf, keyValueHandler);
     svc.start();
     GenericTestUtils.waitFor(svc::isStarted, 100, 3000);
 
@@ -240,40 +273,43 @@ public void testBlockDeletion() throws Exception {
           .get(containerData.get(0).getContainerID()).getContainerData())
           .getDeleteTransactionId();
-
+      long containerSpace = containerData.get(0).getBytesUsed();
       // Number of deleted blocks in container should be equal to 0 before
       // block delete
+      Assert.assertEquals(0, transactionId);
 
       // Ensure there are 3 blocks under deletion and 0 deleted blocks
       Assert.assertEquals(3, getUnderDeletionBlocksCount(meta));
-      Assert.assertEquals(3,
-          meta.getStore().getMetadataTable()
-              .get(OzoneConsts.PENDING_DELETE_BLOCK_COUNT).longValue());
-      Assert.assertEquals(0, getDeletedBlocksCount(meta));
+      Assert.assertEquals(3, meta.getStore().getMetadataTable()
+          .get(OzoneConsts.PENDING_DELETE_BLOCK_COUNT).longValue());
+
+      // Container contains 3 blocks. So, space used by the container
+      // should be greater than zero.
+      Assert.assertTrue(containerSpace > 0);
 
       // An interval will delete 1 * 2 blocks
       deleteAndWait(svc, 1);
-      Assert.assertEquals(1, getUnderDeletionBlocksCount(meta));
-      Assert.assertEquals(2, getDeletedBlocksCount(meta));
-      deleteAndWait(svc, 2);
-      Assert.assertEquals(0, getUnderDeletionBlocksCount(meta));
-      Assert.assertEquals(3, getDeletedBlocksCount(meta));
+      // After the first interval 2 blocks will be deleted. Hence, the current
+      // space used by the container should be less than the space used by the
+      // container initially (before running the deletion service).
+      Assert.assertTrue(containerData.get(0).getBytesUsed() < containerSpace);
 
-      deleteAndWait(svc, 3);
-      Assert.assertEquals(0, getUnderDeletionBlocksCount(meta));
-      Assert.assertEquals(3, getDeletedBlocksCount(meta));
+      deleteAndWait(svc, 2);
+      // After deletion of all 3 blocks, the space used by the container
+      // should be zero.
+      containerSpace = containerData.get(0).getBytesUsed();
+      Assert.assertTrue(containerSpace == 0);
 
       // Check finally DB counters.
       // Not checking bytes used, as handler is a mock call.
+      Assert.assertEquals(0, meta.getStore().getMetadataTable()
+          .get(OzoneConsts.PENDING_DELETE_BLOCK_COUNT).longValue());
       Assert.assertEquals(0,
-          meta.getStore().getMetadataTable()
-              .get(OzoneConsts.PENDING_DELETE_BLOCK_COUNT).longValue());
-      Assert.assertEquals(0,
-          meta.getStore().getMetadataTable()
-              .get(OzoneConsts.BLOCK_COUNT).longValue());
+          meta.getStore().getMetadataTable().get(OzoneConsts.BLOCK_COUNT)
+              .longValue());
     }
 
     svc.shutdown();
@@ -282,19 +318,20 @@ public void testBlockDeletion() throws Exception {
   @Test
   @SuppressWarnings("java:S2699") // waitFor => assertion with timeout
   public void testShutdownService() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    DatanodeConfiguration datanodeConfiguration = conf.getObject(
-        DatanodeConfiguration.class);
-    datanodeConfiguration.setBlockDeletionInterval(Duration.ofMillis(500));
-    conf.setFromObject(datanodeConfiguration);
+    conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 500,
+        TimeUnit.MILLISECONDS);
     conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
     conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 10);
     ContainerSet containerSet = new ContainerSet();
     // Create 1 container with 100 blocks
-    createToDeleteBlocks(containerSet, conf, 1, 100, 1);
-
+    createToDeleteBlocks(containerSet, 1, 100, 1);
+    ContainerMetrics metrics = ContainerMetrics.create(conf);
+    KeyValueHandler keyValueHandler =
+        new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet,
+            metrics, c -> {
+        });
     BlockDeletingServiceTestImpl service =
-        getBlockDeletingService(containerSet, conf);
+        getBlockDeletingService(containerSet, conf, keyValueHandler);
     service.start();
     GenericTestUtils.waitFor(service::isStarted, 100, 3000);
 
@@ -309,15 +346,19 @@ public void testShutdownService() throws Exception {
 
   @Test
   public void testBlockDeletionTimeout() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
     conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
     conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2);
     ContainerSet containerSet = new ContainerSet();
-    createToDeleteBlocks(containerSet, conf, 1, 3, 1);
-
+    createToDeleteBlocks(containerSet, 1, 3, 1);
+    ContainerMetrics metrics = ContainerMetrics.create(conf);
+    KeyValueHandler keyValueHandler =
+        new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet,
+            metrics, c -> {
+        });
     // set timeout value as 1ns to trigger timeout behavior
     long timeout = 1;
-    OzoneContainer ozoneContainer = mockDependencies(containerSet);
+    OzoneContainer ozoneContainer =
+        mockDependencies(containerSet, keyValueHandler);
     BlockDeletingService svc = new BlockDeletingService(ozoneContainer,
         TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.NANOSECONDS,
         conf);
@@ -338,7 +379,7 @@ public void testBlockDeletionTimeout() throws Exception {
 
     // test for normal case that doesn't have timeout limitation
     timeout = 0;
-    createToDeleteBlocks(containerSet, conf, 1, 3, 1);
+    createToDeleteBlocks(containerSet, 1, 3, 1);
     svc = new BlockDeletingService(ozoneContainer,
         TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.MILLISECONDS,
         conf);
@@ -369,19 +410,21 @@ public void testBlockDeletionTimeout() throws Exception {
   }
 
   private BlockDeletingServiceTestImpl getBlockDeletingService(
-      ContainerSet containerSet, ConfigurationSource conf) {
-    OzoneContainer ozoneContainer = mockDependencies(containerSet);
-    return new BlockDeletingServiceTestImpl(ozoneContainer, 1000, conf);
+      ContainerSet containerSet, ConfigurationSource config,
+      KeyValueHandler keyValueHandler) {
+    OzoneContainer ozoneContainer =
+        mockDependencies(containerSet, keyValueHandler);
+    return new BlockDeletingServiceTestImpl(ozoneContainer, 1000, config);
   }
 
-  private OzoneContainer mockDependencies(ContainerSet containerSet) {
+  private OzoneContainer mockDependencies(ContainerSet containerSet,
+      KeyValueHandler keyValueHandler) {
     OzoneContainer ozoneContainer = mock(OzoneContainer.class);
     when(ozoneContainer.getContainerSet()).thenReturn(containerSet);
     when(ozoneContainer.getWriteChannel()).thenReturn(null);
     ContainerDispatcher dispatcher = mock(ContainerDispatcher.class);
     when(ozoneContainer.getDispatcher()).thenReturn(dispatcher);
-    handler = mock(KeyValueHandler.class);
-    when(dispatcher.getHandler(any())).thenReturn(handler);
+    when(dispatcher.getHandler(any())).thenReturn(keyValueHandler);
     return ozoneContainer;
   }
 
@@ -396,7 +439,6 @@ public void testContainerThrottle() throws Exception {
     //
     // Each time only 1 container can be processed, so each time
     // 1 block from 1 container can be deleted.
-    OzoneConfiguration conf = new OzoneConfiguration();
     // Process 1 container per interval
     conf.set(
         ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY,
@@ -404,28 +446,54 @@ public void testContainerThrottle() throws Exception {
     conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 1);
     conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 1);
     ContainerSet containerSet = new ContainerSet();
+
     int containerCount = 2;
     int chunksPerBlock = 10;
     int blocksPerContainer = 1;
-    createToDeleteBlocks(containerSet, conf, containerCount, blocksPerContainer,
+    createToDeleteBlocks(containerSet, containerCount, blocksPerContainer,
         chunksPerBlock);
-
+    ContainerMetrics metrics = ContainerMetrics.create(conf);
+    KeyValueHandler keyValueHandler =
+        new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet,
+            metrics, c -> {
+        });
     BlockDeletingServiceTestImpl service =
-        getBlockDeletingService(containerSet, conf);
+        getBlockDeletingService(containerSet, conf, keyValueHandler);
     service.start();
-
+    List containerData = Lists.newArrayList();
+    containerSet.listContainer(0L, containerCount, containerData);
     try {
       GenericTestUtils.waitFor(service::isStarted, 100, 3000);
-      for (int i = 1; i <= containerCount; i++) {
-        deleteAndWait(service, i);
-        verify(handler, times(i * blocksPerContainer))
-            .deleteBlock(any(), any());
-      }
+
+      // Deleting one of the two containers and its single block.
+      // Hence, the space used by the container whose block has been
+      // deleted should be zero.
+      deleteAndWait(service, 1);
+      Assert.assertTrue((containerData.get(0).getBytesUsed() == 0)
+          || containerData.get(1).getBytesUsed() == 0);
+
+      Assert.assertFalse((containerData.get(0).getBytesUsed() == 0) && (
+          containerData.get(1).getBytesUsed() == 0));
+
+      // Deleting the second container. Hence, space used by both the
+      // containers should be zero.
+      deleteAndWait(service, 2);
+
+      Assert.assertTrue((containerData.get(0).getBytesUsed() == 0) && (
+          containerData.get(1).getBytesUsed() == 0));
     } finally {
       service.shutdown();
     }
   }
 
+  public long currentBlockSpace(List containerData,
+      int totalContainers) {
+    long totalSpaceUsed = 0;
+    for (int i = 0; i < totalContainers; i++) {
+      totalSpaceUsed += containerData.get(i).getBytesUsed();
+    }
+    return totalSpaceUsed;
+  }
 
   @Test(timeout = 30000)
   public void testBlockThrottle() throws Exception {
@@ -439,92 +507,54 @@ public void testBlockThrottle() throws Exception {
     // A huge number of containers
    // Each time containers can be all scanned, but only 2 blocks
     // per container can be actually deleted. So it requires 2 waves
     // to cleanup all blocks.
-    OzoneConfiguration conf = new OzoneConfiguration();
     conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
-    int blockLimitPerTask = 2;
+    blockLimitPerTask = 2;
     conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, blockLimitPerTask);
     ContainerSet containerSet = new ContainerSet();
+    ContainerMetrics metrics = ContainerMetrics.create(conf);
+    KeyValueHandler keyValueHandler =
+        new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet,
+            metrics, c -> {
+        });
     int containerCount = 5;
     int blocksPerContainer = 3;
-    createToDeleteBlocks(containerSet, conf, containerCount,
+    createToDeleteBlocks(containerSet, containerCount,
         blocksPerContainer, 1);
 
     BlockDeletingServiceTestImpl service =
-        getBlockDeletingService(containerSet, conf);
+        getBlockDeletingService(containerSet, conf, keyValueHandler);
     service.start();
-
+    List containerData = Lists.newArrayList();
+    containerSet.listContainer(0L, containerCount, containerData);
+    long blockSpace = containerData.get(0).getBytesUsed() / blocksPerContainer;
+    long totalContainerSpace =
+        containerCount * containerData.get(0).getBytesUsed();
     try {
       GenericTestUtils.waitFor(service::isStarted, 100, 3000);
       // Total blocks = 3 * 5 = 15
       // block per task = 2
       // number of containers = 5
       // each interval will at most runDeletingTasks 5 * 2 = 10 blocks
+
+      // Deleted space of 10 blocks should be equal to (initial total space
+      // of container - current total space of container).
       deleteAndWait(service, 1);
-      verify(handler, times(blockLimitPerTask * containerCount))
-          .deleteBlock(any(), any());
+      Assert.assertEquals(blockLimitPerTask * containerCount * blockSpace,
+          (totalContainerSpace - currentBlockSpace(containerData,
+              containerCount)));
 
       // There is only 5 blocks left to runDeletingTasks
+
+      // (Deleted space of previous 10 blocks + these left 5 blocks) should
+      // be equal to (initial total space of container
+      // - current total space of container (it will be zero as all blocks
+      // in all the containers are deleted)).
       deleteAndWait(service, 2);
-      verify(handler, times(
-          blocksPerContainer * containerCount))
-          .deleteBlock(any(), any());
+      Assert.assertEquals(blocksPerContainer * containerCount * blockSpace,
+          (totalContainerSpace - currentBlockSpace(containerData,
+              containerCount)));
     } finally {
       service.shutdown();
     }
   }
-
-  @Test
-  public void testDeletedChunkInfo() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
-    conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2);
-    ContainerSet containerSet = new ContainerSet();
-    createToDeleteBlocks(containerSet, conf, 1, 2, 3);
-
-    List containerData = Lists.newArrayList();
-    containerSet.listContainer(0L, 1, containerData);
-
-    try(ReferenceCountedDB meta = BlockUtils.getDB(
-        (KeyValueContainerData) containerData.get(0), conf)) {
-
-      // Collect all ChunkInfo from blocks marked for deletion.
-      List<Table.KeyValue<String, BlockData>> deletingBlocks =
-          meta.getStore().getBlockDataTable()
-              .getRangeKVs(null, 100,
-                  MetadataKeyFilters.getDeletingKeyFilter());
-
-      // Delete all blocks marked for deletion.
-      BlockDeletingServiceTestImpl svc =
-          getBlockDeletingService(containerSet, conf);
-      svc.start();
-      GenericTestUtils.waitFor(svc::isStarted, 100, 3000);
-      deleteAndWait(svc, 1);
-      svc.shutdown();
-
-      // Get deleted blocks from their table, and check their ChunkInfo lists
-      // against those we saved for them before deletion.
-      List<Table.KeyValue<String, ChunkInfoList>> deletedBlocks =
-          meta.getStore().getDeletedBlocksTable()
-              .getRangeKVs(null, 100);
-
-      Assert.assertEquals(deletingBlocks.size(), deletedBlocks.size());
-
-      Iterator<Table.KeyValue<String, BlockData>>
-          deletingBlocksIter = deletingBlocks.iterator();
-      Iterator<Table.KeyValue<String, ChunkInfoList>>
-          deletedBlocksIter = deletedBlocks.iterator();
-
-      while(deletingBlocksIter.hasNext() && deletedBlocksIter.hasNext()) {
-        List<ContainerProtos.ChunkInfo> deletingChunks =
-            deletingBlocksIter.next().getValue().getChunks();
-        List<ContainerProtos.ChunkInfo> deletedChunks =
-            deletedBlocksIter.next().getValue().asList();
-
-        // On each element of each list, this call uses the equals method
-        // for ChunkInfos generated by protobuf.
-        // This checks their internal fields for equality.
-        Assert.assertEquals(deletingChunks, deletedChunks);
-      }
-    }
-  }
 }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
index 01fa3bf372c..00ebcb01120 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
@@ -27,11 +27,14 @@
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
+import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
+import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
@@ -52,11 +55,19 @@
 import java.io.File;
 import java.io.IOException;
 import java.net.URL;
-import java.util.*;
+import java.util.List;
+import java.util.UUID;
+import java.util.ArrayList;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.Arrays;
 import java.util.stream.Collectors;
 
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -225,8 +236,22 @@ public void testReadWithoutMetadata() throws Exception {
   @Test
   public void testDelete() throws Exception {
     final long numBlocksToDelete = TestDB.NUM_PENDING_DELETION_BLOCKS;
+    String datanodeUuid = UUID.randomUUID().toString();
+    ContainerSet containerSet = makeContainerSet();
+    VolumeSet volumeSet = new MutableVolumeSet(datanodeUuid, conf);
+    ContainerMetrics metrics = ContainerMetrics.create(conf);
+    KeyValueHandler keyValueHandler =
+        new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet,
+            metrics, c -> {
+        });
+    long initialTotalSpace = newKvData().getBytesUsed();
+    long blockSpace = initialTotalSpace / TestDB.KEY_COUNT;
+
+    runBlockDeletingService(keyValueHandler);
 
-    runBlockDeletingService();
+    long currentTotalSpace = newKvData().getBytesUsed();
+    long numberOfBlocksDeleted =
+        (initialTotalSpace - currentTotalSpace) / blockSpace;
 
     // Expected values after blocks with #deleting# prefix in original DB are
     // deleted.
@@ -242,7 +267,7 @@ public void testDelete() throws Exception {
       assertEquals(expectedDeletingBlocks,
           countDeletingBlocks(refCountedDB));
       assertEquals(expectedDeletedBlocks,
-          countDeletedBlocks(refCountedDB));
+          TestDB.NUM_DELETED_BLOCKS + numberOfBlocksDeleted);
       assertEquals(expectedRegularBlocks,
           countUnprefixedBlocks(refCountedDB));
 
@@ -269,6 +294,14 @@ public void testDelete() throws Exception {
    */
   @Test
   public void testReadDeletedBlockChunkInfo() throws Exception {
+    String datanodeUuid = UUID.randomUUID().toString();
+    ContainerSet containerSet = makeContainerSet();
+    VolumeSet volumeSet = new MutableVolumeSet(datanodeUuid, conf);
+    ContainerMetrics metrics = ContainerMetrics.create(conf);
+    KeyValueHandler keyValueHandler =
+        new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet,
+            metrics, c -> {
+        });
     try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
       // Read blocks that were already deleted before the upgrade.
       List<Table.KeyValue<String, ChunkInfoList>> deletedBlocks =
@@ -290,25 +323,22 @@ public void testReadDeletedBlockChunkInfo() throws Exception {
 
       Assert.assertEquals(TestDB.NUM_DELETED_BLOCKS, preUpgradeBlocks.size());
 
-      runBlockDeletingService();
+      long initialTotalSpace = newKvData().getBytesUsed();
+      long blockSpace = initialTotalSpace / TestDB.KEY_COUNT;
 
-      // After the block deleting service runs, get the updated list of
-      // deleted blocks.
-      deletedBlocks = refCountedDB.getStore()
-          .getDeletedBlocksTable().getRangeKVs(null, 100);
+      runBlockDeletingService(keyValueHandler);
 
-      int numPostUpgradeDeletesFound = 0;
-      for(Table.KeyValue<String, ChunkInfoList> chunkListKV: deletedBlocks) {
-        if (!preUpgradeBlocks.contains(chunkListKV.getKey())) {
-          numPostUpgradeDeletesFound++;
-          Assert.assertNotNull(chunkListKV.getValue());
-        }
-      }
+      long currentTotalSpace = newKvData().getBytesUsed();
+
+      // After the block deleting service runs, get the number of
+      // deleted blocks.
+      long numberOfBlocksDeleted =
+          (initialTotalSpace - currentTotalSpace) / blockSpace;
 
       // The blocks that were originally marked for deletion should now be
       // deleted.
       Assert.assertEquals(TestDB.NUM_PENDING_DELETION_BLOCKS,
-          numPostUpgradeDeletesFound);
+          numberOfBlocksDeleted);
     }
   }
 
@@ -448,21 +478,22 @@ public void testReadDeletedBlocks() throws Exception {
     }
   }
 
-  private void runBlockDeletingService() throws Exception {
+  private void runBlockDeletingService(KeyValueHandler keyValueHandler)
+      throws Exception {
     conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
     conf.setInt(OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2);
     conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY,
-            metadataDir.getAbsolutePath());
+        metadataDir.getAbsolutePath());
 
-    OzoneContainer container = makeMockOzoneContainer();
+    OzoneContainer container = makeMockOzoneContainer(keyValueHandler);
     BlockDeletingServiceTestImpl service =
-            new BlockDeletingServiceTestImpl(container, 1000, conf);
+        new BlockDeletingServiceTestImpl(container, 1000, conf);
     service.start();
     GenericTestUtils.waitFor(service::isStarted, 100, 3000);
     service.runDeletingTasks();
-    GenericTestUtils.waitFor(() -> service.getTimesOfProcessed() == 1,
-        100, 3000);
+    GenericTestUtils
        .waitFor(() -> service.getTimesOfProcessed() == 1, 100, 3000);
   }
 
   private ContainerSet makeContainerSet() throws Exception {
@@ -473,7 +504,8 @@ private ContainerSet makeContainerSet() throws Exception {
     return containerSet;
   }
 
-  private OzoneContainer makeMockOzoneContainer() throws Exception {
+  private OzoneContainer makeMockOzoneContainer(KeyValueHandler keyValueHandler)
+      throws Exception {
     ContainerSet containerSet = makeContainerSet();
 
     OzoneContainer ozoneContainer = mock(OzoneContainer.class);
@@ -481,8 +513,7 @@ private OzoneContainer makeMockOzoneContainer() throws Exception {
     when(ozoneContainer.getWriteChannel()).thenReturn(null);
     ContainerDispatcher dispatcher = mock(ContainerDispatcher.class);
     when(ozoneContainer.getDispatcher()).thenReturn(dispatcher);
-    KeyValueHandler handler = mock(KeyValueHandler.class);
-    when(dispatcher.getHandler(any())).thenReturn(handler);
+    when(dispatcher.getHandler(any())).thenReturn(keyValueHandler);
     return ozoneContainer;
   }