diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
index b130f48776c1..e04956c8f638 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
@@ -248,21 +248,21 @@ public enum ChecksumCombineMode {
   private String fsDefaultBucketLayout = "FILE_SYSTEM_OPTIMIZED";

   @Config(key = "incremental.chunk.list",
-      defaultValue = "false",
+      defaultValue = "true",
       type = ConfigType.BOOLEAN,
       description = "Client PutBlock request can choose incremental chunk " +
           "list rather than full chunk list to optimize performance. " +
-          "Critical to HBase.",
+          "Critical to HBase. EC does not support this feature.",
       tags = ConfigTag.CLIENT)
   private boolean incrementalChunkList = true;

   @Config(key = "stream.putblock.piggybacking",
-      defaultValue = "false",
+      defaultValue = "true",
       type = ConfigType.BOOLEAN,
       description = "Allow PutBlock to be piggybacked in WriteChunk " +
           "requests if the chunk is small.",
       tags = ConfigTag.CLIENT)
-  private boolean enablePutblockPiggybacking = false;
+  private boolean enablePutblockPiggybacking = true;

   @PostConstruct
   public void validate() {
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
index aab70a692e30..0c0a4ffe1510 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
@@ -149,6 +149,7 @@ public class BlockOutputStream extends OutputStream {
   private Pipeline pipeline;
   private final ContainerClientMetrics clientMetrics;
   private boolean allowPutBlockPiggybacking;
+  private boolean supportIncrementalChunkList;

   private CompletableFuture lastFlushFuture;
   private CompletableFuture allPendingFlushFutures = CompletableFuture.completedFuture(null);
@@ -189,8 +190,13 @@ public BlockOutputStream(
     }
     this.containerBlockData = BlockData.newBuilder().setBlockID(
         blkIDBuilder.build()).addMetadata(keyValue);
+    this.pipeline = pipeline;
     // tell DataNode I will send incremental chunk list
-    if (config.getIncrementalChunkList()) {
+    // EC does not support incremental chunk list.
+    this.supportIncrementalChunkList = config.getIncrementalChunkList() &&
+        this instanceof RatisBlockOutputStream && allDataNodesSupportPiggybacking();
+    LOG.debug("incrementalChunkList is {}", supportIncrementalChunkList);
+    if (supportIncrementalChunkList) {
       this.containerBlockData.addMetadata(INCREMENTAL_CHUNK_LIST_KV);
       this.lastChunkBuffer = DIRECT_BUFFER_POOL.getBuffer(config.getStreamBufferSize());
       this.lastChunkOffset = 0;
@@ -223,16 +229,17 @@ public BlockOutputStream(
     checksum = new Checksum(config.getChecksumType(), config.getBytesPerChecksum());
     this.clientMetrics = clientMetrics;
-    this.pipeline = pipeline;
     this.streamBufferArgs = streamBufferArgs;
     this.allowPutBlockPiggybacking = config.getEnablePutblockPiggybacking() &&
         allDataNodesSupportPiggybacking();
+    LOG.debug("PutBlock piggybacking is {}", allowPutBlockPiggybacking);
   }

   private boolean allDataNodesSupportPiggybacking() {
     // return true only if all DataNodes in the pipeline are on a version
     // that supports PutBlock piggybacking.
     for (DatanodeDetails dn : pipeline.getNodes()) {
+      LOG.debug("dn = {}, version = {}", dn, dn.getCurrentVersion());
       if (dn.getCurrentVersion() <
           COMBINED_PUTBLOCK_WRITECHUNK_RPC.toProtoValue()) {
         return false;
@@ -532,7 +539,7 @@ CompletableFuture executePutBlock(boolean close,
     BlockData blockData = containerBlockData.build();
     LOG.debug("sending PutBlock {}", blockData);
-    if (config.getIncrementalChunkList()) {
+    if (supportIncrementalChunkList) {
       // remove any chunks in the containerBlockData list.
       // since they are sent.
       containerBlockData.clearChunks();
@@ -866,7 +873,7 @@ private CompletableFuture writeChunkToContainer(
     try {
       BlockData blockData = null;
-      if (config.getIncrementalChunkList()) {
+      if (supportIncrementalChunkList) {
         updateBlockDataForWriteChunk(chunk);
       } else {
         containerBlockData.addChunks(chunkInfo);
@@ -880,7 +887,7 @@ private CompletableFuture writeChunkToContainer(
       blockData = containerBlockData.build();
       LOG.debug("piggyback chunk list {}", blockData);
-      if (config.getIncrementalChunkList()) {
+      if (supportIncrementalChunkList) {
         // remove any chunks in the containerBlockData list.
         // since they are sent.
         containerBlockData.clearChunks();
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
index e5ffbd024b1f..01f508d257c1 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
@@ -391,6 +391,9 @@ public static DatanodeDetails.Builder newBuilder(
     }
     if (datanodeDetailsProto.hasCurrentVersion()) {
       builder.setCurrentVersion(datanodeDetailsProto.getCurrentVersion());
+    } else {
+      // fallback to version 1 if not present
+      builder.setCurrentVersion(DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE.toProtoValue());
     }
     return builder;
   }
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java
index b05deaa0d668..aeb1e207e704 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/protocol/TestDatanodeDetails.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdds.protocol;

+import org.apache.hadoop.hdds.DatanodeVersion;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.junit.jupiter.api.Test;
@@ -48,6 +49,24 @@ void protoIncludesNewPortsOnlyForV1() {
     assertPorts(protoV1, ALL_PORTS);
   }

+  @Test
+  public void testNewBuilderCurrentVersion() {
+    // test that if the current version is not set (Ozone 1.4.0 and earlier),
+    // it falls back to SEPARATE_RATIS_PORTS_AVAILABLE
+    DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
+    HddsProtos.DatanodeDetailsProto.Builder protoBuilder =
+        dn.toProtoBuilder(DEFAULT_VERSION.toProtoValue());
+    protoBuilder.clearCurrentVersion();
+    DatanodeDetails dn2 = DatanodeDetails.newBuilder(protoBuilder.build()).build();
+    assertEquals(DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE.toProtoValue(), dn2.getCurrentVersion());
+
+    // test that if the current version is set, it is used
+    protoBuilder =
+        dn.toProtoBuilder(DEFAULT_VERSION.toProtoValue());
+    DatanodeDetails dn3 = DatanodeDetails.newBuilder(protoBuilder.build()).build();
+    assertEquals(DatanodeVersion.CURRENT.toProtoValue(), dn3.getCurrentVersion());
+  }
+
   public static void assertPorts(HddsProtos.DatanodeDetailsProto dn,
       Set expectedPorts) throws IllegalArgumentException {
     assertEquals(expectedPorts.size(), dn.getPortsCount());
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
index 03050308a946..5118f4ba5650 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
@@ -35,6 +35,7 @@
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStoreWithIncrementalChunkList;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -501,8 +502,6 @@ static PendingDelete countPendingDeletesSchemaV2(
     Table delTxTable =
         schemaTwoStore.getDeleteTransactionTable();
-    final Table blockDataTable
-        = schemaTwoStore.getBlockDataTable();
     try (TableIterator<Long, ? extends Table.KeyValue<Long, DeletedBlocksTransaction>>
             iterator = delTxTable.iterator()) {
@@ -515,7 +514,7 @@ static PendingDelete countPendingDeletesSchemaV2(
           // counted towards bytes used and total block count above.
           pendingDeleteBlockCountTotal += localIDs.size();
           pendingDeleteBytes += computePendingDeleteBytes(
-              localIDs, containerData, blockDataTable);
+              localIDs, containerData, schemaTwoStore);
         }
       }
@@ -525,12 +524,12 @@
   static long computePendingDeleteBytes(List localIDs,
       KeyValueContainerData containerData,
-      Table blockDataTable) {
+      DatanodeStoreWithIncrementalChunkList store) {
     long pendingDeleteBytes = 0;
     for (long id : localIDs) {
       try {
         final String blockKey = containerData.getBlockKey(id);
-        final BlockData blockData = blockDataTable.get(blockKey);
+        final BlockData blockData = store.getBlockByID(null, blockKey);
         if (blockData != null) {
           pendingDeleteBytes += blockData.getSize();
         }
@@ -544,23 +543,21 @@ static long computePendingDeleteBytes(List localIDs,
   }

   static PendingDelete countPendingDeletesSchemaV3(
-      DatanodeStoreSchemaThreeImpl schemaThreeStore,
+      DatanodeStoreSchemaThreeImpl store,
       KeyValueContainerData containerData) throws IOException {
     long pendingDeleteBlockCountTotal = 0;
     long pendingDeleteBytes = 0;
-    final Table blockDataTable
-        = schemaThreeStore.getBlockDataTable();
     try (
         TableIterator<String, ? extends Table.KeyValue<String, DeletedBlocksTransaction>>
            iter = store.getDeleteTransactionTable()
            .iterator(containerData.containerPrefix())) {
       while (iter.hasNext()) {
         DeletedBlocksTransaction delTx = iter.next().getValue();
         final List localIDs = delTx.getLocalIDList();
         pendingDeleteBlockCountTotal += localIDs.size();
         pendingDeleteBytes += computePendingDeleteBytes(
-            localIDs, containerData, blockDataTable);
+            localIDs, containerData, store);
       }
     }
     return new PendingDelete(pendingDeleteBlockCountTotal,
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index dd18636ec00e..7b3852011d3d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -343,7 +343,7 @@ public List listBlock(Container container, long startLocalID, int
           .getSequentialRangeKVs(startKey, count,
               cData.containerPrefix(), cData.getUnprefixedKeyFilter());
       for (Table.KeyValue entry : range) {
-        result.add(entry.getValue());
+        result.add(db.getStore().getCompleteBlockData(entry.getValue(), null, entry.getKey()));
       }
       return result;
     }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java
index af500b2b6b8d..eb7b6e7378a6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingTask.java
@@ -327,6 +327,8 @@ private ContainerBackgroundTaskResult deleteViaTransactionStore(
     try {
       Table blockDataTable =
           meta.getStore().getBlockDataTable();
+      Table lastChunkInfoTable =
+          meta.getStore().getLastChunkInfoTable();
       DeleteTransactionStore txnStore =
           (DeleteTransactionStore) meta.getStore();
       Table deleteTxns =
@@ -376,8 +378,11 @@ private ContainerBackgroundTaskResult deleteViaTransactionStore(
       for (DeletedBlocksTransaction delTx : deletedBlocksTxs) {
         deleter.apply(deleteTxns, batch, delTx.getTxID());
         for (Long blk : delTx.getLocalIDList()) {
+          // delete from both blockDataTable and lastChunkInfoTable.
           blockDataTable.deleteWithBatch(batch, containerData.getBlockKey(blk));
+          lastChunkInfoTable.deleteWithBatch(batch,
+              containerData.getBlockKey(blk));
         }
       }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java
index 35f8bf832227..d791d9bbeab3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java
@@ -135,6 +135,11 @@ default BlockData getBlockByID(BlockID blockID,
     // check block data table
     BlockData blockData = getBlockDataTable().get(blockKey);

+    return getCompleteBlockData(blockData, blockID, blockKey);
+  }
+
+  default BlockData getCompleteBlockData(BlockData blockData,
+      BlockID blockID, String blockKey) throws IOException {
     if (blockData == null) {
       throw new StorageContainerException(
           NO_SUCH_BLOCK_ERR_MSG + " BlockID : " + blockID, NO_SUCH_BLOCK);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java
index 1f5c442601b8..2b34fae73989 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java
@@ -20,6 +20,7 @@
 import org.apache.hadoop.hdds.StringUtils;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.FixedLengthStringCodec;
@@ -31,6 +32,7 @@
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
+import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
 import org.rocksdb.LiveFileMetaData;

 import java.io.File;
@@ -106,6 +108,9 @@ public void removeKVContainerData(long containerID) throws IOException {
     try (BatchOperation batch = getBatchHandler().initBatchOperation()) {
       getMetadataTable().deleteBatchWithPrefix(batch, prefix);
       getBlockDataTable().deleteBatchWithPrefix(batch, prefix);
+      if (VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.HBASE_SUPPORT)) {
+        getLastChunkInfoTable().deleteBatchWithPrefix(batch, prefix);
+      }
       getDeleteTransactionTable().deleteBatchWithPrefix(batch, prefix);
       getBatchHandler().commitBatchOperation(batch);
     }
@@ -118,6 +123,10 @@ public void dumpKVContainerData(long containerID, File dumpDir)
         getTableDumpFile(getMetadataTable(), dumpDir), prefix);
     getBlockDataTable().dumpToFileWithPrefix(
         getTableDumpFile(getBlockDataTable(), dumpDir), prefix);
+    if (VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.HBASE_SUPPORT)) {
+      getLastChunkInfoTable().dumpToFileWithPrefix(
+          getTableDumpFile(getLastChunkInfoTable(), dumpDir), prefix);
+    }
     getDeleteTransactionTable().dumpToFileWithPrefix(
         getTableDumpFile(getDeleteTransactionTable(), dumpDir), prefix);
@@ -129,6 +138,10 @@ public void loadKVContainerData(File dumpDir)
         getTableDumpFile(getMetadataTable(), dumpDir));
     getBlockDataTable().loadFromFile(
         getTableDumpFile(getBlockDataTable(), dumpDir));
+    if (VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.HBASE_SUPPORT)) {
+      getLastChunkInfoTable().loadFromFile(
+          getTableDumpFile(getLastChunkInfoTable(), dumpDir));
+    }
     getDeleteTransactionTable().loadFromFile(
         getTableDumpFile(getDeleteTransactionTable(), dumpDir));
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreWithIncrementalChunkList.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreWithIncrementalChunkList.java
index d3fa7584f360..a71bb93963a5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreWithIncrementalChunkList.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreWithIncrementalChunkList.java
@@ -56,11 +56,9 @@ public DatanodeStoreWithIncrementalChunkList(ConfigurationSource config,

   @Override
-  public BlockData getBlockByID(BlockID blockID,
-      String blockKey) throws IOException {
+  public BlockData getCompleteBlockData(BlockData blockData,
+      BlockID blockID, String blockKey) throws IOException {
     BlockData lastChunk = null;
-    // check block data table
-    BlockData blockData = getBlockDataTable().get(blockKey);
     if (blockData == null || isPartialChunkList(blockData)) {
       // check last chunk table
       lastChunk = getLastChunkInfoTable().get(blockKey);
@@ -190,18 +188,29 @@ private void moveLastChunkToBlockData(BatchOperation batch, long localID,

   private void putBlockWithPartialChunks(BatchOperation batch, long localID,
       BlockData data, KeyValueContainerData containerData) throws IOException {
+    String blockKey = containerData.getBlockKey(localID);
+    BlockData blockData = getBlockDataTable().get(blockKey);
     if (data.getChunks().size() == 1) {
       // Case (3.1) replace/update the last chunk info table
       getLastChunkInfoTable().putWithBatch(
-          batch, containerData.getBlockKey(localID), data);
+          batch, blockKey, data);
+      // If the block does not exist in the block data table because it is the first chunk
+      // and the chunk is not full, then add an empty block data to populate the block table.
+      // This is required because some of the test code and utilities expect the block to be
+      // present in the block data table; they don't check the last chunk info table.
+      if (blockData == null) {
+        // populate blockDataTable with empty chunk list
+        blockData = new BlockData(data.getBlockID());
+        blockData.addMetadata(INCREMENTAL_CHUNK_LIST, "");
+        blockData.setBlockCommitSequenceId(data.getBlockCommitSequenceId());
+        getBlockDataTable().putWithBatch(batch, blockKey, blockData);
+      }
     } else {
       int lastChunkIndex = data.getChunks().size() - 1;
       // received more than one chunk this time
       List lastChunkInfo = Collections.singletonList(
           data.getChunks().get(lastChunkIndex));
-      BlockData blockData = getBlockDataTable().get(
-          containerData.getBlockKey(localID));
       if (blockData == null) {
         // Case 3.2: if the block does not exist in the block data table
         List chunkInfos =
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
index 98d7388310b3..addf3396eff5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java
@@ -32,6 +32,7 @@
 import java.util.UUID;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.stream.Stream;
@@ -288,19 +289,49 @@ public void testKeyMetadata() throws Exception {

       // Clean up
       assertTrue(fs.delete(file, false));
-      // Wait for KeyDeletingService to finish to avoid interfering other tests
-      Table deletedTable = omMetadataManager.getDeletedTable();
-      GenericTestUtils.waitFor(
-          () -> {
-            try {
-              return deletedTable.isEmpty();
-            } catch (IOException e) {
-              return false;
-            }
-          }, 250, 10000);
+      waitForEmptyDeletedTable();
     }
   }

+  private void waitForEmptyDeletedTable()
+      throws TimeoutException, InterruptedException {
+    // Wait for KeyDeletingService to finish to avoid interfering with other tests
+    OMMetadataManager omMetadataManager =
+        cluster.getOzoneManager().getMetadataManager();
+    Table deletedTable = omMetadataManager.getDeletedTable();
+    GenericTestUtils.waitFor(
+        () -> {
+          try {
+            return deletedTable.isEmpty();
+          } catch (IOException e) {
+            return false;
+          }
+        }, 250, 10000);
+  }
+
+  @Test
+  public void testEmptyHsync() throws Exception {
+    // Check that deletedTable should not have keys with the same block as in
+    // keyTable's when a key is hsync()'ed then close()'d.
+
+    // Set the fs.defaultFS
+    final String rootPath = String.format("%s://%s/",
+        OZONE_OFS_URI_SCHEME, CONF.get(OZONE_OM_ADDRESS_KEY));
+    CONF.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+
+    final String dir = OZONE_ROOT + bucket.getVolumeName()
+        + OZONE_URI_DELIMITER + bucket.getName();
+
+    final Path file = new Path(dir, "file-hsync-empty");
+    try (FileSystem fs = FileSystem.get(CONF)) {
+      try (FSDataOutputStream outputStream = fs.create(file, true)) {
+        outputStream.write(new byte[0], 0, 0);
+        outputStream.hsync();
+      }
+    }
+    waitForEmptyDeletedTable();
+  }
+
   @Test
   public void testKeyHSyncThenClose() throws Exception {
     // Check that deletedTable should not have keys with the same block as in
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/ClusterContainersUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/ClusterContainersUtil.java
index e7e0337b5f9f..bf20b4ecc0b1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/ClusterContainersUtil.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/ClusterContainersUtil.java
@@ -19,12 +19,12 @@

 import com.google.common.base.Preconditions;
 import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneKeyDetails;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
@@ -65,18 +65,10 @@ public static File getChunksLocationPath(MiniOzoneCluster cluster, Container con
     // the container.
     KeyValueContainerData containerData =
         (KeyValueContainerData) container.getContainerData();
-    try (DBHandle db = BlockUtils.getDB(containerData, cluster.getConf());
-        BlockIterator keyValueBlockIterator =
-            db.getStore().getBlockIterator(containerID)) {
-      // Find the block corresponding to the key we put. We use the localID of
-      // the BlockData to identify out key.
-      BlockData blockData = null;
-      while (keyValueBlockIterator.hasNext()) {
-        blockData = keyValueBlockIterator.nextBlock();
-        if (blockData.getBlockID().getLocalID() == localID) {
-          break;
-        }
-      }
+    try (DBHandle db = BlockUtils.getDB(containerData, cluster.getConf())) {
+      BlockID blockID = new BlockID(containerID, localID);
+      String blockKey = containerData.getBlockKey(localID);
+      BlockData blockData = db.getStore().getBlockByID(blockID, blockKey);
       assertNotNull(blockData, "Block not found");

       // Get the location of the chunk file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
index 00654d943f78..192c933f53c6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
@@ -557,23 +557,32 @@ private void clearBlocksTable(Container container) throws IOException {
             = BlockUtils.getDB(
             (KeyValueContainerData) container.getContainerData(),
             conf)) {
-      List<? extends Table.KeyValue<String, BlockData>>
-          blocks = dbHandle.getStore().getBlockDataTable().getRangeKVs(
-              ((KeyValueContainerData) container.getContainerData()).
-                  startKeyEmpty(),
-              Integer.MAX_VALUE,
-              ((KeyValueContainerData) container.getContainerData()).
-                  containerPrefix(),
-              ((KeyValueContainerData) container.getContainerData()).
-                  getUnprefixedKeyFilter());
-      try (BatchOperation batch = dbHandle.getStore().getBatchHandler()
-          .initBatchOperation()) {
-        for (Table.KeyValue kv : blocks) {
-          String blk = kv.getKey();
-          dbHandle.getStore().getBlockDataTable().deleteWithBatch(batch, blk);
-        }
-        dbHandle.getStore().getBatchHandler().commitBatchOperation(batch);
+      Table table = dbHandle.getStore().getBlockDataTable();
+      clearTable(dbHandle, table, container);
+
+      table = dbHandle.getStore().getLastChunkInfoTable();
+      clearTable(dbHandle, table, container);
+    }
+  }
+
+  private void clearTable(DBHandle dbHandle, Table table, Container container)
+      throws IOException {
+    List<? extends Table.KeyValue<String, BlockData>>
+        blocks = table.getRangeKVs(
+            ((KeyValueContainerData) container.getContainerData()).
+                startKeyEmpty(),
+            Integer.MAX_VALUE,
+            ((KeyValueContainerData) container.getContainerData()).
+                containerPrefix(),
+            ((KeyValueContainerData) container.getContainerData()).
+                getUnprefixedKeyFilter());
+    try (BatchOperation batch = dbHandle.getStore().getBatchHandler()
+        .initBatchOperation()) {
+      for (Table.KeyValue kv : blocks) {
+        String blk = kv.getKey();
+        table.deleteWithBatch(batch, blk);
       }
+      dbHandle.getStore().getBatchHandler().commitBatchOperation(batch);
     }
   }
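
The two client options in the first hunk now default to true. Below is a minimal, illustrative sketch of how a client could opt back out of these defaults. It assumes the @Config keys resolve to "ozone.client.incremental.chunk.list" and "ozone.client.stream.putblock.piggybacking" (an "ozone.client" prefix on OzoneClientConfig); verify the exact key names against your Ozone version. The getters used are the ones the patched BlockOutputStream calls.

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.scm.OzoneClientConfig;

    // Illustrative sketch only; not part of the patch above.
    public final class DisableHBaseWriteOptimizations {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Revert to the pre-patch behavior: full chunk lists and no
        // PutBlock piggybacking in WriteChunk requests.
        // Key names assume the "ozone.client" config group prefix.
        conf.setBoolean("ozone.client.incremental.chunk.list", false);
        conf.setBoolean("ozone.client.stream.putblock.piggybacking", false);

        OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
        // These getters are referenced by BlockOutputStream in the patch.
        System.out.println("incremental chunk list: "
            + clientConfig.getIncrementalChunkList());
        System.out.println("putblock piggybacking: "
            + clientConfig.getEnablePutblockPiggybacking());
      }
    }

Note that even with the defaults left on, the patched client only uses these features when every DataNode in the pipeline reports a protocol version that supports combined PutBlock/WriteChunk requests, and incremental chunk lists are limited to Ratis (non-EC) output streams.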