diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index de92d1793fb2..b79ae0d456fb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -332,7 +332,8 @@ private void markBlocksForDeletionSchemaV3(
     DeletionMarker schemaV3Marker = (table, batch, tid, txn) -> {
       Table<String, DeletedBlocksTransaction> delTxTable =
           (Table<String, DeletedBlocksTransaction>) table;
-      delTxTable.putWithBatch(batch, containerData.deleteTxnKey(tid), txn);
+      delTxTable.putWithBatch(batch, containerData.getDeleteTxnKey(tid),
+          txn);
     };
 
     markBlocksForDeletionTransaction(containerData, delTX, newDeletionBlocks,
@@ -402,10 +403,10 @@ private void markBlocksForDeletionSchemaV1(
     try (BatchOperation batch = containerDB.getStore().getBatchHandler()
         .initBatchOperation()) {
       for (Long blkLong : delTX.getLocalIDList()) {
-        String blk = containerData.blockKey(blkLong);
+        String blk = containerData.getBlockKey(blkLong);
         BlockData blkInfo = blockDataTable.get(blk);
         if (blkInfo != null) {
-          String deletingKey = containerData.deletingBlockKey(blkLong);
+          String deletingKey = containerData.getDeletingBlockKey(blkLong);
           if (blockDataTable.get(deletingKey) != null
               || deletedBlocksTable.get(blk) != null) {
             if (LOG.isDebugEnabled()) {
@@ -457,15 +458,15 @@ private void updateMetaData(KeyValueContainerData containerData,
       if (delTX.getTxID() > containerData.getDeleteTransactionId()) {
         // Update in DB pending delete key count and delete transaction ID.
         metadataTable
-            .putWithBatch(batchOperation, containerData.latestDeleteTxnKey(),
-                delTX.getTxID());
+            .putWithBatch(batchOperation,
+                containerData.getLatestDeleteTxnKey(), delTX.getTxID());
       }
 
       long pendingDeleteBlocks =
           containerData.getNumPendingDeletionBlocks() + newDeletionBlocks;
       metadataTable
           .putWithBatch(batchOperation,
-              containerData.pendingDeleteBlockCountKey(),
+              containerData.getPendingDeleteBlockCountKey(),
               pendingDeleteBlocks);
 
       // update pending deletion blocks count and delete transaction ID in
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
index 115fc7f37313..27b138da95b8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
@@ -277,7 +277,7 @@ private void scanData(DataTransferThrottler throttler, Canceler canceler)
   private BlockData getBlockDataFromDB(DBHandle db, BlockData block)
       throws IOException {
     String blockKey =
-        onDiskContainerData.blockKey(block.getBlockID().getLocalID());
+        onDiskContainerData.getBlockKey(block.getBlockID().getLocalID());
     return db.getStore().getBlockDataTable().get(blockKey);
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
index 19a13a6db5b8..58862925c587 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
@@ -306,11 +306,12 @@ public void updateAndCommitDBCounters(DBHandle db,
     Table<String, Long> metadataTable = db.getStore().getMetadataTable();
 
     // Set Bytes used and block count key.
-    metadataTable.putWithBatch(batchOperation, bytesUsedKey(),
+    metadataTable.putWithBatch(batchOperation, getBytesUsedKey(),
         getBytesUsed() - releasedBytes);
-    metadataTable.putWithBatch(batchOperation, blockCountKey(),
+    metadataTable.putWithBatch(batchOperation, getBlockCountKey(),
         getBlockCount() - deletedBlockCount);
-    metadataTable.putWithBatch(batchOperation, pendingDeleteBlockCountKey(),
+    metadataTable.putWithBatch(batchOperation,
+        getPendingDeleteBlockCountKey(),
         getNumPendingDeletionBlocks() - deletedBlockCount);
 
     db.getStore().getBatchHandler().commitBatchOperation(batchOperation);
@@ -328,39 +329,39 @@ public void setReplicaIndex(int replicaIndex) {
   // to container schemas, we should use them instead of using
   // raw const variables defined.
-  public String blockKey(long localID) {
+  public String getBlockKey(long localID) {
     return formatKey(Long.toString(localID));
   }
 
-  public String deletingBlockKey(long localID) {
+  public String getDeletingBlockKey(long localID) {
     return formatKey(DELETING_KEY_PREFIX + localID);
   }
 
-  public String deleteTxnKey(long txnID) {
+  public String getDeleteTxnKey(long txnID) {
     return formatKey(Long.toString(txnID));
   }
 
-  public String latestDeleteTxnKey() {
+  public String getLatestDeleteTxnKey() {
     return formatKey(DELETE_TRANSACTION_KEY);
   }
 
-  public String bcsIdKey() {
+  public String getBcsIdKey() {
     return formatKey(BLOCK_COMMIT_SEQUENCE_ID);
  }
 
-  public String blockCountKey() {
+  public String getBlockCountKey() {
     return formatKey(BLOCK_COUNT);
   }
 
-  public String bytesUsedKey() {
+  public String getBytesUsedKey() {
     return formatKey(CONTAINER_BYTES_USED);
   }
 
-  public String pendingDeleteBlockCountKey() {
+  public String getPendingDeleteBlockCountKey() {
     return formatKey(PENDING_DELETE_BLOCK_COUNT);
   }
 
-  public String deletingBlockKeyPrefix() {
+  public String getDeletingBlockKeyPrefix() {
     return formatKey(DELETING_KEY_PREFIX);
   }
 
@@ -370,7 +371,7 @@ public KeyPrefixFilter getUnprefixedKeyFilter() {
   }
 
   public KeyPrefixFilter getDeletingBlockKeyFilter() {
-    return new KeyPrefixFilter().addFilter(deletingBlockKeyPrefix());
+    return new KeyPrefixFilter().addFilter(getDeletingBlockKeyPrefix());
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
index c6395de27db5..595aa925a4fc 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerMetadataInspector.java
@@ -229,15 +229,15 @@ private JsonObject getDBMetadataJson(Table<String, Long> metadataTable,
     JsonObject dBMetadata = new JsonObject();
 
     dBMetadata.addProperty(OzoneConsts.BLOCK_COUNT,
-        metadataTable.get(containerData.blockCountKey()));
+        metadataTable.get(containerData.getBlockCountKey()));
     dBMetadata.addProperty(OzoneConsts.CONTAINER_BYTES_USED,
-        metadataTable.get(containerData.bytesUsedKey()));
+        metadataTable.get(containerData.getBytesUsedKey()));
     dBMetadata.addProperty(OzoneConsts.PENDING_DELETE_BLOCK_COUNT,
-        metadataTable.get(containerData.pendingDeleteBlockCountKey()));
+        metadataTable.get(containerData.getPendingDeleteBlockCountKey()));
     dBMetadata.addProperty(OzoneConsts.DELETE_TRANSACTION_KEY,
-        metadataTable.get(containerData.latestDeleteTxnKey()));
+        metadataTable.get(containerData.getLatestDeleteTxnKey()));
     dBMetadata.addProperty(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID,
-        metadataTable.get(containerData.bcsIdKey()));
+        metadataTable.get(containerData.getBcsIdKey()));
 
     return dBMetadata;
   }
@@ -341,7 +341,7 @@ private boolean checkAndRepair(JsonObject parent,
       BooleanSupplier keyRepairAction = () -> {
         boolean repaired = false;
         try {
-          metadataTable.put(containerData.blockCountKey(),
+          metadataTable.put(containerData.getBlockCountKey(),
              blockCountAggregate.getAsLong());
           repaired = true;
         } catch (IOException ex) {
@@ -376,7 +376,7 @@ private boolean checkAndRepair(JsonObject parent,
       BooleanSupplier keyRepairAction = () -> {
         boolean repaired = false;
         try {
-          metadataTable.put(containerData.bytesUsedKey(),
+          metadataTable.put(containerData.getBytesUsedKey(),
              usedBytesAggregate.getAsLong());
           repaired = true;
         } catch (IOException ex) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index b23a49556f76..bc3d96d9b211 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -246,7 +246,7 @@ private static void populateContainerMetadata(
     // Set pending deleted block count.
     Long pendingDeleteBlockCount =
         metadataTable.get(kvContainerData
-            .pendingDeleteBlockCountKey());
+            .getPendingDeleteBlockCountKey());
     if (pendingDeleteBlockCount != null) {
       kvContainerData.incrPendingDeletionBlocks(
           pendingDeleteBlockCount);
@@ -263,7 +263,7 @@ private static void populateContainerMetadata(
 
     // Set delete transaction id.
     Long delTxnId =
-        metadataTable.get(kvContainerData.latestDeleteTxnKey());
+        metadataTable.get(kvContainerData.getLatestDeleteTxnKey());
     if (delTxnId != null) {
       kvContainerData
           .updateDeleteTransactionId(delTxnId);
@@ -271,7 +271,7 @@ private static void populateContainerMetadata(
 
     // Set BlockCommitSequenceId.
     Long bcsId = metadataTable.get(
-        kvContainerData.bcsIdKey());
+        kvContainerData.getBcsIdKey());
     if (bcsId != null) {
       kvContainerData
           .updateBlockCommitSequenceId(bcsId);
@@ -280,7 +280,7 @@ private static void populateContainerMetadata(
     // Set bytes used.
     // commitSpace for Open Containers relies on usedBytes
     Long bytesUsed =
-        metadataTable.get(kvContainerData.bytesUsedKey());
+        metadataTable.get(kvContainerData.getBytesUsedKey());
     if (bytesUsed != null) {
       isBlockMetadataSet = true;
       kvContainerData.setBytesUsed(bytesUsed);
@@ -288,7 +288,7 @@ private static void populateContainerMetadata(
 
     // Set block count.
     Long blockCount = metadataTable.get(
-        kvContainerData.blockCountKey());
+        kvContainerData.getBlockCountKey());
     if (blockCount != null) {
       isBlockMetadataSet = true;
       kvContainerData.setBlockCount(blockCount);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index 67b38ff68d2c..d822ce51198d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -158,7 +158,7 @@ public static long persistPutBlock(KeyValueContainer container,
       // If block exists in cache, blockCount should not be incremented.
       if (!isBlockInCache) {
         if (db.getStore().getBlockDataTable().get(
-            containerData.blockKey(localID)) == null) {
+            containerData.getBlockKey(localID)) == null) {
           // Block does not exist in DB => blockCount needs to be
           // incremented when the block is added into DB.
           incrBlockCount = true;
@@ -166,10 +166,10 @@ public static long persistPutBlock(KeyValueContainer container,
       }
 
       db.getStore().getBlockDataTable().putWithBatch(
-          batch, containerData.blockKey(localID), data);
+          batch, containerData.getBlockKey(localID), data);
       if (bcsId != 0) {
         db.getStore().getMetadataTable().putWithBatch(
-            batch, containerData.bcsIdKey(), bcsId);
+            batch, containerData.getBcsIdKey(), bcsId);
       }
 
       // Set Bytes used, this bytes used will be updated for every write and
@@ -179,13 +179,13 @@ public static long persistPutBlock(KeyValueContainer container,
       // is only used to compute the bytes used. This is done to keep the
       // current behavior and avoid DB write during write chunk operation.
       db.getStore().getMetadataTable().putWithBatch(
-          batch, containerData.bytesUsedKey(),
+          batch, containerData.getBytesUsedKey(),
           containerData.getBytesUsed());
 
       // Set Block Count for a container.
       if (incrBlockCount) {
         db.getStore().getMetadataTable().putWithBatch(
-            batch, containerData.blockCountKey(),
+            batch, containerData.getBlockCountKey(),
             containerData.getBlockCount() + 1);
       }
 
@@ -327,7 +327,7 @@ public List<BlockData> listBlock(Container container, long startLocalID, int
     try (DBHandle db = BlockUtils.getDB(cData, config)) {
       result = new ArrayList<>();
       String startKey = (startLocalID == -1) ? cData.startKeyEmpty()
-          : cData.blockKey(startLocalID);
+          : cData.getBlockKey(startLocalID);
       List<? extends Table.KeyValue<String, BlockData>> range =
           db.getStore().getBlockDataTable()
              .getSequentialRangeKVs(startKey, count,
@@ -352,7 +352,7 @@ public void shutdown() {
 
   private BlockData getBlockByID(DBHandle db, BlockID blockID,
       KeyValueContainerData containerData) throws IOException {
-    String blockKey = containerData.blockKey(blockID.getLocalID());
+    String blockKey = containerData.getBlockKey(blockID.getLocalID());
 
     BlockData blockData = db.getStore().getBlockDataTable().get(blockKey);
     if (blockData == null) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
index ee57666a0aaa..6dc0191cad3a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
@@ -440,7 +440,8 @@ public ContainerBackgroundTaskResult deleteViaSchema3(
       Deleter schema3Deleter = (table, batch, tid) -> {
         Table<String, DeletedBlocksTransaction> delTxTable =
             (Table<String, DeletedBlocksTransaction>) table;
-        delTxTable.deleteWithBatch(batch, containerData.deleteTxnKey(tid));
+        delTxTable.deleteWithBatch(batch,
+            containerData.getDeleteTxnKey(tid));
       };
       Table<String, DeletedBlocksTransaction> deleteTxns =
           ((DeleteTransactionStore<String>) meta.getStore())
@@ -502,7 +503,7 @@ private ContainerBackgroundTaskResult deleteViaTransactionStore(
           deleter.apply(deleteTxns, batch, delTx.getTxID());
           for (Long blk : delTx.getLocalIDList()) {
             blockDataTable.deleteWithBatch(batch,
-                containerData.blockKey(blk));
+                containerData.getBlockKey(blk));
           }
         }
 
@@ -550,7 +551,7 @@ private DeleteTransactionStats deleteTransactions(
       long bytesReleased = 0;
       for (DeletedBlocksTransaction entry : delBlocks) {
         for (Long blkLong : entry.getLocalIDList()) {
-          String blk = containerData.blockKey(blkLong);
+          String blk = containerData.getBlockKey(blkLong);
           BlockData blkInfo = blockDataTable.get(blk);
           LOG.debug("Deleting block {}", blkLong);
           if (blkInfo == null) {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index 357cd7c0c9a3..314cbc317125 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -218,7 +218,8 @@ private void createPendingDeleteBlocksSchema1(int numOfBlocksPerContainer,
     try (DBHandle metadata = BlockUtils.getDB(data, conf)) {
       for (int j = 0; j < numOfBlocksPerContainer; j++) {
         blockID = ContainerTestHelper.getTestBlockID(containerID);
-        String deleteStateName = data.deletingBlockKey(blockID.getLocalID());
+        String deleteStateName = data.getDeletingBlockKey(
+            blockID.getLocalID());
         BlockData kd = new BlockData(blockID);
         List<ContainerProtos.ChunkInfo> chunks = Lists.newArrayList();
         putChunksInBlock(numOfChunksPerBlock, j, chunks, buffer, chunkManager,
@@ -250,7 +251,7 @@ private void createPendingDeleteBlocksViaTxn(int numOfBlocksPerContainer,
           container, blockID);
       kd.setChunks(chunks);
       try (DBHandle metadata = BlockUtils.getDB(data, conf)) {
-        String blockKey = data.blockKey(blockID.getLocalID());
+        String blockKey = data.getBlockKey(blockID.getLocalID());
         metadata.getStore().getBlockDataTable().put(blockKey, kd);
       } catch (IOException exception) {
         LOG.info("Exception = " + exception);
@@ -285,7 +286,7 @@ private void createTxn(KeyValueContainerData data, List<Long> containerBlocks,
         DatanodeStoreSchemaThreeImpl dnStoreThreeImpl =
             (DatanodeStoreSchemaThreeImpl) ds;
         dnStoreThreeImpl.getDeleteTransactionTable()
-            .putWithBatch(batch, data.deleteTxnKey(txnID), dtx);
+            .putWithBatch(batch, data.getDeleteTxnKey(txnID), dtx);
       } else {
         DatanodeStoreSchemaTwoImpl dnStoreTwoImpl =
             (DatanodeStoreSchemaTwoImpl) ds;
@@ -334,12 +335,12 @@ private void updateMetaData(KeyValueContainerData data,
       container.getContainerData().setBlockCount(numOfBlocksPerContainer);
       // Set block count, bytes used and pending delete block count.
       metadata.getStore().getMetadataTable()
-          .put(data.blockCountKey(), (long) numOfBlocksPerContainer);
+          .put(data.getBlockCountKey(), (long) numOfBlocksPerContainer);
       metadata.getStore().getMetadataTable()
-          .put(data.bytesUsedKey(),
+          .put(data.getBytesUsedKey(),
              chunkLength * numOfChunksPerBlock * numOfBlocksPerContainer);
       metadata.getStore().getMetadataTable()
-          .put(data.pendingDeleteBlockCountKey(),
+          .put(data.getPendingDeleteBlockCountKey(),
              (long) numOfBlocksPerContainer);
     } catch (IOException exception) {
       LOG.warn("Meta Data update was not successful for container: "
@@ -455,7 +456,7 @@ public void testBlockDeletion() throws Exception {
       // Ensure there are 3 blocks under deletion and 0 deleted blocks
       Assert.assertEquals(3, getUnderDeletionBlocksCount(meta, data));
       Assert.assertEquals(3, meta.getStore().getMetadataTable()
-          .get(data.pendingDeleteBlockCountKey()).longValue());
+          .get(data.getPendingDeleteBlockCountKey()).longValue());
 
       // Container contains 3 blocks. So, space used by the container
       // should be greater than zero.
@@ -485,9 +486,9 @@ public void testBlockDeletion() throws Exception {
       // Check finally DB counters.
       // Not checking bytes used, as handler is a mock call.
       Assert.assertEquals(0, meta.getStore().getMetadataTable()
-          .get(data.pendingDeleteBlockCountKey()).longValue());
+          .get(data.getPendingDeleteBlockCountKey()).longValue());
       Assert.assertEquals(0,
-          meta.getStore().getMetadataTable().get(data.blockCountKey())
+          meta.getStore().getMetadataTable().get(data.getBlockCountKey())
              .longValue());
       Assert.assertEquals(3,
           deletingServiceMetrics.getSuccessCount()
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
index 7aab0af64ea8..da9dd88c4176 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
@@ -239,14 +239,14 @@ public void testReadWithoutMetadata() throws Exception {
     try (DBHandle db = BlockUtils.getDB(cData, conf)) {
       Table<String, Long> metadataTable = db.getStore().getMetadataTable();
 
-      metadataTable.delete(cData.blockCountKey());
-      assertNull(metadataTable.get(cData.blockCountKey()));
+      metadataTable.delete(cData.getBlockCountKey());
+      assertNull(metadataTable.get(cData.getBlockCountKey()));
 
-      metadataTable.delete(cData.bytesUsedKey());
-      assertNull(metadataTable.get(cData.bytesUsedKey()));
+      metadataTable.delete(cData.getBytesUsedKey());
+      assertNull(metadataTable.get(cData.getBytesUsedKey()));
 
-      metadataTable.delete(cData.pendingDeleteBlockCountKey());
-      assertNull(metadataTable.get(cData.pendingDeleteBlockCountKey()));
+      metadataTable.delete(cData.getPendingDeleteBlockCountKey());
+      assertNull(metadataTable.get(cData.getPendingDeleteBlockCountKey()));
     }
 
     // Create a new container data object, and fill in its metadata by
@@ -317,7 +317,7 @@ public void testDelete() throws Exception {
       Table<String, Long> metadataTable =
          refCountedDB.getStore().getMetadataTable();
 
       assertEquals(expectedRegularBlocks + expectedDeletingBlocks,
-          (long)metadataTable.get(cData.blockCountKey()));
+          (long)metadataTable.get(cData.getBlockCountKey()));
     }
   }
@@ -401,7 +401,7 @@ public void testReadBlockData() throws Exception {
 
     // Test encoding keys and decoding database values.
     for (String blockID: TestDB.BLOCK_IDS) {
-      String blockKey = cData.blockKey(Long.parseLong(blockID));
+      String blockKey = cData.getBlockKey(Long.parseLong(blockID));
       BlockData blockData = blockDataTable.get(blockKey);
       Assert.assertEquals(Long.toString(blockData.getLocalID()), blockID);
     }
@@ -444,7 +444,8 @@ public void testReadDeletingBlockData() throws Exception {
           refCountedDB.getStore().getBlockDataTable();
 
       for (String blockID: TestDB.DELETING_BLOCK_IDS) {
-        String blockKey = cData.deletingBlockKey(Long.parseLong(blockID));
+        String blockKey = cData.getDeletingBlockKey(
+            Long.parseLong(blockID));
         BlockData blockData = blockDataTable.get(blockKey);
         Assert.assertEquals(Long.toString(blockData.getLocalID()), blockID);
       }
@@ -464,7 +465,7 @@ public void testReadDeletingBlockData() throws Exception {
       // Apply the deleting prefix to the saved block IDs so we can compare
       // them to the retrieved keys.
       List<String> expectedKeys = TestDB.DELETING_BLOCK_IDS.stream()
-          .map(key -> cData.deletingBlockKey(Long.parseLong(key)))
+          .map(key -> cData.getDeletingBlockKey(Long.parseLong(key)))
          .collect(Collectors.toList());
 
       Assert.assertEquals(expectedKeys, decodedKeys);
@@ -496,11 +497,11 @@ public void testReadMetadata() throws Exception {
          refCountedDB.getStore().getMetadataTable();
 
       Assert.assertEquals(TestDB.KEY_COUNT,
-          metadataTable.get(cData.blockCountKey()).longValue());
+          metadataTable.get(cData.getBlockCountKey()).longValue());
       Assert.assertEquals(TestDB.BYTES_USED,
-          metadataTable.get(cData.bytesUsedKey()).longValue());
+          metadataTable.get(cData.getBytesUsedKey()).longValue());
       Assert.assertEquals(TestDB.NUM_PENDING_DELETION_BLOCKS,
-          metadataTable.get(cData.pendingDeleteBlockCountKey())
+          metadataTable.get(cData.getPendingDeleteBlockCountKey())
              .longValue());
     }
   }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
index d2741dc9cd02..3624309d9d4d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java
@@ -317,9 +317,9 @@ private KeyValueContainer createTestContainer() throws IOException {
 
         // update delete related metadata
         db.getStore().getMetadataTable().putWithBatch(batch,
-            cData.latestDeleteTxnKey(), txn.getTxID());
+            cData.getLatestDeleteTxnKey(), txn.getTxID());
         db.getStore().getMetadataTable().putWithBatch(batch,
-            cData.pendingDeleteBlockCountKey(),
+            cData.getPendingDeleteBlockCountKey(),
            cData.getNumPendingDeletionBlocks() + BLOCKS_PER_TXN);
 
         db.getStore().getBatchHandler().commitBatchOperation(batch);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index 366d75af5ba7..67a83a02a645 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -327,14 +327,14 @@ private void populate(long numberOfKeysToWrite) throws IOException {
         metadataStore.getStore().getBlockDataTable();
 
     for (long i = 0; i < numberOfKeysToWrite; i++) {
-      blockDataTable.put(cData.blockKey(i),
+      blockDataTable.put(cData.getBlockKey(i),
          new BlockData(new BlockID(i, i)));
     }
 
    // As now when we put blocks, we increment block count and update in DB.
    // As for test, we are doing manually so adding key count to DB.
     metadataStore.getStore().getMetadataTable()
-        .put(cData.blockCountKey(), numberOfKeysToWrite);
+        .put(cData.getBlockCountKey(), numberOfKeysToWrite);
   }
 
     Map<String, String> metadata = new HashMap<>();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
index 51e72839fad9..cf18fa8948db 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerIntegrityChecks.java
@@ -172,10 +172,10 @@ protected KeyValueContainer createContainerWithBlocks(long containerId,
         blockData.setChunks(chunkList);
 
         // normal key
-        String key = containerData.blockKey(blockID.getLocalID());
+        String key = containerData.getBlockKey(blockID.getLocalID());
         if (i >= normalBlocks) {
           // deleted key
-          key = containerData.deletingBlockKey(blockID.getLocalID());
+          key = containerData.getDeletingBlockKey(blockID.getLocalID());
         }
         metadataStore.getStore().getBlockDataTable().put(key, blockData);
       }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java
index 8195e6f4eb1f..aea451bc3a0e 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java
@@ -293,8 +293,8 @@ public void setDBBlockAndByteCounts(KeyValueContainerData containerData,
     try (DBHandle db = BlockUtils.getDB(containerData, getConf())) {
       Table<String, Long> metadataTable = db.getStore().getMetadataTable();
       // Don't care about in memory state. Just change the DB values.
-      metadataTable.put(containerData.blockCountKey(), blockCount);
-      metadataTable.put(containerData.bytesUsedKey(), byteCount);
+      metadataTable.put(containerData.getBlockCountKey(), blockCount);
+      metadataTable.put(containerData.getBytesUsedKey(), byteCount);
     }
   }
 
@@ -303,10 +303,10 @@ public void checkDBBlockAndByteCounts(KeyValueContainerData containerData,
     try (DBHandle db = BlockUtils.getDB(containerData, getConf())) {
       Table<String, Long> metadataTable = db.getStore().getMetadataTable();
 
-      long bytesUsed = metadataTable.get(containerData.bytesUsedKey());
+      long bytesUsed = metadataTable.get(containerData.getBytesUsedKey());
       Assert.assertEquals(expectedBytesUsed, bytesUsed);
 
-      long blockCount = metadataTable.get(containerData.blockCountKey());
+      long blockCount = metadataTable.get(containerData.getBlockCountKey());
       Assert.assertEquals(expectedBlockCount, blockCount);
     }
   }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
index a17a5f7f8cb8..229e883ad842 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
@@ -161,11 +161,11 @@ private void markBlocksForDelete(KeyValueContainer keyValueContainer,
          metadataStore.getStore().getBlockDataTable();
 
       Long localID = blockNames.get(i);
-      String blk = cData.blockKey(localID);
+      String blk = cData.getBlockKey(localID);
       BlockData blkInfo = blockDataTable.get(blk);
 
       blockDataTable.delete(blk);
-      blockDataTable.put(cData.deletingBlockKey(localID), blkInfo);
+      blockDataTable.put(cData.getDeletingBlockKey(localID), blkInfo);
     }
 
     if (setMetaData) {
@@ -173,7 +173,8 @@ private void markBlocksForDelete(KeyValueContainer keyValueContainer,
       // and bytes used metadata values, so those do not change.
       Table<String, Long> metadataTable =
          metadataStore.getStore().getMetadataTable();
-      metadataTable.put(cData.pendingDeleteBlockCountKey(), (long)count);
+      metadataTable.put(cData.getPendingDeleteBlockCountKey(),
+          (long)count);
     }
   }
 
@@ -201,14 +202,14 @@ private List<Long> addBlocks(KeyValueContainer keyValueContainer,
       blockData.setChunks(chunkList);
       blkNames.add(localBlockID);
       metadataStore.getStore().getBlockDataTable()
-          .put(cData.blockKey(localBlockID), blockData);
+          .put(cData.getBlockKey(localBlockID), blockData);
     }
 
     if (setMetaData) {
       metadataStore.getStore().getMetadataTable()
-          .put(cData.blockCountKey(), (long)blockCount);
+          .put(cData.getBlockCountKey(), (long)blockCount);
       metadataStore.getStore().getMetadataTable()
-          .put(cData.bytesUsedKey(), blockCount * blockLen);
+          .put(cData.getBytesUsedKey(), blockCount * blockLen);
     }
   }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 5ae3bad21f1e..cceed793902f 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -320,12 +320,13 @@ private long addBlocks(KeyValueContainer container,
         chunkList.add(info.getProtoBufMessage());
       }
       blockData.setChunks(chunkList);
-      blockDataTable.put(cData.blockKey(blockID.getLocalID()), blockData);
+      blockDataTable.put(cData.getBlockKey(blockID.getLocalID()),
+          blockData);
     }
 
     // Set Block count and used bytes.
-    metadataTable.put(cData.blockCountKey(), (long) blocks);
-    metadataTable.put(cData.bytesUsedKey(), usedBytes);
+    metadataTable.put(cData.getBlockCountKey(), (long) blocks);
+    metadataTable.put(cData.getBytesUsedKey(), usedBytes);
   }
   // remaining available capacity of the container
   return (freeBytes - usedBytes);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
index 48d152e8da1c..6841fb0258f0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
@@ -111,7 +111,7 @@ public List<String> getPendingDeletionBlocks(Long containerID)
 
      for (Table.KeyValue<String, BlockData> entry : kvs) {
        pendingDeletionBlocks
-            .add(entry.getKey().replace(cData.deletingBlockKeyPrefix(), ""));
+            .add(entry.getKey().replace(cData.getDeletingBlockKeyPrefix(), ""));
      }
    }
    return pendingDeletionBlocks;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
index 6fa5324af606..7ddb6e13d3b6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
@@ -268,7 +268,7 @@ private void testBlockCountOnFailures(OmKeyInfo omKeyInfo) throws Exception {
         .getContainer(containerId1)).getContainerData();
     try (DBHandle containerDb1 = BlockUtils.getDB(containerData1, conf)) {
       BlockData blockData1 =
          containerDb1.getStore().getBlockDataTable().get(
-              containerData1.blockKey(locationList.get(0).getBlockID()
+              containerData1.getBlockKey(locationList.get(0).getBlockID()
                  .getLocalID()));
       // The first Block could have 1 or 2 chunkSize of data
       int block1NumChunks = blockData1.getChunks().size();
@@ -287,7 +287,7 @@ private void testBlockCountOnFailures(OmKeyInfo omKeyInfo) throws Exception {
         .getContainer(containerId2)).getContainerData();
     try (DBHandle containerDb2 = BlockUtils.getDB(containerData2, conf)) {
       BlockData blockData2 = containerDb2.getStore().getBlockDataTable().get(
-          containerData2.blockKey(locationList.get(1).getBlockID()
+          containerData2.getBlockKey(locationList.get(1).getBlockID()
              .getLocalID()));
       // The second Block should have 0.5 chunkSize of data
       Assert.assertEquals(block2ExpectedChunkCount,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index a6566141d797..69be5c1bfee9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -1790,7 +1790,8 @@ public void testZReadKeyWithUnhealthyContainerReplica() throws Exception {
       KeyValueContainerData cData =
          (KeyValueContainerData) container.getContainerData();
       try (DBHandle db = BlockUtils.getDB(cData, cluster.getConf())) {
-        db.getStore().getMetadataTable().put(cData.bcsIdKey(), newBCSID);
+        db.getStore().getMetadataTable().put(cData.getBcsIdKey(),
+            newBCSID);
       }
       container.updateBlockCommitSequenceId(newBCSID);
       index++;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java
index 7f00825e34e6..ac964834fc13 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java
@@ -227,7 +227,7 @@ public void testValidateBCSIDOnDnRestart() throws Exception {
       // modify the bcsid for the container in the ROCKS DB thereby inducing
      // corruption
       db.getStore().getMetadataTable()
-          .put(keyValueContainerData.bcsIdKey(), 0L);
+          .put(keyValueContainerData.getBcsIdKey(), 0L);
     }
     // after the restart, there will be a mismatch in BCSID of what is recorded
     // in the and what is there in RockSDB and hence the container would be
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
index f7dee1563706..c2c9e23f0516 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
@@ -443,7 +443,7 @@ private void verifyBlocksCreated(
          .getContainer(blockID.getContainerID()).getContainerData();
       try (DBHandle db = BlockUtils.getDB(cData, conf)) {
         Assertions.assertNotNull(db.getStore().getBlockDataTable()
-            .get(cData.blockKey(blockID.getLocalID()))));
+            .get(cData.getBlockKey(blockID.getLocalID()))));
       }
     }, omKeyLocationInfoGroups);
   }
@@ -461,12 +461,13 @@ private void verifyBlocksDeleted(
         Table<String, BlockData> blockDataTable =
            db.getStore().getBlockDataTable();
 
-        String blockKey = cData.blockKey(blockID.getLocalID());
+        String blockKey = cData.getBlockKey(blockID.getLocalID());
         BlockData blockData = blockDataTable.get(blockKey);
         Assertions.assertNull(blockData);
 
-        String deletingKey = cData.deletingBlockKey(blockID.getLocalID());
+        String deletingKey = cData.getDeletingBlockKey(
+            blockID.getLocalID());
         Assertions.assertNull(blockDataTable.get(deletingKey));
       }
       containerIdsWithDeletedBlocks.add(blockID.getContainerID());
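
Since every call site in this patch changes the same way, a minimal usage sketch of the renamed accessors follows. It is illustrative only: the class BlockKeyRenameExample and its readBlock helper are not part of the patch, and the import locations for DBHandle, BlockUtils, and ConfigurationSource are assumptions inferred from the file paths and call sites above.

import java.io.IOException;

import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;

public final class BlockKeyRenameExample {

  private BlockKeyRenameExample() {
  }

  // Looks up one block record by its local ID. Before this patch the key
  // was built with data.blockKey(localID); after it, the call is
  // data.getBlockKey(localID). Only the accessor names change; the key
  // format produced by formatKey() is untouched, so on-disk data is
  // unaffected.
  public static BlockData readBlock(KeyValueContainerData data,
      ConfigurationSource conf, long localID) throws IOException {
    try (DBHandle db = BlockUtils.getDB(data, conf)) {
      Table<String, BlockData> blockDataTable =
          db.getStore().getBlockDataTable();
      return blockDataTable.get(data.getBlockKey(localID));
    }
  }
}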