Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -128,8 +128,10 @@ public long putBlock(Container container, BlockData data,
.initBatchOperation()) {
db.getStore().getBlockDataTable().putWithBatch(
batch, Long.toString(data.getLocalID()), data);
db.getStore().getMetadataTable().putWithBatch(
batch, OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID, bcsId);
if (bcsId != 0) {
db.getStore().getMetadataTable().putWithBatch(
batch, OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID, bcsId);
}

// Set Bytes used, this bytes used will be updated for every write and
// only get committed for every put block. In this way, when datanode
Expand All @@ -151,7 +153,9 @@ public long putBlock(Container container, BlockData data,
db.getStore().getBatchHandler().commitBatchOperation(batch);
}

container.updateBlockCommitSequenceId(bcsId);
if (bcsId != 0) {
container.updateBlockCommitSequenceId(bcsId);
}
// Increment block count finally here for in-memory.
if (incrKeyCount) {
container.getContainerData().incrKeyCount();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -70,8 +70,10 @@ public class TestBlockManagerImpl {
private KeyValueContainerData keyValueContainerData;
private KeyValueContainer keyValueContainer;
private BlockData blockData;
private BlockData blockData1;
private BlockManagerImpl blockManager;
private BlockID blockID;
private BlockID blockID1;

private final ChunkLayOutVersion layout;

Expand Down Expand Up @@ -120,11 +122,52 @@ public void setUp() throws Exception {
chunkList.add(info.getProtoBufMessage());
blockData.setChunks(chunkList);

// Creating BlockData
blockID1 = new BlockID(1L, 2L);
blockData1 = new BlockData(blockID1);
blockData1.addMetadata(OzoneConsts.VOLUME, OzoneConsts.OZONE);
blockData1.addMetadata(OzoneConsts.OWNER,
OzoneConsts.OZONE_SIMPLE_HDFS_USER);
List<ContainerProtos.ChunkInfo> chunkList1 = new ArrayList<>();
ChunkInfo info1 = new ChunkInfo(String.format("%d.data.%d", blockID1
.getLocalID(), 0), 0, 1024);
chunkList1.add(info1.getProtoBufMessage());
blockData1.setChunks(chunkList1);
blockData1.setBlockCommitSequenceId(1);

// Create KeyValueContainerManager
blockManager = new BlockManagerImpl(config);

}

@Test
public void testPutBlock() throws Exception {
  // Fresh container: no blocks have been written yet.
  assertEquals(0, keyValueContainer.getContainerData().getKeyCount());

  // Write a block carrying a non-zero bcsId; the container-level
  // blockCommitSequenceId is expected to advance to that value.
  blockManager.putBlock(keyValueContainer, blockData1);
  BlockData retrieved = blockManager.getBlock(keyValueContainer,
      blockData1.getBlockID());
  assertEquals(1, keyValueContainer.getContainerData().getKeyCount());
  assertEquals(1,
      keyValueContainer.getContainerData().getBlockCommitSequenceId());
  assertEquals(1, retrieved.getBlockCommitSequenceId());

  // Write a block whose bcsId is zero; the block count grows but the
  // container-level blockCommitSequenceId must stay at its previous value.
  blockManager.putBlock(keyValueContainer, blockData);
  retrieved = blockManager.getBlock(keyValueContainer,
      blockData.getBlockID());
  assertEquals(2, keyValueContainer.getContainerData().getKeyCount());
  assertEquals(0, retrieved.getBlockCommitSequenceId());
  assertEquals(1,
      keyValueContainer.getContainerData().getBlockCommitSequenceId());
}

@Test
public void testPutAndGetBlock() throws Exception {
assertEquals(0, keyValueContainer.getContainerData().getKeyCount());
Expand Down