Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
Expand All @@ -64,6 +65,7 @@
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.apache.hadoop.ozone.container.metadata.DeleteTransactionStore;
import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
import org.apache.hadoop.ozone.protocol.commands.DeleteBlockCommandStatus;
import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
Expand Down Expand Up @@ -643,14 +645,19 @@ private void updateMetaData(KeyValueContainerData containerData,
containerData.getPendingDeleteBlockCountKey(),
pendingDeleteBlocks);

// update pending deletion blocks count and delete transaction ID in
// in-memory container status
long pendingBytes = containerData.getBlockPendingDeletionBytes() + delTX.getTotalBlockSize();
metadataTable
.putWithBatch(batchOperation,
containerData.getPendingDeleteBlockBytesKey(),
pendingBytes);
containerData.incrPendingDeletionBlocks(newDeletionBlocks, delTX.getTotalBlockSize());
// Update pending deletion blocks count, blocks bytes and delete transaction ID in in-memory container status.
// Persist pending bytes only if the feature is finalized.
if (VersionedDatanodeFeatures.isFinalized(
HDDSLayoutFeature.STORAGE_SPACE_DISTRIBUTION) && delTX.hasTotalBlockSize()) {
long pendingBytes = containerData.getBlockPendingDeletionBytes();
pendingBytes += delTX.getTotalBlockSize();
metadataTable
.putWithBatch(batchOperation,
containerData.getPendingDeleteBlockBytesKey(),
pendingBytes);
}
containerData.incrPendingDeletionBlocks(newDeletionBlocks,
delTX.hasTotalBlockSize() ? delTX.getTotalBlockSize() : 0);
containerData.updateDeleteTransactionId(delTX.getTxID());
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -51,13 +51,15 @@
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.container.common.impl.ContainerData;
import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
import org.apache.hadoop.ozone.container.common.interfaces.DBHandle;
import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
import org.yaml.snakeyaml.nodes.Tag;

/**
Expand Down Expand Up @@ -385,8 +387,10 @@ public void updateAndCommitDBCounters(DBHandle db,
metadataTable.putWithBatch(batchOperation, getBlockCountKey(), b.getCount() - deletedBlockCount);
metadataTable.putWithBatch(batchOperation, getPendingDeleteBlockCountKey(),
b.getPendingDeletion() - deletedBlockCount);
metadataTable.putWithBatch(batchOperation, getPendingDeleteBlockBytesKey(),
b.getPendingDeletionBytes() - releasedBytes);
if (VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.STORAGE_SPACE_DISTRIBUTION)) {
metadataTable.putWithBatch(batchOperation, getPendingDeleteBlockBytesKey(),
b.getPendingDeletionBytes() - releasedBytes);
}

db.getStore().getBatchHandler().commitBatchOperation(batchOperation);
}
Expand All @@ -397,7 +401,9 @@ public void resetPendingDeleteBlockCount(DBHandle db) throws IOException {
// Reset the metadata on disk.
Table<String, Long> metadataTable = db.getStore().getMetadataTable();
metadataTable.put(getPendingDeleteBlockCountKey(), 0L);
metadataTable.put(getPendingDeleteBlockBytesKey(), 0L);
if (VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.STORAGE_SPACE_DISTRIBUTION)) {
metadataTable.put(getPendingDeleteBlockBytesKey(), 0L);
}
}

// NOTE: Below are some helper functions to format keys according
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
import org.apache.hadoop.hdds.server.JsonUtils;
import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.OzoneConsts;
Expand All @@ -45,6 +46,7 @@
import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl;
import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl;
import org.apache.hadoop.ozone.container.metadata.DatanodeStoreWithIncrementalChunkList;
import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

Expand Down Expand Up @@ -239,8 +241,10 @@ static ObjectNode getDBMetadataJson(Table<String, Long> metadataTable,
metadataTable.get(containerData.getBytesUsedKey()));
dBMetadata.put(OzoneConsts.PENDING_DELETE_BLOCK_COUNT,
metadataTable.get(containerData.getPendingDeleteBlockCountKey()));
dBMetadata.put(OzoneConsts.PENDING_DELETE_BLOCK_BYTES,
metadataTable.get(containerData.getPendingDeleteBlockBytesKey()));
if (metadataTable.get(containerData.getPendingDeleteBlockBytesKey()) != null) {
dBMetadata.put(OzoneConsts.PENDING_DELETE_BLOCK_BYTES,
metadataTable.get(containerData.getPendingDeleteBlockBytesKey()));
}
dBMetadata.put(OzoneConsts.DELETE_TRANSACTION_KEY,
metadataTable.get(containerData.getLatestDeleteTxnKey()));
dBMetadata.put(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID,
Expand Down Expand Up @@ -434,28 +438,30 @@ private boolean checkAndRepair(ObjectNode parent,
errors.add(deleteCountError);
}

// check and repair if db delete bytes mismatches delete transaction
JsonNode pendingDeletionBlockSize = dBMetadata.path(
OzoneConsts.PENDING_DELETE_BLOCK_BYTES);
final long dbDeleteBytes = jsonToLong(pendingDeletionBlockSize);
final JsonNode pendingDeleteBytesAggregate = aggregates.path(PendingDelete.BYTES);
final long deleteTransactionBytes = jsonToLong(pendingDeleteBytesAggregate);
if (dbDeleteBytes != deleteTransactionBytes) {
passed = false;
final BooleanSupplier deleteBytesRepairAction = () -> {
final String key = containerData.getPendingDeleteBlockBytesKey();
try {
metadataTable.put(key, deleteTransactionBytes);
} catch (IOException ex) {
LOG.error("Failed to reset {} for container {}.",
key, containerData.getContainerID(), ex);
}
return false;
};
final ObjectNode deleteBytesError = buildErrorAndRepair(
"dBMetadata." + OzoneConsts.PENDING_DELETE_BLOCK_BYTES,
pendingDeleteBytesAggregate, pendingDeletionBlockSize, deleteBytesRepairAction);
errors.add(deleteBytesError);
if (VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.STORAGE_SPACE_DISTRIBUTION)) {
// check and repair if db delete bytes mismatches delete transaction
JsonNode pendingDeletionBlockSize = dBMetadata.path(
OzoneConsts.PENDING_DELETE_BLOCK_BYTES);
final long dbDeleteBytes = jsonToLong(pendingDeletionBlockSize);
final JsonNode pendingDeleteBytesAggregate = aggregates.path(PendingDelete.BYTES);
final long deleteTransactionBytes = jsonToLong(pendingDeleteBytesAggregate);
if (dbDeleteBytes != deleteTransactionBytes) {
passed = false;
final BooleanSupplier deleteBytesRepairAction = () -> {
final String key = containerData.getPendingDeleteBlockBytesKey();
try {
metadataTable.put(key, deleteTransactionBytes);
} catch (IOException ex) {
LOG.error("Failed to reset {} for container {}.",
key, containerData.getContainerID(), ex);
}
return false;
};
final ObjectNode deleteBytesError = buildErrorAndRepair(
"dBMetadata." + OzoneConsts.PENDING_DELETE_BLOCK_BYTES,
pendingDeleteBytesAggregate, pendingDeletionBlockSize, deleteBytesRepairAction);
errors.add(deleteBytesError);
}
}

// check and repair chunks dir.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ public class PendingDelete {
private final long count;
private final long bytes;

PendingDelete(long count, long bytes) {
public PendingDelete(long count, long bytes) {
this.count = count;
this.bytes = bytes;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerChecksumInfo;
import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.checksum.ContainerChecksumTreeManager;
Expand All @@ -47,6 +48,7 @@
import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaOneImpl;
import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl;
import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

Expand Down Expand Up @@ -318,69 +320,24 @@ private static void populateContainerMetadata(
throws IOException {
Table<String, Long> metadataTable = store.getMetadataTable();

// Set pending deleted block count.
final long blockPendingDeletion;
long blockPendingDeletionBytes = 0L;
Long pendingDeletionBlockBytes = metadataTable.get(kvContainerData
.getPendingDeleteBlockBytesKey());
Long pendingDeleteBlockCount =
metadataTable.get(kvContainerData
.getPendingDeleteBlockCountKey());
if (pendingDeleteBlockCount != null) {
blockPendingDeletion = pendingDeleteBlockCount;
if (pendingDeletionBlockBytes != null) {
blockPendingDeletionBytes = pendingDeletionBlockBytes;
} else {
LOG.warn("Missing pendingDeleteBlocksize from {}: recalculate them from delete txn tables",
metadataTable.getName());
PendingDelete pendingDeletions = getAggregatePendingDelete(
store, kvContainerData, kvContainerData.getSchemaVersion());
blockPendingDeletionBytes = pendingDeletions.getBytes();
}
} else {
LOG.warn("Missing pendingDeleteBlockCount/size from {}: recalculate them from delete txn tables",
metadataTable.getName());
PendingDelete pendingDeletions = getAggregatePendingDelete(
store, kvContainerData, kvContainerData.getSchemaVersion());
blockPendingDeletion = pendingDeletions.getCount();
blockPendingDeletionBytes = pendingDeletions.getBytes();
}
// Set delete transaction id.
Long delTxnId =
metadataTable.get(kvContainerData.getLatestDeleteTxnKey());
// Set pending deleted block count and bytes
PendingDelete pendingDeletions = populatePendingDeletionMetadata(kvContainerData, metadataTable, store);

// Set delete transaction id
Long delTxnId = metadataTable.get(kvContainerData.getLatestDeleteTxnKey());
if (delTxnId != null) {
kvContainerData
.updateDeleteTransactionId(delTxnId);
kvContainerData.updateDeleteTransactionId(delTxnId);
}

// Set BlockCommitSequenceId.
Long bcsId = metadataTable.get(
kvContainerData.getBcsIdKey());
// Set BlockCommitSequenceId
Long bcsId = metadataTable.get(kvContainerData.getBcsIdKey());
if (bcsId != null) {
kvContainerData
.updateBlockCommitSequenceId(bcsId);
}

// Set bytes used.
// commitSpace for Open Containers relies on usedBytes
final long blockBytes;
final long blockCount;
final Long metadataTableBytesUsed = metadataTable.get(kvContainerData.getBytesUsedKey());
// Set block count.
final Long metadataTableBlockCount = metadataTable.get(kvContainerData.getBlockCountKey());
if (metadataTableBytesUsed != null && metadataTableBlockCount != null) {
blockBytes = metadataTableBytesUsed;
blockCount = metadataTableBlockCount;
} else {
LOG.warn("Missing bytesUsed={} or blockCount={} from {}: recalculate them from block table",
metadataTableBytesUsed, metadataTableBlockCount, metadataTable.getName());
final ContainerData.BlockByteAndCounts b = getUsedBytesAndBlockCount(store, kvContainerData);
blockBytes = b.getBytes();
blockCount = b.getCount();
kvContainerData.updateBlockCommitSequenceId(bcsId);
}

kvContainerData.getStatistics().updateBlocks(blockBytes, blockCount);
kvContainerData.getStatistics().setBlockPendingDeletion(blockPendingDeletion, blockPendingDeletionBytes);
// Set block statistics
populateBlockStatistics(kvContainerData, metadataTable, store);
kvContainerData.getStatistics().setBlockPendingDeletion(pendingDeletions.getCount(), pendingDeletions.getBytes());

// If the container is missing a chunks directory, possibly due to the
// bug fixed by HDDS-6235, create it here.
Expand All @@ -404,6 +361,78 @@ private static void populateContainerMetadata(
populateContainerFinalizeBlock(kvContainerData, store);
}

/**
 * Loads the pending-deletion block count and byte total for a container from
 * its metadata table, recomputing them from the delete transaction tables
 * when the persisted values are missing.
 *
 * @param kvContainerData container whose pending-delete metadata is loaded
 * @param metadataTable   per-container metadata table backing the keys
 * @param store           datanode store used for recalculation fallbacks
 * @return pending deletion count and bytes for the container
 * @throws IOException if the metadata table or delete txn tables cannot be read
 */
private static PendingDelete populatePendingDeletionMetadata(
    KeyValueContainerData kvContainerData, Table<String, Long> metadataTable,
    DatanodeStore store) throws IOException {

  Long pendingDeleteBlockCount = metadataTable.get(kvContainerData.getPendingDeleteBlockCountKey());

  // Before STORAGE_SPACE_DISTRIBUTION is finalized the byte total is never
  // persisted, so skip the DB read of the bytes key entirely in that path.
  if (!VersionedDatanodeFeatures.isFinalized(HDDSLayoutFeature.STORAGE_SPACE_DISTRIBUTION)) {
    return handlePreDataDistributionFeature(pendingDeleteBlockCount, metadataTable, store, kvContainerData);
  }

  if (pendingDeleteBlockCount != null) {
    // Only read the bytes key when the count exists; it is unused otherwise.
    Long pendingDeletionBlockBytes = metadataTable.get(kvContainerData.getPendingDeleteBlockBytesKey());
    return handlePostDataDistributionFeature(pendingDeleteBlockCount, pendingDeletionBlockBytes,
        metadataTable, store, kvContainerData);
  }

  // Neither value is usable; rebuild both from the delete transaction tables.
  LOG.warn("Missing pendingDeleteBlockCount/size from {}: recalculate them from delete txn tables",
      metadataTable.getName());
  return getAggregatePendingDelete(store, kvContainerData, kvContainerData.getSchemaVersion());
}

/**
 * Resolves pending-delete metadata for a datanode that has NOT finalized the
 * STORAGE_SPACE_DISTRIBUTION layout feature: only the block count is
 * persisted, so the byte total is reported as zero whenever a stored count
 * is found. A missing count falls back to a full recalculation from the
 * delete transaction tables.
 */
private static PendingDelete handlePreDataDistributionFeature(
    Long pendingDeleteBlockCount, Table<String, Long> metadataTable,
    DatanodeStore store, KeyValueContainerData kvContainerData) throws IOException {

  if (pendingDeleteBlockCount == null) {
    // No persisted count: rebuild the aggregate from the delete txn tables.
    LOG.warn("Missing pendingDeleteBlockCount/size from {}: recalculate them from delete txn tables",
        metadataTable.getName());
    return getAggregatePendingDelete(store, kvContainerData, kvContainerData.getSchemaVersion());
  }
  // Bytes are not tracked pre-finalization; report zero alongside the count.
  return new PendingDelete(pendingDeleteBlockCount, 0L);
}

/**
 * Resolves pending-delete metadata for a datanode that HAS finalized the
 * STORAGE_SPACE_DISTRIBUTION layout feature. The persisted count is always
 * present here; if the persisted byte total is absent, the bytes alone are
 * recomputed from the delete transaction tables while the stored count is
 * kept as-is.
 */
private static PendingDelete handlePostDataDistributionFeature(
    Long pendingDeleteBlockCount, Long pendingDeletionBlockBytes,
    Table<String, Long> metadataTable, DatanodeStore store,
    KeyValueContainerData kvContainerData) throws IOException {

  long resolvedBytes;
  if (pendingDeletionBlockBytes == null) {
    // Bytes key missing (e.g. written before the feature landed): recompute.
    LOG.warn("Missing pendingDeleteBlockSize from {}: recalculate them from delete txn tables",
        metadataTable.getName());
    final PendingDelete recalculated =
        getAggregatePendingDelete(store, kvContainerData, kvContainerData.getSchemaVersion());
    resolvedBytes = recalculated.getBytes();
  } else {
    resolvedBytes = pendingDeletionBlockBytes;
  }
  return new PendingDelete(pendingDeleteBlockCount, resolvedBytes);
}

/**
 * Loads the container's used-bytes and block-count statistics from the
 * metadata table and pushes them into the in-memory container statistics.
 * If either persisted value is missing, both are recomputed from the block
 * table instead.
 */
private static void populateBlockStatistics(
    KeyValueContainerData kvContainerData, Table<String, Long> metadataTable,
    DatanodeStore store) throws IOException {

  final Long storedBytesUsed = metadataTable.get(kvContainerData.getBytesUsedKey());
  final Long storedBlockCount = metadataTable.get(kvContainerData.getBlockCountKey());

  long resolvedBytes;
  long resolvedCount;
  if (storedBytesUsed == null || storedBlockCount == null) {
    // Either counter is absent from the DB: recompute both from the blocks.
    LOG.warn("Missing bytesUsed={} or blockCount={} from {}: recalculate them from block table",
        storedBytesUsed, storedBlockCount, metadataTable.getName());
    final ContainerData.BlockByteAndCounts recomputed = getUsedBytesAndBlockCount(store, kvContainerData);
    resolvedBytes = recomputed.getBytes();
    resolvedCount = recomputed.getCount();
  } else {
    resolvedBytes = storedBytesUsed;
    resolvedCount = storedBlockCount;
  }

  // commitSpace for Open Containers relies on usedBytes.
  kvContainerData.getStatistics().updateBlocks(resolvedBytes, resolvedCount);
}

/**
* Loads finalizeBlockLocalIds for container in memory.
* @param kvContainerData - KeyValueContainerData
Expand Down
Loading