@@ -80,8 +80,10 @@ public final List<ContainerBlockInfo> chooseContainerForBlockDeletion(
}
}
}
LOG.info("Chosen {}/{} blocks from {} candidate containers.",
(originalBlockCount - blockCount), blockCount, orderedList.size());
if (!orderedList.isEmpty()) {
LOG.info("Chosen {}/{} blocks from {} candidate containers.",
(originalBlockCount - blockCount), blockCount, orderedList.size());
}
return result;
}

@@ -781,7 +781,7 @@ private long calculatePipelineBytesWritten(HddsProtos.PipelineID pipelineID) {
ContainerData containerData = container.getContainerData();
if (containerData.getOriginPipelineId()
.compareTo(pipelineID.getId()) == 0) {
bytesWritten += containerData.getWriteBytes();
bytesWritten += containerData.getStatistics().getWriteBytes();
}
}
return bytesWritten;
@@ -849,57 +849,8 @@ private void clearPendingPutBlockCache() {
* Returns KeyValueContainerReport for the KeyValueContainer.
*/
@Override
public ContainerReplicaProto getContainerReport()
throws StorageContainerException {
ContainerReplicaProto.Builder ciBuilder =
ContainerReplicaProto.newBuilder();
ciBuilder.setContainerID(containerData.getContainerID())
.setReadCount(containerData.getReadCount())
.setWriteCount(containerData.getWriteCount())
.setReadBytes(containerData.getReadBytes())
.setWriteBytes(containerData.getWriteBytes())
.setKeyCount(containerData.getBlockCount())
.setUsed(containerData.getBytesUsed())
.setState(getHddsState())
.setReplicaIndex(containerData.getReplicaIndex())
.setDeleteTransactionId(containerData.getDeleteTransactionId())
.setBlockCommitSequenceId(containerData.getBlockCommitSequenceId())
.setOriginNodeId(containerData.getOriginNodeId())
.setIsEmpty(containerData.isEmpty());
return ciBuilder.build();
}

/**
* Returns LifeCycle State of the container.
* @return LifeCycle State of the container in HddsProtos format
* @throws StorageContainerException
*/
private ContainerReplicaProto.State getHddsState()
throws StorageContainerException {
ContainerReplicaProto.State state;
switch (containerData.getState()) {
case OPEN:
state = ContainerReplicaProto.State.OPEN;
break;
case CLOSING:
state = ContainerReplicaProto.State.CLOSING;
break;
case QUASI_CLOSED:
state = ContainerReplicaProto.State.QUASI_CLOSED;
break;
case CLOSED:
state = ContainerReplicaProto.State.CLOSED;
break;
case UNHEALTHY:
state = ContainerReplicaProto.State.UNHEALTHY;
break;
case DELETED:
state = ContainerReplicaProto.State.DELETED;
break;
default:
throw new StorageContainerException("Invalid Container state: " + containerData, INVALID_CONTAINER_STATE);
}
return state;
public ContainerReplicaProto getContainerReport() throws StorageContainerException {
return containerData.buildContainerReplicaProto();
}

/**
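The new getContainerReport() body above delegates the per-field builder population to the container's statistics object. That statistics class is not part of this diff; the sketch below only illustrates the shape such a helper could take, assuming the statistics object keeps the same counters the removed code read (read/write counts and bytes, block count, bytes used) in AtomicLong fields with these hypothetical names. The builder setters themselves (setReadCount, setWriteCount, setReadBytes, setWriteBytes, setKeyCount, setUsed) are taken from the removed code.

import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;

// Hypothetical sketch only; the class and field names are assumptions, not taken from this diff.
public class ContainerStatistics {
  private final AtomicLong readCount = new AtomicLong();
  private final AtomicLong writeCount = new AtomicLong();
  private final AtomicLong readBytes = new AtomicLong();
  private final AtomicLong writeBytes = new AtomicLong();
  private final AtomicLong blockCount = new AtomicLong();
  private final AtomicLong bytesUsed = new AtomicLong();

  /** Copies the counters into the replica report and returns the builder so the caller can keep chaining. */
  public ContainerReplicaProto.Builder setContainerReplicaProto(ContainerReplicaProto.Builder builder) {
    return builder.setReadCount(readCount.get())
        .setWriteCount(writeCount.get())
        .setReadBytes(readBytes.get())
        .setWriteBytes(writeBytes.get())
        .setKeyCount(blockCount.get())
        .setUsed(bytesUsed.get());
  }
}

Returning the builder matches the call site in buildContainerReplicaProto() below, which continues the chain with setContainerID, setState, and the remaining fields.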
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.container.keyvalue;

import static java.lang.Math.max;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.INVALID_CONTAINER_STATE;
import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID;
import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_COUNT;
import static org.apache.hadoop.ozone.OzoneConsts.CHUNKS_PATH;
@@ -44,9 +45,10 @@
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.Table;
@@ -79,11 +81,6 @@ public class KeyValueContainerData extends ContainerData {

private String schemaVersion;

/**
* Number of pending deletion blocks in KeyValueContainer.
*/
private final AtomicLong numPendingDeletionBlocks;

private long deleteTransactionId;

private long blockCommitSequenceId;
@@ -110,7 +107,6 @@ public KeyValueContainerData(long id, ContainerLayoutVersion layoutVersion,
long size, String originPipelineId, String originNodeId) {
super(ContainerProtos.ContainerType.KeyValueContainer, id, layoutVersion,
size, originPipelineId, originNodeId);
this.numPendingDeletionBlocks = new AtomicLong(0);
this.deleteTransactionId = 0;
finalizedBlockSet = ConcurrentHashMap.newKeySet();
}
@@ -119,7 +115,6 @@ public KeyValueContainerData(KeyValueContainerData source) {
super(source);
Preconditions.checkArgument(source.getContainerType()
== ContainerProtos.ContainerType.KeyValueContainer);
this.numPendingDeletionBlocks = new AtomicLong(0);
this.deleteTransactionId = 0;
this.schemaVersion = source.getSchemaVersion();
finalizedBlockSet = ConcurrentHashMap.newKeySet();
@@ -240,23 +235,14 @@ public void setContainerDBType(String containerDBType) {
* @param numBlocks increment number
*/
public void incrPendingDeletionBlocks(long numBlocks) {
this.numPendingDeletionBlocks.addAndGet(numBlocks);
}

/**
* Decrease the count of pending deletion blocks.
*
* @param numBlocks decrement number
*/
public void decrPendingDeletionBlocks(long numBlocks) {
this.numPendingDeletionBlocks.addAndGet(-1 * numBlocks);
getStatistics().addBlockPendingDeletion(numBlocks);
}

/**
* Get the number of pending deletion blocks.
*/
public long getNumPendingDeletionBlocks() {
return this.numPendingDeletionBlocks.get();
return getStatistics().getBlockPendingDeletion();
}

/**
@@ -275,6 +261,39 @@ public long getDeleteTransactionId() {
return deleteTransactionId;
}

ContainerReplicaProto buildContainerReplicaProto() throws StorageContainerException {
return getStatistics().setContainerReplicaProto(ContainerReplicaProto.newBuilder())
.setContainerID(getContainerID())
.setState(getContainerReplicaProtoState(getState()))
.setIsEmpty(isEmpty())
.setOriginNodeId(getOriginNodeId())
.setReplicaIndex(getReplicaIndex())
.setBlockCommitSequenceId(getBlockCommitSequenceId())
.setDeleteTransactionId(getDeleteTransactionId())
.build();
}

// TODO remove one of the State from proto
static ContainerReplicaProto.State getContainerReplicaProtoState(ContainerDataProto.State state)
throws StorageContainerException {
switch (state) {
case OPEN:
return ContainerReplicaProto.State.OPEN;
case CLOSING:
return ContainerReplicaProto.State.CLOSING;
case QUASI_CLOSED:
return ContainerReplicaProto.State.QUASI_CLOSED;
case CLOSED:
return ContainerReplicaProto.State.CLOSED;
case UNHEALTHY:
return ContainerReplicaProto.State.UNHEALTHY;
case DELETED:
return ContainerReplicaProto.State.DELETED;
default:
throw new StorageContainerException("Invalid container state: " + state, INVALID_CONTAINER_STATE);
}
}

/**
* Add the given localID of a block to the finalizedBlockSet.
*/
@@ -315,7 +334,6 @@ public ContainerDataProto getProtoBufMessage() {
builder.setContainerID(this.getContainerID());
builder.setContainerPath(this.getContainerPath());
builder.setState(this.getState());
builder.setBlockCount(this.getBlockCount());

for (Map.Entry<String, String> entry : getMetadata().entrySet()) {
ContainerProtos.KeyValue.Builder keyValBuilder =
@@ -324,9 +342,7 @@
.setValue(entry.getValue()).build());
}

if (this.getBytesUsed() >= 0) {
builder.setBytesUsed(this.getBytesUsed());
}
getStatistics().setContainerDataProto(builder);

if (this.getContainerType() != null) {
builder.setContainerType(ContainerProtos.ContainerType.KeyValueContainer);
@@ -353,20 +369,18 @@ public void updateAndCommitDBCounters(DBHandle db,
Table<String, Long> metadataTable = db.getStore().getMetadataTable();

// Set Bytes used and block count key.
metadataTable.putWithBatch(batchOperation, getBytesUsedKey(),
getBytesUsed() - releasedBytes);
metadataTable.putWithBatch(batchOperation, getBlockCountKey(),
getBlockCount() - deletedBlockCount);
metadataTable.putWithBatch(batchOperation,
getPendingDeleteBlockCountKey(),
getNumPendingDeletionBlocks() - deletedBlockCount);
final BlockByteAndCounts b = getStatistics().getBlockByteAndCounts();
metadataTable.putWithBatch(batchOperation, getBytesUsedKey(), b.getBytes() - releasedBytes);
metadataTable.putWithBatch(batchOperation, getBlockCountKey(), b.getCount() - deletedBlockCount);
metadataTable.putWithBatch(batchOperation, getPendingDeleteBlockCountKey(),
b.getPendingDeletion() - deletedBlockCount);

db.getStore().getBatchHandler().commitBatchOperation(batchOperation);
}

public void resetPendingDeleteBlockCount(DBHandle db) throws IOException {
// Reset the in memory metadata.
numPendingDeletionBlocks.set(0);
getStatistics().resetBlockPendingDeletion();
// Reset the metadata on disk.
Table<String, Long> metadataTable = db.getStore().getMetadataTable();
metadataTable.put(getPendingDeleteBlockCountKey(), 0L);
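The updateAndCommitDBCounters change above now reads a single BlockByteAndCounts snapshot from the statistics object instead of calling the individual getters. The snapshot class is not shown in this diff; below is a minimal sketch under the assumption that it is a plain immutable value object, with only the three getters used at the call site (getBytes, getCount, getPendingDeletion) taken from the diff. Reading one snapshot means the three batched writes are computed from values captured at a single point rather than from three separate reads of live counters.

// Hypothetical sketch; the constructor and field layout are assumptions.
public final class BlockByteAndCounts {
  private final long bytes;            // bytes used by the container's blocks
  private final long count;            // number of blocks
  private final long pendingDeletion;  // blocks queued for deletion

  public BlockByteAndCounts(long bytes, long count, long pendingDeletion) {
    this.bytes = bytes;
    this.count = count;
    this.pendingDeletion = pendingDeletion;
  }

  public long getBytes() {
    return bytes;
  }

  public long getCount() {
    return count;
  }

  public long getPendingDeletion() {
    return pendingDeletion;
  }
}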
@@ -1450,6 +1450,7 @@ private void deleteInternal(Container container, boolean force)
long startTime = clock.millis();
container.writeLock();
try {
final ContainerData data = container.getContainerData();
if (container.getContainerData().getVolume().isFailed()) {
// if the volume in which the container resides fails
// don't attempt to delete/move it. When a volume fails,
@@ -1474,10 +1475,7 @@
// container is unhealthy or over-replicated).
if (container.hasBlocks()) {
metrics.incContainerDeleteFailedNonEmpty();
LOG.error("Received container deletion command for container {} but" +
" the container is not empty with blockCount {}",
container.getContainerData().getContainerID(),
container.getContainerData().getBlockCount());
LOG.error("Received container deletion command for non-empty {}: {}", data, data.getStatistics());
// blocks table for future debugging.
// List blocks
logBlocksIfNonZero(container);