Skip to content
Closed
Show file tree
Hide file tree
Changes from 8 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,9 @@
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.BlockDeletingService.ContainerBlockInfo;

import java.util.LinkedList;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

Expand All @@ -40,33 +41,41 @@ public class RandomContainerDeletionChoosingPolicy
LoggerFactory.getLogger(RandomContainerDeletionChoosingPolicy.class);

@Override
public List<ContainerData> chooseContainerForBlockDeletion(int count,
Map<Long, ContainerData> candidateContainers)
public List<ContainerBlockInfo> chooseContainerForBlockDeletion(
int blockCount, Map<Long, ContainerData> candidateContainers)
throws StorageContainerException {
Preconditions.checkNotNull(candidateContainers,
"Internal assertion: candidate containers cannot be null");

int currentCount = 0;
List<ContainerData> result = new LinkedList<>();
List<ContainerBlockInfo> result = new ArrayList<>();
ContainerData[] values = new ContainerData[candidateContainers.size()];
// to get a shuffle list
ContainerData[] shuffled = candidateContainers.values().toArray(values);
ArrayUtils.shuffle(shuffled);

// Here we are returning containers based on totalBlocks which is basically
// number of blocks to be deleted in an interval. We are also considering
// the boundary case where the blocks of the last container exceeds the
// number of blocks to be deleted in an interval, there we return that
// container but with container we also return an integer so that total
// blocks don't exceed the number of blocks to be deleted in an interval.

for (ContainerData entry : shuffled) {
if (currentCount < count) {
result.add(entry);
currentCount++;
if (((KeyValueContainerData) entry).getNumPendingDeletionBlocks() > 0) {
blockCount -=
((KeyValueContainerData) entry).getNumPendingDeletionBlocks();
result.add(new ContainerBlockInfo(entry,
((KeyValueContainerData) entry).getNumPendingDeletionBlocks()));
if (blockCount <= 0) {
break;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Select container {} for block deletion, "
+ "pending deletion blocks num: {}.",
entry.getContainerID(),
+ "pending deletion blocks num: {}.", entry.getContainerID(),
((KeyValueContainerData) entry).getNumPendingDeletionBlocks());
}
} else {
break;
}
}

return result;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,8 @@
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.ArrayList;
import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.BlockDeletingService.ContainerBlockInfo;

/**
* TopN Ordered choosing policy that choosing containers based on pending
Expand All @@ -49,43 +51,47 @@ public class TopNOrderedContainerDeletionChoosingPolicy
c1.getNumPendingDeletionBlocks());

@Override
public List<ContainerData> chooseContainerForBlockDeletion(int count,
Map<Long, ContainerData> candidateContainers)
public List<ContainerBlockInfo> chooseContainerForBlockDeletion(
int totalBlocks, Map<Long, ContainerData> candidateContainers)
throws StorageContainerException {

Preconditions.checkNotNull(candidateContainers,
"Internal assertion: candidate containers cannot be null");

List<ContainerData> result = new LinkedList<>();
List<ContainerBlockInfo> result = new ArrayList<>();
List<KeyValueContainerData> orderedList = new LinkedList<>();
for (ContainerData entry : candidateContainers.values()) {
orderedList.add((KeyValueContainerData)entry);
}
Collections.sort(orderedList, KEY_VALUE_CONTAINER_DATA_COMPARATOR);

// get top N list ordered by pending deletion blocks' number
int currentCount = 0;
// Here we are returning containers based on totalBlocks which is basically
// number of blocks to be deleted in an interval. We are also considering
// the boundary case where the blocks of the last container exceeds the
// number of blocks to be deleted in an interval, there we return that
// container but with container we also return an integer so that total
// blocks don't exceed the number of blocks to be deleted in an interval.

for (KeyValueContainerData entry : orderedList) {
if (currentCount < count) {
if (entry.getNumPendingDeletionBlocks() > 0) {
result.add(entry);
currentCount++;
if (LOG.isDebugEnabled()) {
LOG.debug(
"Select container {} for block deletion, "
+ "pending deletion blocks num: {}.",
entry.getContainerID(),
entry.getNumPendingDeletionBlocks());
}
} else {
LOG.debug("Stop looking for next container, there is no"
+ " pending deletion block contained in remaining containers.");
if (entry.getNumPendingDeletionBlocks() > 0) {
totalBlocks -= entry.getNumPendingDeletionBlocks();
result.add(
new ContainerBlockInfo(entry, entry.getNumPendingDeletionBlocks()));
if (totalBlocks <= 0) {
break;
}
if (LOG.isDebugEnabled()) {
LOG.debug("Select container {} for block deletion, "
+ "pending deletion blocks num: {}.", entry.getContainerID(),
entry.getNumPendingDeletionBlocks());
}
} else {
LOG.debug("Stop looking for next container, there is no"
+ " pending deletion block contained in remaining containers.");
break;
}
}

return result;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
import org.apache.hadoop.hdds.scm.container.common.helpers
.StorageContainerException;
import org.apache.hadoop.ozone.container.common.impl.ContainerData;
import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.BlockDeletingService.ContainerBlockInfo;

import java.util.List;
import java.util.Map;
Expand All @@ -40,8 +41,8 @@ public interface ContainerDeletionChoosingPolicy {
* @return container data list
* @throws StorageContainerException
*/
List<ContainerData> chooseContainerForBlockDeletion(int count,
Map<Long, ContainerData> candidateContainers)
List<ContainerBlockInfo> chooseContainerForBlockDeletion(
int count, Map<Long, ContainerData> candidateContainers)
throws StorageContainerException;

/**
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,7 @@ public class DatanodeConfiguration {
private int replicationMaxStreams = REPLICATION_MAX_STREAMS_DEFAULT;

static final int CONTAINER_DELETE_THREADS_DEFAULT = 2;
static final int OZONE_BLOCK_DELETING_LIMIT_PER_INTERVAL = 1000;

/**
* The maximum number of threads used to delete containers on a datanode
Expand Down Expand Up @@ -93,6 +94,23 @@ public void setBlockDeletionInterval(Duration duration) {
this.blockDeletionInterval = duration.toMillis();
}

  /**
   * Maximum number of blocks this datanode deletes in one block-deletion
   * interval. Defaults to OZONE_BLOCK_DELETING_LIMIT_PER_INTERVAL (1000).
   * NOTE(review): the annotation's defaultValue "1000" duplicates that
   * constant — keep the two in sync if either changes.
   */
  @Config(key = "block.deleting.limit.per.interval",
      defaultValue = "1000",
      type = ConfigType.INT,
      tags = { ConfigTag.SCM, ConfigTag.DELETION },
      description =
          "Number of blocks to be deleted in an interval."
  )
  private int blockLimitPerInterval = OZONE_BLOCK_DELETING_LIMIT_PER_INTERVAL;

  /** @return the per-interval block deletion limit. */
  public int getBlockDeletionLimit() {
    return blockLimitPerInterval;
  }

  /** Sets the per-interval block deletion limit. */
  public void setBlockDeletionLimit(int limit) {
    this.blockLimitPerInterval = limit;
  }

@PostConstruct
public void validate() {
if (replicationMaxStreams < 1) {
Expand Down
Loading