@@ -30,10 +30,8 @@
 import com.google.common.base.Preconditions;
 import org.apache.commons.collections.MapIterator;
 import org.apache.commons.collections.map.LRUMap;
-import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
-import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaOneImpl;
-import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -158,19 +156,8 @@ public ReferenceCountedDB getDB(long containerID, String containerDBType,

     try {
       long start = Time.monotonicNow();
-      DatanodeStore store;
-
-      if (schemaVersion.equals(OzoneConsts.SCHEMA_V1)) {
-        store = new DatanodeStoreSchemaOneImpl(conf,
-            containerID, containerDBPath);
-      } else if (schemaVersion.equals(OzoneConsts.SCHEMA_V2)) {
-        store = new DatanodeStoreSchemaTwoImpl(conf,
-            containerID, containerDBPath);
-      } else {
-        throw new IllegalArgumentException(
-            "Unrecognized database schema version: " + schemaVersion);
-      }
-
+      DatanodeStore store = BlockUtils.getUncachedDatanodeStore(containerID,
+          containerDBPath, schemaVersion, conf);
       db = new ReferenceCountedDB(store, containerDBPath);
       metrics.incDbOpenLatency(Time.monotonicNow() - start);
     } catch (Exception e) {
@@ -23,12 +23,17 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
 import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaOneImpl;
+import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl;
+
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_BLOCK;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNABLE_TO_READ_METADATA_DB;
 
@@ -41,6 +46,55 @@ public final class BlockUtils {
   private BlockUtils() {
 
   }
+
+  /**
+   * Obtain a DB handler for a given container. This handler is not cached and
+   * the caller must close it after using it.
+   * If another thread attempts to open the same container when it is already
+   * opened by this thread, the other thread will get a RocksDB exception.
+   * @param containerID The containerID
+   * @param containerDBPath The absolute path to the container database folder
+   * @param schemaVersion The Container Schema version
+   * @param conf Configuration
+   * @return Handler to the given container.
+   * @throws IOException
+   */
+  public static DatanodeStore getUncachedDatanodeStore(long containerID,
+      String containerDBPath, String schemaVersion,
+      ConfigurationSource conf) throws IOException {
+
+    DatanodeStore store;
+    if (schemaVersion.equals(OzoneConsts.SCHEMA_V1)) {
+      store = new DatanodeStoreSchemaOneImpl(conf,
+          containerID, containerDBPath);
+    } else if (schemaVersion.equals(OzoneConsts.SCHEMA_V2)) {
+      store = new DatanodeStoreSchemaTwoImpl(conf,
+          containerID, containerDBPath);
+    } else {
+      throw new IllegalArgumentException(
+          "Unrecognized database schema version: " + schemaVersion);
+    }
+    return store;
+  }
+
+  /**
+   * Obtain a DB handler for a given container. This handler is not cached and
+   * the caller must close it after using it.
+   * If another thread attempts to open the same container when it is already
+   * opened by this thread, the other thread will get a RocksDB exception.
+   * @param containerData The container data
+   * @param conf Configuration
+   * @return Handler to the given container.
+   * @throws IOException
+   */
+  public static DatanodeStore getUncachedDatanodeStore(
+      KeyValueContainerData containerData, ConfigurationSource conf)
+      throws IOException {
+    return getUncachedDatanodeStore(containerData.getContainerID(),
+        containerData.getDbFile().getAbsolutePath(),
+        containerData.getSchemaVersion(), conf);
+  }
+
   /**
    * Get a DB handler for a given container.
    * If the handler doesn't exist in cache yet, first create one and
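
The javadoc contract above is worth making concrete: an uncached handle holds the RocksDB lock for the container until it is stopped, so every open must be paired with a stop. A minimal usage sketch, assuming a KeyValueContainerData instance and a ConfigurationSource are already in scope (the metadata read is illustrative only):

    DatanodeStore store = null;
    try {
      store = BlockUtils.getUncachedDatanodeStore(containerData, conf);
      Table<String, Long> metadataTable = store.getMetadataTable();
      // ... read container metadata from the table ...
    } finally {
      if (store != null) {
        try {
          store.stop();
        } catch (Exception e) {
          // stop() declares a checked Exception; log or rethrow as needed.
        }
      }
    }

This is the same open/stop pattern parseKVContainerData uses below.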
@@ -187,14 +187,23 @@ public static void parseKVContainerData(KeyValueContainerData kvContainerData,
       kvContainerData.setSchemaVersion(OzoneConsts.SCHEMA_V1);
     }
 
-
     boolean isBlockMetadataSet = false;
-
-    try(ReferenceCountedDB containerDB = BlockUtils.getDB(kvContainerData,
-            config)) {
-
-      Table<String, Long> metadataTable =
-              containerDB.getStore().getMetadataTable();
+    ReferenceCountedDB cachedDB = null;
+    DatanodeStore store = null;
+    try {
+      try {
+        store = BlockUtils.getUncachedDatanodeStore(kvContainerData, config);
+      } catch (IOException e) {
+        // If an exception is thrown, then it may indicate the RocksDB is
+        // already open in the container cache. As this code is only executed at
+        // DN startup, this should only happen in the tests.
+        cachedDB = BlockUtils.getDB(kvContainerData, config);
+        store = cachedDB.getStore();
+        LOG.warn("Attempt to get an uncached RocksDB handle failed and an " +
+            "instance was retrieved from the cache. This should only happen " +
+            "in tests");
+      }
+      Table<String, Long> metadataTable = store.getMetadataTable();
 
       // Set pending deleted block count.
       Long pendingDeleteBlockCount =
@@ -207,7 +216,7 @@ public static void parseKVContainerData(KeyValueContainerData kvContainerData,
       MetadataKeyFilters.KeyPrefixFilter filter =
           MetadataKeyFilters.getDeletingKeyFilter();
       int numPendingDeletionBlocks =
-          containerDB.getStore().getBlockDataTable()
+          store.getBlockDataTable()
               .getSequentialRangeKVs(null, Integer.MAX_VALUE, filter)
               .size();
       kvContainerData.incrPendingDeletionBlocks(numPendingDeletionBlocks);
@@ -244,61 +253,69 @@ public static void parseKVContainerData(KeyValueContainerData kvContainerData,
         isBlockMetadataSet = true;
         kvContainerData.setKeyCount(blockCount);
       }
-    }
 
-    if (!isBlockMetadataSet) {
-      initializeUsedBytesAndBlockCount(kvContainerData, config);
+      if (!isBlockMetadataSet) {
+        initializeUsedBytesAndBlockCount(store, kvContainerData);
+      }
+    } finally {
+      if (cachedDB != null) {
+        // If we get a cached instance, calling close simply decrements the
+        // reference count.
+        cachedDB.close();
+      } else if (store != null) {
+        // We only stop the store if cachedDB is null, as otherwise we would
+        // close the RocksDB handle in the cache and the next reader would fail.
+        try {
+          store.stop();
+        } catch (IOException e) {
+          throw e;
+        } catch (Exception e) {
+          throw new RuntimeException("Unexpected exception closing the " +
+              "RocksDB when loading containers", e);
+        }
+      }
     }
   }
 
-
   /**
    * Initialize bytes used and block count.
    * @param kvData
    * @throws IOException
    */
-  private static void initializeUsedBytesAndBlockCount(
-      KeyValueContainerData kvData, ConfigurationSource config)
-      throws IOException {
-
+  private static void initializeUsedBytesAndBlockCount(DatanodeStore store,
+      KeyValueContainerData kvData) throws IOException {
     final String errorMessage = "Failed to parse block data for" +
-            " Container " + kvData.getContainerID();
-
+        " Container " + kvData.getContainerID();
     long blockCount = 0;
     long usedBytes = 0;
 
-    try(ReferenceCountedDB db = BlockUtils.getDB(kvData, config)) {
-      // Count all regular blocks.
-      try (BlockIterator<BlockData> blockIter =
-               db.getStore().getBlockIterator(
-                   MetadataKeyFilters.getUnprefixedKeyFilter())) {
-
-        while (blockIter.hasNext()) {
-          blockCount++;
-          try {
-            usedBytes += getBlockLength(blockIter.nextBlock());
-          } catch (IOException ex) {
-            LOG.error(errorMessage);
-          }
+    try (BlockIterator<BlockData> blockIter =
+             store.getBlockIterator(
+                 MetadataKeyFilters.getUnprefixedKeyFilter())) {
+
+      while (blockIter.hasNext()) {
+        blockCount++;
+        try {
+          usedBytes += getBlockLength(blockIter.nextBlock());
+        } catch (IOException ex) {
+          LOG.error(errorMessage);
         }
       }
+    }
 
-      // Count all deleting blocks.
-      try (BlockIterator<BlockData> blockIter =
-               db.getStore().getBlockIterator(
-                   MetadataKeyFilters.getDeletingKeyFilter())) {
-
-        while (blockIter.hasNext()) {
-          blockCount++;
-          try {
-            usedBytes += getBlockLength(blockIter.nextBlock());
-          } catch (IOException ex) {
-            LOG.error(errorMessage);
-          }
+    // Count all deleting blocks.
+    try (BlockIterator<BlockData> blockIter =
+             store.getBlockIterator(
+                 MetadataKeyFilters.getDeletingKeyFilter())) {
+
+      while (blockIter.hasNext()) {
+        blockCount++;
+        try {
+          usedBytes += getBlockLength(blockIter.nextBlock());
+        } catch (IOException ex) {
+          LOG.error(errorMessage);
+        }
       }
     }
 
     kvData.setBytesUsed(usedBytes);
     kvData.setKeyCount(blockCount);
   }
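
The close-versus-stop split in the finally block above is the key design point: a ReferenceCountedDB obtained from the cache is shared, so close() only decrements its reference count and leaves the RocksDB open for other readers, while stop() on an uncached store actually shuts the instance down. For comparison, a sketch of the cached path, under the same kvContainerData and config assumptions as above:

    try (ReferenceCountedDB db = BlockUtils.getDB(kvContainerData, config)) {
      Table<String, Long> metadataTable = db.getStore().getMetadataTable();
      // ... closing db here releases only this reference; the underlying
      // RocksDB stays open in the ContainerCache ...
    }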
@@ -112,7 +112,10 @@ public void setup() throws Exception {
         blkNames = addBlocks(keyValueContainer, false);
         markBlocksForDelete(keyValueContainer, false, blkNames, i);
       }
-
+      // Close the RocksDB instance for this container and remove from the cache
+      // so it does not affect the ContainerReader, which avoids using the cache
+      // at startup for performance reasons.
+      BlockUtils.removeDB(keyValueContainerData, conf);
     }
   }
 
@@ -250,6 +253,10 @@ public void testMultipleContainerReader() throws Exception {
         blkNames = addBlocks(keyValueContainer, false);
         markBlocksForDelete(keyValueContainer, false, blkNames, i);
       }
+      // Close the RocksDB instance for this container and remove from the cache
+      // so it does not affect the ContainerReader, which avoids using the cache
+      // at startup for performance reasons.
+      BlockUtils.removeDB(keyValueContainerData, conf);
     }
 
     List<HddsVolume> hddsVolumes = volumeSets.getVolumesList();
@@ -271,6 +278,8 @@ public void testMultipleContainerReader() throws Exception {
" costs " + (System.currentTimeMillis() - startTime) / 1000 + "s");
Assert.assertEquals(containerCount,
containerSet.getContainerMap().entrySet().size());
Assert.assertEquals(containerCount, cache.size());
// There should be no open containers cached by the ContainerReader as it
// opens and closed them avoiding the cache.
Assert.assertEquals(0, cache.size());
}
}
@@ -148,6 +148,7 @@ public void testBuildContainerMap() throws Exception {
       Preconditions.checkState(freeBytes >= 0);
       commitSpaceMap.put(getVolumeKey(myVolume),
           Long.valueOf(volCommitBytes + freeBytes));
+      BlockUtils.removeDB(keyValueContainerData, conf);
     }
 
     DatanodeStateMachine stateMachine = Mockito.mock(
@@ -290,6 +291,7 @@ private long addBlocks(KeyValueContainer container,
     metadataTable.put(OzoneConsts.CONTAINER_BYTES_USED, usedBytes);
 
     // remaining available capacity of the container
+    db.close();
     return (freeBytes - usedBytes);
   }
 