diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
index a7fa54a1797f..c56c7432adcb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
@@ -157,7 +157,7 @@ public ReferenceCountedDB getDB(long containerID, String containerDBType,
     try {
       long start = Time.monotonicNow();
       DatanodeStore store = BlockUtils.getUncachedDatanodeStore(containerID,
-          containerDBPath, schemaVersion, conf);
+          containerDBPath, schemaVersion, conf, false);
       db = new ReferenceCountedDB(store, containerDBPath);
       metrics.incDbOpenLatency(Time.monotonicNow() - start);
     } catch (Exception e) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
index 0a8d692afd95..e842d17f2ace 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
@@ -61,15 +61,15 @@ private BlockUtils() {
    */
   public static DatanodeStore getUncachedDatanodeStore(long containerID,
       String containerDBPath, String schemaVersion,
-      ConfigurationSource conf) throws IOException {
+      ConfigurationSource conf, boolean readOnly) throws IOException {
     DatanodeStore store;
     if (schemaVersion.equals(OzoneConsts.SCHEMA_V1)) {
       store = new DatanodeStoreSchemaOneImpl(conf,
-          containerID, containerDBPath);
+          containerID, containerDBPath, readOnly);
     } else if (schemaVersion.equals(OzoneConsts.SCHEMA_V2)) {
       store = new DatanodeStoreSchemaTwoImpl(conf,
-          containerID, containerDBPath);
+          containerID, containerDBPath, readOnly);
     } else {
       throw new IllegalArgumentException(
           "Unrecognized database schema version: " + schemaVersion);
@@ -88,11 +88,11 @@ public static DatanodeStore getUncachedDatanodeStore(long containerID,
    * @throws IOException
    */
   public static DatanodeStore getUncachedDatanodeStore(
-      KeyValueContainerData containerData, ConfigurationSource conf)
-      throws IOException {
+      KeyValueContainerData containerData, ConfigurationSource conf,
+      boolean readOnly) throws IOException {
     return getUncachedDatanodeStore(containerData.getContainerID(),
         containerData.getDbFile().getAbsolutePath(),
-        containerData.getSchemaVersion(), conf);
+        containerData.getSchemaVersion(), conf, readOnly);
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index 1780b1ebf0e3..7c75108d7d83 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -106,10 +106,10 @@ public static void createContainerMetaData(long containerID,
     DatanodeStore store;
     if (schemaVersion.equals(OzoneConsts.SCHEMA_V1)) {
       store = new DatanodeStoreSchemaOneImpl(conf,
-          containerID, dbFile.getAbsolutePath());
+          containerID, dbFile.getAbsolutePath(), false);
     } else if (schemaVersion.equals(OzoneConsts.SCHEMA_V2)) {
       store = new DatanodeStoreSchemaTwoImpl(conf,
-          containerID, dbFile.getAbsolutePath());
+          containerID, dbFile.getAbsolutePath(), false);
     } else {
       throw new IllegalArgumentException(
           "Unrecognized schema version for container: " + schemaVersion);
@@ -192,7 +192,8 @@ public static void parseKVContainerData(KeyValueContainerData kvContainerData,
     DatanodeStore store = null;
     try {
       try {
-        store = BlockUtils.getUncachedDatanodeStore(kvContainerData, config);
+        store = BlockUtils.getUncachedDatanodeStore(
+            kvContainerData, config, true);
       } catch (IOException e) {
         // If an exception is thrown, then it may indicate the RocksDB is
         // already open in the container cache. As this code is only executed at
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
index efbc24730af7..12921af1ead3 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
@@ -77,6 +77,7 @@ public abstract class AbstractDatanodeStore implements DatanodeStore {
   private static final DBProfile DEFAULT_PROFILE = DBProfile.DISK;
   private static final Map OPTIONS_CACHE = new ConcurrentHashMap<>();
 
+  private final boolean openReadOnly;
 
   /**
    * Constructs the metadata store and starts the DB services.
@@ -85,7 +86,8 @@ public abstract class AbstractDatanodeStore implements DatanodeStore {
    * @throws IOException - on Failure.
    */
   protected AbstractDatanodeStore(ConfigurationSource config, long containerID,
-      AbstractDatanodeDBDefinition dbDef) throws IOException {
+      AbstractDatanodeDBDefinition dbDef, boolean openReadOnly)
+      throws IOException {
 
     // The same config instance is used on each datanode, so we can share the
     // corresponding column family options, providing a single shared cache
@@ -97,6 +99,7 @@ protected AbstractDatanodeStore(ConfigurationSource config, long containerID,
 
     this.dbDef = dbDef;
     this.containerID = containerID;
+    this.openReadOnly = openReadOnly;
     start(config);
   }
 
@@ -121,6 +124,7 @@ public void start(ConfigurationSource config)
     this.store = DBStoreBuilder.newBuilder(config, dbDef)
         .setDBOptions(options)
         .setDefaultCFOptions(cfOptions)
+        .setOpenReadOnly(openReadOnly)
         .build();
 
     // Use the DatanodeTable wrapper to disable the table iterator on
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java
index 97b9b25e275d..b72f19eeeb51 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java
@@ -35,9 +35,10 @@ public class DatanodeStoreSchemaOneImpl extends AbstractDatanodeStore {
    * @throws IOException - on Failure.
    */
   public DatanodeStoreSchemaOneImpl(ConfigurationSource config,
-      long containerID, String dbPath)
-      throws IOException {
-    super(config, containerID, new DatanodeSchemaOneDBDefinition(dbPath));
+      long containerID, String dbPath, boolean openReadOnly)
+      throws IOException {
+    super(config, containerID, new DatanodeSchemaOneDBDefinition(dbPath),
+        openReadOnly);
   }
 
   @Override
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaTwoImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaTwoImpl.java
index fd8e4fa9d087..df9b8c06712d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaTwoImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaTwoImpl.java
@@ -37,8 +37,9 @@ public class DatanodeStoreSchemaTwoImpl extends AbstractDatanodeStore {
    * @throws IOException - on Failure.
    */
   public DatanodeStoreSchemaTwoImpl(ConfigurationSource config,
-      long containerID, String dbPath)
-      throws IOException {
-    super(config, containerID, new DatanodeSchemaTwoDBDefinition(dbPath));
+      long containerID, String dbPath, boolean openReadOnly)
+      throws IOException {
+    super(config, containerID, new DatanodeSchemaTwoDBDefinition(dbPath),
+        openReadOnly);
   }
 }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
index 3a47120181ff..e7f6388cee02 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java
@@ -54,7 +54,7 @@ public class TestContainerCache {
   private void createContainerDB(OzoneConfiguration conf, File dbFile)
       throws Exception {
     DatanodeStore store = new DatanodeStoreSchemaTwoImpl(
-        conf, 1, dbFile.getAbsolutePath());
+        conf, 1, dbFile.getAbsolutePath(), false);
 
     // we close since the SCM pre-creates containers.
     // we will open and put Db handle into a cache when keys are being created
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index 5b907afd9f82..ad48a19927a7 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -87,6 +87,8 @@ public final class DBStoreBuilder {
   private CodecRegistry registry;
   private String rocksDbStat;
   private RocksDBConfiguration rocksDBConfiguration;
+  // Flag to indicate if the RocksDB should be opened readonly.
+  private boolean openReadOnly = false;
 
   /**
    * Create DBStoreBuilder from a generic DBDefinition.
@@ -187,7 +189,7 @@ public DBStore build() throws IOException {
     }
 
     return new RDBStore(dbFile, rocksDBOption, writeOptions, tableConfigs,
-        registry);
+        registry, openReadOnly);
   }
 
   public DBStoreBuilder setName(String name) {
@@ -227,6 +229,11 @@ public DBStoreBuilder setPath(Path path) {
     return this;
   }
 
+  public DBStoreBuilder setOpenReadOnly(boolean readOnly) {
+    this.openReadOnly = readOnly;
+    return this;
+  }
+
   /**
    * Set the {@link DBOptions} and default {@link ColumnFamilyOptions} based
    * on {@code prof}.
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index 0890a81d8fb8..adbd2eb39ead 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -71,12 +71,13 @@ public class RDBStore implements DBStore {
   @VisibleForTesting
   public RDBStore(File dbFile, DBOptions options, Set<TableConfig> families)
       throws IOException {
-    this(dbFile, options, new WriteOptions(), families, new CodecRegistry());
+    this(dbFile, options, new WriteOptions(), families, new CodecRegistry(),
+        false);
   }
 
   public RDBStore(File dbFile, DBOptions options, WriteOptions writeOptions,
       Set<TableConfig> families,
-      CodecRegistry registry)
+      CodecRegistry registry, boolean readOnly)
       throws IOException {
     Preconditions.checkNotNull(dbFile, "DB file location cannot be null");
     Preconditions.checkNotNull(families);
@@ -108,8 +109,13 @@ public RDBStore(File dbFile, DBOptions options,
       extraCf.forEach(cf -> columnFamilyDescriptors.add(cf.getDescriptor()));
     }
 
-    db = RocksDB.open(dbOptions, dbLocation.getAbsolutePath(),
-        columnFamilyDescriptors, columnFamilyHandles);
+    if (readOnly) {
+      db = RocksDB.openReadOnly(dbOptions, dbLocation.getAbsolutePath(),
+          columnFamilyDescriptors, columnFamilyHandles);
+    } else {
+      db = RocksDB.open(dbOptions, dbLocation.getAbsolutePath(),
+          columnFamilyDescriptors, columnFamilyHandles);
+    }
 
     for (int x = 0; x < columnFamilyHandles.size(); x++) {
       handleTable.put(
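
Taken together, the patch threads a single boolean from each call site down to
the RocksDB open call, so a pure read path never opens the DB for writes. As a
rough sketch of the resulting call path (a hypothetical caller; only the method
names and signatures come from the patch, and kvContainerData/config are
assumed to be in scope as they are in parseKVContainerData()):

    // Open a container DB read-only for inspection. readOnly = true makes
    // RDBStore call RocksDB.openReadOnly() instead of RocksDB.open().
    DatanodeStore store = null;
    try {
      store = BlockUtils.getUncachedDatanodeStore(
          kvContainerData, config, true);
      // ... read-only work, e.g. reading block metadata tables ...
    } finally {
      if (store != null) {
        // DatanodeStore.stop() may throw; error handling is elided here.
        store.stop();
      }
    }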
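Two design points are worth noting. First, the change is behavior-preserving
for existing writers: openReadOnly defaults to false in DBStoreBuilder, and
every pre-existing constructor call in the patch passes false explicitly, so
only the parseKVContainerData() path opts into the read-only open. That path
fits RocksDB.openReadOnly() semantics well: a read-only open serves a
point-in-time view, rejects writes, and does not require exclusive ownership
of the DB the way a second RocksDB.open() would, which matches the "RocksDB is
already open in the container cache" scenario described in the
KeyValueContainerUtil comment. Second, for stores built directly through the
builder, the flag is reachable without the datanode wrappers; a minimal sketch
(assuming a DBDefinition named dbDef and a ConfigurationSource named conf):

    // Build any DBStore read-only via the new builder flag.
    DBStore store = DBStoreBuilder.newBuilder(conf, dbDef)
        .setOpenReadOnly(true)
        .build();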