diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index 482ac88f366c..21157baa99ef 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -343,6 +343,11 @@ public final class OzoneConfigKeys { public static final double HDDS_DATANODE_STORAGE_UTILIZATION_CRITICAL_THRESHOLD_DEFAULT = 0.95; + public static final String HDDS_DATANODE_METADATA_ROCKSDB_CACHE_SIZE = + "hdds.datanode.metadata.rocksdb.cache.size"; + public static final String + HDDS_DATANODE_METADATA_ROCKSDB_CACHE_SIZE_DEFAULT = "64MB"; + public static final String OZONE_SECURITY_ENABLED_KEY = "ozone.security.enabled"; public static final boolean OZONE_SECURITY_ENABLED_DEFAULT = false; diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 485397819fcd..0f7c94913322 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -1278,6 +1278,16 @@ + + hdds.datanode.metadata.rocksdb.cache.size + 64MB + OZONE, DATANODE, MANAGEMENT + + Size of the block metadata cache shared among RocksDB instances on each + datanode. All containers on a datanode will share this cache. + + + hdds.command.status.report.interval 30s diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java index 6c258eda7cd0..efbc24730af7 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java @@ -17,28 +17,41 @@ */ package org.apache.hadoop.ozone.container.metadata; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.utils.MetadataKeyFilters; import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; import org.apache.hadoop.hdds.utils.db.*; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList; import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; +import org.rocksdb.BlockBasedTableConfig; +import org.rocksdb.BloomFilter; +import org.rocksdb.ColumnFamilyOptions; import org.rocksdb.DBOptions; +import org.rocksdb.LRUCache; +import org.rocksdb.RocksDB; import org.rocksdb.Statistics; import org.rocksdb.StatsLevel; +import org.rocksdb.util.SizeUnit; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.Closeable; import java.io.IOException; +import java.util.Collections; +import java.util.Map; import java.util.NoSuchElementException; +import java.util.concurrent.ConcurrentHashMap; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT; import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_METADATA_ROCKSDB_CACHE_SIZE; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_METADATA_ROCKSDB_CACHE_SIZE_DEFAULT; /** * Implementation of the {@link DatanodeStore} interface that contains @@ -55,10 +68,15 @@ public abstract class AbstractDatanodeStore implements DatanodeStore { private Table deletedBlocksTable; private static final Logger LOG = - LoggerFactory.getLogger(AbstractDatanodeStore.class); + LoggerFactory.getLogger(AbstractDatanodeStore.class); private DBStore store; private final AbstractDatanodeDBDefinition dbDef; private final long containerID; + private final ColumnFamilyOptions cfOptions; + + private static final DBProfile DEFAULT_PROFILE = DBProfile.DISK; + private static final Map + OPTIONS_CACHE = new ConcurrentHashMap<>(); /** * Constructs the metadata store and starts the DB services. @@ -67,8 +85,16 @@ public abstract class AbstractDatanodeStore implements DatanodeStore { * @throws IOException - on Failure. */ protected AbstractDatanodeStore(ConfigurationSource config, long containerID, - AbstractDatanodeDBDefinition dbDef) - throws IOException { + AbstractDatanodeDBDefinition dbDef) throws IOException { + + // The same config instance is used on each datanode, so we can share the + // corresponding column family options, providing a single shared cache + // for all containers on a datanode. + if (!OPTIONS_CACHE.containsKey(config)) { + OPTIONS_CACHE.put(config, buildColumnFamilyOptions(config)); + } + cfOptions = OPTIONS_CACHE.get(config); + this.dbDef = dbDef; this.containerID = containerID; start(config); @@ -76,9 +102,9 @@ protected AbstractDatanodeStore(ConfigurationSource config, long containerID, @Override public void start(ConfigurationSource config) - throws IOException { + throws IOException { if (this.store == null) { - DBOptions options = new DBOptions(); + DBOptions options = DEFAULT_PROFILE.getDBOptions(); options.setCreateIfMissing(true); options.setCreateMissingColumnFamilies(true); @@ -93,7 +119,8 @@ public void start(ConfigurationSource config) } this.store = DBStoreBuilder.newBuilder(config, dbDef) - .setDBOption(options) + .setDBOptions(options) + .setDefaultCFOptions(cfOptions) .build(); // Use the DatanodeTable wrapper to disable the table iterator on @@ -179,6 +206,12 @@ public void compactDB() throws IOException { store.compactDB(); } + @VisibleForTesting + public static Map + getColumnFamilyOptionsCache() { + return Collections.unmodifiableMap(OPTIONS_CACHE); + } + private static void checkTableStatus(Table table, String name) throws IOException { String logMessage = "Unable to get a reference to %s table. Cannot " + @@ -191,6 +224,26 @@ private static void checkTableStatus(Table table, String name) } } + private static ColumnFamilyOptions buildColumnFamilyOptions( + ConfigurationSource config) { + long cacheSize = (long) config.getStorageSize( + HDDS_DATANODE_METADATA_ROCKSDB_CACHE_SIZE, + HDDS_DATANODE_METADATA_ROCKSDB_CACHE_SIZE_DEFAULT, + StorageUnit.BYTES); + + // Enables static creation of RocksDB objects. 
+ RocksDB.loadLibrary(); + + BlockBasedTableConfig tableConfig = new BlockBasedTableConfig(); + tableConfig.setBlockCache(new LRUCache(cacheSize * SizeUnit.MB)) + .setPinL0FilterAndIndexBlocksInCache(true) + .setFilterPolicy(new BloomFilter()); + + return DEFAULT_PROFILE + .getColumnFamilyOptions() + .setTableFormatConfig(tableConfig); + } + /** * Block Iterator for KeyValue Container. This block iterator returns blocks * which match with the {@link MetadataKeyFilters.KeyPrefixFilter}. If no diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java index 79cf6a7e1ecc..c2b487be2933 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java @@ -20,6 +20,7 @@ import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -37,6 +38,7 @@ import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; +import org.apache.hadoop.ozone.container.metadata.AbstractDatanodeStore; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.DiskChecker; import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; @@ -50,6 +52,7 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.mockito.Mockito; +import org.rocksdb.ColumnFamilyOptions; import java.io.File; @@ -79,7 +82,6 @@ public class TestKeyValueContainer { @Rule public TemporaryFolder folder = new TemporaryFolder(); - private OzoneConfiguration conf; private String scmId = UUID.randomUUID().toString(); private VolumeSet volumeSet; private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy; @@ -89,6 +91,11 @@ public class TestKeyValueContainer { private final ChunkLayOutVersion layout; + // Use one configuration object across parameterized runs of tests. + // This preserves the column family options in the container options + // cache for testContainersShareColumnFamilyOptions. 
+ private static final OzoneConfiguration CONF = new OzoneConfiguration(); + public TestKeyValueContainer(ChunkLayOutVersion layout) { this.layout = layout; } @@ -100,10 +107,9 @@ public static Iterable parameters() { @Before public void setUp() throws Exception { - conf = new OzoneConfiguration(); datanodeId = UUID.randomUUID(); HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot() - .getAbsolutePath()).conf(conf).datanodeUuid(datanodeId + .getAbsolutePath()).conf(CONF).datanodeUuid(datanodeId .toString()).build(); volumeSet = mock(MutableVolumeSet.class); @@ -116,14 +122,14 @@ public void setUp() throws Exception { (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), datanodeId.toString()); - keyValueContainer = new KeyValueContainer(keyValueContainerData, conf); + keyValueContainer = new KeyValueContainer(keyValueContainerData, CONF); } private void addBlocks(int count) throws Exception { long containerId = keyValueContainerData.getContainerID(); try(ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer - .getContainerData(), conf)) { + .getContainerData(), CONF)) { for (int i = 0; i < count; i++) { // Creating BlockData BlockID blockID = new BlockID(containerId, i); @@ -180,7 +186,7 @@ public void testContainerImportExport() throws Exception { long numberOfKeysToWrite = 12; //write one few keys to check the key count after import try(ReferenceCountedDB metadataStore = - BlockUtils.getDB(keyValueContainerData, conf)) { + BlockUtils.getDB(keyValueContainerData, CONF)) { Table blockDataTable = metadataStore.getStore().getBlockDataTable(); @@ -193,7 +199,7 @@ public void testContainerImportExport() throws Exception { metadataStore.getStore().getMetadataTable() .put(OzoneConsts.BLOCK_COUNT, numberOfKeysToWrite); } - BlockUtils.removeDB(keyValueContainerData, conf); + BlockUtils.removeDB(keyValueContainerData, CONF); Map metadata = new HashMap<>(); metadata.put("key1", "value1"); @@ -219,7 +225,7 @@ public void testContainerImportExport() throws Exception { keyValueContainerData.getLayOutVersion(), keyValueContainerData.getMaxSize(), UUID.randomUUID().toString(), datanodeId.toString()); - KeyValueContainer container = new KeyValueContainer(containerData, conf); + KeyValueContainer container = new KeyValueContainer(containerData, CONF); HddsVolume containerVolume = volumeChoosingPolicy.chooseVolume(volumeSet .getVolumesList(), 1); @@ -291,7 +297,7 @@ public void testDeleteContainer() throws Exception { keyValueContainerData.setState(ContainerProtos.ContainerDataProto.State .CLOSED); keyValueContainer = new KeyValueContainer( - keyValueContainerData, conf); + keyValueContainerData, CONF); keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); keyValueContainer.delete(); @@ -370,7 +376,7 @@ public void testUpdateContainerUnsupportedRequest() throws Exception { try { keyValueContainerData.setState( ContainerProtos.ContainerDataProto.State.CLOSED); - keyValueContainer = new KeyValueContainer(keyValueContainerData, conf); + keyValueContainer = new KeyValueContainer(keyValueContainerData, CONF); keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); Map metadata = new HashMap<>(); metadata.put(OzoneConsts.VOLUME, OzoneConsts.OZONE); @@ -383,4 +389,32 @@ public void testUpdateContainerUnsupportedRequest() throws Exception { .getResult()); } } + + @Test + public void testContainersShareColumnFamilyOptions() throws Exception { + // Get a read only view (not a copy) of the options cache. 
+ Map cachedOptions = + AbstractDatanodeStore.getColumnFamilyOptionsCache(); + + // Create Container 1 + keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); + Assert.assertEquals(1, cachedOptions.size()); + ColumnFamilyOptions options1 = cachedOptions.get(CONF); + Assert.assertNotNull(options1); + + // Create Container 2 + keyValueContainerData = new KeyValueContainerData(2L, + layout, + (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), + datanodeId.toString()); + keyValueContainer = new KeyValueContainer(keyValueContainerData, CONF); + keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); + + Assert.assertEquals(1, cachedOptions.size()); + ColumnFamilyOptions options2 = cachedOptions.get(CONF); + Assert.assertNotNull(options2); + + // Column family options object should be reused. + Assert.assertSame(options1, options2); + } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java index 6a202993bdca..5b907afd9f82 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java @@ -24,15 +24,16 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; import java.util.HashSet; -import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.Set; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import com.google.common.base.Preconditions; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE; @@ -69,53 +70,124 @@ public final class DBStoreBuilder { // DB PKIProfile used by ROCKDB instances. public static final DBProfile HDDS_DEFAULT_DB_PROFILE = DBProfile.DISK; - private Set tables; - private DBProfile dbProfile; + // the DBOptions used if the caller does not specify. + private final DBOptions defaultDBOptions; + // The DBOptions specified by the caller. private DBOptions rocksDBOption; + // The column family options that will be used for any column families + // added by name only (without specifying options). + private ColumnFamilyOptions defaultCfOptions; private String dbname; private Path dbPath; - private List tableNames; + // Maps added column family names to the column family options they were + // added with. Value will be null if the column family was not added with + // any options. On build, this will be replaced with defaultCfOptions. + private Map cfOptions; private ConfigurationSource configuration; private CodecRegistry registry; private String rocksDbStat; private RocksDBConfiguration rocksDBConfiguration; - private DBStoreBuilder(ConfigurationSource configuration) { - this(configuration, configuration.getObject(RocksDBConfiguration.class)); + /** + * Create DBStoreBuilder from a generic DBDefinition. 
+ */ + public static DBStore createDBStore(ConfigurationSource configuration, + DBDefinition definition) throws IOException { + return newBuilder(configuration, definition).build(); + } + + public static DBStoreBuilder newBuilder(ConfigurationSource configuration, + DBDefinition definition) { + + DBStoreBuilder builder = newBuilder(configuration); + builder.applyDBDefinition(definition); + + return builder; + } + + public static DBStoreBuilder newBuilder(ConfigurationSource configuration) { + return newBuilder(configuration, + configuration.getObject(RocksDBConfiguration.class)); + } + + public static DBStoreBuilder newBuilder(ConfigurationSource configuration, + RocksDBConfiguration rocksDBConfiguration) { + return new DBStoreBuilder(configuration, rocksDBConfiguration); } private DBStoreBuilder(ConfigurationSource configuration, RocksDBConfiguration rocksDBConfiguration) { - tables = new HashSet<>(); - tableNames = new LinkedList<>(); + cfOptions = new HashMap<>(); this.configuration = configuration; this.registry = new CodecRegistry(); this.rocksDbStat = configuration.getTrimmed( OZONE_METADATA_STORE_ROCKSDB_STATISTICS, OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT); this.rocksDBConfiguration = rocksDBConfiguration; - } - public static DBStoreBuilder newBuilder(ConfigurationSource configuration) { - return new DBStoreBuilder(configuration); - } + // Get default DBOptions and ColumnFamilyOptions from the default DB + // profile. + DBProfile dbProfile = this.configuration.getEnum(HDDS_DB_PROFILE, + HDDS_DEFAULT_DB_PROFILE); + LOG.debug("Default DB profile:{}", dbProfile); - public static DBStoreBuilder newBuilder(OzoneConfiguration configuration, - RocksDBConfiguration rocksDBConfiguration) { - return new DBStoreBuilder(configuration, rocksDBConfiguration); + defaultDBOptions = dbProfile.getDBOptions(); + setDefaultCFOptions(dbProfile.getColumnFamilyOptions()); } - public static DBStoreBuilder newBuilder(ConfigurationSource configuration, - DBDefinition definition) { - DBStoreBuilder builder = createDBStoreBuilder(configuration, definition); - builder.registerTables(definition); + private void applyDBDefinition(DBDefinition definition) { + // Set metadata dirs. + File metadataDir = definition.getDBLocation(configuration); - return builder; + if (metadataDir == null) { + LOG.warn("{} is not configured. We recommend adding this setting. " + + "Falling back to {} instead.", + definition.getLocationConfigKey(), + HddsConfigKeys.OZONE_METADATA_DIRS); + metadataDir = getOzoneMetaDirPath(configuration); + } + + setName(definition.getName()); + setPath(Paths.get(metadataDir.getPath())); + + // Add column family names and codecs. + for (DBColumnFamilyDefinition columnFamily : + definition.getColumnFamilies()) { + + addTable(columnFamily.getName()); + addCodec(columnFamily.getKeyType(), columnFamily.getKeyCodec()); + addCodec(columnFamily.getValueType(), columnFamily.getValueCodec()); + } } - public DBStoreBuilder setProfile(DBProfile profile) { - dbProfile = profile; - return this; + /** + * Builds a DBStore instance and returns that. + * + * @return DBStore + */ + public DBStore build() throws IOException { + if(StringUtil.isBlank(dbname) || (dbPath == null)) { + LOG.error("Required Parameter missing."); + throw new IOException("Required parameter is missing. 
Please make sure " + + "Path and DB name is provided."); + } + + Set tableConfigs = makeTableConfigs(); + + if (rocksDBOption == null) { + rocksDBOption = getDefaultDBOptions(tableConfigs); + } + + WriteOptions writeOptions = new WriteOptions(); + writeOptions.setSync(rocksDBConfiguration.getSyncOption()); + + File dbFile = getDBFile(); + if (!dbFile.getParentFile().exists()) { + throw new IOException("The DB destination directory should exist."); + } + + return new RDBStore(dbFile, rocksDBOption, writeOptions, tableConfigs, + registry); } public DBStoreBuilder setName(String name) { @@ -124,7 +196,12 @@ public DBStoreBuilder setName(String name) { } public DBStoreBuilder addTable(String tableName) { - tableNames.add(tableName); + return addTable(tableName, null); + } + + public DBStoreBuilder addTable(String tableName, + ColumnFamilyOptions options) { + cfOptions.put(tableName, options); return this; } @@ -133,26 +210,14 @@ public DBStoreBuilder addCodec(Class type, Codec codec) { return this; } - public DBStoreBuilder addTable(String tableName, ColumnFamilyOptions option) - throws IOException { - LOG.debug("using custom profile for table: {}", tableName); - return addTableDefinition(tableName, option); - } - - private DBStoreBuilder addTableDefinition(String tableName, - ColumnFamilyOptions option) throws IOException { - TableConfig tableConfig = new TableConfig(tableName, option); - if (!tables.add(tableConfig)) { - String message = "Unable to add the table: " + tableName + - ". Please check if this table name is already in use."; - LOG.error(message); - throw new IOException(message); - } + public DBStoreBuilder setDBOptions(DBOptions option) { + rocksDBOption = option; return this; } - public DBStoreBuilder setDBOption(DBOptions option) { - rocksDBOption = option; + public DBStoreBuilder setDefaultCFOptions( + ColumnFamilyOptions options) { + defaultCfOptions = options; return this; } @@ -163,61 +228,104 @@ public DBStoreBuilder setPath(Path path) { } /** - * Builds a DBStore instance and returns that. - * - * @return DBStore + * Set the {@link DBOptions} and default {@link ColumnFamilyOptions} based + * on {@code prof}. */ - public DBStore build() throws IOException { - if(StringUtil.isBlank(dbname) || (dbPath == null)) { - LOG.error("Required Parameter missing."); - throw new IOException("Required parameter is missing. Please make sure " - + "sure Path and DB name is provided."); - } - processDBProfile(); - processTables(); - DBOptions options = getDbProfile(); - - WriteOptions writeOptions = new WriteOptions(); - writeOptions.setSync(rocksDBConfiguration.getSyncOption()); - + public DBStoreBuilder setProfile(DBProfile prof) { + setDBOptions(prof.getDBOptions()); + setDefaultCFOptions(prof.getColumnFamilyOptions()); + return this; + } - File dbFile = getDBFile(); - if (!dbFile.getParentFile().exists()) { - throw new IOException("The DB destination directory should exist."); + /** + * Converts column families and their corresponding options that have been + * registered with the builder to a set of {@link TableConfig} objects. + * Column families with no options specified will have the default column + * family options for this builder applied. + */ + private Set makeTableConfigs() { + Set tableConfigs = new HashSet<>(); + + // If default column family was not added, add it with the default options. 
+ cfOptions.putIfAbsent(DEFAULT_COLUMN_FAMILY_NAME, + defaultCfOptions); + + for (Map.Entry entry: + cfOptions.entrySet()) { + String name = entry.getKey(); + ColumnFamilyOptions options = entry.getValue(); + + if (options == null) { + LOG.debug("using default column family options for table: {}", name); + tableConfigs.add(new TableConfig(name, defaultCfOptions)); + } else { + tableConfigs.add(new TableConfig(name, options)); + } } - return new RDBStore(dbFile, options, writeOptions, tables, registry); + + return tableConfigs; } /** - * if the DBProfile is not set, we will default to using default from the - * config file. + * Attempts to get RocksDB {@link DBOptions} from an ini config file. If + * that file does not exist, the value of {@code defaultDBOptions} is used + * instead. + * After an {@link DBOptions} is chosen, it will have the logging level + * specified by builder's {@link RocksDBConfiguration} applied to it. It + * will also have statistics added if they are not turned off in the + * builder's {@link ConfigurationSource}. + * + * @param tableConfigs Configurations for each column family, used when + * reading DB options from the ini file. + * + * @return The {@link DBOptions} that should be used as the default value + * for this builder if one is not specified by the caller. */ - private void processDBProfile() { - if (dbProfile == null) { - dbProfile = this.configuration.getEnum(HDDS_DB_PROFILE, - HDDS_DEFAULT_DB_PROFILE); + private DBOptions getDefaultDBOptions(Collection tableConfigs) { + DBOptions dbOptions = getDBOptionsFromFile(tableConfigs); + + if (dbOptions == null) { + dbOptions = defaultDBOptions; + LOG.debug("Using RocksDB DBOptions from default profile."); } - LOG.debug("default profile:{}", dbProfile); - } - private void processTables() throws IOException { - List list = new ArrayList<>(tableNames); - list.add(DEFAULT_COLUMN_FAMILY_NAME); - for (String name : list) { - LOG.debug("using default profile for table:{}", name); - addTableDefinition(name, dbProfile.getColumnFamilyOptions()); + // Apply logging settings. + if (rocksDBConfiguration.isRocksdbLoggingEnabled()) { + org.rocksdb.Logger logger = new org.rocksdb.Logger(dbOptions) { + @Override + protected void log(InfoLogLevel infoLogLevel, String s) { + ROCKS_DB_LOGGER.info(s); + } + }; + InfoLogLevel level = InfoLogLevel.valueOf(rocksDBConfiguration + .getRocksdbLogLevel() + "_LEVEL"); + logger.setInfoLogLevel(level); + dbOptions.setLogger(logger); } - } - private DBOptions getDbProfile() { - if (rocksDBOption != null) { - return rocksDBOption; + // Create statistics. + if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) { + Statistics statistics = new Statistics(); + statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat)); + dbOptions = dbOptions.setStatistics(statistics); } + + return dbOptions; + } + + /** + * Attempts to construct a {@link DBOptions} object from the configuration + * directory with name equal to {@code database name}.ini, where {@code + * database name} is the property set by + * {@link DBStoreBuilder#setName(String)}. 
+ */ + private DBOptions getDBOptionsFromFile(Collection tableConfigs) { DBOptions option = null; - if (StringUtil.isNotBlank(dbname)) { - List columnFamilyDescriptors = new LinkedList<>(); - for (TableConfig tc : tables) { + List columnFamilyDescriptors = new ArrayList<>(); + + if (StringUtil.isNotBlank(dbname)) { + for (TableConfig tc : tableConfigs) { columnFamilyDescriptors.add(tc.getDescriptor()); } @@ -226,37 +334,14 @@ private DBOptions getDbProfile() { option = DBConfigFromFile.readFromFile(dbname, columnFamilyDescriptors); if(option != null) { - LOG.info("Using Configs from {}.ini file", dbname); + LOG.info("Using RocksDB DBOptions from {}.ini file", dbname); } } catch (IOException ex) { - LOG.info("Unable to read RocksDB config from {}", dbname, ex); + LOG.info("Unable to read RocksDB DBOptions from {}", dbname, ex); } } } - if (option == null) { - LOG.debug("Using default options: {}", dbProfile); - option = dbProfile.getDBOptions(); - } - - if (rocksDBConfiguration.isRocksdbLoggingEnabled()) { - org.rocksdb.Logger logger = new org.rocksdb.Logger(option) { - @Override - protected void log(InfoLogLevel infoLogLevel, String s) { - ROCKS_DB_LOGGER.info(s); - } - }; - InfoLogLevel level = InfoLogLevel.valueOf(rocksDBConfiguration - .getRocksdbLogLevel() + "_LEVEL"); - logger.setInfoLogLevel(level); - option.setLogger(logger); - } - - if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) { - Statistics statistics = new Statistics(); - statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat)); - option = option.setStatistics(statistics); - } return option; } @@ -272,54 +357,4 @@ private File getDBFile() throws IOException { } return Paths.get(dbPath.toString(), dbname).toFile(); } - - private static DBStoreBuilder createDBStoreBuilder( - ConfigurationSource configuration, DBDefinition definition) { - - File metadataDir = definition.getDBLocation(configuration); - - if (metadataDir == null) { - - LOG.warn("{} is not configured. We recommend adding this setting. " + - "Falling back to {} instead.", - definition.getLocationConfigKey(), - HddsConfigKeys.OZONE_METADATA_DIRS); - metadataDir = getOzoneMetaDirPath(configuration); - } - - return DBStoreBuilder.newBuilder(configuration) - .setName(definition.getName()) - .setPath(Paths.get(metadataDir.getPath())); - } - - /** - * Create DBStoreBuilder from a generic DBDefinition. - */ - public static DBStore createDBStore(ConfigurationSource configuration, - DBDefinition definition) - throws IOException { - DBStoreBuilder builder = createDBStoreBuilder(configuration, definition); - builder.registerTables(definition); - - return builder.build(); - } - - public DBStoreBuilder registerTables(DBDefinition definition) { - for (DBColumnFamilyDefinition columnFamily : - definition.getColumnFamilies()) { - - if (!columnFamily.getTableName().equals(DEFAULT_COLUMN_FAMILY_NAME)) { - // The default column family is always added. - // If it is present in the DB Definition, ignore it so we don't get an - // error about adding it twice. - addTable(columnFamily.getName()); - } - // Add new codecs specified for the table, which may be an alias for a - // table that was already added with different codecs. 
- addCodec(columnFamily.getKeyType(), columnFamily.getKeyCodec()); - addCodec(columnFamily.getValueType(), columnFamily.getValueCodec()); - } - - return this; - } } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java index d406060165f3..99fcbae80452 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java @@ -27,6 +27,7 @@ import org.junit.Test; import org.junit.rules.ExpectedException; import org.junit.rules.TemporaryFolder; +import org.rocksdb.ColumnFamilyOptions; import java.io.File; import java.io.IOException; @@ -99,15 +100,27 @@ public void builderWithDoubleTableName() throws Exception { if(!newFolder.exists()) { Assert.assertTrue(newFolder.mkdirs()); } - thrown.expect(IOException.class); - DBStoreBuilder.newBuilder(conf) + // Registering a new table with the same name should replace the previous + // one. + DBStore dbStore = DBStoreBuilder.newBuilder(conf) .setName("Test.db") .setPath(newFolder.toPath()) .addTable("FIRST") - .addTable("FIRST") + .addTable("FIRST", new ColumnFamilyOptions()) .build(); - // Nothing to do , This will throw so we do not have to close. + // Building should succeed without error. + + try (Table firstTable = dbStore.getTable("FIRST")) { + byte[] key = + RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8); + byte[] value = + RandomStringUtils.random(9).getBytes(StandardCharsets.UTF_8); + firstTable.put(key, value); + byte[] temp = firstTable.get(key); + Assert.assertArrayEquals(value, temp); + } + dbStore.close(); } @Test
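
The new `hdds.datanode.metadata.rocksdb.cache.size` key is read with `getStorageSize`, so values may carry a unit suffix such as `64MB`. Below is a minimal sketch of reading the key the same way `AbstractDatanodeStore#buildColumnFamilyOptions` does; the override value and class name are illustrative, not part of the patch.

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.StorageUnit;

import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_METADATA_ROCKSDB_CACHE_SIZE;
import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_METADATA_ROCKSDB_CACHE_SIZE_DEFAULT;

public final class CacheSizeExample {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // An operator can override the 64MB default, e.g. in ozone-site.xml or programmatically:
    conf.set(HDDS_DATANODE_METADATA_ROCKSDB_CACHE_SIZE, "128MB");

    // getStorageSize parses the unit suffix and converts to the requested unit.
    long cacheBytes = (long) conf.getStorageSize(
        HDDS_DATANODE_METADATA_ROCKSDB_CACHE_SIZE,
        HDDS_DATANODE_METADATA_ROCKSDB_CACHE_SIZE_DEFAULT,
        StorageUnit.BYTES);
    System.out.println("Datanode RocksDB block cache: " + cacheBytes + " bytes");
  }
}
```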
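
All containers on a datanode end up sharing one `ColumnFamilyOptions` object, and therefore one LRU block cache, because the options are cached per `ConfigurationSource`. The sketch below shows that sharing pattern in simplified form; it uses `computeIfAbsent` rather than the `containsKey`/`put` sequence in the patch, and the class and method names are illustrative.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.RocksDB;

final class SharedCfOptions {
  private static final Map<ConfigurationSource, ColumnFamilyOptions> CACHE =
      new ConcurrentHashMap<>();

  static ColumnFamilyOptions forConfig(ConfigurationSource conf) {
    // Build the options (and the block cache they carry) only once per
    // configuration instance; every later container DB reuses the same object.
    return CACHE.computeIfAbsent(conf, SharedCfOptions::build);
  }

  private static ColumnFamilyOptions build(ConfigurationSource conf) {
    RocksDB.loadLibrary(); // load the native library before creating option objects
    // The patch additionally attaches a BlockBasedTableConfig with an LRUCache
    // sized from hdds.datanode.metadata.rocksdb.cache.size at this point.
    return new ColumnFamilyOptions();
  }
}
```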
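
With the reworked `DBStoreBuilder`, tables can be added with or without per-table `ColumnFamilyOptions`, and re-adding a name replaces the earlier registration instead of throwing. A hedged usage sketch follows; the DB name, table names, and temp directory are illustrative.

```java
import java.nio.file.Files;
import java.nio.file.Path;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.rocksdb.ColumnFamilyOptions;

public final class DbStoreBuilderExample {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // build() requires the parent directory of the DB to already exist.
    Path dir = Files.createTempDirectory("dbstore-example");

    DBStore store = DBStoreBuilder.newBuilder(conf)
        .setName("example.db")                           // illustrative DB name
        .setPath(dir)
        .addTable("blockData")                           // uses the builder's default CF options
        .addTable("metadata", new ColumnFamilyOptions()) // per-table override
        .build();

    store.close();
  }
}
```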