From dd0e0fa631f283ddcbf3bd9962c51ac56626725f Mon Sep 17 00:00:00 2001
From: wanghongbing
Date: Mon, 22 Jan 2024 20:21:38 +0800
Subject: [PATCH 1/3] HDDS-10184. Fix ManagedStatistics not closed properly

---
 .../ozone/container/metadata/AbstractDatanodeStore.java    | 7 ++++---
 .../org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java    | 7 ++++---
 .../hdds/utils/db/managed/TestRocksObjectLeakDetector.java | 2 ++
 3 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
index 50303bd99ba6..90684ba31303 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
@@ -118,9 +118,10 @@ public void start(ConfigurationSource config)
         OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);
 
     if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
-      ManagedStatistics statistics = new ManagedStatistics();
-      statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
-      options.setStatistics(statistics);
+      try (ManagedStatistics statistics = new ManagedStatistics()) {
+        statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
+        options.setStatistics(statistics);
+      }
     }
 
     DatanodeConfiguration dc =
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index fe495e7b0618..0722dd20d998 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -415,9 +415,10 @@ protected void log(InfoLogLevel infoLogLevel, String s) {
 
     // Create statistics.
     if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
-      ManagedStatistics statistics = new ManagedStatistics();
-      statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
-      dbOptions.setStatistics(statistics);
+      try (ManagedStatistics statistics = new ManagedStatistics()) {
+        statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
+        dbOptions.setStatistics(statistics);
+      }
     }
 
     return dbOptions;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java
index 87fbe23ac761..62912ad4aab8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java
@@ -31,6 +31,7 @@ import java.util.concurrent.TimeoutException;
 import java.util.function.Supplier;
 
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertThrows;
 
@@ -49,6 +50,7 @@ public class TestRocksObjectLeakDetector {
   static void setUp() throws IOException, InterruptedException,
       TimeoutException {
     OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(OZONE_METADATA_STORE_ROCKSDB_STATISTICS, "ALL");
     String clusterId = UUID.randomUUID().toString();
     String scmId = UUID.randomUUID().toString();
     String omServiceId = "omServiceId1";

From 3764acd278d9dc77a188327ba754ff2d29fcf0ed Mon Sep 17 00:00:00 2001
From: wanghongbing
Date: Tue, 23 Jan 2024 17:48:03 +0800
Subject: [PATCH 2/3] Change the scope in which Statistics is closed

---
 .../metadata/AbstractDatanodeStore.java       | 109 +++++++++---------
 .../hadoop/hdds/utils/db/DBStoreBuilder.java  |  12 +-
 2 files changed, 65 insertions(+), 56 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
index 90684ba31303..65a75d618657 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.StringUtils;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
 import org.apache.hadoop.hdds.utils.db.BatchOperationHandler;
@@ -117,63 +118,67 @@ public void start(ConfigurationSource config)
         OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
         OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);
 
-    if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
-      try (ManagedStatistics statistics = new ManagedStatistics()) {
+    ManagedStatistics statistics = null;
+    try {
+      if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
+        statistics = new ManagedStatistics();
         statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
         options.setStatistics(statistics);
       }
-    }
 
-    DatanodeConfiguration dc =
-        config.getObject(DatanodeConfiguration.class);
-    // Config user log files
-    InfoLogLevel level = InfoLogLevel.valueOf(
-        dc.getRocksdbLogLevel() + "_LEVEL");
-    options.setInfoLogLevel(level);
-    options.setMaxLogFileSize(dc.getRocksdbLogMaxFileSize());
-    options.setKeepLogFileNum(dc.getRocksdbLogMaxFileNum());
-
-    if (this.dbDef instanceof DatanodeSchemaThreeDBDefinition) {
-      options.setDeleteObsoleteFilesPeriodMicros(
-          dc.getRocksdbDeleteObsoleteFilesPeriod());
-
-      // For V3, all Rocksdb dir has the same "container.db" name. So use
-      // parentDirName(storage UUID)-dbDirName as db metrics name
-      this.store = DBStoreBuilder.newBuilder(config, dbDef)
-          .setDBOptions(options)
-          .setDefaultCFOptions(cfOptions)
-          .setOpenReadOnly(openReadOnly)
-          .setDBJmxBeanNameName(dbDef.getDBLocation(config).getName() + "-" +
-              dbDef.getName())
-          .build();
-    } else {
-      this.store = DBStoreBuilder.newBuilder(config, dbDef)
-          .setDBOptions(options)
-          .setDefaultCFOptions(cfOptions)
-          .setOpenReadOnly(openReadOnly)
-          .build();
-    }
+      DatanodeConfiguration dc =
+          config.getObject(DatanodeConfiguration.class);
+      // Config user log files
+      InfoLogLevel level = InfoLogLevel.valueOf(
+          dc.getRocksdbLogLevel() + "_LEVEL");
+      options.setInfoLogLevel(level);
+      options.setMaxLogFileSize(dc.getRocksdbLogMaxFileSize());
+      options.setKeepLogFileNum(dc.getRocksdbLogMaxFileNum());
+
+      if (this.dbDef instanceof DatanodeSchemaThreeDBDefinition) {
+        options.setDeleteObsoleteFilesPeriodMicros(
+            dc.getRocksdbDeleteObsoleteFilesPeriod());
+
+        // For V3, all Rocksdb dir has the same "container.db" name. So use
+        // parentDirName(storage UUID)-dbDirName as db metrics name
+        this.store = DBStoreBuilder.newBuilder(config, dbDef)
+            .setDBOptions(options)
+            .setDefaultCFOptions(cfOptions)
+            .setOpenReadOnly(openReadOnly)
+            .setDBJmxBeanNameName(dbDef.getDBLocation(config).getName() + "-" +
+                dbDef.getName())
+            .build();
+      } else {
+        this.store = DBStoreBuilder.newBuilder(config, dbDef)
+            .setDBOptions(options)
+            .setDefaultCFOptions(cfOptions)
+            .setOpenReadOnly(openReadOnly)
+            .build();
+      }
 
-    // Use the DatanodeTable wrapper to disable the table iterator on
-    // existing Table implementations retrieved from the DBDefinition.
-    // See the DatanodeTable's Javadoc for an explanation of why this is
-    // necessary.
-    metadataTable = new DatanodeTable<>(
-        dbDef.getMetadataColumnFamily().getTable(this.store));
-    checkTableStatus(metadataTable, metadataTable.getName());
-
-    // The block iterator this class returns will need to use the table
-    // iterator internally, so construct a block data table instance
-    // that does not have the iterator disabled by DatanodeTable.
-    blockDataTableWithIterator =
-        dbDef.getBlockDataColumnFamily().getTable(this.store);
-
-    blockDataTable = new DatanodeTable<>(blockDataTableWithIterator);
-    checkTableStatus(blockDataTable, blockDataTable.getName());
-
-    deletedBlocksTable = new DatanodeTable<>(
-        dbDef.getDeletedBlocksColumnFamily().getTable(this.store));
-    checkTableStatus(deletedBlocksTable, deletedBlocksTable.getName());
+      // Use the DatanodeTable wrapper to disable the table iterator on
+      // existing Table implementations retrieved from the DBDefinition.
+      // See the DatanodeTable's Javadoc for an explanation of why this is
+      // necessary.
+      metadataTable = new DatanodeTable<>(
+          dbDef.getMetadataColumnFamily().getTable(this.store));
+      checkTableStatus(metadataTable, metadataTable.getName());
+
+      // The block iterator this class returns will need to use the table
+      // iterator internally, so construct a block data table instance
+      // that does not have the iterator disabled by DatanodeTable.
+      blockDataTableWithIterator =
+          dbDef.getBlockDataColumnFamily().getTable(this.store);
+
+      blockDataTable = new DatanodeTable<>(blockDataTableWithIterator);
+      checkTableStatus(blockDataTable, blockDataTable.getName());
+
+      deletedBlocksTable = new DatanodeTable<>(
+          dbDef.getDeletedBlocksColumnFamily().getTable(this.store));
+      checkTableStatus(deletedBlocksTable, deletedBlocksTable.getName());
+    } finally {
+      IOUtils.closeQuietly(statistics);
+    }
   }
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index 0722dd20d998..9a47e40fc27d 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -50,6 +50,7 @@ import static org.rocksdb.RocksDB.DEFAULT_COLUMN_FAMILY;
 
 import org.apache.hadoop.hdds.conf.StorageUnit;
+import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
@@ -83,6 +84,9 @@ public final class DBStoreBuilder {
   // The column family options that will be used for any column families
   // added by name only (without specifying options).
   private ManagedColumnFamilyOptions defaultCfOptions;
+
+  private ManagedStatistics statistics;
+
   private String dbname;
   private Path dbPath;
   private String dbJmxBeanNameName;
@@ -222,6 +226,7 @@ public DBStore build() throws IOException {
         enableCompactionDag, maxDbUpdatesSizeThreshold, createCheckpointDirs,
         configuration, threadNamePrefix);
     } finally {
+      IOUtils.closeQuietly(statistics);
       tableConfigs.forEach(TableConfig::close);
     }
   }
@@ -415,9 +420,9 @@ protected void log(InfoLogLevel infoLogLevel, String s) {
 
     // Create statistics.
     if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
-      try (ManagedStatistics statistics = new ManagedStatistics()) {
-        statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
-        dbOptions.setStatistics(statistics);
-      }
+      statistics = new ManagedStatistics();
+      statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
+      dbOptions.setStatistics(statistics);
     }
 
     return dbOptions;

From d051057619b451121d2935abfe2710377bb34a89 Mon Sep 17 00:00:00 2001
From: wanghongbing
Date: Wed, 24 Jan 2024 23:06:59 +0800
Subject: [PATCH 3/3] Statistics closed in RDBStore#close

---
 .../metadata/AbstractDatanodeStore.java       | 119 ++++++++----------
 .../hadoop/hdds/utils/db/DBStoreBuilder.java  |  18 ++-
 .../apache/hadoop/hdds/utils/db/RDBStore.java |  12 +-
 .../hadoop/hdds/utils/db/TestRDBStore.java    |   2 +-
 4 files changed, 66 insertions(+), 85 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
index 65a75d618657..b451071d7030 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
@@ -22,7 +22,6 @@ import org.apache.hadoop.hdds.StringUtils;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
 import org.apache.hadoop.hdds.utils.db.BatchOperationHandler;
@@ -33,14 +32,12 @@ import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
-import org.apache.hadoop.hdds.utils.db.managed.ManagedStatistics;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList;
 import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
 import org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile;
 import org.rocksdb.InfoLogLevel;
-import org.rocksdb.StatsLevel;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -50,9 +47,6 @@
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE;
 import static org.apache.hadoop.hdds.utils.db.DBStoreBuilder.HDDS_DEFAULT_DB_PROFILE;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF;
 
 /**
  * Implementation of the {@link DatanodeStore} interface that contains
@@ -114,71 +108,56 @@ public void start(ConfigurationSource config)
       options.setMaxTotalWalSize(maxWalSize);
     }
 
-    String rocksDbStat = config.getTrimmed(
-        OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
-        OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);
-
-    ManagedStatistics statistics = null;
-    try {
-      if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
-        statistics = new ManagedStatistics();
-        statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
-        options.setStatistics(statistics);
-      }
-
-      DatanodeConfiguration dc =
-          config.getObject(DatanodeConfiguration.class);
-      // Config user log files
-      InfoLogLevel level = InfoLogLevel.valueOf(
-          dc.getRocksdbLogLevel() + "_LEVEL");
-      options.setInfoLogLevel(level);
-      options.setMaxLogFileSize(dc.getRocksdbLogMaxFileSize());
-      options.setKeepLogFileNum(dc.getRocksdbLogMaxFileNum());
-
-      if (this.dbDef instanceof DatanodeSchemaThreeDBDefinition) {
-        options.setDeleteObsoleteFilesPeriodMicros(
-            dc.getRocksdbDeleteObsoleteFilesPeriod());
-
-        // For V3, all Rocksdb dir has the same "container.db" name. So use
-        // parentDirName(storage UUID)-dbDirName as db metrics name
-        this.store = DBStoreBuilder.newBuilder(config, dbDef)
-            .setDBOptions(options)
-            .setDefaultCFOptions(cfOptions)
-            .setOpenReadOnly(openReadOnly)
-            .setDBJmxBeanNameName(dbDef.getDBLocation(config).getName() + "-" +
-                dbDef.getName())
-            .build();
-      } else {
-        this.store = DBStoreBuilder.newBuilder(config, dbDef)
-            .setDBOptions(options)
-            .setDefaultCFOptions(cfOptions)
-            .setOpenReadOnly(openReadOnly)
-            .build();
-      }
-
-      // Use the DatanodeTable wrapper to disable the table iterator on
-      // existing Table implementations retrieved from the DBDefinition.
-      // See the DatanodeTable's Javadoc for an explanation of why this is
-      // necessary.
-      metadataTable = new DatanodeTable<>(
-          dbDef.getMetadataColumnFamily().getTable(this.store));
-      checkTableStatus(metadataTable, metadataTable.getName());
-
-      // The block iterator this class returns will need to use the table
-      // iterator internally, so construct a block data table instance
-      // that does not have the iterator disabled by DatanodeTable.
-      blockDataTableWithIterator =
-          dbDef.getBlockDataColumnFamily().getTable(this.store);
-
-      blockDataTable = new DatanodeTable<>(blockDataTableWithIterator);
-      checkTableStatus(blockDataTable, blockDataTable.getName());
-
-      deletedBlocksTable = new DatanodeTable<>(
-          dbDef.getDeletedBlocksColumnFamily().getTable(this.store));
-      checkTableStatus(deletedBlocksTable, deletedBlocksTable.getName());
-    } finally {
-      IOUtils.closeQuietly(statistics);
-    }
+    DatanodeConfiguration dc =
+        config.getObject(DatanodeConfiguration.class);
+    // Config user log files
+    InfoLogLevel level = InfoLogLevel.valueOf(
+        dc.getRocksdbLogLevel() + "_LEVEL");
+    options.setInfoLogLevel(level);
+    options.setMaxLogFileSize(dc.getRocksdbLogMaxFileSize());
+    options.setKeepLogFileNum(dc.getRocksdbLogMaxFileNum());
+
+    if (this.dbDef instanceof DatanodeSchemaThreeDBDefinition) {
+      options.setDeleteObsoleteFilesPeriodMicros(
+          dc.getRocksdbDeleteObsoleteFilesPeriod());
+
+      // For V3, all Rocksdb dir has the same "container.db" name. So use
+      // parentDirName(storage UUID)-dbDirName as db metrics name
+      this.store = DBStoreBuilder.newBuilder(config, dbDef)
+          .setDBOptions(options)
+          .setDefaultCFOptions(cfOptions)
+          .setOpenReadOnly(openReadOnly)
+          .setDBJmxBeanNameName(dbDef.getDBLocation(config).getName() + "-" +
+              dbDef.getName())
+          .build();
+    } else {
+      this.store = DBStoreBuilder.newBuilder(config, dbDef)
+          .setDBOptions(options)
+          .setDefaultCFOptions(cfOptions)
+          .setOpenReadOnly(openReadOnly)
+          .build();
+    }
+
+    // Use the DatanodeTable wrapper to disable the table iterator on
+    // existing Table implementations retrieved from the DBDefinition.
+    // See the DatanodeTable's Javadoc for an explanation of why this is
+    // necessary.
+    metadataTable = new DatanodeTable<>(
+        dbDef.getMetadataColumnFamily().getTable(this.store));
+    checkTableStatus(metadataTable, metadataTable.getName());
+
+    // The block iterator this class returns will need to use the table
+    // iterator internally, so construct a block data table instance
+    // that does not have the iterator disabled by DatanodeTable.
+    blockDataTableWithIterator =
+        dbDef.getBlockDataColumnFamily().getTable(this.store);
+
+    blockDataTable = new DatanodeTable<>(blockDataTableWithIterator);
+    checkTableStatus(blockDataTable, blockDataTable.getName());
+
+    deletedBlocksTable = new DatanodeTable<>(
+        dbDef.getDeletedBlocksColumnFamily().getTable(this.store));
+    checkTableStatus(deletedBlocksTable, deletedBlocksTable.getName());
   }
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index 9a47e40fc27d..32fcbfec6e44 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -50,7 +50,6 @@ import static org.rocksdb.RocksDB.DEFAULT_COLUMN_FAMILY;
 
 import org.apache.hadoop.hdds.conf.StorageUnit;
-import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
@@ -84,7 +83,7 @@ public final class DBStoreBuilder {
   // The column family options that will be used for any column families
   // added by name only (without specifying options).
   private ManagedColumnFamilyOptions defaultCfOptions;
-
+  // Initialize the Statistics instance if ROCKSDB_STATISTICS is enabled
   private ManagedStatistics statistics;
 
   private String dbname;
@@ -192,6 +191,11 @@ private void setDBOptionsProps(ManagedDBOptions dbOptions) {
     if (maxNumberOfOpenFiles != null) {
       dbOptions.setMaxOpenFiles(maxNumberOfOpenFiles);
     }
+    if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
+      statistics = new ManagedStatistics();
+      statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
+      dbOptions.setStatistics(statistics);
+    }
   }
 
 /**
@@ -221,12 +225,11 @@ public DBStore build() throws IOException {
       throw new IOException("The DB destination directory should exist.");
     }
 
-      return new RDBStore(dbFile, rocksDBOption, writeOptions, tableConfigs,
+      return new RDBStore(dbFile, rocksDBOption, statistics, writeOptions, tableConfigs,
         registry.build(), openReadOnly, maxFSSnapshots, dbJmxBeanNameName,
         enableCompactionDag, maxDbUpdatesSizeThreshold, createCheckpointDirs,
         configuration, threadNamePrefix);
     } finally {
-      IOUtils.closeQuietly(statistics);
       tableConfigs.forEach(TableConfig::close);
     }
   }
@@ -418,13 +421,6 @@ protected void log(InfoLogLevel infoLogLevel, String s) {
     dbOptions.setWalTtlSeconds(rocksDBConfiguration.getWalTTL());
     dbOptions.setWalSizeLimitMB(rocksDBConfiguration.getWalSizeLimit());
 
-    // Create statistics.
-    if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
-      statistics = new ManagedStatistics();
-      statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
-      dbOptions.setStatistics(statistics);
-    }
-
     return dbOptions;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index 47000f8cbc41..6760eb47f486 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdds.utils.db.RocksDatabase.ColumnFamily;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedCompactRangeOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedStatistics;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedTransactionLogIterator;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteOptions;
 import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer;
@@ -77,10 +78,11 @@ public class RDBStore implements DBStore {
   // number in request to avoid increase in heap memory.
   private final long maxDbUpdatesSizeThreshold;
   private final ManagedDBOptions dbOptions;
+  private final ManagedStatistics statistics;
   private final String threadNamePrefix;
 
   @SuppressWarnings("parameternumber")
-  public RDBStore(File dbFile, ManagedDBOptions dbOptions,
+  public RDBStore(File dbFile, ManagedDBOptions dbOptions, ManagedStatistics statistics,
       ManagedWriteOptions writeOptions, Set<TableConfig> families,
       CodecRegistry registry, boolean readOnly, int maxFSSnapshots,
       String dbJmxBeanName, boolean enableCompactionDag,
@@ -97,6 +99,7 @@ public RDBStore(File dbFile, ManagedDBOptions dbOptions,
     codecRegistry = registry;
     dbLocation = dbFile;
     this.dbOptions = dbOptions;
+    this.statistics = statistics;
 
     try {
       if (enableCompactionDag) {
@@ -119,8 +122,8 @@ public RDBStore(File dbFile, ManagedDBOptions dbOptions,
       if (dbJmxBeanName == null) {
         dbJmxBeanName = dbFile.getName();
       }
-      metrics = RocksDBStoreMetrics.create(dbOptions.statistics(), db,
-          dbJmxBeanName);
+      // Use the statistics field; dbOptions.statistics() creates a new wrapper on each call.
+      metrics = RocksDBStoreMetrics.create(statistics, db, dbJmxBeanName);
       if (metrics == null) {
         LOG.warn("Metrics registration failed during RocksDB init, " +
             "db path :{}", dbJmxBeanName);
@@ -231,6 +234,9 @@ public void close() throws IOException {
       RocksDBCheckpointDifferHolder
          .invalidateCacheEntry(rocksDBCheckpointDiffer.getMetadataDir());
     }
+    if (statistics != null) {
+      IOUtils.close(LOG, statistics);
+    }
     IOUtils.close(LOG, db);
   }
 
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
index 7724835957fe..ee589ca8a370 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
@@ -65,7 +65,7 @@ public static RDBStore newRDBStore(File dbFile, ManagedDBOptions options,
       Set<TableConfig> families,
       long maxDbUpdatesSizeThreshold)
       throws IOException {
-    return new RDBStore(dbFile, options, new ManagedWriteOptions(), families,
+    return new RDBStore(dbFile, options, null, new ManagedWriteOptions(), families,
         CodecRegistry.newBuilder().build(), false, 1000, null, false,
         maxDbUpdatesSizeThreshold, true, null, "");
   }
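
Taken together, the three patches walk a native handle through three lifecycles: patch 1 closes ManagedStatistics with try-with-resources as soon as it is handed to the options, so the database would run against an already-closed handle; patch 2 defers the close to a finally block around store construction, which the follow-up suggests is still too early, since the live store keeps reading the statistics object; patch 3 transfers ownership to RDBStore, which closes it in its own close(). The sketch below illustrates that ownership hand-off in plain Java; NativeResource and Store are simplified stand-ins for ManagedStatistics and RDBStore, not the real Ozone classes.

import java.io.Closeable;

public final class StatisticsLifecycleSketch {

  // Stand-in for a managed native handle such as ManagedStatistics.
  static final class NativeResource implements Closeable {
    private boolean closed;
    boolean isClosed() { return closed; }
    @Override public void close() { closed = true; }
  }

  // Stand-in for RDBStore: takes ownership and closes the handle itself.
  static final class Store implements Closeable {
    private final NativeResource statistics; // may be null when stats are off

    Store(NativeResource statistics) { this.statistics = statistics; }

    void query() {
      if (statistics != null && statistics.isClosed()) {
        throw new IllegalStateException("statistics closed while store is live");
      }
    }

    @Override public void close() {
      if (statistics != null) {
        statistics.close(); // closed together with the store, exactly once
      }
    }
  }

  public static void main(String[] args) {
    // Patch 1's shape: try-with-resources closes the handle right after the
    // options are configured, before anything has used it.
    NativeResource premature;
    try (NativeResource stats = new NativeResource()) {
      premature = stats; // stands in for options.setStatistics(stats)
    }
    System.out.println("patch 1: closed before the store opens? "
        + premature.isClosed()); // true: the store would see a dead handle

    // Patch 3's shape: the builder creates the handle but never closes it;
    // ownership moves to the store, which closes it in its own close().
    NativeResource stats = new NativeResource();
    try (Store store = new Store(stats)) {
      store.query(); // the handle stays open for the store's whole lifetime
    }
    System.out.println("patch 3: closed by the store's close()? "
        + stats.isClosed()); // true
  }
}

The rule of thumb the final patch settles on: the component holding the last live reference (RDBStore) owns the close, and the builder only creates and hands off. That is why patch 3 deletes IOUtils.closeQuietly(statistics) from DBStoreBuilder#build, adds the null-guarded IOUtils.close(LOG, statistics) to RDBStore#close, and lets the test helper pass null when no statistics are configured.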