diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 512c055e1000..77fa48e7327d 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1518,6 +1518,16 @@
       to the OM.
     </description>
   </property>
+
+  <property>
+    <name>ozone.om.snapshot.rocksdb.metrics.enabled</name>
+    <value>false</value>
+    <tag>OZONE, OM</tag>
+    <description>
+      Whether to collect RocksDBStore metrics for snapshot DB instances. Disabled by default.
+    </description>
+  </property>
+
   <property>
     <name>hdds.rest.http-address</name>
     <value>0.0.0.0:9880</value>
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index cdfae899e169..b46a742ac3d2 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -102,6 +102,7 @@ public final class DBStoreBuilder {
private final DBProfile defaultCfProfile;
private boolean enableCompactionDag;
private boolean createCheckpointDirs = true;
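+  // When false, RDBStore skips RocksDBStoreMetrics registration for the
+  // store built by this builder (used for snapshot DB instances).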
+ private boolean enableRocksDbMetrics = true;
// this is to track the total size of dbUpdates data since sequence
// number in request to avoid increase in heap memory.
private long maxDbUpdatesSizeThreshold;
@@ -232,7 +233,7 @@ public DBStore build() throws IOException {
return new RDBStore(dbFile, rocksDBOption, statistics, writeOptions, tableConfigs,
registry.build(), openReadOnly, maxFSSnapshots, dbJmxBeanNameName,
enableCompactionDag, maxDbUpdatesSizeThreshold, createCheckpointDirs,
- configuration, threadNamePrefix);
+ configuration, threadNamePrefix, enableRocksDbMetrics);
} finally {
tableConfigs.forEach(TableConfig::close);
}
@@ -302,6 +303,11 @@ public DBStoreBuilder setCreateCheckpointDirs(boolean createCheckpointDirs) {
this.createCheckpointDirs = createCheckpointDirs;
return this;
}
+
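+  /**
+   * Enables or disables RocksDBStoreMetrics registration for the store
+   * being built. Metrics are enabled by default.
+   */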
+ public DBStoreBuilder setEnableRocksDbMetrics(boolean enableRocksDbMetrics) {
+ this.enableRocksDbMetrics = enableRocksDbMetrics;
+ return this;
+ }
/**
* Set the {@link ManagedDBOptions} and default
* {@link ManagedColumnFamilyOptions} based on {@code prof}.
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index de2627fa7aa0..40d3507d2eb4 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -83,7 +83,8 @@ public RDBStore(File dbFile, ManagedDBOptions dbOptions, ManagedStatistics stati
String dbJmxBeanName, boolean enableCompactionDag,
long maxDbUpdatesSizeThreshold,
boolean createCheckpointDirs,
- ConfigurationSource configuration, String threadNamePrefix)
+ ConfigurationSource configuration, String threadNamePrefix,
+ boolean enableRocksDBMetrics)
throws IOException {
this.threadNamePrefix = threadNamePrefix;
@@ -118,13 +119,18 @@ public RDBStore(File dbFile, ManagedDBOptions dbOptions, ManagedStatistics stati
dbJmxBeanName = dbFile.getName();
}
// Use statistics instead of dbOptions.statistics() to avoid repeated init.
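+    // Skip metrics registration entirely when disabled, e.g. for snapshot
+    // DB instances (see ozone.om.snapshot.rocksdb.metrics.enabled).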
-    metrics = RocksDBStoreMetrics.create(statistics, db, dbJmxBeanName);
-    if (metrics == null) {
-      LOG.warn("Metrics registration failed during RocksDB init, " +
-          "db path :{}", dbJmxBeanName);
+    if (!enableRocksDBMetrics) {
+      LOG.debug("Skipped Metrics registration during RocksDB init, " +
+          "db path :{}", dbJmxBeanName);
     } else {
-      LOG.debug("Metrics registration succeed during RocksDB init, " +
-          "db path :{}", dbJmxBeanName);
+      metrics = RocksDBStoreMetrics.create(statistics, db, dbJmxBeanName);
+      if (metrics == null) {
+        LOG.warn("Metrics registration failed during RocksDB init, " +
+            "db path :{}", dbJmxBeanName);
+      } else {
+        LOG.debug("Metrics registration succeeded during RocksDB init, " +
+            "db path :{}", dbJmxBeanName);
+      }
     }
// Create checkpoints and snapshot directories if not exists.
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
index 165dc936b8f2..e2fa36775e40 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
@@ -63,7 +63,7 @@ public static RDBStore newRDBStore(File dbFile, ManagedDBOptions options,
throws IOException {
return new RDBStore(dbFile, options, null, new ManagedWriteOptions(), families,
CodecRegistry.newBuilder().build(), false, 1000, null, false,
- maxDbUpdatesSizeThreshold, true, null, "");
+ maxDbUpdatesSizeThreshold, true, null, "", true);
}
public static final int MAX_DB_UPDATES_SIZE_THRESHOLD = 80;
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index 3a09ae6f20b6..7e80766c7fe5 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -172,6 +172,10 @@ private OMConfigKeys() {
public static final String OZONE_OM_METRICS_SAVE_INTERVAL =
"ozone.om.save.metrics.interval";
public static final String OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT = "5m";
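+  /**
+   * Whether to register RocksDBStoreMetrics for snapshot DB instances.
+   * Disabled by default.
+   */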
+ public static final String OZONE_OM_SNAPSHOT_ROCKSDB_METRICS_ENABLED =
+ "ozone.om.snapshot.rocksdb.metrics.enabled";
+ public static final boolean
+ OZONE_OM_SNAPSHOT_ROCKSDB_METRICS_ENABLED_DEFAULT = false;
/**
* OM Ratis related configurations.
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 6693b9fe3482..240c26fa14b0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -27,6 +27,8 @@
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_FS_SNAPSHOT_MAX_LIMIT_DEFAULT;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_ROCKSDB_METRICS_ENABLED;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_ROCKSDB_METRICS_ENABLED_DEFAULT;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_CHECKPOINT_DIR_CREATION_POLL_TIMEOUT;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_CHECKPOINT_DIR_CREATION_POLL_TIMEOUT_DEFAULT;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
@@ -392,7 +394,8 @@ private OmMetadataManagerImpl(OzoneConfiguration conf, File dir, String name)
omEpoch = 0;
int maxOpenFiles = conf.getInt(OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES, OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES_DEFAULT);
- setStore(loadDB(conf, dir, name, true, Optional.of(Boolean.TRUE), maxOpenFiles, false, false));
+ setStore(loadDB(conf, dir, name, true, Optional.of(Boolean.TRUE),
+ maxOpenFiles, false, false, true));
initializeOmTables(CacheType.PARTIAL_CACHE, false);
perfMetrics = null;
}
@@ -425,7 +428,9 @@ private OmMetadataManagerImpl(OzoneConfiguration conf, File dir, String name)
checkSnapshotDirExist(checkpoint);
}
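+      // Snapshot DB instances only register RocksDB metrics when
+      // ozone.om.snapshot.rocksdb.metrics.enabled is set (default: false).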
setStore(loadDB(conf, metaDir, dbName, false,
- java.util.Optional.of(Boolean.TRUE), maxOpenFiles, false, false));
+ java.util.Optional.of(Boolean.TRUE), maxOpenFiles, false, false,
+ conf.getBoolean(OZONE_OM_SNAPSHOT_ROCKSDB_METRICS_ENABLED,
+ OZONE_OM_SNAPSHOT_ROCKSDB_METRICS_ENABLED_DEFAULT)));
initializeOmTables(CacheType.PARTIAL_CACHE, false);
} catch (IOException e) {
stop();
@@ -571,16 +576,18 @@ public void start(OzoneConfiguration configuration) throws IOException {
}
public static DBStore loadDB(OzoneConfiguration configuration, File metaDir, int maxOpenFiles) throws IOException {
- return loadDB(configuration, metaDir, OM_DB_NAME, false, java.util.Optional.empty(), maxOpenFiles, true, true);
+ return loadDB(configuration, metaDir, OM_DB_NAME, false,
+ java.util.Optional.empty(), maxOpenFiles, true, true, true);
}
@SuppressWarnings("checkstyle:parameternumber")
public static DBStore loadDB(OzoneConfiguration configuration, File metaDir,
-    String dbName, boolean readOnly,
-    java.util.Optional<Boolean> disableAutoCompaction,
-    int maxOpenFiles,
-    boolean enableCompactionDag,
-    boolean createCheckpointDirs)
+      String dbName, boolean readOnly,
+      java.util.Optional<Boolean> disableAutoCompaction,
+      int maxOpenFiles,
+      boolean enableCompactionDag,
+      boolean createCheckpointDirs,
+      boolean enableRocksDBMetrics)
throws IOException {
final int maxFSSnapshots = configuration.getInt(
OZONE_OM_FS_SNAPSHOT_MAX_LIMIT, OZONE_OM_FS_SNAPSHOT_MAX_LIMIT_DEFAULT);
@@ -593,7 +600,8 @@ public static DBStore loadDB(OzoneConfiguration configuration, File metaDir,
.setMaxFSSnapshots(maxFSSnapshots)
.setEnableCompactionDag(enableCompactionDag)
.setCreateCheckpointDirs(createCheckpointDirs)
- .setMaxNumberOfOpenFiles(maxOpenFiles);
+ .setMaxNumberOfOpenFiles(maxOpenFiles)
+ .setEnableRocksDbMetrics(enableRocksDBMetrics);
disableAutoCompaction.ifPresent(
dbStoreBuilder::disableDefaultCFAutoCompaction);
return addOMTablesAndCodecs(dbStoreBuilder).build();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
index 49ed69f51f33..925facb48c8f 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
@@ -59,11 +59,13 @@
import java.util.Set;
import java.util.TreeMap;
import java.util.UUID;
+import java.util.concurrent.TimeoutException;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
+import org.apache.hadoop.hdds.utils.db.RDBStore;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
@@ -78,6 +80,7 @@
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.io.TempDir;
+import org.slf4j.event.Level;
/**
* Unit test ozone snapshot manager.
@@ -108,6 +111,8 @@ void init(@TempDir File tempDir) throws Exception {
// Only allow one entry in cache so each new one causes an eviction
configuration.setInt(
OMConfigKeys.OZONE_OM_SNAPSHOT_CACHE_MAX_SIZE, 1);
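+    // Disable snapshot RocksDB metrics here so testCloseOnEviction can
+    // assert that metrics registration is skipped for the snapshot DB.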
+ configuration.setBoolean(
+ OMConfigKeys.OZONE_OM_SNAPSHOT_ROCKSDB_METRICS_ENABLED, false);
OmTestManagers omTestManagers = new OmTestManagers(configuration);
om = omTestManagers.getOzoneManager();
@@ -135,8 +140,12 @@ public void testSnapshotFeatureFlagSafetyCheck() throws IOException {
}
@Test
- public void testCloseOnEviction() throws IOException {
+ public void testCloseOnEviction() throws IOException,
+ InterruptedException, TimeoutException {
+ GenericTestUtils.setLogLevel(RDBStore.getLogger(), Level.DEBUG);
+ GenericTestUtils.LogCapturer logCapture =
+ GenericTestUtils.LogCapturer.captureLogs(RDBStore.getLogger());
// set up db tables
Table volumeTable = mock(Table.class);
Table bucketTable = mock(Table.class);
@@ -213,6 +222,12 @@ public void testCloseOnEviction() throws IOException {
// confirm store was closed
verify(firstSnapshotStore, timeout(3000).times(1)).close();
+
+ // Verify RocksDBStoreMetrics registration is skipped.
+ String msg = "Skipped Metrics registration during RocksDB init";
+    GenericTestUtils.waitFor(() -> logCapture.getOutput().contains(msg),
+        100, 30_000);
}
@BeforeEach