diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBMetrics.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBMetrics.java
new file mode 100644
index 000000000000..abe7d22c3ef8
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBMetrics.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.utils.db;
+
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Class to hold RocksDB metrics.
+ */
+public class RDBMetrics {
+
+  private static final String SOURCE_NAME =
+      RDBMetrics.class.getSimpleName();
+
+  public RDBMetrics() {
+  }
+
+  public static RDBMetrics create() {
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    return ms.register(SOURCE_NAME,
+        "Rocks DB Metrics",
+        new RDBMetrics());
+  }
+
+  private @Metric MutableCounterLong numDBKeyMayExistChecks;
+  private @Metric MutableCounterLong numDBKeyMayExistMisses;
+
+
+  public void incNumDBKeyMayExistChecks() {
+    numDBKeyMayExistChecks.incr();
+  }
+
+  public void incNumDBKeyMayExistMisses() {
+    numDBKeyMayExistMisses.incr();
+  }
+
+
+  @VisibleForTesting
+  public long getNumDBKeyMayExistChecks() {
+    return numDBKeyMayExistChecks.value();
+  }
+
+  @VisibleForTesting
+  public long getNumDBKeyMayExistMisses() {
+    return numDBKeyMayExistMisses.value();
+  }
+
+  public void unRegister() {
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    ms.unregisterSource(SOURCE_NAME);
+  }
+
+}
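The counters above rely on Hadoop's metrics2 annotations: when RDBMetrics.create() registers the object with the DefaultMetricsSystem, the @Metric MutableCounterLong fields are instantiated and published under the source name "RDBMetrics". A minimal standalone sketch of that lifecycle; the initialize()/shutdown() calls and the printed values are illustrative only, since in this patch nothing but RDBStore creates or unregisters the source:

import org.apache.hadoop.hdds.utils.db.RDBMetrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

public class RDBMetricsSketch {
  public static void main(String[] args) {
    // Normally the owning daemon initializes the metrics system at startup.
    DefaultMetricsSystem.initialize("RDBMetricsSketch");

    RDBMetrics metrics = RDBMetrics.create();   // registers source "RDBMetrics"
    metrics.incNumDBKeyMayExistChecks();
    metrics.incNumDBKeyMayExistChecks();
    metrics.incNumDBKeyMayExistMisses();

    // Readable through the getters and through JMX / configured metrics sinks.
    System.out.println(metrics.getNumDBKeyMayExistChecks());  // 2
    System.out.println(metrics.getNumDBKeyMayExistMisses());  // 1

    metrics.unRegister();
    DefaultMetricsSystem.shutdown();
  }
}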
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index 0e3c20837b62..e8df8f225dfc 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -67,6 +67,7 @@ public class RDBStore implements DBStore {
   private RDBCheckpointManager checkPointManager;
   private String checkpointsParentDir;
   private List columnFamilyHandles;
+  private RDBMetrics rdbMetrics;
 
   @VisibleForTesting
   public RDBStore(File dbFile, DBOptions options,
@@ -131,6 +132,7 @@ public RDBStore(File dbFile, DBOptions options,
 
       //Initialize checkpoint manager
       checkPointManager = new RDBCheckpointManager(db, "om");
+      rdbMetrics = RDBMetrics.create();
 
     } catch (RocksDBException e) {
       throw toIOException(
@@ -178,6 +180,7 @@ public void close() throws IOException {
       statMBeanName = null;
     }
 
+    rdbMetrics.unRegister();
    if (db != null) {
      db.close();
    }
@@ -252,7 +255,7 @@ public Table getTable(String name) throws IOException {
     if (handle == null) {
       throw new IOException("No such table in this DB. TableName : " + name);
     }
-    return new RDBTable(this.db, handle, this.writeOptions);
+    return new RDBTable(this.db, handle, this.writeOptions, rdbMetrics);
   }
 
   @Override
@@ -274,7 +277,7 @@ public Table getTable(String name,
   public ArrayList listTables() throws IOException {
     ArrayList returnList = new ArrayList<>();
     for (ColumnFamilyHandle handle : handleTable.values()) {
-      returnList.add(new RDBTable(db, handle, writeOptions));
+      returnList.add(new RDBTable(db, handle, writeOptions, rdbMetrics));
     }
     return returnList;
   }
@@ -378,4 +381,7 @@ public RocksDB getDb() {
     return db;
   }
 
+  public RDBMetrics getMetrics() {
+    return rdbMetrics;
+  }
 }
\ No newline at end of file
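RDBStore now owns a single RDBMetrics instance: it is registered in the constructor, handed to every RDBTable returned by getTable() and listTables(), and unregistered in close(), so all tables of one store feed the same counters. A rough sketch of that relationship, assembled the same way the updated test below builds its store; the directory is a placeholder and the default column family is used for brevity:

import java.io.File;
import java.nio.charset.StandardCharsets;
import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.hdds.utils.db.RDBStore;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableConfig;
import org.rocksdb.ColumnFamilyOptions;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;

public class RDBStoreMetricsSketch {
  public static void main(String[] args) throws Exception {
    DBOptions options = new DBOptions();
    options.setCreateIfMissing(true);
    options.setCreateMissingColumnFamilies(true);

    String tableName =
        new String(RocksDB.DEFAULT_COLUMN_FAMILY, StandardCharsets.UTF_8);
    Set<TableConfig> configSet = new HashSet<>();
    configSet.add(new TableConfig(tableName, new ColumnFamilyOptions()));

    RDBStore store =
        new RDBStore(new File("/tmp/rdb-metrics-sketch"), options, configSet);
    try (Table table = store.getTable(tableName)) {
      table.isExist("probe".getBytes(StandardCharsets.UTF_8));
      // The lookup above was counted by the store-level metrics object.
      System.out.println(store.getMetrics().getNumDBKeyMayExistChecks());  // 1
    } finally {
      store.close();  // also unregisters the "RDBMetrics" source
    }
  }
}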
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
index 49ccc020922f..56083a56487c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
@@ -48,6 +48,7 @@ class RDBTable implements Table {
   private final RocksDB db;
   private final ColumnFamilyHandle handle;
   private final WriteOptions writeOptions;
+  private final RDBMetrics rdbMetrics;
 
   /**
    * Constructs a TableStore.
@@ -57,10 +58,11 @@ class RDBTable implements Table {
    * @param writeOptions - RocksDB write Options.
    */
   RDBTable(RocksDB db, ColumnFamilyHandle handle,
-      WriteOptions writeOptions) {
+      WriteOptions writeOptions, RDBMetrics rdbMetrics) {
     this.db = db;
     this.handle = handle;
     this.writeOptions = writeOptions;
+    this.rdbMetrics = rdbMetrics;
   }
 
   /**
@@ -125,8 +127,18 @@ public boolean isExist(byte[] key) throws IOException {
       // RocksDB#keyMayExist
       // If the key definitely does not exist in the database, then this
       // method returns false, else true.
-      return db.keyMayExist(handle, key, new StringBuilder())
-          && db.get(handle, key) != null;
+      rdbMetrics.incNumDBKeyMayExistChecks();
+      StringBuilder outValue = new StringBuilder();
+      boolean keyMayExist = db.keyMayExist(handle, key, outValue);
+      if (keyMayExist) {
+        boolean keyExists = (outValue.length() > 0) ||
+            (db.get(handle, key) != null);
+        if (!keyExists) {
+          rdbMetrics.incNumDBKeyMayExistMisses();
+        }
+        return keyExists;
+      }
+      return false;
     } catch (RocksDBException e) {
       throw toIOException(
           "Error in accessing DB. ", e);
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
index 788883dbbfff..364e537bc0de 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.hdds.utils.db;
 
+import java.io.File;
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
@@ -232,20 +233,58 @@ public void forEachAndIterator() throws Exception {
 
   @Test
   public void testIsExist() throws Exception {
-    try (Table testTable = rdbStore.getTable("Seventh")) {
-      byte[] key =
-          RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
-      byte[] value =
-          RandomStringUtils.random(10).getBytes(StandardCharsets.UTF_8);
+    DBOptions rocksDBOptions = new DBOptions();
+    rocksDBOptions.setCreateIfMissing(true);
+    rocksDBOptions.setCreateMissingColumnFamilies(true);
+
+    String tableName = DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY);
+
+    Set configSet = new HashSet<>();
+    TableConfig newConfig = new TableConfig(tableName,
+        new ColumnFamilyOptions());
+    configSet.add(newConfig);
+
+    File rdbLocation = folder.newFolder();
+    RDBStore dbStore = new RDBStore(rdbLocation, rocksDBOptions, configSet);
+
+    byte[] key = RandomStringUtils.random(10, true, false)
+        .getBytes(StandardCharsets.UTF_8);
+    byte[] value = RandomStringUtils.random(10, true, false)
+        .getBytes(StandardCharsets.UTF_8);
+
+    try (Table testTable = dbStore.getTable(tableName)) {
       testTable.put(key, value);
+
+      // Test if isExist returns true for a key that definitely exists.
       Assert.assertTrue(testTable.isExist(key));
 
+      // Test if isExist returns false for a key that has been deleted.
       testTable.delete(key);
       Assert.assertFalse(testTable.isExist(key));
 
       byte[] invalidKey =
           RandomStringUtils.random(5).getBytes(StandardCharsets.UTF_8);
+      // Test if isExist returns false for a key that is definitely not present.
       Assert.assertFalse(testTable.isExist(invalidKey));
+
+      RDBMetrics rdbMetrics = dbStore.getMetrics();
+      Assert.assertEquals(3, rdbMetrics.getNumDBKeyMayExistChecks());
+      Assert.assertTrue(rdbMetrics.getNumDBKeyMayExistMisses() == 0);
+
+      // Reinsert key for further testing.
+      testTable.put(key, value);
+    }
+
+    dbStore.close();
+    rocksDBOptions = new DBOptions();
+    rocksDBOptions.setCreateIfMissing(true);
+    rocksDBOptions.setCreateMissingColumnFamilies(true);
+    dbStore = new RDBStore(rdbLocation, rocksDBOptions, configSet);
+    try (Table testTable = dbStore.getTable(tableName)) {
+      // Verify isExist works with key not in block cache.
+      Assert.assertTrue(testTable.isExist(key));
+    } finally {
+      dbStore.close();
     }
   }
 
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
index 1aaf8873d58d..b2be6a07acb9 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java
@@ -113,5 +113,8 @@ protected void configureServlets() {
   void stop() throws Exception {
     LOG.info("Stopping Recon server");
     httpServer.stop();
+    OzoneManagerServiceProvider ozoneManagerServiceProvider = injector
+        .getInstance(OzoneManagerServiceProvider.class);
+    ozoneManagerServiceProvider.stop();
   }
 }
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java
index 3f57af6f5d4e..de9a9d6d7f16 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/OzoneManagerServiceProvider.java
@@ -33,7 +33,7 @@ public interface OzoneManagerServiceProvider {
   /**
    * Stop the OM sync data.
    */
-  void stop();
+  void stop() throws Exception;
 
   /**
    * Return instance of OM Metadata manager.
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
index 789b30168c82..fd3892afbbc4 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java
@@ -198,8 +198,9 @@ public void start() {
   }
 
   @Override
-  public void stop() {
+  public void stop() throws Exception {
     reconTaskController.stop();
+    omMetadataManager.stop();
     scheduler.shutdownNow();
   }
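The Recon changes are follow-on plumbing: ReconServer.stop() now also stops the OzoneManagerServiceProvider, whose implementation additionally stops the Recon OM metadata manager, and since that stop path can throw, the interface method is widened to throws Exception and the exception is simply propagated.

Stepping back to the core of the patch, the reworked RDBTable#isExist() leans on the RocksDB keyMayExist() contract: false means the key is definitely absent, while true is only a hint. If the hint also fills the supplied StringBuilder, the value was found in memory and the extra get() is skipped; otherwise get() decides, and a false-positive hint is exactly what the new NumDBKeyMayExistMisses counter records. A standalone sketch of the same pattern against a bare RocksDB instance, assuming the same RocksJava generation the patch targets (the StringBuilder overload of keyMayExist); the path is a placeholder:

import java.nio.charset.StandardCharsets;

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class KeyMayExistSketch {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (Options options = new Options().setCreateIfMissing(true);
         RocksDB db = RocksDB.open(options, "/tmp/keymayexist-sketch")) {
      byte[] key = "volume1".getBytes(StandardCharsets.UTF_8);
      db.put(key, "value1".getBytes(StandardCharsets.UTF_8));

      StringBuilder outValue = new StringBuilder();
      boolean exists;
      if (!db.keyMayExist(key, outValue)) {
        exists = false;                  // definite negative, no further I/O
      } else if (outValue.length() > 0) {
        exists = true;                   // value already found in memory
      } else {
        exists = db.get(key) != null;    // confirm the hint; null here is a "miss"
      }
      System.out.println("exists = " + exists);
    }
  }
}

This also explains the numbers asserted in testIsExist(): the first phase performs exactly three isExist() calls (a present key, a deleted key, and a never-inserted key), so NumDBKeyMayExistChecks must be 3, and because both negative lookups returned false while the miss counter stayed at 0, keyMayExist() itself must have ruled those keys out rather than the fallback get(). The second phase reopens the store with the reinserted key, which is intended to exercise the case where the value is not already in memory and the follow-up get() confirms existence.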