diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java
index 3e14b0fa260b..e71f6d725888 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java
@@ -21,6 +21,8 @@
 import java.io.IOException;
 import java.math.BigInteger;
 import java.security.cert.X509Certificate;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;

 import com.google.protobuf.ByteString;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -100,6 +102,9 @@ public class SCMMetadataStoreImpl implements SCMMetadataStore {
   private DBStore store;
   private final OzoneConfiguration configuration;

+  private SCMMetadataStoreMetrics metrics;
+  private Map<String, Table<?, ?>> tableMap = new ConcurrentHashMap<>();
+
   /**
    * Constructs the metadata store and starts the DB Services.
    *
@@ -139,57 +144,62 @@ public void start(OzoneConfiguration config)

       deletedBlocksTable = DELETED_BLOCKS.getTable(this.store);

-      checkTableStatus(deletedBlocksTable,
-          DELETED_BLOCKS.getName());
+      checkAndPopulateTable(deletedBlocksTable, DELETED_BLOCKS.getName());

       validCertsTable = VALID_CERTS.getTable(store);

-      checkTableStatus(validCertsTable, VALID_CERTS.getName());
+      checkAndPopulateTable(validCertsTable, VALID_CERTS.getName());

       validSCMCertsTable = VALID_SCM_CERTS.getTable(store);

-      checkTableStatus(validSCMCertsTable, VALID_SCM_CERTS.getName());
+      checkAndPopulateTable(validSCMCertsTable, VALID_SCM_CERTS.getName());

       revokedCertsTable = REVOKED_CERTS.getTable(store);

-      checkTableStatus(revokedCertsTable, REVOKED_CERTS.getName());
+      checkAndPopulateTable(revokedCertsTable, REVOKED_CERTS.getName());

       revokedCertsV2Table = REVOKED_CERTS_V2.getTable(store);

-      checkTableStatus(revokedCertsV2Table, REVOKED_CERTS_V2.getName());
+      checkAndPopulateTable(revokedCertsV2Table, REVOKED_CERTS_V2.getName());

       pipelineTable = PIPELINES.getTable(store);

-      checkTableStatus(pipelineTable, PIPELINES.getName());
+      checkAndPopulateTable(pipelineTable, PIPELINES.getName());

       containerTable = CONTAINERS.getTable(store);

-      checkTableStatus(containerTable, CONTAINERS.getName());
+      checkAndPopulateTable(containerTable, CONTAINERS.getName());

       transactionInfoTable = TRANSACTIONINFO.getTable(store);

-      checkTableStatus(transactionInfoTable, TRANSACTIONINFO.getName());
+      checkAndPopulateTable(transactionInfoTable, TRANSACTIONINFO.getName());

       crlInfoTable = CRLS.getTable(store);
+      checkAndPopulateTable(crlInfoTable, CRLS.getName());
+
       crlSequenceIdTable = CRL_SEQUENCE_ID.getTable(store);
+      checkAndPopulateTable(crlSequenceIdTable, CRL_SEQUENCE_ID.getName());
+
       sequenceIdTable = SEQUENCE_ID.getTable(store);
-      checkTableStatus(sequenceIdTable, SEQUENCE_ID.getName());
+      checkAndPopulateTable(sequenceIdTable, SEQUENCE_ID.getName());

       moveTable = MOVE.getTable(store);
-      checkTableStatus(moveTable, MOVE.getName());
+      checkAndPopulateTable(moveTable, MOVE.getName());

       metaTable = META.getTable(store);
-      checkTableStatus(moveTable, META.getName());
+      checkAndPopulateTable(metaTable, META.getName());

       statefulServiceConfigTable = STATEFUL_SERVICE_CONFIG.getTable(store);
-      checkTableStatus(statefulServiceConfigTable,
+      checkAndPopulateTable(statefulServiceConfigTable,
           STATEFUL_SERVICE_CONFIG.getName());
+
+      metrics = SCMMetadataStoreMetrics.create(this);
     }
   }
@@ -199,6 +209,10 @@ public void stop() throws Exception {
       store.close();
       store = null;
     }
+    if (metrics != null) {
+      metrics.unRegister();
+      metrics = null;
+    }
   }

   @Override
@@ -304,7 +318,8 @@ public Table getStatefulServiceConfigTable() {
     return statefulServiceConfigTable;
   }

-  private void checkTableStatus(Table table, String name) throws IOException {
+  private void checkAndPopulateTable(Table table, String name)
+      throws IOException {
     String logMessage = "Unable to get a reference to %s table. Cannot " +
         "continue.";
     String errMsg = "Inconsistent DB state, Table - %s. Please check the" +
@@ -313,6 +328,14 @@ private void checkTableStatus(Table table, String name) throws IOException {
       LOG.error(String.format(logMessage, name));
       throw new IOException(String.format(errMsg, name));
     }
+    tableMap.put(name, table);
+  }
+
+  Map<String, Table<?, ?>> getTableMap() {
+    return tableMap;
   }

+  SCMMetadataStoreMetrics getMetrics() {
+    return metrics;
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreMetrics.java
new file mode 100644
index 000000000000..64be4dab7361
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreMetrics.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdds.scm.metadata;
+
+import com.google.gson.Gson;
+import org.apache.commons.text.WordUtils;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsInfo;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.MetricsSource;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.metrics2.lib.Interns.info;
+
+/**
+ * Class contains metrics related to SCM Metadata Store.
+ */
+@Metrics(about = "SCM Metadata Store Metrics", context = OzoneConsts.OZONE)
+public final class SCMMetadataStoreMetrics implements MetricsSource {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SCMMetadataStoreMetrics.class);
+
+  public static final String METRICS_SOURCE_NAME =
+      SCMMetadataStoreMetrics.class.getSimpleName();
+
+  private static final MetricsInfo ESTIMATED_KEY_COUNT = info(
+      "EstimatedKeyCount",
+      "Tracked estimated key count of all column families");
+
+  private MetricsRegistry registry;
+  private static SCMMetadataStoreMetrics instance;
+
+  private SCMMetadataStoreImpl scmMetadataStore;
+
+  private Map<String, MetricsInfo> columnFamilyMetrics;
+
+  public SCMMetadataStoreMetrics(SCMMetadataStoreImpl scmMetadataStoreImpl) {
+    this.registry = new MetricsRegistry(METRICS_SOURCE_NAME);
+    this.scmMetadataStore = scmMetadataStoreImpl;
+
+    columnFamilyMetrics = scmMetadataStoreImpl.getTableMap().entrySet()
+        .stream().collect(
+        Collectors.toMap(Map.Entry::getKey, e -> getMetricsInfo(e.getKey())));
+  }
+
+  public static synchronized SCMMetadataStoreMetrics create(SCMMetadataStoreImpl
+      scmMetadataStore) {
+    if (instance != null) {
+      return instance;
+    }
+    instance = DefaultMetricsSystem.instance().register(METRICS_SOURCE_NAME,
+        "SCM Metadata store related metrics",
+        new SCMMetadataStoreMetrics(scmMetadataStore));
+    return instance;
+  }
+
+  @Override
+  public void getMetrics(MetricsCollector collector, boolean all) {
+    MetricsRecordBuilder builder = collector.addRecord(METRICS_SOURCE_NAME);
+
+    Map<String, Long> keyCountMap = new HashMap<>();
+    for (Map.Entry<String, MetricsInfo> entry: columnFamilyMetrics.entrySet()) {
+      long count = 0L;
+      try {
+        count = scmMetadataStore.getTableMap().get(entry.getKey())
+            .getEstimatedKeyCount();
+      } catch (IOException e) {
+        LOG.error("Can not get estimated key count for table {}",
+            entry.getKey(), e);
+      }
+      builder.addGauge(entry.getValue(), count);
+      keyCountMap.put(entry.getKey(), count);
+    }
+
+    Gson gson = new Gson();
+    builder.tag(ESTIMATED_KEY_COUNT, gson.toJson(keyCountMap));
+  }
+
+  public static synchronized void unRegister() {
+    instance = null;
+    DefaultMetricsSystem.instance().unregisterSource(METRICS_SOURCE_NAME);
+  }
+
+  private MetricsInfo getMetricsInfo(String tableName) {
+    String name = WordUtils.capitalize(tableName);
+    String metric = name + "EstimatedKeyCount";
+    String description = "Estimated key count in table of " + name;
"Estimated key count in table of " + name; + return info(metric, description); + } +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestSCMMetadataStoreImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestSCMMetadataStoreImpl.java new file mode 100644 index 000000000000..0c96747d7489 --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestSCMMetadataStoreImpl.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.metadata; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.container.common.SCMTestUtils; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +import java.io.IOException; +import java.nio.file.Path; + +import static org.apache.hadoop.test.MetricsAsserts.getLongGauge; +import static org.apache.hadoop.test.MetricsAsserts.getMetrics; +import static org.apache.hadoop.test.MetricsAsserts.getStringMetric; + + +/** + * Testing of SCMMetadataStoreImpl. + */ +public class TestSCMMetadataStoreImpl { + private OzoneConfiguration conf; + private SCMMetadataStore scmMetadataStore; + + @BeforeEach + public void setUp(@TempDir Path tempDir) throws Exception { + conf = SCMTestUtils.getConf(); + + scmMetadataStore = new SCMMetadataStoreImpl(conf); + scmMetadataStore.start(conf); + } + + @Test + public void testEstimatedKeyCount() { + Assertions.assertTrue(getString("EstimatedKeyCount") + .contains("\"sequenceId\":0")); + Assertions.assertEquals(0, getGauge("SequenceIdEstimatedKeyCount")); + + try { + scmMetadataStore.getSequenceIdTable().put("TestKey", 1L); + } catch (IOException e) { + // Ignore + } + + Assertions.assertTrue(getString("EstimatedKeyCount") + .contains("\"sequenceId\":1")); + Assertions.assertEquals(1, getGauge("SequenceIdEstimatedKeyCount")); + } + + private long getGauge(String metricName) { + return getLongGauge(metricName, + getMetrics(SCMMetadataStoreMetrics.METRICS_SOURCE_NAME)); + } + + private String getString(String metricName) { + return getStringMetric(metricName, + getMetrics(SCMMetadataStoreMetrics.METRICS_SOURCE_NAME)); + } +}