diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java index 67e651a7361d..3ffc19f0fd55 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java @@ -16,10 +16,12 @@ */ package org.apache.hadoop.hbase.regionserver; +import org.apache.hadoop.hbase.metrics.BaseSource; + /** * Latency metrics for a specific table in a RegionServer. */ -public interface MetricsTableLatencies { +public interface MetricsTableLatencies extends BaseSource { /** * The name of the metrics diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQPS.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQPS.java new file mode 100644 index 000000000000..667cf53d4f24 --- /dev/null +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQPS.java @@ -0,0 +1,33 @@ +package org.apache.hadoop.hbase.regionserver; + +public interface MetricsTableQPS { + + String TABLE_READ_QPS = "tableReadQPS"; + String TABLE_WRITE_QPS = "tableWriteQPS"; + + /** + * Update table read QPS + * @param tableName The table the metric is for + * @param count Number of occurrences to record + */ + void updateTableReadQPS(String tableName, long count); + + /** + * Update table read QPS + * @param tableName The table the metric is for + */ + void updateTableReadQPS(String tableName); + + /** + * Update table write QPS + * @param tableName The table the metric is for + * @param count Number of occurrences to record + */ + void updateTableWriteQPS(String tableName, long count); + + /** + * Update table write QPS + * @param tableName The table the metric is for + */ + void updateTableWriteQPS(String tableName); +} 
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java index 2c052f2a87b9..0ee8c559a3f3 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java @@ -22,6 +22,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.metrics2.MetricHistogram; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry; import com.google.common.annotations.VisibleForTesting; @@ -172,4 +174,14 @@ public void updateScanSize(String tableName, long scanSize) { public void updateScanTime(String tableName, long t) { getOrCreateTableHistogram(tableName).updateScanTime(t); } + + @Override public void getMetrics(MetricsCollector metricsCollector, boolean all) { + MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName); + // source is registered in supers constructor, sometimes called before the whole initialization. 
+ metricsRegistry.snapshot(mrb, all); + if (metricsAdapter != null) { + // snapshot MetricRegistry as well + metricsAdapter.snapshotAllMetrics(registry, mrb); + } + } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQPSImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQPSImpl.java new file mode 100644 index 000000000000..fa759641e8f6 --- /dev/null +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQPSImpl.java @@ -0,0 +1,81 @@ +package org.apache.hadoop.hbase.regionserver; + +import java.util.HashMap; + +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.metrics.Meter; +import org.apache.hadoop.hbase.metrics.MetricRegistry; + +@InterfaceAudience.Private +public class MetricsTableQPSImpl implements MetricsTableQPS { + + private final HashMap<TableName, TableMeters> metersByTable = new HashMap<>(); + private final MetricRegistry metricRegistry; + + public MetricsTableQPSImpl(MetricRegistry metricRegistry) { + this.metricRegistry = metricRegistry; + } + + public static class TableMeters { + final Meter tableReadQPSMeter; + final Meter tableWriteQPSMeter; + + TableMeters(MetricRegistry metricRegistry, TableName tableName) { + this.tableReadQPSMeter = metricRegistry.meter(qualifyMetricsName(tableName, TABLE_READ_QPS)); + this.tableWriteQPSMeter = + metricRegistry.meter(qualifyMetricsName(tableName, TABLE_WRITE_QPS)); + } + + public void updateTableReadQPS(long count) { + tableReadQPSMeter.mark(count); + } + public void updateTableReadQPS() { + tableReadQPSMeter.mark(); + } + public void updateTableWriteQPS(long count) { + tableWriteQPSMeter.mark(count); + } + public void updateTableWriteQPS() { + tableWriteQPSMeter.mark(); + } + } + + private static String qualifyMetricsName(TableName tableName, String metric) { + StringBuilder sb = new StringBuilder(); + 
sb.append("Namespace_").append(tableName.getNamespaceAsString()); + sb.append("_table_").append(tableName.getQualifierAsString()); + sb.append("_metric_").append(metric); + return sb.toString(); + } + + private TableMeters getOrCreateTableMeter(String tableName) { + final TableName tn = TableName.valueOf(tableName); + TableMeters meter = metersByTable.get(tn); + if (meter == null) { + meter = new TableMeters(metricRegistry, tn); + metersByTable.put(tn, meter); + } + return meter; + } + + @Override + public void updateTableReadQPS(String tableName, long count) { + getOrCreateTableMeter(tableName).updateTableReadQPS(count); + } + + @Override + public void updateTableReadQPS(String tableName) { + getOrCreateTableMeter(tableName).updateTableReadQPS(); + } + + @Override + public void updateTableWriteQPS(String tableName, long count) { + getOrCreateTableMeter(tableName).updateTableWriteQPS(count); + } + + @Override + public void updateTableWriteQPS(String tableName) { + getOrCreateTableMeter(tableName).updateTableWriteQPS(); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 1157eba52988..7560b3e1a01d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -3149,6 +3149,10 @@ OperationStatus[] batchMutate(BatchOperationInProgress<?> batchOp) throws IOExce if (!initialized) { this.writeRequestsCount.add(batchOp.operations.length); + if (rsServices instanceof HRegionServer) { + ((HRegionServer) rsServices).getRegionServerMetrics(). 
+ updateServerWriteQPS(this.htableDescriptor.getTableName(), batchOp.operations.length); + } if (!batchOp.isInReplay()) { doPreMutationHook(batchOp); } @@ -5851,7 +5855,10 @@ public boolean bulkLoadHFiles(Collection<Pair<byte[], String>> familyPaths, bool boolean isSuccessful = false; try { this.writeRequestsCount.increment(); - + if (rsServices instanceof HRegionServer) { + ((HRegionServer) rsServices).getRegionServerMetrics(). + updateServerWriteQPS(this.htableDescriptor.getTableName()); + } // There possibly was a split that happened between when the split keys // were gathered and before the HRegion's write lock was taken. We need // to validate the HFile region before attempting to bulk load all of them @@ -6246,6 +6253,10 @@ public boolean nextRaw(List<Cell> outResults, ScannerContext scannerContext) // scanner is closed throw new UnknownScannerException("Scanner was closed"); } + if (rsServices instanceof HRegionServer) { + ((HRegionServer) rsServices).getRegionServerMetrics(). + updateServerReadQPS(getRegionInfo().getTable()); + } boolean moreValues = false; if (outResults.isEmpty()) { // Usually outResults is empty. This is true when next is called @@ -7590,6 +7601,10 @@ public void processRowsWithLocks(RowProcessor<?,?> processor, long timeout, if (!mutations.isEmpty()) { writeRequestsCount.add(mutations.size()); + if (rsServices instanceof HRegionServer) { + ((HRegionServer) rsServices).getRegionServerMetrics(). + updateServerWriteQPS(this.htableDescriptor.getTableName(), mutations.size()); + } // 5. Call the preBatchMutate hook processor.preBatchMutate(this, walEdit); @@ -7792,6 +7807,10 @@ public Result append(Append mutate, long nonceGroup, long nonce) throws IOExcept // Lock row startRegionOperation(op); this.writeRequestsCount.increment(); + if (rsServices instanceof HRegionServer) { + ((HRegionServer) rsServices).getRegionServerMetrics(). 
+ updateServerWriteQPS(this.htableDescriptor.getTableName()); + } RowLock rowLock = null; WALKey walKey = null; boolean doRollBackMemstore = false; @@ -8061,6 +8080,10 @@ public Result increment(Increment mutation, long nonceGroup, long nonce) checkFamilies(mutation.getFamilyCellMap().keySet()); startRegionOperation(op); this.writeRequestsCount.increment(); + if (rsServices instanceof HRegionServer) { + ((HRegionServer) rsServices).getRegionServerMetrics(). + updateServerWriteQPS(this.htableDescriptor.getTableName()); + } try { // Which Increment is it? Narrow increment-only consistency or slow (default) and general // row-wide consistency. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java index 555b5d50db8d..1223c8743776 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServer.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.metrics.Meter; import org.apache.hadoop.hbase.metrics.MetricRegistries; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.metrics.Timer; @@ -49,6 +50,8 @@ public class MetricsRegionServer { private MetricRegistry metricRegistry; private Timer bulkLoadTimer; + private Meter serverReadQPS; + private Meter serverWriteQPS; public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper, Configuration conf) { this(regionServerWrapper, @@ -62,6 +65,8 @@ public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper, Confi // create and use metrics from the new hbase-metrics based registry. 
bulkLoadTimer = metricRegistry.timer("Bulkload"); + serverReadQPS = metricRegistry.meter("ServerReadQPS"); + serverWriteQPS = metricRegistry.meter("ServerWriteQPS"); } MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper, @@ -211,4 +216,32 @@ public void updateCompaction(boolean isMajor, long t, int inputFileCount, int ou public void updateBulkLoad(long millis) { this.bulkLoadTimer.updateMillis(millis); } + + public void updateServerReadQPS(TableName tn, long count) { + if (tableMetrics != null && tn != null) { + tableMetrics.updateTableReadQPS(tn, count); + } + this.serverReadQPS.mark(count); + } + + public void updateServerReadQPS(TableName tn) { + if (tableMetrics != null && tn != null) { + tableMetrics.updateTableReadQPS(tn); + } + this.serverReadQPS.mark(); + } + + public void updateServerWriteQPS(TableName tn, long count) { + if (tableMetrics != null && tn != null) { + tableMetrics.updateTableWriteQPS(tn, count); + } + this.serverWriteQPS.mark(count); + } + + public void updateServerWriteQPS(TableName tn) { + if (tableMetrics != null && tn != null) { + tableMetrics.updateTableWriteQPS(tn); + } + this.serverWriteQPS.mark(); + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerTableMetrics.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerTableMetrics.java index 571ef0e6d767..bcfc13be3a98 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerTableMetrics.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerTableMetrics.java @@ -19,6 +19,8 @@ import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.metrics.MetricRegistries; +import org.apache.hadoop.hbase.metrics.MetricRegistry; /** * Captures operation metrics by table. 
Separates metrics collection for table metrics away from @@ -28,9 +30,13 @@ public class RegionServerTableMetrics { private final MetricsTableLatencies latencies; + private MetricRegistry metricRegistry; + MetricsTableQPS qps; public RegionServerTableMetrics() { latencies = CompatibilitySingletonFactory.getInstance(MetricsTableLatencies.class); + metricRegistry = MetricRegistries.global().get(latencies.getMetricRegistryInfo()).get(); + qps = new MetricsTableQPSImpl(metricRegistry); } public void updatePut(TableName table, long time) { @@ -68,4 +74,20 @@ public void updateScanTime(TableName table, long time) { public void updateScanSize(TableName table, long size) { latencies.updateScanSize(table.getNameAsString(), size); } + + public void updateTableReadQPS(TableName table, long count) { + qps.updateTableReadQPS(table.getNameAsString(), count); + } + + public void updateTableReadQPS(TableName table) { + qps.updateTableReadQPS(table.getNameAsString()); + } + + public void updateTableWriteQPS(TableName table, long count) { + qps.updateTableWriteQPS(table.getNameAsString(), count); + } + + public void updateTableWriteQPS(TableName table) { + qps.updateTableWriteQPS(table.getNameAsString()); + } }