Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to you under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;

import org.apache.hadoop.hbase.TableName;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * Tracks query-per-second (QPS) rates, split into reads and writes, for each table hosted by a
 * RegionServer. NOTE(review): these methods are likely invoked concurrently from request-handling
 * threads — implementations should be safe for concurrent use; confirm with callers.
 */
@InterfaceAudience.Private
public interface MetricsTableQueryMeter {

  /**
   * Update table read QPS by {@code count} occurrences at once.
   * @param tableName The table the metric is for
   * @param count Number of occurrences to record
   */
  void updateTableReadQueryMeter(TableName tableName, long count);

  /**
   * Update table read QPS by a single occurrence.
   * @param tableName The table the metric is for
   */
  void updateTableReadQueryMeter(TableName tableName);

  /**
   * Update table write QPS by {@code count} occurrences at once.
   * @param tableName The table the metric is for
   * @param count Number of occurrences to record
   */
  void updateTableWriteQueryMeter(TableName tableName, long count);

  /**
   * Update table write QPS by a single occurrence.
   * @param tableName The table the metric is for
   */
  void updateTableWriteQueryMeter(TableName tableName);
}
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,8 @@
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.MetricHistogram;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry;
import org.apache.yetus.audience.InterfaceAudience;

Expand Down Expand Up @@ -171,4 +173,15 @@ public void updateScanSize(String tableName, long scanSize) {
/**
 * Record the time taken by a scan against {@code tableName}'s latency histogram.
 * The histogram is created lazily on first use for the table.
 * @param tableName name of the table the scan ran against
 * @param t scan time to record
 */
public void updateScanTime(String tableName, long t) {
  getOrCreateTableHistogram(tableName).updateScanTime(t);
}

/**
 * Snapshot all metrics known to this source into a single record on the collector.
 * @param metricsCollector collector to add the record to
 * @param all whether to snapshot all metrics, or only changed ones
 */
@Override
public void getMetrics(MetricsCollector metricsCollector, boolean all) {
  MetricsRecordBuilder mrb = metricsCollector.addRecord(metricsName);
  // This source is registered in the super class's constructor, so getMetrics() may be
  // invoked before this subclass finishes initializing; hence the null guard below.
  metricsRegistry.snapshot(mrb, all);
  if (metricsAdapter != null) {
    // Also snapshot the hbase-metrics MetricRegistry contents into the same record.
    metricsAdapter.snapshotAllMetrics(registry, mrb);
  }
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to you under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.metrics.Meter;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * Implementation of {@link MetricsTableQueryMeter} to track queries per second for each table in
 * a RegionServer.
 */
@InterfaceAudience.Private
public class MetricsTableQueryMeterImpl implements MetricsTableQueryMeter {

  /** Per-table meter pairs, created lazily on first update for a table. */
  private final Map<TableName, TableMeters> metersByTable = new ConcurrentHashMap<>();
  /** Registry the per-table meters are registered in (and visible through). */
  private final MetricRegistry metricRegistry;

  // Modifier order fixed to the conventional `private static final`.
  private static final String TABLE_READ_QUERY_PER_SECOND = "tableReadQueryPerSecond";
  private static final String TABLE_WRITE_QUERY_PER_SECOND = "tableWriteQueryPerSecond";

  /**
   * @param metricRegistry registry to create the per-table meters in
   */
  public MetricsTableQueryMeterImpl(MetricRegistry metricRegistry) {
    this.metricRegistry = metricRegistry;
  }

  /** Holds the read and write query meters for a single table. */
  private static final class TableMeters {
    final Meter tableReadQueryMeter;
    final Meter tableWriteQueryMeter;

    TableMeters(MetricRegistry metricRegistry, TableName tableName) {
      this.tableReadQueryMeter =
          metricRegistry.meter(qualifyMetricsName(tableName, TABLE_READ_QUERY_PER_SECOND));
      this.tableWriteQueryMeter =
          metricRegistry.meter(qualifyMetricsName(tableName, TABLE_WRITE_QUERY_PER_SECOND));
    }

    public void updateTableReadQueryMeter(long count) {
      tableReadQueryMeter.mark(count);
    }

    public void updateTableReadQueryMeter() {
      tableReadQueryMeter.mark();
    }

    public void updateTableWriteQueryMeter(long count) {
      tableWriteQueryMeter.mark(count);
    }

    public void updateTableWriteQueryMeter() {
      tableWriteQueryMeter.mark();
    }
  }

  /**
   * Build the fully-qualified metric name, e.g.
   * {@code Namespace_default_table_foo_metric_tableReadQueryPerSecond}.
   * @param tableName table the metric is for
   * @param metric the bare metric name
   * @return qualified metric name embedding namespace and table
   */
  private static String qualifyMetricsName(TableName tableName, String metric) {
    StringBuilder sb = new StringBuilder();
    sb.append("Namespace_").append(tableName.getNamespaceAsString());
    sb.append("_table_").append(tableName.getQualifierAsString());
    sb.append("_metric_").append(metric);
    return sb.toString();
  }

  /** Get the meters for {@code tableName}, creating and registering them on first use. */
  private TableMeters getOrCreateTableMeter(TableName tableName) {
    return metersByTable.computeIfAbsent(tableName, tbn -> new TableMeters(metricRegistry, tbn));
  }

  @Override
  public void updateTableReadQueryMeter(TableName tableName, long count) {
    getOrCreateTableMeter(tableName).updateTableReadQueryMeter(count);
  }

  @Override
  public void updateTableReadQueryMeter(TableName tableName) {
    getOrCreateTableMeter(tableName).updateTableReadQueryMeter();
  }

  @Override
  public void updateTableWriteQueryMeter(TableName tableName, long count) {
    getOrCreateTableMeter(tableName).updateTableWriteQueryMeter(count);
  }

  @Override
  public void updateTableWriteQueryMeter(TableName tableName) {
    getOrCreateTableMeter(tableName).updateTableWriteQueryMeter();
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -4083,6 +4083,10 @@ OperationStatus[] batchMutate(BatchOperation<?> batchOp) throws IOException {
requestFlushIfNeeded();
}
} finally {
if (rsServices != null && rsServices.getMetrics() != null) {
rsServices.getMetrics().updateWriteQueryMeter(this.htableDescriptor.
getTableName(), batchOp.size());
}
batchOp.closeRegionOperation();
}
return batchOp.retCodeDetails;
Expand Down Expand Up @@ -6612,6 +6616,9 @@ public boolean nextRaw(List<Cell> outResults, ScannerContext scannerContext)
if (!outResults.isEmpty()) {
readRequestsCount.increment();
}
if (rsServices != null && rsServices.getMetrics() != null) {
rsServices.getMetrics().updateReadQueryMeter(getRegionInfo().getTable());
}

// If the size limit was reached it means a partial Result is being returned. Returning a
// partial Result means that we should not reset the filters; filters should only be reset in
Expand Down Expand Up @@ -7741,6 +7748,11 @@ public void processRowsWithLocks(RowProcessor<?,?> processor, long timeout,

// STEP 11. Release row lock(s)
releaseRowLocks(acquiredRowLocks);

if (rsServices != null && rsServices.getMetrics() != null) {
rsServices.getMetrics().updateWriteQueryMeter(this.htableDescriptor.
getTableName(), mutations.size());
}
}
success = true;
} finally {
Expand Down Expand Up @@ -7896,6 +7908,10 @@ private Result doDelta(Operation op, Mutation mutation, long nonceGroup, long no
rsServices.getNonceManager().addMvccToOperationContext(nonceGroup, nonce,
writeEntry.getWriteNumber());
}
if (rsServices != null && rsServices.getMetrics() != null) {
rsServices.getMetrics().updateWriteQueryMeter(this.htableDescriptor.
getTableName());
}
writeEntry = null;
} finally {
this.updatesLock.readLock().unlock();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@

import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.metrics.Meter;
import org.apache.hadoop.hbase.metrics.MetricRegistries;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.metrics.Timer;
Expand Down Expand Up @@ -51,6 +52,8 @@ public class MetricsRegionServer {

private MetricRegistry metricRegistry;
private Timer bulkLoadTimer;
private Meter serverReadQueryMeter;
private Meter serverWriteQueryMeter;

public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper, Configuration conf,
MetricsTable metricsTable) {
Expand All @@ -68,6 +71,8 @@ public MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper, Confi
bulkLoadTimer = metricRegistry.timer("Bulkload");

quotaSource = CompatibilitySingletonFactory.getInstance(MetricsRegionServerQuotaSource.class);
serverReadQueryMeter = metricRegistry.meter("ServerReadQueryPerSecond");
serverWriteQueryMeter = metricRegistry.meter("ServerWriteQueryPerSecond");
}

MetricsRegionServer(MetricsRegionServerWrapper regionServerWrapper,
Expand Down Expand Up @@ -248,4 +253,32 @@ public void incrementNumRegionSizeReportsSent(long numReportsSent) {
public void incrementRegionSizeReportingChoreTime(long time) {
quotaSource.incrementRegionSizeReportingChoreTime(time);
}

/**
 * Record {@code count} read queries against the server-wide read meter, and against the
 * per-table meter when both a table and table metrics are available.
 * @param tn table the reads were against; may be null
 * @param count number of read queries to record
 */
public void updateReadQueryMeter(TableName tn, long count) {
  // Per-table accounting is best-effort: skip when disabled or no table given.
  if (tn != null && tableMetrics != null) {
    tableMetrics.updateTableReadQueryMeter(tn, count);
  }
  serverReadQueryMeter.mark(count);
}

/**
 * Record a single read query against the server-wide read meter, and against the
 * per-table meter when both a table and table metrics are available.
 * @param tn table the read was against; may be null
 */
public void updateReadQueryMeter(TableName tn) {
  // Per-table accounting is best-effort: skip when disabled or no table given.
  if (tn != null && tableMetrics != null) {
    tableMetrics.updateTableReadQueryMeter(tn);
  }
  serverReadQueryMeter.mark();
}

/**
 * Record {@code count} write queries against the server-wide write meter, and against the
 * per-table meter when both a table and table metrics are available.
 * @param tn table the writes were against; may be null
 * @param count number of write queries to record
 */
public void updateWriteQueryMeter(TableName tn, long count) {
  // Per-table accounting is best-effort: skip when disabled or no table given.
  if (tn != null && tableMetrics != null) {
    tableMetrics.updateTableWriteQueryMeter(tn, count);
  }
  serverWriteQueryMeter.mark(count);
}

/**
 * Record a single write query against the server-wide write meter, and against the
 * per-table meter when both a table and table metrics are available.
 * @param tn table the write was against; may be null
 */
public void updateWriteQueryMeter(TableName tn) {
  // Per-table accounting is best-effort: skip when disabled or no table given.
  if (tn != null && tableMetrics != null) {
    tableMetrics.updateTableWriteQueryMeter(tn);
  }
  serverWriteQueryMeter.mark();
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@

import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.metrics.MetricRegistries;
import org.apache.yetus.audience.InterfaceAudience;

/**
Expand All @@ -28,9 +29,12 @@
public class RegionServerTableMetrics {

private final MetricsTableLatencies latencies;
private final MetricsTableQueryMeter queryMeter;

public RegionServerTableMetrics() {
latencies = CompatibilitySingletonFactory.getInstance(MetricsTableLatencies.class);
queryMeter = new MetricsTableQueryMeterImpl(MetricRegistries.global().
get(((MetricsTableLatenciesImpl) latencies).getMetricRegistryInfo()).get());
}

public void updatePut(TableName table, long time) {
Expand Down Expand Up @@ -68,4 +72,20 @@ public void updateScanTime(TableName table, long time) {
/**
 * Record the size of a scan response against {@code table}'s metrics.
 * @param table table that was scanned
 * @param size scan response size to record
 */
public void updateScanSize(TableName table, long size) {
  final String tableNameStr = table.getNameAsString();
  latencies.updateScanSize(tableNameStr, size);
}

/**
 * Record {@code count} read queries against {@code table}'s read QPS meter.
 * @param table table the reads were against
 * @param count number of read queries to record
 */
public void updateTableReadQueryMeter(TableName table, long count) {
  queryMeter.updateTableReadQueryMeter(table, count);
}

/**
 * Record a single read query against {@code table}'s read QPS meter.
 * @param table table the read was against
 */
public void updateTableReadQueryMeter(TableName table) {
  queryMeter.updateTableReadQueryMeter(table);
}

/**
 * Record {@code count} write queries against {@code table}'s write QPS meter.
 * @param table table the writes were against
 * @param count number of write queries to record
 */
public void updateTableWriteQueryMeter(TableName table, long count) {
  queryMeter.updateTableWriteQueryMeter(table, count);
}

/**
 * Record a single write query against {@code table}'s write QPS meter.
 * @param table table the write was against
 */
public void updateTableWriteQueryMeter(TableName table) {
  queryMeter.updateTableWriteQueryMeter(table);
}
}