@@ -51,6 +51,10 @@ public final class SCMPerformanceMetrics implements MetricsSource {
   private MutableRate deleteKeyFailureLatencyNs;
   @Metric(about = "Latency for deleteKey success in nanoseconds")
   private MutableRate deleteKeySuccessLatencyNs;
+  @Metric(about = "Latency for a successful allocateBlock call in nanoseconds")
+  private MutableRate allocateBlockSuccessLatencyNs;
+  @Metric(about = "Latency for a failed allocateBlock call in nanoseconds")
+  private MutableRate allocateBlockFailureLatencyNs;
 
   public SCMPerformanceMetrics() {
     this.registry = new MetricsRegistry(SOURCE_NAME);
@@ -78,6 +82,16 @@ public void getMetrics(MetricsCollector collector, boolean all) {
     deleteKeySuccessLatencyNs.snapshot(recordBuilder, true);
     deleteKeyFailure.snapshot(recordBuilder, true);
     deleteKeyFailureLatencyNs.snapshot(recordBuilder, true);
+    allocateBlockSuccessLatencyNs.snapshot(recordBuilder, true);
+    allocateBlockFailureLatencyNs.snapshot(recordBuilder, true);
   }
 
+  public void updateAllocateBlockSuccessLatencyNs(long startNanos) {
+    allocateBlockSuccessLatencyNs.add(Time.monotonicNowNanos() - startNanos);
+  }
+
+  public void updateAllocateBlockFailureLatencyNs(long startNanos) {
+    allocateBlockFailureLatencyNs.add(Time.monotonicNowNanos() - startNanos);
+  }
+
   public void updateDeleteKeySuccessStats(long startNanos) {
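The two new `MutableRate` fields snapshot as a `<Name>NumOps` counter plus a `<Name>AvgTime` gauge. Below is a minimal sketch of driving and reading them, assuming the `MetricsAsserts` helper from the hadoop-common test artifact and the default capitalized `@Metric` field naming; the registration wiring and the sketch class itself are hypothetical, not part of this change:

```java
import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.util.Time;

public final class AllocateBlockLatencySketch {
  public static void main(String[] args) {
    // Registering the source lets the metrics system instantiate the
    // @Metric-annotated MutableRate fields before we touch them.
    DefaultMetricsSystem.initialize("sketch");
    SCMPerformanceMetrics perfMetrics = DefaultMetricsSystem.instance()
        .register("SCMPerformanceMetricsSketch", "sketch source",
            new SCMPerformanceMetrics());

    long startNanos = Time.monotonicNowNanos();
    // ... a successful allocateBlock call would run here ...
    perfMetrics.updateAllocateBlockSuccessLatencyNs(startNanos);

    // MutableRate publishes a sample count alongside the average latency.
    MetricsRecordBuilder rb = getMetrics(perfMetrics);
    long numOps = getLongCounter("AllocateBlockSuccessLatencyNsNumOps", rb);
    System.out.println("success samples recorded: " + numOps); // expect 1
  }
}
```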
@@ -60,6 +60,7 @@
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.ozone.ClientVersion;
 import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
+import org.apache.hadoop.util.Time;
 import org.apache.ratis.protocol.exceptions.NotLeaderException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -416,6 +417,7 @@ public int getNumberOfContainers(PipelineID pipelineID) throws IOException {
   @Override
   public void openPipeline(PipelineID pipelineId)
       throws IOException {
+    long startNanos = Time.monotonicNowNanos();
     HddsProtos.PipelineID pipelineIdProtobuf = pipelineId.getProtobuf();
     acquireWriteLock();
     final Pipeline pipeline;
@@ -431,6 +433,7 @@ public void openPipeline(PipelineID pipelineId)
     } finally {
       releaseWriteLock();
     }
+    metrics.updatePipelineCreationLatencyNs(startNanos);
     metrics.incNumPipelineCreated();
     metrics.createPerPipelineMetrics(pipeline);
   }
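Note that `startNanos` is captured before `acquireWriteLock()`, so the recorded latency includes any wait on the write lock, and the update runs after the `finally` block, so a failed `openPipeline` records no sample here. The same capture-start/record-elapsed pattern repeats at every call site in this PR; here is a generic sketch of it, with hypothetical names, using only stock metrics2 calls:

```java
import java.util.concurrent.Callable;

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableRate;
import org.apache.hadoop.util.Time;

/** Hypothetical helper distilling the timing pattern used in this PR. */
public final class LatencyRecorder {
  private final MutableRate successLatencyNs;
  private final MutableRate failureLatencyNs;

  public LatencyRecorder(MetricsRegistry registry, String op) {
    this.successLatencyNs = registry.newRate(op + "SuccessLatencyNs");
    this.failureLatencyNs = registry.newRate(op + "FailureLatencyNs");
  }

  /** Times op, recording the elapsed nanos on the matching rate. */
  public <T> T time(Callable<T> op) throws Exception {
    long startNanos = Time.monotonicNowNanos(); // monotonic: immune to clock jumps
    try {
      T result = op.call();
      successLatencyNs.add(Time.monotonicNowNanos() - startNanos);
      return result;
    } catch (Exception e) {
      failureLatencyNs.add(Time.monotonicNowNanos() - startNanos);
      throw e;
    }
  }
}
```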
@@ -31,7 +31,9 @@
 import org.apache.hadoop.metrics2.lib.Interns;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableRate;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.util.Time;
 
 /**
  * This class maintains Pipeline related metrics.
@@ -54,6 +56,7 @@ public final class SCMPipelineMetrics implements MetricsSource {
   private @Metric MutableCounterLong numPipelineReportProcessed;
   private @Metric MutableCounterLong numPipelineReportProcessingFailed;
   private @Metric MutableCounterLong numPipelineContainSameDatanodes;
+  private @Metric MutableRate pipelineCreationLatencyNs;
   private final Map<PipelineID, MutableCounterLong> numBlocksAllocated;
   private final Map<PipelineID, MutableCounterLong> numBytesWritten;
 
@@ -100,6 +103,7 @@ public void getMetrics(MetricsCollector collector, boolean all) {
     numPipelineReportProcessed.snapshot(recordBuilder, true);
     numPipelineReportProcessingFailed.snapshot(recordBuilder, true);
     numPipelineContainSameDatanodes.snapshot(recordBuilder, true);
+    pipelineCreationLatencyNs.snapshot(recordBuilder, true);
     numBytesWritten
         .forEach((pid, metric) -> metric.snapshot(recordBuilder, true));
     numBlocksAllocated
@@ -208,4 +212,8 @@ void incNumPipelineReportProcessingFailed() {
   void incNumPipelineContainSameDatanodes() {
     numPipelineContainSameDatanodes.incr();
   }
+
+  public void updatePipelineCreationLatencyNs(long startNanos) {
+    pipelineCreationLatencyNs.add(Time.monotonicNowNanos() - startNanos);
+  }
 }
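Once this source is registered, a `MutableRate` named `pipelineCreationLatencyNs` should surface as `PipelineCreationLatencyNsNumOps` and `PipelineCreationLatencyNsAvgTime` (the latter in nanoseconds, since that is the unit fed to `add`). A sketch of reading those over JMX follows; the `ObjectName` is an assumption based on the usual `Hadoop:service=...,name=...` convention, so verify it against the name this source actually registers under:

```java
import java.lang.management.ManagementFactory;

import javax.management.MBeanServer;
import javax.management.ObjectName;

public final class PipelineLatencyJmxSketch {
  public static void main(String[] args) throws Exception {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    // Hadoop metrics2 MBeans follow "Hadoop:service=<svc>,name=<source>";
    // the exact service and source values below are assumptions.
    ObjectName name = new ObjectName(
        "Hadoop:service=StorageContainerManager,name=SCMPipelineMetrics");
    Object numOps = server.getAttribute(name, "PipelineCreationLatencyNsNumOps");
    Object avgNs = server.getAttribute(name, "PipelineCreationLatencyNsAvgTime");
    System.out.println("pipeline creations: " + numOps + ", avg ns: " + avgNs);
  }
}
```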
@@ -191,6 +191,7 @@ public List<AllocatedBlock> allocateBlock(
       String owner, ExcludeList excludeList,
       String clientMachine
   ) throws IOException {
+    long startNanos = Time.monotonicNowNanos();
     Map<String, String> auditMap = Maps.newHashMap();
     auditMap.put("size", String.valueOf(size));
     auditMap.put("num", String.valueOf(num));
@@ -234,17 +235,21 @@
         AUDIT.logWriteFailure(buildAuditMessageForFailure(
             SCMAction.ALLOCATE_BLOCK, auditMap, null)
         );
+        perfMetrics.updateAllocateBlockFailureLatencyNs(startNanos);
       } else {
         AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
             SCMAction.ALLOCATE_BLOCK, auditMap));
+        perfMetrics.updateAllocateBlockSuccessLatencyNs(startNanos);
       }
 
       return blocks;
     } catch (TimeoutException ex) {
+      perfMetrics.updateAllocateBlockFailureLatencyNs(startNanos);
       AUDIT.logWriteFailure(buildAuditMessageForFailure(
           SCMAction.ALLOCATE_BLOCK, auditMap, ex));
       throw new IOException(ex);
     } catch (Exception ex) {
+      perfMetrics.updateAllocateBlockFailureLatencyNs(startNanos);
       AUDIT.logWriteFailure(buildAuditMessageForFailure(
           SCMAction.ALLOCATE_BLOCK, auditMap, ex));
       throw ex;
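With this hunk, every exit from `allocateBlock` records exactly one latency sample: the audited-failure branch and both catch blocks feed the failure rate, the success-audit branch feeds the success rate, and each update method takes the call's start time rather than a precomputed delta. A standalone sketch of that accounting invariant; the `PerfMetrics` interface, the allocation condition, and the stub below are hypothetical stand-ins for the real `SCMPerformanceMetrics` calls:

```java
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeoutException;

import org.apache.hadoop.util.Time;

/** Sketch: one latency sample per call, success or failure. */
public final class AllocateBlockAccountingSketch {
  interface PerfMetrics {
    void success(long startNanos);
    void failure(long startNanos);
  }

  static List<String> allocate(int num, PerfMetrics perf) throws IOException {
    long startNanos = Time.monotonicNowNanos();
    try {
      List<String> blocks = doAllocate(num);  // may throw
      if (blocks.size() != num) {
        perf.failure(startNanos);             // audited-failure branch
      } else {
        perf.success(startNanos);             // success-audit branch
      }
      return blocks;
    } catch (TimeoutException ex) {
      perf.failure(startNanos);               // timeout exit
      throw new IOException(ex);
    } catch (Exception ex) {
      perf.failure(startNanos);               // any other exit
      throw ex;
    }
  }

  private static List<String> doAllocate(int num) throws TimeoutException {
    return Collections.nCopies(num, "block"); // stand-in for block allocation
  }
}
```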