diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventExecutorMetrics.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventExecutorMetrics.java new file mode 100644 index 000000000000..096e1b94423e --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventExecutorMetrics.java @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.server.events; + +/** + * Metrics for Event Executors. + */ +public interface EventExecutorMetrics { + /** + * get name. + */ + String getName(); + + /** + * stop metrics. + */ + void stop(); + + /** + * Increment the number of the failed events. + */ + void incrFailedEvents(long delta); + + + /** + * Increment the number of the processed events. + */ + void incrSuccessfulEvents(long delta); + + /** + * Increment the number of the not-yet processed events. + */ + void incrQueuedEvents(long delta); + + /** + * Increment the number of events scheduled to be processed. + */ + void incrScheduledEvents(long delta); + + /** + * Increment the number of dropped events to be processed. + */ + void incrDroppedEvents(long delta); + + /** + * Increment the number of events having long wait in queue + * crossing threshold. + */ + void incrLongWaitInQueueEvents(long delta); + + /** + * Increment the number of events having long execution crossing threshold. + */ + void incrLongTimeExecutionEvents(long delta); + + /** + * Return the number of the failed events. + */ + long failedEvents(); + + + /** + * Return the number of the processed events. + */ + long successfulEvents(); + + /** + * Return the number of the not-yet processed events. + */ + long queuedEvents(); + + /** + * Return the number of events scheduled to be processed. + */ + long scheduledEvents(); + + /** + * Return the number of dropped events to be processed. + */ + long droppedEvents(); + + /** + * Return the number of events having long wait in queue crossing threshold. 
+ */ + long longWaitInQueueEvents(); + + /** + * Return the number of events having long execution crossing threshold. + */ + long longTimeExecutionEvents(); +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/FixedThreadPoolWithAffinityExecutor.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/FixedThreadPoolWithAffinityExecutor.java index 4949195cddb1..b4cdfac16dda 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/FixedThreadPoolWithAffinityExecutor.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/FixedThreadPoolWithAffinityExecutor.java @@ -18,10 +18,6 @@ package org.apache.hadoop.hdds.server.events; import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.metrics2.annotation.Metric; -import org.apache.hadoop.metrics2.annotation.Metrics; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -45,12 +41,8 @@ * * @param
the payload type of events */ -@Metrics(context = "EventQueue") public class FixedThreadPoolWithAffinityExecutor
implements EventExecutor
{ - - private static final String EVENT_QUEUE = "EventQueue"; - private static final Logger LOG = LoggerFactory.getLogger(FixedThreadPoolWithAffinityExecutor.class); @@ -65,28 +57,8 @@ public class FixedThreadPoolWithAffinityExecutor
private final List
* Create FixedThreadPoolExecutor with affinity.
* Based on the payload's hash code, the payload will be scheduled to the
* same thread.
- *
- * @param name Unique name used in monitoring and metrics.
*/
public FixedThreadPoolWithAffinityExecutor(
- String name, EventHandler eventHandler,
+ EventHandler eventHandler,
List clazz, List handler, P message, EventPublisher
publisher) {
- queued.incr();
+ metrics.incrQueuedEvents(1L);
// For messages that need to be routed to the same thread need to
// implement hashCode to match the messages. This should be safe for
// other messages that implement the native hash.
@@ -172,42 +139,42 @@ public void onMessage(EventHandler handler, P message, EventPublisher
BlockingQueue
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.container.metrics;
+
+import org.apache.hadoop.hdds.server.events.EventExecutorMetrics;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+
+/**
+ * Metrics for Container Report Event Executors.
+ */
+@Metrics(context = "EventQueue")
+public class ContainerReportMetrics implements EventExecutorMetrics {
+ private static final String EVENT_QUEUE = "EventQueue";
+ private String name;
+
+ @Metric
+ private MutableCounterLong queued;
+
+ @Metric
+ private MutableCounterLong done;
+
+ @Metric
+ private MutableCounterLong failed;
+
+ @Metric
+ private MutableCounterLong scheduled;
+
+ @Metric
+ private MutableCounterLong dropped;
+
+ @Metric
+ private MutableCounterLong longWaitInQueue;
+
+ @Metric
+ private MutableCounterLong longTimeExecution;
+
+ public ContainerReportMetrics(String name) {
+ this.name = name;
+ DefaultMetricsSystem.instance().register(EVENT_QUEUE + this.name,
+ "Event Executor metrics ", this);
+ }
+
+ @Override
+ public void stop() {
+ DefaultMetricsSystem.instance().unregisterSource(EVENT_QUEUE + name);
+ }
+
+ @Override
+ public String getName() {
+ return name;
+ }
+
+ @Override
+ public void incrFailedEvents(long delta) {
+ failed.incr(delta);
+ }
+
+ @Override
+ public void incrSuccessfulEvents(long delta) {
+ done.incr(delta);
+ }
+
+ @Override
+ public void incrQueuedEvents(long delta) {
+ queued.incr(delta);
+ }
+
+ @Override
+ public void incrScheduledEvents(long delta) {
+ scheduled.incr(delta);
+ }
+
+ @Override
+ public void incrDroppedEvents(long delta) {
+ dropped.incr(delta);
+ }
+
+ @Override
+ public void incrLongWaitInQueueEvents(long delta) {
+ longWaitInQueue.incr(delta);
+ }
+
+ @Override
+ public void incrLongTimeExecutionEvents(long delta) {
+ longTimeExecution.incr(delta);
+ }
+
+ @Override
+ public long failedEvents() {
+ return failed.value();
+ }
+
+ @Override
+ public long successfulEvents() {
+ return done.value();
+ }
+
+ @Override
+ public long queuedEvents() {
+ return queued.value();
+ }
+
+ @Override
+ public long scheduledEvents() {
+ return scheduled.value();
+ }
+
+ @Override
+ public long droppedEvents() {
+ return dropped.value();
+ }
+
+ @Override
+ public long longWaitInQueueEvents() {
+ return longWaitInQueue.value();
+ }
+
+ @Override
+ public long longTimeExecutionEvents() {
+ return longTimeExecution.value();
+ }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/IncrementalContainerReportMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/IncrementalContainerReportMetrics.java
new file mode 100644
index 000000000000..44627c777d26
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/metrics/IncrementalContainerReportMetrics.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.container.metrics;
+
+import org.apache.hadoop.metrics2.annotation.Metrics;
+
+/**
+ * Metrics for Incremental Container Report Event Executors.
+ */
+@Metrics(context = "EventQueue")
+public class IncrementalContainerReportMetrics extends ContainerReportMetrics {
+ public IncrementalContainerReportMetrics(String name) {
+ super(name);
+ }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 733a418444fd..94462c52ad42 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -44,6 +44,8 @@
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.ContainerManagerImpl;
import org.apache.hadoop.hdds.scm.PlacementPolicyValidateProxy;
+import org.apache.hadoop.hdds.scm.container.metrics.ContainerReportMetrics;
+import org.apache.hadoop.hdds.scm.container.metrics.IncrementalContainerReportMetrics;
import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaPendingOps;
import org.apache.hadoop.hdds.scm.container.replication.DatanodeCommandCountUpdatedHandler;
import org.apache.hadoop.hdds.scm.container.replication.LegacyReplicationManager;
@@ -80,6 +82,7 @@
import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
import org.apache.hadoop.hdds.server.OzoneAdmins;
import org.apache.hadoop.hdds.server.ServerUtils;
+import org.apache.hadoop.hdds.server.events.EventExecutorMetrics;
import org.apache.hadoop.hdds.server.events.FixedThreadPoolWithAffinityExecutor;
import org.apache.hadoop.hdds.server.http.RatisDropwizardExports;
import org.apache.hadoop.hdds.utils.HAUtils;
@@ -498,25 +501,26 @@ private void initializeEventHandlers() {
= FixedThreadPoolWithAffinityExecutor.initializeExecutorPool(queues);
Map List
queue = workQueues.get(index);
queue.add((Q) message);
if (queue instanceof IQueueMetrics) {
- dropped.incr(((IQueueMetrics) queue).getAndResetDropCount(
+ metrics.incrDroppedEvents(((IQueueMetrics) queue).getAndResetDropCount(
message.getClass().getSimpleName()));
}
}
@Override
public long failedEvents() {
- return failed.value();
+ return metrics.failedEvents();
}
@Override
public long successfulEvents() {
- return done.value();
+ return metrics.successfulEvents();
}
@Override
public long queuedEvents() {
- return queued.value();
+ return metrics.queuedEvents();
}
@Override
public long scheduledEvents() {
- return scheduled.value();
+ return metrics.scheduledEvents();
}
@Override
public long droppedEvents() {
- return dropped.value();
+ return metrics.droppedEvents();
}
public long longWaitInQueueEvents() {
- return longWaitInQueue.value();
+ return metrics.longWaitInQueueEvents();
}
public long longTimeExecutionEvents() {
- return longTimeExecution.value();
+ return metrics.longTimeExecutionEvents();
}
@Override
@@ -217,7 +184,6 @@ public void close() {
executor.shutdown();
}
executorMap.clear();
- DefaultMetricsSystem.instance().unregisterSource(EVENT_QUEUE + name);
}
@Override
@@ -267,26 +233,26 @@ public void run() {
long curTime = Time.monotonicNow();
if (createTime != 0
&& ((curTime - createTime) > executor.queueWaitThreshold)) {
- executor.longWaitInQueue.incr();
+ executor.metrics.incrLongWaitInQueueEvents(1L);
LOG.warn("Event remained in queue for long time {} millisec, {}",
(curTime - createTime), eventId);
}
- executor.scheduled.incr();
+ executor.metrics.incrScheduledEvents(1L);
try {
executor.eventHandler.onMessage(report,
executor.eventPublisher);
- executor.done.incr();
+ executor.metrics.incrSuccessfulEvents(1L);
curTime = Time.monotonicNow();
if (createTime != 0
&& (curTime - createTime) > executor.execWaitThreshold) {
- executor.longTimeExecution.incr();
+ executor.metrics.incrLongTimeExecutionEvents(1L);
LOG.warn("Event taken long execution time {} millisec, {}",
(curTime - createTime), eventId);
}
} catch (Exception ex) {
LOG.error("Error on execution message {}", report, ex);
- executor.failed.incr();
+ executor.metrics.incrFailedEvents(1L);
}
if (Thread.currentThread().isInterrupted()) {
LOG.warn("Interrupt of execution of Reports");
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
index 767941e52920..edc1e3628c80 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/events/TestEventQueue.java
@@ -81,12 +81,13 @@ public void simpleEventWithFixedThreadPoolExecutor()
queues.add(eventQueue);
Map