Fix issue when metrics are not available #1207

Merged: 3 commits, Oct 6, 2020
@@ -2,6 +2,9 @@

## Unreleased

- Fix issue when specific metrics are not available in certain OS
([#1207](https://github.com/open-telemetry/opentelemetry-python/pull/1207))

## Version 0.13b0

Released 2020-09-17
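For reference, the fix noted in the changelog entry above works by guarding each observation with a hasattr() check, so metrics that psutil does not expose on a given operating system are skipped instead of raising AttributeError. A minimal standalone sketch of that guard pattern follows; observe_available and the metric list are hypothetical illustrations, not code from this PR.

import psutil

def observe_available(counters, metric_names, observe):
    # Observe only the fields that exist on this platform's psutil result;
    # psutil omits some fields (for example some CPU states) on some OSes.
    for name in metric_names:
        if hasattr(counters, name):
            observe(name, getattr(counters, name))

# Example usage: print whichever of these CPU time states this OS provides.
observe_available(
    psutil.cpu_times(),
    ["user", "system", "idle", "iowait", "steal"],
    lambda name, value: print(name, value),
)

The diff below applies the same guard in each observer callback.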
@@ -372,11 +372,12 @@ def _get_system_cpu_time(self, observer: metrics.ValueObserver) -> None:
"""
for cpu, times in enumerate(psutil.cpu_times(percpu=True)):
for metric in self._config["system.cpu.time"]:
self._system_cpu_time_labels["state"] = metric
self._system_cpu_time_labels["cpu"] = cpu + 1
observer.observe(
getattr(times, metric), self._system_cpu_time_labels
)
if hasattr(times, metric):
Contributor:
How about getattr here followed by a check for None? Would save one attribute lookup if/when on the hot path.

Contributor Author:
This would be a good optimization. I thought about it, but then I remembered that the default interval for this instrumentation is to collect metrics every 30 seconds. It can definitely be improved when it becomes an issue.

self._system_cpu_time_labels["state"] = metric
self._system_cpu_time_labels["cpu"] = cpu + 1
observer.observe(
getattr(times, metric), self._system_cpu_time_labels
)

def _get_system_cpu_utilization(
self, observer: metrics.ValueObserver
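The reviewer's suggestion above can be sketched as follows. This is a hypothetical variant, not code from this PR, and observe_cpu_times, configured_metrics, and labels are illustrative names: a single getattr() with a None default replaces the hasattr()/getattr() pair, saving one attribute lookup per metric when the callback is on a hot path.

import psutil

def observe_cpu_times(observer, configured_metrics, labels):
    for cpu, times in enumerate(psutil.cpu_times(percpu=True)):
        for metric in configured_metrics:
            # getattr with a default needs only one lookup; None means the
            # field is not available on this OS, so the metric is skipped.
            value = getattr(times, metric, None)
            if value is not None:
                labels["state"] = metric
                labels["cpu"] = cpu + 1
                observer.observe(value, labels)

As the author notes, the callbacks run on a 30-second default interval, so the extra lookup is unlikely to matter in practice.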
@@ -391,12 +392,13 @@ def _get_system_cpu_utilization(
psutil.cpu_times_percent(percpu=True)
):
for metric in self._config["system.cpu.utilization"]:
self._system_cpu_utilization_labels["state"] = metric
self._system_cpu_utilization_labels["cpu"] = cpu + 1
observer.observe(
getattr(times_percent, metric) / 100,
self._system_cpu_utilization_labels,
)
if hasattr(times_percent, metric):
self._system_cpu_utilization_labels["state"] = metric
self._system_cpu_utilization_labels["cpu"] = cpu + 1
observer.observe(
getattr(times_percent, metric) / 100,
self._system_cpu_utilization_labels,
)

def _get_system_memory_usage(
self, observer: metrics.ValueObserver
@@ -409,10 +411,11 @@ def _get_system_memory_usage(
virtual_memory = psutil.virtual_memory()
for metric in self._config["system.memory.usage"]:
self._system_memory_usage_labels["state"] = metric
observer.observe(
getattr(virtual_memory, metric),
self._system_memory_usage_labels,
)
if hasattr(virtual_memory, metric):
observer.observe(
getattr(virtual_memory, metric),
self._system_memory_usage_labels,
)

def _get_system_memory_utilization(
self, observer: metrics.ValueObserver
@@ -426,10 +429,11 @@ def _get_system_memory_utilization(

for metric in self._config["system.memory.utilization"]:
self._system_memory_utilization_labels["state"] = metric
observer.observe(
getattr(system_memory, metric) / system_memory.total,
self._system_memory_utilization_labels,
)
if hasattr(system_memory, metric):
observer.observe(
getattr(system_memory, metric) / system_memory.total,
self._system_memory_utilization_labels,
)

def _get_system_swap_usage(self, observer: metrics.ValueObserver) -> None:
"""Observer callback for swap usage
@@ -441,9 +445,11 @@ def _get_system_swap_usage(self, observer: metrics.ValueObserver) -> None:

for metric in self._config["system.swap.usage"]:
self._system_swap_usage_labels["state"] = metric
observer.observe(
getattr(system_swap, metric), self._system_swap_usage_labels
)
if hasattr(system_swap, metric):
observer.observe(
getattr(system_swap, metric),
self._system_swap_usage_labels,
)

def _get_system_swap_utilization(
self, observer: metrics.ValueObserver
@@ -456,11 +462,12 @@ def _get_system_swap_utilization(
system_swap = psutil.swap_memory()

for metric in self._config["system.swap.utilization"]:
self._system_swap_utilization_labels["state"] = metric
observer.observe(
getattr(system_swap, metric) / system_swap.total,
self._system_swap_utilization_labels,
)
if hasattr(system_swap, metric):
self._system_swap_utilization_labels["state"] = metric
observer.observe(
getattr(system_swap, metric) / system_swap.total,
self._system_swap_utilization_labels,
)

# TODO Add _get_system_swap_page_faults
# TODO Add _get_system_swap_page_operations
@@ -473,12 +480,13 @@ def _get_system_disk_io(self, observer: metrics.SumObserver) -> None:
"""
for device, counters in psutil.disk_io_counters(perdisk=True).items():
for metric in self._config["system.disk.io"]:
self._system_disk_io_labels["device"] = device
self._system_disk_io_labels["direction"] = metric
observer.observe(
getattr(counters, "{}_bytes".format(metric)),
self._system_disk_io_labels,
)
if hasattr(counters, "{}_bytes".format(metric)):
self._system_disk_io_labels["device"] = device
self._system_disk_io_labels["direction"] = metric
observer.observe(
getattr(counters, "{}_bytes".format(metric)),
self._system_disk_io_labels,
)

def _get_system_disk_operations(
self, observer: metrics.SumObserver
@@ -490,12 +498,13 @@ def _get_system_disk_operations(
"""
for device, counters in psutil.disk_io_counters(perdisk=True).items():
for metric in self._config["system.disk.operations"]:
self._system_disk_operations_labels["device"] = device
self._system_disk_operations_labels["direction"] = metric
observer.observe(
getattr(counters, "{}_count".format(metric)),
self._system_disk_operations_labels,
)
if hasattr(counters, "{}_count".format(metric)):
self._system_disk_operations_labels["device"] = device
self._system_disk_operations_labels["direction"] = metric
observer.observe(
getattr(counters, "{}_count".format(metric)),
self._system_disk_operations_labels,
)

def _get_system_disk_time(self, observer: metrics.SumObserver) -> None:
"""Observer callback for disk time
@@ -505,12 +514,13 @@ def _get_system_disk_time(self, observer: metrics.SumObserver) -> None:
"""
for device, counters in psutil.disk_io_counters(perdisk=True).items():
for metric in self._config["system.disk.time"]:
self._system_disk_time_labels["device"] = device
self._system_disk_time_labels["direction"] = metric
observer.observe(
getattr(counters, "{}_time".format(metric)) / 1000,
self._system_disk_time_labels,
)
if hasattr(counters, "{}_time".format(metric)):
self._system_disk_time_labels["device"] = device
self._system_disk_time_labels["direction"] = metric
observer.observe(
getattr(counters, "{}_time".format(metric)) / 1000,
self._system_disk_time_labels,
)

def _get_system_disk_merged(self, observer: metrics.SumObserver) -> None:
"""Observer callback for disk merged operations
@@ -524,12 +534,13 @@ def _get_system_disk_merged(self, observer: metrics.SumObserver) -> None:

for device, counters in psutil.disk_io_counters(perdisk=True).items():
for metric in self._config["system.disk.time"]:
self._system_disk_merged_labels["device"] = device
self._system_disk_merged_labels["direction"] = metric
observer.observe(
getattr(counters, "{}_merged_count".format(metric)),
self._system_disk_merged_labels,
)
if hasattr(counters, "{}_merged_count".format(metric)):
self._system_disk_merged_labels["device"] = device
self._system_disk_merged_labels["direction"] = metric
observer.observe(
getattr(counters, "{}_merged_count".format(metric)),
self._system_disk_merged_labels,
)

# TODO Add _get_system_filesystem_usage
# TODO Add _get_system_filesystem_utilization
@@ -548,14 +559,17 @@ def _get_system_network_dropped_packets(
for device, counters in psutil.net_io_counters(pernic=True).items():
for metric in self._config["system.network.dropped.packets"]:
in_out = {"receive": "in", "transmit": "out"}[metric]
self._system_network_dropped_packets_labels["device"] = device
self._system_network_dropped_packets_labels[
"direction"
] = metric
observer.observe(
getattr(counters, "drop{}".format(in_out)),
self._system_network_dropped_packets_labels,
)
if hasattr(counters, "drop{}".format(in_out)):
self._system_network_dropped_packets_labels[
"device"
] = device
self._system_network_dropped_packets_labels[
"direction"
] = metric
observer.observe(
getattr(counters, "drop{}".format(in_out)),
self._system_network_dropped_packets_labels,
)

def _get_system_network_packets(
self, observer: metrics.SumObserver
@@ -569,12 +583,13 @@ def _get_system_network_packets(
for device, counters in psutil.net_io_counters(pernic=True).items():
for metric in self._config["system.network.dropped.packets"]:
recv_sent = {"receive": "recv", "transmit": "sent"}[metric]
self._system_network_packets_labels["device"] = device
self._system_network_packets_labels["direction"] = metric
observer.observe(
getattr(counters, "packets_{}".format(recv_sent)),
self._system_network_packets_labels,
)
if hasattr(counters, "packets_{}".format(recv_sent)):
self._system_network_packets_labels["device"] = device
self._system_network_packets_labels["direction"] = metric
observer.observe(
getattr(counters, "packets_{}".format(recv_sent)),
self._system_network_packets_labels,
)

def _get_system_network_errors(
self, observer: metrics.SumObserver
@@ -587,12 +602,13 @@ def _get_system_network_errors(
for device, counters in psutil.net_io_counters(pernic=True).items():
for metric in self._config["system.network.errors"]:
in_out = {"receive": "in", "transmit": "out"}[metric]
self._system_network_errors_labels["device"] = device
self._system_network_errors_labels["direction"] = metric
observer.observe(
getattr(counters, "err{}".format(in_out)),
self._system_network_errors_labels,
)
if hasattr(counters, "err{}".format(in_out)):
self._system_network_errors_labels["device"] = device
self._system_network_errors_labels["direction"] = metric
observer.observe(
getattr(counters, "err{}".format(in_out)),
self._system_network_errors_labels,
)

def _get_system_network_io(self, observer: metrics.SumObserver) -> None:
"""Observer callback for network IO
@@ -604,12 +620,13 @@ def _get_system_network_io(self, observer: metrics.SumObserver) -> None:
for device, counters in psutil.net_io_counters(pernic=True).items():
for metric in self._config["system.network.dropped.packets"]:
recv_sent = {"receive": "recv", "transmit": "sent"}[metric]
self._system_network_io_labels["device"] = device
self._system_network_io_labels["direction"] = metric
observer.observe(
getattr(counters, "bytes_{}".format(recv_sent)),
self._system_network_io_labels,
)
if hasattr(counters, "bytes_{}".format(recv_sent)):
self._system_network_io_labels["device"] = device
self._system_network_io_labels["direction"] = metric
observer.observe(
getattr(counters, "bytes_{}".format(recv_sent)),
self._system_network_io_labels,
)

def _get_system_network_connections(
self, observer: metrics.UpDownSumObserver
@@ -662,10 +679,11 @@ def _get_runtime_memory(self, observer: metrics.SumObserver) -> None:
"""
proc_memory = self._proc.memory_info()
for metric in self._config["runtime.memory"]:
self._runtime_memory_labels["type"] = metric
observer.observe(
getattr(proc_memory, metric), self._runtime_memory_labels,
)
if hasattr(proc_memory, metric):
self._runtime_memory_labels["type"] = metric
observer.observe(
getattr(proc_memory, metric), self._runtime_memory_labels,
)

def _get_runtime_cpu_time(self, observer: metrics.SumObserver) -> None:
"""Observer callback for runtime CPU time
@@ -675,10 +693,11 @@ def _get_runtime_cpu_time(self, observer: metrics.SumObserver) -> None:
"""
proc_cpu = self._proc.cpu_times()
for metric in self._config["runtime.cpu.time"]:
self._runtime_cpu_time_labels["type"] = metric
observer.observe(
getattr(proc_cpu, metric), self._runtime_cpu_time_labels,
)
if hasattr(proc_cpu, metric):
self._runtime_cpu_time_labels["type"] = metric
observer.observe(
getattr(proc_cpu, metric), self._runtime_cpu_time_labels,
)

def _get_runtime_gc_count(self, observer: metrics.SumObserver) -> None:
"""Observer callback for garbage collection