diff --git a/docs/configuration/cluster_manager/cluster_stats.rst b/docs/configuration/cluster_manager/cluster_stats.rst index a0c0f6a519bd6..831b534e93985 100644 --- a/docs/configuration/cluster_manager/cluster_stats.rst +++ b/docs/configuration/cluster_manager/cluster_stats.rst @@ -22,8 +22,8 @@ Every cluster has a statistics tree rooted at *cluster..* with the followi upstream_cx_connect_fail, Counter, Total connection failures upstream_cx_connect_timeout, Counter, Total connection timeouts upstream_cx_overflow, Counter, Total times that the cluster's connection circuit breaker overflowed - upstream_cx_connect_ms, Timer, Connection establishment milliseconds - upstream_cx_length_ms, Timer, Connection length milliseconds + upstream_cx_connect_ms, Histogram, Connection establishment milliseconds + upstream_cx_length_ms, Histogram, Connection length milliseconds upstream_cx_destroy, Counter, Total destroyed connections upstream_cx_destroy_local, Counter, Total connections destroyed locally upstream_cx_destroy_remote, Counter, Total connections destroyed remotely @@ -120,16 +120,16 @@ are rooted at *cluster..* and contain the following statistics: upstream_rq_<\*xx>, Counter, "Aggregate HTTP response codes (e.g., 2xx, 3xx, etc.)" upstream_rq_<\*>, Counter, "Specific HTTP response codes (e.g., 201, 302, etc.)" - upstream_rq_time, Timer, Request time milliseconds + upstream_rq_time, Histogram, Request time milliseconds canary.upstream_rq_<\*xx>, Counter, Upstream canary aggregate HTTP response codes canary.upstream_rq_<\*>, Counter, Upstream canary specific HTTP response codes - canary.upstream_rq_time, Timer, Upstream canary request time milliseconds + canary.upstream_rq_time, Histogram, Upstream canary request time milliseconds internal.upstream_rq_<\*xx>, Counter, Internal origin aggregate HTTP response codes internal.upstream_rq_<\*>, Counter, Internal origin specific HTTP response codes - internal.upstream_rq_time, Timer, Internal origin request time milliseconds 
+ internal.upstream_rq_time, Histogram, Internal origin request time milliseconds external.upstream_rq_<\*xx>, Counter, External origin aggregate HTTP response codes external.upstream_rq_<\*>, Counter, External origin specific HTTP response codes - external.upstream_rq_time, Timer, External origin request time milliseconds + external.upstream_rq_time, Histogram, External origin request time milliseconds .. _config_cluster_manager_cluster_stats_alt_tree: @@ -156,7 +156,7 @@ Envoy will track the following statistics in *cluster..zone.., Counter, "Aggregate HTTP response codes (e.g., 2xx, 3xx, etc.)" upstream_rq_<\*>, Counter, "Specific HTTP response codes (e.g., 201, 302, etc.)" - upstream_rq_time, Timer, Request time milliseconds + upstream_rq_time, Histogram, Request time milliseconds Load balancer statistics ------------------------ diff --git a/docs/configuration/http_conn_man/stats.rst b/docs/configuration/http_conn_man/stats.rst index 495fe926bc8dd..7092134e966bf 100644 --- a/docs/configuration/http_conn_man/stats.rst +++ b/docs/configuration/http_conn_man/stats.rst @@ -27,7 +27,7 @@ statistics: downstream_cx_websocket_active, Gauge, Total active WebSocket connections downstream_cx_http2_active, Gauge, Total active HTTP/2 connections downstream_cx_protocol_error, Counter, Total protocol errors - downstream_cx_length_ms, Timer, Connection length milliseconds + downstream_cx_length_ms, Histogram, Connection length milliseconds downstream_cx_rx_bytes_total, Counter, Total bytes received downstream_cx_rx_bytes_buffered, Gauge, Total received bytes currently buffered downstream_cx_tx_bytes_total, Counter, Total bytes sent @@ -50,7 +50,7 @@ statistics: downstream_rq_4xx, Counter, Total 4xx responses downstream_rq_5xx, Counter, Total 5xx responses downstream_rq_ws_on_non_ws_route, Counter, Total WebSocket upgrade requests rejected by non WebSocket routes - downstream_rq_time, Timer, Request time milliseconds + downstream_rq_time, Histogram, Request time milliseconds 
rs_too_large, Counter, Total response errors due to buffering an overly large body. Per user agent statistics diff --git a/docs/configuration/http_filters/dynamodb_filter.rst b/docs/configuration/http_filters/dynamodb_filter.rst index 34955c718420f..af94e00d964b8 100644 --- a/docs/configuration/http_filters/dynamodb_filter.rst +++ b/docs/configuration/http_filters/dynamodb_filter.rst @@ -32,9 +32,9 @@ namespace. :widths: 1, 1, 2 upstream_rq_total, Counter, Total number of requests with - upstream_rq_time, Timer, Time spent on + upstream_rq_time, Histogram, Time spent on upstream_rq_total_xxx, Counter, Total number of requests with per response code (503/2xx/etc) - upstream_rq_time_xxx, Timer, Time spent on per response code (400/3xx/etc) + upstream_rq_time_xxx, Histogram, Time spent on per response code (400/3xx/etc) Per table stats can be found in the *http..dynamodb.table..* namespace. Most of the operations to DynamoDB involve a single table, but BatchGetItem and BatchWriteItem can @@ -46,9 +46,9 @@ in all operations from the batch. 
:widths: 1, 1, 2 upstream_rq_total, Counter, Total number of requests on table - upstream_rq_time, Timer, Time spent on table + upstream_rq_time, Histogram, Time spent on table upstream_rq_total_xxx, Counter, Total number of requests on table per response code (503/2xx/etc) - upstream_rq_time_xxx, Timer, Time spent on table per response code (400/3xx/etc) + upstream_rq_time_xxx, Histogram, Time spent on table per response code (400/3xx/etc) *Disclaimer: Please note that this is a pre-release Amazon DynamoDB feature that is not yet widely available.* Per partition and operation stats can be found in the *http..dynamodb.table..* diff --git a/docs/configuration/http_filters/router_filter.rst b/docs/configuration/http_filters/router_filter.rst index 042b55aec16f1..9cdc294d9b119 100644 --- a/docs/configuration/http_filters/router_filter.rst +++ b/docs/configuration/http_filters/router_filter.rst @@ -266,7 +266,7 @@ statistics: upstream_rq_<\*xx>, Counter, "Aggregate HTTP response codes (e.g., 2xx, 3xx, etc.)" upstream_rq_<\*>, Counter, "Specific HTTP response codes (e.g., 201, 302, etc.)" - upstream_rq_time, Timer, Request time milliseconds + upstream_rq_time, Histogram, Request time milliseconds Runtime ------- diff --git a/docs/configuration/listeners/stats.rst b/docs/configuration/listeners/stats.rst index e2be967788daa..821019160ffb9 100644 --- a/docs/configuration/listeners/stats.rst +++ b/docs/configuration/listeners/stats.rst @@ -12,7 +12,7 @@ Every listener has a statistics tree rooted at *listener..* with the follo downstream_cx_total, Counter, Total connections downstream_cx_destroy, Counter, Total destroyed connections downstream_cx_active, Gauge, Total active connections - downstream_cx_length_ms, Timer, Connection length milliseconds + downstream_cx_length_ms, Histogram, Connection length milliseconds ssl.connection_error, Counter, Total TLS connection errors not including failed certificate verifications ssl.handshake, Counter, Total successful TLS 
connection handshakes ssl.no_certificate, Counter, Total successul TLS connections with no client certificate diff --git a/docs/configuration/network_filters/mongo_proxy_filter.rst b/docs/configuration/network_filters/mongo_proxy_filter.rst index 583981980c3bf..904f8bffa8820 100644 --- a/docs/configuration/network_filters/mongo_proxy_filter.rst +++ b/docs/configuration/network_filters/mongo_proxy_filter.rst @@ -128,7 +128,7 @@ namespace. total, Counter, Number of commands reply_num_docs, Histogram, Number of documents in reply reply_size, Histogram, Size of the reply in bytes - reply_time_ms, Timer, Command time in milliseconds + reply_time_ms, Histogram, Command time in milliseconds .. _config_network_filters_mongo_proxy_collection_stats: @@ -147,7 +147,7 @@ The MongoDB filter will gather statistics for queries in the multi_get, Counter, Number of multi gets reply_num_docs, Histogram, Number of documents in reply reply_size, Histogram, Size of the reply in bytes - reply_time_ms, Timer, Query time in milliseconds + reply_time_ms, Histogram, Query time in milliseconds .. _config_network_filters_mongo_proxy_callsite_stats: diff --git a/docs/intro/arch_overview/statistics.rst b/docs/intro/arch_overview/statistics.rst index c5053d31302d1..38325f781fbd9 100644 --- a/docs/intro/arch_overview/statistics.rst +++ b/docs/intro/arch_overview/statistics.rst @@ -18,6 +18,8 @@ documented in detail in the operations guide. Envoy uses statsd as the statistics output format, though plugging in a different statistics sink would not be difficult. Both TCP and UDP statsd is supported. Internally, counters and gauges are -batched and periodically flushed to improve performance. Timers are written as they are received. +batched and periodically flushed to improve performance. Histograms are written as they are +received. Note: what were previously referred to as timers have become histograms as the only +difference between the two representations was the units. 
Statistics :ref:`configuration `. diff --git a/docs/operations/admin.rst b/docs/operations/admin.rst index 0a63aab88523b..32abd3190fffc 100644 --- a/docs/operations/admin.rst +++ b/docs/operations/admin.rst @@ -128,7 +128,7 @@ The fields are: .. http:get:: /stats - Outputs all statistics on demand. This includes only counters and gauges. Timers are not output as - Envoy currently has no built in histogram support and relies on statsd for timer aggregation. This - command is very useful for local debugging. See :ref:`here ` for more - information. + Outputs all statistics on demand. This includes only counters and gauges. Histograms are not + output as Envoy currently has no built in histogram support and relies on statsd for + aggregation. This command is very useful for local debugging. See :ref:`here ` + for more information. diff --git a/include/envoy/stats/BUILD b/include/envoy/stats/BUILD index 5ed8d0a807171..05da24e2ff8aa 100644 --- a/include/envoy/stats/BUILD +++ b/include/envoy/stats/BUILD @@ -13,6 +13,15 @@ envoy_cc_library( hdrs = ["stats.h"], ) +envoy_cc_library( + name = "timespan", + hdrs = ["timespan.h"], + deps = [ + ":stats_interface", + "//include/envoy/common:time_interface", + ], +) + envoy_cc_library( name = "stats_macros", hdrs = ["stats_macros.h"], diff --git a/include/envoy/stats/stats.h b/include/envoy/stats/stats.h index 32567660e43ed..c99bb01e983e5 100644 --- a/include/envoy/stats/stats.h +++ b/include/envoy/stats/stats.h @@ -19,21 +19,32 @@ class Instance; namespace Stats { +/** + * General interface for all stats objects. + */ +class Metric { +public: + virtual ~Metric() {} + /** + * Returns the full name of the Metric. + */ + virtual const std::string& name() const PURE; +}; + /** * An always incrementing counter with latching capability. Each increment is added both to a * global counter as well as periodic counter. Calling latch() returns the periodic counter and * clears it. 
*/ -class Counter { +class Counter : public virtual Metric { public: virtual ~Counter() {} virtual void add(uint64_t amount) PURE; virtual void inc() PURE; virtual uint64_t latch() PURE; - virtual std::string name() PURE; virtual void reset() PURE; - virtual bool used() PURE; - virtual uint64_t value() PURE; + virtual bool used() const PURE; + virtual uint64_t value() const PURE; }; typedef std::shared_ptr CounterSharedPtr; @@ -41,57 +52,39 @@ typedef std::shared_ptr CounterSharedPtr; /** * A gauge that can both increment and decrement. */ -class Gauge { +class Gauge : public virtual Metric { public: virtual ~Gauge() {} virtual void add(uint64_t amount) PURE; virtual void dec() PURE; virtual void inc() PURE; - virtual std::string name() PURE; virtual void set(uint64_t value) PURE; virtual void sub(uint64_t amount) PURE; - virtual bool used() PURE; - virtual uint64_t value() PURE; + virtual bool used() const PURE; + virtual uint64_t value() const PURE; }; typedef std::shared_ptr GaugeSharedPtr; /** - * An individual timespan that is owned by a timer. The initial time is captured on construction. - * A timespan must be completed via complete() for it to be stored. If the timespan is deleted - * this will be treated as a cancellation. + * A histogram that records values one at a time. + * Note: Histograms now incorporate what used to be timers because the only difference between the + * two stat types was the units being represented. It is assumed that no downstream user of this + * class (Sinks, in particular) will need to explicitly differentiate between histograms + * representing durations and histograms representing other types of data. */ -class Timespan { +class Histogram : public virtual Metric { public: - virtual ~Timespan() {} - - /** - * Complete the span using the default name of the timer that the span was allocated from. - */ - virtual void complete() PURE; + virtual ~Histogram() {} /** - * Complete the span using a dynamic name. 
This is useful if a span needs to get counted - * against a timer with a dynamic name. + * Records an unsigned value. If a timer, values are in units of milliseconds. */ - virtual void complete(const std::string& dynamic_name) PURE; + virtual void recordValue(uint64_t value) PURE; }; -typedef std::unique_ptr TimespanPtr; - -/** - * A timer that can capture timespans. - */ -class Timer { -public: - virtual ~Timer() {} - - virtual TimespanPtr allocateSpan() PURE; - virtual std::string name() PURE; -}; - -typedef std::shared_ptr TimerSharedPtr; +typedef std::shared_ptr HistogramSharedPtr; /** * A sink for stats. Each sink is responsible for writing stats to a backing store. @@ -109,12 +102,12 @@ class Sink { /** * Flush a counter delta. */ - virtual void flushCounter(const std::string& name, uint64_t delta) PURE; + virtual void flushCounter(const Counter& counter, uint64_t delta) PURE; /** * Flush a gauge value. */ - virtual void flushGauge(const std::string& name, uint64_t value) PURE; + virtual void flushGauge(const Gauge& gauge, uint64_t value) PURE; /** * This will be called after beginFlush(), some number of flushCounter(), and some number of @@ -125,12 +118,7 @@ class Sink { /** * Flush a histogram value. */ - virtual void onHistogramComplete(const std::string& name, uint64_t value) PURE; - - /** - * Flush a timespan value. - */ - virtual void onTimespanComplete(const std::string& name, std::chrono::milliseconds ms) PURE; + virtual void onHistogramComplete(const Histogram& histogram, uint64_t value) PURE; }; typedef std::unique_ptr SinkPtr; @@ -157,12 +145,7 @@ class Scope { /** * Deliver an individual histogram value to all registered sinks. */ - virtual void deliverHistogramToSinks(const std::string& name, uint64_t value) PURE; - - /** - * Deliver an individual timespan completion to all registered sinks. 
- */ - virtual void deliverTimingToSinks(const std::string& name, std::chrono::milliseconds ms) PURE; + virtual void deliverHistogramToSinks(const Histogram& histogram, uint64_t value) PURE; /** * @return a counter within the scope's namespace. @@ -175,9 +158,9 @@ class Scope { virtual Gauge& gauge(const std::string& name) PURE; /** - * @return a timer within the scope's namespace. + * @return a histogram within the scope's namespace with a particular value type. */ - virtual Timer& timer(const std::string& name) PURE; + virtual Histogram& histogram(const std::string& name) PURE; }; /** diff --git a/include/envoy/stats/stats_macros.h b/include/envoy/stats/stats_macros.h index 74b4fe83b4039..bbc921dfba3ff 100644 --- a/include/envoy/stats/stats_macros.h +++ b/include/envoy/stats/stats_macros.h @@ -10,33 +10,33 @@ namespace Envoy { * is also easy to mock and test. The general flow looks like this: * * Define a block of stats like this: - * #define MY_COOL_STATS(COUNTER, GAUGE, TIMER) \ + * #define MY_COOL_STATS(COUNTER, GAUGE, HISTOGRAM) \ * COUNTER(counter1) * GAUGE(gauge1) - * TIMER(timer1) + * HISTOGRAM(histogram1) * ... 
* * Now actually put these stats somewhere, usually as a member of a struct: * struct MyCoolStats { - * MY_COOL_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_TIMER_STRUCT) + * MY_COOL_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT) * }; * * Finally, when you want to actually instantiate the above struct using a Stats::Pool, you do: * MyCoolStats stats{ - * MY_COOL_STATS(POOL_COUNTER(...), POOL_GAUGE(...), POOL_TIMER(...))}; + * MY_COOL_STATS(POOL_COUNTER(...), POOL_GAUGE(...), POOL_HISTOGRAM(...))}; */ #define GENERATE_COUNTER_STRUCT(NAME) Stats::Counter& NAME##_; #define GENERATE_GAUGE_STRUCT(NAME) Stats::Gauge& NAME##_; -#define GENERATE_TIMER_STRUCT(NAME) Stats::Timer& NAME##_; +#define GENERATE_HISTOGRAM_STRUCT(NAME) Stats::Histogram& NAME##_; #define FINISH_STAT_DECL_(X) + std::string(#X)), #define POOL_COUNTER_PREFIX(POOL, PREFIX) (POOL).counter(PREFIX FINISH_STAT_DECL_ #define POOL_GAUGE_PREFIX(POOL, PREFIX) (POOL).gauge(PREFIX FINISH_STAT_DECL_ -#define POOL_TIMER_PREFIX(POOL, PREFIX) (POOL).timer(PREFIX FINISH_STAT_DECL_ +#define POOL_HISTOGRAM_PREFIX(POOL, PREFIX) (POOL).histogram(PREFIX FINISH_STAT_DECL_ #define POOL_COUNTER(POOL) POOL_COUNTER_PREFIX(POOL, "") #define POOL_GAUGE(POOL) POOL_GAUGE_PREFIX(POOL, "") -#define POOL_TIMER(POOL) POOL_TIMER_PREFIX(POOL, "") +#define POOL_HISTOGRAM(POOL) POOL_HISTOGRAM_PREFIX(POOL, "") } // Envoy diff --git a/include/envoy/stats/timespan.h b/include/envoy/stats/timespan.h new file mode 100644 index 0000000000000..1d3877bf579c5 --- /dev/null +++ b/include/envoy/stats/timespan.h @@ -0,0 +1,42 @@ +#pragma once + +#include + +#include "envoy/common/time.h" +#include "envoy/stats/stats.h" + +namespace Envoy { +namespace Stats { + +/** + * An individual timespan that flushes its measured value (in milliseconds) to a histogram. The + * initial time is captured on construction. A timespan must be completed via complete() for it to + * be stored. 
If the timespan is deleted this will be treated as a cancellation. + */ +class Timespan { +public: + Timespan(Histogram& histogram) + : histogram_(histogram), start_(std::chrono::steady_clock::now()) {} + + /** + * Complete the timespan and send the time to the histogram. + */ + void complete() { histogram_.recordValue(getRawDuration().count()); } + + /** + * Get duration since the creation of the span. + */ + std::chrono::milliseconds getRawDuration() { + return std::chrono::duration_cast(std::chrono::steady_clock::now() - + start_); + } + +private: + Histogram& histogram_; + const MonotonicTime start_; +}; + +typedef std::unique_ptr TimespanPtr; + +} // namespace Stats +} // namespace Envoy diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index d8144335b5ad5..a669e930d281e 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -174,69 +174,69 @@ class HostSet { * All cluster stats. @see stats_macros.h */ // clang-format off -#define ALL_CLUSTER_STATS(COUNTER, GAUGE, TIMER) \ - COUNTER(lb_healthy_panic) \ - COUNTER(lb_local_cluster_not_ok) \ - COUNTER(lb_recalculate_zone_structures) \ - COUNTER(lb_zone_cluster_too_small) \ - COUNTER(lb_zone_no_capacity_left) \ - COUNTER(lb_zone_number_differs) \ - COUNTER(lb_zone_routing_all_directly) \ - COUNTER(lb_zone_routing_sampled) \ - COUNTER(lb_zone_routing_cross_zone) \ - COUNTER(upstream_cx_total) \ - GAUGE (upstream_cx_active) \ - COUNTER(upstream_cx_http1_total) \ - COUNTER(upstream_cx_http2_total) \ - COUNTER(upstream_cx_connect_fail) \ - COUNTER(upstream_cx_connect_timeout) \ - COUNTER(upstream_cx_overflow) \ - TIMER (upstream_cx_connect_ms) \ - TIMER (upstream_cx_length_ms) \ - COUNTER(upstream_cx_destroy) \ - COUNTER(upstream_cx_destroy_local) \ - COUNTER(upstream_cx_destroy_remote) \ - COUNTER(upstream_cx_destroy_with_active_rq) \ - COUNTER(upstream_cx_destroy_local_with_active_rq) \ - COUNTER(upstream_cx_destroy_remote_with_active_rq) \ - 
COUNTER(upstream_cx_close_notify) \ - COUNTER(upstream_cx_rx_bytes_total) \ - GAUGE (upstream_cx_rx_bytes_buffered) \ - COUNTER(upstream_cx_tx_bytes_total) \ - GAUGE (upstream_cx_tx_bytes_buffered) \ - COUNTER(upstream_cx_protocol_error) \ - COUNTER(upstream_cx_max_requests) \ - COUNTER(upstream_cx_none_healthy) \ - COUNTER(upstream_rq_total) \ - GAUGE (upstream_rq_active) \ - COUNTER(upstream_rq_pending_total) \ - COUNTER(upstream_rq_pending_overflow) \ - COUNTER(upstream_rq_pending_failure_eject) \ - GAUGE (upstream_rq_pending_active) \ - COUNTER(upstream_rq_cancelled) \ - COUNTER(upstream_rq_maintenance_mode) \ - COUNTER(upstream_rq_timeout) \ - COUNTER(upstream_rq_per_try_timeout) \ - COUNTER(upstream_rq_rx_reset) \ - COUNTER(upstream_rq_tx_reset) \ - COUNTER(upstream_rq_retry) \ - COUNTER(upstream_rq_retry_success) \ - COUNTER(upstream_rq_retry_overflow) \ - COUNTER(upstream_flow_control_paused_reading_total) \ - COUNTER(upstream_flow_control_resumed_reading_total) \ - COUNTER(upstream_flow_control_backed_up_total) \ - COUNTER(upstream_flow_control_drained_total) \ - COUNTER(bind_errors) \ - GAUGE (max_host_weight) \ - COUNTER(membership_change) \ - GAUGE (membership_healthy) \ - GAUGE (membership_total) \ - COUNTER(retry_or_shadow_abandoned) \ - COUNTER(update_attempt) \ - COUNTER(update_success) \ - COUNTER(update_failure) \ - COUNTER(update_empty) \ - GAUGE(version) +#define ALL_CLUSTER_STATS(COUNTER, GAUGE, HISTOGRAM) \ + COUNTER (lb_healthy_panic) \ + COUNTER (lb_local_cluster_not_ok) \ + COUNTER (lb_recalculate_zone_structures) \ + COUNTER (lb_zone_cluster_too_small) \ + COUNTER (lb_zone_no_capacity_left) \ + COUNTER (lb_zone_number_differs) \ + COUNTER (lb_zone_routing_all_directly) \ + COUNTER (lb_zone_routing_sampled) \ + COUNTER (lb_zone_routing_cross_zone) \ + COUNTER (upstream_cx_total) \ + GAUGE (upstream_cx_active) \ + COUNTER (upstream_cx_http1_total) \ + COUNTER (upstream_cx_http2_total) \ + COUNTER (upstream_cx_connect_fail) \ + COUNTER 
(upstream_cx_connect_timeout) \ + COUNTER (upstream_cx_overflow) \ + HISTOGRAM(upstream_cx_connect_ms) \ + HISTOGRAM(upstream_cx_length_ms) \ + COUNTER (upstream_cx_destroy) \ + COUNTER (upstream_cx_destroy_local) \ + COUNTER (upstream_cx_destroy_remote) \ + COUNTER (upstream_cx_destroy_with_active_rq) \ + COUNTER (upstream_cx_destroy_local_with_active_rq) \ + COUNTER (upstream_cx_destroy_remote_with_active_rq) \ + COUNTER (upstream_cx_close_notify) \ + COUNTER (upstream_cx_rx_bytes_total) \ + GAUGE (upstream_cx_rx_bytes_buffered) \ + COUNTER (upstream_cx_tx_bytes_total) \ + GAUGE (upstream_cx_tx_bytes_buffered) \ + COUNTER (upstream_cx_protocol_error) \ + COUNTER (upstream_cx_max_requests) \ + COUNTER (upstream_cx_none_healthy) \ + COUNTER (upstream_rq_total) \ + GAUGE (upstream_rq_active) \ + COUNTER (upstream_rq_pending_total) \ + COUNTER (upstream_rq_pending_overflow) \ + COUNTER (upstream_rq_pending_failure_eject) \ + GAUGE (upstream_rq_pending_active) \ + COUNTER (upstream_rq_cancelled) \ + COUNTER (upstream_rq_maintenance_mode) \ + COUNTER (upstream_rq_timeout) \ + COUNTER (upstream_rq_per_try_timeout) \ + COUNTER (upstream_rq_rx_reset) \ + COUNTER (upstream_rq_tx_reset) \ + COUNTER (upstream_rq_retry) \ + COUNTER (upstream_rq_retry_success) \ + COUNTER (upstream_rq_retry_overflow) \ + COUNTER (upstream_flow_control_paused_reading_total) \ + COUNTER (upstream_flow_control_resumed_reading_total) \ + COUNTER (upstream_flow_control_backed_up_total) \ + COUNTER (upstream_flow_control_drained_total) \ + COUNTER (bind_errors) \ + GAUGE (max_host_weight) \ + COUNTER (membership_change) \ + GAUGE (membership_healthy) \ + GAUGE (membership_total) \ + COUNTER (retry_or_shadow_abandoned) \ + COUNTER (update_attempt) \ + COUNTER (update_success) \ + COUNTER (update_failure) \ + COUNTER (update_empty) \ + GAUGE (version) // clang-format on @@ -244,7 +244,7 @@ class HostSet { * Struct definition for all cluster stats. 
@see stats_macros.h */ struct ClusterStats { - ALL_CLUSTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_TIMER_STRUCT) + ALL_CLUSTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT) }; /** diff --git a/source/common/dynamo/dynamo_filter.cc b/source/common/dynamo/dynamo_filter.cc index 557a02f9b3623..247a8e8df400f 100644 --- a/source/common/dynamo/dynamo_filter.cc +++ b/source/common/dynamo/dynamo_filter.cc @@ -181,14 +181,16 @@ void DynamoFilter::chargeStatsPerEntity(const std::string& entity, const std::st std::to_string(status))) .inc(); - scope_.deliverTimingToSinks( - fmt::format("{}{}.{}.upstream_rq_time", stat_prefix_, entity_type, entity), latency); - scope_.deliverTimingToSinks( - fmt::format("{}{}.{}.upstream_rq_time_{}", stat_prefix_, entity_type, entity, group_string), - latency); - scope_.deliverTimingToSinks(fmt::format("{}{}.{}.upstream_rq_time_{}", stat_prefix_, entity_type, - entity, std::to_string(status)), - latency); + scope_.histogram(fmt::format("{}{}.{}.upstream_rq_time", stat_prefix_, entity_type, entity)) + .recordValue(latency.count()); + scope_ + .histogram(fmt::format("{}{}.{}.upstream_rq_time_{}", stat_prefix_, entity_type, entity, + group_string)) + .recordValue(latency.count()); + scope_ + .histogram(fmt::format("{}{}.{}.upstream_rq_time_{}", stat_prefix_, entity_type, entity, + std::to_string(status))) + .recordValue(latency.count()); } void DynamoFilter::chargeUnProcessedKeysStats(const Json::Object& json_body) { diff --git a/source/common/filter/BUILD b/source/common/filter/BUILD index 0787177caeb19..83f943cf99826 100644 --- a/source/common/filter/BUILD +++ b/source/common/filter/BUILD @@ -49,6 +49,7 @@ envoy_cc_library( "//include/envoy/network:filter_interface", "//include/envoy/stats:stats_interface", "//include/envoy/stats:stats_macros", + "//include/envoy/stats:timespan", "//include/envoy/upstream:cluster_manager_interface", "//include/envoy/upstream:upstream_interface", 
"//source/common/common:assert_lib", diff --git a/source/common/filter/tcp_proxy.cc b/source/common/filter/tcp_proxy.cc index 0ebd89eeca382..0f8d7829e00ab 100644 --- a/source/common/filter/tcp_proxy.cc +++ b/source/common/filter/tcp_proxy.cc @@ -234,10 +234,10 @@ Network::FilterStatus TcpProxy::initializeUpstreamConnection() { read_callbacks_->upstreamHost()->cluster().stats().upstream_cx_active_.inc(); read_callbacks_->upstreamHost()->stats().cx_total_.inc(); read_callbacks_->upstreamHost()->stats().cx_active_.inc(); - connect_timespan_ = - read_callbacks_->upstreamHost()->cluster().stats().upstream_cx_connect_ms_.allocateSpan(); - connected_timespan_ = - read_callbacks_->upstreamHost()->cluster().stats().upstream_cx_length_ms_.allocateSpan(); + connect_timespan_.reset(new Stats::Timespan( + read_callbacks_->upstreamHost()->cluster().stats().upstream_cx_connect_ms_)); + connected_timespan_.reset(new Stats::Timespan( + read_callbacks_->upstreamHost()->cluster().stats().upstream_cx_length_ms_)); return Network::FilterStatus::Continue; } diff --git a/source/common/filter/tcp_proxy.h b/source/common/filter/tcp_proxy.h index 041a193f377d4..fbe61ef47e010 100644 --- a/source/common/filter/tcp_proxy.h +++ b/source/common/filter/tcp_proxy.h @@ -9,6 +9,7 @@ #include "envoy/network/connection.h" #include "envoy/network/filter.h" #include "envoy/stats/stats_macros.h" +#include "envoy/stats/timespan.h" #include "envoy/upstream/cluster_manager.h" #include "envoy/upstream/upstream.h" diff --git a/source/common/http/BUILD b/source/common/http/BUILD index 2981441e56580..27de0dbfa8c15 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -114,6 +114,7 @@ envoy_cc_library( "//include/envoy/ssl:connection_interface", "//include/envoy/stats:stats_interface", "//include/envoy/stats:stats_macros", + "//include/envoy/stats:timespan", "//include/envoy/tracing:http_tracer_interface", "//include/envoy/upstream:upstream_interface", "//source/common/buffer:buffer_lib", @@ 
-216,6 +217,7 @@ envoy_cc_library( "//include/envoy/network:connection_interface", "//include/envoy/stats:stats_interface", "//include/envoy/stats:stats_macros", + "//include/envoy/stats:timespan", ], ) diff --git a/source/common/http/codes.cc b/source/common/http/codes.cc index 9e4483f12f6c1..9379028946840 100644 --- a/source/common/http/codes.cc +++ b/source/common/http/codes.cc @@ -81,31 +81,34 @@ void CodeUtility::chargeResponseStat(const ResponseStatInfo& info) { } void CodeUtility::chargeResponseTiming(const ResponseTimingInfo& info) { - info.cluster_scope_.deliverTimingToSinks(info.prefix_ + "upstream_rq_time", info.response_time_); + info.cluster_scope_.histogram(info.prefix_ + "upstream_rq_time") + .recordValue(info.response_time_.count()); if (info.upstream_canary_) { - info.cluster_scope_.deliverTimingToSinks(info.prefix_ + "canary.upstream_rq_time", - info.response_time_); + info.cluster_scope_.histogram(info.prefix_ + "canary.upstream_rq_time") + .recordValue(info.response_time_.count()); } if (info.internal_request_) { - info.cluster_scope_.deliverTimingToSinks(info.prefix_ + "internal.upstream_rq_time", - info.response_time_); + info.cluster_scope_.histogram(info.prefix_ + "internal.upstream_rq_time") + .recordValue(info.response_time_.count()); } else { - info.cluster_scope_.deliverTimingToSinks(info.prefix_ + "external.upstream_rq_time", - info.response_time_); + info.cluster_scope_.histogram(info.prefix_ + "external.upstream_rq_time") + .recordValue(info.response_time_.count()); } if (!info.request_vcluster_name_.empty()) { - info.global_scope_.deliverTimingToSinks("vhost." + info.request_vhost_name_ + ".vcluster." + - info.request_vcluster_name_ + ".upstream_rq_time", - info.response_time_); + info.global_scope_ + .histogram("vhost." + info.request_vhost_name_ + ".vcluster." + + info.request_vcluster_name_ + ".upstream_rq_time") + .recordValue(info.response_time_.count()); } // Handle per zone stats. 
if (!info.from_zone_.empty() && !info.to_zone_.empty()) { - info.cluster_scope_.deliverTimingToSinks( - fmt::format("{}zone.{}.{}.upstream_rq_time", info.prefix_, info.from_zone_, info.to_zone_), - info.response_time_); + info.cluster_scope_ + .histogram(fmt::format("{}zone.{}.{}.upstream_rq_time", info.prefix_, info.from_zone_, + info.to_zone_)) + .recordValue(info.response_time_.count()); } } diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index cea84f7b9f6ec..4337c5f418888 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -38,7 +38,7 @@ ConnectionManagerStats ConnectionManagerImpl::generateStats(const std::string& p Stats::Scope& scope) { return { {ALL_HTTP_CONN_MAN_STATS(POOL_COUNTER_PREFIX(scope, prefix), POOL_GAUGE_PREFIX(scope, prefix), - POOL_TIMER_PREFIX(scope, prefix))}, + POOL_HISTOGRAM_PREFIX(scope, prefix))}, prefix, scope}; } @@ -60,7 +60,7 @@ ConnectionManagerImpl::ConnectionManagerImpl(ConnectionManagerConfig& config, const LocalInfo::LocalInfo& local_info, Upstream::ClusterManager& cluster_manager) : config_(config), stats_(config_.stats()), - conn_length_(stats_.named_.downstream_cx_length_ms_.allocateSpan()), + conn_length_(new Stats::Timespan(stats_.named_.downstream_cx_length_ms_)), drain_close_(drain_close), random_generator_(random_generator), tracer_(tracer), runtime_(runtime), local_info_(local_info), cluster_manager_(cluster_manager), listener_stats_(config_.listenerStats()) {} @@ -348,7 +348,7 @@ ConnectionManagerImpl::ActiveStream::ActiveStream(ConnectionManagerImpl& connect snapped_route_config_(connection_manager.config_.routeConfigProvider().config()), stream_id_(ConnectionManagerUtility::generateStreamId(*snapped_route_config_, connection_manager.random_generator_)), - request_timer_(connection_manager_.stats_.named_.downstream_rq_time_.allocateSpan()), + request_timer_(new 
Stats::Timespan(connection_manager_.stats_.named_.downstream_rq_time_)), request_info_(connection_manager_.codec_->protocol()) { connection_manager_.stats_.named_.downstream_rq_total_.inc(); connection_manager_.stats_.named_.downstream_rq_active_.inc(); diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index fba7b30da227b..eb71664812da8 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -38,56 +38,56 @@ namespace Http { * All stats for the connection manager. @see stats_macros.h */ // clang-format off -#define ALL_HTTP_CONN_MAN_STATS(COUNTER, GAUGE, TIMER) \ - COUNTER(downstream_cx_total) \ - COUNTER(downstream_cx_ssl_total) \ - COUNTER(downstream_cx_http1_total) \ - COUNTER(downstream_cx_websocket_total) \ - COUNTER(downstream_cx_http2_total) \ - COUNTER(downstream_cx_destroy) \ - COUNTER(downstream_cx_destroy_remote) \ - COUNTER(downstream_cx_destroy_local) \ - COUNTER(downstream_cx_destroy_active_rq) \ - COUNTER(downstream_cx_destroy_local_active_rq) \ - COUNTER(downstream_cx_destroy_remote_active_rq) \ - GAUGE (downstream_cx_active) \ - GAUGE (downstream_cx_ssl_active) \ - GAUGE (downstream_cx_http1_active) \ - GAUGE (downstream_cx_websocket_active) \ - GAUGE (downstream_cx_http2_active) \ - COUNTER(downstream_cx_protocol_error) \ - TIMER (downstream_cx_length_ms) \ - COUNTER(downstream_cx_rx_bytes_total) \ - GAUGE (downstream_cx_rx_bytes_buffered) \ - COUNTER(downstream_cx_tx_bytes_total) \ - GAUGE (downstream_cx_tx_bytes_buffered) \ - COUNTER(downstream_cx_drain_close) \ - COUNTER(downstream_cx_idle_timeout) \ - COUNTER(downstream_flow_control_paused_reading_total) \ - COUNTER(downstream_flow_control_resumed_reading_total) \ - COUNTER(downstream_rq_total) \ - COUNTER(downstream_rq_http1_total) \ - COUNTER(downstream_rq_http2_total) \ - GAUGE (downstream_rq_active) \ - COUNTER(downstream_rq_response_before_rq_complete) \ - COUNTER(downstream_rq_rx_reset) \ - 
COUNTER(downstream_rq_tx_reset) \ - COUNTER(downstream_rq_non_relative_path) \ - COUNTER(downstream_rq_ws_on_non_ws_route) \ - COUNTER(downstream_rq_too_large) \ - COUNTER(downstream_rq_2xx) \ - COUNTER(downstream_rq_3xx) \ - COUNTER(downstream_rq_4xx) \ - COUNTER(downstream_rq_5xx) \ - TIMER (downstream_rq_time) \ - COUNTER(rs_too_large) +#define ALL_HTTP_CONN_MAN_STATS(COUNTER, GAUGE, HISTOGRAM) \ + COUNTER (downstream_cx_total) \ + COUNTER (downstream_cx_ssl_total) \ + COUNTER (downstream_cx_http1_total) \ + COUNTER (downstream_cx_websocket_total) \ + COUNTER (downstream_cx_http2_total) \ + COUNTER (downstream_cx_destroy) \ + COUNTER (downstream_cx_destroy_remote) \ + COUNTER (downstream_cx_destroy_local) \ + COUNTER (downstream_cx_destroy_active_rq) \ + COUNTER (downstream_cx_destroy_local_active_rq) \ + COUNTER (downstream_cx_destroy_remote_active_rq) \ + GAUGE (downstream_cx_active) \ + GAUGE (downstream_cx_ssl_active) \ + GAUGE (downstream_cx_http1_active) \ + GAUGE (downstream_cx_websocket_active) \ + GAUGE (downstream_cx_http2_active) \ + COUNTER (downstream_cx_protocol_error) \ + HISTOGRAM(downstream_cx_length_ms) \ + COUNTER (downstream_cx_rx_bytes_total) \ + GAUGE (downstream_cx_rx_bytes_buffered) \ + COUNTER (downstream_cx_tx_bytes_total) \ + GAUGE (downstream_cx_tx_bytes_buffered) \ + COUNTER (downstream_cx_drain_close) \ + COUNTER (downstream_cx_idle_timeout) \ + COUNTER (downstream_flow_control_paused_reading_total) \ + COUNTER (downstream_flow_control_resumed_reading_total) \ + COUNTER (downstream_rq_total) \ + COUNTER (downstream_rq_http1_total) \ + COUNTER (downstream_rq_http2_total) \ + GAUGE (downstream_rq_active) \ + COUNTER (downstream_rq_response_before_rq_complete) \ + COUNTER (downstream_rq_rx_reset) \ + COUNTER (downstream_rq_tx_reset) \ + COUNTER (downstream_rq_non_relative_path) \ + COUNTER (downstream_rq_ws_on_non_ws_route) \ + COUNTER (downstream_rq_too_large) \ + COUNTER (downstream_rq_2xx) \ + COUNTER (downstream_rq_3xx) \ + COUNTER 
(downstream_rq_4xx) \ + COUNTER (downstream_rq_5xx) \ + HISTOGRAM(downstream_rq_time) \ + COUNTER (rs_too_large) // clang-format on /** * Wrapper struct for connection manager stats. @see stats_macros.h */ struct ConnectionManagerNamedStats { - ALL_HTTP_CONN_MAN_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_TIMER_STRUCT) + ALL_HTTP_CONN_MAN_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT) }; struct ConnectionManagerStats { diff --git a/source/common/http/http1/BUILD b/source/common/http/http1/BUILD index ee6495ed3ac4f..a5d80a5b9567f 100644 --- a/source/common/http/http1/BUILD +++ b/source/common/http/http1/BUILD @@ -45,6 +45,7 @@ envoy_cc_library( "//include/envoy/http:header_map_interface", "//include/envoy/network:connection_interface", "//include/envoy/stats:stats_interface", + "//include/envoy/stats:timespan", "//include/envoy/upstream:upstream_interface", "//source/common/common:linked_object", "//source/common/common:utility_lib", diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index 06bec33a53b76..18d485c2ee1f9 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -271,8 +271,8 @@ ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) connect_timer_(parent_.dispatcher_.createTimer([this]() -> void { onConnectTimeout(); })), remaining_requests_(parent_.host_->cluster().maxRequestsPerConnection()) { - parent_.conn_connect_ms_ = - parent_.host_->cluster().stats().upstream_cx_connect_ms_.allocateSpan(); + parent_.conn_connect_ms_.reset( + new Stats::Timespan(parent_.host_->cluster().stats().upstream_cx_connect_ms_)); Upstream::Host::CreateConnectionData data = parent_.host_->createConnection(parent_.dispatcher_); real_host_description_ = data.host_description_; codec_client_ = parent_.createCodecClient(data); @@ -283,7 +283,7 @@ ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) 
parent_.host_->cluster().stats().upstream_cx_http1_total_.inc(); parent_.host_->stats().cx_total_.inc(); parent_.host_->stats().cx_active_.inc(); - conn_length_ = parent_.host_->cluster().stats().upstream_cx_length_ms_.allocateSpan(); + conn_length_.reset(new Stats::Timespan(parent_.host_->cluster().stats().upstream_cx_length_ms_)); connect_timer_->enableTimer(parent_.host_->cluster().connectTimeout()); parent_.host_->cluster().resourceManager(parent_.priority_).connections().inc(); diff --git a/source/common/http/http1/conn_pool.h b/source/common/http/http1/conn_pool.h index 164e4ff68128f..5106c75c0e9c8 100644 --- a/source/common/http/http1/conn_pool.h +++ b/source/common/http/http1/conn_pool.h @@ -9,6 +9,7 @@ #include "envoy/event/timer.h" #include "envoy/http/conn_pool.h" #include "envoy/network/connection.h" +#include "envoy/stats/timespan.h" #include "envoy/upstream/upstream.h" #include "common/common/linked_object.h" diff --git a/source/common/http/http2/BUILD b/source/common/http/http2/BUILD index 78e2631837be1..32a45a5557783 100644 --- a/source/common/http/http2/BUILD +++ b/source/common/http/http2/BUILD @@ -48,6 +48,7 @@ envoy_cc_library( "//include/envoy/event:timer_interface", "//include/envoy/http:conn_pool_interface", "//include/envoy/network:connection_interface", + "//include/envoy/stats:timespan", "//include/envoy/upstream:upstream_interface", "//source/common/http:codec_client_lib", "//source/common/network:utility_lib", diff --git a/source/common/http/http2/conn_pool.cc b/source/common/http/http2/conn_pool.cc index 62d5b6676fa9d..44cd3f2f3aba4 100644 --- a/source/common/http/http2/conn_pool.cc +++ b/source/common/http/http2/conn_pool.cc @@ -212,8 +212,8 @@ ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) : parent_(parent), connect_timer_(parent_.dispatcher_.createTimer([this]() -> void { onConnectTimeout(); })) { - parent_.conn_connect_ms_ = - parent_.host_->cluster().stats().upstream_cx_connect_ms_.allocateSpan(); + 
parent_.conn_connect_ms_.reset( + new Stats::Timespan(parent_.host_->cluster().stats().upstream_cx_connect_ms_)); Upstream::Host::CreateConnectionData data = parent_.host_->createConnection(parent_.dispatcher_); real_host_description_ = data.host_description_; client_ = parent_.createCodecClient(data); @@ -227,7 +227,7 @@ ConnPoolImpl::ActiveClient::ActiveClient(ConnPoolImpl& parent) parent_.host_->cluster().stats().upstream_cx_total_.inc(); parent_.host_->cluster().stats().upstream_cx_active_.inc(); parent_.host_->cluster().stats().upstream_cx_http2_total_.inc(); - conn_length_ = parent_.host_->cluster().stats().upstream_cx_length_ms_.allocateSpan(); + conn_length_.reset(new Stats::Timespan(parent_.host_->cluster().stats().upstream_cx_length_ms_)); client_->setConnectionStats({parent_.host_->cluster().stats().upstream_cx_rx_bytes_total_, parent_.host_->cluster().stats().upstream_cx_rx_bytes_buffered_, diff --git a/source/common/http/http2/conn_pool.h b/source/common/http/http2/conn_pool.h index 9b1ed024ff61c..c8ae2bcbf01d8 100644 --- a/source/common/http/http2/conn_pool.h +++ b/source/common/http/http2/conn_pool.h @@ -7,6 +7,7 @@ #include "envoy/event/timer.h" #include "envoy/http/conn_pool.h" #include "envoy/network/connection.h" +#include "envoy/stats/timespan.h" #include "envoy/upstream/upstream.h" #include "common/http/codec_client.h" diff --git a/source/common/http/user_agent.cc b/source/common/http/user_agent.cc index 2b74d686ccf10..63017ea0d3a41 100644 --- a/source/common/http/user_agent.cc +++ b/source/common/http/user_agent.cc @@ -12,11 +12,14 @@ namespace Envoy { namespace Http { void UserAgent::completeConnectionLength(Stats::Timespan& span) { + + // Note: stats_ and scope_ are set together, so it's assumed that scope_ will be non-nullptr if + // stats_ is. 
if (!stats_) { return; } - span.complete(prefix_ + "downstream_cx_length_ms"); + scope_->histogram(prefix_ + "downstream_cx_length_ms").recordValue(span.getRawDuration().count()); } void UserAgent::initializeFromHeaders(const HeaderMap& headers, const std::string& prefix, @@ -44,6 +47,7 @@ void UserAgent::initializeFromHeaders(const HeaderMap& headers, const std::strin stats_.reset(new UserAgentStats{ALL_USER_AGENTS_STATS(POOL_COUNTER_PREFIX(scope, prefix_))}); stats_->downstream_cx_total_.inc(); stats_->downstream_rq_total_.inc(); + scope_ = &scope; } } diff --git a/source/common/http/user_agent.h b/source/common/http/user_agent.h index d7164e149f2c2..3dc5263982fa0 100644 --- a/source/common/http/user_agent.h +++ b/source/common/http/user_agent.h @@ -7,6 +7,7 @@ #include "envoy/http/header_map.h" #include "envoy/network/connection.h" #include "envoy/stats/stats_macros.h" +#include "envoy/stats/timespan.h" namespace Envoy { /** @@ -62,6 +63,7 @@ class UserAgent { Type type_{Type::NotInitialized}; std::unique_ptr stats_; std::string prefix_; + Stats::Scope* scope_{}; }; } // namespace Http diff --git a/source/common/mongo/proxy.cc b/source/common/mongo/proxy.cc index 04b76fe934a7d..1f371f9c1d575 100644 --- a/source/common/mongo/proxy.cc +++ b/source/common/mongo/proxy.cc @@ -191,12 +191,13 @@ void ProxyFilter::chargeReplyStats(ActiveQuery& active_query, const std::string& reply_documents_byte_size += document->byteSize(); } - scope_.deliverHistogramToSinks(fmt::format("{}.reply_num_docs", prefix), - message.documents().size()); - scope_.deliverHistogramToSinks(fmt::format("{}.reply_size", prefix), reply_documents_byte_size); - scope_.deliverTimingToSinks(fmt::format("{}.reply_time_ms", prefix), - std::chrono::duration_cast( - std::chrono::steady_clock::now() - active_query.start_time_)); + scope_.histogram(fmt::format("{}.reply_num_docs", prefix)) + .recordValue(message.documents().size()); + scope_.histogram(fmt::format("{}.reply_size", 
prefix)).recordValue(reply_documents_byte_size); + scope_.histogram(fmt::format("{}.reply_time_ms", prefix)) + .recordValue(std::chrono::duration_cast( + std::chrono::steady_clock::now() - active_query.start_time_) + .count()); } void ProxyFilter::doDecode(Buffer::Instance& buffer) { diff --git a/source/common/mongo/proxy.h b/source/common/mongo/proxy.h index de2dd15f97a11..0922fe9d81615 100644 --- a/source/common/mongo/proxy.h +++ b/source/common/mongo/proxy.h @@ -41,7 +41,7 @@ typedef ConstSingleton MongoRuntimeConfig; * All mongo proxy stats. @see stats_macros.h */ // clang-format off -#define ALL_MONGO_PROXY_STATS(COUNTER, GAUGE, TIMER) \ +#define ALL_MONGO_PROXY_STATS(COUNTER, GAUGE, HISTOGRAM) \ COUNTER(decoding_error) \ COUNTER(delays_injected) \ COUNTER(op_get_more) \ @@ -68,7 +68,7 @@ typedef ConstSingleton MongoRuntimeConfig; * Struct definition for all mongo proxy stats. @see stats_macros.h */ struct MongoProxyStats { - ALL_MONGO_PROXY_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_TIMER_STRUCT) + ALL_MONGO_PROXY_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT) }; /** @@ -165,7 +165,7 @@ class ProxyFilter : public Network::Filter, MongoProxyStats generateStats(const std::string& prefix, Stats::Scope& scope) { return MongoProxyStats{ALL_MONGO_PROXY_STATS(POOL_COUNTER_PREFIX(scope, prefix), POOL_GAUGE_PREFIX(scope, prefix), - POOL_TIMER_PREFIX(scope, prefix))}; + POOL_HISTOGRAM_PREFIX(scope, prefix))}; } void chargeQueryStats(const std::string& prefix, QueryMessageInfo::QueryType query_type); diff --git a/source/common/ssl/context_impl.cc b/source/common/ssl/context_impl.cc index 0f9a4ad23469c..488232d134086 100644 --- a/source/common/ssl/context_impl.cc +++ b/source/common/ssl/context_impl.cc @@ -251,7 +251,7 @@ bool ContextImpl::verifyCertificateHash(X509* cert, const std::vector& SslStats ContextImpl::generateStats(Stats::Scope& store) { std::string prefix("ssl."); return 
{ALL_SSL_STATS(POOL_COUNTER_PREFIX(store, prefix), POOL_GAUGE_PREFIX(store, prefix), - POOL_TIMER_PREFIX(store, prefix))}; + POOL_HISTOGRAM_PREFIX(store, prefix))}; } size_t ContextImpl::daysUntilFirstCertExpires() { diff --git a/source/common/ssl/context_impl.h b/source/common/ssl/context_impl.h index a4354ed8b218b..6ce9c2cfe4bf9 100644 --- a/source/common/ssl/context_impl.h +++ b/source/common/ssl/context_impl.h @@ -22,7 +22,7 @@ namespace Envoy { namespace Ssl { // clang-format off -#define ALL_SSL_STATS(COUNTER, GAUGE, TIMER) \ +#define ALL_SSL_STATS(COUNTER, GAUGE, HISTOGRAM) \ COUNTER(connection_error) \ COUNTER(handshake) \ COUNTER(no_certificate) \ @@ -36,7 +36,7 @@ namespace Ssl { * Wrapper struct for SSL stats. @see stats_macros.h */ struct SslStats { - ALL_SSL_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_TIMER_STRUCT) + ALL_SSL_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT) }; class ContextImpl : public virtual Context { diff --git a/source/common/stats/stats_impl.cc b/source/common/stats/stats_impl.cc index 731b99bce2bb5..37aa4950d65b4 100644 --- a/source/common/stats/stats_impl.cc +++ b/source/common/stats/stats_impl.cc @@ -16,12 +16,6 @@ std::string Utility::sanitizeStatsName(const std::string& name) { return stats_name; } -void TimerImpl::TimespanImpl::complete(const std::string& dynamic_name) { - std::chrono::milliseconds ms = std::chrono::duration_cast( - std::chrono::steady_clock::now() - start_); - parent_.parent_.deliverTimingToSinks(dynamic_name, ms); -} - RawStatData* HeapRawStatDataAllocator::alloc(const std::string& name) { RawStatData* data = new RawStatData(); memset(data, 0, sizeof(RawStatData)); diff --git a/source/common/stats/stats_impl.h b/source/common/stats/stats_impl.h index f0f31552390fb..382642c85d8bd 100644 --- a/source/common/stats/stats_impl.h +++ b/source/common/stats/stats_impl.h @@ -73,12 +73,27 @@ class RawStatDataAllocator { virtual void free(RawStatData& data) PURE; }; 
+/** + * Implementation of the Metric interface. Virtual inheritance is used because the interfaces that + * will inherit from Metric will have other base classes that will also inherit from Metric. + */ +class MetricImpl : public virtual Metric { +public: + MetricImpl(const std::string& name) : name_(name) {} + + const std::string& name() const override { return name_; } + +private: + const std::string name_; +}; + /** * Counter implementation that wraps a RawStatData. */ -class CounterImpl : public Counter { +class CounterImpl : public Counter, public MetricImpl { public: - CounterImpl(RawStatData& data, RawStatDataAllocator& alloc) : data_(data), alloc_(alloc) {} + CounterImpl(RawStatData& data, RawStatDataAllocator& alloc) + : MetricImpl(data.name_), data_(data), alloc_(alloc) {} ~CounterImpl() { alloc_.free(data_); } // Stats::Counter @@ -90,10 +105,9 @@ class CounterImpl : public Counter { void inc() override { add(1); } uint64_t latch() override { return data_.pending_increment_.exchange(0); } - std::string name() override { return data_.name_; } void reset() override { data_.value_ = 0; } - bool used() override { return data_.flags_ & RawStatData::Flags::Used; } - uint64_t value() override { return data_.value_; } + bool used() const override { return data_.flags_ & RawStatData::Flags::Used; } + uint64_t value() const override { return data_.value_; } private: RawStatData& data_; @@ -103,9 +117,10 @@ class CounterImpl : public Counter { /** * Gauge implementation that wraps a RawStatData. 
*/ -class GaugeImpl : public Gauge { +class GaugeImpl : public Gauge, public MetricImpl { public: - GaugeImpl(RawStatData& data, RawStatDataAllocator& alloc) : data_(data), alloc_(alloc) {} + GaugeImpl(RawStatData& data, RawStatDataAllocator& alloc) + : MetricImpl(data.name_), data_(data), alloc_(alloc) {} ~GaugeImpl() { alloc_.free(data_); } // Stats::Gauge @@ -115,7 +130,6 @@ class GaugeImpl : public Gauge { } virtual void dec() override { sub(1); } virtual void inc() override { add(1); } - virtual std::string name() override { return data_.name_; } virtual void set(uint64_t value) override { data_.value_ = value; data_.flags_ |= RawStatData::Flags::Used; @@ -125,8 +139,8 @@ class GaugeImpl : public Gauge { ASSERT(used()); data_.value_ -= amount; } - bool used() override { return data_.flags_ & RawStatData::Flags::Used; } - virtual uint64_t value() override { return data_.value_; } + virtual uint64_t value() const override { return data_.value_; } + bool used() const override { return data_.flags_ & RawStatData::Flags::Used; } private: RawStatData& data_; @@ -134,34 +148,15 @@ class GaugeImpl : public Gauge { }; /** - * Timer implementation for the heap. + * Histogram implementation for the heap. */ -class TimerImpl : public Timer { +class HistogramImpl : public Histogram, public MetricImpl { public: - TimerImpl(const std::string& name, Store& parent) : name_(name), parent_(parent) {} + HistogramImpl(const std::string& name, Store& parent) : MetricImpl(name), parent_(parent) {} - // Stats::Timer - TimespanPtr allocateSpan() override { return TimespanPtr{new TimespanImpl(*this)}; } - std::string name() override { return name_; } + // Stats::Histogram + void recordValue(uint64_t value) override { parent_.deliverHistogramToSinks(*this, value); } -private: - /** - * Timespan implementation for the heap. 
- */ - class TimespanImpl : public Timespan { - public: - TimespanImpl(TimerImpl& parent) : parent_(parent), start_(std::chrono::steady_clock::now()) {} - - // Stats::Timespan - void complete() override { complete(parent_.name_); } - void complete(const std::string& dynamic_name) override; - - private: - TimerImpl& parent_; - MonotonicTime start_; - }; - - std::string name_; Store& parent_; }; @@ -222,18 +217,21 @@ class IsolatedStoreImpl : public Store { gauges_([this](const std::string& name) -> GaugeImpl* { return new GaugeImpl(*alloc_.alloc(name), alloc_); }), - timers_( - [this](const std::string& name) -> TimerImpl* { return new TimerImpl(name, *this); }) {} + histograms_([this](const std::string& name) -> HistogramImpl* { + return new HistogramImpl(name, *this); + }) {} // Stats::Scope Counter& counter(const std::string& name) override { return counters_.get(name); } ScopePtr createScope(const std::string& name) override { return ScopePtr{new ScopeImpl(*this, name)}; } - void deliverHistogramToSinks(const std::string&, uint64_t) override {} - void deliverTimingToSinks(const std::string&, std::chrono::milliseconds) override {} + void deliverHistogramToSinks(const Histogram&, uint64_t) override {} Gauge& gauge(const std::string& name) override { return gauges_.get(name); } - Timer& timer(const std::string& name) override { return timers_.get(name); } + Histogram& histogram(const std::string& name) override { + Histogram& histogram = histograms_.get(name); + return histogram; + } // Stats::Store std::list counters() const override { return counters_.toList(); } @@ -248,11 +246,12 @@ class IsolatedStoreImpl : public Store { ScopePtr createScope(const std::string& name) override { return ScopePtr{new ScopeImpl(parent_, prefix_ + name)}; } - void deliverHistogramToSinks(const std::string&, uint64_t) override {} - void deliverTimingToSinks(const std::string&, std::chrono::milliseconds) override {} + void deliverHistogramToSinks(const Histogram&, uint64_t) override 
{} Counter& counter(const std::string& name) override { return parent_.counter(prefix_ + name); } Gauge& gauge(const std::string& name) override { return parent_.gauge(prefix_ + name); } - Timer& timer(const std::string& name) override { return parent_.timer(prefix_ + name); } + Histogram& histogram(const std::string& name) override { + return parent_.histogram(prefix_ + name); + } IsolatedStoreImpl& parent_; const std::string prefix_; @@ -261,7 +260,7 @@ class IsolatedStoreImpl : public Store { HeapRawStatDataAllocator alloc_; IsolatedStatsCache counters_; IsolatedStatsCache gauges_; - IsolatedStatsCache timers_; + IsolatedStatsCache histograms_; }; } // namespace Stats diff --git a/source/common/stats/statsd.cc b/source/common/stats/statsd.cc index 1f672fff07da8..b1586cfcf6f08 100644 --- a/source/common/stats/statsd.cc +++ b/source/common/stats/statsd.cc @@ -60,16 +60,17 @@ UdpStatsdSink::UdpStatsdSink(ThreadLocal::SlotAllocator& tls, }); } -void UdpStatsdSink::flushCounter(const std::string& name, uint64_t delta) { - tls_->getTyped().writeCounter(name, delta); +void UdpStatsdSink::flushCounter(const Counter& counter, uint64_t delta) { + tls_->getTyped().writeCounter(counter.name(), delta); } -void UdpStatsdSink::flushGauge(const std::string& name, uint64_t value) { - tls_->getTyped().writeGauge(name, value); +void UdpStatsdSink::flushGauge(const Gauge& gauge, uint64_t value) { + tls_->getTyped().writeGauge(gauge.name(), value); } -void UdpStatsdSink::onTimespanComplete(const std::string& name, std::chrono::milliseconds ms) { - tls_->getTyped().writeTimer(name, ms); +void UdpStatsdSink::onHistogramComplete(const Histogram& histogram, uint64_t value) { + // For statsd histograms are all timers. 
+ tls_->getTyped().writeTimer(histogram.name(), std::chrono::milliseconds(value)); } char TcpStatsdSink::STAT_PREFIX[] = "envoy."; diff --git a/source/common/stats/statsd.h b/source/common/stats/statsd.h index 8ca0689da6dd5..9df9cf8d3bf0c 100644 --- a/source/common/stats/statsd.h +++ b/source/common/stats/statsd.h @@ -46,14 +46,11 @@ class UdpStatsdSink : public Sink { // Stats::Sink void beginFlush() override {} - void flushCounter(const std::string& name, uint64_t delta) override; - void flushGauge(const std::string& name, uint64_t value) override; + void flushCounter(const Counter& counter, uint64_t delta) override; + void flushGauge(const Gauge& gauge, uint64_t value) override; void endFlush() override {} - void onHistogramComplete(const std::string& name, uint64_t value) override { - // For statsd histograms are just timers. - onTimespanComplete(name, std::chrono::milliseconds(value)); - } - void onTimespanComplete(const std::string& name, std::chrono::milliseconds ms) override; + void onHistogramComplete(const Histogram& histogram, uint64_t value) override; + // Called in unit test to validate writer construction and address. 
int getFdForTests() { return tls_->getTyped().getFdForTests(); } @@ -74,23 +71,20 @@ class TcpStatsdSink : public Sink { // Stats::Sink void beginFlush() override { tls_->getTyped().beginFlush(true); } - void flushCounter(const std::string& name, uint64_t delta) override { - tls_->getTyped().flushCounter(name, delta); + void flushCounter(const Counter& counter, uint64_t delta) override { + tls_->getTyped().flushCounter(counter.name(), delta); } - void flushGauge(const std::string& name, uint64_t value) override { - tls_->getTyped().flushGauge(name, value); + void flushGauge(const Gauge& gauge, uint64_t value) override { + tls_->getTyped().flushGauge(gauge.name(), value); } void endFlush() override { tls_->getTyped().endFlush(true); } - void onHistogramComplete(const std::string& name, uint64_t value) override { - // For statsd histograms are just timers. - onTimespanComplete(name, std::chrono::milliseconds(value)); - } - - void onTimespanComplete(const std::string& name, std::chrono::milliseconds ms) override { - tls_->getTyped().onTimespanComplete(name, ms); + void onHistogramComplete(const Histogram& histogram, uint64_t value) override { + // For statsd histograms are all timers. + tls_->getTyped().onTimespanComplete(histogram.name(), + std::chrono::milliseconds(value)); } private: diff --git a/source/common/stats/thread_local_store.cc b/source/common/stats/thread_local_store.cc index 679f3a64b77ee..3c2d804fdb914 100644 --- a/source/common/stats/thread_local_store.cc +++ b/source/common/stats/thread_local_store.cc @@ -146,7 +146,7 @@ Counter& ThreadLocalStoreImpl::ScopeImpl::counter(const std::string& name) { return *central_ref; } -void ThreadLocalStoreImpl::ScopeImpl::deliverHistogramToSinks(const std::string& name, +void ThreadLocalStoreImpl::ScopeImpl::deliverHistogramToSinks(const Histogram& histogram, uint64_t value) { // Thread local deliveries must be blocked outright for histograms and timers during shutdown. 
// This is because the sinks may end up trying to create new connections via the thread local @@ -157,22 +157,8 @@ void ThreadLocalStoreImpl::ScopeImpl::deliverHistogramToSinks(const std::string& return; } - const std::string final_name = prefix_ + name; for (Sink& sink : parent_.timer_sinks_) { - sink.onHistogramComplete(final_name, value); - } -} - -void ThreadLocalStoreImpl::ScopeImpl::deliverTimingToSinks(const std::string& name, - std::chrono::milliseconds ms) { - // See comment in deliverHistogramToSinks() for why we guard this. - if (parent_.shutting_down_) { - return; - } - - const std::string final_name = prefix_ + name; - for (Sink& sink : parent_.timer_sinks_) { - sink.onTimespanComplete(final_name, ms); + sink.onHistogramComplete(histogram, value); } } @@ -203,13 +189,13 @@ Gauge& ThreadLocalStoreImpl::ScopeImpl::gauge(const std::string& name) { return *central_ref; } -Timer& ThreadLocalStoreImpl::ScopeImpl::timer(const std::string& name) { +Histogram& ThreadLocalStoreImpl::ScopeImpl::histogram(const std::string& name) { // See comments in counter(). There is no super clean way (via templates or otherwise) to // share this code so I'm leaving it largely duplicated for now. 
std::string final_name = prefix_ + name; - TimerSharedPtr* tls_ref = nullptr; + HistogramSharedPtr* tls_ref = nullptr; if (!parent_.shutting_down_ && parent_.tls_) { - tls_ref = &parent_.tls_->getTyped().scope_cache_[this].timers_[final_name]; + tls_ref = &parent_.tls_->getTyped().scope_cache_[this].histograms_[final_name]; } if (tls_ref && *tls_ref) { @@ -217,9 +203,9 @@ Timer& ThreadLocalStoreImpl::ScopeImpl::timer(const std::string& name) { } std::unique_lock lock(parent_.lock_); - TimerSharedPtr& central_ref = central_cache_.timers_[final_name]; + HistogramSharedPtr& central_ref = central_cache_.histograms_[final_name]; if (!central_ref) { - central_ref.reset(new TimerImpl(final_name, parent_)); + central_ref.reset(new HistogramImpl(final_name, parent_)); } if (tls_ref) { diff --git a/source/common/stats/thread_local_store.h b/source/common/stats/thread_local_store.h index 90b9eddec01f0..5b1647c40adeb 100644 --- a/source/common/stats/thread_local_store.h +++ b/source/common/stats/thread_local_store.h @@ -53,14 +53,13 @@ class ThreadLocalStoreImpl : public StoreRoot { // Stats::Scope Counter& counter(const std::string& name) override { return default_scope_->counter(name); } ScopePtr createScope(const std::string& name) override; - void deliverHistogramToSinks(const std::string& name, uint64_t value) override { - return default_scope_->deliverHistogramToSinks(name, value); - } - void deliverTimingToSinks(const std::string& name, std::chrono::milliseconds ms) override { - return default_scope_->deliverTimingToSinks(name, ms); + void deliverHistogramToSinks(const Histogram& histogram, uint64_t value) override { + return default_scope_->deliverHistogramToSinks(histogram, value); } Gauge& gauge(const std::string& name) override { return default_scope_->gauge(name); } - Timer& timer(const std::string& name) override { return default_scope_->timer(name); } + Histogram& histogram(const std::string& name) override { + return default_scope_->histogram(name); + }; // 
Stats::Store std::list counters() const override; @@ -76,7 +75,7 @@ class ThreadLocalStoreImpl : public StoreRoot { struct TlsCacheEntry { std::unordered_map counters_; std::unordered_map gauges_; - std::unordered_map timers_; + std::unordered_map histograms_; }; struct ScopeImpl : public Scope { @@ -89,10 +88,9 @@ class ThreadLocalStoreImpl : public StoreRoot { ScopePtr createScope(const std::string& name) override { return parent_.createScope(prefix_ + name); } - void deliverHistogramToSinks(const std::string& name, uint64_t value) override; - void deliverTimingToSinks(const std::string& name, std::chrono::milliseconds ms) override; + void deliverHistogramToSinks(const Histogram& histogram, uint64_t value) override; Gauge& gauge(const std::string& name) override; - Timer& timer(const std::string& name) override; + Histogram& histogram(const std::string& name) override; ThreadLocalStoreImpl& parent_; const std::string prefix_; diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 69ebf25519cf0..9f30bf021d1f9 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -67,7 +67,7 @@ HostImpl::createConnection(Event::Dispatcher& dispatcher, const ClusterInfo& clu void HostImpl::weight(uint32_t new_weight) { weight_ = std::max(1U, std::min(100U, new_weight)); } ClusterStats ClusterInfoImpl::generateStats(Stats::Scope& scope) { - return {ALL_CLUSTER_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope), POOL_TIMER(scope))}; + return {ALL_CLUSTER_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope), POOL_HISTOGRAM(scope))}; } ClusterInfoImpl::ClusterInfoImpl(const envoy::api::v2::Cluster& config, diff --git a/source/server/BUILD b/source/server/BUILD index 64c26f5c06cce..d998c2d01df98 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -61,6 +61,7 @@ envoy_cc_library( "//include/envoy/network:filter_interface", "//include/envoy/network:listen_socket_interface", 
"//include/envoy/network:listener_interface", + "//include/envoy/stats:timespan", "//source/common/common:linked_object", "//source/common/common:non_copyable", ], diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index 12ff693c60759..344a666e01ae7 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -3,6 +3,7 @@ #include "envoy/event/dispatcher.h" #include "envoy/event/timer.h" #include "envoy/network/filter.h" +#include "envoy/stats/timespan.h" namespace Envoy { namespace Server { @@ -147,7 +148,7 @@ void ConnectionHandlerImpl::ActiveListener::onNewConnection( ConnectionHandlerImpl::ActiveConnection::ActiveConnection(ActiveListener& listener, Network::ConnectionPtr&& new_connection) : listener_(listener), connection_(std::move(new_connection)), - conn_length_(listener_.stats_.downstream_cx_length_ms_.allocateSpan()) { + conn_length_(new Stats::Timespan(listener_.stats_.downstream_cx_length_ms_)) { // We just universally set no delay on connections. Theoretically we might at some point want // to make this configurable. 
connection_->noDelay(true); @@ -163,7 +164,7 @@ ConnectionHandlerImpl::ActiveConnection::~ActiveConnection() { } ListenerStats ConnectionHandlerImpl::generateStats(Stats::Scope& scope) { - return {ALL_LISTENER_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope), POOL_TIMER(scope))}; + return {ALL_LISTENER_STATS(POOL_COUNTER(scope), POOL_GAUGE(scope), POOL_HISTOGRAM(scope))}; } } // namespace Server diff --git a/source/server/connection_handler_impl.h b/source/server/connection_handler_impl.h index da92d7886272e..55c740ee05ef4 100644 --- a/source/server/connection_handler_impl.h +++ b/source/server/connection_handler_impl.h @@ -12,6 +12,7 @@ #include "envoy/network/filter.h" #include "envoy/network/listen_socket.h" #include "envoy/network/listener.h" +#include "envoy/stats/timespan.h" #include "common/common/linked_object.h" #include "common/common/non_copyable.h" @@ -22,18 +23,18 @@ namespace Envoy { namespace Server { // clang-format off -#define ALL_LISTENER_STATS(COUNTER, GAUGE, TIMER) \ - COUNTER(downstream_cx_total) \ - COUNTER(downstream_cx_destroy) \ - GAUGE (downstream_cx_active) \ - TIMER (downstream_cx_length_ms) +#define ALL_LISTENER_STATS(COUNTER, GAUGE, HISTOGRAM) \ + COUNTER (downstream_cx_total) \ + COUNTER (downstream_cx_destroy) \ + GAUGE (downstream_cx_active) \ + HISTOGRAM(downstream_cx_length_ms) // clang-format on /** * Wrapper struct for listener stats. 
@see stats_macros.h */ struct ListenerStats { - ALL_LISTENER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_TIMER_STRUCT) + ALL_LISTENER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT) }; /** diff --git a/source/server/server.cc b/source/server/server.cc index 0d6ebf4936ea7..a1dec267c0a92 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -115,7 +115,7 @@ void InstanceUtil::flushCountersAndGaugesToSinks(const std::list uint64_t delta = counter->latch(); if (counter->used()) { for (const auto& sink : sinks) { - sink->flushCounter(counter->name(), delta); + sink->flushCounter(*counter, delta); } } } @@ -123,7 +123,7 @@ void InstanceUtil::flushCountersAndGaugesToSinks(const std::list for (const Stats::GaugeSharedPtr& gauge : store.gauges()) { if (gauge->used()) { for (const auto& sink : sinks) { - sink->flushGauge(gauge->name(), gauge->value()); + sink->flushGauge(*gauge, gauge->value()); } } } diff --git a/test/common/dynamo/dynamo_filter_test.cc b/test/common/dynamo/dynamo_filter_test.cc index 5e949fc87d48d..0a7fba2a33cb8 100644 --- a/test/common/dynamo/dynamo_filter_test.cc +++ b/test/common/dynamo/dynamo_filter_test.cc @@ -15,6 +15,7 @@ #include "gtest/gtest.h" using testing::NiceMock; +using testing::Property; using testing::Return; using testing::ReturnRef; using testing::_; @@ -59,11 +60,21 @@ TEST_F(DynamoFilterTest, operatorPresent) { EXPECT_CALL(stats_, counter("prefix.dynamodb.operation.Get.upstream_rq_total_200")); EXPECT_CALL(stats_, counter("prefix.dynamodb.operation.Get.upstream_rq_total")); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.operation.Get.upstream_rq_time_2xx", _)); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.operation.Get.upstream_rq_time_200", _)); - EXPECT_CALL(stats_, deliverTimingToSinks("prefix.dynamodb.operation.Get.upstream_rq_time", _)); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.Get.upstream_rq_time_2xx")); + 
EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.Get.upstream_rq_time_200")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.Get.upstream_rq_time")); + EXPECT_CALL( + stats_, + deliverHistogramToSinks( + Property(&Stats::Metric::name, "prefix.dynamodb.operation.Get.upstream_rq_time_2xx"), _)); + EXPECT_CALL( + stats_, + deliverHistogramToSinks( + Property(&Stats::Metric::name, "prefix.dynamodb.operation.Get.upstream_rq_time_200"), _)); + EXPECT_CALL( + stats_, + deliverHistogramToSinks( + Property(&Stats::Metric::name, "prefix.dynamodb.operation.Get.upstream_rq_time"), _)); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true)); } @@ -149,21 +160,42 @@ TEST_F(DynamoFilterTest, HandleErrorTypeTablePresent) { EXPECT_CALL(stats_, counter("prefix.dynamodb.operation.GetItem.upstream_rq_total")); EXPECT_CALL(stats_, counter("prefix.dynamodb.operation.GetItem.upstream_rq_total_4xx")); EXPECT_CALL(stats_, counter("prefix.dynamodb.operation.GetItem.upstream_rq_total_400")); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.operation.GetItem.upstream_rq_time_4xx", _)); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.operation.GetItem.upstream_rq_time_400", _)); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.operation.GetItem.upstream_rq_time", _)); + + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.GetItem.upstream_rq_time")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.GetItem.upstream_rq_time_4xx")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.GetItem.upstream_rq_time_400")); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.GetItem.upstream_rq_time_4xx"), + _)); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.GetItem.upstream_rq_time_400"), + _)); + EXPECT_CALL( + stats_, + deliverHistogramToSinks( + 
Property(&Stats::Metric::name, "prefix.dynamodb.operation.GetItem.upstream_rq_time"), _)); EXPECT_CALL(stats_, counter("prefix.dynamodb.table.locations.upstream_rq_total_4xx")); EXPECT_CALL(stats_, counter("prefix.dynamodb.table.locations.upstream_rq_total_400")); EXPECT_CALL(stats_, counter("prefix.dynamodb.table.locations.upstream_rq_total")); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.table.locations.upstream_rq_time_4xx", _)); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.table.locations.upstream_rq_time_400", _)); - EXPECT_CALL(stats_, deliverTimingToSinks("prefix.dynamodb.table.locations.upstream_rq_time", _)); + + EXPECT_CALL(stats_, histogram("prefix.dynamodb.table.locations.upstream_rq_time_4xx")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.table.locations.upstream_rq_time_400")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.table.locations.upstream_rq_time")); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.table.locations.upstream_rq_time_4xx"), + _)); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.table.locations.upstream_rq_time_400"), + _)); + EXPECT_CALL( + stats_, + deliverHistogramToSinks( + Property(&Stats::Metric::name, "prefix.dynamodb.table.locations.upstream_rq_time"), _)); EXPECT_EQ(Http::FilterDataStatus::Continue, filter_->encodeData(error_data, true)); } @@ -197,12 +229,21 @@ TEST_F(DynamoFilterTest, BatchMultipleTables) { EXPECT_CALL(stats_, counter("prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_2xx")); EXPECT_CALL(stats_, counter("prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_200")); - EXPECT_CALL(stats_, deliverTimingToSinks( - "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx", _)); - EXPECT_CALL(stats_, deliverTimingToSinks( - "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200", _)); - EXPECT_CALL(stats_, - 
deliverTimingToSinks("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time", _)); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200")); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx"), + _)); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200"), + _)); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time"), + _)); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true)); } @@ -236,12 +277,21 @@ TEST_F(DynamoFilterTest, BatchMultipleTablesUnprocessedKeys) { EXPECT_CALL(stats_, counter("prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_2xx")); EXPECT_CALL(stats_, counter("prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_200")); - EXPECT_CALL(stats_, deliverTimingToSinks( - "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx", _)); - EXPECT_CALL(stats_, deliverTimingToSinks( - "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200", _)); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time", _)); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200")); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx"), + _)); + EXPECT_CALL(stats_, 
deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200"), + _)); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time"), + _)); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false)); @@ -292,12 +342,21 @@ TEST_F(DynamoFilterTest, BatchMultipleTablesNoUnprocessedKeys) { EXPECT_CALL(stats_, counter("prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_2xx")); EXPECT_CALL(stats_, counter("prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_200")); - EXPECT_CALL(stats_, deliverTimingToSinks( - "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx", _)); - EXPECT_CALL(stats_, deliverTimingToSinks( - "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200", _)); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time", _)); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200")); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx"), + _)); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200"), + _)); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time"), + _)); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false)); @@ -344,12 +403,21 @@ TEST_F(DynamoFilterTest, BatchMultipleTablesInvalidResponseBody) { EXPECT_CALL(stats_, 
counter("prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_2xx")); EXPECT_CALL(stats_, counter("prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_200")); - EXPECT_CALL(stats_, deliverTimingToSinks( - "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx", _)); - EXPECT_CALL(stats_, deliverTimingToSinks( - "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200", _)); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time", _)); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200")); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx"), + _)); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200"), + _)); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time"), + _)); EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, false)); @@ -390,22 +458,41 @@ TEST_F(DynamoFilterTest, bothOperationAndTableCorrect) { EXPECT_CALL(stats_, counter("prefix.dynamodb.operation.GetItem.upstream_rq_total_200")); EXPECT_CALL(stats_, counter("prefix.dynamodb.operation.GetItem.upstream_rq_total")); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.operation.GetItem.upstream_rq_time_2xx", _)); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.operation.GetItem.upstream_rq_time_200", _)); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.operation.GetItem.upstream_rq_time", _)); + EXPECT_CALL(stats_, 
histogram("prefix.dynamodb.operation.GetItem.upstream_rq_time_2xx")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.GetItem.upstream_rq_time_200")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.GetItem.upstream_rq_time")); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.GetItem.upstream_rq_time_2xx"), + _)); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.GetItem.upstream_rq_time_200"), + _)); + EXPECT_CALL( + stats_, + deliverHistogramToSinks( + Property(&Stats::Metric::name, "prefix.dynamodb.operation.GetItem.upstream_rq_time"), _)); EXPECT_CALL(stats_, counter("prefix.dynamodb.table.locations.upstream_rq_total_2xx")); EXPECT_CALL(stats_, counter("prefix.dynamodb.table.locations.upstream_rq_total_200")); EXPECT_CALL(stats_, counter("prefix.dynamodb.table.locations.upstream_rq_total")); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.table.locations.upstream_rq_time_2xx", _)); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.table.locations.upstream_rq_time_200", _)); - EXPECT_CALL(stats_, deliverTimingToSinks("prefix.dynamodb.table.locations.upstream_rq_time", _)); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.table.locations.upstream_rq_time_2xx")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.table.locations.upstream_rq_time_200")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.table.locations.upstream_rq_time")); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.table.locations.upstream_rq_time_2xx"), + _)); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.table.locations.upstream_rq_time_200"), + _)); + EXPECT_CALL( + stats_, + deliverHistogramToSinks( + Property(&Stats::Metric::name, "prefix.dynamodb.table.locations.upstream_rq_time"), _)); Http::TestHeaderMapImpl 
response_headers{{":status", "200"}}; EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter_->encodeHeaders(response_headers, true)); @@ -415,7 +502,7 @@ TEST_F(DynamoFilterTest, operatorPresentRuntimeDisabled) { setup(false); EXPECT_CALL(stats_, counter(_)).Times(0); - EXPECT_CALL(stats_, deliverTimingToSinks(_, _)).Times(0); + EXPECT_CALL(stats_, deliverHistogramToSinks(_, _)).Times(0); Http::TestHeaderMapImpl request_headers{{"x-amz-target", "version.operator"}, {"random", "random"}}; @@ -445,22 +532,41 @@ TEST_F(DynamoFilterTest, PartitionIdStats) { EXPECT_CALL(stats_, counter("prefix.dynamodb.operation.GetItem.upstream_rq_total_200")); EXPECT_CALL(stats_, counter("prefix.dynamodb.operation.GetItem.upstream_rq_total")); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.operation.GetItem.upstream_rq_time_2xx", _)); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.operation.GetItem.upstream_rq_time_200", _)); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.operation.GetItem.upstream_rq_time", _)); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.GetItem.upstream_rq_time_2xx")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.GetItem.upstream_rq_time_200")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.GetItem.upstream_rq_time")); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.GetItem.upstream_rq_time_2xx"), + _)); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.GetItem.upstream_rq_time_200"), + _)); + EXPECT_CALL( + stats_, + deliverHistogramToSinks( + Property(&Stats::Metric::name, "prefix.dynamodb.operation.GetItem.upstream_rq_time"), _)); EXPECT_CALL(stats_, counter("prefix.dynamodb.table.locations.upstream_rq_total_2xx")); EXPECT_CALL(stats_, counter("prefix.dynamodb.table.locations.upstream_rq_total_200")); EXPECT_CALL(stats_, 
counter("prefix.dynamodb.table.locations.upstream_rq_total")); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.table.locations.upstream_rq_time_2xx", _)); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.table.locations.upstream_rq_time_200", _)); - EXPECT_CALL(stats_, deliverTimingToSinks("prefix.dynamodb.table.locations.upstream_rq_time", _)); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.table.locations.upstream_rq_time_2xx")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.table.locations.upstream_rq_time_200")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.table.locations.upstream_rq_time")); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.table.locations.upstream_rq_time_2xx"), + _)); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.table.locations.upstream_rq_time_200"), + _)); + EXPECT_CALL( + stats_, + deliverHistogramToSinks( + Property(&Stats::Metric::name, "prefix.dynamodb.table.locations.upstream_rq_time"), _)); EXPECT_CALL(stats_, counter("prefix.dynamodb.table.locations.capacity.GetItem.__partition_id=ition_1")) @@ -517,12 +623,21 @@ TEST_F(DynamoFilterTest, NoPartitionIdStatsForMultipleTables) { EXPECT_CALL(stats_, counter("prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_2xx")); EXPECT_CALL(stats_, counter("prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_200")); - EXPECT_CALL(stats_, deliverTimingToSinks( - "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx", _)); - EXPECT_CALL(stats_, deliverTimingToSinks( - "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200", _)); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time", _)); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx")); + 
EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200")); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx"), + _)); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200"), + _)); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time"), + _)); EXPECT_CALL( stats_, @@ -580,22 +695,41 @@ TEST_F(DynamoFilterTest, PartitionIdStatsForSingleTableBatchOperation) { EXPECT_CALL(stats_, counter("prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_2xx")); EXPECT_CALL(stats_, counter("prefix.dynamodb.operation.BatchGetItem.upstream_rq_total_200")); - EXPECT_CALL(stats_, deliverTimingToSinks( - "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx", _)); - EXPECT_CALL(stats_, deliverTimingToSinks( - "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200", _)); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time", _)); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200")); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_2xx"), + _)); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time_200"), + _)); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.operation.BatchGetItem.upstream_rq_time"), + _)); EXPECT_CALL(stats_, 
counter("prefix.dynamodb.table.locations.upstream_rq_total_2xx")); EXPECT_CALL(stats_, counter("prefix.dynamodb.table.locations.upstream_rq_total_200")); EXPECT_CALL(stats_, counter("prefix.dynamodb.table.locations.upstream_rq_total")); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.table.locations.upstream_rq_time_2xx", _)); - EXPECT_CALL(stats_, - deliverTimingToSinks("prefix.dynamodb.table.locations.upstream_rq_time_200", _)); - EXPECT_CALL(stats_, deliverTimingToSinks("prefix.dynamodb.table.locations.upstream_rq_time", _)); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.table.locations.upstream_rq_time_2xx")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.table.locations.upstream_rq_time_200")); + EXPECT_CALL(stats_, histogram("prefix.dynamodb.table.locations.upstream_rq_time")); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.table.locations.upstream_rq_time_2xx"), + _)); + EXPECT_CALL(stats_, deliverHistogramToSinks( + Property(&Stats::Metric::name, + "prefix.dynamodb.table.locations.upstream_rq_time_200"), + _)); + EXPECT_CALL( + stats_, + deliverHistogramToSinks( + Property(&Stats::Metric::name, "prefix.dynamodb.table.locations.upstream_rq_time"), _)); EXPECT_CALL( stats_, diff --git a/test/common/http/codes_test.cc b/test/common/http/codes_test.cc index 954c31d2e7db1..2e188799b845b 100644 --- a/test/common/http/codes_test.cc +++ b/test/common/http/codes_test.cc @@ -15,6 +15,7 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" +using testing::Property; using testing::_; namespace Envoy { @@ -181,17 +182,31 @@ TEST(CodeUtilityResponseTimingTest, All) { true, true, "vhost_name", "req_vcluster_name", "from_az", "to_az"}; + EXPECT_CALL(cluster_scope, histogram("prefix.upstream_rq_time")); + EXPECT_CALL(cluster_scope, deliverHistogramToSinks( + Property(&Stats::Metric::name, "prefix.upstream_rq_time"), 5)); + + EXPECT_CALL(cluster_scope, histogram("prefix.canary.upstream_rq_time")); + 
EXPECT_CALL( + cluster_scope, + deliverHistogramToSinks(Property(&Stats::Metric::name, "prefix.canary.upstream_rq_time"), 5)); + + EXPECT_CALL(cluster_scope, histogram("prefix.internal.upstream_rq_time")); EXPECT_CALL(cluster_scope, - deliverTimingToSinks("prefix.upstream_rq_time", std::chrono::milliseconds(5))); - EXPECT_CALL(cluster_scope, - deliverTimingToSinks("prefix.canary.upstream_rq_time", std::chrono::milliseconds(5))); - EXPECT_CALL(cluster_scope, deliverTimingToSinks("prefix.internal.upstream_rq_time", - std::chrono::milliseconds(5))); + deliverHistogramToSinks( + Property(&Stats::Metric::name, "prefix.internal.upstream_rq_time"), 5)); EXPECT_CALL(global_store, - deliverTimingToSinks("vhost.vhost_name.vcluster.req_vcluster_name.upstream_rq_time", - std::chrono::milliseconds(5))); - EXPECT_CALL(cluster_scope, deliverTimingToSinks("prefix.zone.from_az.to_az.upstream_rq_time", - std::chrono::milliseconds(5))); + histogram("vhost.vhost_name.vcluster.req_vcluster_name.upstream_rq_time")); + EXPECT_CALL(global_store, + deliverHistogramToSinks( + Property(&Stats::Metric::name, + "vhost.vhost_name.vcluster.req_vcluster_name.upstream_rq_time"), + 5)); + + EXPECT_CALL(cluster_scope, histogram("prefix.zone.from_az.to_az.upstream_rq_time")); + EXPECT_CALL(cluster_scope, + deliverHistogramToSinks( + Property(&Stats::Metric::name, "prefix.zone.from_az.to_az.upstream_rq_time"), 5)); CodeUtility::chargeResponseTiming(info); } diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index a1f65ce430a74..b7e2048e1c438 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -71,7 +71,7 @@ class HttpConnectionManagerImplTest : public Test, public ConnectionManagerConfi log_manager_)}}, codec_(new NiceMock()), stats_{{ALL_HTTP_CONN_MAN_STATS(POOL_COUNTER(fake_stats_), POOL_GAUGE(fake_stats_), - POOL_TIMER(fake_stats_))}, + POOL_HISTOGRAM(fake_stats_))}, "", fake_stats_}, 
tracing_stats_{CONN_MAN_TRACING_STATS(POOL_COUNTER(fake_stats_))}, diff --git a/test/common/http/http1/conn_pool_test.cc b/test/common/http/http1/conn_pool_test.cc index 84c95ad44eb64..adfdb9b33ab7a 100644 --- a/test/common/http/http1/conn_pool_test.cc +++ b/test/common/http/http1/conn_pool_test.cc @@ -25,6 +25,7 @@ using testing::DoAll; using testing::InSequence; using testing::Invoke; using testing::NiceMock; +using testing::Property; using testing::Return; using testing::ReturnRef; using testing::SaveArg; @@ -185,8 +186,11 @@ struct ActiveTestRequest { * Test all timing stats are set. */ TEST_F(Http1ConnPoolImplTest, VerifyTimingStats) { - EXPECT_CALL(cluster_->stats_store_, deliverTimingToSinks("upstream_cx_connect_ms", _)); - EXPECT_CALL(cluster_->stats_store_, deliverTimingToSinks("upstream_cx_length_ms", _)); + + EXPECT_CALL(cluster_->stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)); + EXPECT_CALL(cluster_->stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_length_ms"), _)); ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); r1.startRequest(); diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index 73f48add07a25..1737089d27323 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -22,6 +22,7 @@ using testing::DoAll; using testing::InSequence; using testing::Invoke; using testing::NiceMock; +using testing::Property; using testing::Return; using testing::ReturnRef; using testing::SaveArg; @@ -120,8 +121,10 @@ class ActiveTestRequest { TEST_F(Http2ConnPoolImplTest, VerifyConnectionTimingStats) { expectClientCreate(); - EXPECT_CALL(cluster_->stats_store_, deliverTimingToSinks("upstream_cx_connect_ms", _)); - EXPECT_CALL(cluster_->stats_store_, deliverTimingToSinks("upstream_cx_length_ms", _)); + EXPECT_CALL(cluster_->stats_store_, + 
deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_connect_ms"), _)); + EXPECT_CALL(cluster_->stats_store_, + deliverHistogramToSinks(Property(&Stats::Metric::name, "upstream_cx_length_ms"), _)); ActiveTestRequest r1(*this, 0); EXPECT_CALL(r1.inner_encoder_, encodeHeaders(_, true)); diff --git a/test/common/http/user_agent_test.cc b/test/common/http/user_agent_test.cc index 2403724f0fc8e..a479a7cde225f 100644 --- a/test/common/http/user_agent_test.cc +++ b/test/common/http/user_agent_test.cc @@ -7,18 +7,27 @@ #include "gtest/gtest.h" +using testing::NiceMock; +using testing::Property; +using testing::_; + namespace Envoy { namespace Http { TEST(UserAgentTest, All) { Stats::MockStore stat_store; - Stats::MockTimespan span; + NiceMock original_histogram; + Stats::Timespan span(original_histogram); EXPECT_CALL(stat_store.counter_, inc()).Times(5); EXPECT_CALL(stat_store, counter("test.user_agent.ios.downstream_cx_total")); EXPECT_CALL(stat_store, counter("test.user_agent.ios.downstream_rq_total")); EXPECT_CALL(stat_store, counter("test.user_agent.ios.downstream_cx_destroy_remote_active_rq")); - EXPECT_CALL(span, complete("test.user_agent.ios.downstream_cx_length_ms")); + EXPECT_CALL(stat_store, histogram("test.user_agent.ios.downstream_cx_length_ms")); + EXPECT_CALL( + stat_store, + deliverHistogramToSinks( + Property(&Stats::Metric::name, "test.user_agent.ios.downstream_cx_length_ms"), _)); { UserAgent ua; @@ -32,7 +41,11 @@ TEST(UserAgentTest, All) { EXPECT_CALL(stat_store, counter("test.user_agent.android.downstream_rq_total")); EXPECT_CALL(stat_store, counter("test.user_agent.android.downstream_cx_destroy_remote_active_rq")); - EXPECT_CALL(span, complete("test.user_agent.android.downstream_cx_length_ms")); + EXPECT_CALL(stat_store, histogram("test.user_agent.android.downstream_cx_length_ms")); + EXPECT_CALL( + stat_store, + deliverHistogramToSinks( + Property(&Stats::Metric::name, "test.user_agent.android.downstream_cx_length_ms"), _)); { 
UserAgent ua; diff --git a/test/common/mongo/proxy_test.cc b/test/common/mongo/proxy_test.cc index 40bcc826ef3aa..b6569a9fa4790 100644 --- a/test/common/mongo/proxy_test.cc +++ b/test/common/mongo/proxy_test.cc @@ -22,6 +22,7 @@ using testing::AnyNumber; using testing::AtLeast; using testing::Invoke; using testing::NiceMock; +using testing::Property; using testing::Return; using testing::_; @@ -35,8 +36,7 @@ class MockDecoder : public Decoder { class TestStatStore : public Stats::IsolatedStoreImpl { public: - MOCK_METHOD2(deliverHistogramToSinks, void(const std::string& name, uint64_t value)); - MOCK_METHOD2(deliverTimingToSinks, void(const std::string& name, std::chrono::milliseconds ms)); + MOCK_METHOD2(deliverHistogramToSinks, void(const Stats::Histogram& histogram, uint64_t value)); }; class TestProxyFilter : public ProxyFilter { @@ -198,9 +198,15 @@ TEST_F(MongoProxyFilterTest, Stats) { })); filter_->onData(fake_data_); - EXPECT_CALL(store_, deliverHistogramToSinks("test.collection.test.query.reply_num_docs", 1)); - EXPECT_CALL(store_, deliverHistogramToSinks("test.collection.test.query.reply_size", 22)); - EXPECT_CALL(store_, deliverTimingToSinks("test.collection.test.query.reply_time_ms", _)); + EXPECT_CALL(store_, + deliverHistogramToSinks( + Property(&Stats::Metric::name, "test.collection.test.query.reply_num_docs"), 1)); + EXPECT_CALL(store_, + deliverHistogramToSinks( + Property(&Stats::Metric::name, "test.collection.test.query.reply_size"), 22)); + EXPECT_CALL(store_, + deliverHistogramToSinks( + Property(&Stats::Metric::name, "test.collection.test.query.reply_time_ms"), _)); EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { ReplyMessagePtr message(new ReplyMessageImpl(0, 0)); @@ -270,9 +276,12 @@ TEST_F(MongoProxyFilterTest, CommandStats) { })); filter_->onData(fake_data_); - EXPECT_CALL(store_, deliverHistogramToSinks("test.cmd.foo.reply_num_docs", 1)); - EXPECT_CALL(store_, 
deliverHistogramToSinks("test.cmd.foo.reply_size", 22)); - EXPECT_CALL(store_, deliverTimingToSinks("test.cmd.foo.reply_time_ms", _)); + EXPECT_CALL(store_, deliverHistogramToSinks( + Property(&Stats::Metric::name, "test.cmd.foo.reply_num_docs"), 1)); + EXPECT_CALL(store_, deliverHistogramToSinks( + Property(&Stats::Metric::name, "test.cmd.foo.reply_size"), 22)); + EXPECT_CALL(store_, deliverHistogramToSinks( + Property(&Stats::Metric::name, "test.cmd.foo.reply_time_ms"), _)); EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { ReplyMessagePtr message(new ReplyMessageImpl(0, 0)); @@ -312,15 +321,29 @@ TEST_F(MongoProxyFilterTest, CallingFunctionStats) { EXPECT_EQ(1U, store_.counter("test.collection.test.callsite.getByMongoId.query.scatter_get").value()); - EXPECT_CALL(store_, deliverHistogramToSinks("test.collection.test.query.reply_num_docs", 1)); - EXPECT_CALL(store_, deliverHistogramToSinks("test.collection.test.query.reply_size", 22)); - EXPECT_CALL(store_, deliverTimingToSinks("test.collection.test.query.reply_time_ms", _)); - EXPECT_CALL(store_, deliverHistogramToSinks( - "test.collection.test.callsite.getByMongoId.query.reply_num_docs", 1)); + EXPECT_CALL(store_, + deliverHistogramToSinks( + Property(&Stats::Metric::name, "test.collection.test.query.reply_num_docs"), 1)); + EXPECT_CALL(store_, + deliverHistogramToSinks( + Property(&Stats::Metric::name, "test.collection.test.query.reply_size"), 22)); + EXPECT_CALL(store_, + deliverHistogramToSinks( + Property(&Stats::Metric::name, "test.collection.test.query.reply_time_ms"), _)); + EXPECT_CALL(store_, + deliverHistogramToSinks( + Property(&Stats::Metric::name, + "test.collection.test.callsite.getByMongoId.query.reply_num_docs"), + 1)); EXPECT_CALL(store_, deliverHistogramToSinks( - "test.collection.test.callsite.getByMongoId.query.reply_size", 22)); - EXPECT_CALL(store_, deliverTimingToSinks( - "test.collection.test.callsite.getByMongoId.query.reply_time_ms", _)); + 
Property(&Stats::Metric::name, + "test.collection.test.callsite.getByMongoId.query.reply_size"), + 22)); + EXPECT_CALL(store_, + deliverHistogramToSinks( + Property(&Stats::Metric::name, + "test.collection.test.callsite.getByMongoId.query.reply_time_ms"), + _)); EXPECT_CALL(*filter_->decoder_, onData(_)).WillOnce(Invoke([&](Buffer::Instance&) -> void { ReplyMessagePtr message(new ReplyMessageImpl(0, 0)); diff --git a/test/common/stats/BUILD b/test/common/stats/BUILD index b4fb2ec9e60b3..7b937fa8f4b48 100644 --- a/test/common/stats/BUILD +++ b/test/common/stats/BUILD @@ -11,7 +11,10 @@ envoy_package() envoy_cc_test( name = "stats_impl_test", srcs = ["stats_impl_test.cc"], - deps = ["//source/common/stats:stats_lib"], + deps = [ + "//source/common/stats:stats_lib", + "//test/test_common:utility_lib", + ], ) envoy_cc_test( @@ -39,6 +42,7 @@ envoy_cc_test( "//source/common/network:address_lib", "//source/common/network:utility_lib", "//source/common/stats:statsd_lib", + "//test/mocks/stats:stats_mocks", "//test/mocks/thread_local:thread_local_mocks", "//test/test_common:environment_lib", "//test/test_common:network_utility_lib", diff --git a/test/common/stats/stats_impl_test.cc b/test/common/stats/stats_impl_test.cc index a7d5447fa9903..6ab0c41bc4199 100644 --- a/test/common/stats/stats_impl_test.cc +++ b/test/common/stats/stats_impl_test.cc @@ -1,7 +1,11 @@ #include +#include "envoy/stats/stats_macros.h" + #include "common/stats/stats_impl.h" +#include "test/test_common/utility.h" + #include "gtest/gtest.h" namespace Envoy { @@ -21,15 +25,12 @@ TEST(StatsIsolatedStoreImplTest, All) { EXPECT_EQ("g1", g1.name()); EXPECT_EQ("scope1.g2", g2.name()); - Timer& t1 = store.timer("t1"); - Timer& t2 = scope1->timer("t2"); - EXPECT_EQ("t1", t1.name()); - EXPECT_EQ("scope1.t2", t2.name()); - - store.deliverHistogramToSinks("h", 100); - store.deliverTimingToSinks("t", std::chrono::milliseconds(200)); - scope1->deliverHistogramToSinks("h", 100); - scope1->deliverTimingToSinks("t", 
std::chrono::milliseconds(200)); + Histogram& h1 = store.histogram("h1"); + Histogram& h2 = scope1->histogram("h2"); + EXPECT_EQ("h1", h1.name()); + EXPECT_EQ("scope1.h2", h2.name()); + h1.recordValue(200); + h2.recordValue(200); ScopePtr scope2 = scope1->createScope("foo."); EXPECT_EQ("scope1.foo.bar", scope2->counter("bar").name()); @@ -38,5 +39,35 @@ TEST(StatsIsolatedStoreImplTest, All) { EXPECT_EQ(2UL, store.gauges().size()); } +/** + * Test stats macros. @see stats_macros.h + */ +// clang-format off +#define ALL_TEST_STATS(COUNTER, GAUGE, HISTOGRAM) \ + COUNTER (test_counter) \ + GAUGE (test_gauge) \ + HISTOGRAM(test_histogram) +// clang-format on + +struct TestStats { + ALL_TEST_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT) +}; + +TEST(StatsMacros, All) { + IsolatedStoreImpl stats_store; + TestStats test_stats{ALL_TEST_STATS(POOL_COUNTER_PREFIX(stats_store, "test."), + POOL_GAUGE_PREFIX(stats_store, "test."), + POOL_HISTOGRAM_PREFIX(stats_store, "test."))}; + + Counter& counter = test_stats.test_counter_; + EXPECT_EQ("test.test_counter", counter.name()); + + Gauge& gauge = test_stats.test_gauge_; + EXPECT_EQ("test.test_gauge", gauge.name()); + + Histogram& histogram = test_stats.test_histogram_; + EXPECT_EQ("test.test_histogram", histogram.name()); +} + } // namespace Stats } // namespace Envoy diff --git a/test/common/stats/statsd_test.cc b/test/common/stats/statsd_test.cc index 90f2450c6985b..e3f8416228aa6 100644 --- a/test/common/stats/statsd_test.cc +++ b/test/common/stats/statsd_test.cc @@ -9,6 +9,7 @@ #include "test/mocks/buffer/mocks.h" #include "test/mocks/local_info/mocks.h" #include "test/mocks/network/mocks.h" +#include "test/mocks/stats/mocks.h" #include "test/mocks/thread_local/mocks.h" #include "test/mocks/upstream/mocks.h" @@ -64,10 +65,15 @@ TEST_F(TcpStatsdSinkTest, EmptyFlush) { TEST_F(TcpStatsdSinkTest, BasicFlow) { InSequence s; + NiceMock counter; + counter.name_ = "test_counter"; + + NiceMock gauge; + 
gauge.name_ = "test_gauge"; sink_->beginFlush(); - sink_->flushCounter("test_counter", 1); - sink_->flushGauge("test_gauge", 2); + sink_->flushCounter(counter, 1); + sink_->flushGauge(gauge, 2); expectCreateConnection(); EXPECT_CALL(*connection_, @@ -78,11 +84,11 @@ TEST_F(TcpStatsdSinkTest, BasicFlow) { connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); expectCreateConnection(); - EXPECT_CALL(*connection_, write(BufferStringEqual("envoy.test_timer:5|ms\n"))); - sink_->onTimespanComplete("test_timer", std::chrono::milliseconds(5)); - EXPECT_CALL(*connection_, write(BufferStringEqual("envoy.histogram_test_timer:15|ms\n"))); - sink_->onHistogramComplete("histogram_test_timer", 15); + NiceMock timer; + timer.name_ = "test_timer"; + EXPECT_CALL(*connection_, write(BufferStringEqual("envoy.test_timer:5|ms\n"))); + sink_->onHistogramComplete(timer, 5); EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush)); tls_.shutdownThread(); @@ -91,9 +97,12 @@ TEST_F(TcpStatsdSinkTest, BasicFlow) { TEST_F(TcpStatsdSinkTest, BufferReallocate) { InSequence s; + NiceMock counter; + counter.name_ = "test_counter"; + sink_->beginFlush(); for (int i = 0; i < 2000; i++) { - sink_->flushCounter("test_counter", 1); + sink_->flushCounter(counter, 1); } expectCreateConnection(); @@ -110,18 +119,21 @@ TEST_F(TcpStatsdSinkTest, BufferReallocate) { TEST_F(TcpStatsdSinkTest, Overflow) { InSequence s; + NiceMock counter; + counter.name_ = "test_counter"; + // Synthetically set buffer above high watermark. Make sure we don't write anything. cluster_manager_.thread_local_cluster_.cluster_.info_->stats().upstream_cx_tx_bytes_buffered_.set( 1024 * 1024 * 17); sink_->beginFlush(); - sink_->flushCounter("test_counter", 1); + sink_->flushCounter(counter, 1); sink_->endFlush(); // Lower and make sure we write. 
cluster_manager_.thread_local_cluster_.cluster_.info_->stats().upstream_cx_tx_bytes_buffered_.set( 1024 * 1024 * 15); sink_->beginFlush(); - sink_->flushCounter("test_counter", 1); + sink_->flushCounter(counter, 1); expectCreateConnection(); EXPECT_CALL(*connection_, write(BufferStringEqual("envoy.test_counter:1|c\n"))); sink_->endFlush(); @@ -130,7 +142,7 @@ TEST_F(TcpStatsdSinkTest, Overflow) { cluster_manager_.thread_local_cluster_.cluster_.info_->stats().upstream_cx_tx_bytes_buffered_.set( 1024 * 1024 * 17); sink_->beginFlush(); - sink_->flushCounter("test_counter", 1); + sink_->flushCounter(counter, 1); EXPECT_CALL(*connection_, close(Network::ConnectionCloseType::NoFlush)); sink_->endFlush(); diff --git a/test/common/stats/thread_local_store_test.cc b/test/common/stats/thread_local_store_test.cc index 28a473b2cd721..cec45ff0c4745 100644 --- a/test/common/stats/thread_local_store_test.cc +++ b/test/common/stats/thread_local_store_test.cc @@ -16,6 +16,7 @@ using testing::InSequence; using testing::Invoke; using testing::NiceMock; +using testing::Ref; using testing::Return; using testing::_; @@ -98,14 +99,12 @@ TEST_F(StatsThreadLocalStoreTest, NoTls) { Gauge& g1 = store_->gauge("g1"); EXPECT_EQ(&g1, &store_->gauge("g1")); - Timer& t1 = store_->timer("t1"); - EXPECT_EQ(&t1, &store_->timer("t1")); - - EXPECT_CALL(sink_, onHistogramComplete("h", 100)); - store_->deliverHistogramToSinks("h", 100); - - EXPECT_CALL(sink_, onTimespanComplete("t", std::chrono::milliseconds(200))); - store_->deliverTimingToSinks("t", std::chrono::milliseconds(200)); + Histogram& h1 = store_->histogram("h1"); + EXPECT_EQ(&h1, &store_->histogram("h1")); + EXPECT_CALL(sink_, onHistogramComplete(Ref(h1), 200)); + h1.recordValue(200); + EXPECT_CALL(sink_, onHistogramComplete(Ref(h1), 100)); + store_->deliverHistogramToSinks(h1, 100); EXPECT_EQ(2UL, store_->counters().size()); EXPECT_EQ(&c1, store_->counters().front().get()); @@ -132,8 +131,8 @@ TEST_F(StatsThreadLocalStoreTest, Tls) { Gauge& 
g1 = store_->gauge("g1"); EXPECT_EQ(&g1, &store_->gauge("g1")); - Timer& t1 = store_->timer("t1"); - EXPECT_EQ(&t1, &store_->timer("t1")); + Histogram& h1 = store_->histogram("h1"); + EXPECT_EQ(&h1, &store_->histogram("h1")); EXPECT_EQ(2UL, store_->counters().size()); EXPECT_EQ(&c1, store_->counters().front().get()); @@ -172,20 +171,18 @@ TEST_F(StatsThreadLocalStoreTest, BasicScope) { EXPECT_EQ("g1", g1.name()); EXPECT_EQ("scope1.g2", g2.name()); - Timer& t1 = store_->timer("t1"); - Timer& t2 = scope1->timer("t2"); - EXPECT_EQ("t1", t1.name()); - EXPECT_EQ("scope1.t2", t2.name()); - - EXPECT_CALL(sink_, onHistogramComplete("scope1.h", 100)); - scope1->deliverHistogramToSinks("h", 100); - - EXPECT_CALL(sink_, onTimespanComplete("scope1.t", std::chrono::milliseconds(200))); - scope1->deliverTimingToSinks("t", std::chrono::milliseconds(200)); + Histogram& h1 = store_->histogram("h1"); + Histogram& h2 = scope1->histogram("h2"); + EXPECT_EQ("h1", h1.name()); + EXPECT_EQ("scope1.h2", h2.name()); + EXPECT_CALL(sink_, onHistogramComplete(Ref(h1), 100)); + h1.recordValue(100); + EXPECT_CALL(sink_, onHistogramComplete(Ref(h2), 200)); + h2.recordValue(200); store_->shutdownThreading(); - scope1->deliverHistogramToSinks("h", 100); - scope1->deliverTimingToSinks("t", std::chrono::milliseconds(200)); + scope1->deliverHistogramToSinks(h1, 100); + scope1->deliverHistogramToSinks(h2, 200); tls_.shutdownThread(); // Includes overflow stat. 
diff --git a/test/common/stats/udp_statsd_test.cc b/test/common/stats/udp_statsd_test.cc index db13ee24cd0bf..1741c4becbd9e 100644 --- a/test/common/stats/udp_statsd_test.cc +++ b/test/common/stats/udp_statsd_test.cc @@ -4,6 +4,7 @@ #include "common/network/utility.h" #include "common/stats/statsd.h" +#include "test/mocks/stats/mocks.h" #include "test/mocks/thread_local/mocks.h" #include "test/test_common/environment.h" #include "test/test_common/network_utility.h" @@ -33,10 +34,18 @@ TEST_P(UdpStatsdSinkTest, InitWithIpAddress) { EXPECT_NE(fd, -1); // Check that fd has not changed. - sink.flushCounter("test_counter", 1); - sink.flushGauge("test_gauge", 1); - sink.onHistogramComplete("histogram_test_timer", 5); - sink.onTimespanComplete("test_timer", std::chrono::milliseconds(5)); + NiceMock counter; + counter.name_ = "test_counter"; + sink.flushCounter(counter, 1); + + NiceMock gauge; + gauge.name_ = "test_gauge"; + sink.flushGauge(gauge, 1); + + NiceMock timer; + timer.name_ = "test_timer"; + sink.onHistogramComplete(timer, 5); + EXPECT_EQ(fd, sink.getFdForTests()); if (GetParam() == Network::Address::IpVersion::v4) { diff --git a/test/integration/server.h b/test/integration/server.h index d6c521713df71..5c7e414714eb4 100644 --- a/test/integration/server.h +++ b/test/integration/server.h @@ -100,14 +100,9 @@ class TestScopeWrapper : public Scope { return ScopePtr{new TestScopeWrapper(lock_, wrapped_scope_->createScope(name))}; } - void deliverHistogramToSinks(const std::string& name, uint64_t value) override { + void deliverHistogramToSinks(const Histogram& histogram, uint64_t value) override { std::unique_lock lock(lock_); - wrapped_scope_->deliverHistogramToSinks(name, value); - } - - void deliverTimingToSinks(const std::string& name, std::chrono::milliseconds ms) override { - std::unique_lock lock(lock_); - wrapped_scope_->deliverTimingToSinks(name, ms); + wrapped_scope_->deliverHistogramToSinks(histogram, value); } Counter& counter(const std::string& name) 
override { @@ -120,9 +115,9 @@ class TestScopeWrapper : public Scope { return wrapped_scope_->gauge(name); } - Timer& timer(const std::string& name) override { + Histogram& histogram(const std::string& name) override { std::unique_lock lock(lock_); - return wrapped_scope_->timer(name); + return wrapped_scope_->histogram(name); } private: @@ -145,15 +140,14 @@ class TestIsolatedStoreImpl : public StoreRoot { std::unique_lock lock(lock_); return ScopePtr{new TestScopeWrapper(lock_, store_.createScope(name))}; } - void deliverHistogramToSinks(const std::string&, uint64_t) override {} - void deliverTimingToSinks(const std::string&, std::chrono::milliseconds) override {} + void deliverHistogramToSinks(const Histogram&, uint64_t) override {} Gauge& gauge(const std::string& name) override { std::unique_lock lock(lock_); return store_.gauge(name); } - Timer& timer(const std::string& name) override { + Histogram& histogram(const std::string& name) override { std::unique_lock lock(lock_); - return store_.timer(name); + return store_.histogram(name); } // Stats::Store diff --git a/test/mocks/stats/BUILD b/test/mocks/stats/BUILD index 989f5bf4028e7..b119f793bf24e 100644 --- a/test/mocks/stats/BUILD +++ b/test/mocks/stats/BUILD @@ -14,6 +14,7 @@ envoy_cc_mock( hdrs = ["mocks.h"], deps = [ "//include/envoy/stats:stats_interface", + "//include/envoy/stats:timespan", "//include/envoy/thread_local:thread_local_interface", "//include/envoy/upstream:cluster_manager_interface", "//source/common/stats:stats_lib", diff --git a/test/mocks/stats/mocks.cc b/test/mocks/stats/mocks.cc index 9dd2d44aac643..e842a2f9397a1 100644 --- a/test/mocks/stats/mocks.cc +++ b/test/mocks/stats/mocks.cc @@ -3,24 +3,44 @@ #include "gmock/gmock.h" #include "gtest/gtest.h" +using testing::Invoke; +using testing::NiceMock; +using testing::Return; +using testing::ReturnPointee; +using testing::ReturnRef; using testing::_; namespace Envoy { namespace Stats { -MockCounter::MockCounter() {} 
+MockCounter::MockCounter() { ON_CALL(*this, name()).WillByDefault(ReturnRef(name_)); } MockCounter::~MockCounter() {} -MockGauge::MockGauge() {} +MockGauge::MockGauge() { ON_CALL(*this, name()).WillByDefault(ReturnRef(name_)); } MockGauge::~MockGauge() {} -MockTimespan::MockTimespan() {} -MockTimespan::~MockTimespan() {} +MockHistogram::MockHistogram() { + ON_CALL(*this, recordValue(_)).WillByDefault(Invoke([this](uint64_t value) { + if (store_ != nullptr) { + store_->deliverHistogramToSinks(*this, value); + } + })); +} +MockHistogram::~MockHistogram() {} MockSink::MockSink() {} MockSink::~MockSink() {} -MockStore::MockStore() { ON_CALL(*this, counter(_)).WillByDefault(ReturnRef(counter_)); } +MockStore::MockStore() { + ON_CALL(*this, counter(_)).WillByDefault(ReturnRef(counter_)); + ON_CALL(*this, histogram(_)).WillByDefault(Invoke([this](const std::string& name) -> Histogram& { + auto* histogram = new NiceMock; + histogram->name_ = name; + histogram->store_ = this; + histograms_.emplace_back(histogram); + return *histogram; + })); +} MockStore::~MockStore() {} MockIsolatedStatsStore::MockIsolatedStatsStore() {} diff --git a/test/mocks/stats/mocks.h b/test/mocks/stats/mocks.h index 25901feed0c26..ca79b3912a3a9 100644 --- a/test/mocks/stats/mocks.h +++ b/test/mocks/stats/mocks.h @@ -6,6 +6,7 @@ #include #include "envoy/stats/stats.h" +#include "envoy/stats/timespan.h" #include "envoy/thread_local/thread_local.h" #include "envoy/upstream/cluster_manager.h" @@ -24,10 +25,12 @@ class MockCounter : public Counter { MOCK_METHOD1(add, void(uint64_t amount)); MOCK_METHOD0(inc, void()); MOCK_METHOD0(latch, uint64_t()); - MOCK_METHOD0(name, std::string()); + MOCK_CONST_METHOD0(name, const std::string&()); MOCK_METHOD0(reset, void()); - MOCK_METHOD0(used, bool()); - MOCK_METHOD0(value, uint64_t()); + MOCK_CONST_METHOD0(used, bool()); + MOCK_CONST_METHOD0(value, uint64_t()); + + std::string name_; }; class MockGauge : public Gauge { @@ -38,20 +41,27 @@ class MockGauge : 
public Gauge { MOCK_METHOD1(add, void(uint64_t amount)); MOCK_METHOD0(dec, void()); MOCK_METHOD0(inc, void()); - MOCK_METHOD0(name, std::string()); + MOCK_CONST_METHOD0(name, const std::string&()); MOCK_METHOD1(set, void(uint64_t value)); MOCK_METHOD1(sub, void(uint64_t amount)); - MOCK_METHOD0(used, bool()); - MOCK_METHOD0(value, uint64_t()); + MOCK_CONST_METHOD0(used, bool()); + MOCK_CONST_METHOD0(value, uint64_t()); + + std::string name_; }; -class MockTimespan : public Timespan { +class MockHistogram : public Histogram { public: - MockTimespan(); - ~MockTimespan(); + MockHistogram(); + ~MockHistogram(); + + // Note: cannot be mocked because it is accessed as a Property in a gmock EXPECT_CALL. This + // creates a deadlock in gmock and is an unintended use of mock functions. + const std::string& name() const override { return name_; }; + MOCK_METHOD1(recordValue, void(uint64_t value)); - MOCK_METHOD0(complete, void()); - MOCK_METHOD1(complete, void(const std::string& dynamic_name)); + std::string name_; + Store* store_; }; class MockSink : public Sink { @@ -60,11 +70,10 @@ class MockSink : public Sink { ~MockSink(); MOCK_METHOD0(beginFlush, void()); - MOCK_METHOD2(flushCounter, void(const std::string& name, uint64_t delta)); - MOCK_METHOD2(flushGauge, void(const std::string& name, uint64_t value)); + MOCK_METHOD2(flushCounter, void(const Counter& counter, uint64_t delta)); + MOCK_METHOD2(flushGauge, void(const Gauge& gauge, uint64_t value)); MOCK_METHOD0(endFlush, void()); - MOCK_METHOD2(onHistogramComplete, void(const std::string& name, uint64_t value)); - MOCK_METHOD2(onTimespanComplete, void(const std::string& name, std::chrono::milliseconds ms)); + MOCK_METHOD2(onHistogramComplete, void(const Histogram& histogram, uint64_t value)); }; class MockStore : public Store { @@ -74,28 +83,28 @@ class MockStore : public Store { ScopePtr createScope(const std::string& name) override { return ScopePtr{createScope_(name)}; } - MOCK_METHOD2(deliverHistogramToSinks, 
void(const std::string& name, uint64_t value)); - MOCK_METHOD2(deliverTimingToSinks, void(const std::string&, std::chrono::milliseconds)); + MOCK_METHOD2(deliverHistogramToSinks, void(const Histogram& histogram, uint64_t value)); MOCK_METHOD1(counter, Counter&(const std::string&)); MOCK_CONST_METHOD0(counters, std::list()); MOCK_METHOD1(createScope_, Scope*(const std::string& name)); MOCK_METHOD1(gauge, Gauge&(const std::string&)); MOCK_CONST_METHOD0(gauges, std::list()); - MOCK_METHOD1(timer, Timer&(const std::string& name)); + MOCK_METHOD1(histogram, Histogram&(const std::string& name)); testing::NiceMock counter_; + std::vector> histograms_; }; /** * With IsolatedStoreImpl it's hard to test timing stats. - * MockIsolatedStatsStore mocks only deliverTimingToSinks for better testing. + * MockIsolatedStatsStore mocks only deliverHistogramToSinks for better testing. */ class MockIsolatedStatsStore : public IsolatedStoreImpl { public: MockIsolatedStatsStore(); ~MockIsolatedStatsStore(); - MOCK_METHOD2(deliverTimingToSinks, void(const std::string&, std::chrono::milliseconds)); + MOCK_METHOD2(deliverHistogramToSinks, void(const Histogram& histogram, uint64_t value)); }; } // namespace Stats diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 7235da0d5e974..81c8e85a7073c 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -12,6 +12,7 @@ #include "gtest/gtest.h" using testing::InSequence; +using testing::Property; using testing::SaveArg; using testing::StrictMock; using testing::_; @@ -27,8 +28,8 @@ TEST(ServerInstanceUtil, flushHelper) { store.gauge("world").set(5); std::unique_ptr sink(new StrictMock()); EXPECT_CALL(*sink, beginFlush()); - EXPECT_CALL(*sink, flushCounter("hello", 1)); - EXPECT_CALL(*sink, flushGauge("world", 5)); + EXPECT_CALL(*sink, flushCounter(Property(&Stats::Metric::name, "hello"), 1)); + EXPECT_CALL(*sink, flushGauge(Property(&Stats::Metric::name, "world"), 5)); EXPECT_CALL(*sink, endFlush()); 
std::list sinks;