Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE;
// Bandwidth limit :ref:`configuration overview <config_http_filters_bandwidth_limit>`.
// [#extension: envoy.filters.http.bandwidth_limit]

// [#next-free-field: 6]
// [#next-free-field: 8]
message BandwidthLimit {
// Defines the mode for the bandwidth limit filter.
// Values represent bitmask.
Expand Down Expand Up @@ -66,4 +66,19 @@ message BandwidthLimit {
// Runtime flag that controls whether the filter is enabled or not. If not specified, defaults
// to enabled.
config.core.v3.RuntimeFeatureFlag runtime_enabled = 5;

// Enable response trailers.
//
// .. note::
//
// * If set true, the response trailers *bandwidth-request-delay-ms* and *bandwidth-response-delay-ms* will be added, prefixed by *response_trailer_prefix*.
// * bandwidth-request-delay-ms: delay time in milliseconds it took for the request stream transfer.
// * bandwidth-response-delay-ms: delay time in milliseconds it took for the response stream transfer.
// * If :ref:`enable_mode <envoy_v3_api_field_extensions.filters.http.bandwidth_limit.v3.BandwidthLimit.enable_mode>` is DISABLED or REQUEST, the trailers will not be set.
// * If both the request and response delay times are 0, the trailers will not be set.
//
bool enable_response_trailers = 6;

// Optional. The prefix for the response trailers.
string response_trailer_prefix = 7;
}
Original file line number Diff line number Diff line change
Expand Up @@ -42,11 +42,13 @@ The HTTP bandwidth limit filter outputs statistics in the ``<stat_prefix>.http_b
:widths: 1, 1, 2

request_enabled, Counter, Total number of request streams for which the bandwidth limiter was consulted
request_enforced, Counter, Total number of request streams for which the bandwidth limiter was enforced
request_pending, GAUGE, Number of request streams which are currently pending transfer in bandwidth limiter
request_incoming_size, GAUGE, Size in bytes of incoming request data to bandwidth limiter
request_allowed_size, GAUGE, Size in bytes of outgoing request data from bandwidth limiter
request_transfer_duration, HISTOGRAM, Total time (including added delay) it took for the request stream transfer
response_enabled, Counter, Total number of response streams for which the bandwidth limiter was consulted
response_enforced, Counter, Total number of response streams for which the bandwidth limiter was enforced
response_pending, GAUGE, Number of response streams which are currently pending transfer in bandwidth limiter
response_incoming_size, GAUGE, Size in bytes of incoming response data to bandwidth limiter
response_allowed_size, GAUGE, Size in bytes of outgoing response data from bandwidth limiter
Expand Down
2 changes: 2 additions & 0 deletions docs/root/version_history/current.rst
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,8 @@ Minor Behavior Changes
----------------------
*Changes that may cause incompatibilities for some users, but should not for most*

* bandwidth_limit: added :ref:`response trailers <envoy_v3_api_field_extensions.filters.http.bandwidth_limit.v3.BandwidthLimit.enable_response_trailers>` when request or response delays are enforced.
* bandwidth_limit: added :ref:`bandwidth limit stats <config_http_filters_bandwidth_limit>` *request_enforced* and *response_enforced*.
* config: the log message for "gRPC config stream closed" now uses the most recent error message, and reports seconds instead of milliseconds for how long the most recent status has been received.
* dns: now respecting the returned DNS TTL for resolved hosts, rather than always relying on the hard-coded :ref:`dns_refresh_rate. <envoy_v3_api_field_config.cluster.v3.Cluster.dns_refresh_rate>` This behavior can be temporarily reverted by setting the runtime guard ``envoy.reloadable_features.use_dns_ttl`` to false.
* listener: destroy per network filter chain stats when a network filter chain is removed during the listener in place update.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,14 +16,33 @@ namespace Extensions {
namespace HttpFilters {
namespace BandwidthLimitFilter {

namespace {
// File-local defaults for the bandwidth-limit filter's response trailers.
// Default trailer name carrying the delay (in milliseconds) added to the request stream.
const Http::LowerCaseString DefaultRequestDelayTrailer =
Http::LowerCaseString("bandwidth-request-delay-ms");
// Default trailer name carrying the delay (in milliseconds) added to the response stream.
const Http::LowerCaseString DefaultResponseDelayTrailer =
Http::LowerCaseString("bandwidth-response-delay-ms");
// Zero-duration sentinel: delay trailers are only written for durations strictly greater
// than this value.
const std::chrono::milliseconds ZeroMilliseconds = std::chrono::milliseconds(0);
} // namespace

FilterConfig::FilterConfig(const BandwidthLimit& config, Stats::Scope& scope,
Runtime::Loader& runtime, TimeSource& time_source, bool per_route)
: runtime_(runtime), time_source_(time_source), enable_mode_(config.enable_mode()),
limit_kbps_(PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, limit_kbps, 0)),
fill_interval_(std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(
config, fill_interval, StreamRateLimiter::DefaultFillInterval.count()))),
enabled_(config.runtime_enabled(), runtime),
stats_(generateStats(config.stat_prefix(), scope)) {
stats_(generateStats(config.stat_prefix(), scope)),
request_delay_trailer_(
config.response_trailer_prefix().empty()
? DefaultRequestDelayTrailer
: Http::LowerCaseString(absl::StrCat(config.response_trailer_prefix(), "-",
DefaultRequestDelayTrailer.get()))),
response_delay_trailer_(
config.response_trailer_prefix().empty()
? DefaultResponseDelayTrailer
: Http::LowerCaseString(absl::StrCat(config.response_trailer_prefix(), "-",
DefaultResponseDelayTrailer.get()))),
enable_response_trailers_(config.enable_response_trailers()) {
if (per_route && !config.has_limit_kbps()) {
throw EnvoyException("bandwidthlimitfilter: limit must be set for per route filter config");
}
Expand Down Expand Up @@ -64,7 +83,12 @@ Http::FilterHeadersStatus BandwidthLimiter::decodeHeaders(Http::RequestHeaderMap
updateStatsOnDecodeFinish();
decoder_callbacks_->continueDecoding();
},
[config](uint64_t len) { config.stats().request_allowed_size_.set(len); },
[&config](uint64_t len, bool limit_enforced) {
config.stats().request_allowed_size_.set(len);
if (limit_enforced) {
config.stats().request_enforced_.inc();
}
},
const_cast<FilterConfig*>(&config)->timeSource(), decoder_callbacks_->dispatcher(),
decoder_callbacks_->scope(), config.tokenBucket(), config.fillInterval());
}
Expand Down Expand Up @@ -123,7 +147,12 @@ Http::FilterHeadersStatus BandwidthLimiter::encodeHeaders(Http::ResponseHeaderMa
updateStatsOnEncodeFinish();
encoder_callbacks_->continueEncoding();
},
[config](uint64_t len) { config.stats().response_allowed_size_.set(len); },
[&config](uint64_t len, bool limit_enforced) {
config.stats().response_allowed_size_.set(len);
if (limit_enforced) {
config.stats().response_enforced_.inc();
}
},
const_cast<FilterConfig*>(&config)->timeSource(), encoder_callbacks_->dispatcher(),
encoder_callbacks_->scope(), config.tokenBucket(), config.fillInterval());
}
Expand All @@ -135,6 +164,14 @@ Http::FilterDataStatus BandwidthLimiter::encodeData(Buffer::Instance& data, bool
if (response_limiter_ != nullptr) {
const auto& config = getConfig();

// Adds encoded trailers. May only be called in encodeData when end_stream is set to true.
// If upstream has trailers, addEncodedTrailers won't be called
bool trailer_added = false;
if (end_stream && config.enableResponseTrailers()) {
trailers_ = &encoder_callbacks_->addEncodedTrailers();
trailer_added = true;
}

if (!response_latency_) {
response_latency_ = std::make_unique<Stats::HistogramCompletableTimespanImpl>(
config.stats().response_transfer_duration_,
Expand All @@ -143,15 +180,18 @@ Http::FilterDataStatus BandwidthLimiter::encodeData(Buffer::Instance& data, bool
}
config.stats().response_incoming_size_.set(data.length());

response_limiter_->writeData(data, end_stream);
response_limiter_->writeData(data, end_stream, trailer_added);
return Http::FilterDataStatus::StopIterationNoBuffer;
}
ENVOY_LOG(debug, "BandwidthLimiter <encode data>: response_limiter not set");
return Http::FilterDataStatus::Continue;
}

Http::FilterTrailersStatus BandwidthLimiter::encodeTrailers(Http::ResponseTrailerMap&) {
Http::FilterTrailersStatus
BandwidthLimiter::encodeTrailers(Http::ResponseTrailerMap& response_trailers) {
if (response_limiter_ != nullptr) {
trailers_ = &response_trailers;

if (response_limiter_->onTrailers()) {
return Http::FilterTrailersStatus::StopIteration;
} else {
Expand All @@ -164,6 +204,7 @@ Http::FilterTrailersStatus BandwidthLimiter::encodeTrailers(Http::ResponseTraile

void BandwidthLimiter::updateStatsOnDecodeFinish() {
if (request_latency_) {
request_duration_ = request_latency_.get()->elapsed();
request_latency_->complete();
request_latency_.reset();
getConfig().stats().request_pending_.dec();
Expand All @@ -172,9 +213,22 @@ void BandwidthLimiter::updateStatsOnDecodeFinish() {

void BandwidthLimiter::updateStatsOnEncodeFinish() {
if (response_latency_) {
const auto& config = getConfig();

if (config.enableResponseTrailers() && trailers_ != nullptr) {
auto response_duration = response_latency_.get()->elapsed();
if (request_duration_ > ZeroMilliseconds) {
trailers_->setCopy(config.requestDelayTrailer(), std::to_string(request_duration_.count()));
}
if (response_duration > ZeroMilliseconds) {
trailers_->setCopy(config.responseDelayTrailer(),
std::to_string(response_duration.count()));
}
}

response_latency_->complete();
response_latency_.reset();
getConfig().stats().response_pending_.dec();
config.stats().response_pending_.dec();
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,8 @@ namespace BandwidthLimitFilter {
#define ALL_BANDWIDTH_LIMIT_STATS(COUNTER, GAUGE, HISTOGRAM) \
COUNTER(request_enabled) \
COUNTER(response_enabled) \
COUNTER(request_enforced) \
COUNTER(response_enforced) \
GAUGE(request_pending, Accumulate) \
GAUGE(response_pending, Accumulate) \
GAUGE(request_incoming_size, Accumulate) \
Expand Down Expand Up @@ -70,6 +72,9 @@ class FilterConfig : public ::Envoy::Router::RouteSpecificFilterConfig {
EnableMode enableMode() const { return enable_mode_; };
const std::shared_ptr<SharedTokenBucketImpl> tokenBucket() const { return token_bucket_; }
std::chrono::milliseconds fillInterval() const { return fill_interval_; }
const Http::LowerCaseString& requestDelayTrailer() const { return request_delay_trailer_; }
const Http::LowerCaseString& responseDelayTrailer() const { return response_delay_trailer_; }
bool enableResponseTrailers() const { return enable_response_trailers_; }

private:
friend class FilterTest;
Expand All @@ -85,6 +90,9 @@ class FilterConfig : public ::Envoy::Router::RouteSpecificFilterConfig {
mutable BandwidthLimitStats stats_;
// Filter chain's shared token bucket
std::shared_ptr<SharedTokenBucketImpl> token_bucket_;
const Http::LowerCaseString request_delay_trailer_;
const Http::LowerCaseString response_delay_trailer_;
const bool enable_response_trailers_;
};

using FilterConfigSharedPtr = std::shared_ptr<FilterConfig>;
Expand Down Expand Up @@ -141,6 +149,8 @@ class BandwidthLimiter : public Http::StreamFilter, Logger::Loggable<Logger::Id:
std::unique_ptr<Envoy::Extensions::HttpFilters::Common::StreamRateLimiter> response_limiter_;
Stats::TimespanPtr request_latency_;
Stats::TimespanPtr response_latency_;
std::chrono::milliseconds request_duration_;
Http::ResponseTrailerMap* trailers_;
};

} // namespace BandwidthLimitFilter
Expand Down
14 changes: 11 additions & 3 deletions source/extensions/filters/http/common/stream_rate_limiter.cc
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ StreamRateLimiter::StreamRateLimiter(
uint64_t max_kbps, uint64_t max_buffered_data, std::function<void()> pause_data_cb,
std::function<void()> resume_data_cb,
std::function<void(Buffer::Instance&, bool)> write_data_cb, std::function<void()> continue_cb,
std::function<void(uint64_t)> write_stats_cb, TimeSource& time_source,
std::function<void(uint64_t, bool)> write_stats_cb, TimeSource& time_source,
Event::Dispatcher& dispatcher, const ScopeTrackedObject& scope,
std::shared_ptr<TokenBucket> token_bucket, std::chrono::milliseconds fill_interval)
: fill_interval_(std::move(fill_interval)), write_data_cb_(write_data_cb),
Expand Down Expand Up @@ -63,7 +63,7 @@ void StreamRateLimiter::onTokenTimer() {
// Move the data to write into the output buffer with as little copying as possible.
// NOTE: This might be moving zero bytes, but that should work fine.
data_to_write.move(buffer_, bytes_to_write);
write_stats_cb_(bytes_to_write);
write_stats_cb_(bytes_to_write, buffer_.length() > 0);

// If the buffer still contains data in it, we couldn't get enough tokens, so schedule the next
// token available time.
Expand All @@ -88,10 +88,18 @@ void StreamRateLimiter::onTokenTimer() {
}
}

void StreamRateLimiter::writeData(Buffer::Instance& incoming_buffer, bool end_stream) {
void StreamRateLimiter::writeData(Buffer::Instance& incoming_buffer, bool end_stream,
bool trailer_added) {
auto len = incoming_buffer.length();
buffer_.move(incoming_buffer);
saw_end_stream_ = end_stream;
// If trailer_added is true, set saw_trailers_ to true to continue encode trailers, added
// after buffer_.move to ensure buffer has data and won't invoke continue_cb_ before
// processing the data in last data frame.
if (trailer_added) {
saw_trailers_ = true;
}

ENVOY_LOG(debug,
"StreamRateLimiter <writeData>: got new {} bytes of data. token "
"timer {} scheduled.",
Expand Down
10 changes: 5 additions & 5 deletions source/extensions/filters/http/common/stream_rate_limiter.h
Original file line number Diff line number Diff line change
Expand Up @@ -49,17 +49,17 @@ class StreamRateLimiter : Logger::Loggable<Logger::Id::filter> {
StreamRateLimiter(uint64_t max_kbps, uint64_t max_buffered_data,
std::function<void()> pause_data_cb, std::function<void()> resume_data_cb,
std::function<void(Buffer::Instance&, bool)> write_data_cb,
std::function<void()> continue_cb, std::function<void(uint64_t)> write_stats_cb,
TimeSource& time_source, Event::Dispatcher& dispatcher,
const ScopeTrackedObject& scope,
std::function<void()> continue_cb,
std::function<void(uint64_t, bool)> write_stats_cb, TimeSource& time_source,
Event::Dispatcher& dispatcher, const ScopeTrackedObject& scope,
std::shared_ptr<TokenBucket> token_bucket = nullptr,
std::chrono::milliseconds fill_interval = DefaultFillInterval);

/**
* Called by the stream to write data. All data writes happen asynchronously, the stream should
* be stopped after this call (all data will be drained from incoming_buffer).
*/
void writeData(Buffer::Instance& incoming_buffer, bool end_stream);
void writeData(Buffer::Instance& incoming_buffer, bool end_stream, bool trailer_added = false);

/**
* Called if the stream receives trailers.
Expand All @@ -83,7 +83,7 @@ class StreamRateLimiter : Logger::Loggable<Logger::Id::filter> {
const std::chrono::milliseconds fill_interval_;
const std::function<void(Buffer::Instance&, bool)> write_data_cb_;
const std::function<void()> continue_cb_;
const std::function<void(uint64_t)> write_stats_cb_;
const std::function<void(uint64_t, bool)> write_stats_cb_;
const ScopeTrackedObject& scope_;
std::shared_ptr<TokenBucket> token_bucket_;
Event::TimerPtr token_timer_;
Expand Down
2 changes: 1 addition & 1 deletion source/extensions/filters/http/fault/fault_filter.cc
Original file line number Diff line number Diff line change
Expand Up @@ -212,7 +212,7 @@ void FaultFilter::maybeSetupResponseRateLimit(const Http::RequestHeaderMap& requ
encoder_callbacks_->injectEncodedDataToFilterChain(data, end_stream);
},
[this] { encoder_callbacks_->continueEncoding(); },
[](uint64_t) {
[](uint64_t, bool) {
// write stats callback.
},
config_->timeSource(), decoder_callbacks_->dispatcher(), decoder_callbacks_->scope());
Expand Down
15 changes: 14 additions & 1 deletion test/extensions/filters/http/bandwidth_limit/config_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,8 @@ TEST(Factory, RouteSpecificFilterConfig) {
enable_mode: REQUEST_AND_RESPONSE
limit_kbps: 10
fill_interval: 0.1s
enable_response_trailers: true
response_trailer_prefix: test
)";

BandwidthLimitFilterConfig factory;
Expand All @@ -53,6 +55,11 @@ TEST(Factory, RouteSpecificFilterConfig) {
EXPECT_EQ(config->fillInterval().count(), 100);
EXPECT_EQ(config->enableMode(), EnableMode::BandwidthLimit_EnableMode_REQUEST_AND_RESPONSE);
EXPECT_FALSE(config->tokenBucket() == nullptr);
EXPECT_EQ(config->enableResponseTrailers(), true);
EXPECT_EQ(const_cast<FilterConfig*>(config)->requestDelayTrailer(),
Http::LowerCaseString("test-bandwidth-request-delay-ms"));
EXPECT_EQ(const_cast<FilterConfig*>(config)->responseDelayTrailer(),
Http::LowerCaseString("test-bandwidth-response-delay-ms"));
}

TEST(Factory, RouteSpecificFilterConfigDisabledByDefault) {
Expand All @@ -77,7 +84,7 @@ TEST(Factory, RouteSpecificFilterConfigDisabledByDefault) {
EXPECT_EQ(config->fillInterval().count(), 100);
}

TEST(Factory, RouteSpecificFilterConfigDefaultFillInterval) {
TEST(Factory, RouteSpecificFilterConfigDefault) {
const std::string config_yaml = R"(
stat_prefix: test
enable_mode: REQUEST_AND_RESPONSE
Expand All @@ -96,6 +103,12 @@ TEST(Factory, RouteSpecificFilterConfigDefaultFillInterval) {
const auto* config = dynamic_cast<const FilterConfig*>(route_config.get());
EXPECT_EQ(config->limit(), 10);
EXPECT_EQ(config->fillInterval().count(), 50);
// default trailers
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can you also add tests for the enable_response_trailers config value, please?

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

added, thanks.

EXPECT_EQ(config->enableResponseTrailers(), false);
EXPECT_EQ(const_cast<FilterConfig*>(config)->requestDelayTrailer(),
Http::LowerCaseString("bandwidth-request-delay-ms"));
EXPECT_EQ(const_cast<FilterConfig*>(config)->responseDelayTrailer(),
Http::LowerCaseString("bandwidth-response-delay-ms"));
}

TEST(Factory, PerRouteConfigNoLimits) {
Expand Down
Loading