Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CODEOWNERS
Original file line number Diff line number Diff line change
Expand Up @@ -106,3 +106,4 @@ extensions/filters/common/original_src @snowp @klarose
/*/extensions/filters/network/local_ratelimit @mattklein123 @junr03
/*/extensions/filters/http/aws_request_signing @rgs1 @derekargueta @mattklein123 @marcomagdy
/*/extensions/filters/http/aws_lambda @mattklein123 @marcomagdy @lavignes
/*/extensions/filters/http/local_ratelimit @rgs1 @mattklein123
2 changes: 2 additions & 0 deletions api/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@ proto_library(
"//envoy/config/filter/http/health_check/v2:pkg",
"//envoy/config/filter/http/ip_tagging/v2:pkg",
"//envoy/config/filter/http/jwt_authn/v2alpha:pkg",
"//envoy/config/filter/http/local_rate_limit/v2:pkg",
"//envoy/config/filter/http/lua/v2:pkg",
"//envoy/config/filter/http/on_demand/v2:pkg",
"//envoy/config/filter/http/original_src/v2alpha1:pkg",
Expand Down Expand Up @@ -187,6 +188,7 @@ proto_library(
"//envoy/extensions/filters/http/health_check/v3:pkg",
"//envoy/extensions/filters/http/ip_tagging/v3:pkg",
"//envoy/extensions/filters/http/jwt_authn/v3:pkg",
"//envoy/extensions/filters/http/local_ratelimit/v3:pkg",
"//envoy/extensions/filters/http/lua/v3:pkg",
"//envoy/extensions/filters/http/on_demand/v3:pkg",
"//envoy/extensions/filters/http/original_src/v3:pkg",
Expand Down
13 changes: 13 additions & 0 deletions api/envoy/config/filter/http/local_rate_limit/v2/BUILD
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# DO NOT EDIT. This file is generated by tools/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"]) # Apache 2

api_proto_package(
deps = [
"//envoy/api/v2/core:pkg",
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

v2 is now frozen, so please add directly to v3, thanks.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

ah ok.

"//envoy/type:pkg",
"@com_github_cncf_udpa//udpa/annotations:pkg",
],
)
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
syntax = "proto3";

package envoy.config.filter.http.local_rate_limit.v2;

import "envoy/api/v2/core/base.proto";
import "envoy/type/http_status.proto";
import "envoy/type/token_bucket.proto";

import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.filter.http.local_rate_limit.v2";
option java_outer_classname = "LocalRateLimitProto";
option java_multiple_files = true;
option (udpa.annotations.file_migrate).move_to_package =
"envoy.extensions.filters.http.local_ratelimit.v3";
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: Local Rate limit]
// Local Rate limit :ref:`configuration overview <config_http_filters_local_rate_limit>`.
// [#extension: envoy.filters.http.local_ratelimit]

message LocalRateLimit {
// This field allows sending an HTTP response status code other than 429 (TooManyRequests)
// to the downstream client when the request has been rate limited.
type.HttpStatus status = 1;

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I have some questions and discussed by #10813.

// The token bucket configuration to use for rate limiting requests that are processed by this
// filter. Each request processed by the filter consumes a single token. If the token is available,
// the request will be allowed. If no tokens are available, the request will receive the configured
// rate limit status.
//
// .. note::
// It's fine for the token bucket to not be set for the global configuration, but it must be set
// for the per route configuration.
//
// .. note::
// When using per route configuration, the bucket becomes unique to that route.
//
// .. note::
// In the current implementation the token bucket's :ref:`fill_interval
// <envoy_api_field_type.TokenBucket.fill_interval>` must be >= 50ms to avoid too aggressive
// refills.
type.TokenBucket token_bucket = 2;

// This uniquely identifies a specific local rate limit configuration (e.g.: when using per route
// rate limiters). It will be used to construct the
// :ref:`runtime keys <config_http_filters_local_rate_limit_runtime>` that enable and enforce
// the corresponding local rate limiter.
string route_key = 3 [(validate.rules).string = {min_bytes: 1}];
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should we just use RuntimeFractionalPercent or RuntimeFeatureFlag here or similar?


// Specifies a list of HTTP headers that should be added to each response for requests that
// have been rate limited.
repeated api.v2.core.HeaderValueOption response_headers_to_add = 4
[(validate.rules).repeated = {max_items: 10}];
}
14 changes: 14 additions & 0 deletions api/envoy/extensions/filters/http/local_ratelimit/v3/BUILD
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
# DO NOT EDIT. This file is generated by tools/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"]) # Apache 2

api_proto_package(
deps = [
"//envoy/config/core/v3:pkg",
"//envoy/config/filter/http/local_rate_limit/v2:pkg",
"//envoy/type/v3:pkg",
"@com_github_cncf_udpa//udpa/annotations:pkg",
],
)
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
syntax = "proto3";

package envoy.extensions.filters.http.local_ratelimit.v3;

import "envoy/config/core/v3/base.proto";
import "envoy/type/v3/http_status.proto";
import "envoy/type/v3/token_bucket.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.local_ratelimit.v3";
option java_outer_classname = "LocalRateLimitProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Local Rate limit]
// Local Rate limit :ref:`configuration overview <config_http_filters_local_rate_limit>`.
// [#extension: envoy.filters.http.local_ratelimit]

// [#next-free-field: 11]
message LocalRateLimit {
option (udpa.annotations.versioning).previous_message_type =
"envoy.config.filter.http.local_rate_limit.v2.LocalRateLimit";

// This field allows sending an HTTP response status code other than 429 (TooManyRequests)
// to the downstream client when the request has been rate limited.
type.v3.HttpStatus status = 1;
Comment on lines +27 to +29
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It's a possible scenario that the same rate limiter is applied to both grpc and http traffic (e.g. in case of transcoding or virtual_host-level rate limiter).
In this scenarios the status code options would conflict (as gRPC mandates the usage of 200 code).

Should grpc be introduced as first class citizen or are there other options?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You mean, a same route that does both HTTP and gRPC?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, few examples:

  1. There is a single gRPC route /service.Service/Method but it has envoy.filters.http.grpc_json_transcoder filter enabled. It allows the same route to serve both HTTP and gRPC traffic (all of it is converted to gRPC before going to upstream). Transcoding itself is potentially pricey (it requires buffering the whole request and is ≈reflection based), so one may want to put rate limiter before it.
  2. Rate limiter is set for the whole VirtualHost, virtual host can serve traffic for different variations of traffic, e.g. it can have few gRPC and few HTTP upstreams.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

So we'd want to be able to configure an HTTP and a gRPC status code -- OK. Do we do this anywhere else in the API?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Not anywhere today, though see #10841 where we are adding gRPC fault codes which is somewhat similar.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sorry, Whether the status field could be set to universal? Then the rate-limit filter module convert status-code depends on the requeste protocol. Here is a discuss #10813 .


// The token bucket configuration to use for rate limiting requests that are processed by this
// filter. Each request processed by the filter consumes a single token. If the token is available,
// the request will be allowed. If no tokens are available, the request will receive the configured
// rate limit status.
//
// .. note::
// It's fine for the token bucket to not be set for the global configuration, but it must be set
// for the per route configuration.
//
// .. note::
// When using per route configuration, the bucket becomes unique to that route.
//
// .. note::
// In the current implementation the token bucket's :ref:`fill_interval
// <envoy_api_field_type.v3.TokenBucket.fill_interval>` must be >= 50ms to avoid too aggressive
// refills.
type.v3.TokenBucket token_bucket = 2;

// This uniquely identifies a specific local rate limit configuration (e.g.: when using per route
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I thought the convention was that per-filter configs use the filter name as their key, so we don't technically need this.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is different -- lets say you are configuring local rate limiters on two different routes (/foo and /bar). This allows you to configure individual runtime keys for each route, so you can enable/enforce separately.

Is there a better way to distinguish the per-filter config of multiple routes/vhosts for things like runtime gatekeepers?

// rate limiters). It will be used to construct the
// :ref:`runtime keys <config_http_filters_local_rate_limit_runtime>` that enable and enforce
// the corresponding local rate limiter.
string route_key = 3 [(validate.rules).string = {min_bytes: 1}];

// Specifies a list of HTTP headers that should be added to each response for requests that
// have been rate limited.
repeated config.core.v3.HeaderValueOption response_headers_to_add = 10
[(validate.rules).repeated = {max_items: 10}];
}
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Did you consider making this a variation on the existing rate limit filter, with some additional config for local response? Asking since it might be the case that there is a bunch of shared config, e.g. request_type, failure_mode_deny.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I did, but ended up concluding that there is much more in common with the network local rate limiter (than the existing HTTP global rate limiter).

@mattklein123 thoughts?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think originally had a similar inclination to @htuch about building this into the existing rate limit filter, but seeing this config it's simpler enough that I can buy having a separate filter for it. The only thing that would give me pause is whether someone is going to want to match on different types of things (descriptors) and whether we might be better served by constructing a descriptor as a key to a local token bucket? I'm not sure if this is worth it or not but just throwing it out there and I obviously didn't bother doing this for the network rate limit case (albeit a much simpler case). Thoughts?

2 changes: 2 additions & 0 deletions api/versioning/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,7 @@ proto_library(
"//envoy/extensions/filters/http/health_check/v3:pkg",
"//envoy/extensions/filters/http/ip_tagging/v3:pkg",
"//envoy/extensions/filters/http/jwt_authn/v3:pkg",
"//envoy/extensions/filters/http/local_ratelimit/v3:pkg",
"//envoy/extensions/filters/http/lua/v3:pkg",
"//envoy/extensions/filters/http/on_demand/v3:pkg",
"//envoy/extensions/filters/http/original_src/v3:pkg",
Expand Down Expand Up @@ -177,6 +178,7 @@ proto_library(
"//envoy/config/filter/http/health_check/v2:pkg",
"//envoy/config/filter/http/ip_tagging/v2:pkg",
"//envoy/config/filter/http/jwt_authn/v2alpha:pkg",
"//envoy/config/filter/http/local_rate_limit/v2:pkg",
"//envoy/config/filter/http/lua/v2:pkg",
"//envoy/config/filter/http/on_demand/v2:pkg",
"//envoy/config/filter/http/original_src/v2alpha1:pkg",
Expand Down
1 change: 1 addition & 0 deletions docs/root/configuration/http/http_filters/http_filters.rst
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ HTTP filters
header_to_metadata_filter
ip_tagging_filter
jwt_authn_filter
local_rate_limit_filter
lua_filter
on_demand_updates_filter
original_src_filter
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
.. _config_http_filters_local_rate_limit:

Local rate limit
================

* Local rate limiting :ref:`architecture overview <arch_overview_local_rate_limit>`
* :ref:`v2 API reference <envoy_api_msg_config.filter.http.local_rate_limit.v2.LocalRateLimit>`
* This filter should be configured with the name *envoy.filters.http.local_ratelimit*.

The HTTP local rate limit filter applies a :ref:`token bucket
<envoy_api_field_config.filter.http.local_rate_limit.v2.LocalRateLimit.token_bucket>` rate
limit when the request's route or virtual host has a per filter
:ref:`local rate limit configuration <envoy_api_msg_config.filter.http.local_rate_limit.v2.LocalRateLimit>`.

If the local rate limit token bucket is checked and there are no tokens available, a 429 response is returned
(the response status code is configurable). The local rate limit filter also sets the
:ref:`x-envoy-ratelimited<config_http_filters_router_x-envoy-ratelimited>` header. Additional response
headers may be configured.

Statistics
----------

The local rate limit filter outputs statistics in the *cluster.<route target cluster>.local_ratelimit.* namespace.
429 responses -- or the configured status code -- are emitted to the normal cluster :ref:`dynamic HTTP statistics
<config_cluster_manager_cluster_stats_dynamic_http>`.
Comment on lines +23 to +25
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think you might want an optional stat prefix here. If you start having multiple per-route configs, it would be very unclear which limit is being hit. Also, I would make sure we are setting the RL access log code also.


.. csv-table::
:header: Name, Type, Description
:widths: 1, 1, 2

ok, Counter, Total under limit responses from the token bucket
over_limit, Counter, Total responses without an available token

.. _config_http_filters_local_rate_limit_runtime:

Runtime
-------

The HTTP local rate limit filter supports the following runtime settings:

local_ratelimit.<route_key>.http_filter_enabled
% of requests that will check the local rate limit decision, but not enforce it, for a given *route_key* specified
in the :ref:`local rate limit configuration <envoy_api_msg_config.filter.http.local_rate_limit.v2.LocalRateLimit>`.
Defaults to 100.

local_ratelimit.<route_key>.http_filter_enforcing
% of requests that will enforce the local rate limit decision for a given *route_key* specified in the
:ref:`local rate limit configuration <envoy_api_msg_config.filter.http.local_rate_limit.v2.LocalRateLimit>`.
Defaults to 100. This can be used to test what would happen before fully enforcing the outcome.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions source/extensions/extensions_build_config.bzl
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,7 @@ EXTENSIONS = {
"envoy.filters.http.health_check": "//source/extensions/filters/http/health_check:config",
"envoy.filters.http.ip_tagging": "//source/extensions/filters/http/ip_tagging:config",
"envoy.filters.http.jwt_authn": "//source/extensions/filters/http/jwt_authn:config",
"envoy.filters.http.local_ratelimit": "//source/extensions/filters/http/local_ratelimit:config",
"envoy.filters.http.lua": "//source/extensions/filters/http/lua:config",
"envoy.filters.http.on_demand": "//source/extensions/filters/http/on_demand:config",
"envoy.filters.http.original_src": "//source/extensions/filters/http/original_src:config",
Expand Down
Loading