From e9f62348d72dab127a67dfb6a48eef71965a2367 Mon Sep 17 00:00:00 2001 From: shadi-altarsha Date: Mon, 24 Jul 2023 15:38:09 +0200 Subject: [PATCH 01/16] Add VhRateLimits option Signed-off-by: shadi-altarsha --- apis/projectcontour/v1/httpproxy.go | 29 +- .../v1/zz_generated.deepcopy.go | 79 ++-- examples/contour/01-crds.yaml | 7 + examples/render/contour-deployment.yaml | 7 + .../render/contour-gateway-provisioner.yaml | 7 + examples/render/contour-gateway.yaml | 7 + examples/render/contour.yaml | 7 + internal/dag/dag.go | 21 + internal/dag/httpproxy_processor.go | 38 +- internal/dag/httpproxy_processor_test.go | 12 +- internal/dag/policy.go | 18 +- internal/dag/policy_test.go | 385 +++++++++++++++++- .../featuretests/v3/globalratelimit_test.go | 19 +- .../featuretests/v3/localratelimit_test.go | 16 +- .../docs/main/config/api-reference.html | 189 ++++++--- .../default_global_rate_limiting_test.go | 8 +- .../httpproxy/global_rate_limiting_test.go | 8 +- .../e2e/httpproxy/local_rate_limiting_test.go | 4 +- 18 files changed, 712 insertions(+), 149 deletions(-) diff --git a/apis/projectcontour/v1/httpproxy.go b/apis/projectcontour/v1/httpproxy.go index db77734597f..d2b83972219 100644 --- a/apis/projectcontour/v1/httpproxy.go +++ b/apis/projectcontour/v1/httpproxy.go @@ -322,7 +322,7 @@ type VirtualHost struct { CORSPolicy *CORSPolicy `json:"corsPolicy,omitempty"` // The policy for rate limiting on the virtual host. // +optional - RateLimitPolicy *RateLimitPolicy `json:"rateLimitPolicy,omitempty"` + RateLimitPolicy *VhostRateLimitPolicy `json:"rateLimitPolicy,omitempty"` // Providers to use for verifying JSON Web Tokens (JWTs) on the virtual host. // +optional JWTProviders []JWTProvider `json:"jwtProviders,omitempty"` @@ -564,7 +564,7 @@ type Route struct { CookieRewritePolicies []CookieRewritePolicy `json:"cookieRewritePolicies,omitempty"` // The policy for rate limiting on the route. 
// +optional - RateLimitPolicy *RateLimitPolicy `json:"rateLimitPolicy,omitempty"` + RateLimitPolicy *RouteRateLimitPolicy `json:"rateLimitPolicy,omitempty"` // RequestRedirectPolicy defines an HTTP redirection. // +optional @@ -785,8 +785,8 @@ type CookieDomainRewrite struct { Value string `json:"value"` } -// RateLimitPolicy defines rate limiting parameters. -type RateLimitPolicy struct { +// VhostRateLimitPolicy defines rate limiting parameters on the virtual host level. +type VhostRateLimitPolicy struct { // Local defines local rate limiting parameters, i.e. parameters // for rate limiting that occurs within each Envoy pod as requests // are handled. @@ -800,6 +800,27 @@ type RateLimitPolicy struct { Global *GlobalRateLimitPolicy `json:"global,omitempty"` } +// RouteRateLimitPolicy defines rate limiting parameters on the route-level. +type RouteRateLimitPolicy struct { + // Local defines local rate limiting parameters, i.e. parameters + // for rate limiting that occurs within each Envoy pod as requests + // are handled. + // +optional + Local *LocalRateLimitPolicy `json:"local,omitempty"` + + // Global defines global rate limiting parameters, i.e. parameters + // defining descriptors that are sent to an external rate limit + // service (RLS) for a rate limit decision on each request. + // +optional + Global *GlobalRateLimitPolicy `json:"global,omitempty"` + + // VhRateLimits defines how the route should handle rate limits defined by the virtual host. + // Default value is Override, which means use the virtual host rate limits unless the route has a rate limit policy. + // Allowed values are: Override, Include, or ignore. + // +optional + VhRateLimits string `json:"vhRateLimits,omitempty"` +} + // LocalRateLimitPolicy defines local rate limiting parameters. 
type LocalRateLimitPolicy struct { // Requests defines how many requests per unit of time should diff --git a/apis/projectcontour/v1/zz_generated.deepcopy.go b/apis/projectcontour/v1/zz_generated.deepcopy.go index 3c3537ef24e..7a16deba294 100644 --- a/apis/projectcontour/v1/zz_generated.deepcopy.go +++ b/apis/projectcontour/v1/zz_generated.deepcopy.go @@ -873,31 +873,6 @@ func (in *RateLimitDescriptorEntry) DeepCopy() *RateLimitDescriptorEntry { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RateLimitPolicy) DeepCopyInto(out *RateLimitPolicy) { - *out = *in - if in.Local != nil { - in, out := &in.Local, &out.Local - *out = new(LocalRateLimitPolicy) - (*in).DeepCopyInto(*out) - } - if in.Global != nil { - in, out := &in.Global, &out.Global - *out = new(GlobalRateLimitPolicy) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RateLimitPolicy. -func (in *RateLimitPolicy) DeepCopy() *RateLimitPolicy { - if in == nil { - return nil - } - out := new(RateLimitPolicy) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RemoteAddressDescriptor) DeepCopyInto(out *RemoteAddressDescriptor) { *out = *in @@ -1099,7 +1074,7 @@ func (in *Route) DeepCopyInto(out *Route) { } if in.RateLimitPolicy != nil { in, out := &in.RateLimitPolicy, &out.RateLimitPolicy - *out = new(RateLimitPolicy) + *out = new(RouteRateLimitPolicy) (*in).DeepCopyInto(*out) } if in.RequestRedirectPolicy != nil { @@ -1144,6 +1119,31 @@ func (in *Route) DeepCopy() *Route { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RouteRateLimitPolicy) DeepCopyInto(out *RouteRateLimitPolicy) { + *out = *in + if in.Local != nil { + in, out := &in.Local, &out.Local + *out = new(LocalRateLimitPolicy) + (*in).DeepCopyInto(*out) + } + if in.Global != nil { + in, out := &in.Global, &out.Global + *out = new(GlobalRateLimitPolicy) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteRateLimitPolicy. +func (in *RouteRateLimitPolicy) DeepCopy() *RouteRateLimitPolicy { + if in == nil { + return nil + } + out := new(RouteRateLimitPolicy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Service) DeepCopyInto(out *Service) { *out = *in @@ -1446,6 +1446,31 @@ func (in *UpstreamValidation) DeepCopy() *UpstreamValidation { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VhostRateLimitPolicy) DeepCopyInto(out *VhostRateLimitPolicy) { + *out = *in + if in.Local != nil { + in, out := &in.Local, &out.Local + *out = new(LocalRateLimitPolicy) + (*in).DeepCopyInto(*out) + } + if in.Global != nil { + in, out := &in.Global, &out.Global + *out = new(GlobalRateLimitPolicy) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VhostRateLimitPolicy. +func (in *VhostRateLimitPolicy) DeepCopy() *VhostRateLimitPolicy { + if in == nil { + return nil + } + out := new(VhostRateLimitPolicy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *VirtualHost) DeepCopyInto(out *VirtualHost) { *out = *in @@ -1466,7 +1491,7 @@ func (in *VirtualHost) DeepCopyInto(out *VirtualHost) { } if in.RateLimitPolicy != nil { in, out := &in.RateLimitPolicy, &out.RateLimitPolicy - *out = new(RateLimitPolicy) + *out = new(VhostRateLimitPolicy) (*in).DeepCopyInto(*out) } if in.JWTProviders != nil { diff --git a/examples/contour/01-crds.yaml b/examples/contour/01-crds.yaml index 3e81d9fce04..afe71cef2e4 100644 --- a/examples/contour/01-crds.yaml +++ b/examples/contour/01-crds.yaml @@ -5951,6 +5951,13 @@ spec: - requests - unit type: object + vhRateLimits: + description: 'VhRateLimits defines how the route should + handle rate limits defined by the virtual host. Default + value is Override, which means use the virtual host rate + limits unless the route has a rate limit policy. Allowed + values are: Override, Include, or ignore.' + type: string type: object requestHeadersPolicy: description: The policy for managing request headers during diff --git a/examples/render/contour-deployment.yaml b/examples/render/contour-deployment.yaml index 862ceca2f1c..a8f2bddc61e 100644 --- a/examples/render/contour-deployment.yaml +++ b/examples/render/contour-deployment.yaml @@ -6170,6 +6170,13 @@ spec: - requests - unit type: object + vhRateLimits: + description: 'VhRateLimits defines how the route should + handle rate limits defined by the virtual host. Default + value is Override, which means use the virtual host rate + limits unless the route has a rate limit policy. Allowed + values are: Override, Include, or ignore.' 
+ type: string type: object requestHeadersPolicy: description: The policy for managing request headers during diff --git a/examples/render/contour-gateway-provisioner.yaml b/examples/render/contour-gateway-provisioner.yaml index 0978046bb73..1fa56522638 100644 --- a/examples/render/contour-gateway-provisioner.yaml +++ b/examples/render/contour-gateway-provisioner.yaml @@ -5962,6 +5962,13 @@ spec: - requests - unit type: object + vhRateLimits: + description: 'VhRateLimits defines how the route should + handle rate limits defined by the virtual host. Default + value is Override, which means use the virtual host rate + limits unless the route has a rate limit policy. Allowed + values are: Override, Include, or ignore.' + type: string type: object requestHeadersPolicy: description: The policy for managing request headers during diff --git a/examples/render/contour-gateway.yaml b/examples/render/contour-gateway.yaml index e457342a634..b32c0d6939e 100644 --- a/examples/render/contour-gateway.yaml +++ b/examples/render/contour-gateway.yaml @@ -6173,6 +6173,13 @@ spec: - requests - unit type: object + vhRateLimits: + description: 'VhRateLimits defines how the route should + handle rate limits defined by the virtual host. Default + value is Override, which means use the virtual host rate + limits unless the route has a rate limit policy. Allowed + values are: Override, Include, or ignore.' + type: string type: object requestHeadersPolicy: description: The policy for managing request headers during diff --git a/examples/render/contour.yaml b/examples/render/contour.yaml index 9991783a76b..31ca9ca6075 100644 --- a/examples/render/contour.yaml +++ b/examples/render/contour.yaml @@ -6170,6 +6170,13 @@ spec: - requests - unit type: object + vhRateLimits: + description: 'VhRateLimits defines how the route should + handle rate limits defined by the virtual host. Default + value is Override, which means use the virtual host rate + limits unless the route has a rate limit policy. 
Allowed + values are: Override, Include, or ignore.' + type: string type: object requestHeadersPolicy: description: The policy for managing request headers during diff --git a/internal/dag/dag.go b/internal/dag/dag.go index 09918e17b21..c6d6a62c91c 100644 --- a/internal/dag/dag.go +++ b/internal/dag/dag.go @@ -335,6 +335,9 @@ type Route struct { // RateLimitPolicy defines if/how requests for the route are rate limited. RateLimitPolicy *RateLimitPolicy + // RateLimitPerRoute defines how the route should handle rate limits defined by the virtual host. + RateLimitPerRoute *RateLimitPerRoute + // RequestHashPolicies is a list of policies for configuring hashes on // request attributes. RequestHashPolicies []RequestHashPolicy @@ -571,6 +574,24 @@ type HeaderValueMatchDescriptorEntry struct { Value string } +type VhRateLimitsType int + +const ( + // VhRateLimitsOverride (Default) will use the virtual host rate limits unless the route has a rate limit policy. + VhRateLimitsOverride VhRateLimitsType = iota + + // VhRateLimitsInclude will use the virtual host rate limits even if the route has a rate limit policy. + VhRateLimitsInclude + + // VhRateLimitsIgnore will ignore the virtual host rate limits even if the route does not have a rate limit policy. + VhRateLimitsIgnore +) + +// RateLimitPerRoute configures how the route should handle the rate limits defined by the virtual host. +type RateLimitPerRoute struct { + VhRateLimits VhRateLimitsType +} + // RemoteAddressDescriptorEntry configures a descriptor entry // that contains the remote address (i.e. client IP). 
type RemoteAddressDescriptorEntry struct{} diff --git a/internal/dag/httpproxy_processor.go b/internal/dag/httpproxy_processor.go index e2b2ddbfa1c..ebcebe96f52 100644 --- a/internal/dag/httpproxy_processor.go +++ b/internal/dag/httpproxy_processor.go @@ -791,13 +791,20 @@ func (p *HTTPProxyProcessor) computeRoutes( return nil } - rlp, err := rateLimitPolicy(route.RateLimitPolicy) + rlp, err := routeRateLimitPolicy(route.RateLimitPolicy) if err != nil { validCond.AddErrorf(contour_api_v1.ConditionTypeRouteError, "RateLimitPolicyNotValid", "route.rateLimitPolicy is invalid: %s", err) return nil } + vrl, err := rateLimitPerRoute(route.RateLimitPolicy) + if err != nil { + validCond.AddErrorf(contour_api_v1.ConditionTypeRouteError, "RateLimitPerRouteNotValid", + "route.rateLimitPerRoute is invalid: %s", err) + return nil + } + requestHashPolicies, lbPolicy := loadBalancerRequestHashPolicies(route.LoadBalancerPolicy, validCond) redirectPolicy, err := redirectRoutePolicy(route.RequestRedirectPolicy) @@ -807,7 +814,7 @@ func (p *HTTPProxyProcessor) computeRoutes( return nil } - internalRedirectPolicy := internalRedirectPolicy(route.InternalRedirectPolicy) + irp := internalRedirectPolicy(route.InternalRedirectPolicy) directPolicy := directResponsePolicy(route.DirectResponsePolicy) @@ -823,10 +830,11 @@ func (p *HTTPProxyProcessor) computeRoutes( ResponseHeadersPolicy: respHP, CookieRewritePolicies: cookieRP, RateLimitPolicy: rlp, + RateLimitPerRoute: vrl, RequestHashPolicies: requestHashPolicies, Redirect: redirectPolicy, DirectResponse: directPolicy, - InternalRedirectPolicy: internalRedirectPolicy, + InternalRedirectPolicy: irp, } if p.SetSourceMetadataOnRoutes { @@ -1424,7 +1432,7 @@ func (p *HTTPProxyProcessor) computeSecureVirtualHostAuthorization(validCond *co } func computeVirtualHostRateLimitPolicy(proxy *contour_api_v1.HTTPProxy, rls *contour_api_v1alpha1.RateLimitServiceConfig, validCond *contour_api_v1.DetailedCondition) (*RateLimitPolicy, bool) { - rlp, err := 
rateLimitPolicy(proxy.Spec.VirtualHost.RateLimitPolicy) + rlp, err := vhostRateLimitPolicy(proxy.Spec.VirtualHost.RateLimitPolicy) if err != nil { validCond.AddErrorf(contour_api_v1.ConditionTypeVirtualHostError, "RateLimitPolicyNotValid", "Spec.VirtualHost.RateLimitPolicy is invalid: %s", err) @@ -1968,3 +1976,25 @@ func slowStartConfig(slowStart *contour_api_v1.SlowStartPolicy) (*SlowStartConfi MinWeightPercent: slowStart.MinimumWeightPercent, }, nil } + +func rateLimitPerRoute(in *contour_api_v1.RouteRateLimitPolicy) (*RateLimitPerRoute, error) { + if in == nil || in.VhRateLimits == "" { + return nil, nil + } + + vrl := VhRateLimitsOverride + switch in.VhRateLimits { + case "Override": + vrl = VhRateLimitsOverride + case "Include": + vrl = VhRateLimitsInclude + case "Ignore": + vrl = VhRateLimitsIgnore + default: + return nil, fmt.Errorf("error parsing rateLimitPerRoute config, %s is not supported", in) + } + + return &RateLimitPerRoute{ + VhRateLimits: vrl, + }, nil +} diff --git a/internal/dag/httpproxy_processor_test.go b/internal/dag/httpproxy_processor_test.go index 52f984ba17b..953ca6a545f 100644 --- a/internal/dag/httpproxy_processor_test.go +++ b/internal/dag/httpproxy_processor_test.go @@ -1029,7 +1029,7 @@ func TestValidateVirtualHostRateLimitPolicy(t *testing.T) { }, Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ - RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -1092,7 +1092,7 @@ func TestValidateVirtualHostRateLimitPolicy(t *testing.T) { }, Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ - RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Disabled: true, }, @@ -1129,7 +1129,7 @@ func TestValidateVirtualHostRateLimitPolicy(t 
*testing.T) { }, Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ - RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -1238,7 +1238,7 @@ func TestValidateVirtualHostRateLimitPolicy(t *testing.T) { }, Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ - RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 10, Unit: "second", @@ -1297,7 +1297,7 @@ func TestValidateVirtualHostRateLimitPolicy(t *testing.T) { }, Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ - RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 10, Unit: "second", @@ -1349,7 +1349,7 @@ func TestValidateVirtualHostRateLimitPolicy(t *testing.T) { }, Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ - RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { diff --git a/internal/dag/policy.go b/internal/dag/policy.go index 8f6966ccbbf..676d4deca84 100644 --- a/internal/dag/policy.go +++ b/internal/dag/policy.go @@ -538,20 +538,32 @@ func prefixReplacementsAreValid(replacements []contour_api_v1.ReplacePrefix) (st return "", nil } -func rateLimitPolicy(in *contour_api_v1.RateLimitPolicy) (*RateLimitPolicy, error) { +func vhostRateLimitPolicy(in *contour_api_v1.VhostRateLimitPolicy) (*RateLimitPolicy, error) { if in == nil || (in.Local == nil && (in.Global == nil || len(in.Global.Descriptors) == 0)) { return nil, nil } + return rateLimitPolicy(in.Local, in.Global) +} + +func 
routeRateLimitPolicy(in *contour_api_v1.RouteRateLimitPolicy) (*RateLimitPolicy, error) { + if in == nil || (in.Local == nil && (in.Global == nil || len(in.Global.Descriptors) == 0)) { + return nil, nil + } + + return rateLimitPolicy(in.Local, in.Global) +} + +func rateLimitPolicy(lrl *contour_api_v1.LocalRateLimitPolicy, grl *contour_api_v1.GlobalRateLimitPolicy) (*RateLimitPolicy, error) { rp := &RateLimitPolicy{} - local, err := localRateLimitPolicy(in.Local) + local, err := localRateLimitPolicy(lrl) if err != nil { return nil, err } rp.Local = local - global, err := globalRateLimitPolicy(in.Global) + global, err := globalRateLimitPolicy(grl) if err != nil { return nil, err } diff --git a/internal/dag/policy_test.go b/internal/dag/policy_test.go index f222ff109e2..e29a0aed213 100644 --- a/internal/dag/policy_test.go +++ b/internal/dag/policy_test.go @@ -654,9 +654,9 @@ func TestHeadersPolicy(t *testing.T) { } } -func TestRateLimitPolicy(t *testing.T) { +func TestVhostRateLimitPolicy(t *testing.T) { tests := map[string]struct { - in *contour_api_v1.RateLimitPolicy + in *contour_api_v1.VhostRateLimitPolicy want *RateLimitPolicy wantErr string }{ @@ -665,11 +665,11 @@ func TestRateLimitPolicy(t *testing.T) { want: nil, }, "nil local rate limit policy": { - in: &contour_api_v1.RateLimitPolicy{}, + in: &contour_api_v1.VhostRateLimitPolicy{}, want: nil, }, "local - no burst": { - in: &contour_api_v1.RateLimitPolicy{ + in: &contour_api_v1.VhostRateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 3, Unit: "second", @@ -684,7 +684,7 @@ func TestRateLimitPolicy(t *testing.T) { }, }, "local - burst": { - in: &contour_api_v1.RateLimitPolicy{ + in: &contour_api_v1.VhostRateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 3, Unit: "second", @@ -700,7 +700,7 @@ func TestRateLimitPolicy(t *testing.T) { }, }, "local - custom response status code": { - in: &contour_api_v1.RateLimitPolicy{ + in: &contour_api_v1.VhostRateLimitPolicy{ Local: 
&contour_api_v1.LocalRateLimitPolicy{ Requests: 10, Unit: "minute", @@ -717,7 +717,7 @@ func TestRateLimitPolicy(t *testing.T) { }, }, "local - custom response headers to add": { - in: &contour_api_v1.RateLimitPolicy{ + in: &contour_api_v1.VhostRateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 10, Unit: "hour", @@ -746,7 +746,7 @@ func TestRateLimitPolicy(t *testing.T) { }, }, "local - duplicate response header": { - in: &contour_api_v1.RateLimitPolicy{ + in: &contour_api_v1.VhostRateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 10, Unit: "hour", @@ -765,7 +765,7 @@ func TestRateLimitPolicy(t *testing.T) { wantErr: "duplicate header addition: \"Duplicate-Header\"", }, "local - invalid response header name": { - in: &contour_api_v1.RateLimitPolicy{ + in: &contour_api_v1.VhostRateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 10, Unit: "hour", @@ -780,7 +780,7 @@ func TestRateLimitPolicy(t *testing.T) { wantErr: `invalid header name "Invalid-Header!": [a valid HTTP header must consist of alphanumeric characters or '-' (e.g. 
'X-Header-Name', regex used for validation is '[-A-Za-z0-9]+')]`, }, "local - invalid unit": { - in: &contour_api_v1.RateLimitPolicy{ + in: &contour_api_v1.VhostRateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 10, Unit: "invalid-unit", @@ -789,7 +789,7 @@ func TestRateLimitPolicy(t *testing.T) { wantErr: "invalid unit \"invalid-unit\" in local rate limit policy", }, "local - invalid requests": { - in: &contour_api_v1.RateLimitPolicy{ + in: &contour_api_v1.VhostRateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 0, Unit: "second", @@ -798,7 +798,7 @@ func TestRateLimitPolicy(t *testing.T) { wantErr: "invalid requests value 0 in local rate limit policy", }, "global - multiple descriptors": { - in: &contour_api_v1.RateLimitPolicy{ + in: &contour_api_v1.VhostRateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -876,7 +876,7 @@ func TestRateLimitPolicy(t *testing.T) { }, }, "global - multiple descriptor entries set": { - in: &contour_api_v1.RateLimitPolicy{ + in: &contour_api_v1.VhostRateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -893,7 +893,7 @@ func TestRateLimitPolicy(t *testing.T) { wantErr: "rate limit descriptor entry must have exactly one field set", }, "global - no descriptor entries set": { - in: &contour_api_v1.RateLimitPolicy{ + in: &contour_api_v1.VhostRateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -907,7 +907,7 @@ func TestRateLimitPolicy(t *testing.T) { wantErr: "rate limit descriptor entry must have exactly one field set", }, "global - header value match": { - in: &contour_api_v1.RateLimitPolicy{ + in: &contour_api_v1.VhostRateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -954,7 +954,7 @@ func TestRateLimitPolicy(t *testing.T) { }, }, 
"global and local": { - in: &contour_api_v1.RateLimitPolicy{ + in: &contour_api_v1.VhostRateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 20, Unit: "second", @@ -994,7 +994,358 @@ func TestRateLimitPolicy(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { - rlp, err := rateLimitPolicy(tc.in) + rlp, err := vhostRateLimitPolicy(tc.in) + + if tc.wantErr != "" { + assert.EqualError(t, err, tc.wantErr) + } else { + assert.Equal(t, tc.want, rlp) + } + }) + } +} + +func TestRouteRateLimitPolicy(t *testing.T) { + tests := map[string]struct { + in *contour_api_v1.RouteRateLimitPolicy + want *RateLimitPolicy + wantErr string + }{ + "nil input": { + in: nil, + want: nil, + }, + "nil local rate limit policy": { + in: &contour_api_v1.RouteRateLimitPolicy{}, + want: nil, + }, + "local - no burst": { + in: &contour_api_v1.RouteRateLimitPolicy{ + Local: &contour_api_v1.LocalRateLimitPolicy{ + Requests: 3, + Unit: "second", + }, + }, + want: &RateLimitPolicy{ + Local: &LocalRateLimitPolicy{ + MaxTokens: 3, + TokensPerFill: 3, + FillInterval: time.Second, + }, + }, + }, + "local - burst": { + in: &contour_api_v1.RouteRateLimitPolicy{ + Local: &contour_api_v1.LocalRateLimitPolicy{ + Requests: 3, + Unit: "second", + Burst: 4, + }, + }, + want: &RateLimitPolicy{ + Local: &LocalRateLimitPolicy{ + MaxTokens: 7, + TokensPerFill: 3, + FillInterval: time.Second, + }, + }, + }, + "local - custom response status code": { + in: &contour_api_v1.RouteRateLimitPolicy{ + Local: &contour_api_v1.LocalRateLimitPolicy{ + Requests: 10, + Unit: "minute", + ResponseStatusCode: 431, + }, + }, + want: &RateLimitPolicy{ + Local: &LocalRateLimitPolicy{ + MaxTokens: 10, + TokensPerFill: 10, + FillInterval: time.Minute, + ResponseStatusCode: 431, + }, + }, + }, + "local - custom response headers to add": { + in: &contour_api_v1.RouteRateLimitPolicy{ + Local: &contour_api_v1.LocalRateLimitPolicy{ + Requests: 10, + Unit: "hour", + ResponseHeadersToAdd: 
[]contour_api_v1.HeaderValue{ + { + Name: "header-1", + Value: "header-value-1", + }, + { + Name: "header-2", + Value: "header-value-2", + }, + }, + }, + }, + want: &RateLimitPolicy{ + Local: &LocalRateLimitPolicy{ + MaxTokens: 10, + TokensPerFill: 10, + FillInterval: time.Hour, + ResponseHeadersToAdd: map[string]string{ + "Header-1": "header-value-1", + "Header-2": "header-value-2", + }, + }, + }, + }, + "local - duplicate response header": { + in: &contour_api_v1.RouteRateLimitPolicy{ + Local: &contour_api_v1.LocalRateLimitPolicy{ + Requests: 10, + Unit: "hour", + ResponseHeadersToAdd: []contour_api_v1.HeaderValue{ + { + Name: "duplicate-header", + Value: "header-value-1", + }, + { + Name: "duplicate-header", + Value: "header-value-2", + }, + }, + }, + }, + wantErr: "duplicate header addition: \"Duplicate-Header\"", + }, + "local - invalid response header name": { + in: &contour_api_v1.RouteRateLimitPolicy{ + Local: &contour_api_v1.LocalRateLimitPolicy{ + Requests: 10, + Unit: "hour", + ResponseHeadersToAdd: []contour_api_v1.HeaderValue{ + { + Name: "invalid-header!", + Value: "header-value-1", + }, + }, + }, + }, + wantErr: `invalid header name "Invalid-Header!": [a valid HTTP header must consist of alphanumeric characters or '-' (e.g. 
'X-Header-Name', regex used for validation is '[-A-Za-z0-9]+')]`, + }, + "local - invalid unit": { + in: &contour_api_v1.RouteRateLimitPolicy{ + Local: &contour_api_v1.LocalRateLimitPolicy{ + Requests: 10, + Unit: "invalid-unit", + }, + }, + wantErr: "invalid unit \"invalid-unit\" in local rate limit policy", + }, + "local - invalid requests": { + in: &contour_api_v1.RouteRateLimitPolicy{ + Local: &contour_api_v1.LocalRateLimitPolicy{ + Requests: 0, + Unit: "second", + }, + }, + wantErr: "invalid requests value 0 in local rate limit policy", + }, + "global - multiple descriptors": { + in: &contour_api_v1.RouteRateLimitPolicy{ + Global: &contour_api_v1.GlobalRateLimitPolicy{ + Descriptors: []contour_api_v1.RateLimitDescriptor{ + { + Entries: []contour_api_v1.RateLimitDescriptorEntry{ + { + GenericKey: &contour_api_v1.GenericKeyDescriptor{ + Key: "generic-key-key", + Value: "generic-key-value", + }, + }, + { + RemoteAddress: &contour_api_v1.RemoteAddressDescriptor{}, + }, + { + RequestHeader: &contour_api_v1.RequestHeaderDescriptor{ + HeaderName: "X-Header", + DescriptorKey: "request-header-key", + }, + }, + }, + }, + { + Entries: []contour_api_v1.RateLimitDescriptorEntry{ + { + RemoteAddress: &contour_api_v1.RemoteAddressDescriptor{}, + }, + { + GenericKey: &contour_api_v1.GenericKeyDescriptor{ + Key: "generic-key-key-2", + Value: "generic-key-value-2", + }, + }, + }, + }, + }, + }, + }, + want: &RateLimitPolicy{ + Global: &GlobalRateLimitPolicy{ + Descriptors: []*RateLimitDescriptor{ + { + Entries: []RateLimitDescriptorEntry{ + { + GenericKey: &GenericKeyDescriptorEntry{ + Key: "generic-key-key", + Value: "generic-key-value", + }, + }, + { + RemoteAddress: &RemoteAddressDescriptorEntry{}, + }, + { + HeaderMatch: &HeaderMatchDescriptorEntry{ + HeaderName: "X-Header", + Key: "request-header-key", + }, + }, + }, + }, + { + Entries: []RateLimitDescriptorEntry{ + { + RemoteAddress: &RemoteAddressDescriptorEntry{}, + }, + { + GenericKey: &GenericKeyDescriptorEntry{ + 
Key: "generic-key-key-2", + Value: "generic-key-value-2", + }, + }, + }, + }, + }, + }, + }, + }, + "global - multiple descriptor entries set": { + in: &contour_api_v1.RouteRateLimitPolicy{ + Global: &contour_api_v1.GlobalRateLimitPolicy{ + Descriptors: []contour_api_v1.RateLimitDescriptor{ + { + Entries: []contour_api_v1.RateLimitDescriptorEntry{ + { + GenericKey: &contour_api_v1.GenericKeyDescriptor{}, + RemoteAddress: &contour_api_v1.RemoteAddressDescriptor{}, + }, + }, + }, + }, + }, + }, + wantErr: "rate limit descriptor entry must have exactly one field set", + }, + "global - no descriptor entries set": { + in: &contour_api_v1.RouteRateLimitPolicy{ + Global: &contour_api_v1.GlobalRateLimitPolicy{ + Descriptors: []contour_api_v1.RateLimitDescriptor{ + { + Entries: []contour_api_v1.RateLimitDescriptorEntry{ + {}, + }, + }, + }, + }, + }, + wantErr: "rate limit descriptor entry must have exactly one field set", + }, + "global - header value match": { + in: &contour_api_v1.RouteRateLimitPolicy{ + Global: &contour_api_v1.GlobalRateLimitPolicy{ + Descriptors: []contour_api_v1.RateLimitDescriptor{ + { + Entries: []contour_api_v1.RateLimitDescriptorEntry{ + { + RequestHeaderValueMatch: &contour_api_v1.RequestHeaderValueMatchDescriptor{ + Headers: []contour_api_v1.HeaderMatchCondition{ + { + Name: "X-Header", + NotPresent: true, + }, + }, + ExpectMatch: true, + Value: "header-is-not-present", + }, + }, + }, + }, + }, + }, + }, + want: &RateLimitPolicy{ + Global: &GlobalRateLimitPolicy{ + Descriptors: []*RateLimitDescriptor{ + { + Entries: []RateLimitDescriptorEntry{ + { + HeaderValueMatch: &HeaderValueMatchDescriptorEntry{ + Headers: []HeaderMatchCondition{ + { + Name: "X-Header", + MatchType: "present", + Invert: true, + }, + }, + ExpectMatch: true, + Value: "header-is-not-present", + }, + }, + }, + }, + }, + }, + }, + }, + "global and local": { + in: &contour_api_v1.RouteRateLimitPolicy{ + Local: &contour_api_v1.LocalRateLimitPolicy{ + Requests: 20, + Unit: 
"second", + }, + Global: &contour_api_v1.GlobalRateLimitPolicy{ + Descriptors: []contour_api_v1.RateLimitDescriptor{ + { + Entries: []contour_api_v1.RateLimitDescriptorEntry{ + { + RemoteAddress: &contour_api_v1.RemoteAddressDescriptor{}, + }, + }, + }, + }, + }, + }, + want: &RateLimitPolicy{ + Local: &LocalRateLimitPolicy{ + MaxTokens: 20, + TokensPerFill: 20, + FillInterval: time.Second, + }, + Global: &GlobalRateLimitPolicy{ + Descriptors: []*RateLimitDescriptor{ + { + Entries: []RateLimitDescriptorEntry{ + { + RemoteAddress: &RemoteAddressDescriptorEntry{}, + }, + }, + }, + }, + }, + }, + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + rlp, err := routeRateLimitPolicy(tc.in) if tc.wantErr != "" { assert.EqualError(t, err, tc.wantErr) diff --git a/internal/featuretests/v3/globalratelimit_test.go b/internal/featuretests/v3/globalratelimit_test.go index 7cefb8c7ba0..af8372ab7a1 100644 --- a/internal/featuretests/v3/globalratelimit_test.go +++ b/internal/featuretests/v3/globalratelimit_test.go @@ -111,7 +111,7 @@ func globalRateLimitNoRateLimitsDefined(t *testing.T, rh ResourceEventHandlerWra Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ Fqdn: "foo.com", - RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Disabled: true, }, @@ -188,7 +188,6 @@ func globalRateLimitNoRateLimitsDefined(t *testing.T, rh ResourceEventHandlerWra ), }) } - } func globalRateLimitVhostRateLimitDefined(t *testing.T, rh ResourceEventHandlerWrapper, c *Contour, tls tlsConfig) { @@ -200,7 +199,7 @@ func globalRateLimitVhostRateLimitDefined(t *testing.T, rh ResourceEventHandlerW Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ Fqdn: "foo.com", - RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ 
Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -292,7 +291,7 @@ func globalRateLimitRouteRateLimitDefined(t *testing.T, rh ResourceEventHandlerW Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ Fqdn: "foo.com", - RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Disabled: true, }, @@ -306,7 +305,7 @@ func globalRateLimitRouteRateLimitDefined(t *testing.T, rh ResourceEventHandlerW Port: 80, }, }, - RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -390,7 +389,7 @@ func globalRateLimitVhostAndRouteRateLimitDefined(t *testing.T, rh ResourceEvent Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ Fqdn: "foo.com", - RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -415,7 +414,7 @@ func globalRateLimitVhostAndRouteRateLimitDefined(t *testing.T, rh ResourceEvent Port: 80, }, }, - RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -524,7 +523,7 @@ func defaultGlobalRateLimitVhostRateLimitDefined(t *testing.T, rh ResourceEventH Port: 80, }, }, - RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -622,7 +621,7 @@ func globalRateLimitMultipleDescriptorsAndEntries(t *testing.T, rh ResourceEvent Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ Fqdn: 
"foo.com", - RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Disabled: true, }, @@ -636,7 +635,7 @@ func globalRateLimitMultipleDescriptorsAndEntries(t *testing.T, rh ResourceEvent Port: 80, }, }, - RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ // first descriptor diff --git a/internal/featuretests/v3/localratelimit_test.go b/internal/featuretests/v3/localratelimit_test.go index 1d870798eef..a50be0b305f 100644 --- a/internal/featuretests/v3/localratelimit_test.go +++ b/internal/featuretests/v3/localratelimit_test.go @@ -112,7 +112,7 @@ func vhostRateLimitDefined(t *testing.T, rh ResourceEventHandlerWrapper, c *Cont Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ Fqdn: "foo.com", - RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 100, Unit: "minute", @@ -191,7 +191,7 @@ func routeRateLimitsDefined(t *testing.T, rh ResourceEventHandlerWrapper, c *Con Port: 80, }, }, - RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 100, Unit: "minute", @@ -211,7 +211,7 @@ func routeRateLimitsDefined(t *testing.T, rh ResourceEventHandlerWrapper, c *Con Port: 80, }, }, - RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 5, Unit: "second", @@ -295,7 +295,7 @@ func vhostAndRouteRateLimitsDefined(t *testing.T, rh ResourceEventHandlerWrapper Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ Fqdn: "foo.com", - RateLimitPolicy: 
&contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 100, Unit: "minute", @@ -316,7 +316,7 @@ func vhostAndRouteRateLimitsDefined(t *testing.T, rh ResourceEventHandlerWrapper Port: 80, }, }, - RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 100, Unit: "minute", @@ -336,7 +336,7 @@ func vhostAndRouteRateLimitsDefined(t *testing.T, rh ResourceEventHandlerWrapper Port: 80, }, }, - RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 5, Unit: "second", @@ -456,7 +456,7 @@ func customResponseCode(t *testing.T, rh ResourceEventHandlerWrapper, c *Contour Port: 80, }, }, - RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 100, Unit: "minute", @@ -529,7 +529,7 @@ func customResponseHeaders(t *testing.T, rh ResourceEventHandlerWrapper, c *Cont Port: 80, }, }, - RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 100, Unit: "minute", diff --git a/site/content/docs/main/config/api-reference.html b/site/content/docs/main/config/api-reference.html index ea5f20f24f0..6fd17aff4b4 100644 --- a/site/content/docs/main/config/api-reference.html +++ b/site/content/docs/main/config/api-reference.html @@ -1226,7 +1226,8 @@

GlobalRateLimitPolicy

(Appears on: -RateLimitPolicy, +RouteRateLimitPolicy, +VhostRateLimitPolicy, RateLimitServiceConfig)

@@ -2496,7 +2497,8 @@

LocalRateLimitPolicy

(Appears on: -RateLimitPolicy) +RouteRateLimitPolicy, +VhostRateLimitPolicy)

LocalRateLimitPolicy defines local rate limiting parameters.

@@ -2998,60 +3000,6 @@

RateLimitDescriptorEntry -

RateLimitPolicy -

-

-(Appears on: -Route, -VirtualHost) -

-

-

RateLimitPolicy defines rate limiting parameters.

-

- - - - - - - - - - - - - - - - - -
FieldDescription
-local -
- - -LocalRateLimitPolicy - - -
-(Optional) -

Local defines local rate limiting parameters, i.e. parameters -for rate limiting that occurs within each Envoy pod as requests -are handled.

-
-global -
- - -GlobalRateLimitPolicy - - -
-(Optional) -

Global defines global rate limiting parameters, i.e. parameters -defining descriptors that are sent to an external rate limit -service (RLS) for a rate limit decision on each request.

-

RedirectResponseCode (uint32 alias)

@@ -3745,8 +3693,8 @@

Route rateLimitPolicy
- -RateLimitPolicy + +RouteRateLimitPolicy @@ -3851,6 +3799,74 @@

Route +

RouteRateLimitPolicy +

+

+(Appears on: +Route) +

+

+

RouteRateLimitPolicy defines rate limiting parameters on the route-level.

+

+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+local +
+ + +LocalRateLimitPolicy + + +
+(Optional) +

Local defines local rate limiting parameters, i.e. parameters +for rate limiting that occurs within each Envoy pod as requests +are handled.

+
+global +
+ + +GlobalRateLimitPolicy + + +
+(Optional) +

Global defines global rate limiting parameters, i.e. parameters +defining descriptors that are sent to an external rate limit +service (RLS) for a rate limit decision on each request.

+
+vhRateLimits +
+ +string + +
+(Optional) +

VhRateLimits defines how the route should handle rate limits defined by the virtual host. +Default value is Override, which means use the virtual host rate limits unless the route has a rate limit policy. +Allowed values are: Override, Include, or Ignore.

+

Service

@@ -4703,6 +4719,59 @@

UpstreamValidation +

VhostRateLimitPolicy +

+

+(Appears on: +VirtualHost) +

+

+

VhostRateLimitPolicy defines rate limiting parameters on the virtual host level.

+

+ + + + + + + + + + + + + + + + + +
FieldDescription
+local +
+ + +LocalRateLimitPolicy + + +
+(Optional) +

Local defines local rate limiting parameters, i.e. parameters +for rate limiting that occurs within each Envoy pod as requests +are handled.

+
+global +
+ + +GlobalRateLimitPolicy + + +
+(Optional) +

Global defines global rate limiting parameters, i.e. parameters +defining descriptors that are sent to an external rate limit +service (RLS) for a rate limit decision on each request.

+

VirtualHost

@@ -4792,8 +4861,8 @@

VirtualHost rateLimitPolicy
- -RateLimitPolicy + +VhostRateLimitPolicy diff --git a/test/e2e/httpproxy/default_global_rate_limiting_test.go b/test/e2e/httpproxy/default_global_rate_limiting_test.go index b9f00a643eb..9e2e07ee3db 100644 --- a/test/e2e/httpproxy/default_global_rate_limiting_test.go +++ b/test/e2e/httpproxy/default_global_rate_limiting_test.go @@ -82,7 +82,7 @@ func testDefaultGlobalRateLimitingVirtualHostNonTLS(namespace string) { Spec: contourv1.HTTPProxySpec{ VirtualHost: &contourv1.VirtualHost{ Fqdn: "defaultglobalratelimitvhostnontls.projectcontour.io", - RateLimitPolicy: &contourv1.RateLimitPolicy{ + RateLimitPolicy: &contourv1.VhostRateLimitPolicy{ Global: &contourv1.GlobalRateLimitPolicy{ Disabled: true, }, @@ -144,7 +144,7 @@ func testDefaultGlobalRateLimitingVirtualHostNonTLS(namespace string) { Spec: contourv1.HTTPProxySpec{ VirtualHost: &contourv1.VirtualHost{ Fqdn: "defaultglobalratelimitvhostnontls.projectcontour.io", - RateLimitPolicy: &contourv1.RateLimitPolicy{ + RateLimitPolicy: &contourv1.VhostRateLimitPolicy{ Global: &contourv1.GlobalRateLimitPolicy{ Descriptors: []contourv1.RateLimitDescriptor{ { @@ -250,7 +250,7 @@ func testDefaultGlobalRateLimitingVirtualHostTLS(namespace string) { TLS: &contourv1.TLS{ SecretName: "echo", }, - RateLimitPolicy: &contourv1.RateLimitPolicy{ + RateLimitPolicy: &contourv1.VhostRateLimitPolicy{ Global: &contourv1.GlobalRateLimitPolicy{ Disabled: true, }, @@ -316,7 +316,7 @@ func testDefaultGlobalRateLimitingVirtualHostTLS(namespace string) { TLS: &contourv1.TLS{ SecretName: "echo", }, - RateLimitPolicy: &contourv1.RateLimitPolicy{ + RateLimitPolicy: &contourv1.VhostRateLimitPolicy{ Global: &contourv1.GlobalRateLimitPolicy{ Descriptors: []contourv1.RateLimitDescriptor{ { diff --git a/test/e2e/httpproxy/global_rate_limiting_test.go b/test/e2e/httpproxy/global_rate_limiting_test.go index 0ef50bbe943..f2edff5ce6d 100644 --- a/test/e2e/httpproxy/global_rate_limiting_test.go +++ b/test/e2e/httpproxy/global_rate_limiting_test.go @@ 
-71,7 +71,7 @@ func testGlobalRateLimitingVirtualHostNonTLS(namespace string) { } // Add a global rate limit policy on the virtual host. - p.Spec.VirtualHost.RateLimitPolicy = &contourv1.RateLimitPolicy{ + p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ Global: &contourv1.GlobalRateLimitPolicy{ Descriptors: []contourv1.RateLimitDescriptor{ { @@ -167,7 +167,7 @@ func testGlobalRateLimitingRouteNonTLS(namespace string) { return err } - p.Spec.Routes[0].RateLimitPolicy = &contourv1.RateLimitPolicy{ + p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ Global: &contourv1.GlobalRateLimitPolicy{ Descriptors: []contourv1.RateLimitDescriptor{ { @@ -265,7 +265,7 @@ func testGlobalRateLimitingVirtualHostTLS(namespace string) { return err } - p.Spec.VirtualHost.RateLimitPolicy = &contourv1.RateLimitPolicy{ + p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ Global: &contourv1.GlobalRateLimitPolicy{ Descriptors: []contourv1.RateLimitDescriptor{ { @@ -365,7 +365,7 @@ func testGlobalRateLimitingRouteTLS(namespace string) { return err } - p.Spec.Routes[0].RateLimitPolicy = &contourv1.RateLimitPolicy{ + p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ Global: &contourv1.GlobalRateLimitPolicy{ Descriptors: []contourv1.RateLimitDescriptor{ { diff --git a/test/e2e/httpproxy/local_rate_limiting_test.go b/test/e2e/httpproxy/local_rate_limiting_test.go index ead9fa74bb7..aca1bae774c 100644 --- a/test/e2e/httpproxy/local_rate_limiting_test.go +++ b/test/e2e/httpproxy/local_rate_limiting_test.go @@ -71,7 +71,7 @@ func testLocalRateLimitingVirtualHost(namespace string) { return err } - p.Spec.VirtualHost.RateLimitPolicy = &contourv1.RateLimitPolicy{ + p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ Local: &contourv1.LocalRateLimitPolicy{ Requests: 1, Unit: "hour", @@ -158,7 +158,7 @@ func testLocalRateLimitingRoute(namespace string) { return err } - p.Spec.Routes[0].RateLimitPolicy = 
&contourv1.RateLimitPolicy{ + p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ Local: &contourv1.LocalRateLimitPolicy{ Requests: 1, Unit: "hour", From 0ff5a4b1b9bab808cd04974c7651a3f8b6320265 Mon Sep 17 00:00:00 2001 From: shadi-altarsha Date: Tue, 25 Jul 2023 14:50:00 +0200 Subject: [PATCH 02/16] render perRouteLimit on envoy Signed-off-by: shadi-altarsha --- apis/projectcontour/v1/httpproxy.go | 2 +- internal/envoy/v3/ratelimit.go | 9 + internal/envoy/v3/route.go | 4 + test/e2e/httpproxy/httpproxy_test.go | 57 ++ test/e2e/httpproxy/vh_rate_limits_test.go | 1066 +++++++++++++++++++++ 5 files changed, 1137 insertions(+), 1 deletion(-) create mode 100644 test/e2e/httpproxy/vh_rate_limits_test.go diff --git a/apis/projectcontour/v1/httpproxy.go b/apis/projectcontour/v1/httpproxy.go index d2b83972219..58796fa8c9e 100644 --- a/apis/projectcontour/v1/httpproxy.go +++ b/apis/projectcontour/v1/httpproxy.go @@ -816,7 +816,7 @@ type RouteRateLimitPolicy struct { // VhRateLimits defines how the route should handle rate limits defined by the virtual host. // Default value is Override, which means use the virtual host rate limits unless the route has a rate limit policy. - // Allowed values are: Override, Include, or ignore. + // Allowed values are: Override, Include, or Ignore. // +optional VhRateLimits string `json:"vhRateLimits,omitempty"` } diff --git a/internal/envoy/v3/ratelimit.go b/internal/envoy/v3/ratelimit.go index 1aebd499570..bbfd02f8579 100644 --- a/internal/envoy/v3/ratelimit.go +++ b/internal/envoy/v3/ratelimit.go @@ -163,3 +163,12 @@ func enableXRateLimitHeaders(enable bool) ratelimit_filter_v3.RateLimit_XRateLim } return ratelimit_filter_v3.RateLimit_OFF } + +// rateLimitPerRoute returns a per-route config to configure vhost rate limits. 
+func rateLimitPerRoute(mode int) *anypb.Any { + return protobuf.MustMarshalAny( + &ratelimit_filter_v3.RateLimitPerRoute{ + VhRateLimits: ratelimit_filter_v3.RateLimitPerRoute_VhRateLimitsOptions(mode), + }, + ) +} diff --git a/internal/envoy/v3/route.go b/internal/envoy/v3/route.go index 609dcc2c76d..f48b904b585 100644 --- a/internal/envoy/v3/route.go +++ b/internal/envoy/v3/route.go @@ -145,6 +145,10 @@ func buildRoute(dagRoute *dag.Route, vhostName string, secure bool) *envoy_route route.TypedPerFilterConfig["envoy.filters.http.local_ratelimit"] = LocalRateLimitConfig(dagRoute.RateLimitPolicy.Local, "vhost."+vhostName) } + if dagRoute.RateLimitPerRoute != nil { + route.TypedPerFilterConfig["envoy.filters.http.ratelimit"] = rateLimitPerRoute(int(dagRoute.RateLimitPerRoute.VhRateLimits)) + } + // Apply per-route authorization policy modifications. if dagRoute.AuthDisabled { route.TypedPerFilterConfig["envoy.filters.http.ext_authz"] = routeAuthzDisabled() diff --git a/test/e2e/httpproxy/httpproxy_test.go b/test/e2e/httpproxy/httpproxy_test.go index fa327366305..73da21c9b0e 100644 --- a/test/e2e/httpproxy/httpproxy_test.go +++ b/test/e2e/httpproxy/httpproxy_test.go @@ -500,6 +500,63 @@ descriptors: f.NamespacedTest("httpproxy-default-global-rate-limiting-vhost-tls", withRateLimitService(testDefaultGlobalRateLimitingVirtualHostTLS)) }) + Context("vh rate limits", func() { + withRateLimitService := func(body e2e.NamespacedTestBody) e2e.NamespacedTestBody { + return func(namespace string) { + Context("with rate limit service", func() { + BeforeEach(func() { + contourConfig.RateLimitService = config.RateLimitService{ + ExtensionService: fmt.Sprintf("%s/%s", namespace, f.Deployment.RateLimitExtensionService.Name), + Domain: "contour", + FailOpen: false, + } + contourConfiguration.Spec.RateLimitService = &contour_api_v1alpha1.RateLimitServiceConfig{ + ExtensionService: contour_api_v1alpha1.NamespacedName{ + Name: f.Deployment.RateLimitExtensionService.Name, + Namespace: 
namespace, + }, + Domain: "contour", + FailOpen: ref.To(false), + EnableXRateLimitHeaders: ref.To(false), + } + require.NoError(f.T(), + f.Deployment.EnsureRateLimitResources( + namespace, + ` +domain: contour +descriptors: + - key: generic_key + value: vhostlimit + rate_limit: + unit: hour + requests_per_unit: 1 + - key: route_limit_key + value: routelimit + rate_limit: + unit: hour + requests_per_unit: 1 + - key: generic_key + value: tlsvhostlimit + rate_limit: + unit: hour + requests_per_unit: 1 + - key: route_limit_key + value: tlsroutelimit + rate_limit: + unit: hour + requests_per_unit: 1`)) + }) + + body(namespace) + }) + } + } + + f.NamespacedTest("httpproxy-global-vh-rate-limits-vhost-non-tls", withRateLimitService(testGlobalWithVhostRateLimitsWithNonTLSVirtualHost)) + + f.NamespacedTest("httpproxy-global-vh-rate-limits-vhost-tls", withRateLimitService(testGlobalWithVhostRateLimitsWithTLSVirtualHost)) + }) + Context("cookie-rewriting", func() { f.NamespacedTest("app-cookie-rewrite", testAppCookieRewrite) diff --git a/test/e2e/httpproxy/vh_rate_limits_test.go b/test/e2e/httpproxy/vh_rate_limits_test.go new file mode 100644 index 00000000000..6b536326e67 --- /dev/null +++ b/test/e2e/httpproxy/vh_rate_limits_test.go @@ -0,0 +1,1066 @@ +// Copyright Project Contour Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build e2e + +package httpproxy + +import ( + "context" + + . 
"github.com/onsi/ginkgo/v2" + contourv1 "github.com/projectcontour/contour/apis/projectcontour/v1" + "github.com/projectcontour/contour/test/e2e" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func testGlobalWithVhostRateLimitsWithNonTLSVirtualHost(namespace string) { + Specify("vhost_rate_limits is set to the default override mode (implicitly)", func() { + t := f.T() + + f.Fixtures.Echo.Deploy(namespace, "echo") + + p := &contourv1.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "vhratelimitsvhostnontls", + }, + Spec: contourv1.HTTPProxySpec{ + VirtualHost: &contourv1.VirtualHost{ + Fqdn: "vhratelimitsvhostnontls.projectcontour.io", + }, + Routes: []contourv1.Route{ + { + Services: []contourv1.Service{ + { + Name: "echo", + Port: 80, + }, + }, + Conditions: []contourv1.MatchCondition{ + { + Prefix: "/echo", + }, + }, + }, + }, + }, + } + p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) + + // Wait until we get a 200 from the proxy confirming + // the pods are up and serving traffic. + res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the virtual host. 
+ p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ + Global: &contourv1.GlobalRateLimitPolicy{ + Descriptors: []contourv1.RateLimitDescriptor{ + { + Entries: []contourv1.RateLimitDescriptorEntry{ + { + GenericKey: &contourv1.GenericKeyDescriptor{ + Value: "vhostlimit", + }, + }, + }, + }, + }, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // Make a request against the proxy, confirm a 200 response + // is returned since we're allowed one request per hour. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + // Make another request against the proxy, confirm a 429 response + // is now gotten since we've exceeded the rate limit. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(429), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the route. 
+ p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ + Global: &contourv1.GlobalRateLimitPolicy{ + Descriptors: []contourv1.RateLimitDescriptor{ + { + Entries: []contourv1.RateLimitDescriptorEntry{ + { + GenericKey: &contourv1.GenericKeyDescriptor{ + Key: "route_limit_key", + Value: "routelimit", + }, + }, + }, + }, + }, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // After adding rate limits on the route level, make another request + // to confirm a 200 response since we override the policy by default on the route level, + // and the new limit allows 1 request per hour. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + // Make another request to confirm that route level rate limits got exceeded. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(429), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + }) + + Specify("vhost_rate_limits is set to the default override mode (explicitly)", func() { + t := f.T() + + f.Fixtures.Echo.Deploy(namespace, "echo") + + p := &contourv1.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "vhratelimitsvhostnontls", + }, + Spec: contourv1.HTTPProxySpec{ + VirtualHost: &contourv1.VirtualHost{ + Fqdn: "vhratelimitsvhostnontls.projectcontour.io", + }, + Routes: []contourv1.Route{ + { + Services: []contourv1.Service{ + { + Name: "echo", + Port: 80, + }, + }, + Conditions: []contourv1.MatchCondition{ + { + Prefix: "/echo", + }, + }, + }, + }, + }, + } + p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) + + // Wait until we get a 200 from the proxy confirming + // the pods are up 
and serving traffic. + res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the virtual host. + p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ + Global: &contourv1.GlobalRateLimitPolicy{ + Descriptors: []contourv1.RateLimitDescriptor{ + { + Entries: []contourv1.RateLimitDescriptorEntry{ + { + GenericKey: &contourv1.GenericKeyDescriptor{ + Value: "vhostlimit", + }, + }, + }, + }, + }, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // Make a request against the proxy, confirm a 200 response + // is returned since we're allowed one request per hour. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + // Make another request against the proxy, confirm a 429 response + // is now gotten since we've exceeded the rate limit. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(429), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the route. 
+ p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ + VhRateLimits: "Override", + Global: &contourv1.GlobalRateLimitPolicy{ + Descriptors: []contourv1.RateLimitDescriptor{ + { + Entries: []contourv1.RateLimitDescriptorEntry{ + { + GenericKey: &contourv1.GenericKeyDescriptor{ + Key: "route_limit_key", + Value: "routelimit", + }, + }, + }, + }, + }, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // After adding rate limits on the route level, make another request + // to confirm a 200 response since we override the policy by default on the route level, + // and the new limit allows 1 request per hour. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + // Make another request to confirm that route level rate limits got exceeded. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(429), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + }) + + Specify("vhost_rate_limits is set to include mode", func() { + t := f.T() + + f.Fixtures.Echo.Deploy(namespace, "echo") + + p := &contourv1.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "vhratelimitsvhostnontls", + }, + Spec: contourv1.HTTPProxySpec{ + VirtualHost: &contourv1.VirtualHost{ + Fqdn: "vhratelimitsvhostnontls.projectcontour.io", + }, + Routes: []contourv1.Route{ + { + Services: []contourv1.Service{ + { + Name: "echo", + Port: 80, + }, + }, + Conditions: []contourv1.MatchCondition{ + { + Prefix: "/echo", + }, + }, + }, + }, + }, + } + p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) + + // Wait until we get a 200 from the proxy confirming + // the pods are 
up and serving traffic. + res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the virtual host. + p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ + Global: &contourv1.GlobalRateLimitPolicy{ + Descriptors: []contourv1.RateLimitDescriptor{ + { + Entries: []contourv1.RateLimitDescriptorEntry{ + { + GenericKey: &contourv1.GenericKeyDescriptor{ + Value: "vhostlimit", + }, + }, + }, + }, + }, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // Make a request against the proxy, confirm a 200 response + // is returned since we're allowed one request per hour. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + // Make another request against the proxy, confirm a 429 response + // is now gotten since we've exceeded the rate limit. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(429), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the route. 
+ p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ + VhRateLimits: "Include", + Global: &contourv1.GlobalRateLimitPolicy{ + Descriptors: []contourv1.RateLimitDescriptor{ + { + Entries: []contourv1.RateLimitDescriptorEntry{ + { + GenericKey: &contourv1.GenericKeyDescriptor{ + Key: "route_limit_key", + Value: "routelimit", + }, + }, + }, + }, + }, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // After adding rate limits on the route level that allows one request per hour + // but vhost_rate_limits is in include mode, make another request to confirm a 429 response. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(429), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + }) + + Specify("vhost_rate_limits is set to ignore mode", func() { + t := f.T() + + f.Fixtures.Echo.Deploy(namespace, "echo") + + p := &contourv1.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "vhratelimitsvhostnontls", + }, + Spec: contourv1.HTTPProxySpec{ + VirtualHost: &contourv1.VirtualHost{ + Fqdn: "vhratelimitsvhostnontls.projectcontour.io", + }, + Routes: []contourv1.Route{ + { + Services: []contourv1.Service{ + { + Name: "echo", + Port: 80, + }, + }, + Conditions: []contourv1.MatchCondition{ + { + Prefix: "/echo", + }, + }, + }, + }, + }, + } + p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) + + // Wait until we get a 200 from the proxy confirming + // the pods are up and serving traffic. 
+ res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the virtual host. + p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ + Global: &contourv1.GlobalRateLimitPolicy{ + Descriptors: []contourv1.RateLimitDescriptor{ + { + Entries: []contourv1.RateLimitDescriptorEntry{ + { + GenericKey: &contourv1.GenericKeyDescriptor{ + Value: "vhostlimit", + }, + }, + }, + }, + }, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // Make a request against the proxy, confirm a 200 response + // is returned since we're allowed one request per hour. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + // Make another request against the proxy, confirm a 429 response + // is now gotten since we've exceeded the rate limit. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(429), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the route. 
+ p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ + VhRateLimits: "Ignore", + } + + return f.Client.Update(context.TODO(), p) + })) + + // We set vh_rate_limits to ignore, which means the route should ignore any rate limit policy + // set by the virtual host. Make another request to confirm 200. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + }) +} + +func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { + Specify("vhost_rate_limits option is set to the default override mode (implicitly)", func() { + t := f.T() + + f.Fixtures.Echo.Deploy(namespace, "echo") + f.Certs.CreateSelfSignedCert(namespace, "echo-cert", "echo", "vhratelimitsvhosttls.projectcontour.io") + + p := &contourv1.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "vhratelimitsvhosttls", + }, + Spec: contourv1.HTTPProxySpec{ + VirtualHost: &contourv1.VirtualHost{ + Fqdn: "vhratelimitsvhosttls.projectcontour.io", + TLS: &contourv1.TLS{ + SecretName: "echo", + }, + }, + Routes: []contourv1.Route{ + { + Services: []contourv1.Service{ + { + Name: "echo", + Port: 80, + }, + }, + Conditions: []contourv1.MatchCondition{ + { + Prefix: "/echo", + }, + }, + }, + }, + }, + } + p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) + + // Wait until we get a 200 from the proxy confirming + // the pods are up and serving traffic. 
+ res, ok := f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the virtual host. + p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ + Global: &contourv1.GlobalRateLimitPolicy{ + Descriptors: []contourv1.RateLimitDescriptor{ + { + Entries: []contourv1.RateLimitDescriptorEntry{ + { + GenericKey: &contourv1.GenericKeyDescriptor{ + Value: "tlsvhostlimit", + }, + }, + }, + }, + }, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // Make a request against the proxy, confirm a 200 response + // is returned since we're allowed one request per hour. + res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + // Make another request against the proxy, confirm a 429 response + // is now gotten since we've exceeded the rate limit. + res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(429), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the route. 
+ p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ + Global: &contourv1.GlobalRateLimitPolicy{ + Descriptors: []contourv1.RateLimitDescriptor{ + { + Entries: []contourv1.RateLimitDescriptorEntry{ + { + GenericKey: &contourv1.GenericKeyDescriptor{ + Key: "route_limit_key", + Value: "tlsroutelimit", + }, + }, + }, + }, + }, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // After adding rate limits on the route level, make another request + // to confirm a 200 response since we override the policy by default on the route level, + // and the new limit allows 1 request per hour. + res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + // Make another request to confirm that route level rate limits got exceeded. + res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(429), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + }) + + Specify("vhost_rate_limits option is set to the default override mode (explicitly)", func() { + t := f.T() + + f.Fixtures.Echo.Deploy(namespace, "echo") + f.Certs.CreateSelfSignedCert(namespace, "echo-cert", "echo", "vhratelimitsvhosttls.projectcontour.io") + + p := &contourv1.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "vhratelimitsvhosttls", + }, + Spec: contourv1.HTTPProxySpec{ + VirtualHost: &contourv1.VirtualHost{ + Fqdn: "vhratelimitsvhosttls.projectcontour.io", + TLS: &contourv1.TLS{ + SecretName: "echo", + }, + }, + Routes: []contourv1.Route{ + { + Services: []contourv1.Service{ + { + Name: "echo", + Port: 80, + }, + }, + Conditions: []contourv1.MatchCondition{ + { + Prefix: 
"/echo", + }, + }, + }, + }, + }, + } + p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) + + // Wait until we get a 200 from the proxy confirming + // the pods are up and serving traffic. + res, ok := f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the virtual host. + p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ + Global: &contourv1.GlobalRateLimitPolicy{ + Descriptors: []contourv1.RateLimitDescriptor{ + { + Entries: []contourv1.RateLimitDescriptorEntry{ + { + GenericKey: &contourv1.GenericKeyDescriptor{ + Value: "tlsvhostlimit", + }, + }, + }, + }, + }, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // Make a request against the proxy, confirm a 200 response + // is returned since we're allowed one request per hour. + res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + // Make another request against the proxy, confirm a 429 response + // is now gotten since we've exceeded the rate limit. 
+ res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(429), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the route. + p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ + VhRateLimits: "Override", + Global: &contourv1.GlobalRateLimitPolicy{ + Descriptors: []contourv1.RateLimitDescriptor{ + { + Entries: []contourv1.RateLimitDescriptorEntry{ + { + GenericKey: &contourv1.GenericKeyDescriptor{ + Key: "route_limit_key", + Value: "tlsroutelimit", + }, + }, + }, + }, + }, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // After adding rate limits on the route level, make another request + // to confirm a 200 response since we override the policy by default on the route level, + // and the new limit allows 1 request per hour. + res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + // Make another request to confirm that route level rate limits got exceeded. 
+ res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(429), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + }) + + Specify("vhost_rate_limits option is set to include mode", func() { + t := f.T() + + f.Fixtures.Echo.Deploy(namespace, "echo") + f.Certs.CreateSelfSignedCert(namespace, "echo-cert", "echo", "vhratelimitsvhosttls.projectcontour.io") + + p := &contourv1.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "vhratelimitsvhosttls", + }, + Spec: contourv1.HTTPProxySpec{ + VirtualHost: &contourv1.VirtualHost{ + Fqdn: "vhratelimitsvhosttls.projectcontour.io", + TLS: &contourv1.TLS{ + SecretName: "echo", + }, + }, + Routes: []contourv1.Route{ + { + Services: []contourv1.Service{ + { + Name: "echo", + Port: 80, + }, + }, + Conditions: []contourv1.MatchCondition{ + { + Prefix: "/echo", + }, + }, + }, + }, + }, + } + p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) + + // Wait until we get a 200 from the proxy confirming + // the pods are up and serving traffic. + res, ok := f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the virtual host. 
+ p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ + Global: &contourv1.GlobalRateLimitPolicy{ + Descriptors: []contourv1.RateLimitDescriptor{ + { + Entries: []contourv1.RateLimitDescriptorEntry{ + { + GenericKey: &contourv1.GenericKeyDescriptor{ + Value: "tlsvhostlimit", + }, + }, + }, + }, + }, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // Make a request against the proxy, confirm a 200 response + // is returned since we're allowed one request per hour. + res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + // Make another request against the proxy, confirm a 429 response + // is now gotten since we've exceeded the rate limit. + res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(429), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the route. 
+ p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ + VhRateLimits: "Include", + Global: &contourv1.GlobalRateLimitPolicy{ + Descriptors: []contourv1.RateLimitDescriptor{ + { + Entries: []contourv1.RateLimitDescriptorEntry{ + { + GenericKey: &contourv1.GenericKeyDescriptor{ + Key: "route_limit_key", + Value: "tlsroutelimit", + }, + }, + }, + }, + }, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // After adding rate limits on the route level that allows one request per hour + // but vhost_rate_limits is in include mode, make another request to confirm a 429 response. + res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(429), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + }) + + Specify("vhost_rate_limits option is set to ignore mode", func() { + t := f.T() + + f.Fixtures.Echo.Deploy(namespace, "echo") + f.Certs.CreateSelfSignedCert(namespace, "echo-cert", "echo", "vhratelimitsvhosttls.projectcontour.io") + + p := &contourv1.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "vhratelimitsvhosttls", + }, + Spec: contourv1.HTTPProxySpec{ + VirtualHost: &contourv1.VirtualHost{ + Fqdn: "vhratelimitsvhosttls.projectcontour.io", + TLS: &contourv1.TLS{ + SecretName: "echo", + }, + }, + Routes: []contourv1.Route{ + { + Services: []contourv1.Service{ + { + Name: "echo", + Port: 80, + }, + }, + Conditions: []contourv1.MatchCondition{ + { + Prefix: "/echo", + }, + }, + }, + }, + }, + } + p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) + + // Wait until we get a 200 from the proxy confirming + // the pods are up and serving traffic. 
+ res, ok := f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the virtual host. + p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ + Global: &contourv1.GlobalRateLimitPolicy{ + Descriptors: []contourv1.RateLimitDescriptor{ + { + Entries: []contourv1.RateLimitDescriptorEntry{ + { + GenericKey: &contourv1.GenericKeyDescriptor{ + Value: "tlsvhostlimit", + }, + }, + }, + }, + }, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // Make a request against the proxy, confirm a 200 response + // is returned since we're allowed one request per hour. + res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + // Make another request against the proxy, confirm a 429 response + // is now gotten since we've exceeded the rate limit. + res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(429), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Set the route's rate limit policy to ignore the virtual host's rate limits. 
+ p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ + VhRateLimits: "Ignore", + } + + return f.Client.Update(context.TODO(), p) + })) + + // We set vh_rate_limits to ignore, which means the route should ignore any rate limit policy + // set by the virtual host. Make another request to confirm 200. + res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + }) +} From 27f8cd91b7a4dc1d7ffbe7855efd302d6ad31d22 Mon Sep 17 00:00:00 2001 From: shadi-altarsha Date: Tue, 25 Jul 2023 18:13:04 +0200 Subject: [PATCH 03/16] add tests Signed-off-by: shadi-altarsha --- .../default_global_rate_limiting_test.go | 103 +++++++++ test/e2e/httpproxy/httpproxy_test.go | 15 +- test/e2e/httpproxy/vh_rate_limits_test.go | 204 ++++++------------ 3 files changed, 167 insertions(+), 155 deletions(-) diff --git a/test/e2e/httpproxy/default_global_rate_limiting_test.go b/test/e2e/httpproxy/default_global_rate_limiting_test.go index 9e2e07ee3db..50b736d05f6 100644 --- a/test/e2e/httpproxy/default_global_rate_limiting_test.go +++ b/test/e2e/httpproxy/default_global_rate_limiting_test.go @@ -16,6 +16,7 @@ package httpproxy import ( + "context" "net/http" . 
"github.com/onsi/ginkgo/v2" @@ -23,6 +24,8 @@ import ( "github.com/projectcontour/contour/test/e2e" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" ) func testDefaultGlobalRateLimitingVirtualHostNonTLS(namespace string) { @@ -362,3 +365,103 @@ func testDefaultGlobalRateLimitingVirtualHostTLS(namespace string) { require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) }) } + +func testDefaultGlobalRateLimitingWithVhRateLimits(namespace string) { + Specify("default global rate limit policy is applied vhost rate limits is override", func() { + t := f.T() + + f.Fixtures.Echo.Deploy(namespace, "echo") + + p := &contourv1.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "defaultglobalratelimitvhratelimits", + }, + Spec: contourv1.HTTPProxySpec{ + VirtualHost: &contourv1.VirtualHost{ + Fqdn: "defaultglobalratelimitvhratelimits.projectcontour.io", + }, + Routes: []contourv1.Route{ + { + Services: []contourv1.Service{ + { + Name: "echo", + Port: 80, + }, + }, + }, + }, + }, + } + p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) + + // Wait until we get a 429 from the proxy confirming + // that we've exceeded the rate limit. + res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Condition: e2e.HasStatusCode(429), + RequestOpts: []func(*http.Request){ + e2e.OptSetHeaders(map[string]string{ + "X-Default-Header": "test_value_1", + }), + }, + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the route. 
+ p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ + Global: &contourv1.GlobalRateLimitPolicy{ + Descriptors: []contourv1.RateLimitDescriptor{ + { + Entries: []contourv1.RateLimitDescriptorEntry{ + { + RequestHeader: &contourv1.RequestHeaderDescriptor{ + HeaderName: "X-HTTPProxy-Descriptor", + DescriptorKey: "customHeader", + }, + }, + }, + }, + }, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // After adding rate limits on the route level, make another request + // to confirm a 200 response since we override the policy by default on the route level, + // and the new limit allows 1 request per hour. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Condition: e2e.HasStatusCode(200), + RequestOpts: []func(*http.Request){ + e2e.OptSetHeaders(map[string]string{ + "X-Default-Header": "test_value_1", + }), + }, + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + // Make requests against the proxy, confirm a 429 response + // is now gotten since we've exceeded the rate limit. 
+ res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Condition: e2e.HasStatusCode(429), + RequestOpts: []func(*http.Request){ + e2e.OptSetHeaders(map[string]string{ + "X-HTTPProxy-Descriptor": "test_value", + }), + }, + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + }) +} diff --git a/test/e2e/httpproxy/httpproxy_test.go b/test/e2e/httpproxy/httpproxy_test.go index 73da21c9b0e..bd187370a4f 100644 --- a/test/e2e/httpproxy/httpproxy_test.go +++ b/test/e2e/httpproxy/httpproxy_test.go @@ -498,6 +498,7 @@ descriptors: f.NamespacedTest("httpproxy-default-global-rate-limiting-vhost-non-tls", withRateLimitService(testDefaultGlobalRateLimitingVirtualHostNonTLS)) f.NamespacedTest("httpproxy-default-global-rate-limiting-vhost-tls", withRateLimitService(testDefaultGlobalRateLimitingVirtualHostTLS)) + f.NamespacedTest("httpproxy-default-global-rate-limiting-vh-rate-limits", withRateLimitService(testDefaultGlobalRateLimitingWithVhRateLimits)) }) Context("vh rate limits", func() { @@ -532,16 +533,6 @@ descriptors: requests_per_unit: 1 - key: route_limit_key value: routelimit - rate_limit: - unit: hour - requests_per_unit: 1 - - key: generic_key - value: tlsvhostlimit - rate_limit: - unit: hour - requests_per_unit: 1 - - key: route_limit_key - value: tlsroutelimit rate_limit: unit: hour requests_per_unit: 1`)) @@ -552,9 +543,9 @@ descriptors: } } - f.NamespacedTest("httpproxy-global-vh-rate-limits-vhost-non-tls", withRateLimitService(testGlobalWithVhostRateLimitsWithNonTLSVirtualHost)) + f.NamespacedTest("httpproxy-global-rate-limit-with-vh-rate-limits-option", withRateLimitService(testGlobalWithVhostRateLimits)) - f.NamespacedTest("httpproxy-global-vh-rate-limits-vhost-tls", withRateLimitService(testGlobalWithVhostRateLimitsWithTLSVirtualHost)) + f.NamespacedTest("httpproxy-local-rate-limit-with-vh-rate-limits-option", 
withRateLimitService(testLocalWithVhostRateLimits)) }) Context("cookie-rewriting", func() { diff --git a/test/e2e/httpproxy/vh_rate_limits_test.go b/test/e2e/httpproxy/vh_rate_limits_test.go index 6b536326e67..1587b48a563 100644 --- a/test/e2e/httpproxy/vh_rate_limits_test.go +++ b/test/e2e/httpproxy/vh_rate_limits_test.go @@ -27,7 +27,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -func testGlobalWithVhostRateLimitsWithNonTLSVirtualHost(namespace string) { +func testGlobalWithVhostRateLimits(namespace string) { Specify("vhost_rate_limits is set to the default override mode (implicitly)", func() { t := f.T() @@ -538,24 +538,20 @@ func testGlobalWithVhostRateLimitsWithNonTLSVirtualHost(namespace string) { }) } -func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { - Specify("vhost_rate_limits option is set to the default override mode (implicitly)", func() { +func testLocalWithVhostRateLimits(namespace string) { + Specify("vhost_rate_limits is set to the default override mode (implicitly)", func() { t := f.T() f.Fixtures.Echo.Deploy(namespace, "echo") - f.Certs.CreateSelfSignedCert(namespace, "echo-cert", "echo", "vhratelimitsvhosttls.projectcontour.io") p := &contourv1.HTTPProxy{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, - Name: "vhratelimitsvhosttls", + Name: "vhratelimitsvhostnontls", }, Spec: contourv1.HTTPProxySpec{ VirtualHost: &contourv1.VirtualHost{ - Fqdn: "vhratelimitsvhosttls.projectcontour.io", - TLS: &contourv1.TLS{ - SecretName: "echo", - }, + Fqdn: "vhratelimitsvhostnontls.projectcontour.io", }, Routes: []contourv1.Route{ { @@ -578,7 +574,7 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { // Wait until we get a 200 from the proxy confirming // the pods are up and serving traffic. 
- res, ok := f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Path: "/echo", Condition: e2e.HasStatusCode(200), @@ -591,20 +587,11 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { return err } - // Add a global rate limit policy on the virtual host. + // Add a local rate limit policy on the virtual host. p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ - Global: &contourv1.GlobalRateLimitPolicy{ - Descriptors: []contourv1.RateLimitDescriptor{ - { - Entries: []contourv1.RateLimitDescriptorEntry{ - { - GenericKey: &contourv1.GenericKeyDescriptor{ - Value: "tlsvhostlimit", - }, - }, - }, - }, - }, + Local: &contourv1.LocalRateLimitPolicy{ + Requests: 1, + Unit: "hour", }, } @@ -613,7 +600,7 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { // Make a request against the proxy, confirm a 200 response // is returned since we're allowed one request per hour. - res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Path: "/echo", Condition: e2e.HasStatusCode(200), @@ -623,7 +610,7 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { // Make another request against the proxy, confirm a 429 response // is now gotten since we've exceeded the rate limit. - res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Path: "/echo", Condition: e2e.HasStatusCode(429), @@ -636,21 +623,11 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { return err } - // Add a global rate limit policy on the route. + // Add a local rate limit policy on the route. 
p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ - Global: &contourv1.GlobalRateLimitPolicy{ - Descriptors: []contourv1.RateLimitDescriptor{ - { - Entries: []contourv1.RateLimitDescriptorEntry{ - { - GenericKey: &contourv1.GenericKeyDescriptor{ - Key: "route_limit_key", - Value: "tlsroutelimit", - }, - }, - }, - }, - }, + Local: &contourv1.LocalRateLimitPolicy{ + Requests: 1, + Unit: "hour", }, } @@ -660,7 +637,7 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { // After adding rate limits on the route level, make another request // to confirm a 200 response since we override the policy by default on the route level, // and the new limit allows 1 request per hour. - res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Path: "/echo", Condition: e2e.HasStatusCode(200), @@ -669,7 +646,7 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) // Make another request to confirm that route level rate limits got exceeded. 
- res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Path: "/echo", Condition: e2e.HasStatusCode(429), @@ -678,23 +655,19 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) }) - Specify("vhost_rate_limits option is set to the default override mode (explicitly)", func() { + Specify("vhost_rate_limits is set to the default override mode (explicitly)", func() { t := f.T() f.Fixtures.Echo.Deploy(namespace, "echo") - f.Certs.CreateSelfSignedCert(namespace, "echo-cert", "echo", "vhratelimitsvhosttls.projectcontour.io") p := &contourv1.HTTPProxy{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, - Name: "vhratelimitsvhosttls", + Name: "vhratelimitsvhostnontls", }, Spec: contourv1.HTTPProxySpec{ VirtualHost: &contourv1.VirtualHost{ - Fqdn: "vhratelimitsvhosttls.projectcontour.io", - TLS: &contourv1.TLS{ - SecretName: "echo", - }, + Fqdn: "vhratelimitsvhostnontls.projectcontour.io", }, Routes: []contourv1.Route{ { @@ -717,7 +690,7 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { // Wait until we get a 200 from the proxy confirming // the pods are up and serving traffic. - res, ok := f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Path: "/echo", Condition: e2e.HasStatusCode(200), @@ -730,20 +703,11 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { return err } - // Add a global rate limit policy on the virtual host. + // Add a local rate limit policy on the virtual host. 
p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ - Global: &contourv1.GlobalRateLimitPolicy{ - Descriptors: []contourv1.RateLimitDescriptor{ - { - Entries: []contourv1.RateLimitDescriptorEntry{ - { - GenericKey: &contourv1.GenericKeyDescriptor{ - Value: "tlsvhostlimit", - }, - }, - }, - }, - }, + Local: &contourv1.LocalRateLimitPolicy{ + Requests: 1, + Unit: "hour", }, } @@ -752,7 +716,7 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { // Make a request against the proxy, confirm a 200 response // is returned since we're allowed one request per hour. - res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Path: "/echo", Condition: e2e.HasStatusCode(200), @@ -762,7 +726,7 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { // Make another request against the proxy, confirm a 429 response // is now gotten since we've exceeded the rate limit. - res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Path: "/echo", Condition: e2e.HasStatusCode(429), @@ -775,22 +739,12 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { return err } - // Add a global rate limit policy on the route. + // Add a local rate limit policy on the route. 
p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ VhRateLimits: "Override", - Global: &contourv1.GlobalRateLimitPolicy{ - Descriptors: []contourv1.RateLimitDescriptor{ - { - Entries: []contourv1.RateLimitDescriptorEntry{ - { - GenericKey: &contourv1.GenericKeyDescriptor{ - Key: "route_limit_key", - Value: "tlsroutelimit", - }, - }, - }, - }, - }, + Local: &contourv1.LocalRateLimitPolicy{ + Requests: 1, + Unit: "hour", }, } @@ -800,7 +754,7 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { // After adding rate limits on the route level, make another request // to confirm a 200 response since we override the policy by default on the route level, // and the new limit allows 1 request per hour. - res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Path: "/echo", Condition: e2e.HasStatusCode(200), @@ -809,7 +763,7 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) // Make another request to confirm that route level rate limits got exceeded. 
- res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Path: "/echo", Condition: e2e.HasStatusCode(429), @@ -818,23 +772,19 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) }) - Specify("vhost_rate_limits option is set to include mode", func() { + Specify("vhost_rate_limits is set to include mode", func() { t := f.T() f.Fixtures.Echo.Deploy(namespace, "echo") - f.Certs.CreateSelfSignedCert(namespace, "echo-cert", "echo", "vhratelimitsvhosttls.projectcontour.io") p := &contourv1.HTTPProxy{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, - Name: "vhratelimitsvhosttls", + Name: "vhratelimitsvhostnontls", }, Spec: contourv1.HTTPProxySpec{ VirtualHost: &contourv1.VirtualHost{ - Fqdn: "vhratelimitsvhosttls.projectcontour.io", - TLS: &contourv1.TLS{ - SecretName: "echo", - }, + Fqdn: "vhratelimitsvhostnontls.projectcontour.io", }, Routes: []contourv1.Route{ { @@ -857,7 +807,7 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { // Wait until we get a 200 from the proxy confirming // the pods are up and serving traffic. - res, ok := f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Path: "/echo", Condition: e2e.HasStatusCode(200), @@ -870,20 +820,11 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { return err } - // Add a global rate limit policy on the virtual host. + // Add a local rate limit policy on the virtual host. 
p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ - Global: &contourv1.GlobalRateLimitPolicy{ - Descriptors: []contourv1.RateLimitDescriptor{ - { - Entries: []contourv1.RateLimitDescriptorEntry{ - { - GenericKey: &contourv1.GenericKeyDescriptor{ - Value: "tlsvhostlimit", - }, - }, - }, - }, - }, + Local: &contourv1.LocalRateLimitPolicy{ + Requests: 1, + Unit: "hour", }, } @@ -892,7 +833,7 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { // Make a request against the proxy, confirm a 200 response // is returned since we're allowed one request per hour. - res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Path: "/echo", Condition: e2e.HasStatusCode(200), @@ -902,7 +843,7 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { // Make another request against the proxy, confirm a 429 response // is now gotten since we've exceeded the rate limit. - res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Path: "/echo", Condition: e2e.HasStatusCode(429), @@ -915,22 +856,12 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { return err } - // Add a global rate limit policy on the route. + // Add a local rate limit policy on the route. 
p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ VhRateLimits: "Include", - Global: &contourv1.GlobalRateLimitPolicy{ - Descriptors: []contourv1.RateLimitDescriptor{ - { - Entries: []contourv1.RateLimitDescriptorEntry{ - { - GenericKey: &contourv1.GenericKeyDescriptor{ - Key: "route_limit_key", - Value: "tlsroutelimit", - }, - }, - }, - }, - }, + Local: &contourv1.LocalRateLimitPolicy{ + Requests: 1, + Unit: "hour", }, } @@ -939,7 +870,7 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { // After adding rate limits on the route level that allows one request per hour // but vhost_rate_limits is in include mode, make another request to confirm a 429 response. - res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Path: "/echo", Condition: e2e.HasStatusCode(429), @@ -948,23 +879,19 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) }) - Specify("vhost_rate_limits option is set to ignore mode", func() { + Specify("vhost_rate_limits is set to ignore mode", func() { t := f.T() f.Fixtures.Echo.Deploy(namespace, "echo") - f.Certs.CreateSelfSignedCert(namespace, "echo-cert", "echo", "vhratelimitsvhosttls.projectcontour.io") p := &contourv1.HTTPProxy{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, - Name: "vhratelimitsvhosttls", + Name: "vhratelimitsvhostnontls", }, Spec: contourv1.HTTPProxySpec{ VirtualHost: &contourv1.VirtualHost{ - Fqdn: "vhratelimitsvhosttls.projectcontour.io", - TLS: &contourv1.TLS{ - SecretName: "echo", - }, + Fqdn: "vhratelimitsvhostnontls.projectcontour.io", }, Routes: []contourv1.Route{ { @@ -987,7 +914,7 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { // Wait until we get a 200 from the proxy confirming // the pods are up and serving traffic. 
- res, ok := f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Path: "/echo", Condition: e2e.HasStatusCode(200), @@ -1000,20 +927,11 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { return err } - // Add a global rate limit policy on the virtual host. + // Add a local rate limit policy on the virtual host. p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ - Global: &contourv1.GlobalRateLimitPolicy{ - Descriptors: []contourv1.RateLimitDescriptor{ - { - Entries: []contourv1.RateLimitDescriptorEntry{ - { - GenericKey: &contourv1.GenericKeyDescriptor{ - Value: "tlsvhostlimit", - }, - }, - }, - }, - }, + Local: &contourv1.LocalRateLimitPolicy{ + Requests: 1, + Unit: "hour", }, } @@ -1022,7 +940,7 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { // Make a request against the proxy, confirm a 200 response // is returned since we're allowed one request per hour. - res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Path: "/echo", Condition: e2e.HasStatusCode(200), @@ -1032,7 +950,7 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { // Make another request against the proxy, confirm a 429 response // is now gotten since we've exceeded the rate limit. - res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Path: "/echo", Condition: e2e.HasStatusCode(429), @@ -1045,7 +963,7 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { return err } - // Add a global rate limit policy on the route. + // Add a local rate limit policy on the route. 
p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ VhRateLimits: "Ignore", } @@ -1055,7 +973,7 @@ func testGlobalWithVhostRateLimitsWithTLSVirtualHost(namespace string) { // We set vh_rate_limits to ignore, which means the route should ignore any rate limit policy // set by the virtual host. Make another request to confirm 200. - res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Path: "/echo", Condition: e2e.HasStatusCode(200), From dcd74b8a368d61b247bbd6702cffce38c7132ab0 Mon Sep 17 00:00:00 2001 From: shadi-altarsha Date: Mon, 7 Aug 2023 16:12:18 +0200 Subject: [PATCH 04/16] Add more e2e tests Signed-off-by: shadi-altarsha --- .../default_global_rate_limiting_test.go | 273 +++++++++++++++++- test/e2e/httpproxy/vh_rate_limits_test.go | 52 +--- 2 files changed, 275 insertions(+), 50 deletions(-) diff --git a/test/e2e/httpproxy/default_global_rate_limiting_test.go b/test/e2e/httpproxy/default_global_rate_limiting_test.go index 50b736d05f6..ccd3c34f68e 100644 --- a/test/e2e/httpproxy/default_global_rate_limiting_test.go +++ b/test/e2e/httpproxy/default_global_rate_limiting_test.go @@ -367,7 +367,7 @@ func testDefaultGlobalRateLimitingVirtualHostTLS(namespace string) { } func testDefaultGlobalRateLimitingWithVhRateLimits(namespace string) { - Specify("default global rate limit policy is applied vhost rate limits is override", func() { + Specify("default global rate limit policy is applied and vhost rate limits is set to override mode (implicitly)", func() { t := f.T() f.Fixtures.Echo.Deploy(namespace, "echo") @@ -452,7 +452,205 @@ func testDefaultGlobalRateLimitingWithVhRateLimits(namespace string) { // Make requests against the proxy, confirm a 429 response // is now gotten since we've exceeded the rate limit. 
- res, ok = f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Condition: e2e.HasStatusCode(429), + RequestOpts: []func(*http.Request){ + e2e.OptSetHeaders(map[string]string{ + "X-HTTPProxy-Descriptor": "test_value", + }), + }, + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + }) + + Specify("default global rate limit policy is applied and vhost rate limits is set to override mode (explicitly)", func() { + t := f.T() + + f.Fixtures.Echo.Deploy(namespace, "echo") + + p := &contourv1.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "defaultglobalratelimitvhratelimits", + }, + Spec: contourv1.HTTPProxySpec{ + VirtualHost: &contourv1.VirtualHost{ + Fqdn: "defaultglobalratelimitvhratelimits.projectcontour.io", + }, + Routes: []contourv1.Route{ + { + Services: []contourv1.Service{ + { + Name: "echo", + Port: 80, + }, + }, + }, + }, + }, + } + p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) + + // Wait until we get a 429 from the proxy confirming + // that we've exceeded the rate limit. + res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Condition: e2e.HasStatusCode(429), + RequestOpts: []func(*http.Request){ + e2e.OptSetHeaders(map[string]string{ + "X-Default-Header": "test_value_1", + }), + }, + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the route. 
+ p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ + VhRateLimits: "Override", + Global: &contourv1.GlobalRateLimitPolicy{ + Descriptors: []contourv1.RateLimitDescriptor{ + { + Entries: []contourv1.RateLimitDescriptorEntry{ + { + RequestHeader: &contourv1.RequestHeaderDescriptor{ + HeaderName: "X-HTTPProxy-Descriptor", + DescriptorKey: "customHeader", + }, + }, + }, + }, + }, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // After adding rate limits on the route level, make another request + // to confirm a 200 response since we override the policy by default on the route level, + // and the new limit allows 1 request per hour. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Condition: e2e.HasStatusCode(200), + RequestOpts: []func(*http.Request){ + e2e.OptSetHeaders(map[string]string{ + "X-Default-Header": "test_value_1", + }), + }, + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + // Make requests against the proxy, confirm a 429 response + // is now gotten since we've exceeded the rate limit. 
+ res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Condition: e2e.HasStatusCode(429), + RequestOpts: []func(*http.Request){ + e2e.OptSetHeaders(map[string]string{ + "X-HTTPProxy-Descriptor": "test_value", + }), + }, + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + }) + + Specify("default global rate limit policy is applied and vhost rate limits is set to include mode", func() { + t := f.T() + + f.Fixtures.Echo.Deploy(namespace, "echo") + + p := &contourv1.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "defaultglobalratelimitvhratelimits", + }, + Spec: contourv1.HTTPProxySpec{ + VirtualHost: &contourv1.VirtualHost{ + Fqdn: "defaultglobalratelimitvhratelimits.projectcontour.io", + }, + Routes: []contourv1.Route{ + { + Services: []contourv1.Service{ + { + Name: "echo", + Port: 80, + }, + }, + }, + }, + }, + } + p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) + + // Wait until we get a 429 from the proxy confirming + // that we've exceeded the rate limit. + res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Condition: e2e.HasStatusCode(429), + RequestOpts: []func(*http.Request){ + e2e.OptSetHeaders(map[string]string{ + "X-Default-Header": "test_value_1", + }), + }, + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the route. 
+ p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ + VhRateLimits: "Include", + Global: &contourv1.GlobalRateLimitPolicy{ + Descriptors: []contourv1.RateLimitDescriptor{ + { + Entries: []contourv1.RateLimitDescriptorEntry{ + { + RequestHeader: &contourv1.RequestHeaderDescriptor{ + HeaderName: "X-HTTPProxy-Descriptor", + DescriptorKey: "customHeader", + }, + }, + }, + }, + }, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // After adding rate limits on the route level that allows one request per hour + // but vhost_rate_limits is in include mode, make another request to confirm a 429 response. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + RequestOpts: []func(*http.Request){ + e2e.OptSetHeaders(map[string]string{ + "X-Default-Header": "test_value_1", + }), + }, + Condition: e2e.HasStatusCode(429), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + + // Make requests against the proxy, confirm a 429 response + // is now gotten since we've exceeded the rate limit. 
+ res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Condition: e2e.HasStatusCode(429), RequestOpts: []func(*http.Request){ @@ -464,4 +662,75 @@ func testDefaultGlobalRateLimitingWithVhRateLimits(namespace string) { require.NotNil(t, res, "request never succeeded") require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) }) + + Specify("default global rate limit policy is applied and vhost rate limits is set to ignore mode", func() { + t := f.T() + + f.Fixtures.Echo.Deploy(namespace, "echo") + + p := &contourv1.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "defaultglobalratelimitvhratelimits", + }, + Spec: contourv1.HTTPProxySpec{ + VirtualHost: &contourv1.VirtualHost{ + Fqdn: "defaultglobalratelimitvhratelimits.projectcontour.io", + }, + Routes: []contourv1.Route{ + { + Services: []contourv1.Service{ + { + Name: "echo", + Port: 80, + }, + }, + }, + }, + }, + } + p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) + + // Wait until we get a 429 from the proxy confirming + // that we've exceeded the rate limit. + res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Condition: e2e.HasStatusCode(429), + RequestOpts: []func(*http.Request){ + e2e.OptSetHeaders(map[string]string{ + "X-Default-Header": "test_value_1", + }), + }, + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the route. 
+ p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ + VhRateLimits: "Ignore", + } + + return f.Client.Update(context.TODO(), p) + })) + + // We set vh_rate_limits to ignore, which means the route should ignore any rate limit policy + // set by the virtual host. Make another request to confirm 200. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + RequestOpts: []func(*http.Request){ + e2e.OptSetHeaders(map[string]string{ + "X-Default-Header": "test_value_1", + }), + }, + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + }) } diff --git a/test/e2e/httpproxy/vh_rate_limits_test.go b/test/e2e/httpproxy/vh_rate_limits_test.go index 1587b48a563..3cab39c165f 100644 --- a/test/e2e/httpproxy/vh_rate_limits_test.go +++ b/test/e2e/httpproxy/vh_rate_limits_test.go @@ -96,18 +96,7 @@ func testGlobalWithVhostRateLimits(namespace string) { return f.Client.Update(context.TODO(), p) })) - // Make a request against the proxy, confirm a 200 response - // is returned since we're allowed one request per hour. - res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(200), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - - // Make another request against the proxy, confirm a 429 response - // is now gotten since we've exceeded the rate limit. + // Confirm a 429 response is now gotten since we've exceeded the rate limit. 
res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Path: "/echo", @@ -231,18 +220,7 @@ func testGlobalWithVhostRateLimits(namespace string) { return f.Client.Update(context.TODO(), p) })) - // Make a request against the proxy, confirm a 200 response - // is returned since we're allowed one request per hour. - res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(200), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - - // Make another request against the proxy, confirm a 429 response - // is now gotten since we've exceeded the rate limit. + // Confirm a 429 response is now gotten since we've exceeded the rate limit. res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Path: "/echo", @@ -367,18 +345,7 @@ func testGlobalWithVhostRateLimits(namespace string) { return f.Client.Update(context.TODO(), p) })) - // Make a request against the proxy, confirm a 200 response - // is returned since we're allowed one request per hour. - res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(200), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - - // Make another request against the proxy, confirm a 429 response - // is now gotten since we've exceeded the rate limit. + // Confirm a 429 response is now gotten since we've exceeded the rate limit. res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Path: "/echo", @@ -493,18 +460,7 @@ func testGlobalWithVhostRateLimits(namespace string) { return f.Client.Update(context.TODO(), p) })) - // Make a request against the proxy, confirm a 200 response - // is returned since we're allowed one request per hour. 
- res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(200), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - - // Make another request against the proxy, confirm a 429 response - // is now gotten since we've exceeded the rate limit. + // Confirm a 429 response is now gotten since we've exceeded the rate limit. res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Path: "/echo", From 890a899e88a90591344bca01c9222d52134d5dc8 Mon Sep 17 00:00:00 2001 From: shadi-altarsha Date: Mon, 7 Aug 2023 16:15:05 +0200 Subject: [PATCH 05/16] Improve CRD description Signed-off-by: shadi-altarsha --- examples/contour/01-crds.yaml | 2 +- examples/render/contour-deployment.yaml | 2 +- examples/render/contour-gateway-provisioner.yaml | 2 +- examples/render/contour-gateway.yaml | 2 +- examples/render/contour.yaml | 2 +- site/content/docs/main/config/api-reference.html | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/examples/contour/01-crds.yaml b/examples/contour/01-crds.yaml index afe71cef2e4..62001e7929d 100644 --- a/examples/contour/01-crds.yaml +++ b/examples/contour/01-crds.yaml @@ -5956,7 +5956,7 @@ spec: handle rate limits defined by the virtual host. Default value is Override, which means use the virtual host rate limits unless the route has a rate limit policy. Allowed - values are: Override, Include, or ignore.' + values are: Override, Include, or Ignore.' type: string type: object requestHeadersPolicy: diff --git a/examples/render/contour-deployment.yaml b/examples/render/contour-deployment.yaml index a8f2bddc61e..c752ffd87a3 100644 --- a/examples/render/contour-deployment.yaml +++ b/examples/render/contour-deployment.yaml @@ -6175,7 +6175,7 @@ spec: handle rate limits defined by the virtual host. 
Default value is Override, which means use the virtual host rate limits unless the route has a rate limit policy. Allowed - values are: Override, Include, or ignore.' + values are: Override, Include, or Ignore.' type: string type: object requestHeadersPolicy: diff --git a/examples/render/contour-gateway-provisioner.yaml b/examples/render/contour-gateway-provisioner.yaml index 1fa56522638..7a40915c7e5 100644 --- a/examples/render/contour-gateway-provisioner.yaml +++ b/examples/render/contour-gateway-provisioner.yaml @@ -5967,7 +5967,7 @@ spec: handle rate limits defined by the virtual host. Default value is Override, which means use the virtual host rate limits unless the route has a rate limit policy. Allowed - values are: Override, Include, or ignore.' + values are: Override, Include, or Ignore.' type: string type: object requestHeadersPolicy: diff --git a/examples/render/contour-gateway.yaml b/examples/render/contour-gateway.yaml index b32c0d6939e..5a7c1cfda08 100644 --- a/examples/render/contour-gateway.yaml +++ b/examples/render/contour-gateway.yaml @@ -6178,7 +6178,7 @@ spec: handle rate limits defined by the virtual host. Default value is Override, which means use the virtual host rate limits unless the route has a rate limit policy. Allowed - values are: Override, Include, or ignore.' + values are: Override, Include, or Ignore.' type: string type: object requestHeadersPolicy: diff --git a/examples/render/contour.yaml b/examples/render/contour.yaml index 31ca9ca6075..51667d4461b 100644 --- a/examples/render/contour.yaml +++ b/examples/render/contour.yaml @@ -6175,7 +6175,7 @@ spec: handle rate limits defined by the virtual host. Default value is Override, which means use the virtual host rate limits unless the route has a rate limit policy. Allowed - values are: Override, Include, or ignore.' + values are: Override, Include, or Ignore.' 
type: string type: object requestHeadersPolicy: diff --git a/site/content/docs/main/config/api-reference.html b/site/content/docs/main/config/api-reference.html index 6fd17aff4b4..ef6ef862110 100644 --- a/site/content/docs/main/config/api-reference.html +++ b/site/content/docs/main/config/api-reference.html @@ -3862,7 +3862,7 @@

RouteRateLimitPolicy (Optional)

VhRateLimits defines how the route should handle rate limits defined by the virtual host. Default value is Override, which means use the virtual host rate limits unless the route has a rate limit policy. -Allowed values are: Override, Include, or ignore.

+Allowed values are: Override, Include, or Ignore.

From 6d1a7a19a5458c336481c35f75dc28e880b7e974 Mon Sep 17 00:00:00 2001 From: shadi-altarsha Date: Tue, 8 Aug 2023 12:49:21 +0200 Subject: [PATCH 06/16] Add unit tests for rateLimitPerRoute Signed-off-by: shadi-altarsha --- internal/dag/httpproxy_processor.go | 2 +- internal/dag/httpproxy_processor_test.go | 284 +++++++++++++++++++++++ 2 files changed, 285 insertions(+), 1 deletion(-) diff --git a/internal/dag/httpproxy_processor.go b/internal/dag/httpproxy_processor.go index ebcebe96f52..d6e73760116 100644 --- a/internal/dag/httpproxy_processor.go +++ b/internal/dag/httpproxy_processor.go @@ -1991,7 +1991,7 @@ func rateLimitPerRoute(in *contour_api_v1.RouteRateLimitPolicy) (*RateLimitPerRo case "Ignore": vrl = VhRateLimitsIgnore default: - return nil, fmt.Errorf("error parsing rateLimitPerRoute config, %s is not supported", in) + return nil, fmt.Errorf("error parsing rateLimitPerRoute config, %s is not supported", in.VhRateLimits) } return &RateLimitPerRoute{ diff --git a/internal/dag/httpproxy_processor_test.go b/internal/dag/httpproxy_processor_test.go index 953ca6a545f..507f24915a2 100644 --- a/internal/dag/httpproxy_processor_test.go +++ b/internal/dag/httpproxy_processor_test.go @@ -14,6 +14,7 @@ package dag import ( + "errors" "net" "testing" "time" @@ -1386,3 +1387,286 @@ func TestValidateVirtualHostRateLimitPolicy(t *testing.T) { }) } } + +func TestRateLimitPerRoute(t *testing.T) { + tests := map[string]struct { + httpproxy *contour_api_v1.HTTPProxy + want *RateLimitPerRoute + expectedError error + }{ + "route RateLimitPolicy is not set": { + httpproxy: &contour_api_v1.HTTPProxy{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "ns", + }, + Spec: contour_api_v1.HTTPProxySpec{ + VirtualHost: &contour_api_v1.VirtualHost{ + Fqdn: "foo.projectcontour.io", + }, + Routes: []contour_api_v1.Route{ + { + Services: []contour_api_v1.Service{ + { + Name: "foo", + Port: 80, + }, + }, + Conditions: []contour_api_v1.MatchCondition{ + { + Prefix: "/bar", + }, + }, + }, + }, + }, 
+ }, + want: nil, + expectedError: nil, + }, + "VhRateLimits value is not set in RateLimitPolicy": { + httpproxy: &contour_api_v1.HTTPProxy{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "ns", + }, + Spec: contour_api_v1.HTTPProxySpec{ + VirtualHost: &contour_api_v1.VirtualHost{ + Fqdn: "foo.projectcontour.io", + }, + Routes: []contour_api_v1.Route{ + { + Services: []contour_api_v1.Service{ + { + Name: "foo", + Port: 80, + }, + }, + Conditions: []contour_api_v1.MatchCondition{ + { + Prefix: "/bar", + }, + }, + RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ + Global: &contour_api_v1.GlobalRateLimitPolicy{ + Descriptors: []contour_api_v1.RateLimitDescriptor{ + { + Entries: []contour_api_v1.RateLimitDescriptorEntry{ + { + GenericKey: &contour_api_v1.GenericKeyDescriptor{ + Key: "route_limit_key", + Value: "routelimit", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + want: nil, + expectedError: nil, + }, + "VhRateLimits value is set to Override in RateLimitPolicy": { + httpproxy: &contour_api_v1.HTTPProxy{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "ns", + }, + Spec: contour_api_v1.HTTPProxySpec{ + VirtualHost: &contour_api_v1.VirtualHost{ + Fqdn: "foo.projectcontour.io", + }, + Routes: []contour_api_v1.Route{ + { + Services: []contour_api_v1.Service{ + { + Name: "foo", + Port: 80, + }, + }, + Conditions: []contour_api_v1.MatchCondition{ + { + Prefix: "/bar", + }, + }, + RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ + VhRateLimits: "Override", + Global: &contour_api_v1.GlobalRateLimitPolicy{ + Descriptors: []contour_api_v1.RateLimitDescriptor{ + { + Entries: []contour_api_v1.RateLimitDescriptorEntry{ + { + GenericKey: &contour_api_v1.GenericKeyDescriptor{ + Key: "route_limit_key", + Value: "routelimit", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + want: &RateLimitPerRoute{ + VhRateLimits: 0, + }, + expectedError: nil, + }, + "VhRateLimits value is set to Include in RateLimitPolicy": { + httpproxy: &contour_api_v1.HTTPProxy{ + 
ObjectMeta: v1.ObjectMeta{ + Namespace: "ns", + }, + Spec: contour_api_v1.HTTPProxySpec{ + VirtualHost: &contour_api_v1.VirtualHost{ + Fqdn: "foo.projectcontour.io", + }, + Routes: []contour_api_v1.Route{ + { + Services: []contour_api_v1.Service{ + { + Name: "foo", + Port: 80, + }, + }, + Conditions: []contour_api_v1.MatchCondition{ + { + Prefix: "/bar", + }, + }, + RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ + VhRateLimits: "Include", + Global: &contour_api_v1.GlobalRateLimitPolicy{ + Descriptors: []contour_api_v1.RateLimitDescriptor{ + { + Entries: []contour_api_v1.RateLimitDescriptorEntry{ + { + GenericKey: &contour_api_v1.GenericKeyDescriptor{ + Key: "route_limit_key", + Value: "routelimit", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + want: &RateLimitPerRoute{ + VhRateLimits: 1, + }, + expectedError: nil, + }, + "VhRateLimits value is set to Ignore in RateLimitPolicy": { + httpproxy: &contour_api_v1.HTTPProxy{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "ns", + }, + Spec: contour_api_v1.HTTPProxySpec{ + VirtualHost: &contour_api_v1.VirtualHost{ + Fqdn: "foo.projectcontour.io", + }, + Routes: []contour_api_v1.Route{ + { + Services: []contour_api_v1.Service{ + { + Name: "foo", + Port: 80, + }, + }, + Conditions: []contour_api_v1.MatchCondition{ + { + Prefix: "/bar", + }, + }, + RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ + VhRateLimits: "Ignore", + Global: &contour_api_v1.GlobalRateLimitPolicy{ + Descriptors: []contour_api_v1.RateLimitDescriptor{ + { + Entries: []contour_api_v1.RateLimitDescriptorEntry{ + { + GenericKey: &contour_api_v1.GenericKeyDescriptor{ + Key: "route_limit_key", + Value: "routelimit", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + want: &RateLimitPerRoute{ + VhRateLimits: 2, + }, + expectedError: nil, + }, + "VhRateLimits value is set to an unsupported value in RateLimitPolicy": { + httpproxy: &contour_api_v1.HTTPProxy{ + ObjectMeta: v1.ObjectMeta{ + Namespace: "ns", + }, + Spec: 
contour_api_v1.HTTPProxySpec{ + VirtualHost: &contour_api_v1.VirtualHost{ + Fqdn: "foo.projectcontour.io", + }, + Routes: []contour_api_v1.Route{ + { + Services: []contour_api_v1.Service{ + { + Name: "foo", + Port: 80, + }, + }, + Conditions: []contour_api_v1.MatchCondition{ + { + Prefix: "/bar", + }, + }, + RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ + VhRateLimits: "wrong_value", + Global: &contour_api_v1.GlobalRateLimitPolicy{ + Descriptors: []contour_api_v1.RateLimitDescriptor{ + { + Entries: []contour_api_v1.RateLimitDescriptorEntry{ + { + GenericKey: &contour_api_v1.GenericKeyDescriptor{ + Key: "route_limit_key", + Value: "routelimit", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + want: nil, + expectedError: errors.New("error parsing rateLimitPerRoute config, wrong_value is not supported"), + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + for _, route := range tc.httpproxy.Spec.Routes { + got, err := rateLimitPerRoute(route.RateLimitPolicy) + require.Equal(t, tc.expectedError, err) + require.Equal(t, tc.want, got) + } + }) + } +} From 86676037b933f6b5e5908d0d8a3dd2a4745cdfdc Mon Sep 17 00:00:00 2001 From: shadi-altarsha Date: Tue, 8 Aug 2023 12:52:48 +0200 Subject: [PATCH 07/16] Fix linting issue Signed-off-by: shadi-altarsha --- internal/dag/httpproxy_processor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/dag/httpproxy_processor.go b/internal/dag/httpproxy_processor.go index d6e73760116..7a9304da297 100644 --- a/internal/dag/httpproxy_processor.go +++ b/internal/dag/httpproxy_processor.go @@ -1982,7 +1982,7 @@ func rateLimitPerRoute(in *contour_api_v1.RouteRateLimitPolicy) (*RateLimitPerRo return nil, nil } - vrl := VhRateLimitsOverride + var vrl VhRateLimitsType switch in.VhRateLimits { case "Override": vrl = VhRateLimitsOverride From bd9af46def75faf63e754c58aa2bf920fe917f5b Mon Sep 17 00:00:00 2001 From: shadi-altarsha Date: Tue, 8 Aug 2023 14:47:40 +0200 Subject: [PATCH 
08/16] Add a changelog entry Signed-off-by: shadi-altarsha --- .../unreleased/5657-shadialtarsha-minor.md | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 changelogs/unreleased/5657-shadialtarsha-minor.md diff --git a/changelogs/unreleased/5657-shadialtarsha-minor.md b/changelogs/unreleased/5657-shadialtarsha-minor.md new file mode 100644 index 00000000000..dcffe69e17c --- /dev/null +++ b/changelogs/unreleased/5657-shadialtarsha-minor.md @@ -0,0 +1,42 @@ +## Add support for extensions.filters.http.ratelimit.v3.RateLimitPerRoute + +Envoy has extensions.filters.http.ratelimit.v3.RateLimitPerRoute API which allows control over the Vhost Rate Limits on the route level. + +### Sample Configurations +#### httpproxy.yaml +```yaml +apiVersion: projectcontour.io/v1 +kind: HTTPProxy +metadata: + name: echo +spec: + virtualhost: + fqdn: local.projectcontour.io + rateLimitPolicy: + global: + disabled: true + local: + requests: 100 + unit: hour + burst: 20 + routes: + - conditions: + - prefix: / + services: + - name: ingress-conformance-echo + port: 80 + - conditions: + - prefix: /foo + rateLimitPolicy: + vhRateLimits: "Ignore" + global: + descriptors: + - entries: + - remoteAddress: {} + - entries: + - genericKey: + value: foo + services: + - name: ingress-conformance-echo + port: 80 +``` From 171d135a1f7f93c97ac28978130919023d3a222b Mon Sep 17 00:00:00 2001 From: shadi-altarsha Date: Wed, 16 Aug 2023 12:45:24 +0200 Subject: [PATCH 09/16] Refactor the solution Signed-off-by: shadi-altarsha --- apis/projectcontour/v1/httpproxy.go | 29 +- .../v1/zz_generated.deepcopy.go | 79 +- .../unreleased/5657-shadialtarsha-minor.md | 12 +- examples/contour/01-crds.yaml | 7 - examples/render/contour-deployment.yaml | 7 - .../render/contour-gateway-provisioner.yaml | 7 - examples/render/contour-gateway.yaml | 7 - examples/render/contour.yaml | 7 - internal/dag/httpproxy_processor.go | 36 +- internal/dag/httpproxy_processor_test.go | 207 +--- internal/dag/policy.go 
| 18 +- internal/dag/policy_test.go | 385 +------ .../featuretests/v3/globalratelimit_test.go | 18 +- .../featuretests/v3/localratelimit_test.go | 16 +- .../docs/main/config/api-reference.html | 189 ++-- .../default_global_rate_limiting_test.go | 310 +----- .../httpproxy/global_rate_limiting_test.go | 215 +++- test/e2e/httpproxy/httpproxy_test.go | 51 +- .../e2e/httpproxy/local_rate_limiting_test.go | 4 +- test/e2e/httpproxy/vh_rate_limits_test.go | 940 ------------------ 20 files changed, 386 insertions(+), 2158 deletions(-) delete mode 100644 test/e2e/httpproxy/vh_rate_limits_test.go diff --git a/apis/projectcontour/v1/httpproxy.go b/apis/projectcontour/v1/httpproxy.go index 58796fa8c9e..db77734597f 100644 --- a/apis/projectcontour/v1/httpproxy.go +++ b/apis/projectcontour/v1/httpproxy.go @@ -322,7 +322,7 @@ type VirtualHost struct { CORSPolicy *CORSPolicy `json:"corsPolicy,omitempty"` // The policy for rate limiting on the virtual host. // +optional - RateLimitPolicy *VhostRateLimitPolicy `json:"rateLimitPolicy,omitempty"` + RateLimitPolicy *RateLimitPolicy `json:"rateLimitPolicy,omitempty"` // Providers to use for verifying JSON Web Tokens (JWTs) on the virtual host. // +optional JWTProviders []JWTProvider `json:"jwtProviders,omitempty"` @@ -564,7 +564,7 @@ type Route struct { CookieRewritePolicies []CookieRewritePolicy `json:"cookieRewritePolicies,omitempty"` // The policy for rate limiting on the route. // +optional - RateLimitPolicy *RouteRateLimitPolicy `json:"rateLimitPolicy,omitempty"` + RateLimitPolicy *RateLimitPolicy `json:"rateLimitPolicy,omitempty"` // RequestRedirectPolicy defines an HTTP redirection. // +optional @@ -785,8 +785,8 @@ type CookieDomainRewrite struct { Value string `json:"value"` } -// VhostRateLimitPolicy defines rate limiting parameters on the virtual host level. -type VhostRateLimitPolicy struct { +// RateLimitPolicy defines rate limiting parameters. 
+type RateLimitPolicy struct { // Local defines local rate limiting parameters, i.e. parameters // for rate limiting that occurs within each Envoy pod as requests // are handled. @@ -800,27 +800,6 @@ type VhostRateLimitPolicy struct { Global *GlobalRateLimitPolicy `json:"global,omitempty"` } -// RouteRateLimitPolicy defines rate limiting parameters on the route-level. -type RouteRateLimitPolicy struct { - // Local defines local rate limiting parameters, i.e. parameters - // for rate limiting that occurs within each Envoy pod as requests - // are handled. - // +optional - Local *LocalRateLimitPolicy `json:"local,omitempty"` - - // Global defines global rate limiting parameters, i.e. parameters - // defining descriptors that are sent to an external rate limit - // service (RLS) for a rate limit decision on each request. - // +optional - Global *GlobalRateLimitPolicy `json:"global,omitempty"` - - // VhRateLimits defines how the route should handle rate limits defined by the virtual host. - // Default value is Override, which means use the virtual host rate limits unless the route has a rate limit policy. - // Allowed values are: Override, Include, or Ignore. - // +optional - VhRateLimits string `json:"vhRateLimits,omitempty"` -} - // LocalRateLimitPolicy defines local rate limiting parameters. type LocalRateLimitPolicy struct { // Requests defines how many requests per unit of time should diff --git a/apis/projectcontour/v1/zz_generated.deepcopy.go b/apis/projectcontour/v1/zz_generated.deepcopy.go index 7a16deba294..3c3537ef24e 100644 --- a/apis/projectcontour/v1/zz_generated.deepcopy.go +++ b/apis/projectcontour/v1/zz_generated.deepcopy.go @@ -873,6 +873,31 @@ func (in *RateLimitDescriptorEntry) DeepCopy() *RateLimitDescriptorEntry { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RateLimitPolicy) DeepCopyInto(out *RateLimitPolicy) { + *out = *in + if in.Local != nil { + in, out := &in.Local, &out.Local + *out = new(LocalRateLimitPolicy) + (*in).DeepCopyInto(*out) + } + if in.Global != nil { + in, out := &in.Global, &out.Global + *out = new(GlobalRateLimitPolicy) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RateLimitPolicy. +func (in *RateLimitPolicy) DeepCopy() *RateLimitPolicy { + if in == nil { + return nil + } + out := new(RateLimitPolicy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RemoteAddressDescriptor) DeepCopyInto(out *RemoteAddressDescriptor) { *out = *in @@ -1074,7 +1099,7 @@ func (in *Route) DeepCopyInto(out *Route) { } if in.RateLimitPolicy != nil { in, out := &in.RateLimitPolicy, &out.RateLimitPolicy - *out = new(RouteRateLimitPolicy) + *out = new(RateLimitPolicy) (*in).DeepCopyInto(*out) } if in.RequestRedirectPolicy != nil { @@ -1119,31 +1144,6 @@ func (in *Route) DeepCopy() *Route { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RouteRateLimitPolicy) DeepCopyInto(out *RouteRateLimitPolicy) { - *out = *in - if in.Local != nil { - in, out := &in.Local, &out.Local - *out = new(LocalRateLimitPolicy) - (*in).DeepCopyInto(*out) - } - if in.Global != nil { - in, out := &in.Global, &out.Global - *out = new(GlobalRateLimitPolicy) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteRateLimitPolicy. 
-func (in *RouteRateLimitPolicy) DeepCopy() *RouteRateLimitPolicy { - if in == nil { - return nil - } - out := new(RouteRateLimitPolicy) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Service) DeepCopyInto(out *Service) { *out = *in @@ -1446,31 +1446,6 @@ func (in *UpstreamValidation) DeepCopy() *UpstreamValidation { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VhostRateLimitPolicy) DeepCopyInto(out *VhostRateLimitPolicy) { - *out = *in - if in.Local != nil { - in, out := &in.Local, &out.Local - *out = new(LocalRateLimitPolicy) - (*in).DeepCopyInto(*out) - } - if in.Global != nil { - in, out := &in.Global, &out.Global - *out = new(GlobalRateLimitPolicy) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VhostRateLimitPolicy. -func (in *VhostRateLimitPolicy) DeepCopy() *VhostRateLimitPolicy { - if in == nil { - return nil - } - out := new(VhostRateLimitPolicy) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *VirtualHost) DeepCopyInto(out *VirtualHost) { *out = *in @@ -1491,7 +1466,7 @@ func (in *VirtualHost) DeepCopyInto(out *VirtualHost) { } if in.RateLimitPolicy != nil { in, out := &in.RateLimitPolicy, &out.RateLimitPolicy - *out = new(VhostRateLimitPolicy) + *out = new(RateLimitPolicy) (*in).DeepCopyInto(*out) } if in.JWTProviders != nil { diff --git a/changelogs/unreleased/5657-shadialtarsha-minor.md b/changelogs/unreleased/5657-shadialtarsha-minor.md index dcffe69e17c..3fe6bd98d63 100644 --- a/changelogs/unreleased/5657-shadialtarsha-minor.md +++ b/changelogs/unreleased/5657-shadialtarsha-minor.md @@ -1,6 +1,6 @@ -## Add support for extensions.filters.http.ratelimit.v3.RateLimitPerRoute +## Disable the virtualhost's Global RateLimit policy -Envoy has extensions.filters.http.ratelimit.v3.RateLimitPerRoute API which allows control over the Vhost Rate Limits on the route level. +Setting `global.disabled` flag to true on a specific route should disable the vhost global rate limit policy. ### Sample Configurations #### httpproxy.yaml @@ -28,14 +28,8 @@ spec: - conditions: - prefix: /foo rateLimitPolicy: - vhRateLimits: "Ignore" global: - descriptors: - - entries: - - remoteAddress: {} - - entries: - - genericKey: - value: foo + disabled: true services: - name: ingress-conformance-echo port: 80 diff --git a/examples/contour/01-crds.yaml b/examples/contour/01-crds.yaml index 62001e7929d..3e81d9fce04 100644 --- a/examples/contour/01-crds.yaml +++ b/examples/contour/01-crds.yaml @@ -5951,13 +5951,6 @@ spec: - requests - unit type: object - vhRateLimits: - description: 'VhRateLimits defines how the route should - handle rate limits defined by the virtual host. Default - value is Override, which means use the virtual host rate - limits unless the route has a rate limit policy. Allowed - values are: Override, Include, or Ignore.'
- type: string type: object requestHeadersPolicy: description: The policy for managing request headers during diff --git a/examples/render/contour-deployment.yaml b/examples/render/contour-deployment.yaml index c752ffd87a3..862ceca2f1c 100644 --- a/examples/render/contour-deployment.yaml +++ b/examples/render/contour-deployment.yaml @@ -6170,13 +6170,6 @@ spec: - requests - unit type: object - vhRateLimits: - description: 'VhRateLimits defines how the route should - handle rate limits defined by the virtual host. Default - value is Override, which means use the virtual host rate - limits unless the route has a rate limit policy. Allowed - values are: Override, Include, or Ignore.' - type: string type: object requestHeadersPolicy: description: The policy for managing request headers during diff --git a/examples/render/contour-gateway-provisioner.yaml b/examples/render/contour-gateway-provisioner.yaml index 7a40915c7e5..0978046bb73 100644 --- a/examples/render/contour-gateway-provisioner.yaml +++ b/examples/render/contour-gateway-provisioner.yaml @@ -5962,13 +5962,6 @@ spec: - requests - unit type: object - vhRateLimits: - description: 'VhRateLimits defines how the route should - handle rate limits defined by the virtual host. Default - value is Override, which means use the virtual host rate - limits unless the route has a rate limit policy. Allowed - values are: Override, Include, or Ignore.' - type: string type: object requestHeadersPolicy: description: The policy for managing request headers during diff --git a/examples/render/contour-gateway.yaml b/examples/render/contour-gateway.yaml index 5a7c1cfda08..e457342a634 100644 --- a/examples/render/contour-gateway.yaml +++ b/examples/render/contour-gateway.yaml @@ -6173,13 +6173,6 @@ spec: - requests - unit type: object - vhRateLimits: - description: 'VhRateLimits defines how the route should - handle rate limits defined by the virtual host. 
Default - value is Override, which means use the virtual host rate - limits unless the route has a rate limit policy. Allowed - values are: Override, Include, or Ignore.' - type: string type: object requestHeadersPolicy: description: The policy for managing request headers during diff --git a/examples/render/contour.yaml b/examples/render/contour.yaml index 51667d4461b..9991783a76b 100644 --- a/examples/render/contour.yaml +++ b/examples/render/contour.yaml @@ -6170,13 +6170,6 @@ spec: - requests - unit type: object - vhRateLimits: - description: 'VhRateLimits defines how the route should - handle rate limits defined by the virtual host. Default - value is Override, which means use the virtual host rate - limits unless the route has a rate limit policy. Allowed - values are: Override, Include, or Ignore.' - type: string type: object requestHeadersPolicy: description: The policy for managing request headers during diff --git a/internal/dag/httpproxy_processor.go b/internal/dag/httpproxy_processor.go index 7a9304da297..255a1fa0b71 100644 --- a/internal/dag/httpproxy_processor.go +++ b/internal/dag/httpproxy_processor.go @@ -791,19 +791,14 @@ func (p *HTTPProxyProcessor) computeRoutes( return nil } - rlp, err := routeRateLimitPolicy(route.RateLimitPolicy) + rlp, err := rateLimitPolicy(route.RateLimitPolicy) if err != nil { validCond.AddErrorf(contour_api_v1.ConditionTypeRouteError, "RateLimitPolicyNotValid", "route.rateLimitPolicy is invalid: %s", err) return nil } - vrl, err := rateLimitPerRoute(route.RateLimitPolicy) - if err != nil { - validCond.AddErrorf(contour_api_v1.ConditionTypeRouteError, "RateLimitPerRouteNotValid", - "route.rateLimitPerRoute is invalid: %s", err) - return nil - } + vrl := rateLimitPerRoute(route.RateLimitPolicy) requestHashPolicies, lbPolicy := loadBalancerRequestHashPolicies(route.LoadBalancerPolicy, validCond) @@ -1432,7 +1427,7 @@ func (p *HTTPProxyProcessor) computeSecureVirtualHostAuthorization(validCond *co } func 
computeVirtualHostRateLimitPolicy(proxy *contour_api_v1.HTTPProxy, rls *contour_api_v1alpha1.RateLimitServiceConfig, validCond *contour_api_v1.DetailedCondition) (*RateLimitPolicy, bool) { - rlp, err := vhostRateLimitPolicy(proxy.Spec.VirtualHost.RateLimitPolicy) + rlp, err := rateLimitPolicy(proxy.Spec.VirtualHost.RateLimitPolicy) if err != nil { validCond.AddErrorf(contour_api_v1.ConditionTypeVirtualHostError, "RateLimitPolicyNotValid", "Spec.VirtualHost.RateLimitPolicy is invalid: %s", err) @@ -1977,24 +1972,13 @@ func slowStartConfig(slowStart *contour_api_v1.SlowStartPolicy) (*SlowStartConfi }, nil } -func rateLimitPerRoute(in *contour_api_v1.RouteRateLimitPolicy) (*RateLimitPerRoute, error) { - if in == nil || in.VhRateLimits == "" { - return nil, nil - } - - var vrl VhRateLimitsType - switch in.VhRateLimits { - case "Override": - vrl = VhRateLimitsOverride - case "Include": - vrl = VhRateLimitsInclude - case "Ignore": - vrl = VhRateLimitsIgnore - default: - return nil, fmt.Errorf("error parsing rateLimitPerRoute config, %s is not supported", in.VhRateLimits) +func rateLimitPerRoute(in *contour_api_v1.RateLimitPolicy) *RateLimitPerRoute { + // Ignore the virtual host global rate limit policy if disabled is true + if in != nil && in.Global != nil && in.Global.Disabled { + return &RateLimitPerRoute{ + VhRateLimits: VhRateLimitsIgnore, + } } - return &RateLimitPerRoute{ - VhRateLimits: vrl, - }, nil + return nil } diff --git a/internal/dag/httpproxy_processor_test.go b/internal/dag/httpproxy_processor_test.go index 507f24915a2..b090d3c4945 100644 --- a/internal/dag/httpproxy_processor_test.go +++ b/internal/dag/httpproxy_processor_test.go @@ -14,7 +14,6 @@ package dag import ( - "errors" "net" "testing" "time" @@ -1030,7 +1029,7 @@ func TestValidateVirtualHostRateLimitPolicy(t *testing.T) { }, Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ - RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ + RateLimitPolicy: 
&contour_api_v1.RateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -1093,7 +1092,7 @@ func TestValidateVirtualHostRateLimitPolicy(t *testing.T) { }, Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ - RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Disabled: true, }, @@ -1130,7 +1129,7 @@ func TestValidateVirtualHostRateLimitPolicy(t *testing.T) { }, Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ - RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -1239,7 +1238,7 @@ func TestValidateVirtualHostRateLimitPolicy(t *testing.T) { }, Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ - RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 10, Unit: "second", @@ -1298,7 +1297,7 @@ func TestValidateVirtualHostRateLimitPolicy(t *testing.T) { }, Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ - RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 10, Unit: "second", @@ -1350,7 +1349,7 @@ func TestValidateVirtualHostRateLimitPolicy(t *testing.T) { }, Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ - RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -1390,11 +1389,10 @@ func TestValidateVirtualHostRateLimitPolicy(t *testing.T) { func 
TestRateLimitPerRoute(t *testing.T) { tests := map[string]struct { - httpproxy *contour_api_v1.HTTPProxy - want *RateLimitPerRoute - expectedError error + httpproxy *contour_api_v1.HTTPProxy + want *RateLimitPerRoute }{ - "route RateLimitPolicy is not set": { + "route doesn't disable the global rate limit functionality": { httpproxy: &contour_api_v1.HTTPProxy{ ObjectMeta: v1.ObjectMeta{ Namespace: "ns", @@ -1420,151 +1418,9 @@ func TestRateLimitPerRoute(t *testing.T) { }, }, }, - want: nil, - expectedError: nil, - }, - "VhRateLimits value is not set in RateLimitPolicy": { - httpproxy: &contour_api_v1.HTTPProxy{ - ObjectMeta: v1.ObjectMeta{ - Namespace: "ns", - }, - Spec: contour_api_v1.HTTPProxySpec{ - VirtualHost: &contour_api_v1.VirtualHost{ - Fqdn: "foo.projectcontour.io", - }, - Routes: []contour_api_v1.Route{ - { - Services: []contour_api_v1.Service{ - { - Name: "foo", - Port: 80, - }, - }, - Conditions: []contour_api_v1.MatchCondition{ - { - Prefix: "/bar", - }, - }, - RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ - Global: &contour_api_v1.GlobalRateLimitPolicy{ - Descriptors: []contour_api_v1.RateLimitDescriptor{ - { - Entries: []contour_api_v1.RateLimitDescriptorEntry{ - { - GenericKey: &contour_api_v1.GenericKeyDescriptor{ - Key: "route_limit_key", - Value: "routelimit", - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - want: nil, - expectedError: nil, - }, - "VhRateLimits value is set to Override in RateLimitPolicy": { - httpproxy: &contour_api_v1.HTTPProxy{ - ObjectMeta: v1.ObjectMeta{ - Namespace: "ns", - }, - Spec: contour_api_v1.HTTPProxySpec{ - VirtualHost: &contour_api_v1.VirtualHost{ - Fqdn: "foo.projectcontour.io", - }, - Routes: []contour_api_v1.Route{ - { - Services: []contour_api_v1.Service{ - { - Name: "foo", - Port: 80, - }, - }, - Conditions: []contour_api_v1.MatchCondition{ - { - Prefix: "/bar", - }, - }, - RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ - VhRateLimits: "Override", - Global: 
&contour_api_v1.GlobalRateLimitPolicy{ - Descriptors: []contour_api_v1.RateLimitDescriptor{ - { - Entries: []contour_api_v1.RateLimitDescriptorEntry{ - { - GenericKey: &contour_api_v1.GenericKeyDescriptor{ - Key: "route_limit_key", - Value: "routelimit", - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - want: &RateLimitPerRoute{ - VhRateLimits: 0, - }, - expectedError: nil, - }, - "VhRateLimits value is set to Include in RateLimitPolicy": { - httpproxy: &contour_api_v1.HTTPProxy{ - ObjectMeta: v1.ObjectMeta{ - Namespace: "ns", - }, - Spec: contour_api_v1.HTTPProxySpec{ - VirtualHost: &contour_api_v1.VirtualHost{ - Fqdn: "foo.projectcontour.io", - }, - Routes: []contour_api_v1.Route{ - { - Services: []contour_api_v1.Service{ - { - Name: "foo", - Port: 80, - }, - }, - Conditions: []contour_api_v1.MatchCondition{ - { - Prefix: "/bar", - }, - }, - RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ - VhRateLimits: "Include", - Global: &contour_api_v1.GlobalRateLimitPolicy{ - Descriptors: []contour_api_v1.RateLimitDescriptor{ - { - Entries: []contour_api_v1.RateLimitDescriptorEntry{ - { - GenericKey: &contour_api_v1.GenericKeyDescriptor{ - Key: "route_limit_key", - Value: "routelimit", - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - want: &RateLimitPerRoute{ - VhRateLimits: 1, - }, - expectedError: nil, + want: nil, }, - "VhRateLimits value is set to Ignore in RateLimitPolicy": { + "route disables the global rate limit functionality": { httpproxy: &contour_api_v1.HTTPProxy{ ObjectMeta: v1.ObjectMeta{ Namespace: "ns", @@ -1586,21 +1442,9 @@ func TestRateLimitPerRoute(t *testing.T) { Prefix: "/bar", }, }, - RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ - VhRateLimits: "Ignore", + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ - Descriptors: []contour_api_v1.RateLimitDescriptor{ - { - Entries: []contour_api_v1.RateLimitDescriptorEntry{ - { - GenericKey: &contour_api_v1.GenericKeyDescriptor{ - 
Key: "route_limit_key", - Value: "routelimit", - }, - }, - }, - }, - }, + Disabled: true, }, }, }, @@ -1610,9 +1454,8 @@ func TestRateLimitPerRoute(t *testing.T) { want: &RateLimitPerRoute{ VhRateLimits: 2, }, - expectedError: nil, }, - "VhRateLimits value is set to an unsupported value in RateLimitPolicy": { + "route doesn't disable the global rate limit functionality explicitly": { httpproxy: &contour_api_v1.HTTPProxy{ ObjectMeta: v1.ObjectMeta{ Namespace: "ns", @@ -1634,37 +1477,23 @@ func TestRateLimitPerRoute(t *testing.T) { Prefix: "/bar", }, }, - RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ - VhRateLimits: "wrong_value", + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ - Descriptors: []contour_api_v1.RateLimitDescriptor{ - { - Entries: []contour_api_v1.RateLimitDescriptorEntry{ - { - GenericKey: &contour_api_v1.GenericKeyDescriptor{ - Key: "route_limit_key", - Value: "routelimit", - }, - }, - }, - }, - }, + Disabled: false, }, }, }, }, }, }, - want: nil, - expectedError: errors.New("error parsing rateLimitPerRoute config, wrong_value is not supported"), + want: nil, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { for _, route := range tc.httpproxy.Spec.Routes { - got, err := rateLimitPerRoute(route.RateLimitPolicy) - require.Equal(t, tc.expectedError, err) + got := rateLimitPerRoute(route.RateLimitPolicy) require.Equal(t, tc.want, got) } }) diff --git a/internal/dag/policy.go b/internal/dag/policy.go index 676d4deca84..8f6966ccbbf 100644 --- a/internal/dag/policy.go +++ b/internal/dag/policy.go @@ -538,32 +538,20 @@ func prefixReplacementsAreValid(replacements []contour_api_v1.ReplacePrefix) (st return "", nil } -func vhostRateLimitPolicy(in *contour_api_v1.VhostRateLimitPolicy) (*RateLimitPolicy, error) { +func rateLimitPolicy(in *contour_api_v1.RateLimitPolicy) (*RateLimitPolicy, error) { if in == nil || (in.Local == nil && (in.Global == nil || len(in.Global.Descriptors) == 0)) 
{ return nil, nil } - return rateLimitPolicy(in.Local, in.Global) -} - -func routeRateLimitPolicy(in *contour_api_v1.RouteRateLimitPolicy) (*RateLimitPolicy, error) { - if in == nil || (in.Local == nil && (in.Global == nil || len(in.Global.Descriptors) == 0)) { - return nil, nil - } - - return rateLimitPolicy(in.Local, in.Global) -} - -func rateLimitPolicy(lrl *contour_api_v1.LocalRateLimitPolicy, grl *contour_api_v1.GlobalRateLimitPolicy) (*RateLimitPolicy, error) { rp := &RateLimitPolicy{} - local, err := localRateLimitPolicy(lrl) + local, err := localRateLimitPolicy(in.Local) if err != nil { return nil, err } rp.Local = local - global, err := globalRateLimitPolicy(grl) + global, err := globalRateLimitPolicy(in.Global) if err != nil { return nil, err } diff --git a/internal/dag/policy_test.go b/internal/dag/policy_test.go index e29a0aed213..f222ff109e2 100644 --- a/internal/dag/policy_test.go +++ b/internal/dag/policy_test.go @@ -654,9 +654,9 @@ func TestHeadersPolicy(t *testing.T) { } } -func TestVhostRateLimitPolicy(t *testing.T) { +func TestRateLimitPolicy(t *testing.T) { tests := map[string]struct { - in *contour_api_v1.VhostRateLimitPolicy + in *contour_api_v1.RateLimitPolicy want *RateLimitPolicy wantErr string }{ @@ -665,11 +665,11 @@ func TestVhostRateLimitPolicy(t *testing.T) { want: nil, }, "nil local rate limit policy": { - in: &contour_api_v1.VhostRateLimitPolicy{}, + in: &contour_api_v1.RateLimitPolicy{}, want: nil, }, "local - no burst": { - in: &contour_api_v1.VhostRateLimitPolicy{ + in: &contour_api_v1.RateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 3, Unit: "second", @@ -684,7 +684,7 @@ func TestVhostRateLimitPolicy(t *testing.T) { }, }, "local - burst": { - in: &contour_api_v1.VhostRateLimitPolicy{ + in: &contour_api_v1.RateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 3, Unit: "second", @@ -700,7 +700,7 @@ func TestVhostRateLimitPolicy(t *testing.T) { }, }, "local - custom response status code": { - 
in: &contour_api_v1.VhostRateLimitPolicy{ + in: &contour_api_v1.RateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 10, Unit: "minute", @@ -717,7 +717,7 @@ func TestVhostRateLimitPolicy(t *testing.T) { }, }, "local - custom response headers to add": { - in: &contour_api_v1.VhostRateLimitPolicy{ + in: &contour_api_v1.RateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 10, Unit: "hour", @@ -746,7 +746,7 @@ func TestVhostRateLimitPolicy(t *testing.T) { }, }, "local - duplicate response header": { - in: &contour_api_v1.VhostRateLimitPolicy{ + in: &contour_api_v1.RateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 10, Unit: "hour", @@ -765,7 +765,7 @@ func TestVhostRateLimitPolicy(t *testing.T) { wantErr: "duplicate header addition: \"Duplicate-Header\"", }, "local - invalid response header name": { - in: &contour_api_v1.VhostRateLimitPolicy{ + in: &contour_api_v1.RateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 10, Unit: "hour", @@ -780,7 +780,7 @@ func TestVhostRateLimitPolicy(t *testing.T) { wantErr: `invalid header name "Invalid-Header!": [a valid HTTP header must consist of alphanumeric characters or '-' (e.g. 
'X-Header-Name', regex used for validation is '[-A-Za-z0-9]+')]`, }, "local - invalid unit": { - in: &contour_api_v1.VhostRateLimitPolicy{ + in: &contour_api_v1.RateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 10, Unit: "invalid-unit", @@ -789,7 +789,7 @@ func TestVhostRateLimitPolicy(t *testing.T) { wantErr: "invalid unit \"invalid-unit\" in local rate limit policy", }, "local - invalid requests": { - in: &contour_api_v1.VhostRateLimitPolicy{ + in: &contour_api_v1.RateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 0, Unit: "second", @@ -798,7 +798,7 @@ func TestVhostRateLimitPolicy(t *testing.T) { wantErr: "invalid requests value 0 in local rate limit policy", }, "global - multiple descriptors": { - in: &contour_api_v1.VhostRateLimitPolicy{ + in: &contour_api_v1.RateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -876,7 +876,7 @@ func TestVhostRateLimitPolicy(t *testing.T) { }, }, "global - multiple descriptor entries set": { - in: &contour_api_v1.VhostRateLimitPolicy{ + in: &contour_api_v1.RateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -893,7 +893,7 @@ func TestVhostRateLimitPolicy(t *testing.T) { wantErr: "rate limit descriptor entry must have exactly one field set", }, "global - no descriptor entries set": { - in: &contour_api_v1.VhostRateLimitPolicy{ + in: &contour_api_v1.RateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -907,7 +907,7 @@ func TestVhostRateLimitPolicy(t *testing.T) { wantErr: "rate limit descriptor entry must have exactly one field set", }, "global - header value match": { - in: &contour_api_v1.VhostRateLimitPolicy{ + in: &contour_api_v1.RateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -954,7 +954,7 @@ func 
TestVhostRateLimitPolicy(t *testing.T) { }, }, "global and local": { - in: &contour_api_v1.VhostRateLimitPolicy{ + in: &contour_api_v1.RateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 20, Unit: "second", @@ -994,358 +994,7 @@ func TestVhostRateLimitPolicy(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { - rlp, err := vhostRateLimitPolicy(tc.in) - - if tc.wantErr != "" { - assert.EqualError(t, err, tc.wantErr) - } else { - assert.Equal(t, tc.want, rlp) - } - }) - } -} - -func TestRouteRateLimitPolicy(t *testing.T) { - tests := map[string]struct { - in *contour_api_v1.RouteRateLimitPolicy - want *RateLimitPolicy - wantErr string - }{ - "nil input": { - in: nil, - want: nil, - }, - "nil local rate limit policy": { - in: &contour_api_v1.RouteRateLimitPolicy{}, - want: nil, - }, - "local - no burst": { - in: &contour_api_v1.RouteRateLimitPolicy{ - Local: &contour_api_v1.LocalRateLimitPolicy{ - Requests: 3, - Unit: "second", - }, - }, - want: &RateLimitPolicy{ - Local: &LocalRateLimitPolicy{ - MaxTokens: 3, - TokensPerFill: 3, - FillInterval: time.Second, - }, - }, - }, - "local - burst": { - in: &contour_api_v1.RouteRateLimitPolicy{ - Local: &contour_api_v1.LocalRateLimitPolicy{ - Requests: 3, - Unit: "second", - Burst: 4, - }, - }, - want: &RateLimitPolicy{ - Local: &LocalRateLimitPolicy{ - MaxTokens: 7, - TokensPerFill: 3, - FillInterval: time.Second, - }, - }, - }, - "local - custom response status code": { - in: &contour_api_v1.RouteRateLimitPolicy{ - Local: &contour_api_v1.LocalRateLimitPolicy{ - Requests: 10, - Unit: "minute", - ResponseStatusCode: 431, - }, - }, - want: &RateLimitPolicy{ - Local: &LocalRateLimitPolicy{ - MaxTokens: 10, - TokensPerFill: 10, - FillInterval: time.Minute, - ResponseStatusCode: 431, - }, - }, - }, - "local - custom response headers to add": { - in: &contour_api_v1.RouteRateLimitPolicy{ - Local: &contour_api_v1.LocalRateLimitPolicy{ - Requests: 10, - Unit: "hour", - 
ResponseHeadersToAdd: []contour_api_v1.HeaderValue{ - { - Name: "header-1", - Value: "header-value-1", - }, - { - Name: "header-2", - Value: "header-value-2", - }, - }, - }, - }, - want: &RateLimitPolicy{ - Local: &LocalRateLimitPolicy{ - MaxTokens: 10, - TokensPerFill: 10, - FillInterval: time.Hour, - ResponseHeadersToAdd: map[string]string{ - "Header-1": "header-value-1", - "Header-2": "header-value-2", - }, - }, - }, - }, - "local - duplicate response header": { - in: &contour_api_v1.RouteRateLimitPolicy{ - Local: &contour_api_v1.LocalRateLimitPolicy{ - Requests: 10, - Unit: "hour", - ResponseHeadersToAdd: []contour_api_v1.HeaderValue{ - { - Name: "duplicate-header", - Value: "header-value-1", - }, - { - Name: "duplicate-header", - Value: "header-value-2", - }, - }, - }, - }, - wantErr: "duplicate header addition: \"Duplicate-Header\"", - }, - "local - invalid response header name": { - in: &contour_api_v1.RouteRateLimitPolicy{ - Local: &contour_api_v1.LocalRateLimitPolicy{ - Requests: 10, - Unit: "hour", - ResponseHeadersToAdd: []contour_api_v1.HeaderValue{ - { - Name: "invalid-header!", - Value: "header-value-1", - }, - }, - }, - }, - wantErr: `invalid header name "Invalid-Header!": [a valid HTTP header must consist of alphanumeric characters or '-' (e.g. 
'X-Header-Name', regex used for validation is '[-A-Za-z0-9]+')]`, - }, - "local - invalid unit": { - in: &contour_api_v1.RouteRateLimitPolicy{ - Local: &contour_api_v1.LocalRateLimitPolicy{ - Requests: 10, - Unit: "invalid-unit", - }, - }, - wantErr: "invalid unit \"invalid-unit\" in local rate limit policy", - }, - "local - invalid requests": { - in: &contour_api_v1.RouteRateLimitPolicy{ - Local: &contour_api_v1.LocalRateLimitPolicy{ - Requests: 0, - Unit: "second", - }, - }, - wantErr: "invalid requests value 0 in local rate limit policy", - }, - "global - multiple descriptors": { - in: &contour_api_v1.RouteRateLimitPolicy{ - Global: &contour_api_v1.GlobalRateLimitPolicy{ - Descriptors: []contour_api_v1.RateLimitDescriptor{ - { - Entries: []contour_api_v1.RateLimitDescriptorEntry{ - { - GenericKey: &contour_api_v1.GenericKeyDescriptor{ - Key: "generic-key-key", - Value: "generic-key-value", - }, - }, - { - RemoteAddress: &contour_api_v1.RemoteAddressDescriptor{}, - }, - { - RequestHeader: &contour_api_v1.RequestHeaderDescriptor{ - HeaderName: "X-Header", - DescriptorKey: "request-header-key", - }, - }, - }, - }, - { - Entries: []contour_api_v1.RateLimitDescriptorEntry{ - { - RemoteAddress: &contour_api_v1.RemoteAddressDescriptor{}, - }, - { - GenericKey: &contour_api_v1.GenericKeyDescriptor{ - Key: "generic-key-key-2", - Value: "generic-key-value-2", - }, - }, - }, - }, - }, - }, - }, - want: &RateLimitPolicy{ - Global: &GlobalRateLimitPolicy{ - Descriptors: []*RateLimitDescriptor{ - { - Entries: []RateLimitDescriptorEntry{ - { - GenericKey: &GenericKeyDescriptorEntry{ - Key: "generic-key-key", - Value: "generic-key-value", - }, - }, - { - RemoteAddress: &RemoteAddressDescriptorEntry{}, - }, - { - HeaderMatch: &HeaderMatchDescriptorEntry{ - HeaderName: "X-Header", - Key: "request-header-key", - }, - }, - }, - }, - { - Entries: []RateLimitDescriptorEntry{ - { - RemoteAddress: &RemoteAddressDescriptorEntry{}, - }, - { - GenericKey: &GenericKeyDescriptorEntry{ - 
Key: "generic-key-key-2", - Value: "generic-key-value-2", - }, - }, - }, - }, - }, - }, - }, - }, - "global - multiple descriptor entries set": { - in: &contour_api_v1.RouteRateLimitPolicy{ - Global: &contour_api_v1.GlobalRateLimitPolicy{ - Descriptors: []contour_api_v1.RateLimitDescriptor{ - { - Entries: []contour_api_v1.RateLimitDescriptorEntry{ - { - GenericKey: &contour_api_v1.GenericKeyDescriptor{}, - RemoteAddress: &contour_api_v1.RemoteAddressDescriptor{}, - }, - }, - }, - }, - }, - }, - wantErr: "rate limit descriptor entry must have exactly one field set", - }, - "global - no descriptor entries set": { - in: &contour_api_v1.RouteRateLimitPolicy{ - Global: &contour_api_v1.GlobalRateLimitPolicy{ - Descriptors: []contour_api_v1.RateLimitDescriptor{ - { - Entries: []contour_api_v1.RateLimitDescriptorEntry{ - {}, - }, - }, - }, - }, - }, - wantErr: "rate limit descriptor entry must have exactly one field set", - }, - "global - header value match": { - in: &contour_api_v1.RouteRateLimitPolicy{ - Global: &contour_api_v1.GlobalRateLimitPolicy{ - Descriptors: []contour_api_v1.RateLimitDescriptor{ - { - Entries: []contour_api_v1.RateLimitDescriptorEntry{ - { - RequestHeaderValueMatch: &contour_api_v1.RequestHeaderValueMatchDescriptor{ - Headers: []contour_api_v1.HeaderMatchCondition{ - { - Name: "X-Header", - NotPresent: true, - }, - }, - ExpectMatch: true, - Value: "header-is-not-present", - }, - }, - }, - }, - }, - }, - }, - want: &RateLimitPolicy{ - Global: &GlobalRateLimitPolicy{ - Descriptors: []*RateLimitDescriptor{ - { - Entries: []RateLimitDescriptorEntry{ - { - HeaderValueMatch: &HeaderValueMatchDescriptorEntry{ - Headers: []HeaderMatchCondition{ - { - Name: "X-Header", - MatchType: "present", - Invert: true, - }, - }, - ExpectMatch: true, - Value: "header-is-not-present", - }, - }, - }, - }, - }, - }, - }, - }, - "global and local": { - in: &contour_api_v1.RouteRateLimitPolicy{ - Local: &contour_api_v1.LocalRateLimitPolicy{ - Requests: 20, - Unit: 
"second", - }, - Global: &contour_api_v1.GlobalRateLimitPolicy{ - Descriptors: []contour_api_v1.RateLimitDescriptor{ - { - Entries: []contour_api_v1.RateLimitDescriptorEntry{ - { - RemoteAddress: &contour_api_v1.RemoteAddressDescriptor{}, - }, - }, - }, - }, - }, - }, - want: &RateLimitPolicy{ - Local: &LocalRateLimitPolicy{ - MaxTokens: 20, - TokensPerFill: 20, - FillInterval: time.Second, - }, - Global: &GlobalRateLimitPolicy{ - Descriptors: []*RateLimitDescriptor{ - { - Entries: []RateLimitDescriptorEntry{ - { - RemoteAddress: &RemoteAddressDescriptorEntry{}, - }, - }, - }, - }, - }, - }, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - rlp, err := routeRateLimitPolicy(tc.in) + rlp, err := rateLimitPolicy(tc.in) if tc.wantErr != "" { assert.EqualError(t, err, tc.wantErr) diff --git a/internal/featuretests/v3/globalratelimit_test.go b/internal/featuretests/v3/globalratelimit_test.go index af8372ab7a1..216d2c46ab5 100644 --- a/internal/featuretests/v3/globalratelimit_test.go +++ b/internal/featuretests/v3/globalratelimit_test.go @@ -111,7 +111,7 @@ func globalRateLimitNoRateLimitsDefined(t *testing.T, rh ResourceEventHandlerWra Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ Fqdn: "foo.com", - RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Disabled: true, }, @@ -199,7 +199,7 @@ func globalRateLimitVhostRateLimitDefined(t *testing.T, rh ResourceEventHandlerW Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ Fqdn: "foo.com", - RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -291,7 +291,7 @@ func globalRateLimitRouteRateLimitDefined(t *testing.T, rh ResourceEventHandlerW Spec: contour_api_v1.HTTPProxySpec{ 
VirtualHost: &contour_api_v1.VirtualHost{ Fqdn: "foo.com", - RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Disabled: true, }, @@ -305,7 +305,7 @@ func globalRateLimitRouteRateLimitDefined(t *testing.T, rh ResourceEventHandlerW Port: 80, }, }, - RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -389,7 +389,7 @@ func globalRateLimitVhostAndRouteRateLimitDefined(t *testing.T, rh ResourceEvent Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ Fqdn: "foo.com", - RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -414,7 +414,7 @@ func globalRateLimitVhostAndRouteRateLimitDefined(t *testing.T, rh ResourceEvent Port: 80, }, }, - RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -523,7 +523,7 @@ func defaultGlobalRateLimitVhostRateLimitDefined(t *testing.T, rh ResourceEventH Port: 80, }, }, - RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ { @@ -621,7 +621,7 @@ func globalRateLimitMultipleDescriptorsAndEntries(t *testing.T, rh ResourceEvent Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ Fqdn: "foo.com", - RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Disabled: true, }, @@ -635,7 
+635,7 @@ func globalRateLimitMultipleDescriptorsAndEntries(t *testing.T, rh ResourceEvent Port: 80, }, }, - RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Global: &contour_api_v1.GlobalRateLimitPolicy{ Descriptors: []contour_api_v1.RateLimitDescriptor{ // first descriptor diff --git a/internal/featuretests/v3/localratelimit_test.go b/internal/featuretests/v3/localratelimit_test.go index a50be0b305f..1d870798eef 100644 --- a/internal/featuretests/v3/localratelimit_test.go +++ b/internal/featuretests/v3/localratelimit_test.go @@ -112,7 +112,7 @@ func vhostRateLimitDefined(t *testing.T, rh ResourceEventHandlerWrapper, c *Cont Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ Fqdn: "foo.com", - RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 100, Unit: "minute", @@ -191,7 +191,7 @@ func routeRateLimitsDefined(t *testing.T, rh ResourceEventHandlerWrapper, c *Con Port: 80, }, }, - RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 100, Unit: "minute", @@ -211,7 +211,7 @@ func routeRateLimitsDefined(t *testing.T, rh ResourceEventHandlerWrapper, c *Con Port: 80, }, }, - RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 5, Unit: "second", @@ -295,7 +295,7 @@ func vhostAndRouteRateLimitsDefined(t *testing.T, rh ResourceEventHandlerWrapper Spec: contour_api_v1.HTTPProxySpec{ VirtualHost: &contour_api_v1.VirtualHost{ Fqdn: "foo.com", - RateLimitPolicy: &contour_api_v1.VhostRateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 100, Unit: "minute", @@ -316,7 +316,7 @@ func 
vhostAndRouteRateLimitsDefined(t *testing.T, rh ResourceEventHandlerWrapper Port: 80, }, }, - RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 100, Unit: "minute", @@ -336,7 +336,7 @@ func vhostAndRouteRateLimitsDefined(t *testing.T, rh ResourceEventHandlerWrapper Port: 80, }, }, - RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 5, Unit: "second", @@ -456,7 +456,7 @@ func customResponseCode(t *testing.T, rh ResourceEventHandlerWrapper, c *Contour Port: 80, }, }, - RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 100, Unit: "minute", @@ -529,7 +529,7 @@ func customResponseHeaders(t *testing.T, rh ResourceEventHandlerWrapper, c *Cont Port: 80, }, }, - RateLimitPolicy: &contour_api_v1.RouteRateLimitPolicy{ + RateLimitPolicy: &contour_api_v1.RateLimitPolicy{ Local: &contour_api_v1.LocalRateLimitPolicy{ Requests: 100, Unit: "minute", diff --git a/site/content/docs/main/config/api-reference.html b/site/content/docs/main/config/api-reference.html index ef6ef862110..ea5f20f24f0 100644 --- a/site/content/docs/main/config/api-reference.html +++ b/site/content/docs/main/config/api-reference.html @@ -1226,8 +1226,7 @@

GlobalRateLimitPolicy

(Appears on: -RouteRateLimitPolicy, -VhostRateLimitPolicy, +RateLimitPolicy, RateLimitServiceConfig)

@@ -2497,8 +2496,7 @@

LocalRateLimitPolicy

(Appears on: -RouteRateLimitPolicy, -VhostRateLimitPolicy) +RateLimitPolicy)

LocalRateLimitPolicy defines local rate limiting parameters.

@@ -3000,6 +2998,60 @@

RateLimitDescriptorEntry +

RateLimitPolicy +

+

+(Appears on: +Route, +VirtualHost) +

+

+

RateLimitPolicy defines rate limiting parameters.

+

+ + + + + + + + + + + + + + + + + +
FieldDescription
+local +
+ + +LocalRateLimitPolicy + + +
+(Optional) +

Local defines local rate limiting parameters, i.e. parameters +for rate limiting that occurs within each Envoy pod as requests +are handled.

+
+global +
+ + +GlobalRateLimitPolicy + + +
+(Optional) +

Global defines global rate limiting parameters, i.e. parameters +defining descriptors that are sent to an external rate limit +service (RLS) for a rate limit decision on each request.

+

RedirectResponseCode (uint32 alias)

@@ -3693,8 +3745,8 @@

Route rateLimitPolicy
- -RouteRateLimitPolicy + +RateLimitPolicy @@ -3799,74 +3851,6 @@

Route -

RouteRateLimitPolicy -

-

-(Appears on: -Route) -

-

-

RouteRateLimitPolicy defines rate limiting parameters on the route-level.

-

- - - - - - - - - - - - - - - - - - - - - -
FieldDescription
-local -
- - -LocalRateLimitPolicy - - -
-(Optional) -

Local defines local rate limiting parameters, i.e. parameters -for rate limiting that occurs within each Envoy pod as requests -are handled.

-
-global -
- - -GlobalRateLimitPolicy - - -
-(Optional) -

Global defines global rate limiting parameters, i.e. parameters -defining descriptors that are sent to an external rate limit -service (RLS) for a rate limit decision on each request.

-
-vhRateLimits -
- -string - -
-(Optional) -

VhRateLimits defines how the route should handle rate limits defined by the virtual host. -Default value is Override, which means use the virtual host rate limits unless the route has a rate limit policy. -Allowed values are: Override, Include, or Ignore.

-

Service

@@ -4719,59 +4703,6 @@

UpstreamValidation -

VhostRateLimitPolicy -

-

-(Appears on: -VirtualHost) -

-

-

VhostRateLimitPolicy defines rate limiting parameters on the virtual host level.

-

- - - - - - - - - - - - - - - - - -
FieldDescription
-local -
- - -LocalRateLimitPolicy - - -
-(Optional) -

Local defines local rate limiting parameters, i.e. parameters -for rate limiting that occurs within each Envoy pod as requests -are handled.

-
-global -
- - -GlobalRateLimitPolicy - - -
-(Optional) -

Global defines global rate limiting parameters, i.e. parameters -defining descriptors that are sent to an external rate limit -service (RLS) for a rate limit decision on each request.

-

VirtualHost

@@ -4861,8 +4792,8 @@

VirtualHost rateLimitPolicy
- -VhostRateLimitPolicy + +RateLimitPolicy diff --git a/test/e2e/httpproxy/default_global_rate_limiting_test.go b/test/e2e/httpproxy/default_global_rate_limiting_test.go index ccd3c34f68e..ce282401655 100644 --- a/test/e2e/httpproxy/default_global_rate_limiting_test.go +++ b/test/e2e/httpproxy/default_global_rate_limiting_test.go @@ -85,7 +85,7 @@ func testDefaultGlobalRateLimitingVirtualHostNonTLS(namespace string) { Spec: contourv1.HTTPProxySpec{ VirtualHost: &contourv1.VirtualHost{ Fqdn: "defaultglobalratelimitvhostnontls.projectcontour.io", - RateLimitPolicy: &contourv1.VhostRateLimitPolicy{ + RateLimitPolicy: &contourv1.RateLimitPolicy{ Global: &contourv1.GlobalRateLimitPolicy{ Disabled: true, }, @@ -147,7 +147,7 @@ func testDefaultGlobalRateLimitingVirtualHostNonTLS(namespace string) { Spec: contourv1.HTTPProxySpec{ VirtualHost: &contourv1.VirtualHost{ Fqdn: "defaultglobalratelimitvhostnontls.projectcontour.io", - RateLimitPolicy: &contourv1.VhostRateLimitPolicy{ + RateLimitPolicy: &contourv1.RateLimitPolicy{ Global: &contourv1.GlobalRateLimitPolicy{ Descriptors: []contourv1.RateLimitDescriptor{ { @@ -253,7 +253,7 @@ func testDefaultGlobalRateLimitingVirtualHostTLS(namespace string) { TLS: &contourv1.TLS{ SecretName: "echo", }, - RateLimitPolicy: &contourv1.VhostRateLimitPolicy{ + RateLimitPolicy: &contourv1.RateLimitPolicy{ Global: &contourv1.GlobalRateLimitPolicy{ Disabled: true, }, @@ -319,7 +319,7 @@ func testDefaultGlobalRateLimitingVirtualHostTLS(namespace string) { TLS: &contourv1.TLS{ SecretName: "echo", }, - RateLimitPolicy: &contourv1.VhostRateLimitPolicy{ + RateLimitPolicy: &contourv1.RateLimitPolicy{ Global: &contourv1.GlobalRateLimitPolicy{ Descriptors: []contourv1.RateLimitDescriptor{ { @@ -366,8 +366,8 @@ func testDefaultGlobalRateLimitingVirtualHostTLS(namespace string) { }) } -func testDefaultGlobalRateLimitingWithVhRateLimits(namespace string) { - Specify("default global rate limit policy is applied and vhost rate limits is set to override 
mode (implicitly)", func() { +func testDefaultGlobalRateLimitingWithVhRateLimitsIgnore(namespace string) { + Specify("default global rate limit policy is applied and route opted out from the virtual host rate limit policy", func() { t := f.T() f.Fixtures.Echo.Deploy(namespace, "echo") @@ -389,201 +389,9 @@ func testDefaultGlobalRateLimitingWithVhRateLimits(namespace string) { Port: 80, }, }, - }, - }, - }, - } - p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) - - // Wait until we get a 429 from the proxy confirming - // that we've exceeded the rate limit. - res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Condition: e2e.HasStatusCode(429), - RequestOpts: []func(*http.Request){ - e2e.OptSetHeaders(map[string]string{ - "X-Default-Header": "test_value_1", - }), - }, - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) - - require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { - return err - } - - // Add a global rate limit policy on the route. - p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ - Global: &contourv1.GlobalRateLimitPolicy{ - Descriptors: []contourv1.RateLimitDescriptor{ - { - Entries: []contourv1.RateLimitDescriptorEntry{ - { - RequestHeader: &contourv1.RequestHeaderDescriptor{ - HeaderName: "X-HTTPProxy-Descriptor", - DescriptorKey: "customHeader", - }, - }, - }, - }, - }, - }, - } - - return f.Client.Update(context.TODO(), p) - })) - - // After adding rate limits on the route level, make another request - // to confirm a 200 response since we override the policy by default on the route level, - // and the new limit allows 1 request per hour. 
- res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Condition: e2e.HasStatusCode(200), - RequestOpts: []func(*http.Request){ - e2e.OptSetHeaders(map[string]string{ - "X-Default-Header": "test_value_1", - }), - }, - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - - // Make requests against the proxy, confirm a 429 response - // is now gotten since we've exceeded the rate limit. - res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Condition: e2e.HasStatusCode(429), - RequestOpts: []func(*http.Request){ - e2e.OptSetHeaders(map[string]string{ - "X-HTTPProxy-Descriptor": "test_value", - }), - }, - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) - }) - - Specify("default global rate limit policy is applied and vhost rate limits is set to override mode (explicitly)", func() { - t := f.T() - - f.Fixtures.Echo.Deploy(namespace, "echo") - - p := &contourv1.HTTPProxy{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "defaultglobalratelimitvhratelimits", - }, - Spec: contourv1.HTTPProxySpec{ - VirtualHost: &contourv1.VirtualHost{ - Fqdn: "defaultglobalratelimitvhratelimits.projectcontour.io", - }, - Routes: []contourv1.Route{ - { - Services: []contourv1.Service{ - { - Name: "echo", - Port: 80, - }, - }, - }, - }, - }, - } - p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) - - // Wait until we get a 429 from the proxy confirming - // that we've exceeded the rate limit. 
- res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Condition: e2e.HasStatusCode(429), - RequestOpts: []func(*http.Request){ - e2e.OptSetHeaders(map[string]string{ - "X-Default-Header": "test_value_1", - }), - }, - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) - - require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { - return err - } - - // Add a global rate limit policy on the route. - p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ - VhRateLimits: "Override", - Global: &contourv1.GlobalRateLimitPolicy{ - Descriptors: []contourv1.RateLimitDescriptor{ - { - Entries: []contourv1.RateLimitDescriptorEntry{ - { - RequestHeader: &contourv1.RequestHeaderDescriptor{ - HeaderName: "X-HTTPProxy-Descriptor", - DescriptorKey: "customHeader", - }, - }, - }, - }, - }, - }, - } - - return f.Client.Update(context.TODO(), p) - })) - - // After adding rate limits on the route level, make another request - // to confirm a 200 response since we override the policy by default on the route level, - // and the new limit allows 1 request per hour. - res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Condition: e2e.HasStatusCode(200), - RequestOpts: []func(*http.Request){ - e2e.OptSetHeaders(map[string]string{ - "X-Default-Header": "test_value_1", - }), - }, - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - - // Make requests against the proxy, confirm a 429 response - // is now gotten since we've exceeded the rate limit. 
- res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Condition: e2e.HasStatusCode(429), - RequestOpts: []func(*http.Request){ - e2e.OptSetHeaders(map[string]string{ - "X-HTTPProxy-Descriptor": "test_value", - }), - }, - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) - }) - - Specify("default global rate limit policy is applied and vhost rate limits is set to include mode", func() { - t := f.T() - - f.Fixtures.Echo.Deploy(namespace, "echo") - - p := &contourv1.HTTPProxy{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "defaultglobalratelimitvhratelimits", - }, - Spec: contourv1.HTTPProxySpec{ - VirtualHost: &contourv1.VirtualHost{ - Fqdn: "defaultglobalratelimitvhratelimits.projectcontour.io", - }, - Routes: []contourv1.Route{ - { - Services: []contourv1.Service{ + Conditions: []contourv1.MatchCondition{ { - Name: "echo", - Port: 80, + Prefix: "/echo", }, }, }, @@ -597,6 +405,7 @@ func testDefaultGlobalRateLimitingWithVhRateLimits(namespace string) { res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ Host: p.Spec.VirtualHost.Fqdn, Condition: e2e.HasStatusCode(429), + Path: "/echo", RequestOpts: []func(*http.Request){ e2e.OptSetHeaders(map[string]string{ "X-Default-Header": "test_value_1", @@ -612,107 +421,10 @@ func testDefaultGlobalRateLimitingWithVhRateLimits(namespace string) { } // Add a global rate limit policy on the route. 
- p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ - VhRateLimits: "Include", + p.Spec.Routes[0].RateLimitPolicy = &contourv1.RateLimitPolicy{ Global: &contourv1.GlobalRateLimitPolicy{ - Descriptors: []contourv1.RateLimitDescriptor{ - { - Entries: []contourv1.RateLimitDescriptorEntry{ - { - RequestHeader: &contourv1.RequestHeaderDescriptor{ - HeaderName: "X-HTTPProxy-Descriptor", - DescriptorKey: "customHeader", - }, - }, - }, - }, - }, - }, - } - - return f.Client.Update(context.TODO(), p) - })) - - // After adding rate limits on the route level that allows one request per hour - // but vhost_rate_limits is in include mode, make another request to confirm a 429 response. - res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - RequestOpts: []func(*http.Request){ - e2e.OptSetHeaders(map[string]string{ - "X-Default-Header": "test_value_1", - }), - }, - Condition: e2e.HasStatusCode(429), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) - - // Make requests against the proxy, confirm a 429 response - // is now gotten since we've exceeded the rate limit. 
- res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Condition: e2e.HasStatusCode(429), - RequestOpts: []func(*http.Request){ - e2e.OptSetHeaders(map[string]string{ - "X-HTTPProxy-Descriptor": "test_value", - }), - }, - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) - }) - - Specify("default global rate limit policy is applied and vhost rate limits is set to ignore mode", func() { - t := f.T() - - f.Fixtures.Echo.Deploy(namespace, "echo") - - p := &contourv1.HTTPProxy{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "defaultglobalratelimitvhratelimits", - }, - Spec: contourv1.HTTPProxySpec{ - VirtualHost: &contourv1.VirtualHost{ - Fqdn: "defaultglobalratelimitvhratelimits.projectcontour.io", - }, - Routes: []contourv1.Route{ - { - Services: []contourv1.Service{ - { - Name: "echo", - Port: 80, - }, - }, - }, + Disabled: true, }, - }, - } - p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) - - // Wait until we get a 429 from the proxy confirming - // that we've exceeded the rate limit. - res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Condition: e2e.HasStatusCode(429), - RequestOpts: []func(*http.Request){ - e2e.OptSetHeaders(map[string]string{ - "X-Default-Header": "test_value_1", - }), - }, - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) - - require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { - return err - } - - // Add a global rate limit policy on the route. 
- p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ - VhRateLimits: "Ignore", } return f.Client.Update(context.TODO(), p) diff --git a/test/e2e/httpproxy/global_rate_limiting_test.go b/test/e2e/httpproxy/global_rate_limiting_test.go index f2edff5ce6d..12587d7a466 100644 --- a/test/e2e/httpproxy/global_rate_limiting_test.go +++ b/test/e2e/httpproxy/global_rate_limiting_test.go @@ -71,7 +71,7 @@ func testGlobalRateLimitingVirtualHostNonTLS(namespace string) { } // Add a global rate limit policy on the virtual host. - p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ + p.Spec.VirtualHost.RateLimitPolicy = &contourv1.RateLimitPolicy{ Global: &contourv1.GlobalRateLimitPolicy{ Descriptors: []contourv1.RateLimitDescriptor{ { @@ -167,7 +167,7 @@ func testGlobalRateLimitingRouteNonTLS(namespace string) { return err } - p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ + p.Spec.Routes[0].RateLimitPolicy = &contourv1.RateLimitPolicy{ Global: &contourv1.GlobalRateLimitPolicy{ Descriptors: []contourv1.RateLimitDescriptor{ { @@ -265,7 +265,7 @@ func testGlobalRateLimitingVirtualHostTLS(namespace string) { return err } - p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ + p.Spec.VirtualHost.RateLimitPolicy = &contourv1.RateLimitPolicy{ Global: &contourv1.GlobalRateLimitPolicy{ Descriptors: []contourv1.RateLimitDescriptor{ { @@ -365,7 +365,7 @@ func testGlobalRateLimitingRouteTLS(namespace string) { return err } - p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ + p.Spec.Routes[0].RateLimitPolicy = &contourv1.RateLimitPolicy{ Global: &contourv1.GlobalRateLimitPolicy{ Descriptors: []contourv1.RateLimitDescriptor{ { @@ -413,3 +413,210 @@ func testGlobalRateLimitingRouteTLS(namespace string) { require.Truef(t, ok, "expected 200 response code for non-rate-limited route, got %d", res.StatusCode) }) } + +func testDisableVirtualHostGlobalRateLimitingOnRoute(namespace string) { + Specify("global 
rate limit policy set on virtualhost is applied with disabled set to false on a route", func() { + t := f.T() + + f.Fixtures.Echo.Deploy(namespace, "echo") + + p := &contourv1.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "globalratelimitvhostnontls", + }, + Spec: contourv1.HTTPProxySpec{ + VirtualHost: &contourv1.VirtualHost{ + Fqdn: "globalratelimitvhostnontls.projectcontour.io", + }, + Routes: []contourv1.Route{ + { + Services: []contourv1.Service{ + { + Name: "echo", + Port: 80, + }, + }, + Conditions: []contourv1.MatchCondition{ + { + Prefix: "/echo", + }, + }, + }, + }, + }, + } + p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) + + // Wait until we get a 200 from the proxy confirming + // the pods are up and serving traffic. + res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the virtual host. + p.Spec.VirtualHost.RateLimitPolicy = &contourv1.RateLimitPolicy{ + Global: &contourv1.GlobalRateLimitPolicy{ + Descriptors: []contourv1.RateLimitDescriptor{ + { + Entries: []contourv1.RateLimitDescriptorEntry{ + { + GenericKey: &contourv1.GenericKeyDescriptor{ + Value: "vhostlimit", + }, + }, + }, + }, + }, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // Wait until we confirm a 429 response is now gotten when we exceed the rate limit. 
+ res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(429), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Set disabled to false explicitly on the route. + p.Spec.Routes[0].RateLimitPolicy = &contourv1.RateLimitPolicy{ + Global: &contourv1.GlobalRateLimitPolicy{ + Disabled: false, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // Confirm we still see a 429 response. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(429), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + }) + + Specify("global rate limit policy set on virtualhost is applied with disabled set to true on a route", func() { + t := f.T() + + f.Fixtures.Echo.Deploy(namespace, "echo") + + p := &contourv1.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "globalratelimitvhostnontls", + }, + Spec: contourv1.HTTPProxySpec{ + VirtualHost: &contourv1.VirtualHost{ + Fqdn: "globalratelimitvhostnontls.projectcontour.io", + }, + Routes: []contourv1.Route{ + { + Services: []contourv1.Service{ + { + Name: "echo", + Port: 80, + }, + }, + Conditions: []contourv1.MatchCondition{ + { + Prefix: "/echo", + }, + }, + }, + }, + }, + } + p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) + + // Wait until we get a 200 from the proxy confirming + // the pods are up and serving traffic. 
+ res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the virtual host. + p.Spec.VirtualHost.RateLimitPolicy = &contourv1.RateLimitPolicy{ + Global: &contourv1.GlobalRateLimitPolicy{ + Descriptors: []contourv1.RateLimitDescriptor{ + { + Entries: []contourv1.RateLimitDescriptorEntry{ + { + GenericKey: &contourv1.GenericKeyDescriptor{ + Value: "vhostlimit", + }, + }, + }, + }, + }, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // Wait until we confirm a 429 response is now gotten when we exceed the rate limit. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(429), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Disable Vhost global rate limit policy on the route. 
+ p.Spec.Routes[0].RateLimitPolicy = &contourv1.RateLimitPolicy{ + Global: &contourv1.GlobalRateLimitPolicy{ + Disabled: true, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // Make another request against the proxy, confirm a 200 response + // is now gotten since the route explicitly opted out from the vhost global rate limiting + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + }) +} diff --git a/test/e2e/httpproxy/httpproxy_test.go b/test/e2e/httpproxy/httpproxy_test.go index bd187370a4f..6f010cf8e3c 100644 --- a/test/e2e/httpproxy/httpproxy_test.go +++ b/test/e2e/httpproxy/httpproxy_test.go @@ -421,6 +421,8 @@ descriptors: f.NamespacedTest("httpproxy-global-rate-limiting-vhost-tls", withRateLimitService(testGlobalRateLimitingVirtualHostTLS)) f.NamespacedTest("httpproxy-global-rate-limiting-route-tls", withRateLimitService(testGlobalRateLimitingRouteTLS)) + + f.NamespacedTest("httpproxy-global-rate-limiting-vhost-disable-per-route", withRateLimitService(testDisableVirtualHostGlobalRateLimitingOnRoute)) }) Context("default global rate limiting", func() { @@ -498,54 +500,7 @@ descriptors: f.NamespacedTest("httpproxy-default-global-rate-limiting-vhost-non-tls", withRateLimitService(testDefaultGlobalRateLimitingVirtualHostNonTLS)) f.NamespacedTest("httpproxy-default-global-rate-limiting-vhost-tls", withRateLimitService(testDefaultGlobalRateLimitingVirtualHostTLS)) - f.NamespacedTest("httpproxy-default-global-rate-limiting-vh-rate-limits", withRateLimitService(testDefaultGlobalRateLimitingWithVhRateLimits)) - }) - - Context("vh rate limits", func() { - withRateLimitService := func(body e2e.NamespacedTestBody) e2e.NamespacedTestBody { - return func(namespace string) { - Context("with rate limit service", func() { - 
BeforeEach(func() { - contourConfig.RateLimitService = config.RateLimitService{ - ExtensionService: fmt.Sprintf("%s/%s", namespace, f.Deployment.RateLimitExtensionService.Name), - Domain: "contour", - FailOpen: false, - } - contourConfiguration.Spec.RateLimitService = &contour_api_v1alpha1.RateLimitServiceConfig{ - ExtensionService: contour_api_v1alpha1.NamespacedName{ - Name: f.Deployment.RateLimitExtensionService.Name, - Namespace: namespace, - }, - Domain: "contour", - FailOpen: ref.To(false), - EnableXRateLimitHeaders: ref.To(false), - } - require.NoError(f.T(), - f.Deployment.EnsureRateLimitResources( - namespace, - ` -domain: contour -descriptors: - - key: generic_key - value: vhostlimit - rate_limit: - unit: hour - requests_per_unit: 1 - - key: route_limit_key - value: routelimit - rate_limit: - unit: hour - requests_per_unit: 1`)) - }) - - body(namespace) - }) - } - } - - f.NamespacedTest("httpproxy-global-rate-limit-with-vh-rate-limits-option", withRateLimitService(testGlobalWithVhostRateLimits)) - - f.NamespacedTest("httpproxy-local-rate-limit-with-vh-rate-limits-option", withRateLimitService(testLocalWithVhostRateLimits)) + f.NamespacedTest("httpproxy-default-global-rate-limiting-vhost-rate-limits-ignore", withRateLimitService(testDefaultGlobalRateLimitingWithVhRateLimitsIgnore)) }) Context("cookie-rewriting", func() { diff --git a/test/e2e/httpproxy/local_rate_limiting_test.go b/test/e2e/httpproxy/local_rate_limiting_test.go index aca1bae774c..ead9fa74bb7 100644 --- a/test/e2e/httpproxy/local_rate_limiting_test.go +++ b/test/e2e/httpproxy/local_rate_limiting_test.go @@ -71,7 +71,7 @@ func testLocalRateLimitingVirtualHost(namespace string) { return err } - p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ + p.Spec.VirtualHost.RateLimitPolicy = &contourv1.RateLimitPolicy{ Local: &contourv1.LocalRateLimitPolicy{ Requests: 1, Unit: "hour", @@ -158,7 +158,7 @@ func testLocalRateLimitingRoute(namespace string) { return err } - 
p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ + p.Spec.Routes[0].RateLimitPolicy = &contourv1.RateLimitPolicy{ Local: &contourv1.LocalRateLimitPolicy{ Requests: 1, Unit: "hour", diff --git a/test/e2e/httpproxy/vh_rate_limits_test.go b/test/e2e/httpproxy/vh_rate_limits_test.go deleted file mode 100644 index 3cab39c165f..00000000000 --- a/test/e2e/httpproxy/vh_rate_limits_test.go +++ /dev/null @@ -1,940 +0,0 @@ -// Copyright Project Contour Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build e2e - -package httpproxy - -import ( - "context" - - . 
"github.com/onsi/ginkgo/v2" - contourv1 "github.com/projectcontour/contour/apis/projectcontour/v1" - "github.com/projectcontour/contour/test/e2e" - "github.com/stretchr/testify/require" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/util/retry" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -func testGlobalWithVhostRateLimits(namespace string) { - Specify("vhost_rate_limits is set to the default override mode (implicitly)", func() { - t := f.T() - - f.Fixtures.Echo.Deploy(namespace, "echo") - - p := &contourv1.HTTPProxy{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "vhratelimitsvhostnontls", - }, - Spec: contourv1.HTTPProxySpec{ - VirtualHost: &contourv1.VirtualHost{ - Fqdn: "vhratelimitsvhostnontls.projectcontour.io", - }, - Routes: []contourv1.Route{ - { - Services: []contourv1.Service{ - { - Name: "echo", - Port: 80, - }, - }, - Conditions: []contourv1.MatchCondition{ - { - Prefix: "/echo", - }, - }, - }, - }, - }, - } - p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) - - // Wait until we get a 200 from the proxy confirming - // the pods are up and serving traffic. - res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(200), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - - require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { - return err - } - - // Add a global rate limit policy on the virtual host. 
- p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ - Global: &contourv1.GlobalRateLimitPolicy{ - Descriptors: []contourv1.RateLimitDescriptor{ - { - Entries: []contourv1.RateLimitDescriptorEntry{ - { - GenericKey: &contourv1.GenericKeyDescriptor{ - Value: "vhostlimit", - }, - }, - }, - }, - }, - }, - } - - return f.Client.Update(context.TODO(), p) - })) - - // Confirm a 429 response is now gotten since we've exceeded the rate limit. - res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(429), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) - - require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { - return err - } - - // Add a global rate limit policy on the route. - p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ - Global: &contourv1.GlobalRateLimitPolicy{ - Descriptors: []contourv1.RateLimitDescriptor{ - { - Entries: []contourv1.RateLimitDescriptorEntry{ - { - GenericKey: &contourv1.GenericKeyDescriptor{ - Key: "route_limit_key", - Value: "routelimit", - }, - }, - }, - }, - }, - }, - } - - return f.Client.Update(context.TODO(), p) - })) - - // After adding rate limits on the route level, make another request - // to confirm a 200 response since we override the policy by default on the route level, - // and the new limit allows 1 request per hour. - res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(200), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - - // Make another request to confirm that route level rate limits got exceeded. 
- res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(429), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) - }) - - Specify("vhost_rate_limits is set to the default override mode (explicitly)", func() { - t := f.T() - - f.Fixtures.Echo.Deploy(namespace, "echo") - - p := &contourv1.HTTPProxy{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "vhratelimitsvhostnontls", - }, - Spec: contourv1.HTTPProxySpec{ - VirtualHost: &contourv1.VirtualHost{ - Fqdn: "vhratelimitsvhostnontls.projectcontour.io", - }, - Routes: []contourv1.Route{ - { - Services: []contourv1.Service{ - { - Name: "echo", - Port: 80, - }, - }, - Conditions: []contourv1.MatchCondition{ - { - Prefix: "/echo", - }, - }, - }, - }, - }, - } - p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) - - // Wait until we get a 200 from the proxy confirming - // the pods are up and serving traffic. - res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(200), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - - require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { - return err - } - - // Add a global rate limit policy on the virtual host. 
- p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ - Global: &contourv1.GlobalRateLimitPolicy{ - Descriptors: []contourv1.RateLimitDescriptor{ - { - Entries: []contourv1.RateLimitDescriptorEntry{ - { - GenericKey: &contourv1.GenericKeyDescriptor{ - Value: "vhostlimit", - }, - }, - }, - }, - }, - }, - } - - return f.Client.Update(context.TODO(), p) - })) - - // Confirm a 429 response is now gotten since we've exceeded the rate limit. - res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(429), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) - - require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { - return err - } - - // Add a global rate limit policy on the route. - p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ - VhRateLimits: "Override", - Global: &contourv1.GlobalRateLimitPolicy{ - Descriptors: []contourv1.RateLimitDescriptor{ - { - Entries: []contourv1.RateLimitDescriptorEntry{ - { - GenericKey: &contourv1.GenericKeyDescriptor{ - Key: "route_limit_key", - Value: "routelimit", - }, - }, - }, - }, - }, - }, - } - - return f.Client.Update(context.TODO(), p) - })) - - // After adding rate limits on the route level, make another request - // to confirm a 200 response since we override the policy by default on the route level, - // and the new limit allows 1 request per hour. - res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(200), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - - // Make another request to confirm that route level rate limits got exceeded. 
- res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(429), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) - }) - - Specify("vhost_rate_limits is set to include mode", func() { - t := f.T() - - f.Fixtures.Echo.Deploy(namespace, "echo") - - p := &contourv1.HTTPProxy{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "vhratelimitsvhostnontls", - }, - Spec: contourv1.HTTPProxySpec{ - VirtualHost: &contourv1.VirtualHost{ - Fqdn: "vhratelimitsvhostnontls.projectcontour.io", - }, - Routes: []contourv1.Route{ - { - Services: []contourv1.Service{ - { - Name: "echo", - Port: 80, - }, - }, - Conditions: []contourv1.MatchCondition{ - { - Prefix: "/echo", - }, - }, - }, - }, - }, - } - p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) - - // Wait until we get a 200 from the proxy confirming - // the pods are up and serving traffic. - res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(200), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - - require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { - return err - } - - // Add a global rate limit policy on the virtual host. 
- p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ - Global: &contourv1.GlobalRateLimitPolicy{ - Descriptors: []contourv1.RateLimitDescriptor{ - { - Entries: []contourv1.RateLimitDescriptorEntry{ - { - GenericKey: &contourv1.GenericKeyDescriptor{ - Value: "vhostlimit", - }, - }, - }, - }, - }, - }, - } - - return f.Client.Update(context.TODO(), p) - })) - - // Confirm a 429 response is now gotten since we've exceeded the rate limit. - res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(429), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) - - require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { - return err - } - - // Add a global rate limit policy on the route. - p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ - VhRateLimits: "Include", - Global: &contourv1.GlobalRateLimitPolicy{ - Descriptors: []contourv1.RateLimitDescriptor{ - { - Entries: []contourv1.RateLimitDescriptorEntry{ - { - GenericKey: &contourv1.GenericKeyDescriptor{ - Key: "route_limit_key", - Value: "routelimit", - }, - }, - }, - }, - }, - }, - } - - return f.Client.Update(context.TODO(), p) - })) - - // After adding rate limits on the route level that allows one request per hour - // but vhost_rate_limits is in include mode, make another request to confirm a 429 response. 
- res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(429), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) - }) - - Specify("vhost_rate_limits is set to ignore mode", func() { - t := f.T() - - f.Fixtures.Echo.Deploy(namespace, "echo") - - p := &contourv1.HTTPProxy{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "vhratelimitsvhostnontls", - }, - Spec: contourv1.HTTPProxySpec{ - VirtualHost: &contourv1.VirtualHost{ - Fqdn: "vhratelimitsvhostnontls.projectcontour.io", - }, - Routes: []contourv1.Route{ - { - Services: []contourv1.Service{ - { - Name: "echo", - Port: 80, - }, - }, - Conditions: []contourv1.MatchCondition{ - { - Prefix: "/echo", - }, - }, - }, - }, - }, - } - p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) - - // Wait until we get a 200 from the proxy confirming - // the pods are up and serving traffic. - res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(200), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - - require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { - return err - } - - // Add a global rate limit policy on the virtual host. 
- p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ - Global: &contourv1.GlobalRateLimitPolicy{ - Descriptors: []contourv1.RateLimitDescriptor{ - { - Entries: []contourv1.RateLimitDescriptorEntry{ - { - GenericKey: &contourv1.GenericKeyDescriptor{ - Value: "vhostlimit", - }, - }, - }, - }, - }, - }, - } - - return f.Client.Update(context.TODO(), p) - })) - - // Confirm a 429 response is now gotten since we've exceeded the rate limit. - res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(429), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) - - require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { - return err - } - - // Add a global rate limit policy on the route. - p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ - VhRateLimits: "Ignore", - } - - return f.Client.Update(context.TODO(), p) - })) - - // We set vh_rate_limits to ignore, which means the route should ignore any rate limit policy - // set by the virtual host. Make another request to confirm 200. 
- res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(200), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - }) -} - -func testLocalWithVhostRateLimits(namespace string) { - Specify("vhost_rate_limits is set to the default override mode (implicitly)", func() { - t := f.T() - - f.Fixtures.Echo.Deploy(namespace, "echo") - - p := &contourv1.HTTPProxy{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "vhratelimitsvhostnontls", - }, - Spec: contourv1.HTTPProxySpec{ - VirtualHost: &contourv1.VirtualHost{ - Fqdn: "vhratelimitsvhostnontls.projectcontour.io", - }, - Routes: []contourv1.Route{ - { - Services: []contourv1.Service{ - { - Name: "echo", - Port: 80, - }, - }, - Conditions: []contourv1.MatchCondition{ - { - Prefix: "/echo", - }, - }, - }, - }, - }, - } - p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) - - // Wait until we get a 200 from the proxy confirming - // the pods are up and serving traffic. - res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(200), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - - require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { - return err - } - - // Add a local rate limit policy on the virtual host. - p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ - Local: &contourv1.LocalRateLimitPolicy{ - Requests: 1, - Unit: "hour", - }, - } - - return f.Client.Update(context.TODO(), p) - })) - - // Make a request against the proxy, confirm a 200 response - // is returned since we're allowed one request per hour. 
- res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(200), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - - // Make another request against the proxy, confirm a 429 response - // is now gotten since we've exceeded the rate limit. - res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(429), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) - - require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { - return err - } - - // Add a local rate limit policy on the route. - p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ - Local: &contourv1.LocalRateLimitPolicy{ - Requests: 1, - Unit: "hour", - }, - } - - return f.Client.Update(context.TODO(), p) - })) - - // After adding rate limits on the route level, make another request - // to confirm a 200 response since we override the policy by default on the route level, - // and the new limit allows 1 request per hour. - res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(200), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - - // Make another request to confirm that route level rate limits got exceeded. 
- res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(429), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) - }) - - Specify("vhost_rate_limits is set to the default override mode (explicitly)", func() { - t := f.T() - - f.Fixtures.Echo.Deploy(namespace, "echo") - - p := &contourv1.HTTPProxy{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "vhratelimitsvhostnontls", - }, - Spec: contourv1.HTTPProxySpec{ - VirtualHost: &contourv1.VirtualHost{ - Fqdn: "vhratelimitsvhostnontls.projectcontour.io", - }, - Routes: []contourv1.Route{ - { - Services: []contourv1.Service{ - { - Name: "echo", - Port: 80, - }, - }, - Conditions: []contourv1.MatchCondition{ - { - Prefix: "/echo", - }, - }, - }, - }, - }, - } - p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) - - // Wait until we get a 200 from the proxy confirming - // the pods are up and serving traffic. - res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(200), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - - require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { - return err - } - - // Add a local rate limit policy on the virtual host. - p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ - Local: &contourv1.LocalRateLimitPolicy{ - Requests: 1, - Unit: "hour", - }, - } - - return f.Client.Update(context.TODO(), p) - })) - - // Make a request against the proxy, confirm a 200 response - // is returned since we're allowed one request per hour. 
- res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(200), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - - // Make another request against the proxy, confirm a 429 response - // is now gotten since we've exceeded the rate limit. - res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(429), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) - - require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { - return err - } - - // Add a local rate limit policy on the route. - p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ - VhRateLimits: "Override", - Local: &contourv1.LocalRateLimitPolicy{ - Requests: 1, - Unit: "hour", - }, - } - - return f.Client.Update(context.TODO(), p) - })) - - // After adding rate limits on the route level, make another request - // to confirm a 200 response since we override the policy by default on the route level, - // and the new limit allows 1 request per hour. - res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(200), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - - // Make another request to confirm that route level rate limits got exceeded. 
- res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(429), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) - }) - - Specify("vhost_rate_limits is set to include mode", func() { - t := f.T() - - f.Fixtures.Echo.Deploy(namespace, "echo") - - p := &contourv1.HTTPProxy{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "vhratelimitsvhostnontls", - }, - Spec: contourv1.HTTPProxySpec{ - VirtualHost: &contourv1.VirtualHost{ - Fqdn: "vhratelimitsvhostnontls.projectcontour.io", - }, - Routes: []contourv1.Route{ - { - Services: []contourv1.Service{ - { - Name: "echo", - Port: 80, - }, - }, - Conditions: []contourv1.MatchCondition{ - { - Prefix: "/echo", - }, - }, - }, - }, - }, - } - p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) - - // Wait until we get a 200 from the proxy confirming - // the pods are up and serving traffic. - res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(200), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - - require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { - return err - } - - // Add a global local limit policy on the virtual host. - p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ - Local: &contourv1.LocalRateLimitPolicy{ - Requests: 1, - Unit: "hour", - }, - } - - return f.Client.Update(context.TODO(), p) - })) - - // Make a request against the proxy, confirm a 200 response - // is returned since we're allowed one request per hour. 
- res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(200), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - - // Make another request against the proxy, confirm a 429 response - // is now gotten since we've exceeded the rate limit. - res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(429), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) - - require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { - return err - } - - // Add a local rate limit policy on the route. - p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ - VhRateLimits: "Include", - Local: &contourv1.LocalRateLimitPolicy{ - Requests: 1, - Unit: "hour", - }, - } - - return f.Client.Update(context.TODO(), p) - })) - - // After adding rate limits on the route level that allows one request per hour - // but vhost_rate_limits is in include mode, make another request to confirm a 429 response. 
- res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(429), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) - }) - - Specify("vhost_rate_limits is set to ignore mode", func() { - t := f.T() - - f.Fixtures.Echo.Deploy(namespace, "echo") - - p := &contourv1.HTTPProxy{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: "vhratelimitsvhostnontls", - }, - Spec: contourv1.HTTPProxySpec{ - VirtualHost: &contourv1.VirtualHost{ - Fqdn: "vhratelimitsvhostnontls.projectcontour.io", - }, - Routes: []contourv1.Route{ - { - Services: []contourv1.Service{ - { - Name: "echo", - Port: 80, - }, - }, - Conditions: []contourv1.MatchCondition{ - { - Prefix: "/echo", - }, - }, - }, - }, - }, - } - p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) - - // Wait until we get a 200 from the proxy confirming - // the pods are up and serving traffic. - res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(200), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - - require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { - return err - } - - // Add a local rate limit policy on the virtual host. - p.Spec.VirtualHost.RateLimitPolicy = &contourv1.VhostRateLimitPolicy{ - Local: &contourv1.LocalRateLimitPolicy{ - Requests: 1, - Unit: "hour", - }, - } - - return f.Client.Update(context.TODO(), p) - })) - - // Make a request against the proxy, confirm a 200 response - // is returned since we're allowed one request per hour. 
- res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(200), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - - // Make another request against the proxy, confirm a 429 response - // is now gotten since we've exceeded the rate limit. - res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(429), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) - - require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { - return err - } - - // Add a local rate limit policy on the route. - p.Spec.Routes[0].RateLimitPolicy = &contourv1.RouteRateLimitPolicy{ - VhRateLimits: "Ignore", - } - - return f.Client.Update(context.TODO(), p) - })) - - // We set vh_rate_limits to ignore, which means the route should ignore any rate limit policy - // set by the virtual host. Make another request to confirm 200. 
- res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ - Host: p.Spec.VirtualHost.Fqdn, - Path: "/echo", - Condition: e2e.HasStatusCode(200), - }) - require.NotNil(t, res, "request never succeeded") - require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) - }) -} From 26a35dfb8804719761a8247128594d66f2c73ceb Mon Sep 17 00:00:00 2001 From: shadi-altarsha Date: Thu, 17 Aug 2023 11:20:41 +0200 Subject: [PATCH 10/16] add test for envoy ratelimitsPerRoute Signed-off-by: shadi-altarsha --- internal/envoy/v3/ratelimit.go | 4 ++-- internal/envoy/v3/ratelimit_test.go | 37 +++++++++++++++++++++++++++++ internal/envoy/v3/route.go | 2 +- 3 files changed, 40 insertions(+), 3 deletions(-) diff --git a/internal/envoy/v3/ratelimit.go b/internal/envoy/v3/ratelimit.go index bbfd02f8579..c926d29c876 100644 --- a/internal/envoy/v3/ratelimit.go +++ b/internal/envoy/v3/ratelimit.go @@ -165,10 +165,10 @@ func enableXRateLimitHeaders(enable bool) ratelimit_filter_v3.RateLimit_XRateLim } // rateLimitPerRoute returns a per-route config to configure vhost rate limits. 
-func rateLimitPerRoute(mode int) *anypb.Any { +func rateLimitPerRoute(r *dag.RateLimitPerRoute) *anypb.Any { return protobuf.MustMarshalAny( &ratelimit_filter_v3.RateLimitPerRoute{ - VhRateLimits: ratelimit_filter_v3.RateLimitPerRoute_VhRateLimitsOptions(mode), + VhRateLimits: ratelimit_filter_v3.RateLimitPerRoute_VhRateLimitsOptions(r.VhRateLimits), }, ) } diff --git a/internal/envoy/v3/ratelimit_test.go b/internal/envoy/v3/ratelimit_test.go index 6010b72808d..e07c96d9913 100644 --- a/internal/envoy/v3/ratelimit_test.go +++ b/internal/envoy/v3/ratelimit_test.go @@ -411,3 +411,40 @@ func TestGlobalRateLimitFilter(t *testing.T) { }) } } + +func TestRateLimitPerRoute(t *testing.T) { + tests := map[string]struct { + name string + cfg *dag.RateLimitPerRoute + want *anypb.Any + }{ + "VhRateLimits in Override mode": { + cfg: &dag.RateLimitPerRoute{ + VhRateLimits: dag.VhRateLimitsOverride, + }, + want: protobuf.MustMarshalAny(&ratelimit_filter_v3.RateLimitPerRoute{ + VhRateLimits: 0, + }), + }, "VhRateLimits in Include mode": { + cfg: &dag.RateLimitPerRoute{ + VhRateLimits: dag.VhRateLimitsInclude, + }, + want: protobuf.MustMarshalAny(&ratelimit_filter_v3.RateLimitPerRoute{ + VhRateLimits: 1, + }), + }, "VhRateLimits in Ignore mode": { + cfg: &dag.RateLimitPerRoute{ + VhRateLimits: dag.VhRateLimitsIgnore, + }, + want: protobuf.MustMarshalAny(&ratelimit_filter_v3.RateLimitPerRoute{ + VhRateLimits: 2, + }), + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + assert.Equal(t, tc.want, rateLimitPerRoute(tc.cfg)) + }) + } +} diff --git a/internal/envoy/v3/route.go b/internal/envoy/v3/route.go index f48b904b585..ca83fda40f3 100644 --- a/internal/envoy/v3/route.go +++ b/internal/envoy/v3/route.go @@ -146,7 +146,7 @@ func buildRoute(dagRoute *dag.Route, vhostName string, secure bool) *envoy_route } if dagRoute.RateLimitPerRoute != nil { - route.TypedPerFilterConfig["envoy.filters.http.ratelimit"] = 
rateLimitPerRoute(int(dagRoute.RateLimitPerRoute.VhRateLimits)) + route.TypedPerFilterConfig["envoy.filters.http.ratelimit"] = rateLimitPerRoute(dagRoute.RateLimitPerRoute) } // Apply per-route authorization policy modifications. From dece9efa6b75a8e75bee2f95f804b0db0448b93c Mon Sep 17 00:00:00 2001 From: Shadi Altarsha <61504589+shadialtarsha@users.noreply.github.com> Date: Thu, 17 Aug 2023 23:52:52 +0200 Subject: [PATCH 11/16] Update changelogs/unreleased/5657-shadialtarsha-minor.md Co-authored-by: Steve Kriss Signed-off-by: shadi-altarsha --- changelogs/unreleased/5657-shadialtarsha-minor.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelogs/unreleased/5657-shadialtarsha-minor.md b/changelogs/unreleased/5657-shadialtarsha-minor.md index 3fe6bd98d63..a8c7612a1ac 100644 --- a/changelogs/unreleased/5657-shadialtarsha-minor.md +++ b/changelogs/unreleased/5657-shadialtarsha-minor.md @@ -1,6 +1,6 @@ ## Disable the virtualhost's Global RateLimit policy -Setting `global.disabled` flag to false on a specific route should disable the vhost global rate limit policy. +Setting `rateLimitPolicy.global.disabled` flag to true on a specific route now disables the global rate limit policy inherited from the virtual host for that route. 
### Sample Configurations #### httpproxy.yaml From af28f65201449f9385ca19f442620c43fe2e4078 Mon Sep 17 00:00:00 2001 From: Shadi Altarsha <61504589+shadialtarsha@users.noreply.github.com> Date: Thu, 17 Aug 2023 23:52:59 +0200 Subject: [PATCH 12/16] Update changelogs/unreleased/5657-shadialtarsha-minor.md Co-authored-by: Steve Kriss Signed-off-by: shadi-altarsha --- changelogs/unreleased/5657-shadialtarsha-minor.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelogs/unreleased/5657-shadialtarsha-minor.md b/changelogs/unreleased/5657-shadialtarsha-minor.md index a8c7612a1ac..2c914ca2985 100644 --- a/changelogs/unreleased/5657-shadialtarsha-minor.md +++ b/changelogs/unreleased/5657-shadialtarsha-minor.md @@ -1,4 +1,4 @@ -## Disable the virtualhost's Global RateLimit policy +## Specific routes can now opt out of the virtual host's global rate limit policy Setting `rateLimitPolicy.global.disabled` flag to true on a specific route now disables the global rate limit policy inherited from the virtual host for that route. From 9cdff28edd5bf691b52bfa72e4d1665e04561710 Mon Sep 17 00:00:00 2001 From: shadi-altarsha Date: Thu, 17 Aug 2023 23:57:10 +0200 Subject: [PATCH 13/16] Revert removing 5363-shadialtarsha-minor.md Signed-off-by: shadi-altarsha --- .../unreleased/5363-shadialtarsha-minor.md | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 changelogs/unreleased/5363-shadialtarsha-minor.md diff --git a/changelogs/unreleased/5363-shadialtarsha-minor.md b/changelogs/unreleased/5363-shadialtarsha-minor.md new file mode 100644 index 00000000000..a263b06f91c --- /dev/null +++ b/changelogs/unreleased/5363-shadialtarsha-minor.md @@ -0,0 +1,28 @@ +## Default Global RateLimit Policy + +This Change adds the ability to define a default global rate limit policy in the Contour configuration +to be used as a global rate limit policy by all HTTPProxy objects.
+HTTPProxy object can decide to opt out and disable this feature using `disabled` config. + +### Sample Configurations +#### contour.yaml +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: contour + namespace: projectcontour +data: + contour.yaml: | + rateLimitService: + extensionService: projectcontour/ratelimit + domain: contour + failOpen: false + defaultGlobalRateLimitPolicy: + descriptors: + - entries: + - remoteAddress: {} + - entries: + - genericKey: + value: foo +``` From 4ee2c114d57c6427b147f7c8ff4174597d473ca0 Mon Sep 17 00:00:00 2001 From: shadi-altarsha Date: Fri, 18 Aug 2023 00:01:41 +0200 Subject: [PATCH 14/16] Improve the example Signed-off-by: shadi-altarsha --- changelogs/unreleased/5657-shadialtarsha-minor.md | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/changelogs/unreleased/5657-shadialtarsha-minor.md b/changelogs/unreleased/5657-shadialtarsha-minor.md index 2c914ca2985..8155abf4e0b 100644 --- a/changelogs/unreleased/5657-shadialtarsha-minor.md +++ b/changelogs/unreleased/5657-shadialtarsha-minor.md @@ -3,6 +3,7 @@ Setting `rateLimitPolicy.global.disabled` flag to true on a specific route now disables the global rate limit policy inherited from the virtual host for that route. ### Sample Configurations +In the example below, `/foo` route is opted out from the global rate limit policy defined by the virtualhost. 
#### httpproxy.yaml ```yaml apiVersion: projectcontour.io/v1 @@ -14,11 +15,12 @@ spec: fqdn: local.projectcontour.io rateLimitPolicy: global: - disabled: true - local: - requests: 100 - unit: hour - burst: 20 + descriptors: + - entries: + - remoteAddress: {} + - genericKey: + key: vhost + value: local.projectcontour.io routes: - conditions: - prefix: / From e005233a98c3f1eff498fcec6942f8dc04a52e72 Mon Sep 17 00:00:00 2001 From: shadi-altarsha Date: Tue, 5 Sep 2023 17:47:38 +0200 Subject: [PATCH 15/16] CR fixes Signed-off-by: shadi-altarsha --- .../default_global_rate_limiting_test.go | 4 +-- .../httpproxy/global_rate_limiting_test.go | 4 +-- test/e2e/httpproxy/httpproxy_test.go | 29 +++++++++++++++++++ 3 files changed, 33 insertions(+), 4 deletions(-) diff --git a/test/e2e/httpproxy/default_global_rate_limiting_test.go b/test/e2e/httpproxy/default_global_rate_limiting_test.go index ce282401655..7682d803745 100644 --- a/test/e2e/httpproxy/default_global_rate_limiting_test.go +++ b/test/e2e/httpproxy/default_global_rate_limiting_test.go @@ -408,7 +408,7 @@ func testDefaultGlobalRateLimitingWithVhRateLimitsIgnore(namespace string) { Path: "/echo", RequestOpts: []func(*http.Request){ e2e.OptSetHeaders(map[string]string{ - "X-Default-Header": "test_value_1", + "X-Another-Header": "randomvalue", }), }, }) @@ -438,7 +438,7 @@ func testDefaultGlobalRateLimitingWithVhRateLimitsIgnore(namespace string) { Condition: e2e.HasStatusCode(200), RequestOpts: []func(*http.Request){ e2e.OptSetHeaders(map[string]string{ - "X-Default-Header": "test_value_1", + "X-Another-Header": "randomvalue", }), }, }) diff --git a/test/e2e/httpproxy/global_rate_limiting_test.go b/test/e2e/httpproxy/global_rate_limiting_test.go index 12587d7a466..d0e4788021a 100644 --- a/test/e2e/httpproxy/global_rate_limiting_test.go +++ b/test/e2e/httpproxy/global_rate_limiting_test.go @@ -471,7 +471,7 @@ func testDisableVirtualHostGlobalRateLimitingOnRoute(namespace string) { Entries: 
[]contourv1.RateLimitDescriptorEntry{ { GenericKey: &contourv1.GenericKeyDescriptor{ - Value: "vhostlimit", + Value: "randomvalue", }, }, }, @@ -573,7 +573,7 @@ func testDisableVirtualHostGlobalRateLimitingOnRoute(namespace string) { Entries: []contourv1.RateLimitDescriptorEntry{ { GenericKey: &contourv1.GenericKeyDescriptor{ - Value: "vhostlimit", + Value: "randomvalue", }, }, }, diff --git a/test/e2e/httpproxy/httpproxy_test.go b/test/e2e/httpproxy/httpproxy_test.go index 6f010cf8e3c..46b67fae270 100644 --- a/test/e2e/httpproxy/httpproxy_test.go +++ b/test/e2e/httpproxy/httpproxy_test.go @@ -404,6 +404,11 @@ descriptors: requests_per_unit: 1 - key: generic_key value: tlsroutelimit + rate_limit: + unit: hour + requests_per_unit: 1 + - key: generic_key + value: randomvalue rate_limit: unit: hour requests_per_unit: 1`)) @@ -446,6 +451,16 @@ descriptors: }, }, }, + { + Entries: []contour_api_v1.RateLimitDescriptorEntry{ + { + RequestHeader: &contour_api_v1.RequestHeaderDescriptor{ + HeaderName: "X-Another-Header", + DescriptorKey: "anotherHeader", + }, + }, + }, + }, }, }, } @@ -469,6 +484,16 @@ descriptors: }, }, }, + { + Entries: []contour_api_v1.RateLimitDescriptorEntry{ + { + RequestHeader: &contour_api_v1.RequestHeaderDescriptor{ + HeaderName: "X-Another-Header", + DescriptorKey: "anotherHeader", + }, + }, + }, + }, }, }, } @@ -488,6 +513,10 @@ descriptors: unit: hour requests_per_unit: 1 - key: customHeader + rate_limit: + unit: hour + requests_per_unit: 1 + - key: anotherHeader rate_limit: unit: hour requests_per_unit: 1`)) From d77620f85e009227e6b4eb2b6468b098c8c62572 Mon Sep 17 00:00:00 2001 From: shadi-altarsha Date: Mon, 25 Sep 2023 13:21:38 +0200 Subject: [PATCH 16/16] remove a file from unreleased Signed-off-by: shadi-altarsha --- .../unreleased/5363-shadialtarsha-minor.md | 28 ------------------- 1 file changed, 28 deletions(-) delete mode 100644 changelogs/unreleased/5363-shadialtarsha-minor.md diff --git 
a/changelogs/unreleased/5363-shadialtarsha-minor.md b/changelogs/unreleased/5363-shadialtarsha-minor.md deleted file mode 100644 index a263b06f91c..00000000000 --- a/changelogs/unreleased/5363-shadialtarsha-minor.md +++ /dev/null @@ -1,28 +0,0 @@ -## Default Global RateLimit Policy - -This Change adds the ability to define a default global rate limit policy in the Contour configuration -to be used as a global rate limit policy by all HTTPProxy objects. -HTTPProxy object can decide to opt out and disable this feature using `disabled` config. - -### Sample Configurations -#### contour.yaml -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: contour - namespace: projectcontour -data: - contour.yaml: | - rateLimitService: - extensionService: projectcontour/ratelimit - domain: contour - failOpen: false - defaultGlobalRateLimitPolicy: - descriptors: - - entries: - - remoteAddress: {} - - entries: - - genericKey: - value: foo -```