diff --git a/.codespell.ignorewords b/.codespell.ignorewords index b186489610c..4d617fc6873 100644 --- a/.codespell.ignorewords +++ b/.codespell.ignorewords @@ -6,3 +6,4 @@ od als wit aks +immediatedly diff --git a/.github/workflows/build_daily.yaml b/.github/workflows/build_daily.yaml index fd477060b20..021a3a0b10e 100644 --- a/.github/workflows/build_daily.yaml +++ b/.github/workflows/build_daily.yaml @@ -10,7 +10,7 @@ on: env: GOPROXY: https://proxy.golang.org/ SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - GO_VERSION: 1.21.0 + GO_VERSION: 1.21.3 jobs: e2e-envoy-xds: runs-on: ubuntu-latest diff --git a/.github/workflows/prbuild.yaml b/.github/workflows/prbuild.yaml index f3de8e126bc..b679bebad46 100644 --- a/.github/workflows/prbuild.yaml +++ b/.github/workflows/prbuild.yaml @@ -11,7 +11,7 @@ on: env: GOPROXY: https://proxy.golang.org/ SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - GO_VERSION: 1.21.0 + GO_VERSION: 1.21.3 jobs: lint: runs-on: ubuntu-latest diff --git a/.golangci.yml b/.golangci.yml index 5bc02ac688f..c3c40bc5527 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -32,7 +32,30 @@ linters-settings: - http.DefaultTransport revive: rules: - - name: use-any + - name: blank-imports + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: empty-block + - name: error-naming + - name: error-return + - name: error-strings + - name: errorf + - name: exported + - name: increment-decrement + - name: indent-error-flow + - name: package-comments + - name: range + - name: receiver-naming + - name: redefines-builtin-id + - name: superfluous-else + - name: time-naming + - name: unexported-return + - name: unreachable-code + - name: unused-parameter + - name: use-any + - name: var-declaration + - name: var-naming issues: exclude-rules: diff --git a/Makefile b/Makefile index 5f4cb94e0a2..c9e2fb8a9c6 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ IMAGE := $(REGISTRY)/$(PROJECT) SRCDIRS := ./cmd ./internal ./apis 
LOCAL_BOOTSTRAP_CONFIG = localenvoyconfig.yaml SECURE_LOCAL_BOOTSTRAP_CONFIG = securelocalenvoyconfig.yaml -ENVOY_IMAGE = docker.io/envoyproxy/envoy:v1.27.0 +ENVOY_IMAGE = docker.io/envoyproxy/envoy:v1.27.2 GATEWAY_API_VERSION ?= $(shell grep "sigs.k8s.io/gateway-api" go.mod | awk '{print $$2}') # Used to supply a local Envoy docker container an IP to connect to that is running @@ -44,7 +44,7 @@ endif IMAGE_PLATFORMS ?= linux/amd64,linux/arm64 # Base build image to use. -BUILD_BASE_IMAGE ?= golang:1.21.0 +BUILD_BASE_IMAGE ?= golang:1.21.3 # Enable build with CGO. BUILD_CGO_ENABLED ?= 0 diff --git a/apis/projectcontour/v1/httpproxy.go b/apis/projectcontour/v1/httpproxy.go index db77734597f..498be837a8b 100644 --- a/apis/projectcontour/v1/httpproxy.go +++ b/apis/projectcontour/v1/httpproxy.go @@ -551,6 +551,18 @@ type Route struct { // +optional PathRewritePolicy *PathRewritePolicy `json:"pathRewritePolicy,omitempty"` // The policy for managing request headers during proxying. + // + // You may dynamically rewrite the Host header to be forwarded + // upstream to the content of a request header using + // the below format "%REQ(X-Header-Name)%". If the value of the header + // is empty, it is ignored. + // + // *NOTE: Pay attention to the potential security implications of using this option. + // Provided header must come from trusted source. + // + // **NOTE: The header rewrite is only done while forwarding and has no bearing + // on the routing decision. + // // +optional RequestHeadersPolicy *HeadersPolicy `json:"requestHeadersPolicy,omitempty"` // The policy for managing response headers during proxying. @@ -1268,7 +1280,7 @@ type LoadBalancerPolicy struct { } // HeadersPolicy defines how headers are managed during forwarding. -// The `Host` header is treated specially and if set in a HTTP response +// The `Host` header is treated specially and if set in a HTTP request // will be used as the SNI server name when forwarding over TLS. 
It is an // error to attempt to set the `Host` header in a HTTP response. type HeadersPolicy struct { diff --git a/apis/projectcontour/v1alpha1/contourconfig.go b/apis/projectcontour/v1alpha1/contourconfig.go index 5699d5e1f30..d231d19bbaa 100644 --- a/apis/projectcontour/v1alpha1/contourconfig.go +++ b/apis/projectcontour/v1alpha1/contourconfig.go @@ -391,6 +391,27 @@ type EnvoyListenerConfig struct { // Single set of options are applied to all listeners. // +optional SocketOptions *SocketOptions `json:"socketOptions,omitempty"` + + // Defines the limit on number of HTTP requests that Envoy will process from a single + // connection in a single I/O cycle. Requests over this limit are processed in subsequent + // I/O cycles. Can be used as a mitigation for CVE-2023-44487 when abusive traffic is + // detected. Configures the http.max_requests_per_io_cycle Envoy runtime setting. The default + // value when this is not set is no limit. + // + // +kubebuilder:validation:Minimum=1 + // +optional + MaxRequestsPerIOCycle *uint32 `json:"maxRequestsPerIOCycle,omitempty"` + + // Defines the value for SETTINGS_MAX_CONCURRENT_STREAMS Envoy will advertise in the + // SETTINGS frame in HTTP/2 connections and the limit for concurrent streams allowed + // for a peer on a single HTTP/2 connection. It is recommended to not set this lower + // than 100 but this field can be used to bound resource usage by HTTP/2 connections + // and mitigate attacks like CVE-2023-44487. The default value when this is not set is + // unlimited. + // + // +kubebuilder:validation:Minimum=1 + // +optional + HTTP2MaxConcurrentStreams *uint32 `json:"httpMaxConcurrentStreams,omitempty"` } // SocketOptions defines configurable socket options for Envoy listeners. 
diff --git a/apis/projectcontour/v1alpha1/contourdeployment.go b/apis/projectcontour/v1alpha1/contourdeployment.go index 22f5fa0faf8..eb9b57b39ca 100644 --- a/apis/projectcontour/v1alpha1/contourdeployment.go +++ b/apis/projectcontour/v1alpha1/contourdeployment.go @@ -119,6 +119,12 @@ type ContourSettings struct { // the annotations for Prometheus will be appended or overwritten with predefined value. // +optional PodAnnotations map[string]string `json:"podAnnotations,omitempty"` + + // PodLabels defines labels to add to the Contour pods. + // If there is a label with the same key as in `ContourDeploymentSpec.ResourceLabels`, + // the one here has a higher priority. + // +optional + PodLabels map[string]string `json:"podLabels,omitempty"` } // DeploymentSettings contains settings for Deployment resources. @@ -185,6 +191,12 @@ type EnvoySettings struct { // +optional PodAnnotations map[string]string `json:"podAnnotations,omitempty"` + // PodLabels defines labels to add to the Envoy pods. + // If there is a label with the same key as in `ContourDeploymentSpec.ResourceLabels`, + // the one here has a higher priority. + // +optional + PodLabels map[string]string `json:"podLabels,omitempty"` + // Compute Resources required by envoy container. // Cannot be updated. // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ @@ -215,6 +227,15 @@ type EnvoySettings struct { // +kubebuilder:validation:Minimum=0 // +optional BaseID int32 `json:"baseID,omitempty"` + + // OverloadMaxHeapSize defines the maximum heap memory of the envoy controlled by the overload manager. + // When the value is greater than 0, the overload manager is enabled, + // and when envoy reaches 95% of the maximum heap size, it performs a shrink heap operation, + // When it reaches 98% of the maximum heap size, Envoy Will stop accepting requests. 
+ // More info: https://projectcontour.io/docs/main/config/overload-manager/ + // + // +optional + OverloadMaxHeapSize uint64 `json:"overloadMaxHeapSize,omitempty"` } // WorkloadType is the type of Kubernetes workload to use for a component. diff --git a/apis/projectcontour/v1alpha1/zz_generated.deepcopy.go b/apis/projectcontour/v1alpha1/zz_generated.deepcopy.go index d889c429aa5..26f8714fc76 100644 --- a/apis/projectcontour/v1alpha1/zz_generated.deepcopy.go +++ b/apis/projectcontour/v1alpha1/zz_generated.deepcopy.go @@ -372,6 +372,13 @@ func (in *ContourSettings) DeepCopyInto(out *ContourSettings) { (*out)[key] = val } } + if in.PodLabels != nil { + in, out := &in.PodLabels, &out.PodLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContourSettings. @@ -582,6 +589,16 @@ func (in *EnvoyListenerConfig) DeepCopyInto(out *EnvoyListenerConfig) { *out = new(SocketOptions) **out = **in } + if in.MaxRequestsPerIOCycle != nil { + in, out := &in.MaxRequestsPerIOCycle, &out.MaxRequestsPerIOCycle + *out = new(uint32) + **out = **in + } + if in.HTTP2MaxConcurrentStreams != nil { + in, out := &in.HTTP2MaxConcurrentStreams, &out.HTTP2MaxConcurrentStreams + *out = new(uint32) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvoyListenerConfig. 
@@ -648,6 +665,13 @@ func (in *EnvoySettings) DeepCopyInto(out *EnvoySettings) { (*out)[key] = val } } + if in.PodLabels != nil { + in, out := &in.PodLabels, &out.PodLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } in.Resources.DeepCopyInto(&out.Resources) if in.DaemonSet != nil { in, out := &in.DaemonSet, &out.DaemonSet diff --git a/changelogs/CHANGELOG-v1.24.6.md b/changelogs/CHANGELOG-v1.24.6.md new file mode 100644 index 00000000000..9fe5c51fd91 --- /dev/null +++ b/changelogs/CHANGELOG-v1.24.6.md @@ -0,0 +1,57 @@ +We are delighted to present version v1.24.6 of Contour, our layer 7 HTTP reverse proxy for Kubernetes clusters. + +- [All Changes](#all-changes) +- [Installing/Upgrading](#installing-and-upgrading) +- [Compatible Kubernetes Versions](#compatible-kubernetes-versions) + +# All Changes + +This release includes various dependency bumps and fixes for [CVE-2023-44487](https://nvd.nist.gov/vuln/detail/CVE-2023-44487), including: + +- Update to Envoy v1.25.11. See the release notes for v1.25.10 [here](https://www.envoyproxy.io/docs/envoy/v1.25.10/version_history/v1.25/v1.25.10) and v1.25.11 [here](https://www.envoyproxy.io/docs/envoy/v1.25.11/version_history/v1.25/v1.25.11). +- Update to Go v1.20.10. See the [Go release notes](https://go.dev/doc/devel/release#go1.20.minor) for more information. + +Additional mitigations have been added for CVE-2023-44487 in the form of new configuration fields: + +## Max HTTP requests per IO cycle is configurable as an additional mitigation for HTTP/2 CVE-2023-44487 + +Envoy mitigates CVE-2023-44487 with some default runtime settings, however the `http.max_requests_per_io_cycle` does not have a default value. +This change allows configuring this runtime setting via Contour configuration to allow administrators of Contour to prevent abusive connections from starving resources from other valid connections. 
+The default is left as the existing behavior (no limit) so as not to impact existing valid traffic. + +The Contour ConfigMap can be modified similar to the following (and Contour restarted) to set this value: + +``` +listener: + max-requests-per-io-cycle: 10 +``` + +(Note this can be used in addition to the existing Listener configuration field `listener.max-requests-per-connection` which is used primarily for HTTP/1.1 connections and is an approximate limit for HTTP/2) + +## HTTP/2 max concurrent streams is configurable + +This field can be used to limit the number of concurrent streams Envoy will allow on a single connection from a downstream peer. +It can be used to tune resource usage and as a mitigation for DOS attacks arising from vulnerabilities like CVE-2023-44487. + +The Contour ConfigMap can be modified similar to the following (and Contour restarted) to set this value: + +``` +listener: + http2-max-concurrent-streams: 50 +``` + + +# Installing and Upgrading + +For a fresh install of Contour, consult the [getting started documentation](https://projectcontour.io/getting-started/). + +To upgrade an existing Contour installation, please consult the [upgrade documentation](https://projectcontour.io/resources/upgrading/). + + +# Compatible Kubernetes Versions + +Contour v1.24.6 is tested against Kubernetes 1.24 through 1.26. + + +# Are you a Contour user? We would love to know! +If you're using Contour and want to add your organization to our adopters list, please visit this [page](https://github.com/projectcontour/contour/blob/master/ADOPTERS.md). If you prefer to keep your organization name anonymous but still give us feedback into your usage and scenarios for Contour, please post on this [GitHub thread](https://github.com/projectcontour/contour/issues/1269). 
diff --git a/changelogs/CHANGELOG-v1.25.3.md b/changelogs/CHANGELOG-v1.25.3.md new file mode 100644 index 00000000000..071b64e5510 --- /dev/null +++ b/changelogs/CHANGELOG-v1.25.3.md @@ -0,0 +1,57 @@ +We are delighted to present version v1.25.3 of Contour, our layer 7 HTTP reverse proxy for Kubernetes clusters. + +- [All Changes](#all-changes) +- [Installing/Upgrading](#installing-and-upgrading) +- [Compatible Kubernetes Versions](#compatible-kubernetes-versions) + +# All Changes + +This release includes various dependency bumps and fixes for [CVE-2023-44487](https://nvd.nist.gov/vuln/detail/CVE-2023-44487), including: + +- Update to Envoy v1.26.6. See the release notes for v1.26.5 [here](https://www.envoyproxy.io/docs/envoy/v1.26.5/version_history/v1.26/v1.26.5) and v1.26.6 [here](https://www.envoyproxy.io/docs/envoy/v1.26.6/version_history/v1.26/v1.26.6). +- Update to Go v1.20.10. See the [Go release notes](https://go.dev/doc/devel/release#go1.20.minor) for more information. + +Additional mitigations have been added for CVE-2023-44487 in the form of new configuration fields: + +## Max HTTP requests per IO cycle is configurable as an additional mitigation for HTTP/2 CVE-2023-44487 + +Envoy mitigates CVE-2023-44487 with some default runtime settings, however the `http.max_requests_per_io_cycle` does not have a default value. +This change allows configuring this runtime setting via Contour configuration to allow administrators of Contour to prevent abusive connections from starving resources from other valid connections. +The default is left as the existing behavior (no limit) so as not to impact existing valid traffic. 
+ +The Contour ConfigMap can be modified similar to the following (and Contour restarted) to set this value: + +``` +listener: + max-requests-per-io-cycle: 10 +``` + +(Note this can be used in addition to the existing Listener configuration field `listener.max-requests-per-connection` which is used primarily for HTTP/1.1 connections and is an approximate limit for HTTP/2) + +## HTTP/2 max concurrent streams is configurable + +This field can be used to limit the number of concurrent streams Envoy will allow on a single connection from a downstream peer. +It can be used to tune resource usage and as a mitigation for DOS attacks arising from vulnerabilities like CVE-2023-44487. + +The Contour ConfigMap can be modified similar to the following (and Contour restarted) to set this value: + +``` +listener: + http2-max-concurrent-streams: 50 +``` + + +# Installing and Upgrading + +For a fresh install of Contour, consult the [getting started documentation](https://projectcontour.io/getting-started/). + +To upgrade an existing Contour installation, please consult the [upgrade documentation](https://projectcontour.io/resources/upgrading/). + + +# Compatible Kubernetes Versions + +Contour v1.25.3 is tested against Kubernetes 1.25 through 1.27. + + +# Are you a Contour user? We would love to know! +If you're using Contour and want to add your organization to our adopters list, please visit this [page](https://github.com/projectcontour/contour/blob/master/ADOPTERS.md). If you prefer to keep your organization name anonymous but still give us feedback into your usage and scenarios for Contour, please post on this [GitHub thread](https://github.com/projectcontour/contour/issues/1269). 
diff --git a/changelogs/CHANGELOG-v1.26.1.md b/changelogs/CHANGELOG-v1.26.1.md new file mode 100644 index 00000000000..1d0b4caf4d5 --- /dev/null +++ b/changelogs/CHANGELOG-v1.26.1.md @@ -0,0 +1,57 @@ +We are delighted to present version v1.26.1 of Contour, our layer 7 HTTP reverse proxy for Kubernetes clusters. + +- [All Changes](#all-changes) +- [Installing/Upgrading](#installing-and-upgrading) +- [Compatible Kubernetes Versions](#compatible-kubernetes-versions) + +# All Changes + +This release includes various dependency bumps and fixes for [CVE-2023-44487](https://nvd.nist.gov/vuln/detail/CVE-2023-44487), including: + +- Updates Envoy to v1.27.2. See the release notes for v1.27.1 [here](https://www.envoyproxy.io/docs/envoy/v1.27.1/version_history/v1.27/v1.27.1) and v1.27.2 [here](https://www.envoyproxy.io/docs/envoy/v1.27.2/version_history/v1.27/v1.27.2). +- Update to Go v1.20.10. See the [Go release notes](https://go.dev/doc/devel/release#go1.20.minor) for more information. + +Additional mitigations have been added for CVE-2023-44487 in the form of new configuration fields: + +## Max HTTP requests per IO cycle is configurable as an additional mitigation for HTTP/2 CVE-2023-44487 + +Envoy mitigates CVE-2023-44487 with some default runtime settings, however the `http.max_requests_per_io_cycle` does not have a default value. +This change allows configuring this runtime setting via Contour configuration to allow administrators of Contour to prevent abusive connections from starving resources from other valid connections. +The default is left as the existing behavior (no limit) so as not to impact existing valid traffic. 
+ +The Contour ConfigMap can be modified similar to the following (and Contour restarted) to set this value: + +``` +listener: + max-requests-per-io-cycle: 10 +``` + +(Note this can be used in addition to the existing Listener configuration field `listener.max-requests-per-connection` which is used primarily for HTTP/1.1 connections and is an approximate limit for HTTP/2) + +## HTTP/2 max concurrent streams is configurable + +This field can be used to limit the number of concurrent streams Envoy will allow on a single connection from a downstream peer. +It can be used to tune resource usage and as a mitigation for DOS attacks arising from vulnerabilities like CVE-2023-44487. + +The Contour ConfigMap can be modified similar to the following (and Contour restarted) to set this value: + +``` +listener: + http2-max-concurrent-streams: 50 +``` + + +# Installing and Upgrading + +For a fresh install of Contour, consult the [getting started documentation](https://projectcontour.io/getting-started/). + +To upgrade an existing Contour installation, please consult the [upgrade documentation](https://projectcontour.io/resources/upgrading/). + + +# Compatible Kubernetes Versions + +Contour v1.26.1 is tested against Kubernetes 1.26 through 1.28. + + +# Are you a Contour user? We would love to know! +If you're using Contour and want to add your organization to our adopters list, please visit this [page](https://github.com/projectcontour/contour/blob/master/ADOPTERS.md). If you prefer to keep your organization name anonymous but still give us feedback into your usage and scenarios for Contour, please post on this [GitHub thread](https://github.com/projectcontour/contour/issues/1269). diff --git a/changelogs/unreleased/5543-izturn-small.md b/changelogs/unreleased/5543-izturn-small.md new file mode 100644 index 00000000000..7d069f3f0f3 --- /dev/null +++ b/changelogs/unreleased/5543-izturn-small.md @@ -0,0 +1 @@ +Add Kubernetes labels configurability to ContourDeployment resource. 
to enable customize pod labels for pod/contour & pod/envoy \ No newline at end of file diff --git a/changelogs/unreleased/5657-shadialtarsha-minor.md b/changelogs/unreleased/5657-shadialtarsha-minor.md new file mode 100644 index 00000000000..8155abf4e0b --- /dev/null +++ b/changelogs/unreleased/5657-shadialtarsha-minor.md @@ -0,0 +1,38 @@ +## Specific routes can now opt out of the virtual host's global rate limit policy + +Setting `rateLimitPolicy.global.disabled` flag to true on a specific route now disables the global rate limit policy inherited from the virtual host for that route. + +### Sample Configurations +In the example below, `/foo` route is opted out from the global rate limit policy defined by the virtualhost. +#### httpproxy.yaml +```yaml +apiVersion: projectcontour.io/v1 +kind: HTTPProxy +metadata: + name: echo +spec: + virtualhost: + fqdn: local.projectcontour.io + rateLimitPolicy: + global: + descriptors: + - entries: + - remoteAddress: {} + - genericKey: + key: vhost + value: local.projectcontour.io + routes: + - conditions: + - prefix: / + services: + - name: ingress-conformance-echo + port: 80 + - conditions: + - prefix: /foo + rateLimitPolicy: + global: + disabled: true + services: + - name: ingress-conformance-echo + port: 80 +``` diff --git a/changelogs/unreleased/5672-therealak12-minor.md b/changelogs/unreleased/5672-therealak12-minor.md new file mode 100644 index 00000000000..2b3efc28bed --- /dev/null +++ b/changelogs/unreleased/5672-therealak12-minor.md @@ -0,0 +1,5 @@ +## Contour now waits for the cache sync before starting the DAG rebuild and XDS server + +Before this, we only waited for informer caches to sync but didn't wait for delivering the events to subscribed handlers. +Now contour waits for the initial list of Kubernetes objects to be cached and processed by handlers (using the returned `HasSynced` methods) +and then starts building its DAG and serving XDS. 
diff --git a/changelogs/unreleased/5678-clayton-gonsalves-minor.md b/changelogs/unreleased/5678-clayton-gonsalves-minor.md new file mode 100644 index 00000000000..2037e2ccb28 --- /dev/null +++ b/changelogs/unreleased/5678-clayton-gonsalves-minor.md @@ -0,0 +1,24 @@ +## HTTPProxy: Allow Host header rewrite with dynamic headers. + +This Change allows the host header to be rewritten on requests using dynamic headers on the only route level. + +#### Example +```yaml +apiVersion: projectcontour.io/v1 +kind: HTTPProxy +metadata: + name: dynamic-host-header-rewrite +spec: + fqdn: local.projectcontour.io + routes: + - conditions: + - prefix: / + services: + - name: s1 + port: 80 + - requestHeaderPolicy: + set: + - name: host + value: "%REQ(x-rewrite-header)%" +``` + diff --git a/changelogs/unreleased/5686-izturn-small.md b/changelogs/unreleased/5686-izturn-small.md new file mode 100644 index 00000000000..7e36c60111d --- /dev/null +++ b/changelogs/unreleased/5686-izturn-small.md @@ -0,0 +1 @@ +Add flags: `--incluster`, `--kubeconfig` for enable run the `gateway-provisioner` in or out of the cluster. \ No newline at end of file diff --git a/changelogs/unreleased/5699-yangyy93-small.md b/changelogs/unreleased/5699-yangyy93-small.md new file mode 100644 index 00000000000..103b384cd40 --- /dev/null +++ b/changelogs/unreleased/5699-yangyy93-small.md @@ -0,0 +1 @@ +Gateway provisioner: Add the `overloadMaxHeapSize` configuration option to contourDeployment to allow adding [overloadManager](https://projectcontour.io/docs/main/config/overload-manager/) configuration when generating envoy's initial configuration file. diff --git a/changelogs/unreleased/5731-skriss-small.md b/changelogs/unreleased/5731-skriss-small.md deleted file mode 100644 index b4108a89e00..00000000000 --- a/changelogs/unreleased/5731-skriss-small.md +++ /dev/null @@ -1 +0,0 @@ -Updates to Go 1.21.0. See the [Go release notes](https://go.dev/doc/devel/release#go1.21) for more information. 
\ No newline at end of file diff --git a/changelogs/unreleased/5752-davinci26-major.md b/changelogs/unreleased/5752-davinci26-major.md new file mode 100644 index 00000000000..de420dff378 --- /dev/null +++ b/changelogs/unreleased/5752-davinci26-major.md @@ -0,0 +1,15 @@ +## Fix bug with algorithm used to sort Envoy regex/prefix path rules + +Envoy greedy matches routes and as a result the order route matches are presented to Envoy is important. Contour attempts to produce consistent routing tables so that the most specific route matches are given preference. This is done to facilitate consistency when using HTTPProxy inclusion and provide a uniform user experience for route matching to be inline with Ingress and Gateway API Conformance. + +This changes fixes the sorting algorithm used for `Prefix` and `Regex` based path matching. Previously the algorithm lexicographically sorted based on the path match string instead of sorting them based on the length of the `Prefix`|`Regex`. i.e. Longer prefix/regexes will be sorted first in order to give preference to more specific routes, then lexicographic sorting for things of the same length. + +Note that for prefix matching, this change is _not_ expected to change the relative ordering of more specific prefixes vs. less specific ones when the more specific prefix match string has the less specific one as a prefix, e.g. `/foo/bar` will continue to sort before `/foo`. However, relative ordering of other combinations of prefix matches may change per the above description. +### How to update safely + +Caution is advised if you update Contour and you are operating large routing tables. We advise you to: + +1. Deploy a duplicate Contour installation that parses the same CRDs +2. Port-forward to the Envoy admin interface [docs](https://projectcontour.io/docs/v1.3.0/troubleshooting/) +3. Access `http://127.0.0.1:9001/config_dump` and compare the configuration of Envoy. In particular the routes and their order. 
The prefix routes might be changing in order, so if they are you need to verify that the route matches as expected. + diff --git a/changelogs/unreleased/5804-skriss-small.md b/changelogs/unreleased/5804-skriss-small.md new file mode 100644 index 00000000000..721d9536180 --- /dev/null +++ b/changelogs/unreleased/5804-skriss-small.md @@ -0,0 +1 @@ +Gateway API: set Listeners' `ResolvedRefs` condition to `true` by default. \ No newline at end of file diff --git a/changelogs/unreleased/5827-sunjayBhatia-minor.md b/changelogs/unreleased/5827-sunjayBhatia-minor.md new file mode 100644 index 00000000000..93010ffb61a --- /dev/null +++ b/changelogs/unreleased/5827-sunjayBhatia-minor.md @@ -0,0 +1,16 @@ +## Max HTTP requests per IO cycle is configurable as an additional mitigation for HTTP/2 CVE-2023-44487 + +Envoy v1.27.1 mitigates CVE-2023-44487 with some default runtime settings, however the `http.max_requests_per_io_cycle` does not have a default value. +This change allows configuring this runtime setting via Contour configuration to allow administrators of Contour to prevent abusive connections from starving resources from other valid connections. +The default is left as the existing behavior (no limit) so as not to impact existing valid traffic. + +The Contour ConfigMap can be modified similar to the following (and Contour restarted) to set this value: + +``` +listener: + max-requests-per-io-cycle: 10 +``` + +(Note this can be used in addition to the existing Listener configuration field `listener.max-requests-per-connection` which is used primarily for HTTP/1.1 connections and is an approximate limit for HTTP/2) + +See the [Envoy release notes](https://www.envoyproxy.io/docs/envoy/v1.27.1/version_history/v1.27/v1.27.1) for more details. 
diff --git a/changelogs/unreleased/5841-sunjayBhatia-small.md b/changelogs/unreleased/5841-sunjayBhatia-small.md new file mode 100644 index 00000000000..4f254e3482d --- /dev/null +++ b/changelogs/unreleased/5841-sunjayBhatia-small.md @@ -0,0 +1 @@ +Updates to Go 1.21.3. See the [Go release notes](https://go.dev/doc/devel/release#go1.21.minor) for more information. diff --git a/changelogs/unreleased/5850-sunjayBhatia-minor.md b/changelogs/unreleased/5850-sunjayBhatia-minor.md new file mode 100644 index 00000000000..32e38a6c494 --- /dev/null +++ b/changelogs/unreleased/5850-sunjayBhatia-minor.md @@ -0,0 +1,11 @@ +## HTTP/2 max concurrent streams is configurable + +This field can be used to limit the number of concurrent streams Envoy will allow on a single connection from a downstream peer. +It can be used to tune resource usage and as a mitigation for DOS attacks arising from vulnerabilities like CVE-2023-44487. + +The Contour ConfigMap can be modified similar to the following (and Contour restarted) to set this value: + +``` +listener: + http2-max-concurrent-streams: 50 +``` diff --git a/changelogs/unreleased/5863-sunjayBhatia-small.md b/changelogs/unreleased/5863-sunjayBhatia-small.md new file mode 100644 index 00000000000..6441a67198a --- /dev/null +++ b/changelogs/unreleased/5863-sunjayBhatia-small.md @@ -0,0 +1 @@ +Updates Envoy to v1.27.2. See the release notes for v1.27.1 [here](https://www.envoyproxy.io/docs/envoy/v1.27.1/version_history/v1.27/v1.27.1) and v1.27.2 [here](https://www.envoyproxy.io/docs/envoy/v1.27.2/version_history/v1.27/v1.27.2). 
diff --git a/cmd/contour/gatewayprovisioner.go b/cmd/contour/gatewayprovisioner.go index bf5c1fb3a42..cfcbd77857a 100644 --- a/cmd/contour/gatewayprovisioner.go +++ b/cmd/contour/gatewayprovisioner.go @@ -17,11 +17,13 @@ import ( "fmt" "os" - "github.com/alecthomas/kingpin/v2" - "github.com/novln/docker-parser/distribution/reference" + "github.com/projectcontour/contour/internal/k8s" "github.com/projectcontour/contour/internal/provisioner" "github.com/projectcontour/contour/internal/provisioner/controller" "github.com/projectcontour/contour/pkg/config" + + "github.com/alecthomas/kingpin/v2" + "github.com/distribution/reference" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" @@ -34,7 +36,7 @@ func registerGatewayProvisioner(app *kingpin.Application) (*kingpin.CmdClause, * provisionerConfig := &gatewayProvisionerConfig{ contourImage: "ghcr.io/projectcontour/contour:main", - envoyImage: "docker.io/envoyproxy/envoy:v1.27.0", + envoyImage: "docker.io/envoyproxy/envoy:v1.27.2", metricsBindAddress: ":8080", leaderElection: false, leaderElectionID: "0d879e31.projectcontour.io", @@ -56,6 +58,13 @@ func registerGatewayProvisioner(app *kingpin.Application) (*kingpin.CmdClause, * Default(provisionerConfig.gatewayControllerName). StringVar(&provisionerConfig.gatewayControllerName) + cmd.Flag("incluster", "Use in cluster configuration."). + Default("true"). + BoolVar(&provisionerConfig.inCluster) + cmd.Flag("kubeconfig", "Path to kubeconfig (if not in running inside a cluster)."). + PlaceHolder("/path/to/file"). + StringVar(&provisionerConfig.kubeconfig) + cmd.Flag("leader-election-namespace", "The namespace in which the leader election resource will be created."). Default(config.GetenvOr("CONTOUR_PROVISIONER_NAMESPACE", "projectcontour")). 
StringVar(&provisionerConfig.leaderElectionNamespace) @@ -95,6 +104,10 @@ type gatewayProvisionerConfig struct { // gatewayControllerName defines the controller string that this gateway provisioner instance // will process GatewayClasses and Gateways for. gatewayControllerName string + + // Kubernetes client parameters. + inCluster bool + kubeconfig string } func runGatewayProvisioner(config *gatewayProvisionerConfig) { @@ -111,7 +124,14 @@ func runGatewayProvisioner(config *gatewayProvisionerConfig) { setupLog.Info("using contour", "image", config.contourImage) setupLog.Info("using envoy", "image", config.envoyImage) - mgr, err := createManager(ctrl.GetConfigOrDie(), config) + // Establish k8s core client connection. + restConfig, err := k8s.NewRestConfig(config.kubeconfig, config.inCluster) + if err != nil { + setupLog.Error(err, "failed to create REST config for Kubernetes clients") + os.Exit(1) + } + + mgr, err := createManager(restConfig, config) if err != nil { setupLog.Error(err, "failed to create contour gateway provisioner") os.Exit(1) diff --git a/cmd/contour/serve.go b/cmd/contour/serve.go index 1d9a41a713d..85490f7caa4 100644 --- a/cmd/contour/serve.go +++ b/cmd/contour/serve.go @@ -15,7 +15,6 @@ package main import ( "context" - "errors" "fmt" "net" "net/http" @@ -25,6 +24,25 @@ import ( "github.com/alecthomas/kingpin/v2" envoy_server_v3 "github.com/envoyproxy/go-control-plane/pkg/server/v3" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + corev1 "k8s.io/api/core/v1" + networking_v1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + ctrl_cache "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + 
controller_runtime_metrics "sigs.k8s.io/controller-runtime/pkg/metrics" + gatewayapi_v1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" + + controller_runtime_metrics_server "sigs.k8s.io/controller-runtime/pkg/metrics/server" + contour_api_v1 "github.com/projectcontour/contour/apis/projectcontour/v1" contour_api_v1alpha1 "github.com/projectcontour/contour/apis/projectcontour/v1alpha1" "github.com/projectcontour/contour/internal/annotation" @@ -46,22 +64,10 @@ import ( "github.com/projectcontour/contour/internal/xdscache" xdscache_v3 "github.com/projectcontour/contour/internal/xdscache/v3" "github.com/projectcontour/contour/pkg/config" - "github.com/prometheus/client_golang/prometheus" - "github.com/sirupsen/logrus" - corev1 "k8s.io/api/core/v1" - networking_v1 "k8s.io/api/networking/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - ctrl_cache "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/manager/signals" - controller_runtime_metrics "sigs.k8s.io/controller-runtime/pkg/metrics" - controller_runtime_metrics_server "sigs.k8s.io/controller-runtime/pkg/metrics/server" - gatewayapi_v1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" +) + +const ( + initialDagBuildPollPeriod = 100 * time.Millisecond ) // registerServe registers the serve subcommand and flags @@ -176,11 +182,12 @@ func registerServe(app *kingpin.Application) (*kingpin.CmdClause, *serveContext) } type Server struct { - log logrus.FieldLogger - ctx *serveContext - coreClient *kubernetes.Clientset - mgr manager.Manager - registry *prometheus.Registry + log logrus.FieldLogger + ctx *serveContext + coreClient *kubernetes.Clientset + mgr manager.Manager + registry *prometheus.Registry + handlerCacheSyncs []cache.InformerSynced } // NewServer returns a Server object which 
contains the initial configuration @@ -430,6 +437,7 @@ func (s *Server) doServe() error { XffNumTrustedHops: *contourConfiguration.Envoy.Network.XffNumTrustedHops, ConnectionBalancer: contourConfiguration.Envoy.Listener.ConnectionBalancer, MaxRequestsPerConnection: contourConfiguration.Envoy.Listener.MaxRequestsPerConnection, + HTTP2MaxConcurrentStreams: contourConfiguration.Envoy.Listener.HTTP2MaxConcurrentStreams, PerConnectionBufferLimitBytes: contourConfiguration.Envoy.Listener.PerConnectionBufferLimitBytes, SocketOptions: contourConfiguration.Envoy.Listener.SocketOptions, } @@ -458,7 +466,9 @@ func (s *Server) doServe() error { &xdscache_v3.RouteCache{}, &xdscache_v3.ClusterCache{}, endpointHandler, - &xdscache_v3.RuntimeCache{}, + xdscache_v3.NewRuntimeCache(xdscache_v3.ConfigurableRuntimeSettings{ + MaxRequestsPerIOCycle: contourConfiguration.Envoy.Listener.MaxRequestsPerIOCycle, + }), } // snapshotHandler is used to produce new snapshots when the internal state changes for any xDS resource. @@ -537,6 +547,16 @@ func (s *Server) doServe() error { contourMetrics, dag.ComposeObservers(append(xdscache.ObserversOf(resources), snapshotHandler)...), ) + + hasSynced := func() bool { + for _, syncFunc := range s.handlerCacheSyncs { + if !syncFunc() { + return false + } + } + return true + } + contourHandler := contour.NewEventHandler(contour.EventHandlerConfig{ Logger: s.log.WithField("context", "contourEventHandler"), HoldoffDelay: 100 * time.Millisecond, @@ -544,7 +564,7 @@ func (s *Server) doServe() error { Observer: observer, StatusUpdater: sh.Writer(), Builder: builder, - }) + }, hasSynced) // Wrap contourHandler in an EventRecorder which tracks API server events. eventHandler := &contour.EventRecorder{ @@ -568,7 +588,7 @@ func (s *Server) doServe() error { // Inform on the remaining resources. 
for name, r := range informerResources { - if err := informOnResource(r, eventHandler, s.mgr.GetCache()); err != nil { + if err := s.informOnResource(r, eventHandler); err != nil { s.log.WithError(err).WithField("resource", name).Fatal("failed to create informer") } } @@ -584,15 +604,15 @@ func (s *Server) doServe() error { handler = k8s.NewNamespaceFilter(sets.List(secretNamespaces), eventHandler) } - if err := informOnResource(&corev1.Secret{}, handler, s.mgr.GetCache()); err != nil { + if err := s.informOnResource(&corev1.Secret{}, handler); err != nil { s.log.WithError(err).WithField("resource", "secrets").Fatal("failed to create informer") } // Inform on endpoints. - if err := informOnResource(&corev1.Endpoints{}, &contour.EventRecorder{ + if err := s.informOnResource(&corev1.Endpoints{}, &contour.EventRecorder{ Next: endpointHandler, Counter: contourMetrics.EventHandlerOperations, - }, s.mgr.GetCache()); err != nil { + }); err != nil { s.log.WithError(err).WithField("resource", "endpoints").Fatal("failed to create informer") } @@ -646,7 +666,7 @@ func (s *Server) doServe() error { handler = k8s.NewNamespaceFilter([]string{contourConfiguration.Envoy.Service.Namespace}, handler) } - if err := informOnResource(&corev1.Service{}, handler, s.mgr.GetCache()); err != nil { + if err := s.informOnResource(&corev1.Service{}, handler); err != nil { s.log.WithError(err).WithField("resource", "services").Fatal("failed to create informer") } @@ -657,11 +677,11 @@ func (s *Server) doServe() error { xdsServer := &xdsServer{ log: s.log, - mgr: s.mgr, registry: s.registry, config: *contourConfiguration.XDSServer, snapshotHandler: snapshotHandler, resources: resources, + initialDagBuilt: contourHandler.HasBuiltInitialDag, } if err := s.mgr.Add(xdsServer); err != nil { return err @@ -830,11 +850,11 @@ func (s *Server) setupDebugService(debugConfig contour_api_v1alpha1.DebugConfig, type xdsServer struct { log logrus.FieldLogger - mgr manager.Manager registry *prometheus.Registry 
config contour_api_v1alpha1.XDSServerConfig snapshotHandler *xdscache.SnapshotHandler resources []xdscache.ResourceCache + initialDagBuilt func() bool } func (x *xdsServer) NeedLeaderElection() bool { @@ -844,11 +864,13 @@ func (x *xdsServer) NeedLeaderElection() bool { func (x *xdsServer) Start(ctx context.Context) error { log := x.log.WithField("context", "xds") - log.Printf("waiting for informer caches to sync") - if !x.mgr.GetCache().WaitForCacheSync(ctx) { - return errors.New("informer cache failed to sync") + log.Printf("waiting for the initial dag to be built") + if err := wait.PollUntilContextCancel(ctx, initialDagBuildPollPeriod, true, func(ctx context.Context) (done bool, err error) { + return x.initialDagBuilt(), nil + }); err != nil { + return fmt.Errorf("failed to wait for initial dag build, %w", err) } - log.Printf("informer caches synced") + log.Printf("the initial dag is built") grpcServer := xds.NewServer(x.registry, grpcOptions(log, x.config.TLS)...) @@ -953,12 +975,12 @@ func (s *Server) setupGatewayAPI(contourConfiguration contour_api_v1alpha1.Conto // to process, we just need informers to get events. case contourConfiguration.Gateway.GatewayRef != nil: // Inform on GatewayClasses. - if err := informOnResource(&gatewayapi_v1beta1.GatewayClass{}, eventHandler, mgr.GetCache()); err != nil { + if err := s.informOnResource(&gatewayapi_v1beta1.GatewayClass{}, eventHandler); err != nil { s.log.WithError(err).WithField("resource", "gatewayclasses").Fatal("failed to create informer") } // Inform on Gateways. 
- if err := informOnResource(&gatewayapi_v1beta1.Gateway{}, eventHandler, mgr.GetCache()); err != nil { + if err := s.informOnResource(&gatewayapi_v1beta1.Gateway{}, eventHandler); err != nil { s.log.WithError(err).WithField("resource", "gateways").Fatal("failed to create informer") } // Otherwise, run the GatewayClass and Gateway controllers to determine @@ -1029,12 +1051,12 @@ func (s *Server) setupGatewayAPI(contourConfiguration contour_api_v1alpha1.Conto } // Inform on ReferenceGrants. - if err := informOnResource(&gatewayapi_v1beta1.ReferenceGrant{}, eventHandler, mgr.GetCache()); err != nil { + if err := s.informOnResource(&gatewayapi_v1beta1.ReferenceGrant{}, eventHandler); err != nil { s.log.WithError(err).WithField("resource", "referencegrants").Fatal("failed to create informer") } // Inform on Namespaces. - if err := informOnResource(&corev1.Namespace{}, eventHandler, mgr.GetCache()); err != nil { + if err := s.informOnResource(&corev1.Namespace{}, eventHandler); err != nil { s.log.WithError(err).WithField("resource", "namespaces").Fatal("failed to create informer") } } @@ -1197,12 +1219,18 @@ func (s *Server) getDAGBuilder(dbc dagBuilderConfig) *dag.Builder { return builder } -func informOnResource(obj client.Object, handler cache.ResourceEventHandler, cache ctrl_cache.Cache) error { - inf, err := cache.GetInformer(context.Background(), obj) +func (s *Server) informOnResource(obj client.Object, handler cache.ResourceEventHandler) error { + inf, err := s.mgr.GetCache().GetInformer(context.Background(), obj) if err != nil { return err } - _, err = inf.AddEventHandler(handler) - return err + registration, err := inf.AddEventHandler(handler) + + if err != nil { + return err + } + + s.handlerCacheSyncs = append(s.handlerCacheSyncs, registration.HasSynced) + return nil } diff --git a/cmd/contour/servecontext.go b/cmd/contour/servecontext.go index 629ef75280f..7d3f72312ad 100644 --- a/cmd/contour/servecontext.go +++ b/cmd/contour/servecontext.go @@ -528,6 
+528,8 @@ func (ctx *serveContext) convertToContourConfigurationSpec() contour_api_v1alpha ConnectionBalancer: ctx.Config.Listener.ConnectionBalancer, PerConnectionBufferLimitBytes: ctx.Config.Listener.PerConnectionBufferLimitBytes, MaxRequestsPerConnection: ctx.Config.Listener.MaxRequestsPerConnection, + MaxRequestsPerIOCycle: ctx.Config.Listener.MaxRequestsPerIOCycle, + HTTP2MaxConcurrentStreams: ctx.Config.Listener.HTTP2MaxConcurrentStreams, TLS: &contour_api_v1alpha1.EnvoyTLS{ MinimumProtocolVersion: ctx.Config.TLS.MinimumProtocolVersion, MaximumProtocolVersion: ctx.Config.TLS.MaximumProtocolVersion, diff --git a/cmd/contour/servecontext_test.go b/cmd/contour/servecontext_test.go index 1e7cc21db2a..dd8d3bf681c 100644 --- a/cmd/contour/servecontext_test.go +++ b/cmd/contour/servecontext_test.go @@ -874,6 +874,18 @@ func TestConvertServeContext(t *testing.T) { return cfg }, }, + "envoy listener settings": { + getServeContext: func(ctx *serveContext) *serveContext { + ctx.Config.Listener.MaxRequestsPerIOCycle = ref.To(uint32(10)) + ctx.Config.Listener.HTTP2MaxConcurrentStreams = ref.To(uint32(30)) + return ctx + }, + getContourConfiguration: func(cfg contour_api_v1alpha1.ContourConfigurationSpec) contour_api_v1alpha1.ContourConfigurationSpec { + cfg.Envoy.Listener.MaxRequestsPerIOCycle = ref.To(uint32(10)) + cfg.Envoy.Listener.HTTP2MaxConcurrentStreams = ref.To(uint32(30)) + return cfg + }, + }, } for name, tc := range cases { diff --git a/cmd/contour/shutdownmanager.go b/cmd/contour/shutdownmanager.go index f9dfd7e7874..b41f748ac81 100644 --- a/cmd/contour/shutdownmanager.go +++ b/cmd/contour/shutdownmanager.go @@ -98,7 +98,7 @@ func newShutdownContext() *shutdownContext { } // healthzHandler handles the /healthz endpoint which is used for the shutdown-manager's liveness probe. 
-func (s *shutdownmanagerContext) healthzHandler(w http.ResponseWriter, r *http.Request) { +func (s *shutdownmanagerContext) healthzHandler(w http.ResponseWriter, _ *http.Request) { if _, err := w.Write([]byte(http.StatusText(http.StatusOK))); err != nil { s.WithField("context", "healthzHandler").Error(err) } diff --git a/design/configuration-crd.md b/design/configuration-crd.md index c083a968b3f..ccc8918d275 100644 --- a/design/configuration-crd.md +++ b/design/configuration-crd.md @@ -145,7 +145,7 @@ Contour will provide a new command or external tool (similar to ir2proxy) which A managed version of Contour was made available with the `Contour Operator`. Since Contour will manage Envoy instances, the Operator will now manage instances of Contour. The details of how an instance of Contour should be deployed within a cluster will be defined in the second CRD named `ContourDeployment`. -The `spec.confguration` of this object will be the same struct defined in the `ContourConfiguration`. +The `spec.configuration` of this object will be the same struct defined in the `ContourConfiguration`. A controller will watch for these objects to be created and take action on them accordingly to make desired state in the cluster match the configuration on the spec. diff --git a/design/downstream-crl-design.md b/design/downstream-crl-design.md index 727b30e6306..7ebb9ab3dab 100644 --- a/design/downstream-crl-design.md +++ b/design/downstream-crl-design.md @@ -59,7 +59,7 @@ The same approach shall be followed for configuring revocation lists as is used The CRL is stored in an opaque Kubernetes secret. The secret will be stored in the same namespace as the corresponding `HTTPProxy` object. The secret object shall contain entry named `crl.pem`. -The constents shall be the CRL in PEM format. +The contents shall be the CRL in PEM format. The file may contain "PEM bundle", that is, a list of CRLs concatenated in single file. 
Example: diff --git a/design/external-authorization-design.md b/design/external-authorization-design.md index caf73356fca..8a298bfc914 100644 --- a/design/external-authorization-design.md +++ b/design/external-authorization-design.md @@ -32,7 +32,7 @@ This document describes a design for performing request authorization for virtua ## High-Level Design A new `ExtensionService` CRD adds a way to represent and track an authorization service. -This CRD is relatively generic, so that it can be re-used for Envoy rate limiting and logging services. +This CRD is relatively generic, so that it can be reused for Envoy rate limiting and logging services. The core of the `ExtensionService` CRD is subset of the `projectcontour.v1.HTTPProxy` `Service` specification. Re-using the `Service` type allows the operator to specify configuration in familiar and consistent terms, especially TLS configuration. @@ -115,7 +115,7 @@ Note that the Envoy cluster name can be non-obvious, so exposing it in status ma If the `Service` refers to a Kubernetes `ExternalName`, Contour should program Envoy to send the traffic to the external destination. -The `ExtensionService` CRD re-uses the `Service` type from the `projectcontour.io/v1` API. +The `ExtensionService` CRD reuses the `Service` type from the `projectcontour.io/v1` API. However, the setting following fields can generate a validation errors: - `Protocol` may only be set to `h2` or `h2c` (the default should be `h2`). @@ -338,7 +338,7 @@ Once that happens, the client has to resend the original request and it will ent 1. Contour could install itself as the authorization server. This could remove some of the limitations of the Envoy configuration structure at the cost of more complexity in Contour. 1. Integrate external authorization directly into `HTTPProxy`. - This increases the complexity of the `HTTPProxy` structure and makes it difficult to re-use the same authorization service acrtoss multiple proxies. 
+ This increases the complexity of the `HTTPProxy` structure and makes it difficult to reuse the same authorization service across multiple proxies. A separate CRD gives better opportunities to expose useful operational status. Integrating specific authorization parameters into `HTTPProxy` prevents operators implementing their own authorization flows. diff --git a/design/session-affinity.md b/design/session-affinity.md index 001b4f68f8c..e1e2862a23d 100644 --- a/design/session-affinity.md +++ b/design/session-affinity.md @@ -155,7 +155,7 @@ For example consider two routes, `/cart` and `/checkout` are served by the same - name: ecommerce-pro port: 8080 strategy: Cookie - - match: /cheeckout + - match: /checkout - name: ecommerce-pro port: 8080 strategy: Cookie diff --git a/examples/contour/01-crds.yaml b/examples/contour/01-crds.yaml index 9fceb5c1458..692dd1a5781 100644 --- a/examples/contour/01-crds.yaml +++ b/examples/contour/01-crds.yaml @@ -196,6 +196,18 @@ spec: slashes from request URL paths. \n Contour's default is false." type: boolean + httpMaxConcurrentStreams: + description: Defines the value for SETTINGS_MAX_CONCURRENT_STREAMS + Envoy will advertise in the SETTINGS frame in HTTP/2 connections + and the limit for concurrent streams allowed for a peer + on a single HTTP/2 connection. It is recommended to not + set this lower than 100 but this field can be used to bound + resource usage by HTTP/2 connections and mitigate attacks + like CVE-2023-44487. The default value when this is not + set is unlimited. + format: int32 + minimum: 1 + type: integer maxRequestsPerConnection: description: Defines the maximum requests for downstream connections. If not specified, there is no limit.
see https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/core/v3/protocol.proto#envoy-v3-api-msg-config-core-v3-httpprotocoloptions @@ -203,6 +215,17 @@ spec: format: int32 minimum: 1 type: integer + maxRequestsPerIOCycle: + description: Defines the limit on number of HTTP requests + that Envoy will process from a single connection in a single + I/O cycle. Requests over this limit are processed in subsequent + I/O cycles. Can be used as a mitigation for CVE-2023-44487 + when abusive traffic is detected. Configures the http.max_requests_per_io_cycle + Envoy runtime setting. The default value when this is not + set is no limit. + format: int32 + minimum: 1 + type: integer per-connection-buffer-limit-bytes: description: Defines the soft limit on size of the listener’s new connection read and write buffers in bytes. If unspecified, @@ -1392,6 +1415,13 @@ spec: Contour pods. the annotations for Prometheus will be appended or overwritten with predefined value. type: object + podLabels: + additionalProperties: + type: string + description: PodLabels defines labels to add to the Contour pods. + If there is a label with the same key as in `ContourDeploymentSpec.ResourceLabels`, + the one here has a higher priority. + type: object replicas: description: "Deprecated: Use `DeploymentSettings.Replicas` instead. \n Replicas is the desired number of Contour replicas. If if @@ -3381,6 +3411,15 @@ spec: type: object type: array type: object + overloadMaxHeapSize: + description: 'OverloadMaxHeapSize defines the maximum heap memory + of the envoy controlled by the overload manager. When the value + is greater than 0, the overload manager is enabled, and when + envoy reaches 95% of the maximum heap size, it performs a shrink + heap operation, When it reaches 98% of the maximum heap size, + Envoy Will stop accepting requests. 
More info: https://projectcontour.io/docs/main/config/overload-manager/' + format: int64 + type: integer podAnnotations: additionalProperties: type: string @@ -3388,6 +3427,13 @@ spec: Envoy pods. the annotations for Prometheus will be appended or overwritten with predefined value. type: object + podLabels: + additionalProperties: + type: string + description: PodLabels defines labels to add to the Envoy pods. + If there is a label with the same key as in `ContourDeploymentSpec.ResourceLabels`, + the one here has a higher priority. + type: object replicas: description: "Deprecated: Use `DeploymentSettings.Replicas` instead. \n Replicas is the desired number of Envoy replicas. If WorkloadType @@ -3626,6 +3672,18 @@ spec: duplicate slashes from request URL paths. \n Contour's default is false." type: boolean + httpMaxConcurrentStreams: + description: Defines the value for SETTINGS_MAX_CONCURRENT_STREAMS + Envoy will advertise in the SETTINGS frame in HTTP/2 + connections and the limit for concurrent streams allowed + for a peer on a single HTTP/2 connection. It is recommended + to not set this lower than 100 but this field can be + used to bound resource usage by HTTP/2 connections and + mitigate attacks like CVE-2023-44487. The default value + when this is not set is unlimited. + format: int32 + minimum: 1 + type: integer maxRequestsPerConnection: description: Defines the maximum requests for downstream connections. If not specified, there is no limit. see @@ -3634,6 +3692,18 @@ spec: format: int32 minimum: 1 type: integer + maxRequestsPerIOCycle: + description: Defines the limit on number of HTTP requests + that Envoy will process from a single connection in + a single I/O cycle. Requests over this limit are processed + in subsequent I/O cycles. Can be used as a mitigation + for CVE-2023-44487 when abusive traffic is detected. + Configures the http.max_requests_per_io_cycle Envoy + runtime setting. The default value when this is not + set is no limit. 
+ format: int32 + minimum: 1 + type: integer per-connection-buffer-limit-bytes: description: Defines the soft limit on size of the listener’s new connection read and write buffers in bytes. If unspecified, @@ -5944,8 +6014,15 @@ spec: type: object type: object requestHeadersPolicy: - description: The policy for managing request headers during - proxying. + description: "The policy for managing request headers during + proxying. \n You may dynamically rewrite the Host header to + be forwarded upstream to the content of a request header using + the below format \"%REQ(X-Header-Name)%\". If the value of + the header is empty, it is ignored. \n *NOTE: Pay attention + to the potential security implications of using this option. + Provided header must come from trusted source. \n **NOTE: + The header rewrite is only done while forwarding and has no + bearing on the routing decision." properties: remove: description: Remove specifies a list of HTTP header names diff --git a/examples/contour/03-envoy.yaml b/examples/contour/03-envoy.yaml index 1c4dfc1d020..aab46d9b276 100644 --- a/examples/contour/03-envoy.yaml +++ b/examples/contour/03-envoy.yaml @@ -50,7 +50,7 @@ spec: - --log-level info command: - envoy - image: docker.io/envoyproxy/envoy:v1.27.0 + image: docker.io/envoyproxy/envoy:v1.27.2 imagePullPolicy: IfNotPresent name: envoy env: diff --git a/examples/deployment/03-envoy-deployment.yaml b/examples/deployment/03-envoy-deployment.yaml index 897ef8abb43..9e48a7e7038 100644 --- a/examples/deployment/03-envoy-deployment.yaml +++ b/examples/deployment/03-envoy-deployment.yaml @@ -63,7 +63,7 @@ spec: - --log-level info command: - envoy - image: docker.io/envoyproxy/envoy:v1.27.0 + image: docker.io/envoyproxy/envoy:v1.27.2 imagePullPolicy: IfNotPresent name: envoy env: diff --git a/examples/gateway/00-crds.yaml b/examples/gateway/00-crds.yaml index b4d1ccad162..de2e81b518e 100644 --- a/examples/gateway/00-crds.yaml +++ b/examples/gateway/00-crds.yaml @@ -940,7 +940,7 @@ 
spec: for each listener rule: 'self.all(l1, self.exists_one(l2, l1.port == l2.port && l1.protocol == l2.protocol && (has(l1.hostname) && has(l2.hostname) ? l1.hostname - == l2.hostname : true)))' + == l2.hostname : !has(l1.hostname) && !has(l2.hostname))))' required: - gatewayClassName - listeners @@ -1718,7 +1718,7 @@ spec: for each listener rule: 'self.all(l1, self.exists_one(l2, l1.port == l2.port && l1.protocol == l2.protocol && (has(l1.hostname) && has(l2.hostname) ? l1.hostname - == l2.hostname : true)))' + == l2.hostname : !has(l1.hostname) && !has(l2.hostname))))' required: - gatewayClassName - listeners @@ -5845,6 +5845,48 @@ spec: type: object maxItems: 8 type: array + timeouts: + description: "Timeouts defines the timeouts that can be configured + for an HTTP request. \n Support: Extended \n " + properties: + backendRequest: + description: "BackendRequest specifies a timeout for an + individual request from the gateway to a backend. This + covers the time from when the request first starts being + sent from the gateway to when the full response has been + received from the backend. \n An entire client HTTP transaction + with a gateway, covered by the Request timeout, may result + in more than one call from the gateway to the destination + backend, for example, if automatic retries are supported. + \n Because the Request timeout encompasses the BackendRequest + timeout, the value of BackendRequest must be <= the value + of Request timeout. \n Support: Extended" + pattern: ^([0-9]{1,5}(h|m|s|ms)){1,4}$ + type: string + request: + description: "Request specifies the maximum duration for + a gateway to respond to an HTTP request. If the gateway + has not been able to respond before this deadline is met, + the gateway MUST return a timeout error. \n For example, + setting the `rules.timeouts.request` field to the value + `10s` in an `HTTPRoute` will cause a timeout if a client + request is taking longer than 10 seconds to complete. 
+ \n This timeout is intended to cover as close to the whole + request-response transaction as possible although an implementation + MAY choose to start the timeout after the entire request + stream has been received instead of immediately after + the transaction is initiated by the client. \n When this + field is unspecified, request timeout behavior is implementation-specific. + \n Support: Extended" + pattern: ^([0-9]{1,5}(h|m|s|ms)){1,4}$ + type: string + type: object + x-kubernetes-validations: + - message: backendRequest timeout cannot be longer than request + timeout + rule: '!(has(self.request) && has(self.backendRequest) && + duration(self.request) != duration(''0s'') && duration(self.backendRequest) + > duration(self.request))' type: object x-kubernetes-validations: - message: RequestRedirect filter must not be used together with @@ -8244,6 +8286,48 @@ spec: type: object maxItems: 8 type: array + timeouts: + description: "Timeouts defines the timeouts that can be configured + for an HTTP request. \n Support: Extended \n " + properties: + backendRequest: + description: "BackendRequest specifies a timeout for an + individual request from the gateway to a backend. This + covers the time from when the request first starts being + sent from the gateway to when the full response has been + received from the backend. \n An entire client HTTP transaction + with a gateway, covered by the Request timeout, may result + in more than one call from the gateway to the destination + backend, for example, if automatic retries are supported. + \n Because the Request timeout encompasses the BackendRequest + timeout, the value of BackendRequest must be <= the value + of Request timeout. \n Support: Extended" + pattern: ^([0-9]{1,5}(h|m|s|ms)){1,4}$ + type: string + request: + description: "Request specifies the maximum duration for + a gateway to respond to an HTTP request. 
If the gateway + has not been able to respond before this deadline is met, + the gateway MUST return a timeout error. \n For example, + setting the `rules.timeouts.request` field to the value + `10s` in an `HTTPRoute` will cause a timeout if a client + request is taking longer than 10 seconds to complete. + \n This timeout is intended to cover as close to the whole + request-response transaction as possible although an implementation + MAY choose to start the timeout after the entire request + stream has been received instead of immediately after + the transaction is initiated by the client. \n When this + field is unspecified, request timeout behavior is implementation-specific. + \n Support: Extended" + pattern: ^([0-9]{1,5}(h|m|s|ms)){1,4}$ + type: string + type: object + x-kubernetes-validations: + - message: backendRequest timeout cannot be longer than request + timeout + rule: '!(has(self.request) && has(self.backendRequest) && + duration(self.request) != duration(''0s'') && duration(self.backendRequest) + > duration(self.request))' type: object x-kubernetes-validations: - message: RequestRedirect filter must not be used together with diff --git a/examples/render/contour-deployment.yaml b/examples/render/contour-deployment.yaml index def722ee900..06c54014828 100644 --- a/examples/render/contour-deployment.yaml +++ b/examples/render/contour-deployment.yaml @@ -415,6 +415,18 @@ spec: slashes from request URL paths. \n Contour's default is false." type: boolean + httpMaxConcurrentStreams: + description: Defines the value for SETTINGS_MAX_CONCURRENT_STREAMS + Envoy will advertise in the SETTINGS frame in HTTP/2 connections + and the limit for concurrent streams allowed for a peer + on a single HTTP/2 connection. It is recommended to not + set this lower than 100 but this field can be used to bound + resource usage by HTTP/2 connections and mitigate attacks + like CVE-2023-44487. The default value when this is not + set is unlimited. 
+ format: int32 + minimum: 1 + type: integer maxRequestsPerConnection: description: Defines the maximum requests for downstream connections. If not specified, there is no limit. see https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/core/v3/protocol.proto#envoy-v3-api-msg-config-core-v3-httpprotocoloptions @@ -422,6 +434,17 @@ spec: format: int32 minimum: 1 type: integer + maxRequestsPerIOCycle: + description: Defines the limit on number of HTTP requests + that Envoy will process from a single connection in a single + I/O cycle. Requests over this limit are processed in subsequent + I/O cycles. Can be used as a mitigation for CVE-2023-44487 + when abusive traffic is detected. Configures the http.max_requests_per_io_cycle + Envoy runtime setting. The default value when this is not + set is no limit. + format: int32 + minimum: 1 + type: integer per-connection-buffer-limit-bytes: description: Defines the soft limit on size of the listener’s new connection read and write buffers in bytes. If unspecified, @@ -1611,6 +1634,13 @@ spec: Contour pods. the annotations for Prometheus will be appended or overwritten with predefined value. type: object + podLabels: + additionalProperties: + type: string + description: PodLabels defines labels to add to the Contour pods. + If there is a label with the same key as in `ContourDeploymentSpec.ResourceLabels`, + the one here has a higher priority. + type: object replicas: description: "Deprecated: Use `DeploymentSettings.Replicas` instead. \n Replicas is the desired number of Contour replicas. If if @@ -3600,6 +3630,15 @@ spec: type: object type: array type: object + overloadMaxHeapSize: + description: 'OverloadMaxHeapSize defines the maximum heap memory + of the envoy controlled by the overload manager. 
When the value + is greater than 0, the overload manager is enabled, and when + envoy reaches 95% of the maximum heap size, it performs a shrink + heap operation, When it reaches 98% of the maximum heap size, + Envoy Will stop accepting requests. More info: https://projectcontour.io/docs/main/config/overload-manager/' + format: int64 + type: integer podAnnotations: additionalProperties: type: string @@ -3607,6 +3646,13 @@ spec: Envoy pods. the annotations for Prometheus will be appended or overwritten with predefined value. type: object + podLabels: + additionalProperties: + type: string + description: PodLabels defines labels to add to the Envoy pods. + If there is a label with the same key as in `ContourDeploymentSpec.ResourceLabels`, + the one here has a higher priority. + type: object replicas: description: "Deprecated: Use `DeploymentSettings.Replicas` instead. \n Replicas is the desired number of Envoy replicas. If WorkloadType @@ -3845,6 +3891,18 @@ spec: duplicate slashes from request URL paths. \n Contour's default is false." type: boolean + httpMaxConcurrentStreams: + description: Defines the value for SETTINGS_MAX_CONCURRENT_STREAMS + Envoy will advertise in the SETTINGS frame in HTTP/2 + connections and the limit for concurrent streams allowed + for a peer on a single HTTP/2 connection. It is recommended + to not set this lower than 100 but this field can be + used to bound resource usage by HTTP/2 connections and + mitigate attacks like CVE-2023-44487. The default value + when this is not set is unlimited. + format: int32 + minimum: 1 + type: integer maxRequestsPerConnection: description: Defines the maximum requests for downstream connections. If not specified, there is no limit. see @@ -3853,6 +3911,18 @@ spec: format: int32 minimum: 1 type: integer + maxRequestsPerIOCycle: + description: Defines the limit on number of HTTP requests + that Envoy will process from a single connection in + a single I/O cycle. 
Requests over this limit are processed + in subsequent I/O cycles. Can be used as a mitigation + for CVE-2023-44487 when abusive traffic is detected. + Configures the http.max_requests_per_io_cycle Envoy + runtime setting. The default value when this is not + set is no limit. + format: int32 + minimum: 1 + type: integer per-connection-buffer-limit-bytes: description: Defines the soft limit on size of the listener’s new connection read and write buffers in bytes. If unspecified, @@ -6163,8 +6233,15 @@ spec: type: object type: object requestHeadersPolicy: - description: The policy for managing request headers during - proxying. + description: "The policy for managing request headers during + proxying. \n You may dynamically rewrite the Host header to + be forwarded upstream to the content of a request header using + the below format \"%REQ(X-Header-Name)%\". If the value of + the header is empty, it is ignored. \n *NOTE: Pay attention + to the potential security implications of using this option. + Provided header must come from trusted source. \n **NOTE: + The header rewrite is only done while forwarding and has no + bearing on the routing decision." properties: remove: description: Remove specifies a list of HTTP header names @@ -8673,7 +8750,7 @@ spec: - --log-level info command: - envoy - image: docker.io/envoyproxy/envoy:v1.27.0 + image: docker.io/envoyproxy/envoy:v1.27.2 imagePullPolicy: IfNotPresent name: envoy env: diff --git a/examples/render/contour-gateway-provisioner.yaml b/examples/render/contour-gateway-provisioner.yaml index aa8adf634cc..fa830028928 100644 --- a/examples/render/contour-gateway-provisioner.yaml +++ b/examples/render/contour-gateway-provisioner.yaml @@ -207,6 +207,18 @@ spec: slashes from request URL paths. \n Contour's default is false." 
type: boolean + httpMaxConcurrentStreams: + description: Defines the value for SETTINGS_MAX_CONCURRENT_STREAMS + Envoy will advertise in the SETTINGS frame in HTTP/2 connections + and the limit for concurrent streams allowed for a peer + on a single HTTP/2 connection. It is recommended to not + set this lower than 100 but this field can be used to bound + resource usage by HTTP/2 connections and mitigate attacks + like CVE-2023-44487. The default value when this is not + set is unlimited. + format: int32 + minimum: 1 + type: integer maxRequestsPerConnection: description: Defines the maximum requests for downstream connections. If not specified, there is no limit. see https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/core/v3/protocol.proto#envoy-v3-api-msg-config-core-v3-httpprotocoloptions @@ -214,6 +226,17 @@ spec: format: int32 minimum: 1 type: integer + maxRequestsPerIOCycle: + description: Defines the limit on number of HTTP requests + that Envoy will process from a single connection in a single + I/O cycle. Requests over this limit are processed in subsequent + I/O cycles. Can be used as a mitigation for CVE-2023-44487 + when abusive traffic is detected. Configures the http.max_requests_per_io_cycle + Envoy runtime setting. The default value when this is not + set is no limit. + format: int32 + minimum: 1 + type: integer per-connection-buffer-limit-bytes: description: Defines the soft limit on size of the listener’s new connection read and write buffers in bytes. If unspecified, @@ -1403,6 +1426,13 @@ spec: Contour pods. the annotations for Prometheus will be appended or overwritten with predefined value. type: object + podLabels: + additionalProperties: + type: string + description: PodLabels defines labels to add to the Contour pods. + If there is a label with the same key as in `ContourDeploymentSpec.ResourceLabels`, + the one here has a higher priority. + type: object replicas: description: "Deprecated: Use `DeploymentSettings.Replicas` instead. 
\n Replicas is the desired number of Contour replicas. If if @@ -3392,6 +3422,15 @@ spec: type: object type: array type: object + overloadMaxHeapSize: + description: 'OverloadMaxHeapSize defines the maximum heap memory + of the envoy controlled by the overload manager. When the value + is greater than 0, the overload manager is enabled, and when + envoy reaches 95% of the maximum heap size, it performs a shrink + heap operation, When it reaches 98% of the maximum heap size, + Envoy Will stop accepting requests. More info: https://projectcontour.io/docs/main/config/overload-manager/' + format: int64 + type: integer podAnnotations: additionalProperties: type: string @@ -3399,6 +3438,13 @@ spec: Envoy pods. the annotations for Prometheus will be appended or overwritten with predefined value. type: object + podLabels: + additionalProperties: + type: string + description: PodLabels defines labels to add to the Envoy pods. + If there is a label with the same key as in `ContourDeploymentSpec.ResourceLabels`, + the one here has a higher priority. + type: object replicas: description: "Deprecated: Use `DeploymentSettings.Replicas` instead. \n Replicas is the desired number of Envoy replicas. If WorkloadType @@ -3637,6 +3683,18 @@ spec: duplicate slashes from request URL paths. \n Contour's default is false." type: boolean + httpMaxConcurrentStreams: + description: Defines the value for SETTINGS_MAX_CONCURRENT_STREAMS + Envoy will advertise in the SETTINGS frame in HTTP/2 + connections and the limit for concurrent streams allowed + for a peer on a single HTTP/2 connection. It is recommended + to not set this lower than 100 but this field can be + used to bound resource usage by HTTP/2 connections and + mitigate attacks like CVE-2023-44487. The default value + when this is not set is unlimited. + format: int32 + minimum: 1 + type: integer maxRequestsPerConnection: description: Defines the maximum requests for downstream connections. If not specified, there is no limit. 
see @@ -3645,6 +3703,18 @@ spec: format: int32 minimum: 1 type: integer + maxRequestsPerIOCycle: + description: Defines the limit on number of HTTP requests + that Envoy will process from a single connection in + a single I/O cycle. Requests over this limit are processed + in subsequent I/O cycles. Can be used as a mitigation + for CVE-2023-44487 when abusive traffic is detected. + Configures the http.max_requests_per_io_cycle Envoy + runtime setting. The default value when this is not + set is no limit. + format: int32 + minimum: 1 + type: integer per-connection-buffer-limit-bytes: description: Defines the soft limit on size of the listener’s new connection read and write buffers in bytes. If unspecified, @@ -5955,8 +6025,15 @@ spec: type: object type: object requestHeadersPolicy: - description: The policy for managing request headers during - proxying. + description: "The policy for managing request headers during + proxying. \n You may dynamically rewrite the Host header to + be forwarded upstream to the content of a request header using + the below format \"%REQ(X-Header-Name)%\". If the value of + the header is empty, it is ignored. \n *NOTE: Pay attention + to the potential security implications of using this option. + Provided header must come from trusted source. \n **NOTE: + The header rewrite is only done while forwarding and has no + bearing on the routing decision." properties: remove: description: Remove specifies a list of HTTP header names @@ -8985,7 +9062,7 @@ spec: for each listener rule: 'self.all(l1, self.exists_one(l2, l1.port == l2.port && l1.protocol == l2.protocol && (has(l1.hostname) && has(l2.hostname) ? l1.hostname - == l2.hostname : true)))' + == l2.hostname : !has(l1.hostname) && !has(l2.hostname))))' required: - gatewayClassName - listeners @@ -9763,7 +9840,7 @@ spec: for each listener rule: 'self.all(l1, self.exists_one(l2, l1.port == l2.port && l1.protocol == l2.protocol && (has(l1.hostname) && has(l2.hostname) ? 
l1.hostname - == l2.hostname : true)))' + == l2.hostname : !has(l1.hostname) && !has(l2.hostname))))' required: - gatewayClassName - listeners @@ -13890,6 +13967,48 @@ spec: type: object maxItems: 8 type: array + timeouts: + description: "Timeouts defines the timeouts that can be configured + for an HTTP request. \n Support: Extended \n " + properties: + backendRequest: + description: "BackendRequest specifies a timeout for an + individual request from the gateway to a backend. This + covers the time from when the request first starts being + sent from the gateway to when the full response has been + received from the backend. \n An entire client HTTP transaction + with a gateway, covered by the Request timeout, may result + in more than one call from the gateway to the destination + backend, for example, if automatic retries are supported. + \n Because the Request timeout encompasses the BackendRequest + timeout, the value of BackendRequest must be <= the value + of Request timeout. \n Support: Extended" + pattern: ^([0-9]{1,5}(h|m|s|ms)){1,4}$ + type: string + request: + description: "Request specifies the maximum duration for + a gateway to respond to an HTTP request. If the gateway + has not been able to respond before this deadline is met, + the gateway MUST return a timeout error. \n For example, + setting the `rules.timeouts.request` field to the value + `10s` in an `HTTPRoute` will cause a timeout if a client + request is taking longer than 10 seconds to complete. + \n This timeout is intended to cover as close to the whole + request-response transaction as possible although an implementation + MAY choose to start the timeout after the entire request + stream has been received instead of immediately after + the transaction is initiated by the client. \n When this + field is unspecified, request timeout behavior is implementation-specific. 
+ \n Support: Extended" + pattern: ^([0-9]{1,5}(h|m|s|ms)){1,4}$ + type: string + type: object + x-kubernetes-validations: + - message: backendRequest timeout cannot be longer than request + timeout + rule: '!(has(self.request) && has(self.backendRequest) && + duration(self.request) != duration(''0s'') && duration(self.backendRequest) + > duration(self.request))' type: object x-kubernetes-validations: - message: RequestRedirect filter must not be used together with @@ -16289,6 +16408,48 @@ spec: type: object maxItems: 8 type: array + timeouts: + description: "Timeouts defines the timeouts that can be configured + for an HTTP request. \n Support: Extended \n " + properties: + backendRequest: + description: "BackendRequest specifies a timeout for an + individual request from the gateway to a backend. This + covers the time from when the request first starts being + sent from the gateway to when the full response has been + received from the backend. \n An entire client HTTP transaction + with a gateway, covered by the Request timeout, may result + in more than one call from the gateway to the destination + backend, for example, if automatic retries are supported. + \n Because the Request timeout encompasses the BackendRequest + timeout, the value of BackendRequest must be <= the value + of Request timeout. \n Support: Extended" + pattern: ^([0-9]{1,5}(h|m|s|ms)){1,4}$ + type: string + request: + description: "Request specifies the maximum duration for + a gateway to respond to an HTTP request. If the gateway + has not been able to respond before this deadline is met, + the gateway MUST return a timeout error. \n For example, + setting the `rules.timeouts.request` field to the value + `10s` in an `HTTPRoute` will cause a timeout if a client + request is taking longer than 10 seconds to complete. 
+ \n This timeout is intended to cover as close to the whole + request-response transaction as possible although an implementation + MAY choose to start the timeout after the entire request + stream has been received instead of immediately after + the transaction is initiated by the client. \n When this + field is unspecified, request timeout behavior is implementation-specific. + \n Support: Extended" + pattern: ^([0-9]{1,5}(h|m|s|ms)){1,4}$ + type: string + type: object + x-kubernetes-validations: + - message: backendRequest timeout cannot be longer than request + timeout + rule: '!(has(self.request) && has(self.backendRequest) && + duration(self.request) != duration(''0s'') && duration(self.backendRequest) + > duration(self.request))' type: object x-kubernetes-validations: - message: RequestRedirect filter must not be used together with diff --git a/examples/render/contour-gateway.yaml b/examples/render/contour-gateway.yaml index 751be52c470..fcc18267f46 100644 --- a/examples/render/contour-gateway.yaml +++ b/examples/render/contour-gateway.yaml @@ -418,6 +418,18 @@ spec: slashes from request URL paths. \n Contour's default is false." type: boolean + httpMaxConcurrentStreams: + description: Defines the value for SETTINGS_MAX_CONCURRENT_STREAMS + Envoy will advertise in the SETTINGS frame in HTTP/2 connections + and the limit for concurrent streams allowed for a peer + on a single HTTP/2 connection. It is recommended to not + set this lower than 100 but this field can be used to bound + resource usage by HTTP/2 connections and mitigate attacks + like CVE-2023-44487. The default value when this is not + set is unlimited. + format: int32 + minimum: 1 + type: integer maxRequestsPerConnection: description: Defines the maximum requests for downstream connections. If not specified, there is no limit. 
see https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/core/v3/protocol.proto#envoy-v3-api-msg-config-core-v3-httpprotocoloptions @@ -425,6 +437,17 @@ spec: format: int32 minimum: 1 type: integer + maxRequestsPerIOCycle: + description: Defines the limit on number of HTTP requests + that Envoy will process from a single connection in a single + I/O cycle. Requests over this limit are processed in subsequent + I/O cycles. Can be used as a mitigation for CVE-2023-44487 + when abusive traffic is detected. Configures the http.max_requests_per_io_cycle + Envoy runtime setting. The default value when this is not + set is no limit. + format: int32 + minimum: 1 + type: integer per-connection-buffer-limit-bytes: description: Defines the soft limit on size of the listener’s new connection read and write buffers in bytes. If unspecified, @@ -1614,6 +1637,13 @@ spec: Contour pods. the annotations for Prometheus will be appended or overwritten with predefined value. type: object + podLabels: + additionalProperties: + type: string + description: PodLabels defines labels to add to the Contour pods. + If there is a label with the same key as in `ContourDeploymentSpec.ResourceLabels`, + the one here has a higher priority. + type: object replicas: description: "Deprecated: Use `DeploymentSettings.Replicas` instead. \n Replicas is the desired number of Contour replicas. If if @@ -3603,6 +3633,15 @@ spec: type: object type: array type: object + overloadMaxHeapSize: + description: 'OverloadMaxHeapSize defines the maximum heap memory + of the envoy controlled by the overload manager. When the value + is greater than 0, the overload manager is enabled, and when + envoy reaches 95% of the maximum heap size, it performs a shrink + heap operation, When it reaches 98% of the maximum heap size, + Envoy Will stop accepting requests. 
More info: https://projectcontour.io/docs/main/config/overload-manager/' + format: int64 + type: integer podAnnotations: additionalProperties: type: string @@ -3610,6 +3649,13 @@ spec: Envoy pods. the annotations for Prometheus will be appended or overwritten with predefined value. type: object + podLabels: + additionalProperties: + type: string + description: PodLabels defines labels to add to the Envoy pods. + If there is a label with the same key as in `ContourDeploymentSpec.ResourceLabels`, + the one here has a higher priority. + type: object replicas: description: "Deprecated: Use `DeploymentSettings.Replicas` instead. \n Replicas is the desired number of Envoy replicas. If WorkloadType @@ -3848,6 +3894,18 @@ spec: duplicate slashes from request URL paths. \n Contour's default is false." type: boolean + httpMaxConcurrentStreams: + description: Defines the value for SETTINGS_MAX_CONCURRENT_STREAMS + Envoy will advertise in the SETTINGS frame in HTTP/2 + connections and the limit for concurrent streams allowed + for a peer on a single HTTP/2 connection. It is recommended + to not set this lower than 100 but this field can be + used to bound resource usage by HTTP/2 connections and + mitigate attacks like CVE-2023-44487. The default value + when this is not set is unlimited. + format: int32 + minimum: 1 + type: integer maxRequestsPerConnection: description: Defines the maximum requests for downstream connections. If not specified, there is no limit. see @@ -3856,6 +3914,18 @@ spec: format: int32 minimum: 1 type: integer + maxRequestsPerIOCycle: + description: Defines the limit on number of HTTP requests + that Envoy will process from a single connection in + a single I/O cycle. Requests over this limit are processed + in subsequent I/O cycles. Can be used as a mitigation + for CVE-2023-44487 when abusive traffic is detected. + Configures the http.max_requests_per_io_cycle Envoy + runtime setting. The default value when this is not + set is no limit. 
+ format: int32 + minimum: 1 + type: integer per-connection-buffer-limit-bytes: description: Defines the soft limit on size of the listener’s new connection read and write buffers in bytes. If unspecified, @@ -6166,8 +6236,15 @@ spec: type: object type: object requestHeadersPolicy: - description: The policy for managing request headers during - proxying. + description: "The policy for managing request headers during + proxying. \n You may dynamically rewrite the Host header to + be forwarded upstream to the content of a request header using + the below format \"%REQ(X-Header-Name)%\". If the value of + the header is empty, it is ignored. \n *NOTE: Pay attention + to the potential security implications of using this option. + Provided header must come from trusted source. \n **NOTE: + The header rewrite is only done while forwarding and has no + bearing on the routing decision." properties: remove: description: Remove specifies a list of HTTP header names @@ -8663,7 +8740,7 @@ spec: - --log-level info command: - envoy - image: docker.io/envoyproxy/envoy:v1.27.0 + image: docker.io/envoyproxy/envoy:v1.27.2 imagePullPolicy: IfNotPresent name: envoy env: @@ -9694,7 +9771,7 @@ spec: for each listener rule: 'self.all(l1, self.exists_one(l2, l1.port == l2.port && l1.protocol == l2.protocol && (has(l1.hostname) && has(l2.hostname) ? l1.hostname - == l2.hostname : true)))' + == l2.hostname : !has(l1.hostname) && !has(l2.hostname))))' required: - gatewayClassName - listeners @@ -10472,7 +10549,7 @@ spec: for each listener rule: 'self.all(l1, self.exists_one(l2, l1.port == l2.port && l1.protocol == l2.protocol && (has(l1.hostname) && has(l2.hostname) ? l1.hostname - == l2.hostname : true)))' + == l2.hostname : !has(l1.hostname) && !has(l2.hostname))))' required: - gatewayClassName - listeners @@ -14599,6 +14676,48 @@ spec: type: object maxItems: 8 type: array + timeouts: + description: "Timeouts defines the timeouts that can be configured + for an HTTP request. 
\n Support: Extended \n " + properties: + backendRequest: + description: "BackendRequest specifies a timeout for an + individual request from the gateway to a backend. This + covers the time from when the request first starts being + sent from the gateway to when the full response has been + received from the backend. \n An entire client HTTP transaction + with a gateway, covered by the Request timeout, may result + in more than one call from the gateway to the destination + backend, for example, if automatic retries are supported. + \n Because the Request timeout encompasses the BackendRequest + timeout, the value of BackendRequest must be <= the value + of Request timeout. \n Support: Extended" + pattern: ^([0-9]{1,5}(h|m|s|ms)){1,4}$ + type: string + request: + description: "Request specifies the maximum duration for + a gateway to respond to an HTTP request. If the gateway + has not been able to respond before this deadline is met, + the gateway MUST return a timeout error. \n For example, + setting the `rules.timeouts.request` field to the value + `10s` in an `HTTPRoute` will cause a timeout if a client + request is taking longer than 10 seconds to complete. + \n This timeout is intended to cover as close to the whole + request-response transaction as possible although an implementation + MAY choose to start the timeout after the entire request + stream has been received instead of immediately after + the transaction is initiated by the client. \n When this + field is unspecified, request timeout behavior is implementation-specific. 
+ \n Support: Extended" + pattern: ^([0-9]{1,5}(h|m|s|ms)){1,4}$ + type: string + type: object + x-kubernetes-validations: + - message: backendRequest timeout cannot be longer than request + timeout + rule: '!(has(self.request) && has(self.backendRequest) && + duration(self.request) != duration(''0s'') && duration(self.backendRequest) + > duration(self.request))' type: object x-kubernetes-validations: - message: RequestRedirect filter must not be used together with @@ -16998,6 +17117,48 @@ spec: type: object maxItems: 8 type: array + timeouts: + description: "Timeouts defines the timeouts that can be configured + for an HTTP request. \n Support: Extended \n " + properties: + backendRequest: + description: "BackendRequest specifies a timeout for an + individual request from the gateway to a backend. This + covers the time from when the request first starts being + sent from the gateway to when the full response has been + received from the backend. \n An entire client HTTP transaction + with a gateway, covered by the Request timeout, may result + in more than one call from the gateway to the destination + backend, for example, if automatic retries are supported. + \n Because the Request timeout encompasses the BackendRequest + timeout, the value of BackendRequest must be <= the value + of Request timeout. \n Support: Extended" + pattern: ^([0-9]{1,5}(h|m|s|ms)){1,4}$ + type: string + request: + description: "Request specifies the maximum duration for + a gateway to respond to an HTTP request. If the gateway + has not been able to respond before this deadline is met, + the gateway MUST return a timeout error. \n For example, + setting the `rules.timeouts.request` field to the value + `10s` in an `HTTPRoute` will cause a timeout if a client + request is taking longer than 10 seconds to complete. 
+ \n This timeout is intended to cover as close to the whole + request-response transaction as possible although an implementation + MAY choose to start the timeout after the entire request + stream has been received instead of immediately after + the transaction is initiated by the client. \n When this + field is unspecified, request timeout behavior is implementation-specific. + \n Support: Extended" + pattern: ^([0-9]{1,5}(h|m|s|ms)){1,4}$ + type: string + type: object + x-kubernetes-validations: + - message: backendRequest timeout cannot be longer than request + timeout + rule: '!(has(self.request) && has(self.backendRequest) && + duration(self.request) != duration(''0s'') && duration(self.backendRequest) + > duration(self.request))' type: object x-kubernetes-validations: - message: RequestRedirect filter must not be used together with diff --git a/examples/render/contour.yaml b/examples/render/contour.yaml index 6cefe63013d..fac0faa5e6e 100644 --- a/examples/render/contour.yaml +++ b/examples/render/contour.yaml @@ -415,6 +415,18 @@ spec: slashes from request URL paths. \n Contour's default is false." type: boolean + httpMaxConcurrentStreams: + description: Defines the value for SETTINGS_MAX_CONCURRENT_STREAMS + Envoy will advertise in the SETTINGS frame in HTTP/2 connections + and the limit for concurrent streams allowed for a peer + on a single HTTP/2 connection. It is recommended to not + set this lower than 100 but this field can be used to bound + resource usage by HTTP/2 connections and mitigate attacks + like CVE-2023-44487. The default value when this is not + set is unlimited. + format: int32 + minimum: 1 + type: integer maxRequestsPerConnection: description: Defines the maximum requests for downstream connections. If not specified, there is no limit. 
see https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/core/v3/protocol.proto#envoy-v3-api-msg-config-core-v3-httpprotocoloptions @@ -422,6 +434,17 @@ spec: format: int32 minimum: 1 type: integer + maxRequestsPerIOCycle: + description: Defines the limit on number of HTTP requests + that Envoy will process from a single connection in a single + I/O cycle. Requests over this limit are processed in subsequent + I/O cycles. Can be used as a mitigation for CVE-2023-44487 + when abusive traffic is detected. Configures the http.max_requests_per_io_cycle + Envoy runtime setting. The default value when this is not + set is no limit. + format: int32 + minimum: 1 + type: integer per-connection-buffer-limit-bytes: description: Defines the soft limit on size of the listener’s new connection read and write buffers in bytes. If unspecified, @@ -1611,6 +1634,13 @@ spec: Contour pods. the annotations for Prometheus will be appended or overwritten with predefined value. type: object + podLabels: + additionalProperties: + type: string + description: PodLabels defines labels to add to the Contour pods. + If there is a label with the same key as in `ContourDeploymentSpec.ResourceLabels`, + the one here has a higher priority. + type: object replicas: description: "Deprecated: Use `DeploymentSettings.Replicas` instead. \n Replicas is the desired number of Contour replicas. If if @@ -3600,6 +3630,15 @@ spec: type: object type: array type: object + overloadMaxHeapSize: + description: 'OverloadMaxHeapSize defines the maximum heap memory + of the envoy controlled by the overload manager. When the value + is greater than 0, the overload manager is enabled, and when + envoy reaches 95% of the maximum heap size, it performs a shrink + heap operation, When it reaches 98% of the maximum heap size, + Envoy Will stop accepting requests. 
More info: https://projectcontour.io/docs/main/config/overload-manager/' + format: int64 + type: integer podAnnotations: additionalProperties: type: string @@ -3607,6 +3646,13 @@ spec: Envoy pods. the annotations for Prometheus will be appended or overwritten with predefined value. type: object + podLabels: + additionalProperties: + type: string + description: PodLabels defines labels to add to the Envoy pods. + If there is a label with the same key as in `ContourDeploymentSpec.ResourceLabels`, + the one here has a higher priority. + type: object replicas: description: "Deprecated: Use `DeploymentSettings.Replicas` instead. \n Replicas is the desired number of Envoy replicas. If WorkloadType @@ -3845,6 +3891,18 @@ spec: duplicate slashes from request URL paths. \n Contour's default is false." type: boolean + httpMaxConcurrentStreams: + description: Defines the value for SETTINGS_MAX_CONCURRENT_STREAMS + Envoy will advertise in the SETTINGS frame in HTTP/2 + connections and the limit for concurrent streams allowed + for a peer on a single HTTP/2 connection. It is recommended + to not set this lower than 100 but this field can be + used to bound resource usage by HTTP/2 connections and + mitigate attacks like CVE-2023-44487. The default value + when this is not set is unlimited. + format: int32 + minimum: 1 + type: integer maxRequestsPerConnection: description: Defines the maximum requests for downstream connections. If not specified, there is no limit. see @@ -3853,6 +3911,18 @@ spec: format: int32 minimum: 1 type: integer + maxRequestsPerIOCycle: + description: Defines the limit on number of HTTP requests + that Envoy will process from a single connection in + a single I/O cycle. Requests over this limit are processed + in subsequent I/O cycles. Can be used as a mitigation + for CVE-2023-44487 when abusive traffic is detected. + Configures the http.max_requests_per_io_cycle Envoy + runtime setting. The default value when this is not + set is no limit. 
+ format: int32 + minimum: 1 + type: integer per-connection-buffer-limit-bytes: description: Defines the soft limit on size of the listener’s new connection read and write buffers in bytes. If unspecified, @@ -6163,8 +6233,15 @@ spec: type: object type: object requestHeadersPolicy: - description: The policy for managing request headers during - proxying. + description: "The policy for managing request headers during + proxying. \n You may dynamically rewrite the Host header to + be forwarded upstream to the content of a request header using + the below format \"%REQ(X-Header-Name)%\". If the value of + the header is empty, it is ignored. \n *NOTE: Pay attention + to the potential security implications of using this option. + Provided header must come from trusted source. \n **NOTE: + The header rewrite is only done while forwarding and has no + bearing on the routing decision." properties: remove: description: Remove specifies a list of HTTP header names @@ -8660,7 +8737,7 @@ spec: - --log-level info command: - envoy - image: docker.io/envoyproxy/envoy:v1.27.0 + image: docker.io/envoyproxy/envoy:v1.27.2 imagePullPolicy: IfNotPresent name: envoy env: diff --git a/go.mod b/go.mod index a184ab01fd5..6f378412254 100644 --- a/go.mod +++ b/go.mod @@ -7,31 +7,32 @@ require ( github.com/Masterminds/semver/v3 v3.2.1 github.com/ahmetb/gen-crd-api-reference-docs v0.3.0 github.com/alecthomas/kingpin/v2 v2.3.2 - github.com/bombsimon/logrusr/v2 v2.0.1 - github.com/cert-manager/cert-manager v1.13.0 + github.com/bombsimon/logrusr/v4 v4.0.0 + github.com/cert-manager/cert-manager v1.13.1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc + github.com/distribution/reference v0.5.0 github.com/envoyproxy/go-control-plane v0.11.1 github.com/go-logr/logr v1.2.4 - github.com/google/go-cmp v0.5.9 + github.com/google/go-cmp v0.6.0 github.com/google/go-github/v48 v48.2.0 + github.com/google/uuid v1.3.1 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/novln/docker-parser v1.0.0 - github.com/onsi/ginkgo/v2 v2.12.0 - github.com/onsi/gomega v1.27.10 + github.com/onsi/ginkgo/v2 v2.13.0 + github.com/onsi/gomega v1.28.0 github.com/projectcontour/yages v0.1.0 - github.com/prometheus/client_golang v1.16.0 - github.com/prometheus/client_model v0.4.0 + github.com/prometheus/client_golang v1.17.0 + github.com/prometheus/client_model v0.5.0 github.com/prometheus/common v0.44.0 github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.8.4 github.com/tsaarni/certyaml v0.9.2 - github.com/vektra/mockery/v2 v2.33.2 + github.com/vektra/mockery/v2 v2.35.4 go.uber.org/automaxprocs v1.5.3 - golang.org/x/oauth2 v0.12.0 + golang.org/x/oauth2 v0.13.0 gonum.org/v1/plot v0.14.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20230911183012-2d3300fd4832 - google.golang.org/grpc v1.58.1 + google.golang.org/grpc v1.58.3 google.golang.org/protobuf v1.31.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.28.2 @@ -41,7 +42,7 @@ require ( k8s.io/klog/v2 v2.100.1 sigs.k8s.io/controller-runtime v0.16.2 sigs.k8s.io/controller-tools v0.13.0 - sigs.k8s.io/gateway-api v0.8.0 + sigs.k8s.io/gateway-api v0.8.1 sigs.k8s.io/kustomize/kyaml v0.14.3 ) @@ -55,7 +56,7 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chigopher/pathlib v0.15.0 // indirect github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 // indirect - github.com/emicklei/go-restful/v3 v3.10.2 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect @@ -79,8 +80,8 @@ require ( github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect - github.com/google/uuid v1.3.1 // indirect github.com/hashicorp/hcl 
v1.0.1-vault-5 // indirect + github.com/huandu/xstrings v1.4.0 // indirect github.com/iancoleman/strcase v0.2.0 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -98,10 +99,11 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect github.com/pelletier/go-toml/v2 v2.0.6 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/procfs v0.11.1 // indirect github.com/rs/zerolog v1.29.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spf13/afero v1.9.3 // indirect @@ -114,13 +116,13 @@ require ( github.com/subosito/gotenv v1.4.2 // indirect github.com/tsaarni/x500dn v1.0.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect - golang.org/x/crypto v0.13.0 // indirect + golang.org/x/crypto v0.14.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/image v0.11.0 // indirect golang.org/x/mod v0.12.0 // indirect - golang.org/x/net v0.15.0 // indirect - golang.org/x/sys v0.12.0 // indirect - golang.org/x/term v0.12.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.13.0 // indirect diff --git a/go.sum b/go.sum index 4d59b3a776a..4ae0b53e4cc 100644 --- a/go.sum +++ b/go.sum @@ -59,15 +59,15 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 
h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bombsimon/logrusr/v2 v2.0.1 h1:1VgxVNQMCvjirZIYaT9JYn6sAVGVEcNtRE0y4mvaOAM= -github.com/bombsimon/logrusr/v2 v2.0.1/go.mod h1:ByVAX+vHdLGAfdroiMg6q0zgq2FODY2lc5YJvzmOJio= +github.com/bombsimon/logrusr/v4 v4.0.0 h1:Pm0InGphX0wMhPqC02t31onlq9OVyJ98eP/Vh63t1Oo= +github.com/bombsimon/logrusr/v4 v4.0.0/go.mod h1:pjfHC5e59CvjTBIU3V3sGhFWFAnsnhOR03TRc6im0l8= github.com/campoy/embedmd v1.0.0 h1:V4kI2qTJJLf4J29RzI/MAt2c3Bl4dQSYPuflzwFH2hY= github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= -github.com/cert-manager/cert-manager v1.13.0 h1:P9rWfCgzr2wjpQcZtG5iWdQsOJgpHNwR2WyUNDfs47w= -github.com/cert-manager/cert-manager v1.13.0/go.mod h1:AHwJ0l63L2EoD2G5qz3blEd+8boZcqgWf6dBFA4kZbc= +github.com/cert-manager/cert-manager v1.13.1 h1:hRST6l3G/Y3IDnn3H4zb6unDrZmtTPqaaz3TTGgfXNE= +github.com/cert-manager/cert-manager v1.13.1/go.mod h1:pJe/sqGZ6yX0kYcsAv3e2EQH+xn8Ag8WOLGl/qYWAps= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chigopher/pathlib v0.15.0 h1:1pg96WL3iC1/YyWV4UJSl3E0GBf4B+h5amBtsbAAieY= @@ -88,8 +88,10 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew 
v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJIZIqlJLqE= -github.com/emicklei/go-restful/v3 v3.10.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -126,7 +128,6 @@ github.com/go-latex/latex v0.0.0-20230307184459-12ec69307ad9/go.mod h1:gWuR/CrFD github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.0.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -193,8 +194,9 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp 
v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github/v48 v48.2.0 h1:68puzySE6WqUY9KWmpOsDEQfDZsso98rT6pZcz9HqcE= github.com/google/go-github/v48 v48.2.0/go.mod h1:dDlehKBDo850ZPvCTK0sEqTCVWcrGl2LcDiajkYi89Y= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= @@ -234,6 +236,8 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= +github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -254,7 +258,6 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -290,14 +293,14 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/novln/docker-parser v1.0.0 h1:PjEBd9QnKixcWczNGyEdfUrP6GR0YUilAqG7Wksg3uc= -github.com/novln/docker-parser v1.0.0/go.mod h1:oCeM32fsoUwkwByB5wVjsrsVQySzPWkl3JdlTn1txpE= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.12.0 h1:UIVDowFPwpg6yMUpPjGkYvf06K3RAiJXUhCxEwQVHRI= -github.com/onsi/ginkgo/v2 v2.12.0/go.mod h1:ZNEzXISYlqpb8S36iN71ifqLi3vVD1rVJGvWRCJOUpQ= -github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= -github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.28.0 h1:i2rg/p9n/UqIDAMFUJ6qIUUMcsqOuUHgbpbu235Vr1c= +github.com/onsi/gomega v1.28.0/go.mod h1:A1H2JE76sI14WIP57LMKj7FVfCHx3g3BcZVjJG8bjX8= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opentracing/opentracing-go v1.1.0/go.mod 
h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= @@ -311,15 +314,15 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/projectcontour/yages v0.1.0 h1:vcFpregOq5TVF0/AXLive1MY4CVMDkgL7/+qbUeIbDs= github.com/projectcontour/yages v0.1.0/go.mod h1:pcJrPa3dP17HwGj2YOfBZ4w5WmC1rSpv/X/sV4wauSw= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= +github.com/prometheus/procfs v0.11.1/go.mod 
h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= @@ -330,7 +333,6 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk= @@ -367,8 +369,8 @@ github.com/tsaarni/certyaml v0.9.2 h1:LoRTuajwjJ1CHAJiMv5cpOtwQ05207Oqe6cT9D7WDa github.com/tsaarni/certyaml v0.9.2/go.mod h1:s+ErAC1wZ32r1ihSULvR7HXedKKN5HZasdb8Cj8gT9E= github.com/tsaarni/x500dn v1.0.0 h1:LvaWTkqRpse4VHBhB5uwf3wytokK4vF9IOyNAEyiA+U= github.com/tsaarni/x500dn v1.0.0/go.mod h1:QaHa3EcUKC4dfCAZmj8+ZRGLKukWgpGv9H3oOCsAbcE= -github.com/vektra/mockery/v2 v2.33.2 h1:znIUwQ3FxnA5jvPy8irYBoiIqMZhuOJhoPOJYNoTJqU= -github.com/vektra/mockery/v2 v2.33.2/go.mod h1:9lREs4VEeQiUS3rizYQx1saxHu2JiIhThP0q9+fDegM= +github.com/vektra/mockery/v2 v2.35.4 h1:IGD/3KQNKkLw1MiWh5Zi5XQse2h17j6ygSYK6ky9ODY= +github.com/vektra/mockery/v2 v2.35.4/go.mod h1:diB13hxXG6QrTR0ol2Rk8s2dRMftzvExSvPDKr+IYKk= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/yuin/goldmark v1.1.25/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -399,8 +401,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -475,8 +477,8 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -486,8 +488,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= -golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= +golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -512,7 +514,6 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -536,7 +537,6 @@ golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210608053332-aa57babbf139/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -547,13 +547,13 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.12.0 
h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU= -golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -720,8 +720,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.58.1 h1:OL+Vz23DTtrrldqHK49FUOPHyY75rvFqJfXC84NYW58= -google.golang.org/grpc v1.58.1/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -795,8 +795,8 @@ sigs.k8s.io/controller-runtime v0.16.2 h1:mwXAVuEk3EQf478PQwQ48zGOXvW27UJc8NHktQ sigs.k8s.io/controller-runtime v0.16.2/go.mod h1:vpMu3LpI5sYWtujJOa2uPK61nB5rbwlN7BAB8aSLvGU= sigs.k8s.io/controller-tools v0.13.0 h1:NfrvuZ4bxyolhDBt/rCZhDnx3M2hzlhgo5n3Iv2RykI= sigs.k8s.io/controller-tools v0.13.0/go.mod 
h1:5vw3En2NazbejQGCeWKRrE7q4P+CW8/klfVqP8QZkgA= -sigs.k8s.io/gateway-api v0.8.0 h1:isQQ3Jx2qFP7vaA3ls0846F0Amp9Eq14P08xbSwVbQg= -sigs.k8s.io/gateway-api v0.8.0/go.mod h1:okOnjPNBFbIS/Rw9kAhuIUaIkLhTKEu+ARIuXk2dgaM= +sigs.k8s.io/gateway-api v0.8.1 h1:Bo4NMAQFYkQZnHXOfufbYwbPW7b3Ic5NjpbeW6EJxuU= +sigs.k8s.io/gateway-api v0.8.1/go.mod h1:0PteDrsrgkRmr13nDqFWnev8tOysAVrwnvfFM55tSVg= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/kyaml v0.14.3 h1:WpabVAKZe2YEp/irTSHwD6bfjwZnTtSDewd2BVJGMZs= diff --git a/hack/codespell.sh b/hack/codespell.sh index d983005037e..b83cc4f72fe 100755 --- a/hack/codespell.sh +++ b/hack/codespell.sh @@ -13,7 +13,7 @@ if command -v docker >/dev/null; then --volume $(pwd):/workdir \ --workdir=/workdir \ --entrypoint=/usr/local/bin/codespell \ - ghcr.io/codespell-project/actions-codespell/stable:v1.0 "$@" + ghcr.io/codespell-project/actions-codespell/stable:v2.0 "$@" fi cat <)%" +var hostRewriteHeaderRegex = regexp.MustCompile(`%REQ\(([A-Za-z0-9-]+)\)%`) + // retryOn transforms a slice of retry on values to a comma-separated string. // CRD validation ensures that all retry on values are valid. func retryOn(ron []contour_api_v1.RetryOn) string { @@ -127,6 +130,11 @@ func headersPolicyService(defaultPolicy *HeadersPolicy, policy *contour_api_v1.H return nil, fmt.Errorf("rewriting %q header is not supported", key) } if len(userPolicy.HostRewrite) == 0 { + // check for the hostRewriteHeader on the service. Return error if set since this + // is not supported on envoy. 
+ if HostRewriteHeader := extractHostRewriteHeaderValue(v); HostRewriteHeader != "" { + return nil, fmt.Errorf("rewriting %q host header with dynamic value is not supported on service", key) + } userPolicy.HostRewrite = v } continue @@ -164,6 +172,7 @@ func headersPolicyRoute(policy *contour_api_v1.HeadersPolicy, allowHostRewrite b set := make(map[string]string, len(policy.Set)) hostRewrite := "" + hostRewriteHeader := "" for _, entry := range policy.Set { key := http.CanonicalHeaderKey(entry.Name) if _, ok := set[key]; ok { @@ -173,6 +182,10 @@ func headersPolicyRoute(policy *contour_api_v1.HeadersPolicy, allowHostRewrite b if !allowHostRewrite { return nil, fmt.Errorf("rewriting %q header is not supported", key) } + if extractedHostRewriteHeader := extractHostRewriteHeaderValue(entry.Value); extractedHostRewriteHeader != "" { + hostRewriteHeader = http.CanonicalHeaderKey(extractedHostRewriteHeader) + continue + } hostRewrite = entry.Value continue } @@ -203,12 +216,24 @@ func headersPolicyRoute(policy *contour_api_v1.HeadersPolicy, allowHostRewrite b } return &HeadersPolicy{ - Set: set, - HostRewrite: hostRewrite, - Remove: rl, + Set: set, + HostRewrite: hostRewrite, + HostRewriteHeader: hostRewriteHeader, + Remove: rl, }, nil } +// extractHostRewriteHeaderValue returns the value of the header +func extractHostRewriteHeaderValue(s string) string { + matches := hostRewriteHeaderRegex.FindStringSubmatch(s) + + if len(matches) == 2 { + return strings.TrimSpace(matches[1]) + } + + return "" +} + // headersPolicyGatewayAPI builds a *HeaderPolicy for the supplied HTTPHeaderFilter. // TODO: Take care about the order of operators once https://github.com/kubernetes-sigs/gateway-api/issues/480 was solved. 
func headersPolicyGatewayAPI(hf *gatewayapi_v1beta1.HTTPHeaderFilter, headerPolicyType string) (*HeadersPolicy, error) { diff --git a/internal/dag/policy_test.go b/internal/dag/policy_test.go index f222ff109e2..c58f49b7087 100644 --- a/internal/dag/policy_test.go +++ b/internal/dag/policy_test.go @@ -15,6 +15,7 @@ package dag import ( "errors" + "fmt" "io" "testing" "time" @@ -1249,6 +1250,15 @@ func TestValidateHeaderAlteration(t *testing.T) { "K-Foo": "100%%", }, }, + }, { + name: "Host header rewrite via dynamic header", + in: &contour_api_v1.HeadersPolicy{ + Set: []contour_api_v1.HeaderValue{{ + Name: "Host", + Value: "%REQ(foo)%", + }}, + }, + wantErr: fmt.Errorf("rewriting \"Host\" header is not supported"), }} for _, test := range tests { @@ -1259,3 +1269,124 @@ func TestValidateHeaderAlteration(t *testing.T) { }) } } + +func TestExtractHeaderValue(t *testing.T) { + tests := map[string]string{ + "%REQ(X-Header-Name)%": "X-Header-Name", + "%req(X-Header-Name)%": "", + "%REQ( Content-Type )%": "", + "REQ(Content-Type)": "", + "%REQ(Content-Type%": "", + "SomeOtherValue": "", + } + + for input, expected := range tests { + t.Run(input, func(t *testing.T) { + actual := extractHostRewriteHeaderValue(input) + if actual != expected { + t.Errorf("For input %q, expected %q, got %q", input, expected, actual) + } + }) + } +} + +func TestHeadersPolicyRoute(t *testing.T) { + tests := []struct { + name string + policy *contour_api_v1.HeadersPolicy + allowRewrite bool + dynHeaders map[string]string + expected *HeadersPolicy + expectedErr error + }{ + { + name: "nil policy", + policy: nil, + expected: nil, + }, + { + name: "duplicate set headers", + policy: &contour_api_v1.HeadersPolicy{ + Set: []contour_api_v1.HeaderValue{{Name: "X-Header", Value: "Test"}, {Name: "X-Header", Value: "Test2"}}, + }, + expectedErr: fmt.Errorf("duplicate header addition: %q", "X-Header"), + }, + { + name: "host rewrite not allowed", + policy: &contour_api_v1.HeadersPolicy{ + Set: 
[]contour_api_v1.HeaderValue{{Name: "Host", Value: "Test"}}, + }, + allowRewrite: false, + expectedErr: fmt.Errorf("rewriting %q header is not supported", "Host"), + }, + { + name: "host rewrite allowed", + policy: &contour_api_v1.HeadersPolicy{ + Set: []contour_api_v1.HeaderValue{{Name: "Host", Value: "Test"}}, + }, + allowRewrite: true, + expected: &HeadersPolicy{ + HostRewrite: "Test", + Remove: nil, + }, + }, + { + name: "host rewrite allowed, by header", + policy: &contour_api_v1.HeadersPolicy{ + Set: []contour_api_v1.HeaderValue{{Name: "Host", Value: "%REQ(Test)%"}}, + }, + allowRewrite: true, + expected: &HeadersPolicy{ + HostRewrite: "", + HostRewriteHeader: "Test", + Remove: nil, + }, + }, + { + name: "host rewrite allowed, by header. invalid", + policy: &contour_api_v1.HeadersPolicy{ + Set: []contour_api_v1.HeaderValue{{Name: "Host", Value: "%REQ (Test"}}, + }, + allowRewrite: true, + expected: &HeadersPolicy{ + HostRewrite: "%REQ (Test", + HostRewriteHeader: "", + Remove: nil, + }, + }, + { + name: "invalid header name", + policy: &contour_api_v1.HeadersPolicy{ + Set: []contour_api_v1.HeaderValue{{Name: " Invalid-Header ", Value: "Test"}}, + }, + expectedErr: fmt.Errorf(`invalid set header " Invalid-Header ": [a valid HTTP header must consist of alphanumeric characters or '-' (e.g. 
'X-Header-Name', regex used for validation is '[-A-Za-z0-9]+')]`), + }, + { + name: "duplicate remove headers", + policy: &contour_api_v1.HeadersPolicy{ + Remove: []string{"X-Header", "X-Header"}, + }, + expectedErr: fmt.Errorf("duplicate header removal: %q", "X-Header"), + }, + { + name: "valid set and remove headers", + policy: &contour_api_v1.HeadersPolicy{ + Set: []contour_api_v1.HeaderValue{{Name: "X-Header", Value: "Test"}}, + Remove: []string{"Y-Header"}, + }, + expected: &HeadersPolicy{ + Set: map[string]string{"X-Header": "Test"}, + HostRewrite: "", + Remove: []string{"Y-Header"}, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + result, err := headersPolicyRoute(tc.policy, tc.allowRewrite, tc.dynHeaders) + assert.Equal(t, tc.expected, result) + assert.Equal(t, tc.expectedErr, err) + }) + } +} diff --git a/internal/dag/status_test.go b/internal/dag/status_test.go index 7031e921447..124cafb5d05 100644 --- a/internal/dag/status_test.go +++ b/internal/dag/status_test.go @@ -5137,20 +5137,7 @@ func validGatewayStatusUpdate(listenerName string, listenerProtocol gatewayapi_v Name: gatewayapi_v1beta1.SectionName(listenerName), AttachedRoutes: int32(attachedRoutes), SupportedKinds: supportedKinds, - Conditions: []metav1.Condition{ - { - Type: string(gatewayapi_v1beta1.ListenerConditionProgrammed), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonProgrammed), - Message: "Valid listener", - }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, - }, + Conditions: listenerValidConditions(), }, }, }, @@ -6340,12 +6327,7 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Reason: "Invalid", Message: "Invalid listener, see other listener conditions for details", }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: 
metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, + listenerAcceptedCondition(), { Type: string(gatewayapi_v1beta1.ListenerConditionResolvedRefs), Status: metav1.ConditionFalse, @@ -6497,12 +6479,7 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Reason: "Invalid", Message: "Invalid listener, see other listener conditions for details", }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, + listenerAcceptedCondition(), { Type: string(gatewayapi_v1beta1.ListenerConditionResolvedRefs), Status: metav1.ConditionFalse, @@ -6598,12 +6575,7 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Reason: "Invalid", Message: "Invalid listener, see other listener conditions for details", }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, + listenerAcceptedCondition(), { Type: string(gatewayapi_v1beta1.ListenerConditionResolvedRefs), Status: metav1.ConditionFalse, @@ -6699,12 +6671,7 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Reason: "Invalid", Message: "Invalid listener, see other listener conditions for details", }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, + listenerAcceptedCondition(), { Type: string(gatewayapi_v1beta1.ListenerConditionResolvedRefs), Status: metav1.ConditionFalse, @@ -6800,12 +6767,7 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Reason: "Invalid", Message: "Invalid listener, see other listener conditions for details", }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: 
metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, + listenerAcceptedCondition(), { Type: string(gatewayapi_v1beta1.ListenerConditionResolvedRefs), Status: metav1.ConditionFalse, @@ -6902,12 +6864,7 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Reason: "Invalid", Message: "Invalid listener, see other listener conditions for details", }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, + listenerAcceptedCondition(), { Type: string(gatewayapi_v1beta1.ListenerConditionResolvedRefs), Status: metav1.ConditionFalse, @@ -7135,20 +7092,7 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Kind: "GRPCRoute", }, }, - Conditions: []metav1.Condition{ - { - Type: string(gatewayapi_v1beta1.ListenerConditionProgrammed), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonProgrammed), - Message: "Valid listener", - }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, - }, + Conditions: listenerValidConditions(), }, "listener-2": { Name: gatewayapi_v1beta1.SectionName("listener-2"), @@ -7163,20 +7107,7 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Kind: "GRPCRoute", }, }, - Conditions: []metav1.Condition{ - { - Type: string(gatewayapi_v1beta1.ListenerConditionProgrammed), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonProgrammed), - Message: "Valid listener", - }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, - }, + Conditions: listenerValidConditions(), }, }, }, @@ -7277,20 +7208,7 @@ 
func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Kind: "GRPCRoute", }, }, - Conditions: []metav1.Condition{ - { - Type: string(gatewayapi_v1beta1.ListenerConditionProgrammed), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonProgrammed), - Message: "Valid listener", - }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, - }, + Conditions: listenerValidConditions(), }, "listener-2": { Name: gatewayapi_v1beta1.SectionName("listener-2"), @@ -7305,20 +7223,7 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Kind: "GRPCRoute", }, }, - Conditions: []metav1.Condition{ - { - Type: string(gatewayapi_v1beta1.ListenerConditionProgrammed), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonProgrammed), - Message: "Valid listener", - }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, - }, + Conditions: listenerValidConditions(), }, }, }, @@ -7408,20 +7313,7 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Kind: "GRPCRoute", }, }, - Conditions: []metav1.Condition{ - { - Type: string(gatewayapi_v1beta1.ListenerConditionProgrammed), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonProgrammed), - Message: "Valid listener", - }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, - }, + Conditions: listenerValidConditions(), }, }, }, @@ -7511,20 +7403,7 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Kind: "GRPCRoute", }, }, - Conditions: []metav1.Condition{ - { - Type: string(gatewayapi_v1beta1.ListenerConditionProgrammed), - 
Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonProgrammed), - Message: "Valid listener", - }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, - }, + Conditions: listenerValidConditions(), }, }, }, @@ -7678,20 +7557,7 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Kind: "GRPCRoute", }, }, - Conditions: []metav1.Condition{ - { - Type: string(gatewayapi_v1beta1.ListenerConditionProgrammed), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonProgrammed), - Message: "Valid listener", - }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, - }, + Conditions: listenerValidConditions(), }, }, }, @@ -8420,20 +8286,7 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Kind: "GRPCRoute", }, }, - Conditions: []metav1.Condition{ - { - Type: string(gatewayapi_v1beta1.ListenerConditionProgrammed), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonProgrammed), - Message: "Valid listener", - }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, - }, + Conditions: listenerValidConditions(), }, }, }}, @@ -8487,12 +8340,7 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Reason: "Invalid", Message: "Invalid listener, see other listener conditions for details", }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, + listenerAcceptedCondition(), { Type: 
string(gatewayapi_v1beta1.ListenerConditionResolvedRefs), Status: metav1.ConditionFalse, @@ -8550,12 +8398,7 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Reason: "Invalid", Message: "Invalid listener, see other listener conditions for details", }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, + listenerAcceptedCondition(), { Type: string(gatewayapi_v1beta1.ListenerConditionResolvedRefs), Status: metav1.ConditionFalse, @@ -8613,12 +8456,7 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Reason: "Invalid", Message: "Invalid listener, see other listener conditions for details", }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, + listenerAcceptedCondition(), { Type: string(gatewayapi_v1beta1.ListenerConditionResolvedRefs), Status: metav1.ConditionFalse, @@ -8691,12 +8529,7 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Reason: "Invalid", Message: "Invalid listener, see other listener conditions for details", }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, + listenerAcceptedCondition(), { Type: string(gatewayapi_v1beta1.ListenerConditionResolvedRefs), Status: metav1.ConditionFalse, @@ -8765,12 +8598,7 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Reason: "Invalid", Message: "Invalid listener, see other listener conditions for details", }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, + listenerAcceptedCondition(), { Type: 
string(gatewayapi_v1beta1.ListenerConditionResolvedRefs), Status: metav1.ConditionFalse, @@ -8831,6 +8659,7 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Reason: string(gatewayapi_v1beta1.ListenerReasonUnsupportedProtocol), Message: "Listener protocol \"invalid\" is unsupported, must be one of HTTP, HTTPS, TLS, TCP or projectcontour.io/https", }, + listenerResolvedRefsCondition(), }, }, }, @@ -8888,12 +8717,8 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Reason: "Invalid", Message: "Listener.TLS is required when protocol is \"HTTPS\".", }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, + listenerAcceptedCondition(), + listenerResolvedRefsCondition(), }, }, }, @@ -8951,12 +8776,8 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Reason: "Invalid", Message: "Listener.TLS is required when protocol is \"TLS\".", }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, + listenerAcceptedCondition(), + listenerResolvedRefsCondition(), }, }, }, @@ -9020,12 +8841,8 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Reason: "Invalid", Message: "Listener.TLS.CertificateRefs cannot be defined when Listener.TLS.Mode is \"Passthrough\".", }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, + listenerAcceptedCondition(), + listenerResolvedRefsCondition(), }, }, }, @@ -9086,12 +8903,8 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Reason: string(gatewayapi_v1beta1.ListenerReasonInvalid), Message: "Listener.TLS.CertificateRefs must contain exactly one entry", }, - { - Type: 
string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, + listenerAcceptedCondition(), + listenerResolvedRefsCondition(), }, }, }, @@ -9152,12 +8965,8 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Reason: "Invalid", Message: "Listener.TLS.Mode must be \"Terminate\" when protocol is \"HTTPS\".", }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, + listenerAcceptedCondition(), + listenerResolvedRefsCondition(), }, }, }, @@ -9217,12 +9026,8 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Reason: "Invalid", Message: "Listener.AllowedRoutes.Namespaces.Selector is required when Listener.AllowedRoutes.Namespaces.From is set to \"Selector\".", }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, + listenerAcceptedCondition(), + listenerResolvedRefsCondition(), }, }, }, @@ -9288,12 +9093,8 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Reason: "Invalid", Message: "Error parsing Listener.AllowedRoutes.Namespaces.Selector: values: Invalid value: []string{\"error\"}: values set must be empty for exists and does not exist.", }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, + listenerAcceptedCondition(), + listenerResolvedRefsCondition(), }, }, }, @@ -9353,12 +9154,8 @@ func TestGatewayAPIHTTPRouteDAGStatus(t *testing.T) { Reason: "Invalid", Message: "Listener.AllowedRoutes.Namespaces.Selector must specify at least one MatchLabel or MatchExpression.", }, - { - Type: 
string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, + listenerAcceptedCondition(), + listenerResolvedRefsCondition(), }, }, }, @@ -9977,18 +9774,14 @@ func TestGatewayAPITLSRouteDAGStatus(t *testing.T) { }, }, Conditions: []metav1.Condition{ - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, { Type: string(gatewayapi_v1beta1.ListenerConditionProgrammed), Status: metav1.ConditionFalse, Reason: "Invalid", Message: `Listener.TLS.Mode must be "Terminate" or "Passthrough".`, }, + listenerAcceptedCondition(), + listenerResolvedRefsCondition(), }, }, }, @@ -11049,12 +10842,7 @@ func TestGatewayAPITCPRouteDAGStatus(t *testing.T) { Reason: "Invalid", Message: "Invalid listener, see other listener conditions for details", }, - { - Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), - Status: metav1.ConditionTrue, - Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), - Message: "Listener accepted", - }, + listenerAcceptedCondition(), { Type: string(gatewayapi_v1beta1.ListenerConditionResolvedRefs), Status: metav1.ConditionFalse, @@ -11219,3 +11007,38 @@ func routeAcceptedTCPRouteCondition() metav1.Condition { Message: "Accepted TCPRoute", } } + +func listenerProgrammedCondition() metav1.Condition { + return metav1.Condition{ + Type: string(gatewayapi_v1beta1.ListenerConditionProgrammed), + Status: metav1.ConditionTrue, + Reason: string(gatewayapi_v1beta1.ListenerReasonProgrammed), + Message: "Valid listener", + } +} + +func listenerAcceptedCondition() metav1.Condition { + return metav1.Condition{ + Type: string(gatewayapi_v1beta1.ListenerConditionAccepted), + Status: metav1.ConditionTrue, + Reason: string(gatewayapi_v1beta1.ListenerReasonAccepted), + Message: "Listener accepted", + } +} + 
+func listenerResolvedRefsCondition() metav1.Condition { + return metav1.Condition{ + Type: string(gatewayapi_v1beta1.ListenerConditionResolvedRefs), + Status: metav1.ConditionTrue, + Reason: string(gatewayapi_v1beta1.ListenerReasonResolvedRefs), + Message: "Listener references resolved", + } +} + +func listenerValidConditions() []metav1.Condition { + return []metav1.Condition{ + listenerProgrammedCondition(), + listenerAcceptedCondition(), + listenerResolvedRefsCondition(), + } +} diff --git a/internal/envoy/route.go b/internal/envoy/route.go index 03d720242b6..ab3c5fe4a13 100644 --- a/internal/envoy/route.go +++ b/internal/envoy/route.go @@ -19,13 +19,20 @@ import ( "google.golang.org/protobuf/types/known/durationpb" ) -func HostReplaceHeader(hp *dag.HeadersPolicy) string { +func HostRewriteLiteral(hp *dag.HeadersPolicy) string { if hp == nil { return "" } return hp.HostRewrite } +func HostRewriteHeader(hp *dag.HeadersPolicy) string { + if hp == nil { + return "" + } + return hp.HostRewriteHeader +} + // Timeout converts a timeout.Setting to a protobuf.Duration // that's appropriate for Envoy. In general (though there are // exceptions), Envoy uses the following semantics: @@ -64,18 +71,16 @@ func SingleSimpleCluster(route *dag.Route) bool { // If the target cluster performs any kind of header manipulation, // then we should use a WeightedCluster to encode the additional // configuration. 
- if cluster.RequestHeadersPolicy == nil { - // no request headers policy - } else if len(cluster.RequestHeadersPolicy.Set) != 0 || - len(cluster.RequestHeadersPolicy.Add) != 0 || - len(cluster.RequestHeadersPolicy.Remove) != 0 || - len(cluster.RequestHeadersPolicy.HostRewrite) != 0 { + if cluster.RequestHeadersPolicy != nil && + (len(cluster.RequestHeadersPolicy.Set) != 0 || + len(cluster.RequestHeadersPolicy.Add) != 0 || + len(cluster.RequestHeadersPolicy.Remove) != 0 || + len(cluster.RequestHeadersPolicy.HostRewrite) != 0) { return false } - if cluster.ResponseHeadersPolicy == nil { - // no response headers policy - } else if len(cluster.ResponseHeadersPolicy.Set) != 0 || - len(cluster.ResponseHeadersPolicy.Remove) != 0 { + if cluster.ResponseHeadersPolicy != nil && + (len(cluster.ResponseHeadersPolicy.Set) != 0 || + len(cluster.ResponseHeadersPolicy.Remove) != 0) { return false } if len(cluster.CookieRewritePolicies) > 0 { diff --git a/internal/envoy/v3/bootstrap.go b/internal/envoy/v3/bootstrap.go index 904c5288f26..ba39f1e2366 100644 --- a/internal/envoy/v3/bootstrap.go +++ b/internal/envoy/v3/bootstrap.go @@ -241,7 +241,7 @@ func bootstrapConfig(c *envoy.BootstrapConfig) *envoy_bootstrap_v3.Bootstrap { LoadAssignment: &envoy_endpoint_v3.ClusterLoadAssignment{ ClusterName: "envoy-admin", Endpoints: Endpoints( - UnixSocketAddress(c.GetAdminAddress(), c.GetAdminPort()), + UnixSocketAddress(c.GetAdminAddress()), ), }, }}, @@ -252,7 +252,7 @@ func bootstrapConfig(c *envoy.BootstrapConfig) *envoy_bootstrap_v3.Bootstrap { }, Admin: &envoy_bootstrap_v3.Admin{ AccessLog: adminAccessLog(c.GetAdminAccessLogPath()), - Address: UnixSocketAddress(c.GetAdminAddress(), c.GetAdminPort()), + Address: UnixSocketAddress(c.GetAdminAddress()), }, } if c.MaximumHeapSizeBytes > 0 { diff --git a/internal/envoy/v3/listener.go b/internal/envoy/v3/listener.go index 2707700875a..73403867ff9 100644 --- a/internal/envoy/v3/listener.go +++ b/internal/envoy/v3/listener.go @@ -172,6 +172,7 
@@ type httpConnectionManagerBuilder struct { numTrustedHops uint32 tracingConfig *http.HttpConnectionManager_Tracing maxRequestsPerConnection *uint32 + http2MaxConcurrentStreams *uint32 enableWebsockets bool } @@ -284,6 +285,11 @@ func (b *httpConnectionManagerBuilder) MaxRequestsPerConnection(maxRequestsPerCo return b } +func (b *httpConnectionManagerBuilder) HTTP2MaxConcurrentStreams(http2MaxConcurrentStreams *uint32) *httpConnectionManagerBuilder { + b.http2MaxConcurrentStreams = http2MaxConcurrentStreams + return b +} + func (b *httpConnectionManagerBuilder) DefaultFilters() *httpConnectionManagerBuilder { // Add a default set of ordered http filters. @@ -538,6 +544,12 @@ func (b *httpConnectionManagerBuilder) Get() *envoy_listener_v3.Filter { cm.CommonHttpProtocolOptions.MaxRequestsPerConnection = wrapperspb.UInt32(*b.maxRequestsPerConnection) } + if b.http2MaxConcurrentStreams != nil { + cm.Http2ProtocolOptions = &envoy_core_v3.Http2ProtocolOptions{ + MaxConcurrentStreams: wrapperspb.UInt32(*b.http2MaxConcurrentStreams), + } + } + if b.enableWebsockets { cm.UpgradeConfigs = append(cm.UpgradeConfigs, &http.HttpConnectionManager_UpgradeConfig{ @@ -648,7 +660,7 @@ func TCPProxy(statPrefix string, proxy *dag.TCPProxy, accesslogger []*accesslog. } // UnixSocketAddress creates a new Unix Socket envoy_core_v3.Address. -func UnixSocketAddress(address string, port int) *envoy_core_v3.Address { +func UnixSocketAddress(address string) *envoy_core_v3.Address { return &envoy_core_v3.Address{ Address: &envoy_core_v3.Address_Pipe{ Pipe: &envoy_core_v3.Pipe{ @@ -887,7 +899,7 @@ func FilterChainTLS(domain string, downstream *envoy_tls_v3.DownstreamTlsContext return fc } -// FilterChainTLSFallback returns a TLS enabled envoy_listener_v3.FilterChain conifgured for FallbackCertificate. +// FilterChainTLSFallback returns a TLS enabled envoy_listener_v3.FilterChain configured for FallbackCertificate. 
func FilterChainTLSFallback(downstream *envoy_tls_v3.DownstreamTlsContext, filters []*envoy_listener_v3.Filter) *envoy_listener_v3.FilterChain { fc := &envoy_listener_v3.FilterChain{ Name: "fallback-certificate", diff --git a/internal/envoy/v3/listener_test.go b/internal/envoy/v3/listener_test.go index 4a9d8046e56..12bccde3262 100644 --- a/internal/envoy/v3/listener_test.go +++ b/internal/envoy/v3/listener_test.go @@ -652,6 +652,7 @@ func TestHTTPConnectionManager(t *testing.T) { forwardClientCertificate *dag.ClientCertificateDetails xffNumTrustedHops uint32 maxRequestsPerConnection *uint32 + http2MaxConcurrentStreams *uint32 want *envoy_listener_v3.Filter }{ "default": { @@ -1396,6 +1397,56 @@ func TestHTTPConnectionManager(t *testing.T) { }, }, }, + "http2MaxConcurrentStreams set": { + routename: "default/kuard", + accesslogger: FileAccessLogEnvoy("/dev/stdout", "", nil, v1alpha1.LogLevelInfo), + http2MaxConcurrentStreams: ref.To(uint32(50)), + want: &envoy_listener_v3.Filter{ + Name: wellknown.HTTPConnectionManager, + ConfigType: &envoy_listener_v3.Filter_TypedConfig{ + TypedConfig: protobuf.MustMarshalAny(&http.HttpConnectionManager{ + StatPrefix: "default/kuard", + RouteSpecifier: &http.HttpConnectionManager_Rds{ + Rds: &http.Rds{ + RouteConfigName: "default/kuard", + ConfigSource: &envoy_core_v3.ConfigSource{ + ResourceApiVersion: envoy_core_v3.ApiVersion_V3, + ConfigSourceSpecifier: &envoy_core_v3.ConfigSource_ApiConfigSource{ + ApiConfigSource: &envoy_core_v3.ApiConfigSource{ + ApiType: envoy_core_v3.ApiConfigSource_GRPC, + TransportApiVersion: envoy_core_v3.ApiVersion_V3, + GrpcServices: []*envoy_core_v3.GrpcService{{ + TargetSpecifier: &envoy_core_v3.GrpcService_EnvoyGrpc_{ + EnvoyGrpc: &envoy_core_v3.GrpcService_EnvoyGrpc{ + ClusterName: "contour", + Authority: "contour", + }, + }, + }}, + }, + }, + }, + }, + }, + HttpFilters: defaultHTTPFilters, + HttpProtocolOptions: &envoy_core_v3.Http1ProtocolOptions{ + // Enable support for HTTP/1.0 requests that 
carry + // a Host: header. See #537. + AcceptHttp_10: true, + }, + CommonHttpProtocolOptions: &envoy_core_v3.HttpProtocolOptions{}, + Http2ProtocolOptions: &envoy_core_v3.Http2ProtocolOptions{ + MaxConcurrentStreams: wrapperspb.UInt32(50), + }, + AccessLog: FileAccessLogEnvoy("/dev/stdout", "", nil, v1alpha1.LogLevelInfo), + UseRemoteAddress: wrapperspb.Bool(true), + NormalizePath: wrapperspb.Bool(true), + PreserveExternalRequestId: true, + MergeSlashes: false, + }), + }, + }, + }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { @@ -1415,6 +1466,7 @@ func TestHTTPConnectionManager(t *testing.T) { NumTrustedHops(tc.xffNumTrustedHops). ForwardClientCertificate(tc.forwardClientCertificate). MaxRequestsPerConnection(tc.maxRequestsPerConnection). + HTTP2MaxConcurrentStreams(tc.http2MaxConcurrentStreams). DefaultFilters(). Get() diff --git a/internal/envoy/v3/ratelimit.go b/internal/envoy/v3/ratelimit.go index 1aebd499570..c926d29c876 100644 --- a/internal/envoy/v3/ratelimit.go +++ b/internal/envoy/v3/ratelimit.go @@ -163,3 +163,12 @@ func enableXRateLimitHeaders(enable bool) ratelimit_filter_v3.RateLimit_XRateLim } return ratelimit_filter_v3.RateLimit_OFF } + +// rateLimitPerRoute returns a per-route config to configure vhost rate limits. 
+func rateLimitPerRoute(r *dag.RateLimitPerRoute) *anypb.Any { + return protobuf.MustMarshalAny( + &ratelimit_filter_v3.RateLimitPerRoute{ + VhRateLimits: ratelimit_filter_v3.RateLimitPerRoute_VhRateLimitsOptions(r.VhRateLimits), + }, + ) +} diff --git a/internal/envoy/v3/ratelimit_test.go b/internal/envoy/v3/ratelimit_test.go index 6010b72808d..e07c96d9913 100644 --- a/internal/envoy/v3/ratelimit_test.go +++ b/internal/envoy/v3/ratelimit_test.go @@ -411,3 +411,40 @@ func TestGlobalRateLimitFilter(t *testing.T) { }) } } + +func TestRateLimitPerRoute(t *testing.T) { + tests := map[string]struct { + name string + cfg *dag.RateLimitPerRoute + want *anypb.Any + }{ + "VhRateLimits in Override mode": { + cfg: &dag.RateLimitPerRoute{ + VhRateLimits: dag.VhRateLimitsOverride, + }, + want: protobuf.MustMarshalAny(&ratelimit_filter_v3.RateLimitPerRoute{ + VhRateLimits: 0, + }), + }, "VhRateLimits in Include mode": { + cfg: &dag.RateLimitPerRoute{ + VhRateLimits: dag.VhRateLimitsInclude, + }, + want: protobuf.MustMarshalAny(&ratelimit_filter_v3.RateLimitPerRoute{ + VhRateLimits: 1, + }), + }, "VhRateLimits in Ignore mode": { + cfg: &dag.RateLimitPerRoute{ + VhRateLimits: dag.VhRateLimitsIgnore, + }, + want: protobuf.MustMarshalAny(&ratelimit_filter_v3.RateLimitPerRoute{ + VhRateLimits: 2, + }), + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + assert.Equal(t, tc.want, rateLimitPerRoute(tc.cfg)) + }) + } +} diff --git a/internal/envoy/v3/route.go b/internal/envoy/v3/route.go index 609dcc2c76d..786f5cbecc5 100644 --- a/internal/envoy/v3/route.go +++ b/internal/envoy/v3/route.go @@ -145,6 +145,10 @@ func buildRoute(dagRoute *dag.Route, vhostName string, secure bool) *envoy_route route.TypedPerFilterConfig["envoy.filters.http.local_ratelimit"] = LocalRateLimitConfig(dagRoute.RateLimitPolicy.Local, "vhost."+vhostName) } + if dagRoute.RateLimitPerRoute != nil { + route.TypedPerFilterConfig["envoy.filters.http.ratelimit"] = 
rateLimitPerRoute(dagRoute.RateLimitPerRoute) + } + // Apply per-route authorization policy modifications. if dagRoute.AuthDisabled { route.TypedPerFilterConfig["envoy.filters.http.ext_authz"] = routeAuthzDisabled() @@ -415,10 +419,14 @@ func routeRoute(r *dag.Route) *envoy_route_v3.Route_Route { } // Check for host header policy and set if found - if val := envoy.HostReplaceHeader(r.RequestHeadersPolicy); val != "" { + if val := envoy.HostRewriteLiteral(r.RequestHeadersPolicy); val != "" { ra.HostRewriteSpecifier = &envoy_route_v3.RouteAction_HostRewriteLiteral{ HostRewriteLiteral: val, } + } else if val := envoy.HostRewriteHeader(r.RequestHeadersPolicy); val != "" { + ra.HostRewriteSpecifier = &envoy_route_v3.RouteAction_HostRewriteHeader{ + HostRewriteHeader: val, + } } if r.Websocket { @@ -616,7 +624,7 @@ func weightedClusters(route *dag.Route) *envoy_route_v3.WeightedCluster { c.RequestHeadersToAdd = append(headerValueList(cluster.RequestHeadersPolicy.Set, false), headerValueList(cluster.RequestHeadersPolicy.Add, true)...) 
c.RequestHeadersToRemove = cluster.RequestHeadersPolicy.Remove // Check for host header policy and set if found - if val := envoy.HostReplaceHeader(cluster.RequestHeadersPolicy); val != "" { + if val := envoy.HostRewriteLiteral(cluster.RequestHeadersPolicy); val != "" { c.HostRewriteSpecifier = &envoy_route_v3.WeightedCluster_ClusterWeight_HostRewriteLiteral{ HostRewriteLiteral: val, } diff --git a/internal/envoy/v3/runtime.go b/internal/envoy/v3/runtime.go index 8e2a121e06b..135eddab56b 100644 --- a/internal/envoy/v3/runtime.go +++ b/internal/envoy/v3/runtime.go @@ -24,11 +24,15 @@ const ( maxRegexProgramSizeWarn = 1000 ) -func RuntimeLayers() []*envoy_service_runtime_v3.Runtime { +func RuntimeLayers(configurableRuntimeFields map[string]*structpb.Value) []*envoy_service_runtime_v3.Runtime { + baseLayer := baseRuntimeLayer() + for k, v := range configurableRuntimeFields { + baseLayer.Fields[k] = v + } return []*envoy_service_runtime_v3.Runtime{ { Name: DynamicRuntimeLayerName, - Layer: baseRuntimeLayer(), + Layer: baseLayer, }, } } @@ -36,8 +40,8 @@ func RuntimeLayers() []*envoy_service_runtime_v3.Runtime { func baseRuntimeLayer() *structpb.Struct { return &structpb.Struct{ Fields: map[string]*structpb.Value{ - "re2.max_program_size.error_level": {Kind: &structpb.Value_NumberValue{NumberValue: maxRegexProgramSizeError}}, - "re2.max_program_size.warn_level": {Kind: &structpb.Value_NumberValue{NumberValue: maxRegexProgramSizeWarn}}, + "re2.max_program_size.error_level": structpb.NewNumberValue(maxRegexProgramSizeError), + "re2.max_program_size.warn_level": structpb.NewNumberValue(maxRegexProgramSizeWarn), }, } } diff --git a/internal/envoy/v3/runtime_test.go b/internal/envoy/v3/runtime_test.go index a0ca20d3cf0..9e84d136b65 100644 --- a/internal/envoy/v3/runtime_test.go +++ b/internal/envoy/v3/runtime_test.go @@ -22,15 +22,38 @@ import ( ) func TestRuntimeLayers(t *testing.T) { - require.Equal(t, []*envoy_service_runtime_v3.Runtime{ - { - Name: "dynamic", - Layer: 
&structpb.Struct{ - Fields: map[string]*structpb.Value{ - "re2.max_program_size.error_level": {Kind: &structpb.Value_NumberValue{NumberValue: 1 << 20}}, - "re2.max_program_size.warn_level": {Kind: &structpb.Value_NumberValue{NumberValue: 1000}}, - }, + testCases := map[string]struct { + configurableFields map[string]*structpb.Value + }{ + "nil configurable fields": {}, + "empty configurable fields": { + configurableFields: map[string]*structpb.Value{}, + }, + "some configurable fields": { + configurableFields: map[string]*structpb.Value{ + "some.value1": structpb.NewBoolValue(true), + "some.value2": structpb.NewNumberValue(1000), }, }, - }, RuntimeLayers()) + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + expectedFields := map[string]*structpb.Value{ + "re2.max_program_size.error_level": structpb.NewNumberValue(1 << 20), + "re2.max_program_size.warn_level": structpb.NewNumberValue(1000), + } + for k, v := range tc.configurableFields { + expectedFields[k] = v + } + layers := RuntimeLayers(tc.configurableFields) + require.Equal(t, []*envoy_service_runtime_v3.Runtime{ + { + Name: "dynamic", + Layer: &structpb.Struct{ + Fields: expectedFields, + }, + }, + }, layers) + }) + } } diff --git a/internal/featuretests/v3/envoy.go b/internal/featuretests/v3/envoy.go index a7f74e08ac2..97252185bd2 100644 --- a/internal/featuretests/v3/envoy.go +++ b/internal/featuretests/v3/envoy.go @@ -156,6 +156,15 @@ func routeHostRewrite(cluster, newHostName string) *envoy_route_v3.Route_Route { } } +func routeHostRewriteHeader(cluster, hostnameHeader string) *envoy_route_v3.Route_Route { + return &envoy_route_v3.Route_Route{ + Route: &envoy_route_v3.RouteAction{ + ClusterSpecifier: &envoy_route_v3.RouteAction_Cluster{Cluster: cluster}, + HostRewriteSpecifier: &envoy_route_v3.RouteAction_HostRewriteHeader{HostRewriteHeader: hostnameHeader}, + }, + } +} + func upgradeHTTPS(match *envoy_route_v3.RouteMatch) *envoy_route_v3.Route { return &envoy_route_v3.Route{ 
Match: match, diff --git a/internal/featuretests/v3/featuretests.go b/internal/featuretests/v3/featuretests.go index 8cd2fbc8f95..30f0bd72183 100644 --- a/internal/featuretests/v3/featuretests.go +++ b/internal/featuretests/v3/featuretests.go @@ -32,6 +32,17 @@ import ( envoy_service_route_v3 "github.com/envoyproxy/go-control-plane/envoy/service/route/v3" envoy_service_secret_v3 "github.com/envoyproxy/go-control-plane/envoy/service/secret/v3" resource "github.com/envoyproxy/go-control-plane/pkg/resource/v3" + "github.com/prometheus/client_golang/prometheus" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" + v1 "k8s.io/api/core/v1" + "k8s.io/client-go/tools/cache" + contour_api_v1 "github.com/projectcontour/contour/apis/projectcontour/v1" "github.com/projectcontour/contour/apis/projectcontour/v1alpha1" "github.com/projectcontour/contour/internal/contour" @@ -46,16 +57,6 @@ import ( contour_xds_v3 "github.com/projectcontour/contour/internal/xds/v3" "github.com/projectcontour/contour/internal/xdscache" xdscache_v3 "github.com/projectcontour/contour/internal/xdscache/v3" - "github.com/prometheus/client_golang/prometheus" - "github.com/sirupsen/logrus" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/types/known/anypb" - v1 "k8s.io/api/core/v1" - "k8s.io/client-go/tools/cache" ) const ( @@ -152,7 +153,7 @@ func setup(t *testing.T, opts ...any) (ResourceEventHandlerWrapper, *Contour, fu dag.ComposeObservers(xdscache.ObserversOf(resources)...), ), Builder: builder, - }) + }, func() bool { return true }) l, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) diff --git 
a/internal/featuretests/v3/globalratelimit_test.go b/internal/featuretests/v3/globalratelimit_test.go index 7cefb8c7ba0..216d2c46ab5 100644 --- a/internal/featuretests/v3/globalratelimit_test.go +++ b/internal/featuretests/v3/globalratelimit_test.go @@ -188,7 +188,6 @@ func globalRateLimitNoRateLimitsDefined(t *testing.T, rh ResourceEventHandlerWra ), }) } - } func globalRateLimitVhostRateLimitDefined(t *testing.T, rh ResourceEventHandlerWrapper, c *Contour, tls tlsConfig) { diff --git a/internal/featuretests/v3/headerpolicy_test.go b/internal/featuretests/v3/headerpolicy_test.go index bd0696fb3cd..d22c5a41c6c 100644 --- a/internal/featuretests/v3/headerpolicy_test.go +++ b/internal/featuretests/v3/headerpolicy_test.go @@ -219,3 +219,123 @@ func TestHeaderPolicy_ReplaceHeader_HTTProxy(t *testing.T) { TypeUrl: clusterType, }) } + +func TestHeaderPolicy_ReplaceHostHeader_HTTProxy(t *testing.T) { + // Enable ExternalName processing here because + // we need to check that host rewrites work in combination + // with ExternalName. + rh, c, done := setup(t, enableExternalNameService(t)) + defer done() + + rh.OnAdd(fixture.NewService("svc1"). 
+ WithPorts(v1.ServicePort{Port: 80, TargetPort: intstr.FromInt(8080)}), + ) + + rh.OnAdd(fixture.NewProxy("simple").WithSpec( + contour_api_v1.HTTPProxySpec{ + VirtualHost: &contour_api_v1.VirtualHost{Fqdn: "hello.world"}, + Routes: []contour_api_v1.Route{{ + Services: []contour_api_v1.Service{{ + Name: "svc1", + Port: 80, + }}, + RequestHeadersPolicy: &contour_api_v1.HeadersPolicy{ + Set: []contour_api_v1.HeaderValue{{ + Name: "Host", + Value: "%REQ(x-goodbye-planet)%", + }}, + }, + }}, + }), + ) + + c.Request(routeType).Equals(&envoy_discovery_v3.DiscoveryResponse{ + Resources: resources(t, + envoy_v3.RouteConfiguration("ingress_http", + envoy_v3.VirtualHost("hello.world", + &envoy_route_v3.Route{ + Match: routePrefix("/"), + Action: routeHostRewriteHeader("default/svc1/80/da39a3ee5e", "X-Goodbye-Planet"), + }, + ), + ), + ), + TypeUrl: routeType, + }) + + rh.OnAdd(fixture.NewService("externalname"). + Annotate("projectcontour.io/upstream-protocol.tls", "https,443"). + WithSpec(v1.ServiceSpec{ + ExternalName: "goodbye.planet", + Type: v1.ServiceTypeExternalName, + Ports: []v1.ServicePort{{ + Port: 443, + Name: "https", + }}, + }), + ) + + rh.OnAdd(&v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + }, + Type: "kubernetes.io/tls", + Data: featuretests.Secretdata(featuretests.CERTIFICATE, featuretests.RSA_PRIVATE_KEY), + }) + + // Proxy with SNI + rh.OnAdd(fixture.NewProxy("simple").WithSpec( + contour_api_v1.HTTPProxySpec{ + VirtualHost: &contour_api_v1.VirtualHost{ + Fqdn: "hello.world", + TLS: &contour_api_v1.TLS{SecretName: "foo"}, + }, + Routes: []contour_api_v1.Route{{ + Services: []contour_api_v1.Service{{ + Name: "externalname", + Port: 443, + }}, + RequestHeadersPolicy: &contour_api_v1.HeadersPolicy{ + Set: []contour_api_v1.HeaderValue{{ + Name: "Host", + Value: "%REQ(x-goodbye-planet)%", + }}, + }, + }}, + }), + ) + + c.Request(routeType).Equals(&envoy_discovery_v3.DiscoveryResponse{ + Resources: routeResources(t, + 
envoy_v3.RouteConfiguration("ingress_http", + envoy_v3.VirtualHost("hello.world", + &envoy_route_v3.Route{ + Match: routePrefix("/"), + Action: &envoy_route_v3.Route_Redirect{ + Redirect: &envoy_route_v3.RedirectAction{ + SchemeRewriteSpecifier: &envoy_route_v3.RedirectAction_HttpsRedirect{ + HttpsRedirect: true, + }, + }, + }, + }), + ), + envoy_v3.RouteConfiguration("https/hello.world", + envoy_v3.VirtualHost("hello.world", + &envoy_route_v3.Route{ + Match: routePrefix("/"), + Action: routeHostRewriteHeader("default/externalname/443/9ebffe8f28", "X-Goodbye-Planet"), + }, + )), + ), + TypeUrl: routeType, + }) + + c.Request(clusterType).Equals(&envoy_discovery_v3.DiscoveryResponse{ + Resources: resources(t, + tlsCluster(externalNameCluster("default/externalname/443/9ebffe8f28", "default/externalname/https", "default_externalname_443", "goodbye.planet", 443), nil, "goodbye.planet", "goodbye.planet", nil), + ), + TypeUrl: clusterType, + }) +} diff --git a/internal/featuretests/v3/replaceprefix_test.go b/internal/featuretests/v3/replaceprefix_test.go index bc46d400799..5b6989ad44b 100644 --- a/internal/featuretests/v3/replaceprefix_test.go +++ b/internal/featuretests/v3/replaceprefix_test.go @@ -478,16 +478,15 @@ func artifactoryDocker(t *testing.T) { Resources: resources(t, envoy_v3.RouteConfiguration("ingress_http", envoy_v3.VirtualHost("artifactory.projectcontour.io", - &envoy_route_v3.Route{ - Match: routePrefix("/v2/container-sandbox/"), + Match: routePrefix("/v2/container-external/"), Action: withPrefixRewrite(routeCluster("artifactory/service/8080/da39a3ee5e"), - "/artifactory/api/docker/container-sandbox/v2/"), + "/artifactory/api/docker/container-external/v2/"), }, &envoy_route_v3.Route{ - Match: routePrefix("/v2/container-sandbox"), + Match: routePrefix("/v2/container-sandbox/"), Action: withPrefixRewrite(routeCluster("artifactory/service/8080/da39a3ee5e"), - "/artifactory/api/docker/container-sandbox/v2"), + "/artifactory/api/docker/container-sandbox/v2/"), 
}, &envoy_route_v3.Route{ Match: routePrefix("/v2/container-release/"), @@ -495,29 +494,29 @@ func artifactoryDocker(t *testing.T) { "/artifactory/api/docker/container-release/v2/"), }, &envoy_route_v3.Route{ - Match: routePrefix("/v2/container-release"), + Match: routePrefix("/v2/container-external"), Action: withPrefixRewrite(routeCluster("artifactory/service/8080/da39a3ee5e"), - "/artifactory/api/docker/container-release/v2"), + "/artifactory/api/docker/container-external/v2"), }, &envoy_route_v3.Route{ - Match: routePrefix("/v2/container-public/"), + Match: routePrefix("/v2/container-sandbox"), Action: withPrefixRewrite(routeCluster("artifactory/service/8080/da39a3ee5e"), - "/artifactory/api/docker/container-public/v2/"), + "/artifactory/api/docker/container-sandbox/v2"), }, &envoy_route_v3.Route{ - Match: routePrefix("/v2/container-public"), + Match: routePrefix("/v2/container-release"), Action: withPrefixRewrite(routeCluster("artifactory/service/8080/da39a3ee5e"), - "/artifactory/api/docker/container-public/v2"), + "/artifactory/api/docker/container-release/v2"), }, &envoy_route_v3.Route{ - Match: routePrefix("/v2/container-external/"), + Match: routePrefix("/v2/container-public/"), Action: withPrefixRewrite(routeCluster("artifactory/service/8080/da39a3ee5e"), - "/artifactory/api/docker/container-external/v2/"), + "/artifactory/api/docker/container-public/v2/"), }, &envoy_route_v3.Route{ - Match: routePrefix("/v2/container-external"), + Match: routePrefix("/v2/container-public"), Action: withPrefixRewrite(routeCluster("artifactory/service/8080/da39a3ee5e"), - "/artifactory/api/docker/container-external/v2"), + "/artifactory/api/docker/container-public/v2"), }, ), ), diff --git a/internal/featuretests/v3/route_test.go b/internal/featuretests/v3/route_test.go index b17f0902929..1e92c0f1d71 100644 --- a/internal/featuretests/v3/route_test.go +++ b/internal/featuretests/v3/route_test.go @@ -1237,26 +1237,26 @@ func TestRouteWithTLS_InsecurePaths(t *testing.T) { 
Resources: routeResources(t, envoy_v3.RouteConfiguration("ingress_http", envoy_v3.VirtualHost("test2.test.com", - &envoy_route_v3.Route{ - Match: routePrefix("/secure"), - Action: envoy_v3.UpgradeHTTPS(), - }, &envoy_route_v3.Route{ Match: routePrefix("/insecure"), Action: routecluster("default/kuard/80/da39a3ee5e"), }, + &envoy_route_v3.Route{ + Match: routePrefix("/secure"), + Action: envoy_v3.UpgradeHTTPS(), + }, ), ), envoy_v3.RouteConfiguration("https/test2.test.com", envoy_v3.VirtualHost("test2.test.com", - &envoy_route_v3.Route{ - Match: routePrefix("/secure"), - Action: routecluster("default/svc2/80/da39a3ee5e"), - }, &envoy_route_v3.Route{ Match: routePrefix("/insecure"), Action: routecluster("default/kuard/80/da39a3ee5e"), }, + &envoy_route_v3.Route{ + Match: routePrefix("/secure"), + Action: routecluster("default/svc2/80/da39a3ee5e"), + }, ), ), ), @@ -1335,25 +1335,25 @@ func TestRouteWithTLS_InsecurePaths_DisablePermitInsecureTrue(t *testing.T) { envoy_v3.RouteConfiguration("ingress_http", envoy_v3.VirtualHost("test2.test.com", &envoy_route_v3.Route{ - Match: routePrefix("/secure"), + Match: routePrefix("/insecure"), Action: envoy_v3.UpgradeHTTPS(), }, &envoy_route_v3.Route{ - Match: routePrefix("/insecure"), + Match: routePrefix("/secure"), Action: envoy_v3.UpgradeHTTPS(), }, ), ), envoy_v3.RouteConfiguration("https/test2.test.com", envoy_v3.VirtualHost("test2.test.com", - &envoy_route_v3.Route{ - Match: routePrefix("/secure"), - Action: routecluster("default/svc2/80/da39a3ee5e"), - }, &envoy_route_v3.Route{ Match: routePrefix("/insecure"), Action: routecluster("default/kuard/80/da39a3ee5e"), }, + &envoy_route_v3.Route{ + Match: routePrefix("/secure"), + Action: routecluster("default/svc2/80/da39a3ee5e"), + }, ), ), ), @@ -1609,26 +1609,26 @@ func TestHTTPProxyRouteWithTLS_InsecurePaths(t *testing.T) { Resources: routeResources(t, envoy_v3.RouteConfiguration("ingress_http", envoy_v3.VirtualHost("test2.test.com", - &envoy_route_v3.Route{ - Match: 
routePrefix("/secure"), - Action: envoy_v3.UpgradeHTTPS(), - }, &envoy_route_v3.Route{ Match: routePrefix("/insecure"), Action: routecluster("default/kuard/80/da39a3ee5e"), }, + &envoy_route_v3.Route{ + Match: routePrefix("/secure"), + Action: envoy_v3.UpgradeHTTPS(), + }, ), ), envoy_v3.RouteConfiguration("https/test2.test.com", envoy_v3.VirtualHost("test2.test.com", - &envoy_route_v3.Route{ - Match: routePrefix("/secure"), - Action: routecluster("default/svc2/80/da39a3ee5e"), - }, &envoy_route_v3.Route{ Match: routePrefix("/insecure"), Action: routecluster("default/kuard/80/da39a3ee5e"), }, + &envoy_route_v3.Route{ + Match: routePrefix("/secure"), + Action: routecluster("default/svc2/80/da39a3ee5e"), + }, ), ), ), @@ -1703,25 +1703,25 @@ func TestHTTPProxyRouteWithTLS_InsecurePaths_DisablePermitInsecureTrue(t *testin envoy_v3.RouteConfiguration("ingress_http", envoy_v3.VirtualHost("test2.test.com", &envoy_route_v3.Route{ - Match: routePrefix("/secure"), + Match: routePrefix("/insecure"), Action: envoy_v3.UpgradeHTTPS(), }, &envoy_route_v3.Route{ - Match: routePrefix("/insecure"), + Match: routePrefix("/secure"), Action: envoy_v3.UpgradeHTTPS(), }, ), ), envoy_v3.RouteConfiguration("https/test2.test.com", envoy_v3.VirtualHost("test2.test.com", - &envoy_route_v3.Route{ - Match: routePrefix("/secure"), - Action: routecluster("default/svc2/80/da39a3ee5e"), - }, &envoy_route_v3.Route{ Match: routePrefix("/insecure"), Action: routecluster("default/kuard/80/da39a3ee5e"), }, + &envoy_route_v3.Route{ + Match: routePrefix("/secure"), + Action: routecluster("default/svc2/80/da39a3ee5e"), + }, ), ), ), diff --git a/internal/k8s/helpers.go b/internal/k8s/helpers.go index ed4363edd01..a0ac86baddc 100644 --- a/internal/k8s/helpers.go +++ b/internal/k8s/helpers.go @@ -94,22 +94,22 @@ func isStatusEqual(objA, objB any) bool { // // Make an attempt to avoid comparing full objects since it can be very CPU intensive. 
// Prefer comparing Generation when only interested in spec changes. -func IsObjectEqual(old, new client.Object) (bool, error) { +func IsObjectEqual(oldObj, newObj client.Object) (bool, error) { // Fast path for any object: when ResourceVersions are equal, the objects are equal. // NOTE: This optimizes the case when controller-runtime executes full sync and sends updates for all objects. - if isResourceVersionEqual(old, new) { + if isResourceVersionEqual(oldObj, newObj) { return true, nil } - switch old := old.(type) { + switch oldObj := oldObj.(type) { // Fast path for objects that implement Generation and where only spec changes matter. // Status/annotations/labels changes are ignored. // Generation is implemented in CRDs, Ingress and IngressClass. case *contour_api_v1alpha1.ExtensionService, *contour_api_v1.TLSCertificateDelegation: - return isGenerationEqual(old, new), nil + return isGenerationEqual(oldObj, newObj), nil case *gatewayapi_v1beta1.GatewayClass, *gatewayapi_v1beta1.Gateway, @@ -118,36 +118,36 @@ func IsObjectEqual(old, new client.Object) (bool, error) { *gatewayapi_v1alpha2.TLSRoute, *gatewayapi_v1alpha2.GRPCRoute, *gatewayapi_v1alpha2.TCPRoute: - return isGenerationEqual(old, new), nil + return isGenerationEqual(oldObj, newObj), nil // Slow path: compare the content of the objects. 
case *contour_api_v1.HTTPProxy, *networking_v1.Ingress: - return isGenerationEqual(old, new) && - apiequality.Semantic.DeepEqual(old.GetAnnotations(), new.GetAnnotations()), nil + return isGenerationEqual(oldObj, newObj) && + apiequality.Semantic.DeepEqual(oldObj.GetAnnotations(), newObj.GetAnnotations()), nil case *v1.Secret: - if new, ok := new.(*v1.Secret); ok { - return reflect.DeepEqual(old.Data, new.Data), nil + if newObj, ok := newObj.(*v1.Secret); ok { + return reflect.DeepEqual(oldObj.Data, newObj.Data), nil } case *v1.Service: - if new, ok := new.(*v1.Service); ok { - return apiequality.Semantic.DeepEqual(old.Spec, new.Spec) && - apiequality.Semantic.DeepEqual(old.Status, new.Status) && - apiequality.Semantic.DeepEqual(old.GetAnnotations(), new.GetAnnotations()), nil + if newObj, ok := newObj.(*v1.Service); ok { + return apiequality.Semantic.DeepEqual(oldObj.Spec, newObj.Spec) && + apiequality.Semantic.DeepEqual(oldObj.Status, newObj.Status) && + apiequality.Semantic.DeepEqual(oldObj.GetAnnotations(), newObj.GetAnnotations()), nil } case *v1.Endpoints: - if new, ok := new.(*v1.Endpoints); ok { - return apiequality.Semantic.DeepEqual(old.Subsets, new.Subsets), nil + if newObj, ok := newObj.(*v1.Endpoints); ok { + return apiequality.Semantic.DeepEqual(oldObj.Subsets, newObj.Subsets), nil } case *v1.Namespace: - if new, ok := new.(*v1.Namespace); ok { - return apiequality.Semantic.DeepEqual(old.Labels, new.Labels), nil + if newObj, ok := newObj.(*v1.Namespace); ok { + return apiequality.Semantic.DeepEqual(oldObj.Labels, newObj.Labels), nil } } // ResourceVersions are not equal and we don't know how to compare the object type. // This should never happen and indicates that new type was added to the code but is missing in the switch above. 
- return false, fmt.Errorf("do not know how to compare %T", new) + return false, fmt.Errorf("do not know how to compare %T", newObj) } func isGenerationEqual(a, b client.Object) bool { diff --git a/internal/k8s/helpers_test.go b/internal/k8s/helpers_test.go index 1f605d5e49d..362cfb097fd 100644 --- a/internal/k8s/helpers_test.go +++ b/internal/k8s/helpers_test.go @@ -96,12 +96,12 @@ func TestIsObjectEqual(t *testing.T) { assert.Equal(t, 2, len(objects), "expected 2 objects in file") // Decode the objects. - old, _, err := deserializer.Decode([]byte(objects[0]), nil, nil) + oldObj, _, err := deserializer.Decode([]byte(objects[0]), nil, nil) assert.NoError(t, err) - new, _, err := deserializer.Decode([]byte(objects[1]), nil, nil) + newObj, _, err := deserializer.Decode([]byte(objects[1]), nil, nil) assert.NoError(t, err) - got, err := IsObjectEqual(old.(client.Object), new.(client.Object)) + got, err := IsObjectEqual(oldObj.(client.Object), newObj.(client.Object)) assert.NoError(t, err) assert.Equal(t, tc.equals, got) }) @@ -109,7 +109,7 @@ func TestIsObjectEqual(t *testing.T) { } func TestIsEqualForResourceVersion(t *testing.T) { - old := &v1.Secret{ + oldS := &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", @@ -120,23 +120,23 @@ func TestIsEqualForResourceVersion(t *testing.T) { }, } - new := old.DeepCopy() + newS := oldS.DeepCopy() // Objects with equal ResourceVersion should evaluate to true. - got, err := IsObjectEqual(old, new) + got, err := IsObjectEqual(oldS, newS) assert.NoError(t, err) assert.True(t, got) // Differences in data should be ignored. - new.Data["foo"] = []byte("baz") - got, err = IsObjectEqual(old, new) + newS.Data["foo"] = []byte("baz") + got, err = IsObjectEqual(oldS, newS) assert.NoError(t, err) assert.True(t, got) } // TestIsEqualFallback compares with ConfigMap objects, which are not supported. 
func TestIsEqualFallback(t *testing.T) { - old := &v1.ConfigMap{ + oldObj := &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "test", Namespace: "default", @@ -147,37 +147,37 @@ func TestIsEqualFallback(t *testing.T) { }, } - new := old.DeepCopy() + newObj := oldObj.DeepCopy() // Any object (even unsupported types) with equal ResourceVersion should evaluate to true. - got, err := IsObjectEqual(old, new) + got, err := IsObjectEqual(oldObj, newObj) assert.NoError(t, err) assert.True(t, got) // Unsupported types with unequal ResourceVersion should return an error. - new.ResourceVersion = "456" - got, err = IsObjectEqual(old, new) + newObj.ResourceVersion = "456" + got, err = IsObjectEqual(oldObj, newObj) assert.Error(t, err) assert.False(t, got) } func TestIsEqualForGeneration(t *testing.T) { - run := func(t *testing.T, old client.Object) { + run := func(t *testing.T, oldObj client.Object) { t.Helper() - new := old.DeepCopyObject().(client.Object) + newObj := oldObj.DeepCopyObject().(client.Object) // Set different ResourceVersion to ensure that Generation is the only difference. - old.SetResourceVersion("123") - new.SetResourceVersion("456") + oldObj.SetResourceVersion("123") + newObj.SetResourceVersion("456") // Objects with equal Generation should evaluate to true. - got, err := IsObjectEqual(old, new) + got, err := IsObjectEqual(oldObj, newObj) assert.NoError(t, err) assert.True(t, got) // Objects with unequal Generation should evaluate to false. 
- new.SetGeneration(old.GetGeneration() + 1) - got, err = IsObjectEqual(old, new) + newObj.SetGeneration(oldObj.GetGeneration() + 1) + got, err = IsObjectEqual(oldObj, newObj) assert.NoError(t, err) assert.False(t, got) } diff --git a/internal/k8s/log.go b/internal/k8s/log.go index a38ec4d149d..ebc16b26150 100644 --- a/internal/k8s/log.go +++ b/internal/k8s/log.go @@ -19,7 +19,7 @@ import ( "os" "strconv" - "github.com/bombsimon/logrusr/v2" + "github.com/bombsimon/logrusr/v4" "github.com/go-logr/logr" "github.com/sirupsen/logrus" klog "k8s.io/klog/v2" @@ -138,6 +138,6 @@ func (l *alwaysEnabledLogSink) WithCallDepth(depth int) logr.LogSink { // Override Enabled to always return true since we rely on klog itself to do log // level filtering. -func (l *alwaysEnabledLogSink) Enabled(level int) bool { +func (l *alwaysEnabledLogSink) Enabled(_ int) bool { return true } diff --git a/internal/k8s/log_test.go b/internal/k8s/log_test.go index 6af5e317455..62d9c6ce89e 100644 --- a/internal/k8s/log_test.go +++ b/internal/k8s/log_test.go @@ -152,26 +152,19 @@ func TestMultipleLogWriterOptions(t *testing.T) { } func TestLogLevelOptionKlog(t *testing.T) { - log, logHook := test.NewNullLogger() + log, _ := test.NewNullLogger() l := log.WithField("some", "field") - for logLevel := 1; logLevel <= 10; logLevel++ { + for logLevel := 0; logLevel <= 10; logLevel++ { t.Run(fmt.Sprintf("log level %d", logLevel), func(t *testing.T) { InitLogging(LogWriterOption(l), LogLevelOption(logLevel)) // Make sure log verbosity is set properly. 
- for verbosityLevel := 1; verbosityLevel <= 10; verbosityLevel++ { + for verbosityLevel := 0; verbosityLevel <= 10; verbosityLevel++ { enabled := klog.V(klog.Level(verbosityLevel)).Enabled() if verbosityLevel <= logLevel { assert.True(t, enabled) - klog.V(klog.Level(verbosityLevel)).Info("something") - klog.Flush() - assert.Eventually(t, func() bool { return len(logHook.AllEntries()) == 1 }, klogFlushWaitTime, klogFlushWaitInterval) } else { assert.False(t, enabled) - klog.V(klog.Level(verbosityLevel)).Info("something") - klog.Flush() - assert.Never(t, func() bool { return len(logHook.AllEntries()) > 0 }, klogFlushWaitTime, klogFlushWaitInterval) } - logHook.Reset() } }) } diff --git a/internal/k8s/statusaddress.go b/internal/k8s/statusaddress.go index 3b5d2e0c002..8544e8c05f9 100644 --- a/internal/k8s/statusaddress.go +++ b/internal/k8s/statusaddress.go @@ -61,7 +61,7 @@ func (s *StatusAddressUpdater) Set(status v1.LoadBalancerStatus) { // OnAdd updates the given Ingress/HTTPProxy/Gateway object with the // current load balancer address. Note that this method can be called // concurrently from an informer or from Contour itself. -func (s *StatusAddressUpdater) OnAdd(obj any, isInInitialList bool) { +func (s *StatusAddressUpdater) OnAdd(obj any, _ bool) { // Hold the mutex to get a shallow copy. We don't need to // deep copy, since all the references are read-only. s.mu.Lock() @@ -191,7 +191,7 @@ func (s *StatusAddressUpdater) OnAdd(obj any, isInInitialList bool) { } } -func (s *StatusAddressUpdater) OnUpdate(oldObj, newObj any) { +func (s *StatusAddressUpdater) OnUpdate(_, newObj any) { // We only care about the new object, because we're only updating its status. // So, we can get away with just passing this call to OnAdd. 
@@ -199,7 +199,7 @@ func (s *StatusAddressUpdater) OnUpdate(oldObj, newObj any) { } -func (s *StatusAddressUpdater) OnDelete(obj any) { +func (s *StatusAddressUpdater) OnDelete(_ any) { // we don't need to update the status on resources that // have been deleted. } @@ -214,7 +214,7 @@ type ServiceStatusLoadBalancerWatcher struct { Log logrus.FieldLogger } -func (s *ServiceStatusLoadBalancerWatcher) OnAdd(obj any, isInInitialList bool) { +func (s *ServiceStatusLoadBalancerWatcher) OnAdd(obj any, _ bool) { svc, ok := obj.(*v1.Service) if !ok { // not a service @@ -230,7 +230,7 @@ func (s *ServiceStatusLoadBalancerWatcher) OnAdd(obj any, isInInitialList bool) s.notify(svc.Status.LoadBalancer) } -func (s *ServiceStatusLoadBalancerWatcher) OnUpdate(oldObj, newObj any) { +func (s *ServiceStatusLoadBalancerWatcher) OnUpdate(_, newObj any) { svc, ok := newObj.(*v1.Service) if !ok { // not a service diff --git a/internal/provisioner/controller/gateway.go b/internal/provisioner/controller/gateway.go index 0fd328566ad..2f0a1369836 100644 --- a/internal/provisioner/controller/gateway.go +++ b/internal/provisioner/controller/gateway.go @@ -267,6 +267,9 @@ func (r *gatewayReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct for k, v := range contourParams.PodAnnotations { contourModel.Spec.ContourPodAnnotations[k] = v } + for k, v := range contourParams.PodLabels { + contourModel.Spec.ContourPodLabels[k] = v + } } if gatewayClassParams.Spec.Envoy != nil { @@ -339,6 +342,11 @@ func (r *gatewayReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct contourModel.Spec.EnvoyPodAnnotations[k] = v } + // Pod Labels + for k, v := range envoyParams.PodLabels { + contourModel.Spec.EnvoyPodLabels[k] = v + } + contourModel.Spec.EnvoyResources = envoyParams.Resources if envoyParams.LogLevel != "" { @@ -361,6 +369,10 @@ func (r *gatewayReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct contourModel.Spec.EnvoyBaseID = envoyParams.BaseID } + if 
envoyParams.OverloadMaxHeapSize > 0 { + contourModel.Spec.EnvoyMaxHeapSizeBytes = envoyParams.OverloadMaxHeapSize + } + } } diff --git a/internal/provisioner/controller/gateway_test.go b/internal/provisioner/controller/gateway_test.go index d3498e0183f..49230049891 100644 --- a/internal/provisioner/controller/gateway_test.go +++ b/internal/provisioner/controller/gateway_test.go @@ -1127,6 +1127,56 @@ func TestGatewayReconcile(t *testing.T) { }, }, + "If ContourDeployment.Spec.Envoy.OverloadMaxHeapSize is specified, the envoy-initconfig container's arguments contain --overload-max-heap": { + gatewayClass: reconcilableGatewayClassWithParams("gatewayclass-1", controller), + gatewayClassParams: &contourv1alpha1.ContourDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "projectcontour", + Name: "gatewayclass-1-params", + }, + Spec: contourv1alpha1.ContourDeploymentSpec{ + Envoy: &contourv1alpha1.EnvoySettings{ + OverloadMaxHeapSize: 10000000, + }, + }, + }, + gateway: makeGateway(), + assertions: func(t *testing.T, r *gatewayReconciler, gw *gatewayv1beta1.Gateway, reconcileErr error) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "gateway-1", + Name: "envoy-gateway-1", + }, + } + require.NoError(t, r.client.Get(context.Background(), keyFor(ds), ds)) + assert.Contains(t, ds.Spec.Template.Spec.InitContainers[0].Args, "--overload-max-heap=10000000") + }, + }, + + "If ContourDeployment.Spec.Envoy.OverloadMaxHeapSize is not specified, the envoy-initconfig container's arguments contain --overload-max-heap=0": { + gatewayClass: reconcilableGatewayClassWithParams("gatewayclass-1", controller), + gatewayClassParams: &contourv1alpha1.ContourDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "projectcontour", + Name: "gatewayclass-1-params", + }, + Spec: contourv1alpha1.ContourDeploymentSpec{ + Envoy: &contourv1alpha1.EnvoySettings{}, + }, + }, + gateway: makeGateway(), + assertions: func(t *testing.T, r *gatewayReconciler, gw 
*gatewayv1beta1.Gateway, reconcileErr error) { + ds := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "gateway-1", + Name: "envoy-gateway-1", + }, + } + require.NoError(t, r.client.Get(context.Background(), keyFor(ds), ds)) + assert.Contains(t, ds.Spec.Template.Spec.InitContainers[0].Args, "--overload-max-heap=0") + }, + }, + "If ContourDeployment.Spec.Contour.PodAnnotations is specified, the Contour pods' have annotations for prometheus & user-defined": { gatewayClass: reconcilableGatewayClassWithParams("gatewayclass-1", controller), gatewayClassParams: &contourv1alpha1.ContourDeployment{ diff --git a/internal/provisioner/model/model.go b/internal/provisioner/model/model.go index dfdf0796328..656eeeb7525 100644 --- a/internal/provisioner/model/model.go +++ b/internal/provisioner/model/model.go @@ -38,11 +38,12 @@ func Default(namespace, name string) *Contour { Name: name, }, Spec: ContourSpec{ - ContourReplicas: 2, - EnvoyWorkloadType: WorkloadTypeDaemonSet, - EnvoyReplicas: 2, // ignored if not provisioning Envoy as a deployment. - EnvoyLogLevel: contourv1alpha1.InfoLog, - EnvoyBaseID: 0, + ContourReplicas: 2, + EnvoyWorkloadType: WorkloadTypeDaemonSet, + EnvoyReplicas: 2, // ignored if not provisioning Envoy as a deployment. + EnvoyLogLevel: contourv1alpha1.InfoLog, + EnvoyBaseID: 0, + EnvoyMaxHeapSizeBytes: 0, NetworkPublishing: NetworkPublishing{ Envoy: EnvoyNetworkPublishing{ Type: LoadBalancerServicePublishingType, @@ -72,6 +73,8 @@ func Default(namespace, name string) *Contour { ResourceLabels: map[string]string{}, EnvoyPodAnnotations: map[string]string{}, ContourPodAnnotations: map[string]string{}, + EnvoyPodLabels: map[string]string{}, + ContourPodLabels: map[string]string{}, }, } } @@ -216,6 +219,12 @@ type ContourSpec struct { // the annotations: "prometheus.io/scrape", "prometheus.io/port" will be overwritten with predefined value. 
 	ContourPodAnnotations map[string]string
+
+	// EnvoyPodLabels holds the labels that will be added to the envoy's pods.
+	EnvoyPodLabels map[string]string
+
+	// ContourPodLabels holds the labels that will be added to the contour's pods.
+	ContourPodLabels map[string]string
+
 	// Compute Resources required by envoy container.
 	EnvoyResources corev1.ResourceRequirements
@@ -231,6 +240,11 @@ type ContourSpec struct {
 	// so that the shared memory regions do not conflict.
 	// defaults to 0.
 	EnvoyBaseID int32
+
+	// EnvoyMaxHeapSizeBytes defines the maximum amount of memory that the overload manager allows Envoy to allocate.
+	// If the value is 0, the overload manager is disabled.
+	// defaults to 0.
+	EnvoyMaxHeapSizeBytes uint64
 }
 
 // WorkloadType is the type of Kubernetes workload to use for a component.
diff --git a/internal/provisioner/model/names.go b/internal/provisioner/model/names.go
index 54a751d6e7c..091121fce93 100644
--- a/internal/provisioner/model/names.go
+++ b/internal/provisioner/model/names.go
@@ -86,6 +86,15 @@ func (c *Contour) AppLabels() map[string]string {
 		labels[k] = v
 	}
 
+	for k, v := range c.AppPredefinedLabels() {
+		labels[k] = v
+	}
+	return labels
+}
+
+// AppPredefinedLabels returns predefined labels for Contour resources (Deployment/DaemonSet).
+func (c *Contour) AppPredefinedLabels() map[string]string { + labels := map[string]string{} labels["app.kubernetes.io/instance"] = c.Name labels["app.kubernetes.io/name"] = "contour" labels["app.kubernetes.io/component"] = "ingress-controller" diff --git a/internal/provisioner/objects/dataplane/dataplane.go b/internal/provisioner/objects/dataplane/dataplane.go index ee1fd6f7109..c0c1bc2924c 100644 --- a/internal/provisioner/objects/dataplane/dataplane.go +++ b/internal/provisioner/objects/dataplane/dataplane.go @@ -295,6 +295,7 @@ func desiredContainers(contour *model.Contour, contourImage, envoyImage string) fmt.Sprintf("--envoy-cafile=%s", filepath.Join("/", envoyCertsVolMntDir, "ca.crt")), fmt.Sprintf("--envoy-cert-file=%s", filepath.Join("/", envoyCertsVolMntDir, "tls.crt")), fmt.Sprintf("--envoy-key-file=%s", filepath.Join("/", envoyCertsVolMntDir, "tls.key")), + fmt.Sprintf("--overload-max-heap=%d", contour.Spec.EnvoyMaxHeapSizeBytes), }, VolumeMounts: []corev1.VolumeMount{ { @@ -525,9 +526,16 @@ func EnvoyPodSelector(contour *model.Contour) *metav1.LabelSelector { // envoyPodLabels returns the labels for envoy's pods func envoyPodLabels(contour *model.Contour) map[string]string { labels := EnvoyPodSelector(contour).MatchLabels - for k, v := range contour.AppLabels() { + for k, v := range model.CommonLabels(contour) { labels[k] = v } + for k, v := range contour.Spec.EnvoyPodLabels { + labels[k] = v + } + for k, v := range contour.AppPredefinedLabels() { + labels[k] = v + } + return labels } diff --git a/internal/provisioner/objects/dataplane/dataplane_test.go b/internal/provisioner/objects/dataplane/dataplane_test.go index da5449b7bbf..ad83d186b04 100644 --- a/internal/provisioner/objects/dataplane/dataplane_test.go +++ b/internal/provisioner/objects/dataplane/dataplane_test.go @@ -107,6 +107,15 @@ func checkDaemonSetHasPodAnnotations(t *testing.T, ds *appsv1.DaemonSet, expecte t.Errorf("daemonset has unexpected %q pod annotations", 
ds.Spec.Template.Annotations) } +func checkDaemonSetHasPodLabels(t *testing.T, ds *appsv1.DaemonSet, expected map[string]string) { + t.Helper() + + if apiequality.Semantic.DeepEqual(ds.Spec.Template.ObjectMeta.Labels, expected) { + return + } + t.Errorf("daemonset has unexpected %q pod labels", ds.Spec.Template.Labels) +} + func checkContainerHasPort(t *testing.T, ds *appsv1.DaemonSet, port int32) { t.Helper() @@ -271,6 +280,10 @@ func TestDesiredDaemonSet(t *testing.T) { "prometheus.io/scrape": "false", } + cntr.Spec.EnvoyPodLabels = map[string]string{ + "sidecar.istio.io/inject": "false", + } + volTest := corev1.Volume{ Name: "vol-test-mount", } @@ -290,6 +303,7 @@ func TestDesiredDaemonSet(t *testing.T) { testEnvoyImage := "docker.io/envoyproxy/envoy:test" testLogLevelArg := "--log-level debug" testBaseIDArg := "--base-id 1" + testEnvoyMaxHeapSize := "--overload-max-heap=8000000000" resQutoa := corev1.ResourceRequirements{ Limits: corev1.ResourceList{ @@ -315,6 +329,8 @@ func TestDesiredDaemonSet(t *testing.T) { // Change the Envoy base id to test --base-id 1 cntr.Spec.EnvoyBaseID = 1 + cntr.Spec.EnvoyMaxHeapSizeBytes = 8000000000 + ds := DesiredDaemonSet(cntr, testContourImage, testEnvoyImage) container := checkDaemonSetHasContainer(t, ds, EnvoyContainerName, true) checkContainerHasArg(t, container, testLogLevelArg) @@ -330,6 +346,8 @@ func TestDesiredDaemonSet(t *testing.T) { checkContainerHaveResourceRequirements(t, container) checkContainerHasImage(t, container, testContourImage) + checkContainerHasArg(t, container, testEnvoyMaxHeapSize) + checkDaemonSetHasEnvVar(t, ds, EnvoyContainerName, envoyNsEnvVar) checkDaemonSetHasEnvVar(t, ds, EnvoyContainerName, envoyPodEnvVar) checkDaemonSetHasEnvVar(t, ds, envoyInitContainerName, envoyNsEnvVar) @@ -341,6 +359,7 @@ func TestDesiredDaemonSet(t *testing.T) { checkDaemonSecurityContext(t, ds) checkDaemonSetHasVolume(t, ds, volTest, volTestMount) checkDaemonSetHasPodAnnotations(t, ds, envoyPodAnnotations(cntr)) + 
checkDaemonSetHasPodLabels(t, ds, envoyPodLabels(cntr)) checkDaemonSetHasMetricsPort(t, ds, objects.EnvoyMetricsPort) checkDaemonSetHasResourceRequirements(t, ds, resQutoa) diff --git a/internal/provisioner/objects/deployment/deployment.go b/internal/provisioner/objects/deployment/deployment.go index 4a1688708f9..4908ca12b83 100644 --- a/internal/provisioner/objects/deployment/deployment.go +++ b/internal/provisioner/objects/deployment/deployment.go @@ -290,10 +290,17 @@ func ContourDeploymentPodSelector(contour *model.Contour) *metav1.LabelSelector } // contourPodLabels returns the labels for contour's pods, there are pod selector & -// app labels +// app & pod labels func contourPodLabels(contour *model.Contour) map[string]string { labels := ContourDeploymentPodSelector(contour).MatchLabels - for k, v := range contour.AppLabels() { + + for k, v := range model.CommonLabels(contour) { + labels[k] = v + } + for k, v := range contour.Spec.ContourPodLabels { + labels[k] = v + } + for k, v := range contour.AppPredefinedLabels() { labels[k] = v } return labels diff --git a/internal/provisioner/objects/deployment/deployment_test.go b/internal/provisioner/objects/deployment/deployment_test.go index 2d5de7ad691..8f4d8aec0b6 100644 --- a/internal/provisioner/objects/deployment/deployment_test.go +++ b/internal/provisioner/objects/deployment/deployment_test.go @@ -76,7 +76,16 @@ func checkPodHasAnnotations(t *testing.T, tmpl *corev1.PodTemplateSpec, annotati t.Errorf("pod template has unexpected %q annotations", tmpl.Annotations) } } +} + +func checkPodHasLabels(t *testing.T, tmpl *corev1.PodTemplateSpec, labels map[string]string) { + t.Helper() + for k, v := range labels { + if val, ok := tmpl.Labels[k]; !ok || val != v { + t.Errorf("pod template has unexpected %q labels", tmpl.Labels) + } + } } func checkContainerHasArg(t *testing.T, container *corev1.Container, arg string) { @@ -152,10 +161,6 @@ func TestDesiredDeployment(t *testing.T) { }, } - annotations := 
map[string]string{ - "key": "value", - "prometheus.io/scrape": "false", - } cntr.Spec.ContourResources = resQutoa // Change the Kubernetes log level to test --kubernetes-debug. @@ -165,9 +170,16 @@ func TestDesiredDeployment(t *testing.T) { cntr.Spec.ContourLogLevel = v1alpha1.DebugLog cntr.Spec.ResourceLabels = map[string]string{ - "key": "value", + "key": "value", + "key1": "value1", + } + cntr.Spec.ContourPodAnnotations = map[string]string{ + "key": "value", + "prometheus.io/scrape": "false", + } + cntr.Spec.ContourPodLabels = map[string]string{ + "key1": "overwritten", } - cntr.Spec.ContourPodAnnotations = annotations // Use non-default container ports to test that --envoy-service-http(s)-port // flags are added. @@ -184,6 +196,7 @@ func TestDesiredDeployment(t *testing.T) { checkDeploymentHasEnvVar(t, deploy, contourNsEnvVar) checkDeploymentHasEnvVar(t, deploy, contourPodEnvVar) checkDeploymentHasLabels(t, deploy, cntr.AppLabels()) + checkPodHasLabels(t, &deploy.Spec.Template, contourPodLabels(cntr)) checkPodHasAnnotations(t, &deploy.Spec.Template, contourPodAnnotations(cntr)) for _, port := range cntr.Spec.NetworkPublishing.Envoy.Ports { diff --git a/internal/provisioner/objects/secret/secret.go b/internal/provisioner/objects/secret/secret.go index c29a1d31ecc..9d3bbfc2835 100644 --- a/internal/provisioner/objects/secret/secret.go +++ b/internal/provisioner/objects/secret/secret.go @@ -134,7 +134,7 @@ func EnsureXDSSecretsDeleted(ctx context.Context, cli client.Client, contour *mo }, } - if err := cli.Delete(context.Background(), s); err != nil && !errors.IsNotFound(err) { + if err := cli.Delete(ctx, s); err != nil && !errors.IsNotFound(err) { return err } } diff --git a/internal/sorter/sorter.go b/internal/sorter/sorter.go index 5334b2bea3c..133273c5cd2 100644 --- a/internal/sorter/sorter.go +++ b/internal/sorter/sorter.go @@ -296,33 +296,47 @@ func (s routeSorter) Less(i, j int) bool { switch a := s[i].PathMatchCondition.(type) { case 
*dag.PrefixMatchCondition: if b, ok := s[j].PathMatchCondition.(*dag.PrefixMatchCondition); ok { - cmp := strings.Compare(a.Prefix, b.Prefix) - switch cmp { - case 1: + switch { + case len(a.Prefix) > len(b.Prefix): // Sort longest prefix first. return true - case -1: + case len(a.Prefix) < len(b.Prefix): return false default: - if a.PrefixMatchType == b.PrefixMatchType { - return compareRoutesByMethodHeaderQueryParams(s[i], s[j]) + cmp := strings.Compare(a.Prefix, b.Prefix) + switch cmp { + case 1: + return true + case -1: + return false + default: + if a.PrefixMatchType == b.PrefixMatchType { + return compareRoutesByMethodHeaderQueryParams(s[i], s[j]) + } + // Segment prefixes sort first as they are more specific. + return a.PrefixMatchType == dag.PrefixMatchSegment } - // Segment prefixes sort first as they are more specific. - return a.PrefixMatchType == dag.PrefixMatchSegment } } case *dag.RegexMatchCondition: switch b := s[j].PathMatchCondition.(type) { case *dag.RegexMatchCondition: - cmp := strings.Compare(a.Regex, b.Regex) - switch cmp { - case 1: + switch { + case len(a.Regex) > len(b.Regex): // Sort longest regex first. return true - case -1: + case len(a.Regex) < len(b.Regex): return false default: - return compareRoutesByMethodHeaderQueryParams(s[i], s[j]) + cmp := strings.Compare(a.Regex, b.Regex) + switch cmp { + case 1: + return true + case -1: + return false + default: + return compareRoutesByMethodHeaderQueryParams(s[i], s[j]) + } } case *dag.PrefixMatchCondition: return true @@ -331,9 +345,11 @@ func (s routeSorter) Less(i, j int) bool { switch b := s[j].PathMatchCondition.(type) { case *dag.ExactMatchCondition: cmp := strings.Compare(a.Path, b.Path) + // Sorting function doesn't really matter here + // since we want exact matching. Lexicographic sorting + // is ok switch cmp { case 1: - // Sort longest path first. 
return true case -1: return false diff --git a/internal/sorter/sorter_test.go b/internal/sorter/sorter_test.go index f42c3581179..b28e61afa8d 100644 --- a/internal/sorter/sorter_test.go +++ b/internal/sorter/sorter_test.go @@ -279,32 +279,35 @@ func TestSortRoutesPathMatch(t *testing.T) { }, // Note that regex matches sort before prefix matches. { - PathMatchCondition: matchRegex("/this/is/the/longest"), + PathMatchCondition: matchRegex("/athis/is/the/longest"), }, { PathMatchCondition: matchRegex(`/foo((\/).*)*`), }, { - PathMatchCondition: matchRegex("/"), + PathMatchCondition: matchRegex("/foo.*"), + }, + { + PathMatchCondition: matchRegex("/bar.*"), }, { - PathMatchCondition: matchRegex("."), + PathMatchCondition: matchRegex("/"), }, // Prefix segment matches sort before string matches. { - PathMatchCondition: matchPrefixSegment("/path/prefix2"), + PathMatchCondition: matchPrefixSegment("/path/prefix/a"), }, { - PathMatchCondition: matchPrefixString("/path/prefix2"), + PathMatchCondition: matchPrefixString("/path/prefix/a"), }, { - PathMatchCondition: matchPrefixSegment("/path/prefix/a"), + PathMatchCondition: matchPrefixString("/path/prf222"), }, { - PathMatchCondition: matchPrefixString("/path/prefix/a"), + PathMatchCondition: matchPrefixString("/path/prf122"), }, { - PathMatchCondition: matchPrefixString("/path/prefix"), + PathMatchCondition: matchPrefixString("/path/prfx"), }, { PathMatchCondition: matchPrefixSegment("/path/p"), @@ -389,25 +392,31 @@ func TestSortRoutesLongestHeaders(t *testing.T) { PathMatchCondition: matchExact("/pathexact"), }, { - PathMatchCondition: matchRegex("/pathregex"), + PathMatchCondition: matchRegex("/pathregex2"), + HeaderMatchConditions: []dag.HeaderMatchCondition{ + presentHeader("header-name"), + }, + }, + { + PathMatchCondition: matchRegex("/pathregex1"), HeaderMatchConditions: []dag.HeaderMatchCondition{ exactHeader("header-name", "header-value"), }, }, { - PathMatchCondition: matchRegex("/pathregex"), + 
PathMatchCondition: matchRegex("/pathregex1"), HeaderMatchConditions: []dag.HeaderMatchCondition{ presentHeader("header-name"), }, }, { - PathMatchCondition: matchRegex("/pathregex"), + PathMatchCondition: matchRegex("/pathregex1"), HeaderMatchConditions: []dag.HeaderMatchCondition{ exactHeader("long-header-name", "long-header-value"), }, }, { - PathMatchCondition: matchRegex("/pathregex"), + PathMatchCondition: matchRegex("/pathregex1"), }, { PathMatchCondition: matchPrefixSegment("/path"), diff --git a/internal/status/gatewayclass.go b/internal/status/gatewayclass.go index 2cd162a8814..44c84f8c857 100644 --- a/internal/status/gatewayclass.go +++ b/internal/status/gatewayclass.go @@ -14,15 +14,12 @@ package status import ( - "context" - - "sigs.k8s.io/controller-runtime/pkg/client" gatewayapi_v1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1" ) // SetGatewayClassAccepted inserts or updates the Accepted condition // for the provided GatewayClass. -func SetGatewayClassAccepted(ctx context.Context, cli client.Client, gc *gatewayapi_v1beta1.GatewayClass, accepted bool) *gatewayapi_v1beta1.GatewayClass { +func SetGatewayClassAccepted(gc *gatewayapi_v1beta1.GatewayClass, accepted bool) *gatewayapi_v1beta1.GatewayClass { gc.Status.Conditions = mergeConditions(gc.Status.Conditions, computeGatewayClassAcceptedCondition(gc, accepted)) return gc } diff --git a/internal/xds/v3/contour_test.go b/internal/xds/v3/contour_test.go index 3d631cefd77..99b07c93567 100644 --- a/internal/xds/v3/contour_test.go +++ b/internal/xds/v3/contour_test.go @@ -225,7 +225,7 @@ type mockResource struct { typeurl func() string } -func (m *mockResource) Contents() []proto.Message { return m.contents() } -func (m *mockResource) Query(names []string) []proto.Message { return m.query(names) } -func (m *mockResource) Register(ch chan int, last int, hints ...string) { m.register(ch, last) } -func (m *mockResource) TypeURL() string { return m.typeurl() } +func (m *mockResource) Contents() []proto.Message { 
return m.contents() } +func (m *mockResource) Query(names []string) []proto.Message { return m.query(names) } +func (m *mockResource) Register(ch chan int, last int, _ ...string) { m.register(ch, last) } +func (m *mockResource) TypeURL() string { return m.typeurl() } diff --git a/internal/xdscache/snapshot.go b/internal/xdscache/snapshot.go index 5b05680f8d7..c652b487668 100644 --- a/internal/xdscache/snapshot.go +++ b/internal/xdscache/snapshot.go @@ -14,13 +14,12 @@ package xdscache import ( - "math" "reflect" - "strconv" "sync" envoy_types "github.com/envoyproxy/go-control-plane/pkg/cache/types" envoy_resource_v3 "github.com/envoyproxy/go-control-plane/pkg/resource/v3" + "github.com/google/uuid" "github.com/projectcontour/contour/internal/dag" "github.com/sirupsen/logrus" ) @@ -36,9 +35,6 @@ type SnapshotHandler struct { // resources holds the cache of xDS contents. resources map[envoy_resource_v3.Type]ResourceCache - // snapshotVersion holds the current version of the snapshot. - snapshotVersion int64 - snapshotters []Snapshotter snapLock sync.Mutex @@ -67,7 +63,7 @@ func (s *SnapshotHandler) Refresh() { } // OnChange is called when the DAG is rebuilt and a new snapshot is needed. -func (s *SnapshotHandler) OnChange(root *dag.DAG) { +func (s *SnapshotHandler) OnChange(_ *dag.DAG) { s.generateNewSnapshot() } @@ -75,7 +71,7 @@ func (s *SnapshotHandler) OnChange(root *dag.DAG) { // the Contour XDS caches. func (s *SnapshotHandler) generateNewSnapshot() { // Generate new snapshot version. - version := s.newSnapshotVersion() + version := uuid.NewString() // Convert caches to envoy xDS Resources. resources := map[envoy_resource_v3.Type][]envoy_types.Resource{ @@ -97,20 +93,6 @@ func (s *SnapshotHandler) generateNewSnapshot() { } } -// newSnapshotVersion increments the current snapshotVersion -// and returns as a string. -func (s *SnapshotHandler) newSnapshotVersion() string { - - // Reset the snapshotVersion if it ever hits max size. 
- if s.snapshotVersion == math.MaxInt64 { - s.snapshotVersion = 0 - } - - // Increment the snapshot version & return as string. - s.snapshotVersion++ - return strconv.FormatInt(s.snapshotVersion, 10) -} - // asResources casts the given slice of values (that implement the envoy_types.Resource // interface) to a slice of envoy_types.Resource. If the length of the slice is 0, it // returns nil. diff --git a/internal/xdscache/snapshot_test.go b/internal/xdscache/snapshot_test.go deleted file mode 100644 index 632694b27bd..00000000000 --- a/internal/xdscache/snapshot_test.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright Project Contour Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package xdscache - -import ( - "math" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestGetNewSnapshotVersion(t *testing.T) { - type testcase struct { - startingVersion int64 - want string - } - - run := func(t *testing.T, name string, tc testcase) { - t.Helper() - - t.Run(name, func(t *testing.T) { - t.Helper() - - sh := SnapshotHandler{ - snapshotVersion: tc.startingVersion, - } - got := sh.newSnapshotVersion() - assert.Equal(t, tc.want, got) - }) - } - - run(t, "simple", testcase{ - startingVersion: 0, - want: "1", - }) - - run(t, "big version", testcase{ - startingVersion: math.MaxInt64 - 1, - want: "9223372036854775807", - }) - - run(t, "resets if max hit", testcase{ - startingVersion: math.MaxInt64, - want: "1", - }) -} diff --git a/internal/xdscache/v3/endpointstranslator.go b/internal/xdscache/v3/endpointstranslator.go index 48f539ef30a..fe15c3feb09 100644 --- a/internal/xdscache/v3/endpointstranslator.go +++ b/internal/xdscache/v3/endpointstranslator.go @@ -348,7 +348,7 @@ func equal(a, b map[string]*envoy_endpoint_v3.ClusterLoadAssignment) bool { return true } -func (e *EndpointsTranslator) OnAdd(obj any, isInInitialList bool) { +func (e *EndpointsTranslator) OnAdd(obj any, _ bool) { switch obj := obj.(type) { case *v1.Endpoints: if !e.cache.UpdateEndpoint(obj) { diff --git a/internal/xdscache/v3/listener.go b/internal/xdscache/v3/listener.go index c9cf6d04aaf..01e99745689 100644 --- a/internal/xdscache/v3/listener.go +++ b/internal/xdscache/v3/listener.go @@ -129,6 +129,8 @@ type ListenerConfig struct { // if not specified there is no limit set. MaxRequestsPerConnection *uint32 + HTTP2MaxConcurrentStreams *uint32 + // PerConnectionBufferLimitBytes defines the soft limit on size of the listener’s new connection read and write buffers // If unspecified, an implementation defined default is applied (1MiB). 
PerConnectionBufferLimitBytes *uint32 @@ -420,6 +422,7 @@ func (c *ListenerCache) OnChange(root *dag.DAG) { ServerHeaderTransformation(cfg.ServerHeaderTransformation). NumTrustedHops(cfg.XffNumTrustedHops). MaxRequestsPerConnection(cfg.MaxRequestsPerConnection). + HTTP2MaxConcurrentStreams(cfg.HTTP2MaxConcurrentStreams). AddFilter(httpGlobalExternalAuthConfig(cfg.GlobalExternalAuthConfig)). Tracing(envoy_v3.TracingConfig(envoyTracingConfig(cfg.TracingConfig))). AddFilter(envoy_v3.GlobalRateLimitFilter(envoyGlobalRateLimitConfig(cfg.RateLimitConfig))). @@ -497,6 +500,7 @@ func (c *ListenerCache) OnChange(root *dag.DAG) { AddFilter(envoy_v3.GlobalRateLimitFilter(envoyGlobalRateLimitConfig(cfg.RateLimitConfig))). ForwardClientCertificate(forwardClientCertificate). MaxRequestsPerConnection(cfg.MaxRequestsPerConnection). + HTTP2MaxConcurrentStreams(cfg.HTTP2MaxConcurrentStreams). EnableWebsockets(listener.EnableWebsockets). Get() @@ -571,6 +575,7 @@ func (c *ListenerCache) OnChange(root *dag.DAG) { AddFilter(envoy_v3.GlobalRateLimitFilter(envoyGlobalRateLimitConfig(cfg.RateLimitConfig))). ForwardClientCertificate(forwardClientCertificate). MaxRequestsPerConnection(cfg.MaxRequestsPerConnection). + HTTP2MaxConcurrentStreams(cfg.HTTP2MaxConcurrentStreams). EnableWebsockets(listener.EnableWebsockets). 
Get() diff --git a/internal/xdscache/v3/listener_test.go b/internal/xdscache/v3/listener_test.go index 186215e39c7..0c64fc3030f 100644 --- a/internal/xdscache/v3/listener_test.go +++ b/internal/xdscache/v3/listener_test.go @@ -3684,6 +3684,142 @@ func TestListenerVisit(t *testing.T) { SocketOptions: envoy_v3.NewSocketOptions().TCPKeepalive().Build(), }), }, + "httpproxy with HTTP2MaxConcurrentStreams set in listener config": { + ListenerConfig: ListenerConfig{ + HTTP2MaxConcurrentStreams: ref.To(uint32(100)), + }, + objs: []any{ + &contour_api_v1.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "simple", + Namespace: "default", + }, + Spec: contour_api_v1.HTTPProxySpec{ + VirtualHost: &contour_api_v1.VirtualHost{ + Fqdn: "www.example.com", + }, + Routes: []contour_api_v1.Route{{ + Conditions: []contour_api_v1.MatchCondition{{ + Prefix: "/", + }}, + Services: []contour_api_v1.Service{{ + Name: "backend", + Port: 80, + }}, + }}, + }, + }, + &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backend", + Namespace: "default", + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{{ + Name: "http", + Protocol: "TCP", + Port: 80, + }}, + }, + }, + }, + want: listenermap(&envoy_listener_v3.Listener{ + Name: ENVOY_HTTP_LISTENER, + Address: envoy_v3.SocketAddress("0.0.0.0", 8080), + FilterChains: envoy_v3.FilterChains( + envoy_v3.HTTPConnectionManagerBuilder(). + RouteConfigName(ENVOY_HTTP_LISTENER). + MetricsPrefix(ENVOY_HTTP_LISTENER). + AccessLoggers(envoy_v3.FileAccessLogEnvoy(DEFAULT_HTTP_ACCESS_LOG, "", nil, v1alpha1.LogLevelInfo)). + DefaultFilters(). + HTTP2MaxConcurrentStreams(ref.To(uint32(100))). 
+ Get(), + ), + SocketOptions: envoy_v3.NewSocketOptions().TCPKeepalive().Build(), + }), + }, + "httpsproxy with HTTP2MaxConcurrentStreams set in listener config": { + ListenerConfig: ListenerConfig{ + HTTP2MaxConcurrentStreams: ref.To(uint32(101)), + }, + objs: []any{ + &contour_api_v1.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "simple", + Namespace: "default", + }, + Spec: contour_api_v1.HTTPProxySpec{ + VirtualHost: &contour_api_v1.VirtualHost{ + Fqdn: "www.example.com", + TLS: &contour_api_v1.TLS{ + SecretName: "secret", + }, + }, + Routes: []contour_api_v1.Route{{ + Services: []contour_api_v1.Service{{ + Name: "backend", + Port: 80, + }}, + }}, + }, + }, + &v1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "secret", + Namespace: "default", + }, + Type: "kubernetes.io/tls", + Data: secretdata(CERTIFICATE, RSA_PRIVATE_KEY), + }, + &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "backend", + Namespace: "default", + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{{ + Name: "http", + Protocol: "TCP", + Port: 80, + }}, + }, + }, + }, + want: listenermap(&envoy_listener_v3.Listener{ + Name: ENVOY_HTTP_LISTENER, + Address: envoy_v3.SocketAddress("0.0.0.0", 8080), + FilterChains: envoy_v3.FilterChains(envoy_v3.HTTPConnectionManagerBuilder(). + RouteConfigName(ENVOY_HTTP_LISTENER). + MetricsPrefix(ENVOY_HTTP_LISTENER). + AccessLoggers(envoy_v3.FileAccessLogEnvoy(DEFAULT_HTTP_ACCESS_LOG, "", nil, v1alpha1.LogLevelInfo)). + DefaultFilters(). + HTTP2MaxConcurrentStreams(ref.To(uint32(101))). 
+ Get(), + ), + SocketOptions: envoy_v3.NewSocketOptions().TCPKeepalive().Build(), + }, &envoy_listener_v3.Listener{ + Name: ENVOY_HTTPS_LISTENER, + Address: envoy_v3.SocketAddress("0.0.0.0", 8443), + FilterChains: []*envoy_listener_v3.FilterChain{{ + FilterChainMatch: &envoy_listener_v3.FilterChainMatch{ + ServerNames: []string{"www.example.com"}, + }, + TransportSocket: transportSocket("secret", envoy_tls_v3.TlsParameters_TLSv1_2, envoy_tls_v3.TlsParameters_TLSv1_3, nil, "h2", "http/1.1"), + Filters: envoy_v3.Filters(envoy_v3.HTTPConnectionManagerBuilder(). + AddFilter(envoy_v3.FilterMisdirectedRequests("www.example.com")). + DefaultFilters(). + MetricsPrefix(ENVOY_HTTPS_LISTENER). + RouteConfigName(path.Join("https", "www.example.com")). + AccessLoggers(envoy_v3.FileAccessLogEnvoy(DEFAULT_HTTP_ACCESS_LOG, "", nil, v1alpha1.LogLevelInfo)). + HTTP2MaxConcurrentStreams(ref.To(uint32(101))). + Get()), + }}, + ListenerFilters: envoy_v3.ListenerFilters( + envoy_v3.TLSInspector(), + ), + SocketOptions: envoy_v3.NewSocketOptions().TCPKeepalive().Build(), + }), + }, "httpproxy with PerConnectionBufferLimitBytes set in listener config": { ListenerConfig: ListenerConfig{ PerConnectionBufferLimitBytes: ref.To(uint32(32768)), diff --git a/internal/xdscache/v3/route_test.go b/internal/xdscache/v3/route_test.go index 9decc42e5ef..ff167ff4ee8 100644 --- a/internal/xdscache/v3/route_test.go +++ b/internal/xdscache/v3/route_test.go @@ -3722,9 +3722,9 @@ func TestSortLongestRouteFirst(t *testing.T) { PathMatchCondition: &dag.RegexMatchCondition{Regex: "/v1/.+"}, }}, want: []*dag.Route{{ - PathMatchCondition: &dag.RegexMatchCondition{Regex: "/v2"}, - }, { PathMatchCondition: &dag.RegexMatchCondition{Regex: "/v1/.+"}, + }, { + PathMatchCondition: &dag.RegexMatchCondition{Regex: "/v2"}, }}, }, "two exact matches": { diff --git a/internal/xdscache/v3/runtime.go b/internal/xdscache/v3/runtime.go index a163350804f..bab794e1f04 100644 --- a/internal/xdscache/v3/runtime.go +++ 
b/internal/xdscache/v3/runtime.go @@ -20,23 +20,39 @@ import ( envoy_v3 "github.com/projectcontour/contour/internal/envoy/v3" "github.com/projectcontour/contour/internal/protobuf" "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/structpb" ) +type ConfigurableRuntimeSettings struct { + MaxRequestsPerIOCycle *uint32 +} + // RuntimeCache manages the contents of the gRPC RTDS cache. type RuntimeCache struct { contour.Cond + runtimeKV map[string]*structpb.Value +} + +// NewRuntimeCache builds a RuntimeCache with the provided runtime +// settings that will be set in the runtime layer configured by Contour. +func NewRuntimeCache(runtimeSettings ConfigurableRuntimeSettings) *RuntimeCache { + runtimeKV := make(map[string]*structpb.Value) + if runtimeSettings.MaxRequestsPerIOCycle != nil && *runtimeSettings.MaxRequestsPerIOCycle > 0 { + runtimeKV["http.max_requests_per_io_cycle"] = structpb.NewNumberValue(float64(*runtimeSettings.MaxRequestsPerIOCycle)) + } + return &RuntimeCache{runtimeKV: runtimeKV} } // Contents returns all Runtime layers. func (c *RuntimeCache) Contents() []proto.Message { - return protobuf.AsMessages(envoy_v3.RuntimeLayers()) + return protobuf.AsMessages(envoy_v3.RuntimeLayers(c.runtimeKV)) } // Query returns only the "dynamic" layer if requested, otherwise empty. func (c *RuntimeCache) Query(names []string) []proto.Message { for _, name := range names { if name == envoy_v3.DynamicRuntimeLayerName { - return protobuf.AsMessages(envoy_v3.RuntimeLayers()) + return protobuf.AsMessages(envoy_v3.RuntimeLayers(c.runtimeKV)) } } return []proto.Message{} @@ -44,6 +60,6 @@ func (c *RuntimeCache) Query(names []string) []proto.Message { func (*RuntimeCache) TypeURL() string { return resource.RuntimeType } -func (c *RuntimeCache) OnChange(root *dag.DAG) { +func (c *RuntimeCache) OnChange(_ *dag.DAG) { // DAG changes do not affect runtime layers at the moment. 
} diff --git a/internal/xdscache/v3/runtime_test.go b/internal/xdscache/v3/runtime_test.go index 71793f0c40d..b7aa25815d2 100644 --- a/internal/xdscache/v3/runtime_test.go +++ b/internal/xdscache/v3/runtime_test.go @@ -18,16 +18,72 @@ import ( envoy_service_runtime_v3 "github.com/envoyproxy/go-control-plane/envoy/service/runtime/v3" "github.com/projectcontour/contour/internal/protobuf" + "github.com/projectcontour/contour/internal/ref" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/structpb" ) func TestRuntimeCacheContents(t *testing.T) { - rc := &RuntimeCache{} - protobuf.ExpectEqual(t, runtimeLayers(), rc.Contents()) + testCases := map[string]struct { + runtimeSettings ConfigurableRuntimeSettings + additionalFields map[string]*structpb.Value + }{ + "no values set": { + runtimeSettings: ConfigurableRuntimeSettings{}, + }, + "http max requests per io cycle set": { + runtimeSettings: ConfigurableRuntimeSettings{ + MaxRequestsPerIOCycle: ref.To(uint32(1)), + }, + additionalFields: map[string]*structpb.Value{ + "http.max_requests_per_io_cycle": structpb.NewNumberValue(1), + }, + }, + "http max requests per io cycle set invalid": { + runtimeSettings: ConfigurableRuntimeSettings{ + MaxRequestsPerIOCycle: ref.To(uint32(0)), + }, + }, + "http max requests per io cycle set nil": { + runtimeSettings: ConfigurableRuntimeSettings{ + MaxRequestsPerIOCycle: nil, + }, + }, + } + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + rc := NewRuntimeCache(tc.runtimeSettings) + fields := map[string]*structpb.Value{ + "re2.max_program_size.error_level": structpb.NewNumberValue(1 << 20), + "re2.max_program_size.warn_level": structpb.NewNumberValue(1000), + } + for k, v := range tc.additionalFields { + fields[k] = v + } + protobuf.ExpectEqual(t, []proto.Message{ + &envoy_service_runtime_v3.Runtime{ + Name: "dynamic", + Layer: &structpb.Struct{ + Fields: fields, + }, + }, + }, rc.Contents()) + }) + } } func TestRuntimeCacheQuery(t 
*testing.T) { + baseRuntimeLayers := []proto.Message{ + &envoy_service_runtime_v3.Runtime{ + Name: "dynamic", + Layer: &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "re2.max_program_size.error_level": structpb.NewNumberValue(1 << 20), + "re2.max_program_size.warn_level": structpb.NewNumberValue(1000), + }, + }, + }, + } testCases := map[string]struct { names []string expected []proto.Message @@ -38,7 +94,7 @@ func TestRuntimeCacheQuery(t *testing.T) { }, "names include dynamic": { names: []string{"foo", "dynamic", "bar"}, - expected: runtimeLayers(), + expected: baseRuntimeLayers, }, "names excludes dynamic": { names: []string{"foo", "bar", "baz"}, @@ -47,22 +103,8 @@ func TestRuntimeCacheQuery(t *testing.T) { } for name, tc := range testCases { t.Run(name, func(t *testing.T) { - rc := &RuntimeCache{} + rc := NewRuntimeCache(ConfigurableRuntimeSettings{}) protobuf.ExpectEqual(t, tc.expected, rc.Query(tc.names)) }) } } - -func runtimeLayers() []proto.Message { - return []proto.Message{ - &envoy_service_runtime_v3.Runtime{ - Name: "dynamic", - Layer: &structpb.Struct{ - Fields: map[string]*structpb.Value{ - "re2.max_program_size.error_level": {Kind: &structpb.Value_NumberValue{NumberValue: 1 << 20}}, - "re2.max_program_size.warn_level": {Kind: &structpb.Value_NumberValue{NumberValue: 1000}}, - }, - }, - }, - } -} diff --git a/internal/xdscache/v3/server_test.go b/internal/xdscache/v3/server_test.go index 54d845e06f0..b1716839613 100644 --- a/internal/xdscache/v3/server_test.go +++ b/internal/xdscache/v3/server_test.go @@ -27,12 +27,6 @@ import ( envoy_service_runtime_v3 "github.com/envoyproxy/go-control-plane/envoy/service/runtime/v3" envoy_service_secret_v3 "github.com/envoyproxy/go-control-plane/envoy/service/secret/v3" resource "github.com/envoyproxy/go-control-plane/pkg/resource/v3" - "github.com/projectcontour/contour/internal/contour" - "github.com/projectcontour/contour/internal/dag" - "github.com/projectcontour/contour/internal/fixture" - 
"github.com/projectcontour/contour/internal/xds" - contour_xds_v3 "github.com/projectcontour/contour/internal/xds/v3" - "github.com/projectcontour/contour/internal/xdscache" "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/codes" @@ -42,6 +36,13 @@ import ( networking_v1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + + "github.com/projectcontour/contour/internal/contour" + "github.com/projectcontour/contour/internal/dag" + "github.com/projectcontour/contour/internal/fixture" + "github.com/projectcontour/contour/internal/xds" + contour_xds_v3 "github.com/projectcontour/contour/internal/xds/v3" + "github.com/projectcontour/contour/internal/xdscache" ) func TestGRPC(t *testing.T) { @@ -207,14 +208,14 @@ func TestGRPC(t *testing.T) { &RouteCache{}, &ClusterCache{}, et, - &RuntimeCache{}, + NewRuntimeCache(ConfigurableRuntimeSettings{}), } eh = contour.NewEventHandler(contour.EventHandlerConfig{ Logger: log, Builder: new(dag.Builder), Observer: dag.ComposeObservers(xdscache.ObserversOf(resources)...), - }) + }, func() bool { return true }) srv := xds.NewServer(nil) contour_xds_v3.RegisterServer(contour_xds_v3.NewContourServer(log, xdscache.ResourcesOf(resources)...), srv) diff --git a/netlify.toml b/netlify.toml index 574ac98db83..11dab280a7c 100644 --- a/netlify.toml +++ b/netlify.toml @@ -3,47 +3,35 @@ command = "hugo --gc --minify" publish = "site/public" + [build.environment] + HUGO_VERSION = "0.119.0" + [context.production.environment] -HUGO_VERSION = "0.83.1" -HUGO_ENV = "production" -HUGO_ENABLEGITINFO = "true" + HUGO_ENV = "production" + HUGO_ENABLEGITINFO = "true" [context.split1] -command = "hugo --gc --minify --enableGitInfo" + command = "hugo --gc --minify --enableGitInfo" -[context.split1.environment] -HUGO_VERSION = "0.83.1" -HUGO_ENV = "production" + [context.split1.environment] + HUGO_ENV = "production" [context.deploy-preview] -command = "hugo --gc 
--minify --buildFuture -b $DEPLOY_PRIME_URL" - -[context.deploy-preview.environment] -HUGO_VERSION = "0.83.1" + command = "hugo --gc --minify --buildFuture -b $DEPLOY_PRIME_URL" [context.branch-deploy] -command = "hugo --gc --minify -b $DEPLOY_PRIME_URL" - -[context.branch-deploy.environment] -HUGO_VERSION = "0.83.1" + command = "hugo --gc --minify -b $DEPLOY_PRIME_URL" [context.next.environment] -HUGO_ENABLEGITINFO = "true" + HUGO_ENABLEGITINFO = "true" [build.processing] - skip_processing = false # -> Enable processing -[build.processing.css] - bundle = true # -> Concatenate consecutive CSS files together to reduce HTTP requests. - minify = true # -> Run CSS through a minifier to reduce file size. -[build.processing.js] - bundle = true # -> Concatenate consecutive JS files together to reduce HTTP requests. - minify = true # -> Run JS through a minifier to reduce file size. -[build.processing.images] - compress = true # -> Run all images through lossless image compression. + skip_processing = true # -> Disable post-processing. Asset optimization is deprecated. 
# Reference documentation # Optimization blog post: https://www.netlify.com/blog/2019/08/05/control-your-asset-optimization-settings-from-netlify.toml/ # How to guide: https://docs.netlify.com/configure-builds/file-based-configuration/#post-processing +# Hugo on Netlify docs: https://gohugo.io/hosting-and-deployment/hosting-on-netlify/ ################### # Contour redirects @@ -70,9 +58,9 @@ HUGO_ENABLEGITINFO = "true" # # kubectl apply https://projectcontour.io/quickstart/contour-gateway.yaml [[redirects]] -from = "/quickstart/contour-gateway.yaml" -to = "https://raw.githubusercontent.com/projectcontour/contour/release-1.26/examples/render/contour-gateway.yaml" -status = 302 + from = "/quickstart/contour-gateway.yaml" + to = "https://raw.githubusercontent.com/projectcontour/contour/release-1.26/examples/render/contour-gateway.yaml" + status = 302 # Redirect versioned quickstarts so that they can easily be referenced by # users or for upgrade testing. @@ -109,86 +97,86 @@ status = 302 # # kubectl apply https://projectcontour.io/quickstart/operator.yaml [[redirects]] -from = "/quickstart/operator.yaml" -to = "https://raw.githubusercontent.com/projectcontour/contour-operator/release-1.24/examples/operator/operator.yaml" -status = 302 + from = "/quickstart/operator.yaml" + to = "https://raw.githubusercontent.com/projectcontour/contour-operator/release-1.24/examples/operator/operator.yaml" + status = 302 # Redirect versioned quickstarts so that they can easily be referenced by # users or for upgrade testing. 
# # kubectl apply https://projectcontour.io/quickstart/v1.11.0/operator.yaml [[redirects]] -from = "/quickstart/*/operator.yaml" -to = "https://raw.githubusercontent.com/projectcontour/contour-operator/:splat/examples/operator/operator.yaml" -status = 302 + from = "/quickstart/*/operator.yaml" + to = "https://raw.githubusercontent.com/projectcontour/contour-operator/:splat/examples/operator/operator.yaml" + status = 302 # Redirect /quickstart/contour-custom-resource.yaml to the Contour custom resource that matches :latest. # # kubectl apply https://projectcontour.io/quickstart/contour-custom-resource.yaml [[redirects]] -from = "/quickstart/contour-custom-resource.yaml" -to = "https://raw.githubusercontent.com/projectcontour/contour-operator/release-1.24/examples/contour/contour.yaml" -status = 302 + from = "/quickstart/contour-custom-resource.yaml" + to = "https://raw.githubusercontent.com/projectcontour/contour-operator/release-1.24/examples/contour/contour.yaml" + status = 302 # Redirect versioned quickstarts so that they can easily be referenced by # users or for upgrade testing. # # kubectl apply https://projectcontour.io/quickstart/v1.11.0/contour-custom-resource.yaml [[redirects]] -from = "/quickstart/*/contour-custom-resource.yaml" -to = "https://raw.githubusercontent.com/projectcontour/contour-operator/:splat/examples/contour/contour.yaml" -status = 302 + from = "/quickstart/*/contour-custom-resource.yaml" + to = "https://raw.githubusercontent.com/projectcontour/contour-operator/:splat/examples/contour/contour.yaml" + status = 302 # Redirect /quickstart/gateway.yaml to the example Gateway manifest that matches :latest. 
# # kubectl apply https://projectcontour.io/quickstart/gateway.yaml [[redirects]] -from = "/quickstart/gateway.yaml" -to = "https://raw.githubusercontent.com/projectcontour/contour-operator/release-1.24/examples/gateway/gateway.yaml" -status = 302 + from = "/quickstart/gateway.yaml" + to = "https://raw.githubusercontent.com/projectcontour/contour-operator/release-1.24/examples/gateway/gateway.yaml" + status = 302 # Redirect versioned quickstarts so that they can easily be referenced by # users or for upgrade testing. # # kubectl apply https://projectcontour.io/quickstart/v1.11.0/gateway.yaml [[redirects]] -from = "/quickstart/*/gateway.yaml" -to = "https://raw.githubusercontent.com/projectcontour/contour-operator/:splat/examples/gateway/gateway.yaml" -status = 302 + from = "/quickstart/*/gateway.yaml" + to = "https://raw.githubusercontent.com/projectcontour/contour-operator/:splat/examples/gateway/gateway.yaml" + status = 302 # Redirect /quickstart/gateway-nodeport.yaml to the example Gateway manifest that matches :latest. # # kubectl apply https://projectcontour.io/quickstart/gateway-nodeport.yaml [[redirects]] -from = "/quickstart/gateway-nodeport.yaml" -to = "https://raw.githubusercontent.com/projectcontour/contour-operator/release-1.24/examples/gateway/gateway-nodeport.yaml" -status = 302 + from = "/quickstart/gateway-nodeport.yaml" + to = "https://raw.githubusercontent.com/projectcontour/contour-operator/release-1.24/examples/gateway/gateway-nodeport.yaml" + status = 302 # Redirect versioned quickstarts so that they can easily be referenced by # users or for upgrade testing. 
# # kubectl apply https://projectcontour.io/quickstart/v1.11.0/gateway-nodeport.yaml [[redirects]] -from = "/quickstart/*/gateway-nodeport.yaml" -to = "https://raw.githubusercontent.com/projectcontour/contour-operator/:splat/examples/gateway/gateway-nodeport.yaml" -status = 302 + from = "/quickstart/*/gateway-nodeport.yaml" + to = "https://raw.githubusercontent.com/projectcontour/contour-operator/:splat/examples/gateway/gateway-nodeport.yaml" + status = 302 # Redirect /quickstart/kuard.yaml to the example Kuard manifest that matches :latest. # # kubectl apply https://projectcontour.io/quickstart/kuard.yaml [[redirects]] -from = "/quickstart/kuard.yaml" -to = "https://raw.githubusercontent.com/projectcontour/contour-operator/release-1.24/examples/gateway/kuard/kuard.yaml" -status = 302 + from = "/quickstart/kuard.yaml" + to = "https://raw.githubusercontent.com/projectcontour/contour-operator/release-1.24/examples/gateway/kuard/kuard.yaml" + status = 302 # Redirect versioned quickstarts so that they can easily be referenced by # users or for upgrade testing. # # kubectl apply https://projectcontour.io/quickstart/v1.11.0/kuard.yaml [[redirects]] -from = "/quickstart/*/kuard.yaml" -to = "https://raw.githubusercontent.com/projectcontour/contour-operator/:splat/examples/gateway/kuard/kuard.yaml" -status = 302 + from = "/quickstart/*/kuard.yaml" + to = "https://raw.githubusercontent.com/projectcontour/contour-operator/:splat/examples/gateway/kuard/kuard.yaml" + status = 302 ##################################### diff --git a/pkg/config/parameters.go b/pkg/config/parameters.go index 462361d226c..9046c6485b9 100644 --- a/pkg/config/parameters.go +++ b/pkg/config/parameters.go @@ -484,6 +484,21 @@ type ListenerParameters struct { // SocketOptions is used to set socket options for listeners. SocketOptions SocketOptions `yaml:"socket-options"` + + // Defines the limit on number of HTTP requests that Envoy will process from a single + // connection in a single I/O cycle. 
Requests over this limit are processed in subsequent + I/O cycles. Can be used as a mitigation for CVE-2023-44487 when abusive traffic is + detected. Configures the http.max_requests_per_io_cycle Envoy runtime setting. The default + value when this is not set is no limit. + MaxRequestsPerIOCycle *uint32 `yaml:"max-requests-per-io-cycle,omitempty"` + + // Defines the value for SETTINGS_MAX_CONCURRENT_STREAMS Envoy will advertise in the + // SETTINGS frame in HTTP/2 connections and the limit for concurrent streams allowed + // for a peer on a single HTTP/2 connection. It is recommended to not set this lower + // than 100 but this field can be used to bound resource usage by HTTP/2 connections + // and mitigate attacks like CVE-2023-44487. The default value when this is not set is + // unlimited. + HTTP2MaxConcurrentStreams *uint32 `yaml:"http2-max-concurrent-streams,omitempty"` } func (p *ListenerParameters) Validate() error { @@ -503,6 +518,14 @@ func (p *ListenerParameters) Validate() error { return fmt.Errorf("invalid per connections buffer limit bytes value %q set on listener, minimum value is 1", *p.PerConnectionBufferLimitBytes) } + if p.MaxRequestsPerIOCycle != nil && *p.MaxRequestsPerIOCycle < 1 { + return fmt.Errorf("invalid max requests per IO cycle value %q set on listener, minimum value is 1", *p.MaxRequestsPerIOCycle) + } + + if p.HTTP2MaxConcurrentStreams != nil && *p.HTTP2MaxConcurrentStreams < 1 { + return fmt.Errorf("invalid max HTTP/2 concurrent streams value %q set on listener, minimum value is 1", *p.HTTP2MaxConcurrentStreams) + } + return p.SocketOptions.Validate() } diff --git a/pkg/config/parameters_test.go b/pkg/config/parameters_test.go index bcd46957a99..31467990952 100644 --- a/pkg/config/parameters_test.go +++ b/pkg/config/parameters_test.go @@ -461,6 +461,13 @@ listener: max-requests-per-connection: 1 `) + check(func(t *testing.T, conf *Parameters) { + assert.Equal(t, ref.To(uint32(10)),
conf.Listener.HTTP2MaxConcurrentStreams) + }, ` +listener: + http2-max-concurrent-streams: 10 +`) + check(func(t *testing.T, conf *Parameters) { assert.Equal(t, ref.To(uint32(1)), conf.Listener.PerConnectionBufferLimitBytes) }, ` @@ -468,6 +475,13 @@ listener: per-connection-buffer-limit-bytes: 1 `) + check(func(t *testing.T, conf *Parameters) { + assert.Equal(t, ref.To(uint32(1)), conf.Listener.MaxRequestsPerIOCycle) + }, ` +listener: + max-requests-per-io-cycle: 1 +`) + check(func(t *testing.T, conf *Parameters) { assert.Equal(t, ref.To(uint32(1)), conf.Cluster.MaxRequestsPerConnection) }, ` @@ -564,6 +578,22 @@ func TestListenerValidation(t *testing.T) { PerConnectionBufferLimitBytes: ref.To(uint32(0)), } require.Error(t, l.Validate()) + l = &ListenerParameters{ + MaxRequestsPerIOCycle: ref.To(uint32(1)), + } + require.NoError(t, l.Validate()) + l = &ListenerParameters{ + MaxRequestsPerIOCycle: ref.To(uint32(0)), + } + require.Error(t, l.Validate()) + l = &ListenerParameters{ + HTTP2MaxConcurrentStreams: ref.To(uint32(1)), + } + require.NoError(t, l.Validate()) + l = &ListenerParameters{ + HTTP2MaxConcurrentStreams: ref.To(uint32(0)), + } + require.Error(t, l.Validate()) l = &ListenerParameters{ SocketOptions: SocketOptions{ TOS: 64, diff --git a/site/content/docs/1.24/configuration.md b/site/content/docs/1.24/configuration.md index edb968bc45e..94e7e0ad17a 100644 --- a/site/content/docs/1.24/configuration.md +++ b/site/content/docs/1.24/configuration.md @@ -185,6 +185,8 @@ The listener configuration block can be used to configure various parameters for | Field Name | Type | Default | Description | | ------------------- | ------ | ------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | connection-balancer | string | `""` | This field specifies the 
listener connection balancer. If the value is `exact`, the listener will use the exact connection balancer to balance connections between threads in a single Envoy process. See [the Envoy documentation][14] for more information. | +| max-requests-per-io-cycle | int | none | Defines the limit on number of HTTP requests that Envoy will process from a single connection in a single I/O cycle. Requests over this limit are processed in subsequent I/O cycles. Can be used as a mitigation for CVE-2023-44487 when abusive traffic is detected. Configures the `http.max_requests_per_io_cycle` Envoy runtime setting. The default value when this is not set is no limit. | +| http2-max-concurrent-streams | int | none | Defines the value for SETTINGS_MAX_CONCURRENT_STREAMS Envoy will advertise in the SETTINGS frame in HTTP/2 connections and the limit for concurrent streams allowed for a peer on a single HTTP/2 connection. It is recommended to not set this lower than 100 but this field can be used to bound resource usage by HTTP/2 connections and mitigate attacks like CVE-2023-44487. The default value when this is not set is unlimited. | ### Server Configuration diff --git a/site/content/docs/1.25/configuration.md b/site/content/docs/1.25/configuration.md index 273d4a248fb..9d9014ec444 100644 --- a/site/content/docs/1.25/configuration.md +++ b/site/content/docs/1.25/configuration.md @@ -186,6 +186,8 @@ The listener configuration block can be used to configure various parameters for | Field Name | Type | Default | Description | | ------------------- | ------ | ------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | connection-balancer | string | `""` | This field specifies the listener connection balancer. 
If the value is `exact`, the listener will use the exact connection balancer to balance connections between threads in a single Envoy process. See [the Envoy documentation][14] for more information. | +| max-requests-per-io-cycle | int | none | Defines the limit on number of HTTP requests that Envoy will process from a single connection in a single I/O cycle. Requests over this limit are processed in subsequent I/O cycles. Can be used as a mitigation for CVE-2023-44487 when abusive traffic is detected. Configures the `http.max_requests_per_io_cycle` Envoy runtime setting. The default value when this is not set is no limit. | +| http2-max-concurrent-streams | int | none | Defines the value for SETTINGS_MAX_CONCURRENT_STREAMS Envoy will advertise in the SETTINGS frame in HTTP/2 connections and the limit for concurrent streams allowed for a peer on a single HTTP/2 connection. It is recommended to not set this lower than 100 but this field can be used to bound resource usage by HTTP/2 connections and mitigate attacks like CVE-2023-44487. The default value when this is not set is unlimited. | ### Server Configuration diff --git a/site/content/docs/1.26/configuration.md b/site/content/docs/1.26/configuration.md index 8867f89d113..dd800bc9ab2 100644 --- a/site/content/docs/1.26/configuration.md +++ b/site/content/docs/1.26/configuration.md @@ -195,6 +195,8 @@ The listener configuration block can be used to configure various parameters for | max-requests-per-connection | int | none | This field specifies the maximum requests for downstream connections. If not specified, there is no limit | | per-connection-buffer-limit-bytes | int | 1MiB* | This field specifies the soft limit on size of the listener’s new connection read and write buffer. If not specified, Envoy defaults of 1MiB apply | | socket-options | SocketOptions | | The [Socket Options](#socket-options) for Envoy listeners. 
| +| max-requests-per-io-cycle | int | none | Defines the limit on number of HTTP requests that Envoy will process from a single connection in a single I/O cycle. Requests over this limit are processed in subsequent I/O cycles. Can be used as a mitigation for CVE-2023-44487 when abusive traffic is detected. Configures the `http.max_requests_per_io_cycle` Envoy runtime setting. The default value when this is not set is no limit. | +| http2-max-concurrent-streams | int | none | Defines the value for SETTINGS_MAX_CONCURRENT_STREAMS Envoy will advertise in the SETTINGS frame in HTTP/2 connections and the limit for concurrent streams allowed for a peer on a single HTTP/2 connection. It is recommended to not set this lower than 100 but this field can be used to bound resource usage by HTTP/2 connections and mitigate attacks like CVE-2023-44487. The default value when this is not set is unlimited. | _This is Envoy's default setting value and is not explicitly configured by Contour._ diff --git a/site/content/docs/main/config/api-reference.html b/site/content/docs/main/config/api-reference.html index 172f69733e9..f49ea2bf5c2 100644 --- a/site/content/docs/main/config/api-reference.html +++ b/site/content/docs/main/config/api-reference.html @@ -2097,7 +2097,7 @@

HeadersPolicy

HeadersPolicy defines how headers are managed during forwarding. -The Host header is treated specially and if set in a HTTP response +The Host header is treated specially and if set in a HTTP request will be used as the SNI server name when forwarding over TLS. It is an error to attempt to set the Host header in a HTTP response.

@@ -3705,6 +3705,14 @@

Route (Optional)

The policy for managing request headers during proxying.

+

You may dynamically rewrite the Host header to be forwarded +upstream to the content of a request header using +the below format “%REQ(X-Header-Name)%”. If the value of the header +is empty, it is ignored.

+

*NOTE: Pay attention to the potential security implications of using this option. +Provided header must come from trusted source.

+

**NOTE: The header rewrite is only done while forwarding and has no bearing +on the routing decision.

@@ -6152,6 +6160,21 @@

ContourSettings the annotations for Prometheus will be appended or overwritten with predefined value.

+ + +podLabels +
+ +map[string]string + + + +(Optional) +

PodLabels defines labels to add to the Contour pods. +If there is a label with the same key as in ContourDeploymentSpec.ResourceLabels, +the one here has a higher priority.

+ +

CustomTag @@ -6776,6 +6799,41 @@

EnvoyListenerConfig Single set of options are applied to all listeners.

+ + +maxRequestsPerIOCycle +
+ +uint32 + + + +(Optional) +

Defines the limit on number of HTTP requests that Envoy will process from a single +connection in a single I/O cycle. Requests over this limit are processed in subsequent +I/O cycles. Can be used as a mitigation for CVE-2023-44487 when abusive traffic is +detected. Configures the http.max_requests_per_io_cycle Envoy runtime setting. The default +value when this is not set is no limit.

+ + + + +httpMaxConcurrentStreams +
+ +uint32 + + + +(Optional) +

Defines the value for SETTINGS_MAX_CONCURRENT_STREAMS Envoy will advertise in the +SETTINGS frame in HTTP/2 connections and the limit for concurrent streams allowed +for a peer on a single HTTP/2 connection. It is recommended to not set this lower +than 100 but this field can be used to bound resource usage by HTTP/2 connections +and mitigate attacks like CVE-2023-44487. The default value when this is not set is +unlimited.

+ +

EnvoyLogging @@ -6988,6 +7046,21 @@

EnvoySettings +podLabels +
+ +map[string]string + + + +(Optional) +

PodLabels defines labels to add to the Envoy pods. +If there is a label with the same key as in ContourDeploymentSpec.ResourceLabels, +the one here has a higher priority.

+ + + + resources
@@ -7067,6 +7140,23 @@

EnvoySettings defaults to 0.

+ + +overloadMaxHeapSize +
+ +uint64 + + + +(Optional) +

OverloadMaxHeapSize defines the maximum heap memory of the envoy controlled by the overload manager. +When the value is greater than 0, the overload manager is enabled, +and when envoy reaches 95% of the maximum heap size, it performs a shrink heap operation. +When it reaches 98% of the maximum heap size, Envoy will stop accepting requests. +More info: https://projectcontour.io/docs/main/config/overload-manager/

+ +

EnvoyTLS diff --git a/site/content/docs/main/config/request-rewriting.md b/site/content/docs/main/config/request-rewriting.md index a01dd78bcae..88fa3cc2508 100644 --- a/site/content/docs/main/config/request-rewriting.md +++ b/site/content/docs/main/config/request-rewriting.md @@ -257,3 +257,81 @@ For per-Route requestHeadersPolicy only `%CONTOUR_NAMESPACE%` is set and using `%CONTOUR_SERVICE_NAME%` and `%CONTOUR_SERVICE_PORT%` will end up as the literal values `%%CONTOUR_SERVICE_NAME%%` and `%%CONTOUR_SERVICE_PORT%%`, respectively. + +### Manipulating the Host header + +Contour allows users to manipulate the host header in two ways, using the `requestHeadersPolicy`. + +#### Static rewrite + +You can set the host to a static value. This can be done on the route and service level. + +```yaml +apiVersion: projectcontour.io/v1 +kind: HTTPProxy +metadata: + name: static-host-header-rewrite-route +spec: + fqdn: local.projectcontour.io + routes: + - conditions: + - prefix: / + services: + - name: s1 + port: 80 + - requestHeaderPolicy: + set: + - name: host + value: foo.com +``` + +```yaml +apiVersion: projectcontour.io/v1 +kind: HTTPProxy +metadata: + name: static-host-header-rewrite-service +spec: + fqdn: local.projectcontour.io + routes: + - conditions: + - prefix: / + services: + - name: s1 + port: 80 + - requestHeaderPolicy: + set: + - name: host + value: "foo.com" +``` + +#### Dynamic rewrite + +You can also set the host header dynamically with the content of an existing header. +The format has to be `"%REQ()%"`. If the header is empty, it is ignored. + +```yaml +apiVersion: projectcontour.io/v1 +kind: HTTPProxy +metadata: + name: dynamic-host-header-rewrite-route +spec: + fqdn: local.projectcontour.io + routes: + - conditions: + - prefix: / + services: + - name: s1 + port: 80 + - requestHeaderPolicy: + set: + - name: host + value: "%REQ(x-rewrite-header)%" +``` + +Note: Only one of static or dynamic host rewrite can be specified. 
+ +Note: Dynamic rewrite is only available at the route level and not possible on the service level. + +Note: Pay attention to the potential security implications of using this option, the provided header must come from a trusted source. + +Note: The header rewrite is only done while forwarding and has no bearing on the routing decision. diff --git a/site/content/docs/main/configuration.md b/site/content/docs/main/configuration.md index 8867f89d113..d09df20caaa 100644 --- a/site/content/docs/main/configuration.md +++ b/site/content/docs/main/configuration.md @@ -195,6 +195,8 @@ The listener configuration block can be used to configure various parameters for | max-requests-per-connection | int | none | This field specifies the maximum requests for downstream connections. If not specified, there is no limit | | per-connection-buffer-limit-bytes | int | 1MiB* | This field specifies the soft limit on size of the listener’s new connection read and write buffer. If not specified, Envoy defaults of 1MiB apply | | socket-options | SocketOptions | | The [Socket Options](#socket-options) for Envoy listeners. | +| max-requests-per-io-cycle | int | none | Defines the limit on number of HTTP requests that Envoy will process from a single connection in a single I/O cycle. Requests over this limit are processed in subsequent I/O cycles. Can be used as a mitigation for CVE-2023-44487 when abusive traffic is detected. Configures the `http.max_requests_per_io_cycle` Envoy runtime setting. The default value when this is not set is no limit. | +| http2-max-concurrent-streams | int | none | Defines the value for SETTINGS_MAX_CONCURRENT_STREAMS Envoy will advertise in the SETTINGS frame in HTTP/2 connections and the limit for concurrent streams allowed for a peer on a single HTTP/2 connection. It is recommended to not set this lower than 100 but this field can be used to bound resource usage by HTTP/2 connections and mitigate attacks like CVE-2023-44487. 
The default value when this is not set is unlimited. | _This is Envoy's default setting value and is not explicitly configured by Contour._ @@ -497,7 +499,7 @@ There are flags that can be passed to `contour bootstrap` that help configure ho connects to Contour: | Flag | Default | Description | -| -------------------------------------- | ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| -------------------------------------- |-------------------| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | --resources-dir | "" | Directory where resource files will be written. | | --admin-address | /admin/admin.sock | Path to Envoy admin unix domain socket. | | --admin-port (Deprecated) | 9001 | Deprecated: Port is now configured as a Contour flag. | @@ -510,7 +512,7 @@ connects to Contour: | --xds-resource-version | v3 | Currently, the only valid xDS API resource version is `v3`. | | --dns-lookup-family | auto | Defines what DNS Resolution Policy to use for Envoy -> Contour cluster name lookup. Either v4, v6, auto or all. | | --log-format | text | Log output format for Contour. Either text or json. | -| --overload-max-heap | "" | Defines the maximum heap size in bytes until Envoy overload manager stops accepting new connections. | +| --overload-max-heap | 0 | Defines the maximum heap memory of the envoy controlled by the overload manager. When the value is greater than 0, the overload manager is enabled, and when envoy reaches 95% of the maximum heap size, it performs a shrink heap operation. When it reaches 98% of the maximum heap size, Envoy Will stop accepting requests. 
| [1]: {{< param github_url>}}/tree/{{< param branch >}}/examples/contour/01-contour-config.yaml diff --git a/site/content/docs/v1.1.0/annotations.md b/site/content/docs/v1.1.0/annotations.md index d37c395b9fc..6409be3ed25 100644 --- a/site/content/docs/v1.1.0/annotations.md +++ b/site/content/docs/v1.1.0/annotations.md @@ -16,7 +16,7 @@ The contour.heptio.com annotations are deprecated, please use the < ## Standard Kubernetes Ingress annotations -The following Kubernetes annotions are supported on [`Ingress`] objects: +The following Kubernetes annotations are supported on [`Ingress`] objects: - `kubernetes.io/ingress.class`: The Ingress class that should interpret and serve the Ingress. If not set, then all Ingress controllers serve the Ingress. If specified as `kubernetes.io/ingress.class: contour`, then Contour serves the Ingress. If any other value, Contour ignores the Ingress definition. You can override the default class `contour` with the `--ingress-class-name` flag at runtime. This can be useful while you are migrating from another controller, or if you need multiple instances of Contour. - `ingress.kubernetes.io/force-ssl-redirect`: Requires TLS/SSL for the Ingress to Envoy by setting the [Envoy virtual host option require_tls][16]. 
diff --git a/site/content/resources/compatibility-matrix.md b/site/content/resources/compatibility-matrix.md index e8a3d9f4780..adac3065784 100644 --- a/site/content/resources/compatibility-matrix.md +++ b/site/content/resources/compatibility-matrix.md @@ -10,11 +10,14 @@ These combinations of versions are specifically tested in CI and supported by th | Contour Version | Envoy Version | Kubernetes Versions | Operator Version | Gateway API Version | | --------------- | :------------------- | ------------------- | ---------------- | --------------------| -| main | [1.27.0][41] | 1.28, 1.27, 1.26 | N/A | v1alpha2, v1beta1 | +| main | [1.27.2][42] | 1.28, 1.27, 1.26 | N/A | v1alpha2, v1beta1 | +| 1.26.1 | [1.27.2][42] | 1.28, 1.27, 1.26 | N/A | v1alpha2, v1beta1 | | 1.26.0 | [1.27.0][41] | 1.28, 1.27, 1.26 | N/A | v1alpha2, v1beta1 | +| 1.25.3 | [1.26.6][43] | 1.27, 1.26, 1.25 | N/A | v1alpha2, v1beta1 | | 1.25.2 | [1.26.4][40] | 1.27, 1.26, 1.25 | N/A | v1alpha2, v1beta1 | | 1.25.1 | [1.26.4][40] | 1.27, 1.26, 1.25 | N/A | v1alpha2, v1beta1 | | 1.25.0 | [1.26.1][35] | 1.27, 1.26, 1.25 | N/A | v1alpha2, v1beta1 | +| 1.24.6 | [1.25.11][44] | 1.26, 1.25, 1.24 | N/A | v1alpha2, v1beta1 | | 1.24.5 | [1.25.9][39] | 1.26, 1.25, 1.24 | N/A | v1alpha2, v1beta1 | | 1.24.4 | [1.25.6][36] | 1.26, 1.25, 1.24 | N/A | v1alpha2, v1beta1 | | 1.24.3 | [1.25.4][32] | 1.26, 1.25, 1.24 | N/A | v1alpha2, v1beta1 | @@ -166,6 +169,9 @@ __Note:__ This list of extensions was last verified to be complete with Envoy v1 [39]: https://www.envoyproxy.io/docs/envoy/v1.25.9/version_history/v1.25/v1.25.9 [40]: https://www.envoyproxy.io/docs/envoy/v1.26.4/version_history/v1.26/v1.26.4 [41]: https://www.envoyproxy.io/docs/envoy/v1.27.0/version_history/v1.27/v1.27.0 +[42]: https://www.envoyproxy.io/docs/envoy/v1.27.2/version_history/v1.27/v1.27.2 +[43]: https://www.envoyproxy.io/docs/envoy/v1.26.6/version_history/v1.26/v1.26.6 +[44]: 
https://www.envoyproxy.io/docs/envoy/v1.25.11/version_history/v1.25/v1.25.11 [50]: https://github.com/projectcontour/contour-operator [51]: https://github.com/projectcontour/contour-operator/releases/tag/v1.11.0 diff --git a/site/content/resources/faq.md b/site/content/resources/faq.md index cc215e8104c..a3d0526ebb6 100644 --- a/site/content/resources/faq.md +++ b/site/content/resources/faq.md @@ -29,7 +29,7 @@ More information about the HTTPProxy API can be found [in the HTTPProxy document ## Q: When I load my site in Safari, it shows me an empty page. Developer tools show that the HTTP response was 421. Why does this happen? -The HTTP/2 specification allows user agents (browsers) to re-use TLS sessions to different hostnames as long as they share an IP address and a TLS server certificate (see [RFC 7540](https://tools.ietf.org/html/rfc7540#section-9.1.1)). +The HTTP/2 specification allows user agents (browsers) to reuse TLS sessions to different hostnames as long as they share an IP address and a TLS server certificate (see [RFC 7540](https://tools.ietf.org/html/rfc7540#section-9.1.1)). Sharing a TLS certificate typically uses a wildcard certificate, or a certificate containing multiple alternate names. If this kind of session reuse is not supported by the server, it sends a "421 Misdirected Request", and the user agent may retry the request with a new TLS session. Although Chrome and Firefox correctly retry 421 responses, Safari does not, and simply displays the 421 response body. @@ -39,7 +39,7 @@ This is done for security reasons, so that TLS protocol configuration guarantees The best workaround for this Safari issue is to avoid the use of wildcard certificates. [cert-manager](https://cert-manager.io) can automatically issue TLS certificates for Ingress and HTTPProxy resources (see the [configuration guide][2]). -If wildcard certificates cannot be avoided, the other workaround is to disable HTTP/2 support which will prevent inappropriate TLS session re-use. 
+If wildcard certificates cannot be avoided, the other workaround is to disable HTTP/2 support which will prevent inappropriate TLS session reuse. HTTP/2 support can be disabled by setting the `default-http-versions` field in the Contour [configuration file][3]. ## Q: Why is the Envoy container not accepting connections even though Contour is running? diff --git a/test/conformance/gatewayapi/gateway_conformance_test.go b/test/conformance/gatewayapi/gateway_conformance_test.go index e06e499b39d..7247eb22f9b 100644 --- a/test/conformance/gatewayapi/gateway_conformance_test.go +++ b/test/conformance/gatewayapi/gateway_conformance_test.go @@ -18,7 +18,7 @@ package gatewayapi import ( "testing" - "github.com/bombsimon/logrusr/v2" + "github.com/bombsimon/logrusr/v4" "github.com/sirupsen/logrus" "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/util/sets" diff --git a/test/e2e/deployment.go b/test/e2e/deployment.go index a4d95900910..16227b8cc0a 100644 --- a/test/e2e/deployment.go +++ b/test/e2e/deployment.go @@ -27,6 +27,7 @@ import ( "path/filepath" "runtime" "strconv" + "strings" "time" "github.com/onsi/gomega/gexec" @@ -696,6 +697,7 @@ func localAddress() string { } func (d *Deployment) StopLocalContour(contourCmd *gexec.Session, configFile string) error { + defer os.RemoveAll(configFile) // Look for the ENV variable to tell if this test run should use // the ContourConfiguration file or the ContourConfiguration CRD. @@ -714,8 +716,11 @@ func (d *Deployment) StopLocalContour(contourCmd *gexec.Session, configFile stri // Default timeout of 1s produces test flakes, // a minute should be more than enough to avoid them. 
- contourCmd.Terminate().Wait(time.Minute) - return os.RemoveAll(configFile) + logs := contourCmd.Terminate().Wait(time.Minute).Err.Contents() + if strings.Contains(string(logs), "DATA RACE") { + return errors.New("Detected data race, see log output above to diagnose") + } + return nil } // Convenience method for deploying the pieces of the deployment needed for diff --git a/test/e2e/framework.go b/test/e2e/framework.go index 7ff7e351768..3af1dd02c8d 100644 --- a/test/e2e/framework.go +++ b/test/e2e/framework.go @@ -26,7 +26,7 @@ import ( "strconv" "time" - "github.com/bombsimon/logrusr/v2" + "github.com/bombsimon/logrusr/v4" certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" "github.com/davecgh/go-spew/spew" "github.com/onsi/ginkgo/v2" @@ -199,7 +199,7 @@ func NewFramework(inClusterTestSuite bool) *Framework { } var err error - contourBin, err = gexec.Build("github.com/projectcontour/contour/cmd/contour") + contourBin, err = gexec.Build("github.com/projectcontour/contour/cmd/contour", "-race") require.NoError(t, err) } diff --git a/test/e2e/httpproxy/default_global_rate_limiting_test.go b/test/e2e/httpproxy/default_global_rate_limiting_test.go index b9f00a643eb..7682d803745 100644 --- a/test/e2e/httpproxy/default_global_rate_limiting_test.go +++ b/test/e2e/httpproxy/default_global_rate_limiting_test.go @@ -16,6 +16,7 @@ package httpproxy import ( + "context" "net/http" . 
"github.com/onsi/ginkgo/v2" @@ -23,6 +24,8 @@ import ( "github.com/projectcontour/contour/test/e2e" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" ) func testDefaultGlobalRateLimitingVirtualHostNonTLS(namespace string) { @@ -362,3 +365,84 @@ func testDefaultGlobalRateLimitingVirtualHostTLS(namespace string) { require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) }) } + +func testDefaultGlobalRateLimitingWithVhRateLimitsIgnore(namespace string) { + Specify("default global rate limit policy is applied and route opted out from the virtual host rate limit policy", func() { + t := f.T() + + f.Fixtures.Echo.Deploy(namespace, "echo") + + p := &contourv1.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "defaultglobalratelimitvhratelimits", + }, + Spec: contourv1.HTTPProxySpec{ + VirtualHost: &contourv1.VirtualHost{ + Fqdn: "defaultglobalratelimitvhratelimits.projectcontour.io", + }, + Routes: []contourv1.Route{ + { + Services: []contourv1.Service{ + { + Name: "echo", + Port: 80, + }, + }, + Conditions: []contourv1.MatchCondition{ + { + Prefix: "/echo", + }, + }, + }, + }, + }, + } + p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) + + // Wait until we get a 429 from the proxy confirming + // that we've exceeded the rate limit. 
+ res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Condition: e2e.HasStatusCode(429), + Path: "/echo", + RequestOpts: []func(*http.Request){ + e2e.OptSetHeaders(map[string]string{ + "X-Another-Header": "randomvalue", + }), + }, + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the route. + p.Spec.Routes[0].RateLimitPolicy = &contourv1.RateLimitPolicy{ + Global: &contourv1.GlobalRateLimitPolicy{ + Disabled: true, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // We set vh_rate_limits to ignore, which means the route should ignore any rate limit policy + // set by the virtual host. Make another request to confirm 200. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + RequestOpts: []func(*http.Request){ + e2e.OptSetHeaders(map[string]string{ + "X-Another-Header": "randomvalue", + }), + }, + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + }) +} diff --git a/test/e2e/httpproxy/global_rate_limiting_test.go b/test/e2e/httpproxy/global_rate_limiting_test.go index 0ef50bbe943..d0e4788021a 100644 --- a/test/e2e/httpproxy/global_rate_limiting_test.go +++ b/test/e2e/httpproxy/global_rate_limiting_test.go @@ -413,3 +413,210 @@ func testGlobalRateLimitingRouteTLS(namespace string) { require.Truef(t, ok, "expected 200 response code for non-rate-limited route, got %d", res.StatusCode) }) } + +func testDisableVirtualHostGlobalRateLimitingOnRoute(namespace string) { + Specify("global rate limit policy set on virtualhost is applied with disabled 
set to false on a route", func() { + t := f.T() + + f.Fixtures.Echo.Deploy(namespace, "echo") + + p := &contourv1.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "globalratelimitvhostnontls", + }, + Spec: contourv1.HTTPProxySpec{ + VirtualHost: &contourv1.VirtualHost{ + Fqdn: "globalratelimitvhostnontls.projectcontour.io", + }, + Routes: []contourv1.Route{ + { + Services: []contourv1.Service{ + { + Name: "echo", + Port: 80, + }, + }, + Conditions: []contourv1.MatchCondition{ + { + Prefix: "/echo", + }, + }, + }, + }, + }, + } + p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) + + // Wait until we get a 200 from the proxy confirming + // the pods are up and serving traffic. + res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the virtual host. + p.Spec.VirtualHost.RateLimitPolicy = &contourv1.RateLimitPolicy{ + Global: &contourv1.GlobalRateLimitPolicy{ + Descriptors: []contourv1.RateLimitDescriptor{ + { + Entries: []contourv1.RateLimitDescriptorEntry{ + { + GenericKey: &contourv1.GenericKeyDescriptor{ + Value: "randomvalue", + }, + }, + }, + }, + }, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // Wait until we confirm a 429 response is now gotten when we exceed the rate limit. 
+ res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(429), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Set disabled to false explicitly on the route. + p.Spec.Routes[0].RateLimitPolicy = &contourv1.RateLimitPolicy{ + Global: &contourv1.GlobalRateLimitPolicy{ + Disabled: false, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // Confirm we still see a 429 response. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(429), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + }) + + Specify("global rate limit policy set on virtualhost is applied with disabled set to true on a route", func() { + t := f.T() + + f.Fixtures.Echo.Deploy(namespace, "echo") + + p := &contourv1.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "globalratelimitvhostnontls", + }, + Spec: contourv1.HTTPProxySpec{ + VirtualHost: &contourv1.VirtualHost{ + Fqdn: "globalratelimitvhostnontls.projectcontour.io", + }, + Routes: []contourv1.Route{ + { + Services: []contourv1.Service{ + { + Name: "echo", + Port: 80, + }, + }, + Conditions: []contourv1.MatchCondition{ + { + Prefix: "/echo", + }, + }, + }, + }, + }, + } + p, _ = f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) + + // Wait until we get a 200 from the proxy confirming + // the pods are up and serving traffic. 
+ res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Add a global rate limit policy on the virtual host. + p.Spec.VirtualHost.RateLimitPolicy = &contourv1.RateLimitPolicy{ + Global: &contourv1.GlobalRateLimitPolicy{ + Descriptors: []contourv1.RateLimitDescriptor{ + { + Entries: []contourv1.RateLimitDescriptorEntry{ + { + GenericKey: &contourv1.GenericKeyDescriptor{ + Value: "randomvalue", + }, + }, + }, + }, + }, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // Wait until we confirm a 429 response is now gotten when we exceed the rate limit. + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(429), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 429 response code, got %d", res.StatusCode) + + require.NoError(t, retry.RetryOnConflict(retry.DefaultBackoff, func() error { + if err := f.Client.Get(context.TODO(), client.ObjectKeyFromObject(p), p); err != nil { + return err + } + + // Disable Vhost global rate limit policy on the route. 
+ p.Spec.Routes[0].RateLimitPolicy = &contourv1.RateLimitPolicy{ + Global: &contourv1.GlobalRateLimitPolicy{ + Disabled: true, + }, + } + + return f.Client.Update(context.TODO(), p) + })) + + // Make another request against the proxy, confirm a 200 response + // is now returned since the route explicitly opted out of the vhost global rate limiting + res, ok = f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Path: "/echo", + Condition: e2e.HasStatusCode(200), + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + }) +} diff --git a/test/e2e/httpproxy/host_header_rewrite_test.go b/test/e2e/httpproxy/host_header_rewrite_test.go index 6e53711c6dc..ff59aad253d 100644 --- a/test/e2e/httpproxy/host_header_rewrite_test.go +++ b/test/e2e/httpproxy/host_header_rewrite_test.go @@ -16,15 +16,19 @@ package httpproxy import ( + "context" + "net/http" + . "github.com/onsi/ginkgo/v2" contourv1 "github.com/projectcontour/contour/apis/projectcontour/v1" "github.com/projectcontour/contour/test/e2e" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func testHostHeaderRewrite(namespace string) { +func testHostRewriteLiteral(namespace string) { Specify("hostname can be rewritten with policy on route", func() { t := f.T() @@ -71,3 +75,191 @@ func testHostHeaderRewrite(namespace string) { assert.Equal(t, "rewritten.com", f.GetEchoResponseBody(res.Body).Host) }) } + +func testHostRewriteHeaderHTTPService(namespace string) { + opts := []func(*http.Request){ + e2e.OptSetHeaders(map[string]string{ + "x-host-rewrite": "dynamichostrewritten.com", + }), + } + + Specify("hostname can be rewritten from header with policy on route", func() { + t := f.T() + + f.Fixtures.Echo.Deploy(namespace, "ingress-conformance-echo") + + p := &contourv1.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + 
Namespace: namespace, + Name: "host-header-rewrite", + }, + Spec: contourv1.HTTPProxySpec{ + VirtualHost: &contourv1.VirtualHost{ + Fqdn: "dynamichostrewrite.projectcontour.io", + }, + Routes: []contourv1.Route{ + { + Services: []contourv1.Service{ + { + Name: "ingress-conformance-echo", + Port: 80, + }, + }, + RequestHeadersPolicy: &contourv1.HeadersPolicy{ + Set: []contourv1.HeaderValue{ + { + Name: "Host", + Value: "%REQ(x-host-rewrite)%", + }, + }, + }, + }, + }, + }, + } + f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) + + res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Condition: e2e.HasStatusCode(200), + RequestOpts: opts, + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + assert.Equal(t, "dynamichostrewritten.com", f.GetEchoResponseBody(res.Body).Host) + }) +} + +func testHostRewriteHeaderHTTPSService(namespace string) { + opts := []func(*http.Request){ + e2e.OptSetHeaders(map[string]string{ + "x-host-rewrite": "securedynamichostrewritten.com", + }), + } + + Specify("hostname can be rewritten with policy on route with https", func() { + t := f.T() + + f.Fixtures.Echo.Deploy(namespace, "ingress-conformance-echo") + f.Certs.CreateSelfSignedCert(namespace, "ingress-conformance-echo", "ingress-conformance-echo", "https.hostheaderrewrite.projectcontour.io") + + p := &contourv1.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "host-header-rewrite", + }, + Spec: contourv1.HTTPProxySpec{ + VirtualHost: &contourv1.VirtualHost{ + Fqdn: "https.dynamichostrewrite.projectcontour.io", + TLS: &contourv1.TLS{ + SecretName: "ingress-conformance-echo", + }, + }, + Routes: []contourv1.Route{ + { + Services: []contourv1.Service{ + { + Name: "ingress-conformance-echo", + Port: 80, + }, + }, + RequestHeadersPolicy: &contourv1.HeadersPolicy{ + Set: []contourv1.HeaderValue{ + { + Name: "Host", + Value: 
"%REQ(x-host-rewrite)%", + }, + }, + }, + }, + }, + }, + } + f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) + + res, ok := f.HTTP.SecureRequestUntil(&e2e.HTTPSRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Condition: e2e.HasStatusCode(200), + RequestOpts: opts, + }) + + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + assert.Equal(t, "securedynamichostrewritten.com", f.GetEchoResponseBody(res.Body).Host) + }) +} + +func testHostRewriteHeaderExternalNameService(namespace string) { + opts := []func(*http.Request){ + e2e.OptSetHeaders(map[string]string{ + "x-host-rewrite": "external.newhostrewritten.com", + }), + } + + Specify("hostname can be rewritten from header with policy on route", func() { + t := f.T() + + f.Fixtures.Echo.Deploy(namespace, "ingress-conformance-echo") + + externalNameService := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "external-name-service", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: "ingress-conformance-echo." 
+ namespace, + Ports: []corev1.ServicePort{ + { + Name: "http", + Port: 80, + }, + }, + }, + } + require.NoError(t, f.Client.Create(context.TODO(), externalNameService)) + + p := &contourv1.HTTPProxy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "host-header-rewrite", + }, + Spec: contourv1.HTTPProxySpec{ + VirtualHost: &contourv1.VirtualHost{ + Fqdn: "externalhostheaderrewrite.projectcontour.io", + }, + Routes: []contourv1.Route{ + { + Services: []contourv1.Service{ + { + Name: externalNameService.Name, + Port: 80, + }, + }, + RequestHeadersPolicy: &contourv1.HeadersPolicy{ + Set: []contourv1.HeaderValue{ + { + Name: "Host", + Value: "%REQ(x-host-rewrite)%", + }, + }, + }, + }, + }, + }, + } + f.CreateHTTPProxyAndWaitFor(p, e2e.HTTPProxyValid) + + res, ok := f.HTTP.RequestUntil(&e2e.HTTPRequestOpts{ + Host: p.Spec.VirtualHost.Fqdn, + Condition: e2e.HasStatusCode(200), + RequestOpts: opts, + }) + require.NotNil(t, res, "request never succeeded") + require.Truef(t, ok, "expected 200 response code, got %d", res.StatusCode) + + assert.Equal(t, "external.newhostrewritten.com", f.GetEchoResponseBody(res.Body).Host) + }) +} diff --git a/test/e2e/httpproxy/httpproxy_test.go b/test/e2e/httpproxy/httpproxy_test.go index fa327366305..16d0bd2b335 100644 --- a/test/e2e/httpproxy/httpproxy_test.go +++ b/test/e2e/httpproxy/httpproxy_test.go @@ -288,7 +288,21 @@ var _ = Describe("HTTPProxy", func() { f.NamespacedTest("httpproxy-dynamic-headers", testDynamicHeaders) - f.NamespacedTest("httpproxy-host-header-rewrite", testHostHeaderRewrite) + f.NamespacedTest("httpproxy-host-header-rewrite-literal", testHostRewriteLiteral) + + f.NamespacedTest("httpproxy-host-header-rewrite-header", testHostRewriteHeaderHTTPService) + + f.NamespacedTest("httpproxy-host-header-rewrite-header-https", testHostRewriteHeaderHTTPSService) + + f.NamespacedTest("httpproxy-host-header-rewrite-header-externalname-service", func(namespace string) { + Context("with ExternalName Services 
enabled", func() { + BeforeEach(func() { + contourConfig.EnableExternalNameService = true + contourConfiguration.Spec.EnableExternalNameService = ref.To(true) + }) + testHostRewriteHeaderExternalNameService(namespace) + }) + }) f.NamespacedTest("httpproxy-ip-filters", func(namespace string) { // ip filter tests rely on the ability to forge x-forwarded-for @@ -404,6 +418,11 @@ descriptors: requests_per_unit: 1 - key: generic_key value: tlsroutelimit + rate_limit: + unit: hour + requests_per_unit: 1 + - key: generic_key + value: randomvalue rate_limit: unit: hour requests_per_unit: 1`)) @@ -421,6 +440,8 @@ descriptors: f.NamespacedTest("httpproxy-global-rate-limiting-vhost-tls", withRateLimitService(testGlobalRateLimitingVirtualHostTLS)) f.NamespacedTest("httpproxy-global-rate-limiting-route-tls", withRateLimitService(testGlobalRateLimitingRouteTLS)) + + f.NamespacedTest("httpproxy-global-rate-limiting-vhost-disable-per-route", withRateLimitService(testDisableVirtualHostGlobalRateLimitingOnRoute)) }) Context("default global rate limiting", func() { @@ -444,6 +465,16 @@ descriptors: }, }, }, + { + Entries: []contour_api_v1.RateLimitDescriptorEntry{ + { + RequestHeader: &contour_api_v1.RequestHeaderDescriptor{ + HeaderName: "X-Another-Header", + DescriptorKey: "anotherHeader", + }, + }, + }, + }, }, }, } @@ -467,6 +498,16 @@ descriptors: }, }, }, + { + Entries: []contour_api_v1.RateLimitDescriptorEntry{ + { + RequestHeader: &contour_api_v1.RequestHeaderDescriptor{ + HeaderName: "X-Another-Header", + DescriptorKey: "anotherHeader", + }, + }, + }, + }, }, }, } @@ -486,6 +527,10 @@ descriptors: unit: hour requests_per_unit: 1 - key: customHeader + rate_limit: + unit: hour + requests_per_unit: 1 + - key: anotherHeader rate_limit: unit: hour requests_per_unit: 1`)) @@ -498,6 +543,7 @@ descriptors: f.NamespacedTest("httpproxy-default-global-rate-limiting-vhost-non-tls", withRateLimitService(testDefaultGlobalRateLimitingVirtualHostNonTLS)) 
f.NamespacedTest("httpproxy-default-global-rate-limiting-vhost-tls", withRateLimitService(testDefaultGlobalRateLimitingVirtualHostTLS)) + f.NamespacedTest("httpproxy-default-global-rate-limiting-vhost-rate-limits-ignore", withRateLimitService(testDefaultGlobalRateLimitingWithVhRateLimitsIgnore)) }) Context("cookie-rewriting", func() { diff --git a/test/e2e/incluster/incluster_test.go b/test/e2e/incluster/incluster_test.go index 89280ff5eaa..a4621ea95df 100644 --- a/test/e2e/incluster/incluster_test.go +++ b/test/e2e/incluster/incluster_test.go @@ -101,7 +101,7 @@ var _ = Describe("Incluster", func() { f.NamespacedTest("smoke-test", testSimpleSmoke) - f.NamespacedTest("leader-election", testLeaderElection) + testLeaderElection() f.NamespacedTest("projectcontour-resource-rbac", testProjectcontourResourcesRBAC) diff --git a/test/e2e/incluster/leaderelection_test.go b/test/e2e/incluster/leaderelection_test.go index 3458b33453e..2f42aa3c1f0 100644 --- a/test/e2e/incluster/leaderelection_test.go +++ b/test/e2e/incluster/leaderelection_test.go @@ -30,7 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -func testLeaderElection(namespace string) { +func testLeaderElection() { // This test is solely a check on the fact that we have set up leader // election resources as expected. This does not test that internal // components (e.g. status writers) are set up properly given a contour diff --git a/test/scripts/run-gateway-conformance.sh b/test/scripts/run-gateway-conformance.sh index 8bdcc637d22..4c86bc05d2b 100755 --- a/test/scripts/run-gateway-conformance.sh +++ b/test/scripts/run-gateway-conformance.sh @@ -60,7 +60,7 @@ else git checkout "${GATEWAY_API_VERSION}" # Keep the list of skipped features in sync with # test/conformance/gatewayapi/gateway_conformance_test.go. 
- go test -timeout=40m ./conformance -gateway-class=contour -all-features \ + go test -timeout=40m ./conformance -run TestConformance -gateway-class=contour -all-features \ -exempt-features=Mesh \ -skip-tests=HTTPRouteRedirectPortAndScheme fi diff --git a/versions.yaml b/versions.yaml index 5bc600c2f78..01af9e89a93 100644 --- a/versions.yaml +++ b/versions.yaml @@ -7,7 +7,7 @@ versions: - version: main supported: "false" dependencies: - envoy: "1.27.0" + envoy: "1.27.2" kubernetes: - "1.28" - "1.27" @@ -15,8 +15,19 @@ versions: gateway-api: - v1alpha2 - v1beta1 - - version: v1.26.0 + - version: v1.26.1 supported: "true" + dependencies: + envoy: "1.27.2" + kubernetes: + - "1.28" + - "1.27" + - "1.26" + gateway-api: + - v1alpha2 + - v1beta1 + - version: v1.26.0 + supported: "false" dependencies: envoy: "1.27.0" kubernetes: @@ -26,8 +37,19 @@ versions: gateway-api: - v1alpha2 - v1beta1 - - version: v1.25.2 + - version: v1.25.3 supported: "true" + dependencies: + envoy: "1.26.6" + kubernetes: + - "1.27" + - "1.26" + - "1.25" + gateway-api: + - v1alpha2 + - v1beta1 + - version: v1.25.2 + supported: "false" dependencies: envoy: "1.26.4" kubernetes: @@ -59,8 +81,20 @@ versions: gateway-api: - v1alpha2 - v1beta1 - - version: v1.24.5 + - version: v1.24.6 supported: "true" + dependencies: + envoy: "1.25.11" + kubernetes: + - "1.26" + - "1.25" + - "1.24" + gateway-api: + - v1alpha2 + - v1beta1 + contour-operator: "N/A" + - version: v1.24.5 + supported: "false" dependencies: envoy: "1.25.9" kubernetes: