diff --git a/.circleci/config.yml b/.circleci/config.yml
index ca346d0343373..5abbabeff266a 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -1,6 +1,6 @@
references:
envoy-build-image: &envoy-build-image
- envoyproxy/envoy-build:7f7f5666c72e00ac7c1909b4fc9a2121d772c859
+ envoyproxy/envoy-build:1ef23d481a4701ad4a414d1ef98036bd2ed322e7
version: 2
jobs:
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000000..3f3fbbb8dc6c8
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+/docs/root/intro/version_history.rst merge=union
diff --git a/.github/stale.yml b/.github/stale.yml
index dc297cc57d898..31ea115101448 100644
--- a/.github/stale.yml
+++ b/.github/stale.yml
@@ -41,3 +41,4 @@ issues:
Thank you for your contributions.
exemptLabels:
- help wanted
+ - no stalebot
diff --git a/.gitignore b/.gitignore
index db2be52a8b129..7f4c28b379488 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,3 +15,4 @@ SOURCE_VERSION
.cache
.vimrc
.vscode
+.vs
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index bb7c9efe3a1ee..41ee9af1e7402 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -90,7 +90,7 @@ maximize the chances of your PR being merged.
* We expect that once a PR is opened, it will be actively worked on until it is merged or closed.
We reserve the right to close PRs that are not making progress. This is generally defined as no
changes for 7 days. Obviously PRs that are closed due to lack of activity can be reopened later.
- Closing stale PRs helps us keep on top of all of the work currently in flight.
+ Closing stale PRs helps us to keep on top of all of the work currently in flight.
* If a commit deprecates a feature, the commit message must mention what has been deprecated.
Additionally, [DEPRECATED.md](DEPRECATED.md) must be updated as part of the commit.
* Please consider joining the [envoy-dev](https://groups.google.com/forum/#!forum/envoy-dev)
diff --git a/DEPRECATED.md b/DEPRECATED.md
index 43456db808c50..955494c23b6e5 100644
--- a/DEPRECATED.md
+++ b/DEPRECATED.md
@@ -8,13 +8,24 @@ A logged warning is expected for each deprecated item that is in deprecation win
## Version 1.8.0 (pending)
-* Use of the legacy
+* Use of the v1 API is deprecated. See envoy-announce
+ [email](https://groups.google.com/forum/#!topic/envoy-announce/oPnYMZw8H4U).
+* Use of the legacy
[ratelimit.proto](https://github.com/envoyproxy/envoy/blob/b0a518d064c8255e0e20557a8f909b6ff457558f/source/common/ratelimit/ratelimit.proto)
is deprecated, in favor of the proto defined in
[date-plane-api](https://github.com/envoyproxy/envoy/blob/master/api/envoy/service/ratelimit/v2/rls.proto)
Prior to 1.8.0, Envoy can use either proto to send client requests to a ratelimit server with the use of the
`use_data_plane_proto` boolean flag in the [ratelimit configuration](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/ratelimit/v2/rls.proto).
However, when using the deprecated client a warning is logged.
+* Use of the --v2-config-only flag.
+* Use of both `use_websocket` and `websocket_config` in
+ [route.proto](https://github.com/envoyproxy/envoy/blob/master/api/envoy/api/v2/route/route.proto)
+ is deprecated. Please use the new `upgrade_configs` in the
+ [HttpConnectionManager](https://github.com/envoyproxy/envoy/blob/master/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto)
+ instead.
+* Setting hosts via `hosts` field in `Cluster` is deprecated. Use `load_assignment` instead.
+* Use of `response_headers_to_*` and `request_headers_to_add` are deprecated at the `RouteAction`
+ level. Please use the configuration options at the `Route` level.
## Version 1.7.0
diff --git a/README.md b/README.md
index abfda00435a8c..f01a5e5ce56b8 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,7 @@ involved and how Envoy plays a role, read the CNCF
* [Official documentation](https://www.envoyproxy.io/)
* [FAQ](https://www.envoyproxy.io/docs/envoy/latest/faq/overview)
-* [Unofficial Chinese documentation](https://github.com/lixiangyun/envoyproxy_doc_ZH_CN)
+* [Unofficial Chinese documentation](https://github.com/servicemesher/envoy/)
* Watch [a video overview of Envoy](https://www.youtube.com/watch?v=RVZX4CwKhGE)
([transcript](https://www.microservices.com/talks/lyfts-envoy-monolith-service-mesh-matt-klein/))
to find out more about the origin story and design philosophy of Envoy
diff --git a/REPO_LAYOUT.md b/REPO_LAYOUT.md
index bc2bb7660a810..cd87e015ac5b8 100644
--- a/REPO_LAYOUT.md
+++ b/REPO_LAYOUT.md
@@ -28,7 +28,7 @@ are:
## [source/](source/)
* [common/](source/common/): Core Envoy code (not specific to extensions) that is also not
- specific to a standalone server implementation. I.e., this is code that could be used if Envoy
+ specific to a standalone server implementation. I.e., this is the code that could be used if Envoy
were eventually embedded as a library.
* [docs/](source/docs/): Miscellaneous developer/design documentation that is not relevant for
the public user documentation.
diff --git a/SECURITY_RELEASE_PROCESS.md b/SECURITY_RELEASE_PROCESS.md
index ca8d16e276781..ac49c34c04745 100644
--- a/SECURITY_RELEASE_PROCESS.md
+++ b/SECURITY_RELEASE_PROCESS.md
@@ -1,7 +1,7 @@
# Security Release Process
Envoy is a large growing community of volunteers, users, and vendors. The Envoy community has
-adopted this security disclosures and response policy to ensure we responsibly handle critical
+adopted this security disclosure and response policy to ensure we responsibly handle critical
issues.
## Product Security Team (PST)
@@ -73,7 +73,7 @@ These steps should be completed within the 1-7 days of Disclosure.
- The Fix Lead and the Fix Team will create a
[CVSS](https://www.first.org/cvss/specification-document) using the [CVSS
Calculator](https://www.first.org/cvss/calculator/3.0). The Fix Lead makes the final call on the
- calculated CVSS; it is better to move quickly than make the CVSS perfect.
+ calculated CVSS; it is better to move quickly than to make the CVSS perfect.
- The Fix Team will notify the Fix Lead that work on the fix branch is complete once there are LGTMs
on all commits in the private repo from one or more maintainers.
@@ -160,7 +160,7 @@ said issue, they must agree to the same terms and only find out information on a
In the unfortunate event you share the information beyond what is allowed by this policy, you _must_
urgently inform the envoy-security@googlegroups.com mailing list of exactly what information leaked
-and to whom. A retrospective will take place after the leak so we can assess how to not make the
+and to whom. A retrospective will take place after the leak so we can assess how to prevent making the
same mistake in the future.
If you continue to leak information and break the policy outlined here, you will be removed from the
diff --git a/STYLE.md b/STYLE.md
index 2b641607e23de..72a5411ae48cb 100644
--- a/STYLE.md
+++ b/STYLE.md
@@ -1,6 +1,6 @@
# C++ coding style
-* The Envoy source code is formatted using clang-format. Thus all white space, etc.
+* The Envoy source code is formatted using clang-format. Thus all white spaces, etc.
issues are taken care of automatically. The Travis tests will automatically check
the code format and fail. There are make targets that can both check the format
(check_format) as well as fix the code format for you (fix_format).
@@ -96,7 +96,7 @@ A few general notes on our error handling philosophy:
silently be ignored and should crash the process either via the C++ allocation error exception, an
explicit `RELEASE_ASSERT` following a third party library call, or an obvious crash on a subsequent
line via null pointer dereference. This rule is again based on the philosophy that the engineering
- costs of properly handling these cases is not worth it. Time is better spent designing proper system
+ costs of properly handling these cases are not worth it. Time is better spent designing proper system
controls that shed load if resource usage becomes too high, etc.
* The "less is more" error handling philosophy described in the previous two points is primarily
based on the fact that restarts are designed to be fast, reliable and cheap.
diff --git a/api/STYLE.md b/api/STYLE.md
index d932c3a3b17cd..92592d4aac2e1 100644
--- a/api/STYLE.md
+++ b/api/STYLE.md
@@ -131,3 +131,6 @@ the build system to prevent circular dependency formation. Package group
`//envoy/api/v2:friends` selects consumers of the core API package (services and configs)
and is the default visibility for the core API packages. The default visibility
for services and configs should be `//docs` (proto documentation tool).
+
+Extensions should use the regular hierarchy. For example, configuration for network filters belongs
+in a package under `envoy.config.filter.network`.
diff --git a/api/XDS_PROTOCOL.md b/api/XDS_PROTOCOL.md
index 2021c68334bd5..67c7cc1a7bfe6 100644
--- a/api/XDS_PROTOCOL.md
+++ b/api/XDS_PROTOCOL.md
@@ -147,7 +147,7 @@ management server will provide the complete state of the LDS/CDS resources in
each response. An absent `Listener` or `Cluster` will be deleted.
For EDS/RDS, the management server does not need to supply every requested
-resource and may also supply additional, unrequested resources, `resource_names`
+resource and may also supply additional, unrequested resources. `resource_names`
is only a hint. Envoy will silently ignore any superfluous resources. When a
requested resource is missing in a RDS or EDS update, Envoy will retain the last
known value for this resource. The management server may be able to infer all
@@ -166,7 +166,7 @@ For EDS/RDS, Envoy may either generate a distinct stream for each resource of a
given type (e.g. if each `ConfigSource` has its own distinct upstream cluster
for a management server), or may combine together multiple resource requests for
a given resource type when they are destined for the same management server.
-This is left to implementation specifics, management servers should be capable
+While this is left to implementation specifics, management servers should be capable
of handling one or more `resource_names` for a given resource type in each
request. Both sequence diagrams below are valid for fetching two EDS resources
`{foo, bar}`:
@@ -285,6 +285,51 @@ admin:
```
+### Incremental xDS
+
+Incremental xDS is a separate xDS endpoint available for ADS, CDS and RDS that
+allows:
+
+ * Incremental updates of the list of tracked resources by the xDS client.
+ This supports Envoy on-demand / lazily requesting additional resources. For
+ example, this may occur when a request corresponding to an unknown cluster
+ arrives.
+ * The xDS server can incrementally update the resources on the client.
+ This supports the goal of scalability of xDS resources. Rather than deliver
+ all 100k clusters when a single cluster is modified, the management server
+ only needs to deliver the single cluster that changed.
+
+An xDS incremental session is always in the context of a gRPC bidirectional
+stream. This allows the xDS server to keep track of the state of xDS clients
+connected to it. There is no REST version of Incremental xDS.
+
+In incremental xDS the nonce field is required and used to pair an
+[`IncrementalDiscoveryResponse`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/discovery.proto#discoveryrequest)
+to an [`IncrementalDiscoveryRequest`](https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/discovery.proto#discoveryrequest)
+ACK or NACK.
+Optionally, a response message level system_version_info is present for
+debugging purposes only.
+
+`IncrementalDiscoveryRequest` can be sent in 3 situations:
 1. Initial message in an xDS bidirectional gRPC stream.
+ 2. As an ACK or NACK response to a previous `IncrementalDiscoveryResponse`.
+ In this case the `response_nonce` is set to the nonce value in the Response.
+ ACK or NACK is determined by the absence or presence of `error_detail`.
+ 3. Spontaneous `IncrementalDiscoveryRequest` from the client.
+ This can be done to dynamically add or remove elements from the tracked
+ `resource_names` set. In this case `response_nonce` must be omitted.
+
+In this first example the client connects and receives a first update that it
+ACKs. The second update fails and the client NACKs the update. Later the xDS
+client spontaneously requests the "wc" resource.
+
+
+
+On reconnect the xDS Incremental client may tell the server of its known resources
+to avoid resending them over the network.
+
+
+
## REST-JSON polling subscriptions
Synchronous (long) polling via REST endpoints is also available for the xDS
diff --git a/api/bazel/api_build_system.bzl b/api/bazel/api_build_system.bzl
index 875df406bdc41..497d82c5ccc07 100644
--- a/api/bazel/api_build_system.bzl
+++ b/api/bazel/api_build_system.bzl
@@ -1,23 +1,22 @@
load("@com_google_protobuf//:protobuf.bzl", "py_proto_library")
load("@com_lyft_protoc_gen_validate//bazel:pgv_proto_library.bzl", "pgv_cc_proto_library")
-load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library", "go_grpc_library")
+load("@io_bazel_rules_go//proto:def.bzl", "go_grpc_library", "go_proto_library")
load("@io_bazel_rules_go//go:def.bzl", "go_test")
-_PY_SUFFIX="_py"
-_CC_SUFFIX="_cc"
-_GO_PROTO_SUFFIX="_go_proto"
-_GO_GRPC_SUFFIX="_go_grpc"
-_GO_IMPORTPATH_PREFIX="github.com/envoyproxy/data-plane-api/api/"
+_PY_SUFFIX = "_py"
+_CC_SUFFIX = "_cc"
+_GO_PROTO_SUFFIX = "_go_proto"
+_GO_GRPC_SUFFIX = "_go_grpc"
+_GO_IMPORTPATH_PREFIX = "github.com/envoyproxy/data-plane-api/api/"
def _Suffix(d, suffix):
- return d + suffix
+ return d + suffix
def _LibrarySuffix(library_name, suffix):
- # Transform //a/b/c to //a/b/c:c in preparation for suffix operation below.
- if library_name.startswith("//") and ":" not in library_name:
- library_name += ":" + Label(library_name).name
- return _Suffix(library_name, suffix)
-
+ # Transform //a/b/c to //a/b/c:c in preparation for suffix operation below.
+ if library_name.startswith("//") and ":" not in library_name:
+ library_name += ":" + Label(library_name).name
+ return _Suffix(library_name, suffix)
# TODO(htuch): has_services is currently ignored but will in future support
# gRPC stub generation.
@@ -32,6 +31,7 @@ def api_py_proto_library(name, srcs = [], deps = [], has_services = 0):
protoc = "@com_google_protobuf//:protoc",
deps = [_LibrarySuffix(d, _PY_SUFFIX) for d in deps] + [
"@com_lyft_protoc_gen_validate//validate:validate_py",
+ "@googleapis//:api_httpbody_protos_py",
"@googleapis//:http_api_protos_py",
"@googleapis//:rpc_status_protos_py",
"@com_github_gogo_protobuf//:gogo_proto_py",
@@ -54,7 +54,7 @@ def api_go_proto_library(name, proto, deps = []):
"@com_github_golang_protobuf//ptypes/any:go_default_library",
"@com_lyft_protoc_gen_validate//validate:go_default_library",
"@googleapis//:rpc_status_go_proto",
- ]
+ ],
)
def api_go_grpc_library(name, proto, deps = []):
@@ -71,9 +71,19 @@ def api_go_grpc_library(name, proto, deps = []):
"@com_github_golang_protobuf//ptypes/any:go_default_library",
"@com_lyft_protoc_gen_validate//validate:go_default_library",
"@googleapis//:http_api_go_proto",
- ]
+ ],
)
+# This is api_proto_library plus some logic internal to //envoy/api.
+def api_proto_library_internal(visibility = ["//visibility:private"], **kwargs):
+ # //envoy/docs/build.sh needs visibility in order to generate documents.
+ if visibility == ["//visibility:private"]:
+ visibility = ["//docs"]
+ elif visibility != ["//visibility:public"]:
+ visibility = visibility + ["//docs"]
+
+ api_proto_library(visibility = visibility, **kwargs)
+
# TODO(htuch): has_services is currently ignored but will in future support
# gRPC stub generation.
# TODO(htuch): Automatically generate go_proto_library and go_grpc_library
@@ -86,11 +96,6 @@ def api_proto_library(name, visibility = ["//visibility:private"], srcs = [], de
# it can play well with the PGV plugin and (2) other language support that
# can make use of native proto_library.
- if visibility == ["//visibility:private"]:
- visibility = ["//docs"]
- elif visibility != ["//visibility:public"]:
- visibility = visibility + ["//docs"]
-
native.proto_library(
name = name,
srcs = srcs,
@@ -102,6 +107,7 @@ def api_proto_library(name, visibility = ["//visibility:private"], srcs = [], de
"@com_google_protobuf//:struct_proto",
"@com_google_protobuf//:timestamp_proto",
"@com_google_protobuf//:wrappers_proto",
+ "@googleapis//:api_httpbody_protos_proto",
"@googleapis//:http_api_protos_proto",
"@googleapis//:rpc_status_protos_lib",
"@com_github_gogo_protobuf//:gogo_proto",
@@ -109,6 +115,7 @@ def api_proto_library(name, visibility = ["//visibility:private"], srcs = [], de
],
visibility = visibility,
)
+
# Under the hood, this is just an extension of the Protobuf library's
# bespoke cc_proto_library. It doesn't consume proto_library as a proto
# provider. Hopefully one day we can move to a model where this target and
@@ -126,7 +133,7 @@ def api_proto_library(name, visibility = ["//visibility:private"], srcs = [], de
visibility = ["//visibility:public"],
)
if (require_py == 1):
- api_py_proto_library(name, srcs, deps, has_services)
+ api_py_proto_library(name, srcs, deps, has_services)
def api_cc_test(name, srcs, proto_deps):
native.cc_test(
diff --git a/api/bazel/repositories.bzl b/api/bazel/repositories.bzl
index 840b2c6625c42..2e497d712fc87 100644
--- a/api/bazel/repositories.bzl
+++ b/api/bazel/repositories.bzl
@@ -1,9 +1,9 @@
-GOOGLEAPIS_SHA = "d642131a6e6582fc226caf9893cb7fe7885b3411" # May 23, 2018
-GOGOPROTO_SHA = "1adfc126b41513cc696b209667c8656ea7aac67c" # v1.0.0
-PROMETHEUS_SHA = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" # Nov 17, 2017
-OPENCENSUS_SHA = "ab82e5fdec8267dc2a726544b10af97675970847" # May 23, 2018
+GOOGLEAPIS_SHA = "d642131a6e6582fc226caf9893cb7fe7885b3411" # May 23, 2018
+GOGOPROTO_SHA = "1adfc126b41513cc696b209667c8656ea7aac67c" # v1.0.0
+PROMETHEUS_SHA = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" # Nov 17, 2017
+OPENCENSUS_SHA = "ab82e5fdec8267dc2a726544b10af97675970847" # May 23, 2018
-PGV_GIT_SHA = "345b6b478ef955ad31382955d21fb504e95f38c7"
+PGV_GIT_SHA = "f9d2b11e44149635b23a002693b76512b01ae515"
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
@@ -17,10 +17,59 @@ def api_dependencies():
name = "googleapis",
strip_prefix = "googleapis-" + GOOGLEAPIS_SHA,
url = "https://github.com/googleapis/googleapis/archive/" + GOOGLEAPIS_SHA + ".tar.gz",
+ # TODO(dio): Consider writing a Skylark macro for importing Google API proto.
build_file_content = """
load("@com_google_protobuf//:protobuf.bzl", "cc_proto_library", "py_proto_library")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+filegroup(
+ name = "api_httpbody_protos_src",
+ srcs = [
+ "google/api/httpbody.proto",
+ ],
+ visibility = ["//visibility:public"],
+)
+
+proto_library(
+ name = "api_httpbody_protos_proto",
+ srcs = [":api_httpbody_protos_src"],
+ deps = ["@com_google_protobuf//:descriptor_proto"],
+ visibility = ["//visibility:public"],
+)
+
+cc_proto_library(
+ name = "api_httpbody_protos",
+ srcs = [
+ "google/api/httpbody.proto",
+ ],
+ default_runtime = "@com_google_protobuf//:protobuf",
+ protoc = "@com_google_protobuf//:protoc",
+ deps = ["@com_google_protobuf//:cc_wkt_protos"],
+ visibility = ["//visibility:public"],
+)
+
+py_proto_library(
+ name = "api_httpbody_protos_py",
+ srcs = [
+ "google/api/httpbody.proto",
+ ],
+ include = ".",
+ default_runtime = "@com_google_protobuf//:protobuf_python",
+ protoc = "@com_google_protobuf//:protoc",
+ visibility = ["//visibility:public"],
+ deps = ["@com_google_protobuf//:protobuf_python"],
+)
+
+go_proto_library(
+ name = "api_httpbody_go_proto",
+ importpath = "google.golang.org/genproto/googleapis/api/httpbody",
+ proto = ":api_httpbody_protos_proto",
+ visibility = ["//visibility:public"],
+ deps = [
+ ":descriptor_go_proto",
+ ],
+)
+
filegroup(
name = "http_api_protos_src",
srcs = [
@@ -28,7 +77,7 @@ filegroup(
"google/api/http.proto",
],
visibility = ["//visibility:public"],
- )
+)
go_proto_library(
name = "descriptor_go_proto",
@@ -93,6 +142,7 @@ proto_library(
deps = ["@com_google_protobuf//:any_proto"],
visibility = ["//visibility:public"],
)
+
cc_proto_library(
name = "rpc_status_protos",
srcs = ["google/rpc/status.proto"],
@@ -189,7 +239,7 @@ py_proto_library(
)
native.new_http_archive(
- name = "promotheus_metrics_model",
+ name = "prometheus_metrics_model",
strip_prefix = "client_model-" + PROMETHEUS_SHA,
url = "https://github.com/prometheus/client_model/archive/" + PROMETHEUS_SHA + ".tar.gz",
build_file_content = """
diff --git a/api/diagrams/incremental-reconnect.svg b/api/diagrams/incremental-reconnect.svg
new file mode 100644
index 0000000000000..ef8472340ab5d
--- /dev/null
+++ b/api/diagrams/incremental-reconnect.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/api/diagrams/incremental.svg b/api/diagrams/incremental.svg
new file mode 100644
index 0000000000000..e0e93b8a56725
--- /dev/null
+++ b/api/diagrams/incremental.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/api/docs/BUILD b/api/docs/BUILD
index ffd68728a1e8a..54a7b87eea4f5 100644
--- a/api/docs/BUILD
+++ b/api/docs/BUILD
@@ -12,6 +12,7 @@ package_group(
proto_library(
name = "protos",
deps = [
+ "//envoy/admin/v2alpha:clusters",
"//envoy/admin/v2alpha:config_dump",
"//envoy/api/v2:cds",
"//envoy/api/v2:discovery",
@@ -57,6 +58,7 @@ proto_library(
"//envoy/config/trace/v2:trace",
"//envoy/config/transport_socket/capture/v2alpha:capture",
"//envoy/data/accesslog/v2:accesslog",
+ "//envoy/data/core/v2alpha:health_check_event",
"//envoy/data/tap/v2alpha:capture",
"//envoy/service/accesslog/v2:als",
"//envoy/service/auth/v2alpha:attribute_context",
@@ -66,5 +68,8 @@ proto_library(
"//envoy/service/metrics/v2:metrics_service",
"//envoy/type:percent",
"//envoy/type:range",
+ "//envoy/type/matcher:metadata",
+ "//envoy/type/matcher:number",
+ "//envoy/type/matcher:string",
],
)
diff --git a/api/envoy/admin/v2alpha/BUILD b/api/envoy/admin/v2alpha/BUILD
index 9d2875da2a443..98696461bd1d1 100644
--- a/api/envoy/admin/v2alpha/BUILD
+++ b/api/envoy/admin/v2alpha/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "config_dump",
srcs = ["config_dump.proto"],
visibility = ["//visibility:public"],
@@ -13,3 +13,21 @@ api_proto_library(
"//envoy/config/bootstrap/v2:bootstrap",
],
)
+
+api_proto_library_internal(
+ name = "clusters",
+ srcs = ["clusters.proto"],
+ visibility = ["//visibility:public"],
+ deps = [
+ ":metrics",
+ "//envoy/api/v2/core:address",
+ "//envoy/api/v2/core:health_check",
+ "//envoy/type:percent",
+ ],
+)
+
+api_proto_library_internal(
+ name = "metrics",
+ srcs = ["metrics.proto"],
+ visibility = ["//visibility:public"],
+)
diff --git a/api/envoy/admin/v2alpha/clusters.proto b/api/envoy/admin/v2alpha/clusters.proto
new file mode 100644
index 0000000000000..fc8d91eac3075
--- /dev/null
+++ b/api/envoy/admin/v2alpha/clusters.proto
@@ -0,0 +1,75 @@
+syntax = "proto3";
+
+package envoy.admin.v2alpha;
+
+import "envoy/admin/v2alpha/metrics.proto";
+import "envoy/api/v2/core/address.proto";
+import "envoy/api/v2/core/health_check.proto";
+import "envoy/type/percent.proto";
+
+// [#protodoc-title: Clusters]
+
+// Admin endpoint uses this wrapper for `/clusters` to display cluster status information.
+// See :ref:`/clusters ` for more information.
+message Clusters {
+ // Mapping from cluster name to each cluster's status.
+ repeated ClusterStatus cluster_statuses = 1;
+}
+
+// Details an individual cluster's current status.
+message ClusterStatus {
+ // Name of the cluster.
+ string name = 1;
+
+ // Denotes whether this cluster was added via API or configured statically.
+ bool added_via_api = 2;
+
+ // The success rate threshold used in the last interval. The threshold is used to eject hosts
+ // based on their success rate. See
+ // :ref:`Cluster outlier detection ` statistics
+ //
+ // Note: this field may be omitted in any of the three following cases:
+ //
+ // 1. There were not enough hosts with enough request volume to proceed with success rate based
+ // outlier ejection.
+ // 2. The threshold is computed to be < 0 because a negative value implies that there was no
+ // threshold for that interval.
+ // 3. Outlier detection is not enabled for this cluster.
+ envoy.type.Percent success_rate_ejection_threshold = 3;
+
+ // Mapping from host address to the host's current status.
+ repeated HostStatus host_statuses = 4;
+}
+
+// Current state of a particular host.
+message HostStatus {
+ // Address of this host.
+ envoy.api.v2.core.Address address = 1;
+
+ // Mapping from the name of the statistic to the current value.
+ map stats = 2;
+
+ // The host's current health status.
+ HostHealthStatus health_status = 3;
+
+ // Request success rate for this host over the last calculated interval.
+ //
+ // Note: the message will not be present if host did not have enough request volume to calculate
+ // success rate or the cluster did not have enough hosts to run through success rate outlier
+ // ejection.
+ envoy.type.Percent success_rate = 4;
+}
+
+// Health status for a host.
+message HostHealthStatus {
+ // The host is currently failing active health checks.
+ bool failed_active_health_check = 1;
+
+ // The host is currently considered an outlier and has been ejected.
+ bool failed_outlier_check = 2;
+
+ // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported
+ // here.
+ // TODO(mrice32): pipe through remaining EDS health status possibilities.
+ envoy.api.v2.core.HealthStatus eds_health_status = 3;
+}
diff --git a/api/envoy/admin/v2alpha/metrics.proto b/api/envoy/admin/v2alpha/metrics.proto
new file mode 100644
index 0000000000000..93927157c1ef6
--- /dev/null
+++ b/api/envoy/admin/v2alpha/metrics.proto
@@ -0,0 +1,19 @@
+syntax = "proto3";
+
+package envoy.admin.v2alpha;
+
+// [#protodoc-title: Metrics]
+
+// Proto representation of an Envoy Counter or Gauge value.
+message SimpleMetric {
+ enum Type {
+ COUNTER = 0;
+ GAUGE = 1;
+ }
+
+ // Type of metric represented.
+ Type type = 1;
+
+ // Current metric value.
+ uint64 value = 2;
+}
diff --git a/api/envoy/api/v2/BUILD b/api/envoy/api/v2/BUILD
index 3e557a10239ed..261d140819985 100644
--- a/api/envoy/api/v2/BUILD
+++ b/api/envoy/api/v2/BUILD
@@ -1,4 +1,4 @@
-load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
@@ -16,7 +16,7 @@ package_group(
],
)
-api_proto_library(
+api_proto_library_internal(
name = "discovery",
srcs = ["discovery.proto"],
visibility = [":friends"],
@@ -29,7 +29,7 @@ api_go_proto_library(
deps = ["//envoy/api/v2/core:base_go_proto"],
)
-api_proto_library(
+api_proto_library_internal(
name = "eds",
srcs = ["eds.proto"],
has_services = 1,
@@ -40,6 +40,7 @@ api_proto_library(
"//envoy/api/v2/core:base",
"//envoy/api/v2/core:health_check",
"//envoy/api/v2/endpoint",
+ "//envoy/type:percent",
],
)
@@ -52,10 +53,11 @@ api_go_grpc_library(
"//envoy/api/v2/core:base_go_proto",
"//envoy/api/v2/core:health_check_go_proto",
"//envoy/api/v2/endpoint:endpoint_go_proto",
+ "//envoy/type:percent_go_proto",
],
)
-api_proto_library(
+api_proto_library_internal(
name = "cds",
srcs = ["cds.proto"],
has_services = 1,
@@ -95,7 +97,7 @@ api_go_grpc_library(
],
)
-api_proto_library(
+api_proto_library_internal(
name = "lds",
srcs = ["lds.proto"],
has_services = 1,
@@ -119,7 +121,7 @@ api_go_grpc_library(
],
)
-api_proto_library(
+api_proto_library_internal(
name = "rds",
srcs = ["rds.proto"],
has_services = 1,
diff --git a/api/envoy/api/v2/auth/BUILD b/api/envoy/api/v2/auth/BUILD
index c3ea89de1ff1d..55f522c0085a9 100644
--- a/api/envoy/api/v2/auth/BUILD
+++ b/api/envoy/api/v2/auth/BUILD
@@ -1,4 +1,4 @@
-load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
@@ -15,7 +15,7 @@ package_group(
],
)
-api_proto_library(
+api_proto_library_internal(
name = "cert",
srcs = ["cert.proto"],
visibility = [":friends"],
diff --git a/api/envoy/api/v2/cds.proto b/api/envoy/api/v2/cds.proto
index 8359cd51964b0..e23cda0c8f8f1 100644
--- a/api/envoy/api/v2/cds.proto
+++ b/api/envoy/api/v2/cds.proto
@@ -31,6 +31,10 @@ service ClusterDiscoveryService {
rpc StreamClusters(stream DiscoveryRequest) returns (stream DiscoveryResponse) {
}
+ rpc IncrementalClusters(stream IncrementalDiscoveryRequest)
+ returns (stream IncrementalDiscoveryResponse) {
+ }
+
rpc FetchClusters(DiscoveryRequest) returns (DiscoveryResponse) {
option (google.api.http) = {
post: "/v2/discovery:clusters"
@@ -156,7 +160,13 @@ message Cluster {
// :ref:`STRICT_DNS`
// or :ref:`LOGICAL_DNS`,
// then hosts is required.
- repeated core.Address hosts = 7;
+ //
+ // .. attention::
+ //
+ // **This field is deprecated**. Set the
+ // :ref:`load_assignment` field instead.
+ //
+ repeated core.Address hosts = 7 [deprecated = true];
// Setting this is required for specifying members of
// :ref:`STATIC`,
@@ -172,7 +182,6 @@ message Cluster {
// :ref:`endpoint assignments`.
// Setting this overrides :ref:`hosts` values.
//
- // [#not-implemented-hide:]
ClusterLoadAssignment load_assignment = 33;
// Optional :ref:`active health checking `
@@ -341,6 +350,18 @@ message Cluster {
// weighted cluster contains the same keys and values as the subset's
// metadata. The same host may appear in multiple subsets.
repeated LbSubsetSelector subset_selectors = 3;
+
+ // If true, routing to subsets will take into account the localities and locality weights of the
+ // endpoints when making the routing decision.
+ //
+ // There are some potential pitfalls associated with enabling this feature, as the resulting
+ // traffic split after applying both a subset match and locality weights might be undesirable.
+ //
+ // Consider for example a situation in which you have 50/50 split across two localities X/Y
+ // which have 100 hosts each without subsetting. If the subset LB results in X having only 1
+ // host selected but Y having 100, then a lot more load is being dumped on the single host in X
+ // than originally anticipated in the load balancing assignment delivered via EDS.
+ bool locality_weight_aware = 4;
}
// Configuration for load balancing subsetting.
@@ -416,6 +437,17 @@ message Cluster {
ZoneAwareLbConfig zone_aware_lb_config = 2;
LocalityWeightedLbConfig locality_weighted_lb_config = 3;
}
+ // If set, all health check/weight/metadata updates that happen within this duration will be
+ // merged and delivered in one shot when the duration expires. The start of the duration is when
+ // the first update happens. This is useful for big clusters, with potentially noisy deploys
+ // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes
+ // or metadata updates. By default, this is not configured and updates apply immediately. Also,
+ // the first set of updates to be seen apply immediately as well (e.g.: a new cluster).
+ //
+ // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is
+ // because merging those updates isn't currently safe. See
+ // https://github.com/envoyproxy/envoy/pull/3941.
+ google.protobuf.Duration update_merge_window = 4;
}
// Common configuration for all load balancer implementations.
diff --git a/api/envoy/api/v2/cluster/BUILD b/api/envoy/api/v2/cluster/BUILD
index 16e759069359e..a3b091dea5f28 100644
--- a/api/envoy/api/v2/cluster/BUILD
+++ b/api/envoy/api/v2/cluster/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "circuit_breaker",
srcs = ["circuit_breaker.proto"],
visibility = [
@@ -21,7 +21,7 @@ api_go_proto_library(
],
)
-api_proto_library(
+api_proto_library_internal(
name = "outlier_detection",
srcs = ["outlier_detection.proto"],
visibility = [
diff --git a/api/envoy/api/v2/cluster/circuit_breaker.proto b/api/envoy/api/v2/cluster/circuit_breaker.proto
index 19e378d779de2..1d574311d2009 100644
--- a/api/envoy/api/v2/cluster/circuit_breaker.proto
+++ b/api/envoy/api/v2/cluster/circuit_breaker.proto
@@ -2,6 +2,7 @@ syntax = "proto3";
package envoy.api.v2.cluster;
option go_package = "cluster";
+option csharp_namespace = "Envoy.Api.V2.ClusterNS";
import "envoy/api/v2/core/base.proto";
diff --git a/api/envoy/api/v2/cluster/outlier_detection.proto b/api/envoy/api/v2/cluster/outlier_detection.proto
index 8fc873cbd08df..3ef961928d5b1 100644
--- a/api/envoy/api/v2/cluster/outlier_detection.proto
+++ b/api/envoy/api/v2/cluster/outlier_detection.proto
@@ -1,6 +1,7 @@
syntax = "proto3";
package envoy.api.v2.cluster;
+option csharp_namespace = "Envoy.Api.V2.ClusterNS";
import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";
diff --git a/api/envoy/api/v2/core/BUILD b/api/envoy/api/v2/core/BUILD
index 666315758b826..71a8d33f59d35 100644
--- a/api/envoy/api/v2/core/BUILD
+++ b/api/envoy/api/v2/core/BUILD
@@ -1,4 +1,4 @@
-load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
@@ -16,7 +16,7 @@ package_group(
],
)
-api_proto_library(
+api_proto_library_internal(
name = "address",
srcs = ["address.proto"],
visibility = [
@@ -31,7 +31,7 @@ api_go_proto_library(
deps = [":base_go_proto"],
)
-api_proto_library(
+api_proto_library_internal(
name = "base",
srcs = ["base.proto"],
visibility = [
@@ -44,7 +44,7 @@ api_go_proto_library(
proto = ":base",
)
-api_proto_library(
+api_proto_library_internal(
name = "health_check",
srcs = ["health_check.proto"],
visibility = [
@@ -59,7 +59,7 @@ api_go_proto_library(
deps = [":base_go_proto"],
)
-api_proto_library(
+api_proto_library_internal(
name = "config_source",
srcs = ["config_source.proto"],
visibility = [
@@ -80,7 +80,12 @@ api_go_proto_library(
],
)
-api_proto_library(
+api_go_proto_library(
+ name = "http_uri",
+ proto = ":http_uri",
+)
+
+api_proto_library_internal(
name = "http_uri",
srcs = ["http_uri.proto"],
visibility = [
@@ -88,12 +93,7 @@ api_proto_library(
],
)
-api_go_proto_library(
- name = "http_uri",
- proto = ":http_uri",
-)
-
-api_proto_library(
+api_proto_library_internal(
name = "grpc_service",
srcs = ["grpc_service.proto"],
visibility = [
@@ -108,7 +108,7 @@ api_go_proto_library(
deps = [":base_go_proto"],
)
-api_proto_library(
+api_proto_library_internal(
name = "protocol",
srcs = ["protocol.proto"],
visibility = [
diff --git a/api/envoy/api/v2/core/base.proto b/api/envoy/api/v2/core/base.proto
index ffdd03fdfe595..1e86c529c46e2 100644
--- a/api/envoy/api/v2/core/base.proto
+++ b/api/envoy/api/v2/core/base.proto
@@ -133,7 +133,7 @@ enum RequestMethod {
// Header name/value pair.
message HeaderValue {
// Header name.
- string key = 1;
+ string key = 1 [(validate.rules).string.min_bytes = 1];
// Header value.
//
diff --git a/api/envoy/api/v2/core/config_source.proto b/api/envoy/api/v2/core/config_source.proto
index 17bdbbeb28d9e..1ebb265da2ae4 100644
--- a/api/envoy/api/v2/core/config_source.proto
+++ b/api/envoy/api/v2/core/config_source.proto
@@ -28,7 +28,7 @@ message ApiConfigSource {
GRPC = 2;
}
ApiType api_type = 1 [(validate.rules).enum.defined_only = true];
- // Multiple cluster names may be provided for REST_LEGACY/REST. If > 1
+ // Cluster names should be used only with REST_LEGACY/REST. If > 1
// cluster is defined, clusters will be cycled through if any kind of failure
// occurs.
//
@@ -40,11 +40,6 @@ message ApiConfigSource {
// Multiple gRPC services be provided for GRPC. If > 1 cluster is defined,
// services will be cycled through if any kind of failure occurs.
- //
- // .. note::
- //
- // If a gRPC service points to a ``cluster_name``, it must be statically
- // defined and its type must not be ``EDS``.
repeated GrpcService grpc_services = 4;
// For REST APIs, the delay between successive polls.
diff --git a/api/envoy/api/v2/core/health_check.proto b/api/envoy/api/v2/core/health_check.proto
index 55df7947e9f5f..ad35ca61536a0 100644
--- a/api/envoy/api/v2/core/health_check.proto
+++ b/api/envoy/api/v2/core/health_check.proto
@@ -21,15 +21,29 @@ option (gogoproto.equal_all) = true;
message HealthCheck {
// The time to wait for a health check response. If the timeout is reached the
// health check attempt will be considered a failure.
- google.protobuf.Duration timeout = 1 [(validate.rules).duration.required = true];
+ google.protobuf.Duration timeout = 1 [
+ (validate.rules).duration = {required: true, gt: {seconds: 0}},
+ (gogoproto.stdduration) = true
+ ];
// The interval between health checks.
- google.protobuf.Duration interval = 2 [(validate.rules).duration.required = true];
+ google.protobuf.Duration interval = 2 [
+ (validate.rules).duration = {required: true, gt: {seconds: 0}},
+ (gogoproto.stdduration) = true
+ ];
// An optional jitter amount in millseconds. If specified, during every
- // internal Envoy will add 0 to interval_jitter to the wait time.
+ // interval Envoy will add 0 to interval_jitter to the wait time.
google.protobuf.Duration interval_jitter = 3;
+ // An optional jitter amount as a percentage of interval_ms. If specified,
+ // during every interval Envoy will add 0 to interval_ms *
+ // interval_jitter_percent / 100 to the wait time.
+ //
+ // If interval_jitter_ms and interval_jitter_percent are both set, both of
+ // them will be used to increase the wait time.
+ uint32 interval_jitter_percent = 18;
+
// The number of unhealthy health checks required before a host is marked
// unhealthy. Note that for *http* health checking if a host responds with 503
// this threshold is ignored and the host is considered unhealthy immediately.
@@ -81,7 +95,9 @@ message HealthCheck {
string service_name = 5;
// Specifies a list of HTTP headers that should be added to each request that is sent to the
- // health checked cluster.
+ // health checked cluster. For more information, including details on header value syntax, see
+ // the documentation on :ref:`custom request headers
+ // `.
repeated core.HeaderValueOption request_headers_to_add = 6;
// If set, health checks will be made using http/2.
@@ -138,9 +154,6 @@ message HealthCheck {
// TCP health check.
TcpHealthCheck tcp_health_check = 9;
- // Redis health check.
- RedisHealthCheck redis_health_check = 10;
-
// gRPC health check.
GrpcHealthCheck grpc_health_check = 11;
@@ -148,6 +161,10 @@ message HealthCheck {
CustomHealthCheck custom_health_check = 13;
}
+ reserved 10; // redis_health_check is deprecated by :ref:`custom_health_check
+ // `
+ reserved "redis_health_check";
+
// The "no traffic interval" is a special health check interval that is used when a cluster has
// never had traffic routed to it. This lower interval allows cluster information to be kept up to
// date, without sending a potentially large amount of active health checking traffic for no
@@ -179,6 +196,10 @@ message HealthCheck {
//
// The default value for "healthy edge interval" is the same as the default interval.
google.protobuf.Duration healthy_edge_interval = 16;
+
+ // Specifies the path to the :ref:`health check event log `.
+ // If empty, no event log will be written.
+ string event_log_path = 17;
}
// Endpoint health status.
diff --git a/api/envoy/api/v2/discovery.proto b/api/envoy/api/v2/discovery.proto
index 74e7c5a2be965..f3ab1913d9146 100644
--- a/api/envoy/api/v2/discovery.proto
+++ b/api/envoy/api/v2/discovery.proto
@@ -93,3 +93,103 @@ message DiscoveryResponse {
// required for non-stream based xDS implementations.
string nonce = 5;
}
+
+// IncrementalDiscoveryRequest and IncrementalDiscoveryResponse are used in a
+// new gRPC endpoint for Incremental xDS. The feature is not supported for REST
+// management servers.
+//
+// With Incremental xDS, the IncrementalDiscoveryResponses do not need to
+// include a full snapshot of the tracked resources. Instead
+// IncrementalDiscoveryResponses are a diff to the state of an xDS client.
+// In Incremental xDS there are per-resource versions, which allow tracking
+// state at the resource granularity.
+// An xDS Incremental session is always in the context of a gRPC bidirectional
+// stream. This allows the xDS server to keep track of the state of xDS clients
+// connected to it.
+//
+// In Incremental xDS the nonce field is required and used to pair an
+// IncrementalDiscoveryResponse to an IncrementalDiscoveryRequest ACK or NACK.
+// Optionally, a response message level system_version_info is present for
+// debugging purposes only.
+//
+// IncrementalDiscoveryRequest can be sent in 3 situations:
+// 1. Initial message in a xDS bidirectional gRPC stream.
+// 2. As an ACK or NACK response to a previous IncrementalDiscoveryResponse.
+// In this case the response_nonce is set to the nonce value in the Response.
+// ACK or NACK is determined by the absence or presence of error_detail.
+// 3. Spontaneous IncrementalDiscoveryRequest from the client.
+// This can be done to dynamically add or remove elements from the tracked
+// resource_names set. In this case response_nonce must be omitted.
+message IncrementalDiscoveryRequest {
+ // The node making the request.
+ core.Node node = 1;
+
+ // Type of the resource that is being requested, e.g.
+ // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This is implicit
+ // in requests made via singleton xDS APIs such as CDS, LDS, etc. but is
+ // required for ADS.
+ string type_url = 2;
+
+ // IncrementalDiscoveryRequests allow the client to add or remove individual
+ // resources to the set of tracked resources in the context of a stream.
+ // All resource names in the resource_names_subscribe list are added to the
+ // set of tracked resources and all resource names in the resource_names_unsubscribe
+ // list are removed from the set of tracked resources.
+ // Unlike in non incremental xDS, an empty resource_names_subscribe or
+ // resource_names_unsubscribe list simply means that no resources are to be
+ // added to or removed from the resource list.
+ // The xDS server must send updates for all tracked resources but can also
+ // send updates for resources the client has not subscribed to. This behavior
+ // is similar to non incremental xDS.
+ // These two fields can be set for all types of IncrementalDiscoveryRequests
+ // (initial, ACK/NACK or spontaneous).
+ //
+ // A list of Resource names to add to the list of tracked resources.
+ repeated string resource_names_subscribe = 3;
+
+ // A list of Resource names to remove from the list of tracked resources.
+ repeated string resource_names_unsubscribe = 4;
+
+ // This map must be populated when the IncrementalDiscoveryRequest is the
+ // first in a stream. The keys are the resource names of the xDS resources
+ // known to the xDS client. The values in the map are the associated resource
+ // level version info.
+ map initial_resource_versions = 5;
+
+ // When the IncrementalDiscoveryRequest is an ACK or NACK message in response
+ // to a previous IncrementalDiscoveryResponse, the response_nonce must be the
+ // nonce in the IncrementalDiscoveryResponse.
+ // Otherwise response_nonce must be omitted.
+ string response_nonce = 6;
+
+ // This is populated when the previous :ref:`DiscoveryResponse `
+ // failed to update configuration. The *message* field in *error_details*
+ // provides the Envoy internal exception related to the failure.
+ google.rpc.Status error_detail = 7;
+}
+
+message IncrementalDiscoveryResponse {
+ // The version of the response data (used for debugging).
+ string system_version_info = 1;
+
+ // The response resources. These are typed resources that match the type url
+ // in the IncrementalDiscoveryRequest.
+ repeated Resource resources = 2 [(gogoproto.nullable) = false];
+
+ // Resource names of resources that have been deleted and are to be removed from the xDS Client.
+ // Removed resources for missing resources can be ignored.
+ repeated string removed_resources = 6;
+
+ // The nonce provides a way for IncrementalDiscoveryRequests to uniquely
+ // reference a IncrementalDiscoveryResponse. The nonce is required.
+ string nonce = 5;
+}
+
+message Resource {
+ // The resource level version. It allows xDS to track the state of individual
+ // resources.
+ string version = 1;
+
+ // The resource being tracked.
+ google.protobuf.Any resource = 2;
+}
diff --git a/api/envoy/api/v2/eds.proto b/api/envoy/api/v2/eds.proto
index 0c63fbaa58484..44a1fe0a97496 100644
--- a/api/envoy/api/v2/eds.proto
+++ b/api/envoy/api/v2/eds.proto
@@ -6,6 +6,7 @@ option java_generic_services = true;
import "envoy/api/v2/discovery.proto";
import "envoy/api/v2/endpoint/endpoint.proto";
+import "envoy/type/percent.proto";
import "google/api/annotations.proto";
@@ -50,12 +51,35 @@ message ClusterLoadAssignment {
// Load balancing policy settings.
message Policy {
- // Percentage of traffic (0-100) that should be dropped. This
- // action allows protection of upstream hosts should they unable to
- // recover from an outage or should they be unable to autoscale and hence
- // overall incoming traffic volume need to be trimmed to protect them.
- // [#v2-api-diff: This is known as maintenance mode in v1.]
- double drop_overload = 1 [(validate.rules).double = {gte: 0, lte: 100}];
+ reserved 1;
+
+ message DropOverload {
+ // Identifier for the policy specifying the drop.
+ string category = 1 [(validate.rules).string.min_bytes = 1];
+
+ // Percentage of traffic that should be dropped for the category.
+ envoy.type.FractionalPercent drop_percentage = 2;
+ }
+ // Action to trim the overall incoming traffic to protect the upstream
+ // hosts. This action allows protection in case the hosts are unable to
+ // recover from an outage, or unable to autoscale, or unable to handle
+ // incoming traffic volume for any reason.
+ //
+ // At the client each category is applied one after the other to generate
+ // the 'actual' drop percentage on all outgoing traffic. For example:
+ //
+ // .. code-block:: json
+ //
+ // { "drop_overloads": [
+ // { "category": "throttle", "drop_percentage": 60 }
+ // { "category": "lb", "drop_percentage": 50 }
+ // ]}
+ //
+ // The actual drop percentages applied to the traffic at the clients will be
+ // "throttle"_drop = 60%
+ // "lb"_drop = 20% // 50% of the remaining 'actual' load, which is 40%.
+ // actual_outgoing_load = 20% // remaining after applying all categories.
+ repeated DropOverload drop_overloads = 2;
}
// Load balancing policy settings.
diff --git a/api/envoy/api/v2/endpoint/BUILD b/api/envoy/api/v2/endpoint/BUILD
index 14808743df530..87884fe33342b 100644
--- a/api/envoy/api/v2/endpoint/BUILD
+++ b/api/envoy/api/v2/endpoint/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "endpoint",
srcs = ["endpoint.proto"],
visibility = ["//envoy/api/v2:friends"],
@@ -29,7 +29,7 @@ api_go_proto_library(
],
)
-api_proto_library(
+api_proto_library_internal(
name = "load_report",
srcs = ["load_report.proto"],
visibility = ["//envoy/api/v2:friends"],
diff --git a/api/envoy/api/v2/endpoint/endpoint.proto b/api/envoy/api/v2/endpoint/endpoint.proto
index 6f4cad1ce66e9..c1983f175440e 100644
--- a/api/envoy/api/v2/endpoint/endpoint.proto
+++ b/api/envoy/api/v2/endpoint/endpoint.proto
@@ -29,7 +29,7 @@ message Endpoint {
// and will be resolved via DNS.
core.Address address = 1;
- // [#not-implemented-hide:] The optional health check configuration.
+ // The optional health check configuration.
message HealthCheckConfig {
// Optional alternative health check port value.
//
@@ -37,11 +37,11 @@ message Endpoint {
// as the host's serving address port. This provides an alternative health
// check port. Setting this with a non-zero value allows an upstream host
// to have different health check address port.
- uint32 port_value = 1;
+ uint32 port_value = 1 [(validate.rules).uint32.lte = 65535];
}
- // [#not-implemented-hide:] The optional health check configuration is used as
- // configuration for the health checker to contact the health checked host.
+ // The optional health check configuration is used as configuration for the
+ // health checker to contact the health checked host.
//
// .. attention::
//
@@ -123,5 +123,5 @@ message LocalityLbEndpoints {
// next highest priority group.
//
// Priorities should range from 0 (highest) to N (lowest) without skipping.
- uint32 priority = 5;
+ uint32 priority = 5 [(validate.rules).uint32 = {lte: 128}];
}
diff --git a/api/envoy/api/v2/endpoint/load_report.proto b/api/envoy/api/v2/endpoint/load_report.proto
index b61a0025a7a05..45ca3a168bbaf 100644
--- a/api/envoy/api/v2/endpoint/load_report.proto
+++ b/api/envoy/api/v2/endpoint/load_report.proto
@@ -4,6 +4,8 @@ package envoy.api.v2.endpoint;
import "envoy/api/v2/core/base.proto";
+import "google/protobuf/duration.proto";
+
import "validate/validate.proto";
import "gogoproto/gogo.proto";
@@ -93,4 +95,20 @@ message ClusterStats {
// The total number of dropped requests. This covers requests
// deliberately dropped by the drop_overload policy and circuit breaking.
uint64 total_dropped_requests = 3;
+
+ message DroppedRequests {
+ // Identifier for the policy specifying the drop.
+ string category = 1 [(validate.rules).string.min_bytes = 1];
+ // Total number of deliberately dropped requests for the category.
+ uint64 dropped_count = 2;
+ }
+ // Information about deliberately dropped requests for each category specified
+ // in the DropOverload policy.
+ repeated DroppedRequests dropped_requests = 5;
+
+ // Period over which the actual load report occurred. This will be guaranteed to include every
+ // request reported. Due to system load and delays between the *LoadStatsRequest* sent from Envoy
+ // and the *LoadStatsResponse* message sent from the management server, this may be longer than
+ // the requested load reporting interval in the *LoadStatsResponse*.
+ google.protobuf.Duration load_report_interval = 4;
}
diff --git a/api/envoy/api/v2/listener/BUILD b/api/envoy/api/v2/listener/BUILD
index e8f48a10b29f2..bfa6a1407107f 100644
--- a/api/envoy/api/v2/listener/BUILD
+++ b/api/envoy/api/v2/listener/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "listener",
srcs = ["listener.proto"],
visibility = ["//envoy/api/v2:friends"],
diff --git a/api/envoy/api/v2/listener/listener.proto b/api/envoy/api/v2/listener/listener.proto
index f9436c24d1c32..1e8015dbb2446 100644
--- a/api/envoy/api/v2/listener/listener.proto
+++ b/api/envoy/api/v2/listener/listener.proto
@@ -2,6 +2,7 @@ syntax = "proto3";
package envoy.api.v2.listener;
option go_package = "listener";
+option csharp_namespace = "Envoy.Api.V2.ListenerNS";
import "envoy/api/v2/core/address.proto";
import "envoy/api/v2/auth/cert.proto";
@@ -54,10 +55,11 @@ message Filter {
//
// The following order applies:
//
-// [#comment:TODO(PiotrSikora): destination IP / ranges are going to be 1.]
-// 1. Server name (e.g. SNI for TLS protocol),
-// 2. Transport protocol.
-// 3. Application protocols (e.g. ALPN for TLS protocol).
+// 1. Destination port.
+// 2. Destination IP address.
+// 3. Server name (e.g. SNI for TLS protocol),
+// 4. Transport protocol.
+// 5. Application protocols (e.g. ALPN for TLS protocol).
//
// For criterias that allow ranges or wildcards, the most specific value in any
// of the configured filter chains that matches the incoming connection is going
@@ -70,9 +72,12 @@ message Filter {
//
// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules]
message FilterChainMatch {
+ // Optional destination port to consider when use_original_dst is set on the
+ // listener in determining a filter chain match.
+ google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {gte: 1, lte: 65535}];
+
// If non-empty, an IP address and prefix length to match addresses when the
// listener is bound to 0.0.0.0/:: or when use_original_dst is specified.
- // [#not-implemented-hide:]
repeated core.CidrRange prefix_ranges = 3;
// If non-empty, an IP address and suffix length to match addresses when the
@@ -96,11 +101,6 @@ message FilterChainMatch {
// [#not-implemented-hide:]
repeated google.protobuf.UInt32Value source_ports = 7;
- // Optional destination port to consider when use_original_dst is set on the
- // listener in determining a filter chain match.
- // [#not-implemented-hide:]
- google.protobuf.UInt32Value destination_port = 8;
-
// If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining
// a filter chain match. Those values will be compared against the server names of a new
// connection, when detected by one of the listener filters.
@@ -148,20 +148,8 @@ message FilterChainMatch {
// unless all connecting clients are known to use ALPN.
repeated string application_protocols = 10;
- // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining
- // a filter chain match. Those values will be compared against the server names of a new
- // connection, when detected by one of the listener filters.
- //
- // The server name will be matched against all wildcard domains, i.e. ``www.example.com``
- // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``.
- //
- // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid.
- //
- // .. attention::
- //
- // Deprecated. Use :ref:`server_names `
- // instead.
- repeated string sni_domains = 1 [deprecated = true];
+ reserved 1;
+ reserved "sni_domains";
}
// A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and
diff --git a/api/envoy/api/v2/ratelimit/BUILD b/api/envoy/api/v2/ratelimit/BUILD
index 0c6497e63a1fa..6e640b04986c6 100644
--- a/api/envoy/api/v2/ratelimit/BUILD
+++ b/api/envoy/api/v2/ratelimit/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "ratelimit",
srcs = ["ratelimit.proto"],
visibility = ["//envoy/api/v2:friends"],
diff --git a/api/envoy/api/v2/rds.proto b/api/envoy/api/v2/rds.proto
index e820852defc4a..00ac0145b301a 100644
--- a/api/envoy/api/v2/rds.proto
+++ b/api/envoy/api/v2/rds.proto
@@ -28,6 +28,10 @@ service RouteDiscoveryService {
rpc StreamRoutes(stream DiscoveryRequest) returns (stream DiscoveryResponse) {
}
+ rpc IncrementalRoutes(stream IncrementalDiscoveryRequest)
+ returns (stream IncrementalDiscoveryResponse) {
+ }
+
rpc FetchRoutes(DiscoveryRequest) returns (DiscoveryResponse) {
option (google.api.http) = {
post: "/v2/discovery:routes"
diff --git a/api/envoy/api/v2/route/BUILD b/api/envoy/api/v2/route/BUILD
index 09c6b2dd553e3..5bc60102532e4 100644
--- a/api/envoy/api/v2/route/BUILD
+++ b/api/envoy/api/v2/route/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "route",
srcs = ["route.proto"],
visibility = ["//envoy/api/v2:friends"],
diff --git a/api/envoy/api/v2/route/route.proto b/api/envoy/api/v2/route/route.proto
index 9d972f9d9af36..a497a1871f9e9 100644
--- a/api/envoy/api/v2/route/route.proto
+++ b/api/envoy/api/v2/route/route.proto
@@ -75,7 +75,7 @@ message VirtualHost {
// Specifies a list of HTTP headers that should be added to each request
// handled by this virtual host. Headers specified at this level are applied
- // after headers from enclosed :ref:`envoy_api_msg_route.RouteAction` and before headers from the
+ // after headers from enclosed :ref:`envoy_api_msg_route.Route` and before headers from the
// enclosing :ref:`envoy_api_msg_RouteConfiguration`. For more information, including
// details on header value syntax, see the documentation on :ref:`custom request headers
// `.
@@ -83,7 +83,7 @@ message VirtualHost {
// Specifies a list of HTTP headers that should be added to each response
// handled by this virtual host. Headers specified at this level are applied
- // after headers from enclosed :ref:`envoy_api_msg_route.RouteAction` and before headers from the
+ // after headers from enclosed :ref:`envoy_api_msg_route.Route` and before headers from the
// enclosing :ref:`envoy_api_msg_RouteConfiguration`. For more information, including
// details on header value syntax, see the documentation on :ref:`custom request headers
// `.
@@ -148,6 +148,26 @@ message Route {
// specific; see the :ref:`HTTP filter documentation ` for
// if and how it is utilized.
map per_filter_config = 8;
+
+ // Specifies a set of headers that will be added to requests matching this
+ // route. Headers specified at this level are applied before headers from the
+ // enclosing :ref:`envoy_api_msg_route.VirtualHost` and
+ // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on
+ // header value syntax, see the documentation on :ref:`custom request headers
+ // `.
+ repeated core.HeaderValueOption request_headers_to_add = 9;
+
+ // Specifies a set of headers that will be added to responses to requests
+ // matching this route. Headers specified at this level are applied before
+ // headers from the enclosing :ref:`envoy_api_msg_route.VirtualHost` and
+ // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including
+ // details on header value syntax, see the documentation on
+ // :ref:`custom request headers `.
+ repeated core.HeaderValueOption response_headers_to_add = 10;
+
+ // Specifies a list of HTTP headers that should be removed from each response
+ // to requests matching this route.
+ repeated string response_headers_to_remove = 11;
}
// Compared to the :ref:`cluster ` field that specifies a
@@ -176,8 +196,7 @@ message WeightedCluster {
// Specifies a list of headers to be added to requests when this cluster is selected
// through the enclosing :ref:`envoy_api_msg_route.RouteAction`.
// Headers specified at this level are applied before headers from the enclosing
- // :ref:`envoy_api_msg_route.RouteAction`,
- // :ref:`envoy_api_msg_route.VirtualHost`, and
+ // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_route.VirtualHost`, and
// :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on
// header value syntax, see the documentation on :ref:`custom request headers
// `.
@@ -186,8 +205,7 @@ message WeightedCluster {
// Specifies a list of headers to be added to responses when this cluster is selected
// through the enclosing :ref:`envoy_api_msg_route.RouteAction`.
// Headers specified at this level are applied before headers from the enclosing
- // :ref:`envoy_api_msg_route.RouteAction`,
- // :ref:`envoy_api_msg_route.VirtualHost`, and
+ // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_route.VirtualHost`, and
// :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on
// header value syntax, see the documentation on :ref:`custom request headers
// `.
@@ -282,10 +300,18 @@ message RouteMatch {
repeated QueryParameterMatcher query_parameters = 7;
}
+// [#comment:next free field: 9]
message CorsPolicy {
// Specifies the origins that will be allowed to do CORS requests.
+ //
+ // An origin is allowed if either allow_origin or allow_origin_regex match.
repeated string allow_origin = 1;
+ // Specifies regex patterns that match allowed origins.
+ //
+ // An origin is allowed if either allow_origin or allow_origin_regex match.
+ repeated string allow_origin_regex = 8;
+
// Specifies the content for the *access-control-allow-methods* header.
string allow_methods = 2;
@@ -305,7 +331,7 @@ message CorsPolicy {
google.protobuf.BoolValue enabled = 7;
}
-// [#comment:next free field: 24]
+// [#comment:next free field: 25]
message RouteAction {
oneof cluster_specifier {
option (validate.required) = true;
@@ -393,7 +419,9 @@ message RouteAction {
google.protobuf.BoolValue auto_host_rewrite = 7;
}
- // Specifies the timeout for the route. If not specified, the default is 15s.
+ // Specifies the upstream timeout for the route. If not specified, the default is 15s. This
+ // spans between the point at which the entire downstream request (i.e. end-of-stream) has been
+ // processed and when the upstream response has been completely processed.
//
// .. note::
//
@@ -415,8 +443,8 @@ message RouteAction {
// :ref:`config_http_filters_router_x-envoy-max-retries`.
google.protobuf.UInt32Value num_retries = 2;
- // Specifies a non-zero timeout per retry attempt. This parameter is optional.
- // The same conditions documented for
+ // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The
+ // same conditions documented for
// :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply.
//
// .. note::
@@ -429,6 +457,27 @@ message RouteAction {
google.protobuf.Duration per_try_timeout = 3 [(gogoproto.stdduration) = true];
}
+ // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout
+ // specified, although the connection manager wide :ref:`stream_idle_timeout
+ // `
+ // will still apply. A value of 0 will completely disable the route's idle timeout, even if a
+ // connection manager stream idle timeout is configured.
+ //
+ // The idle timeout is distinct to :ref:`timeout
+ // `, which provides an upper bound
+ // on the upstream response time; :ref:`idle_timeout
+ // ` instead bounds the amount
+ // of time the request's stream may be idle.
+ //
+ // After header decoding, the idle timeout will apply on downstream and
+ // upstream request events. Each time an encode/decode event for headers or
+ // data is processed for the stream, the timer will be reset. If the timeout
+ // fires, the stream is terminated with a 408 Request Timeout error code if no
+ // upstream response header has been received, otherwise a stream reset
+ // occurs.
+ google.protobuf.Duration idle_timeout = 24
+ [(validate.rules).duration.gt = {}, (gogoproto.stdduration) = true];
+
// Indicates that the route has a retry policy.
RetryPolicy retry_policy = 9;
@@ -461,25 +510,14 @@ message RouteAction {
// https://github.com/lyft/protoc-gen-validate/issues/42 is resolved.]
core.RoutingPriority priority = 11;
- // Specifies a set of headers that will be added to requests matching this
- // route. Headers specified at this level are applied before headers from the
- // enclosing :ref:`envoy_api_msg_route.VirtualHost` and
- // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on
- // header value syntax, see the documentation on :ref:`custom request headers
- // `.
- repeated core.HeaderValueOption request_headers_to_add = 12;
+ // [#not-implemented-hide:]
+ repeated core.HeaderValueOption request_headers_to_add = 12 [deprecated = true];
- // Specifies a set of headers that will be added to responses to requests
- // matching this route. Headers specified at this level are applied before
- // headers from the enclosing :ref:`envoy_api_msg_route.VirtualHost` and
- // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including
- // details on header value syntax, see the documentation on
- // :ref:`custom request headers `.
- repeated core.HeaderValueOption response_headers_to_add = 18;
+ // [#not-implemented-hide:]
+ repeated core.HeaderValueOption response_headers_to_add = 18 [deprecated = true];
- // Specifies a list of HTTP headers that should be removed from each response
- // to requests matching this route.
- repeated string response_headers_to_remove = 19;
+ // [#not-implemented-hide:]
+ repeated string response_headers_to_remove = 19 [deprecated = true];
// Specifies a set of rate limit configurations that could be applied to the
// route.
@@ -573,7 +611,7 @@ message RouteAction {
// proxy data from the client to the upstream server.
//
// Redirects are not supported on routes where WebSocket upgrades are allowed.
- google.protobuf.BoolValue use_websocket = 16;
+ google.protobuf.BoolValue use_websocket = 16 [deprecated = true];
message WebSocketProxyConfig {
// See :ref:`stat_prefix
@@ -603,7 +641,7 @@ message RouteAction {
// Proxy configuration used for WebSocket connections. If unset, the default values as specified
// in :ref:`TcpProxy ` are used.
- WebSocketProxyConfig websocket_config = 22;
+ WebSocketProxyConfig websocket_config = 22 [deprecated = true];
// Indicates that the route has a CORS policy.
CorsPolicy cors = 17;
@@ -680,8 +718,9 @@ message DirectResponseAction {
//
// .. note::
//
- // Headers can be specified using *response_headers_to_add* in
- // :ref:`envoy_api_msg_RouteConfiguration`.
+ // Headers can be specified using *response_headers_to_add* in the enclosing
+ // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_RouteConfiguration` or
+ // :ref:`envoy_api_msg_route.VirtualHost`.
core.DataSource body = 2;
}
@@ -888,37 +927,24 @@ message RateLimit {
// "name": ":method",
// "value": "POST"
// }
+//
+// .. attention::
+// In the absence of any header match specifier, match will default to :ref:`present_match
+// `. i.e., a request that has the :ref:`name
+// ` header will match, regardless of the header's
+// value.
+//
message HeaderMatcher {
// Specifies the name of the header in the request.
string name = 1 [(validate.rules).string.min_bytes = 1];
- // Specifies the value of the header. If the value is absent a request that
- // has the name header will match, regardless of the header’s value.
- //
- // .. attention::
- // Deprecated. Use :ref:`exact_match ` instead.
- string value = 2 [deprecated = true];
-
- // Specifies whether the header value is a regular
- // expression or not. Defaults to false. The entire request header value must match the regex. The
- // rule will not match if only a subsequence of the request header value matches the regex. The
- // regex grammar used in the value field is defined
- // `here `_.
- //
- // Examples:
- //
- // * The regex *\d{3}* matches the value *123*
- // * The regex *\d{3}* does not match the value *1234*
- // * The regex *\d{3}* does not match the value *123.456*
- //
- // .. attention::
- // Deprecated. Use :ref:`regex_match ` instead.
- google.protobuf.BoolValue regex = 3 [deprecated = true];
+ reserved 2; // value deprecated by :ref:`exact_match
+ // `
+
+ reserved 3; // regex deprecated by :ref:`regex_match
+ // `
// Specifies how the header match will be performed to route the request.
- // If header_match_specifier is absent, a request that has the
- // :ref:`envoy_api_msg_route.HeaderMatcher.name` header will match, regardless of the header's
- // value.
oneof header_match_specifier {
// If specified, header match will be performed based on the value of the header.
string exact_match = 4;
diff --git a/api/envoy/config/accesslog/v2/BUILD b/api/envoy/config/accesslog/v2/BUILD
index 63bdc5c5283c6..63ef7b0ae8057 100644
--- a/api/envoy/config/accesslog/v2/BUILD
+++ b/api/envoy/config/accesslog/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "als",
srcs = ["als.proto"],
deps = [
@@ -10,7 +10,7 @@ api_proto_library(
],
)
-api_proto_library(
+api_proto_library_internal(
name = "file",
srcs = ["file.proto"],
)
diff --git a/api/envoy/config/bootstrap/v2/BUILD b/api/envoy/config/bootstrap/v2/BUILD
index 9b97ffab07d1b..4024b11a13c56 100644
--- a/api/envoy/config/bootstrap/v2/BUILD
+++ b/api/envoy/config/bootstrap/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "bootstrap",
srcs = ["bootstrap.proto"],
visibility = ["//visibility:public"],
diff --git a/api/envoy/config/filter/accesslog/v2/BUILD b/api/envoy/config/filter/accesslog/v2/BUILD
index fbab9f76ba4a2..3eedcf397000e 100644
--- a/api/envoy/config/filter/accesslog/v2/BUILD
+++ b/api/envoy/config/filter/accesslog/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "accesslog",
srcs = ["accesslog.proto"],
visibility = [
diff --git a/api/envoy/config/filter/accesslog/v2/accesslog.proto b/api/envoy/config/filter/accesslog/v2/accesslog.proto
index af78123e36091..6642560694eaf 100644
--- a/api/envoy/config/filter/accesslog/v2/accesslog.proto
+++ b/api/envoy/config/filter/accesslog/v2/accesslog.proto
@@ -61,6 +61,9 @@ message AccessLogFilter {
// Header filter.
HeaderFilter header_filter = 8;
+
+ // Response flag filter.
+ ResponseFlagFilter response_flag_filter = 9;
}
}
@@ -150,3 +153,15 @@ message HeaderFilter {
// check.
envoy.api.v2.route.HeaderMatcher header = 1 [(validate.rules).message.required = true];
}
+
+// Filters requests that received responses with an Envoy response flag set.
+// A list of the response flags can be found
+// in the access log formatter :ref:`documentation`.
+message ResponseFlagFilter {
+// Only responses with any of the flags listed in this field will be logged.
+ // This field is optional. If it is not specified, then any response flag will pass
+ // the filter check.
+ repeated string flags = 1 [(validate.rules).repeated .items.string = {
+ in: ["LH", "UH", "UT", "LR", "UR", "UF", "UC", "UO", "NR", "DI", "FI", "RL", "UAEX"]
+ }];
+}
diff --git a/api/envoy/config/filter/fault/v2/BUILD b/api/envoy/config/filter/fault/v2/BUILD
index 0b4310f48e36e..9fba2fbed3e17 100644
--- a/api/envoy/config/filter/fault/v2/BUILD
+++ b/api/envoy/config/filter/fault/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "fault",
srcs = ["fault.proto"],
visibility = [
diff --git a/api/envoy/config/filter/http/buffer/v2/BUILD b/api/envoy/config/filter/http/buffer/v2/BUILD
index d2be36c572c4d..0460c2d43e3ef 100644
--- a/api/envoy/config/filter/http/buffer/v2/BUILD
+++ b/api/envoy/config/filter/http/buffer/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "buffer",
srcs = ["buffer.proto"],
)
diff --git a/api/envoy/config/filter/http/ext_authz/v2alpha/BUILD b/api/envoy/config/filter/http/ext_authz/v2alpha/BUILD
index 62e7fc3d64641..8ab214517f914 100644
--- a/api/envoy/config/filter/http/ext_authz/v2alpha/BUILD
+++ b/api/envoy/config/filter/http/ext_authz/v2alpha/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "ext_authz",
srcs = ["ext_authz.proto"],
deps = [
diff --git a/api/envoy/config/filter/http/ext_authz/v2alpha/ext_authz.proto b/api/envoy/config/filter/http/ext_authz/v2alpha/ext_authz.proto
index 9d602298ce170..c85a799cddcae 100644
--- a/api/envoy/config/filter/http/ext_authz/v2alpha/ext_authz.proto
+++ b/api/envoy/config/filter/http/ext_authz/v2alpha/ext_authz.proto
@@ -6,24 +6,18 @@ option go_package = "v2alpha";
import "envoy/api/v2/core/grpc_service.proto";
import "envoy/api/v2/core/http_uri.proto";
-// [#protodoc-title: HTTP External Authorization ]
-// The external authorization HTTP service configuration
+// [#protodoc-title: External Authorization ]
+// The external authorization service configuration
// :ref:`configuration overview `.
-// [#not-implemented-hide:]
-// [#comment: The HttpService is under development and will be supported soon.]
-message HttpService {
- // Sets the HTTP server URI which the authorization requests must be sent to.
- envoy.api.v2.core.HttpUri server_uri = 1;
-
- // Sets an optional prefix to the value of authorization request header `path`.
- string path_prefix = 2;
-}
-
-// External Authorization filter calls out to an external service over the
-// gRPC Authorization API defined by
-// :ref:`CheckRequest `.
-// A failed check will cause this filter to close the HTTP request with 403(Forbidden).
+// External Authorization filter calls out to an external service over either:
+//
+// 1. gRPC Authorization API defined by :ref:`CheckRequest
+// `.
+// 2. Raw HTTP Authorization server by passing the request headers to the service.
+//
+// A failed check will cause this filter to close the HTTP request normally with 403 (Forbidden),
+// unless a different status code has been indicated in the authorization response.
message ExtAuthz {
oneof services {
@@ -32,7 +26,7 @@ message ExtAuthz {
envoy.api.v2.core.GrpcService grpc_service = 1;
// The external authorization HTTP service configuration.
- // [#not-implemented-hide:]
+ // The default timeout is set to 200ms by this filter.
HttpService http_service = 3;
}
@@ -42,3 +36,30 @@ message ExtAuthz {
// Defaults to false.
bool failure_mode_allow = 2;
}
+
+// External Authorization filter calls out to an upstream authorization server by passing the raw
+// HTTP request headers to the server. This allows the authorization service to take a decision
+// whether the request is authorized or not.
+//
+// A successful check allows the authorization service adding or overriding headers from the
+// original request before dispatching it to the upstream. This is done by including the headers in
+// the response sent back from the authorization service to the filter. Note that `Status`,
+// `Method`, `Path` and `Content Length` response headers are automatically removed from this
+// response by the filter. If other headers need to be deleted, they should be specified in
+// `response_headers_to_remove` field.
+//
+// A failed check will cause this filter to close the HTTP request normally with 403 (Forbidden),
+// unless a different status code has been indicated by the authorization service via response
+// headers. The HTTP service also allows the authorization filter to pass data from the
+// response body to the downstream client in case of a denied request.
+message HttpService {
+ // Sets the HTTP server URI which the authorization requests must be sent to.
+ envoy.api.v2.core.HttpUri server_uri = 1;
+
+ // Sets an optional prefix to the value of authorization request header `path`.
+ string path_prefix = 2;
+
+ // Sets a list of headers that should not be sent *from the authorization server* to the
+ // upstream.
+ repeated string response_headers_to_remove = 3;
+}
diff --git a/api/envoy/config/filter/http/fault/v2/BUILD b/api/envoy/config/filter/http/fault/v2/BUILD
index 0c517c3e666db..7b414c48af121 100644
--- a/api/envoy/config/filter/http/fault/v2/BUILD
+++ b/api/envoy/config/filter/http/fault/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "fault",
srcs = ["fault.proto"],
deps = [
diff --git a/api/envoy/config/filter/http/gzip/v2/BUILD b/api/envoy/config/filter/http/gzip/v2/BUILD
index e1b592f4aee79..79c1076d7c77e 100644
--- a/api/envoy/config/filter/http/gzip/v2/BUILD
+++ b/api/envoy/config/filter/http/gzip/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "gzip",
srcs = ["gzip.proto"],
)
diff --git a/api/envoy/config/filter/http/header_to_metadata/v2/BUILD b/api/envoy/config/filter/http/header_to_metadata/v2/BUILD
index 102dd076346dc..67b45090a654f 100644
--- a/api/envoy/config/filter/http/header_to_metadata/v2/BUILD
+++ b/api/envoy/config/filter/http/header_to_metadata/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "header_to_metadata",
srcs = ["header_to_metadata.proto"],
deps = [],
diff --git a/api/envoy/config/filter/http/health_check/v2/BUILD b/api/envoy/config/filter/http/health_check/v2/BUILD
index 1616f046e1fa6..37152bde6f020 100644
--- a/api/envoy/config/filter/http/health_check/v2/BUILD
+++ b/api/envoy/config/filter/http/health_check/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "health_check",
srcs = ["health_check.proto"],
deps = [
diff --git a/api/envoy/config/filter/http/health_check/v2/health_check.proto b/api/envoy/config/filter/http/health_check/v2/health_check.proto
index 88106e93136cd..0f584b451f68a 100644
--- a/api/envoy/config/filter/http/health_check/v2/health_check.proto
+++ b/api/envoy/config/filter/http/health_check/v2/health_check.proto
@@ -19,11 +19,8 @@ message HealthCheck {
// Specifies whether the filter operates in pass through mode or not.
google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message.required = true];
- // Specifies the incoming HTTP endpoint that should be considered the
- // health check endpoint. For example */healthcheck*.
- // Note that this field is deprecated in favor of
- // :ref:`headers `.
- string endpoint = 2 [deprecated = true];
+ reserved 2;
+ reserved "endpoint";
// If operating in pass through mode, the amount of time in milliseconds
// that the filter should cache the upstream response.
@@ -36,8 +33,6 @@ message HealthCheck {
// Specifies a set of health check request headers to match on. The health check filter will
// check a request’s headers against all the specified headers. To specify the health check
- // endpoint, set the ``:path`` header to match on. Note that if the
- // :ref:`endpoint `
- // field is set, it will overwrite any ``:path`` header to match.
+ // endpoint, set the ``:path`` header to match on.
repeated envoy.api.v2.route.HeaderMatcher headers = 5;
}
diff --git a/api/envoy/config/filter/http/ip_tagging/v2/BUILD b/api/envoy/config/filter/http/ip_tagging/v2/BUILD
index 147693b86c088..8a6c0ee5be259 100644
--- a/api/envoy/config/filter/http/ip_tagging/v2/BUILD
+++ b/api/envoy/config/filter/http/ip_tagging/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "ip_tagging",
srcs = ["ip_tagging.proto"],
deps = ["//envoy/api/v2/core:address"],
diff --git a/api/envoy/config/filter/http/jwt_authn/v2alpha/BUILD b/api/envoy/config/filter/http/jwt_authn/v2alpha/BUILD
index cc07bd29bddaa..90863e3f5bed2 100644
--- a/api/envoy/config/filter/http/jwt_authn/v2alpha/BUILD
+++ b/api/envoy/config/filter/http/jwt_authn/v2alpha/BUILD
@@ -1,8 +1,8 @@
licenses(["notice"]) # Apache 2
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
-api_proto_library(
+api_proto_library_internal(
name = "jwt_authn",
srcs = ["config.proto"],
deps = [
diff --git a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto
index d58e960c37f68..1350070f6806b 100644
--- a/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto
+++ b/api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto
@@ -22,13 +22,13 @@ import "validate/validate.proto";
// issuer: https://example.com
// audiences:
// - bookstore_android.apps.googleusercontent.com
-// bookstore_web.apps.googleusercontent.com
+// - bookstore_web.apps.googleusercontent.com
// remote_jwks:
-// - http_uri:
-// - uri: https://example.com/.well-known/jwks.json
+// http_uri:
+// uri: https://example.com/.well-known/jwks.json
// cluster: example_jwks_cluster
// cache_duration:
-// - seconds: 300
+// seconds: 300
//
// [#not-implemented-hide:]
message JwtProvider {
@@ -50,7 +50,7 @@ message JwtProvider {
//
// audiences:
// - bookstore_android.apps.googleusercontent.com
- // bookstore_web.apps.googleusercontent.com
+ // - bookstore_web.apps.googleusercontent.com
//
repeated string audiences = 2;
@@ -67,11 +67,11 @@ message JwtProvider {
// .. code-block:: yaml
//
// remote_jwks:
- // - http_uri:
- // - uri: https://www.googleapis.com/oauth2/v1/certs
+ // http_uri:
+ // uri: https://www.googleapis.com/oauth2/v1/certs
// cluster: jwt.www.googleapis.com|443
// cache_duration:
- // - seconds: 300
+ // seconds: 300
//
RemoteJwks remote_jwks = 3;
@@ -83,14 +83,14 @@ message JwtProvider {
// .. code-block:: yaml
//
// local_jwks:
- // - filename: /etc/envoy/jwks/jwks1.txt
+ // filename: /etc/envoy/jwks/jwks1.txt
//
// Example: inline_string
//
// .. code-block:: yaml
//
// local_jwks:
- // - inline_string: "ACADADADADA"
+ // inline_string: "ACADADADADA"
//
envoy.api.v2.core.DataSource local_jwks = 4;
}
@@ -163,7 +163,7 @@ message RemoteJwks {
// .. code-block:: yaml
//
// http_uri:
- // - uri: https://www.googleapis.com/oauth2/v1/certs
+ // uri: https://www.googleapis.com/oauth2/v1/certs
// cluster: jwt.www.googleapis.com|443
//
envoy.api.v2.core.HttpUri http_uri = 1;
diff --git a/api/envoy/config/filter/http/lua/v2/BUILD b/api/envoy/config/filter/http/lua/v2/BUILD
index ce571d9720db6..d399bc5b066be 100644
--- a/api/envoy/config/filter/http/lua/v2/BUILD
+++ b/api/envoy/config/filter/http/lua/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "lua",
srcs = ["lua.proto"],
)
diff --git a/api/envoy/config/filter/http/rate_limit/v2/BUILD b/api/envoy/config/filter/http/rate_limit/v2/BUILD
index 484e19c40d322..3b90a57c80ae3 100644
--- a/api/envoy/config/filter/http/rate_limit/v2/BUILD
+++ b/api/envoy/config/filter/http/rate_limit/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "rate_limit",
srcs = ["rate_limit.proto"],
)
diff --git a/api/envoy/config/filter/http/rbac/v2/BUILD b/api/envoy/config/filter/http/rbac/v2/BUILD
index e96a01d560f74..d325e3bcde2d7 100644
--- a/api/envoy/config/filter/http/rbac/v2/BUILD
+++ b/api/envoy/config/filter/http/rbac/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "rbac",
srcs = ["rbac.proto"],
deps = ["//envoy/config/rbac/v2alpha:rbac"],
diff --git a/api/envoy/config/filter/http/router/v2/BUILD b/api/envoy/config/filter/http/router/v2/BUILD
index 00392ac7f98a5..38697ac806806 100644
--- a/api/envoy/config/filter/http/router/v2/BUILD
+++ b/api/envoy/config/filter/http/router/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "router",
srcs = ["router.proto"],
deps = ["//envoy/config/filter/accesslog/v2:accesslog"],
diff --git a/api/envoy/config/filter/http/squash/v2/BUILD b/api/envoy/config/filter/http/squash/v2/BUILD
index ea5e9c6c4c158..8cf2c80dde1e7 100644
--- a/api/envoy/config/filter/http/squash/v2/BUILD
+++ b/api/envoy/config/filter/http/squash/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "squash",
srcs = ["squash.proto"],
)
diff --git a/api/envoy/config/filter/http/transcoder/v2/BUILD b/api/envoy/config/filter/http/transcoder/v2/BUILD
index 087f8ce8cefb3..eddef5a7ebd03 100644
--- a/api/envoy/config/filter/http/transcoder/v2/BUILD
+++ b/api/envoy/config/filter/http/transcoder/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "transcoder",
srcs = ["transcoder.proto"],
)
diff --git a/api/envoy/config/filter/network/client_ssl_auth/v2/BUILD b/api/envoy/config/filter/network/client_ssl_auth/v2/BUILD
index d382848c92393..a6d31d6396111 100644
--- a/api/envoy/config/filter/network/client_ssl_auth/v2/BUILD
+++ b/api/envoy/config/filter/network/client_ssl_auth/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "client_ssl_auth",
srcs = ["client_ssl_auth.proto"],
deps = ["//envoy/api/v2/core:address"],
diff --git a/api/envoy/config/filter/network/ext_authz/v2/BUILD b/api/envoy/config/filter/network/ext_authz/v2/BUILD
index 22dc891526f98..4d716dee9744a 100644
--- a/api/envoy/config/filter/network/ext_authz/v2/BUILD
+++ b/api/envoy/config/filter/network/ext_authz/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "ext_authz",
srcs = ["ext_authz.proto"],
deps = ["//envoy/api/v2/core:grpc_service"],
diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/BUILD b/api/envoy/config/filter/network/http_connection_manager/v2/BUILD
index da2f4ddabc10e..c89ea09ad2909 100644
--- a/api/envoy/config/filter/network/http_connection_manager/v2/BUILD
+++ b/api/envoy/config/filter/network/http_connection_manager/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "http_connection_manager",
srcs = ["http_connection_manager.proto"],
deps = [
diff --git a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto
index 5087298e8c6fa..1e6999cf2077a 100644
--- a/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto
+++ b/api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto
@@ -19,7 +19,7 @@ import "gogoproto/gogo.proto";
// [#protodoc-title: HTTP connection manager]
// HTTP connection manager :ref:`configuration overview `.
-// [#comment:next free field: 24]
+// [#comment:next free field: 25]
message HttpConnectionManager {
enum CodecType {
option (gogoproto.goproto_enum_prefix) = false;
@@ -137,6 +137,33 @@ message HttpConnectionManager {
// `.
google.protobuf.Duration idle_timeout = 11 [(gogoproto.stdduration) = true];
+ // The stream idle timeout for connections managed by the connection manager.
+ // If not specified, this defaults to 5 minutes. The default value was selected
+ // so as not to interfere with any smaller configured timeouts that may have
+ // existed in configurations prior to the introduction of this feature, while
+ // introducing robustness to TCP connections that terminate without a FIN.
+ //
+ // This idle timeout applies to new streams and is overridable by the
+ // :ref:`route-level idle_timeout
+ // `. Even on a stream in
+ // which the override applies, prior to receipt of the initial request
+ // headers, the :ref:`stream_idle_timeout
+ // `
+ // applies. Each time an encode/decode event for headers or data is processed
+ // for the stream, the timer will be reset. If the timeout fires, the stream
+ // is terminated with a 408 Request Timeout error code if no upstream response
+ // header has been received, otherwise a stream reset occurs.
+ //
+ // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due
+ // to the granularity of events presented to the connection manager. For example, while receiving
+ // very large request headers, it may be the case that there is traffic regularly arriving on the
+ // wire while the connection manager is only able to observe the end-of-headers event, hence the
+ // stream may still idle timeout.
+ //
+ // A value of 0 will completely disable the connection manager stream idle
+ // timeout, although per-route idle timeout overrides will continue to apply.
+ google.protobuf.Duration stream_idle_timeout = 24 [(gogoproto.stdduration) = true];
+
// The time that Envoy will wait between sending an HTTP/2 “shutdown
// notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame.
// This is used so that Envoy provides a grace period for new streams that
@@ -221,9 +248,7 @@ message HttpConnectionManager {
// Whether to forward the subject of the client cert. Defaults to false.
google.protobuf.BoolValue subject = 1;
- // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to
- // false. This field is deprecated, use URI field instead.
- google.protobuf.BoolValue san = 2 [deprecated = true];
+ reserved 2; // san deprecated by uri
// Whether to forward the entire client cert in URL encoded PEM format. This will appear in the
// XFCC header comma separated from other values with the value Cert="PEM".
@@ -268,7 +293,6 @@ message HttpConnectionManager {
// control.
bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20;
- // [#not-implemented-hide:]
// The configuration for HTTP upgrades.
// For each upgrade type desired, an UpgradeConfig must be added.
//
@@ -277,6 +301,10 @@ message HttpConnectionManager {
// The current implementation of upgrade headers does not handle
// multi-valued upgrade headers. Support for multi-valued headers may be
// added in the future if needed.
+ //
+ // .. warning::
+ // The current implementation of upgrade headers does not work with HTTP/2
+ // upstreams.
message UpgradeConfig {
// The case-insensitive name of this upgrade, e.g. "websocket".
// For each upgrade type present in upgrade_configs, requests with
@@ -288,7 +316,6 @@ message HttpConnectionManager {
// HTTP connections will be used for this upgrade type.
repeated HttpFilter filters = 2;
};
- // [#not-implemented-hide:]
repeated UpgradeConfig upgrade_configs = 23;
}
diff --git a/api/envoy/config/filter/network/mongo_proxy/v2/BUILD b/api/envoy/config/filter/network/mongo_proxy/v2/BUILD
index 03bc303476752..69b0f85e156d0 100644
--- a/api/envoy/config/filter/network/mongo_proxy/v2/BUILD
+++ b/api/envoy/config/filter/network/mongo_proxy/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "mongo_proxy",
srcs = ["mongo_proxy.proto"],
deps = ["//envoy/config/filter/fault/v2:fault"],
diff --git a/api/envoy/config/filter/network/rate_limit/v2/BUILD b/api/envoy/config/filter/network/rate_limit/v2/BUILD
index b1936e3bb2c2f..2cda26cfde99e 100644
--- a/api/envoy/config/filter/network/rate_limit/v2/BUILD
+++ b/api/envoy/config/filter/network/rate_limit/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "rate_limit",
srcs = ["rate_limit.proto"],
deps = ["//envoy/api/v2/ratelimit"],
diff --git a/api/envoy/config/filter/network/redis_proxy/v2/BUILD b/api/envoy/config/filter/network/redis_proxy/v2/BUILD
index 78f269301fe05..c35e219b44659 100644
--- a/api/envoy/config/filter/network/redis_proxy/v2/BUILD
+++ b/api/envoy/config/filter/network/redis_proxy/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "redis_proxy",
srcs = ["redis_proxy.proto"],
)
diff --git a/api/envoy/config/filter/network/tcp_proxy/v2/BUILD b/api/envoy/config/filter/network/tcp_proxy/v2/BUILD
index 2e7296fa3f969..7cb467d6fb10d 100644
--- a/api/envoy/config/filter/network/tcp_proxy/v2/BUILD
+++ b/api/envoy/config/filter/network/tcp_proxy/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "tcp_proxy",
srcs = ["tcp_proxy.proto"],
deps = [
diff --git a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/BUILD b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/BUILD
new file mode 100644
index 0000000000000..da39334babd17
--- /dev/null
+++ b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/BUILD
@@ -0,0 +1,11 @@
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
+
+licenses(["notice"]) # Apache 2
+
+api_proto_library_internal(
+ name = "thrift_proxy",
+ srcs = [
+ "route.proto",
+ "thrift_proxy.proto",
+ ],
+)
diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v2alpha1/README.md b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/README.md
similarity index 100%
rename from api/envoy/extensions/filters/network/thrift_proxy/v2alpha1/README.md
rename to api/envoy/config/filter/network/thrift_proxy/v2alpha1/README.md
diff --git a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto
new file mode 100644
index 0000000000000..f70523e57f212
--- /dev/null
+++ b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto
@@ -0,0 +1,41 @@
+syntax = "proto3";
+
+package envoy.config.filter.network.thrift_proxy.v2alpha1;
+option go_package = "v2";
+
+import "validate/validate.proto";
+import "gogoproto/gogo.proto";
+
+// [#protodoc-title: Thrift route configuration]
+
+// [#comment:next free field: 3]
+message RouteConfiguration {
+ // The name of the route configuration. Reserved for future use in asynchronous route discovery.
+ string name = 1;
+
+ // The list of routes that will be matched, in order, against incoming requests. The first route
+ // that matches will be used.
+ repeated Route routes = 2 [(gogoproto.nullable) = false];
+}
+
+// [#comment:next free field: 3]
+message Route {
+  // Route matching parameters.
+ RouteMatch match = 1 [(validate.rules).message.required = true, (gogoproto.nullable) = false];
+
+ // Route request to some upstream cluster.
+ RouteAction route = 2 [(validate.rules).message.required = true, (gogoproto.nullable) = false];
+}
+
+// [#comment:next free field: 2]
+message RouteMatch {
+ // If specified, the route must exactly match the request method name. As a special case, an
+ // empty string matches any request method name.
+ string method = 1;
+}
+
+// [#comment:next free field: 2]
+message RouteAction {
+ // Indicates the upstream cluster to which the request should be routed.
+ string cluster = 1 [(validate.rules).string.min_bytes = 1];
+}
diff --git a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/router/BUILD b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/router/BUILD
new file mode 100644
index 0000000000000..ce0ad0e254f03
--- /dev/null
+++ b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/router/BUILD
@@ -0,0 +1,8 @@
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
+
+licenses(["notice"]) # Apache 2
+
+api_proto_library_internal(
+ name = "router",
+ srcs = ["router.proto"],
+)
diff --git a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/router/router.proto b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/router/router.proto
new file mode 100644
index 0000000000000..5ad9863b07dec
--- /dev/null
+++ b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/router/router.proto
@@ -0,0 +1,9 @@
+syntax = "proto3";
+
+package envoy.config.filter.network.thrift_proxy.v2alpha1.router;
+option go_package = "router";
+
+// [#protodoc-title: Thrift Router]
+// Thrift Router configuration.
+message Router {
+}
diff --git a/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto
new file mode 100644
index 0000000000000..1a7176dc33031
--- /dev/null
+++ b/api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto
@@ -0,0 +1,57 @@
+syntax = "proto3";
+
+package envoy.config.filter.network.thrift_proxy.v2alpha1;
+option go_package = "v2";
+
+import "envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto";
+
+import "validate/validate.proto";
+import "gogoproto/gogo.proto";
+
+// [#protodoc-title: Extensions Thrift Proxy]
+// Thrift Proxy filter configuration.
+// [#comment:next free field: 5]
+message ThriftProxy {
+ enum TransportType {
+ option (gogoproto.goproto_enum_prefix) = false;
+
+ // For every new connection, the Thrift proxy will determine which transport to use.
+ AUTO_TRANSPORT = 0;
+
+ // The Thrift proxy will assume the client is using the Thrift framed transport.
+ FRAMED = 1;
+
+ // The Thrift proxy will assume the client is using the Thrift unframed transport.
+ UNFRAMED = 2;
+ }
+
+ // Supplies the type of transport that the Thrift proxy should use. Defaults to `AUTO_TRANSPORT`.
+ TransportType transport = 2 [(validate.rules).enum.defined_only = true];
+
+ enum ProtocolType {
+ option (gogoproto.goproto_enum_prefix) = false;
+
+ // For every new connection, the Thrift proxy will determine which protocol to use.
+ // N.B. The older, non-strict binary protocol is not included in automatic protocol
+ // detection.
+ AUTO_PROTOCOL = 0;
+
+ // The Thrift proxy will assume the client is using the Thrift binary protocol.
+ BINARY = 1;
+
+ // The Thrift proxy will assume the client is using the Thrift non-strict binary protocol.
+ LAX_BINARY = 2;
+
+ // The Thrift proxy will assume the client is using the Thrift compact protocol.
+ COMPACT = 3;
+ }
+
+ // Supplies the type of protocol that the Thrift proxy should use. Defaults to `AUTO_PROTOCOL`.
+ ProtocolType protocol = 3 [(validate.rules).enum.defined_only = true];
+
+ // The human readable prefix to use when emitting statistics.
+ string stat_prefix = 1 [(validate.rules).string.min_bytes = 1];
+
+ // The route table for the connection manager is static and is specified in this property.
+ RouteConfiguration route_config = 4;
+}
diff --git a/api/envoy/config/grpc_credential/v2alpha/BUILD b/api/envoy/config/grpc_credential/v2alpha/BUILD
index 09f3e691f63a6..ca0a71eaef6cc 100644
--- a/api/envoy/config/grpc_credential/v2alpha/BUILD
+++ b/api/envoy/config/grpc_credential/v2alpha/BUILD
@@ -1,8 +1,8 @@
licenses(["notice"]) # Apache 2
-load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal")
-api_proto_library(
+api_proto_library_internal(
name = "file_based_metadata",
srcs = ["file_based_metadata.proto"],
deps = ["//envoy/api/v2/core:base"],
diff --git a/api/envoy/config/health_checker/redis/v2/BUILD b/api/envoy/config/health_checker/redis/v2/BUILD
index 7d217c54dda8c..b784e8d150621 100644
--- a/api/envoy/config/health_checker/redis/v2/BUILD
+++ b/api/envoy/config/health_checker/redis/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "redis",
srcs = ["redis.proto"],
)
diff --git a/api/envoy/config/metrics/v2/BUILD b/api/envoy/config/metrics/v2/BUILD
index 1c682a133a420..9d061aeb918e6 100644
--- a/api/envoy/config/metrics/v2/BUILD
+++ b/api/envoy/config/metrics/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "metrics_service",
srcs = ["metrics_service.proto"],
visibility = [
@@ -21,7 +21,7 @@ api_go_proto_library(
],
)
-api_proto_library(
+api_proto_library_internal(
name = "stats",
srcs = ["stats.proto"],
visibility = [
diff --git a/api/envoy/config/metrics/v2/stats.proto b/api/envoy/config/metrics/v2/stats.proto
index 8d128611cedde..121f59a9f85a2 100644
--- a/api/envoy/config/metrics/v2/stats.proto
+++ b/api/envoy/config/metrics/v2/stats.proto
@@ -21,6 +21,7 @@ message StatsSink {
// * :ref:`envoy.statsd `
// * :ref:`envoy.dog_statsd `
// * :ref:`envoy.metrics_service `
+ // * :ref:`envoy.stat_sinks.hystrix `
//
// Sinks optionally support tagged/multiple dimensional metrics.
string name = 1;
@@ -200,3 +201,27 @@ message DogStatsdSink {
reserved 2;
}
+
+// Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink.
+// The sink emits stats in `text/event-stream
+// `_
+// formatted stream for use by `Hystrix dashboard
+// `_.
+//
+// Note that only a single HystrixSink should be configured.
+//
+// Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`.
+message HystrixSink {
+ // The number of buckets the rolling statistical window is divided into.
+ //
+ // Each time the sink is flushed, all relevant Envoy statistics are sampled and
+ // added to the rolling window (removing the oldest samples in the window
+ // in the process). The sink then outputs the aggregate statistics across the
+ // current rolling window to the event stream(s).
+ //
+ // rolling_window(ms) = stats_flush_interval(ms) * num_of_buckets
+ //
+  // More detailed explanation can be found in `Hystrix wiki
+ // `_.
+ int64 num_buckets = 1;
+}
diff --git a/api/envoy/config/overload/v2alpha/BUILD b/api/envoy/config/overload/v2alpha/BUILD
new file mode 100644
index 0000000000000..ef06407fb9ea0
--- /dev/null
+++ b/api/envoy/config/overload/v2alpha/BUILD
@@ -0,0 +1,8 @@
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
+
+licenses(["notice"]) # Apache 2
+
+api_proto_library_internal(
+ name = "overload",
+ srcs = ["overload.proto"],
+)
diff --git a/api/envoy/config/overload/v2alpha/overload.proto b/api/envoy/config/overload/v2alpha/overload.proto
new file mode 100644
index 0000000000000..6b70d11d3243f
--- /dev/null
+++ b/api/envoy/config/overload/v2alpha/overload.proto
@@ -0,0 +1,63 @@
+syntax = "proto3";
+
+package envoy.config.overload.v2alpha;
+option go_package = "v2alpha";
+
+import "google/protobuf/duration.proto";
+import "google/protobuf/struct.proto";
+
+import "validate/validate.proto";
+
+// The Overload Manager provides an extensible framework to protect Envoy instances
+// from overload of various resources (memory, cpu, file descriptors, etc)
+
+message EmptyConfig {
+}
+
+message ResourceMonitor {
+ // The name of the resource monitor to instantiate. Must match a registered
+ // resource monitor type.
+ string name = 1 [(validate.rules).string.min_bytes = 1];
+
+ // Configuration for the resource monitor being instantiated.
+ google.protobuf.Struct config = 2;
+}
+
+message ThresholdTrigger {
+ // If the resource pressure is greater than or equal to this value, the trigger
+ // will fire.
+ double value = 1 [(validate.rules).double = {gte: 0, lte: 1}];
+}
+
+message Trigger {
+ // The name of the resource this is a trigger for.
+ string name = 1 [(validate.rules).string.min_bytes = 1];
+
+ oneof trigger_oneof {
+ option (validate.required) = true;
+ ThresholdTrigger threshold = 2;
+ }
+}
+
+message OverloadAction {
+ // The name of the overload action. This is just a well-known string that listeners can
+ // use for registering callbacks. Custom overload actions should be named using reverse
+ // DNS to ensure uniqueness.
+ string name = 1 [(validate.rules).string.min_bytes = 1];
+
+ // A set of triggers for this action. If any of these triggers fires the overload action
+ // is activated. Listeners are notified when the overload action transitions from
+ // inactivated to activated, or vice versa.
+ repeated Trigger triggers = 2 [(validate.rules).repeated .min_items = 1];
+}
+
+message OverloadManager {
+ // The interval for refreshing resource usage.
+ google.protobuf.Duration refresh_interval = 1;
+
+ // The set of resources to monitor.
+ repeated ResourceMonitor resource_monitors = 2 [(validate.rules).repeated .min_items = 1];
+
+ // The set of overload actions.
+ repeated OverloadAction actions = 3 [(validate.rules).repeated .min_items = 1];
+}
diff --git a/api/envoy/config/ratelimit/v2/BUILD b/api/envoy/config/ratelimit/v2/BUILD
index 08bad146ed4a7..2e69326aa3b1d 100644
--- a/api/envoy/config/ratelimit/v2/BUILD
+++ b/api/envoy/config/ratelimit/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "rls",
srcs = ["rls.proto"],
visibility = [
diff --git a/api/envoy/config/ratelimit/v2/rls.proto b/api/envoy/config/ratelimit/v2/rls.proto
index b9ffe80f79dc4..3a0f5dbedb35a 100644
--- a/api/envoy/config/ratelimit/v2/rls.proto
+++ b/api/envoy/config/ratelimit/v2/rls.proto
@@ -29,9 +29,9 @@ message RateLimitServiceConfig {
}
// Specifies if Envoy should use the data-plane-api client
- // :repo:`api/envoy/service/ratelimit/v2/rls.proto` or the legacy
+ // :repo:`api/envoy/service/ratelimit/v2/rls.proto` or the legacy
// client :repo:`source/common/ratelimit/ratelimit.proto` when
- // making requests to the rate limit service.
+ // making requests to the rate limit service.
//
// .. note::
//
diff --git a/api/envoy/config/rbac/v2alpha/BUILD b/api/envoy/config/rbac/v2alpha/BUILD
index 396982264e3a3..f24c8594ad2eb 100644
--- a/api/envoy/config/rbac/v2alpha/BUILD
+++ b/api/envoy/config/rbac/v2alpha/BUILD
@@ -1,15 +1,15 @@
licenses(["notice"]) # Apache 2
-load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal")
-api_proto_library(
+api_proto_library_internal(
name = "rbac",
srcs = ["rbac.proto"],
visibility = ["//visibility:public"],
deps = [
"//envoy/api/v2/core:address",
"//envoy/api/v2/route",
- "//envoy/type:string_match",
+ "//envoy/type/matcher:metadata",
],
)
@@ -19,6 +19,6 @@ api_go_proto_library(
deps = [
"//envoy/api/v2/core:address_go_proto",
"//envoy/api/v2/route:route_go_proto",
- "//envoy/type:string_match_go_proto",
+ "//envoy/type/matcher:metadata_go_proto",
],
)
diff --git a/api/envoy/config/rbac/v2alpha/rbac.proto b/api/envoy/config/rbac/v2alpha/rbac.proto
index cb9e53b5d9b12..ab32aaf475fd8 100644
--- a/api/envoy/config/rbac/v2alpha/rbac.proto
+++ b/api/envoy/config/rbac/v2alpha/rbac.proto
@@ -3,6 +3,7 @@ syntax = "proto3";
import "validate/validate.proto";
import "envoy/api/v2/core/address.proto";
import "envoy/api/v2/route/route.proto";
+import "envoy/type/matcher/metadata.proto";
package envoy.config.rbac.v2alpha;
option go_package = "v2alpha";
@@ -15,12 +16,11 @@ option go_package = "v2alpha";
//
// Here is an example of RBAC configuration. It has two policies:
//
-// * Service account "cluster.local/ns/default/sa/admin" has full access (empty permission entry
-// means full access) to the service.
+// * Service account "cluster.local/ns/default/sa/admin" has full access to the service, and so
+// does "cluster.local/ns/default/sa/superuser".
//
-// * Any user (empty principal entry means any user) can read ("GET") the service at paths with
-// prefix "/products" or suffix "/reviews" when request header "version" set to either "v1" or
-// "v2".
+// * Any user can read ("GET") the service at paths with prefix "/products", so long as the
+// destination port is either 80 or 443.
//
// .. code-block:: yaml
//
@@ -111,6 +111,14 @@ message Permission {
// A port number that describes the destination port connecting to.
uint32 destination_port = 6 [(validate.rules).uint32.lte = 65535];
+
+ // Metadata that describes additional information about the action.
+ envoy.type.matcher.MetadataMatcher metadata = 7;
+
+ // Negates matching the provided permission. For instance, if the value of `not_rule` would
+ // match, this permission would not match. Conversely, if the value of `not_rule` would not
+ // match, this permission would match.
+ Permission not_rule = 8;
}
}
@@ -150,5 +158,13 @@ message Principal {
// A header (or psuedo-header such as :path or :method) on the incoming HTTP request.
envoy.api.v2.route.HeaderMatcher header = 6;
+
+ // Metadata that describes additional information about the principal.
+ envoy.type.matcher.MetadataMatcher metadata = 7;
+
+ // Negates matching the provided principal. For instance, if the value of `not_id` would match,
+ // this principal would not match. Conversely, if the value of `not_id` would not match, this
+ // principal would match.
+ Principal not_id = 8;
}
}
diff --git a/api/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD b/api/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD
new file mode 100644
index 0000000000000..adc77e5b5e0d3
--- /dev/null
+++ b/api/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD
@@ -0,0 +1,8 @@
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
+
+licenses(["notice"]) # Apache 2
+
+api_proto_library_internal(
+ name = "fixed_heap",
+ srcs = ["fixed_heap.proto"],
+)
diff --git a/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto b/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto
new file mode 100644
index 0000000000000..08e3c6536f5d3
--- /dev/null
+++ b/api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto
@@ -0,0 +1,10 @@
+syntax = "proto3";
+
+package envoy.config.resource_monitor.fixed_heap.v2alpha;
+option go_package = "v2alpha";
+
+message FixedHeapConfig {
+ // Limit of the Envoy process heap size. This is used to calculate heap memory pressure which
+ // is defined as (current heap size)/max_heap_size_bytes.
+ uint64 max_heap_size_bytes = 1;
+}
diff --git a/api/envoy/config/trace/v2/BUILD b/api/envoy/config/trace/v2/BUILD
index b888bd1b8e400..518395f230707 100644
--- a/api/envoy/config/trace/v2/BUILD
+++ b/api/envoy/config/trace/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "trace",
srcs = ["trace.proto"],
visibility = [
diff --git a/api/envoy/config/transport_socket/capture/v2alpha/BUILD b/api/envoy/config/transport_socket/capture/v2alpha/BUILD
index 1786d008b9e7d..bd25da3e6c7ea 100644
--- a/api/envoy/config/transport_socket/capture/v2alpha/BUILD
+++ b/api/envoy/config/transport_socket/capture/v2alpha/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "capture",
srcs = ["capture.proto"],
deps = [
diff --git a/api/envoy/data/accesslog/v2/BUILD b/api/envoy/data/accesslog/v2/BUILD
index 21c1ea449e775..8ecfdd5b6d119 100644
--- a/api/envoy/data/accesslog/v2/BUILD
+++ b/api/envoy/data/accesslog/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "accesslog",
srcs = ["accesslog.proto"],
visibility = [
diff --git a/api/envoy/data/core/v2alpha/BUILD b/api/envoy/data/core/v2alpha/BUILD
new file mode 100644
index 0000000000000..740e4304cca72
--- /dev/null
+++ b/api/envoy/data/core/v2alpha/BUILD
@@ -0,0 +1,15 @@
+load("//bazel:api_build_system.bzl", "api_proto_library")
+
+licenses(["notice"]) # Apache 2
+
+api_proto_library(
+ name = "health_check_event",
+ srcs = ["health_check_event.proto"],
+ visibility = [
+ "//visibility:public",
+ ],
+ deps = [
+ "//envoy/api/v2/core:address",
+ "//envoy/api/v2/core:base",
+ ],
+)
diff --git a/api/envoy/data/core/v2alpha/health_check_event.proto b/api/envoy/data/core/v2alpha/health_check_event.proto
new file mode 100644
index 0000000000000..5c9e28f6846dd
--- /dev/null
+++ b/api/envoy/data/core/v2alpha/health_check_event.proto
@@ -0,0 +1,58 @@
+syntax = "proto3";
+
+package envoy.data.core.v2alpha;
+
+import "envoy/api/v2/core/address.proto";
+import "envoy/api/v2/core/base.proto";
+
+import "google/protobuf/duration.proto";
+import "google/protobuf/wrappers.proto";
+
+import "validate/validate.proto";
+import "gogoproto/gogo.proto";
+
+option (gogoproto.equal_all) = true;
+
+// [#protodoc-title: Health check logging events]
+// :ref:`Health check logging `.
+
+message HealthCheckEvent {
+ HealthCheckerType health_checker_type = 1 [(validate.rules).enum.defined_only = true];
+ envoy.api.v2.core.Address host = 2;
+ string cluster_name = 3 [(validate.rules).string.min_bytes = 1];
+
+ oneof event {
+ option (validate.required) = true;
+
+ // Host ejection.
+ HealthCheckEjectUnhealthy eject_unhealthy_event = 4;
+
+ // Host addition.
+ HealthCheckAddHealthy add_healthy_event = 5;
+ }
+}
+
+enum HealthCheckFailureType {
+ ACTIVE = 0;
+ PASSIVE = 1;
+ NETWORK = 2;
+}
+
+enum HealthCheckerType {
+ HTTP = 0;
+ TCP = 1;
+ GRPC = 2;
+ REDIS = 3;
+}
+
+message HealthCheckEjectUnhealthy {
+ // The type of failure that caused this ejection.
+ HealthCheckFailureType failure_type = 1 [(validate.rules).enum.defined_only = true];
+}
+
+message HealthCheckAddHealthy {
+ // Whether this addition is the result of the first ever health check on a host, in which case
+ // the configured :ref:`healthy threshold `
+ // is bypassed and the host is immediately added.
+ bool first_check = 1;
+}
diff --git a/api/envoy/data/tap/v2alpha/BUILD b/api/envoy/data/tap/v2alpha/BUILD
index 2211bb37ca5bc..46de68e3a825b 100644
--- a/api/envoy/data/tap/v2alpha/BUILD
+++ b/api/envoy/data/tap/v2alpha/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "capture",
srcs = ["capture.proto"],
deps = ["//envoy/api/v2/core:address"],
diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v2alpha1/BUILD b/api/envoy/extensions/filters/network/thrift_proxy/v2alpha1/BUILD
deleted file mode 100644
index 19eea2ec3bfa2..0000000000000
--- a/api/envoy/extensions/filters/network/thrift_proxy/v2alpha1/BUILD
+++ /dev/null
@@ -1,8 +0,0 @@
-load("//bazel:api_build_system.bzl", "api_proto_library")
-
-licenses(["notice"]) # Apache 2
-
-api_proto_library(
- name = "thrift_proxy",
- srcs = ["thrift_proxy.proto"],
-)
diff --git a/api/envoy/extensions/filters/network/thrift_proxy/v2alpha1/thrift_proxy.proto b/api/envoy/extensions/filters/network/thrift_proxy/v2alpha1/thrift_proxy.proto
deleted file mode 100644
index e2d6bd02cb261..0000000000000
--- a/api/envoy/extensions/filters/network/thrift_proxy/v2alpha1/thrift_proxy.proto
+++ /dev/null
@@ -1,13 +0,0 @@
-syntax = "proto3";
-
-package envoy.extensions.filters.network.thrift_proxy.v2alpha1;
-option go_package = "v2";
-
-import "validate/validate.proto";
-
-// [#protodoc-title: Extensions Thrift Proxy]
-// Thrift Proxy filter configuration.
-message ThriftProxy {
- // The human readable prefix to use when emitting statistics.
- string stat_prefix = 1 [(validate.rules).string.min_bytes = 1];
-}
diff --git a/api/envoy/service/accesslog/v2/BUILD b/api/envoy/service/accesslog/v2/BUILD
index c5073996f7189..e6e389e22a02f 100644
--- a/api/envoy/service/accesslog/v2/BUILD
+++ b/api/envoy/service/accesslog/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "als",
srcs = ["als.proto"],
has_services = 1,
diff --git a/api/envoy/service/auth/v2alpha/BUILD b/api/envoy/service/auth/v2alpha/BUILD
index 323a49eee7dff..5faba48ac3dbc 100644
--- a/api/envoy/service/auth/v2alpha/BUILD
+++ b/api/envoy/service/auth/v2alpha/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "attribute_context",
srcs = [
"attribute_context.proto",
@@ -12,7 +12,7 @@ api_proto_library(
],
)
-api_proto_library(
+api_proto_library_internal(
name = "external_auth",
srcs = [
"external_auth.proto",
@@ -20,5 +20,7 @@ api_proto_library(
has_services = 1,
deps = [
":attribute_context",
+ "//envoy/api/v2/core:base",
+ "//envoy/type:http_status",
],
)
diff --git a/api/envoy/service/auth/v2alpha/external_auth.proto b/api/envoy/service/auth/v2alpha/external_auth.proto
index 601c4dea6c218..caa5e3089573b 100644
--- a/api/envoy/service/auth/v2alpha/external_auth.proto
+++ b/api/envoy/service/auth/v2alpha/external_auth.proto
@@ -4,6 +4,8 @@ package envoy.service.auth.v2alpha;
option go_package = "v2alpha";
option java_generic_services = true;
+import "envoy/api/v2/core/base.proto";
+import "envoy/type/http_status.proto";
import "envoy/service/auth/v2alpha/attribute_context.proto";
import "google/rpc/status.proto";
@@ -27,21 +29,45 @@ message CheckRequest {
AttributeContext attributes = 1;
}
+// HTTP attributes for a denied response.
+message DeniedHttpResponse {
+ // This field allows the authorization service to send a HTTP response status
+ // code to the downstream client other than 403 (Forbidden).
+ envoy.type.HttpStatus status = 1 [(validate.rules).message.required = true];
+
+ // This field allows the authorization service to send HTTP response headers
+  // to the downstream client.
+ repeated envoy.api.v2.core.HeaderValueOption headers = 2;
+
+ // This field allows the authorization service to send a response body data
+  // to the downstream client.
+ string body = 3;
+}
+
+// HTTP attributes for an ok response.
+message OkHttpResponse {
+ // HTTP entity headers in addition to the original request headers. This allows the authorization
+ // service to append, to add or to override headers from the original request before
+ // dispatching it to the upstream. By setting `append` field to `true` in the `HeaderValueOption`,
+ // the filter will append the correspondent header value to the matched request header. Note that
+  // by leaving `append` as false, the filter will either add a new header, or override an existing
+ // one if there is a match.
+ repeated envoy.api.v2.core.HeaderValueOption headers = 2;
+}
+
+// Intended for gRPC and Network Authorization servers `only`.
message CheckResponse {
// Status `OK` allows the request. Any other status indicates the request should be denied.
google.rpc.Status status = 1;
- // An optional message that contains HTTP response attributes. This message is
+  // A message that contains HTTP response attributes. This message is
// used when the authorization service needs to send custom responses to the
// downstream client or, to modify/add request headers being dispatched to the upstream.
- message HttpResponse {
- // Http status code.
- uint32 status_code = 1 [(validate.rules).uint32 = {gte: 100, lt: 600}];
-
- // Http entity headers.
- map headers = 2;
+ oneof http_response {
+ // Supplies http attributes for a denied response.
+ DeniedHttpResponse denied_response = 2;
- // Http entity body.
- string body = 3;
+ // Supplies http attributes for an ok response.
+ OkHttpResponse ok_response = 3;
}
}
diff --git a/api/envoy/service/discovery/v2/BUILD b/api/envoy/service/discovery/v2/BUILD
index f0a67f206c8dd..ac652cf1859a4 100644
--- a/api/envoy/service/discovery/v2/BUILD
+++ b/api/envoy/service/discovery/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "ads",
srcs = ["ads.proto"],
has_services = 1,
@@ -19,7 +19,7 @@ api_go_grpc_library(
],
)
-api_proto_library(
+api_proto_library_internal(
name = "hds",
srcs = ["hds.proto"],
has_services = 1,
@@ -40,7 +40,7 @@ api_go_grpc_library(
],
)
-api_proto_library(
+api_proto_library_internal(
name = "sds",
srcs = ["sds.proto"],
has_services = 1,
diff --git a/api/envoy/service/discovery/v2/ads.proto b/api/envoy/service/discovery/v2/ads.proto
index 821ccb341db52..16953ee7b9a6c 100644
--- a/api/envoy/service/discovery/v2/ads.proto
+++ b/api/envoy/service/discovery/v2/ads.proto
@@ -27,4 +27,8 @@ service AggregatedDiscoveryService {
rpc StreamAggregatedResources(stream envoy.api.v2.DiscoveryRequest)
returns (stream envoy.api.v2.DiscoveryResponse) {
}
+
+ rpc IncrementalAggregatedResources(stream envoy.api.v2.IncrementalDiscoveryRequest)
+ returns (stream envoy.api.v2.IncrementalDiscoveryResponse) {
+ }
}
diff --git a/api/envoy/service/load_stats/v2/BUILD b/api/envoy/service/load_stats/v2/BUILD
index 4068eafb3e973..66294100bf701 100644
--- a/api/envoy/service/load_stats/v2/BUILD
+++ b/api/envoy/service/load_stats/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "lrs",
srcs = ["lrs.proto"],
has_services = 1,
diff --git a/api/envoy/service/load_stats/v2/lrs.proto b/api/envoy/service/load_stats/v2/lrs.proto
index 2181fa0ff16f9..43971649504fa 100644
--- a/api/envoy/service/load_stats/v2/lrs.proto
+++ b/api/envoy/service/load_stats/v2/lrs.proto
@@ -63,6 +63,12 @@ message LoadStatsResponse {
// Clusters to report stats for.
repeated string clusters = 1 [(validate.rules).repeated .min_items = 1];
- // The interval of time to collect stats. The default is 10 seconds.
+ // The minimum interval of time to collect stats over. This is only a minimum for two reasons:
+ // 1. There may be some delay from when the timer fires until stats sampling occurs.
+  //   2. For clusters that were already featured in the previous *LoadStatsResponse*, any traffic
+ // that is observed in between the corresponding previous *LoadStatsRequest* and this
+ // *LoadStatsResponse* will also be accumulated and billed to the cluster. This avoids a period
+  //      of inobservability that might otherwise exist between the messages. New clusters are not
+ // subject to this consideration.
google.protobuf.Duration load_reporting_interval = 2;
}
diff --git a/api/envoy/service/metrics/v2/BUILD b/api/envoy/service/metrics/v2/BUILD
index bbad50c789e5d..6d14bfe414796 100644
--- a/api/envoy/service/metrics/v2/BUILD
+++ b/api/envoy/service/metrics/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_go_proto_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "metrics_service",
srcs = ["metrics_service.proto"],
has_services = 1,
@@ -10,7 +10,7 @@ api_proto_library(
deps = [
"//envoy/api/v2/core:base",
"//envoy/api/v2/core:grpc_service",
- "@promotheus_metrics_model//:client_model",
+ "@prometheus_metrics_model//:client_model",
],
)
@@ -19,6 +19,6 @@ api_go_grpc_library(
proto = ":metrics_service",
deps = [
"//envoy/api/v2/core:base_go_proto",
- "@promotheus_metrics_model//:client_model_go_proto",
+ "@prometheus_metrics_model//:client_model_go_proto",
],
)
diff --git a/api/envoy/service/ratelimit/v2/BUILD b/api/envoy/service/ratelimit/v2/BUILD
index be6fdbc915ee0..4ee72b6518882 100644
--- a/api/envoy/service/ratelimit/v2/BUILD
+++ b/api/envoy/service/ratelimit/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "rls",
srcs = ["rls.proto"],
has_services = 1,
diff --git a/api/envoy/service/trace/v2/BUILD b/api/envoy/service/trace/v2/BUILD
index a5f13f2c482e7..49c935f12938d 100644
--- a/api/envoy/service/trace/v2/BUILD
+++ b/api/envoy/service/trace/v2/BUILD
@@ -1,8 +1,8 @@
-load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_grpc_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
name = "trace_service",
srcs = ["trace_service.proto"],
has_services = 1,
diff --git a/api/envoy/type/BUILD b/api/envoy/type/BUILD
index 4859476efbd9d..150e226517b50 100644
--- a/api/envoy/type/BUILD
+++ b/api/envoy/type/BUILD
@@ -1,8 +1,19 @@
-load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
-api_proto_library(
+api_proto_library_internal(
+ name = "http_status",
+ srcs = ["http_status.proto"],
+ visibility = ["//visibility:public"],
+)
+
+api_go_proto_library(
+ name = "http_status",
+ proto = ":http_status",
+)
+
+api_proto_library_internal(
name = "percent",
srcs = ["percent.proto"],
visibility = ["//visibility:public"],
@@ -13,7 +24,7 @@ api_go_proto_library(
proto = ":percent",
)
-api_proto_library(
+api_proto_library_internal(
name = "range",
srcs = ["range.proto"],
visibility = ["//visibility:public"],
@@ -23,14 +34,3 @@ api_go_proto_library(
name = "range",
proto = ":range",
)
-
-api_proto_library(
- name = "string_match",
- srcs = ["string_match.proto"],
- visibility = ["//visibility:public"],
-)
-
-api_go_proto_library(
- name = "string_match",
- proto = ":string_match",
-)
diff --git a/api/envoy/type/http_status.proto b/api/envoy/type/http_status.proto
new file mode 100644
index 0000000000000..35655613c198c
--- /dev/null
+++ b/api/envoy/type/http_status.proto
@@ -0,0 +1,81 @@
+syntax = "proto3";
+
+package envoy.type;
+
+import "validate/validate.proto";
+
+// HTTP response codes supported in Envoy.
+// For more details: http://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml
+enum StatusCode {
+  // Empty - This code is not part of the HTTP status code specification, but it is needed for proto
+ // `enum` type.
+ Empty = 0;
+
+ Continue = 100;
+
+ OK = 200;
+ Created = 201;
+ Accepted = 202;
+ NonAuthoritativeInformation = 203;
+ NoContent = 204;
+ ResetContent = 205;
+ PartialContent = 206;
+ MultiStatus = 207;
+ AlreadyReported = 208;
+ IMUsed = 226;
+
+ MultipleChoices = 300;
+ MovedPermanently = 301;
+ Found = 302;
+ SeeOther = 303;
+ NotModified = 304;
+ UseProxy = 305;
+ TemporaryRedirect = 307;
+ PermanentRedirect = 308;
+
+ BadRequest = 400;
+ Unauthorized = 401;
+ PaymentRequired = 402;
+ Forbidden = 403;
+ NotFound = 404;
+ MethodNotAllowed = 405;
+ NotAcceptable = 406;
+ ProxyAuthenticationRequired = 407;
+ RequestTimeout = 408;
+ Conflict = 409;
+ Gone = 410;
+ LengthRequired = 411;
+ PreconditionFailed = 412;
+ PayloadTooLarge = 413;
+ URITooLong = 414;
+ UnsupportedMediaType = 415;
+ RangeNotSatisfiable = 416;
+ ExpectationFailed = 417;
+ MisdirectedRequest = 421;
+ UnprocessableEntity = 422;
+ Locked = 423;
+ FailedDependency = 424;
+ UpgradeRequired = 426;
+ PreconditionRequired = 428;
+ TooManyRequests = 429;
+ RequestHeaderFieldsTooLarge = 431;
+
+ InternalServerError = 500;
+ NotImplemented = 501;
+ BadGateway = 502;
+ ServiceUnavailable = 503;
+ GatewayTimeout = 504;
+ HTTPVersionNotSupported = 505;
+ VariantAlsoNegotiates = 506;
+ InsufficientStorage = 507;
+ LoopDetected = 508;
+ NotExtended = 510;
+ NetworkAuthenticationRequired = 511;
+}
+
+// HTTP status.
+message HttpStatus {
+ // Supplies HTTP response code.
+ StatusCode code = 1
+ [(validate.rules).enum = {not_in: [0]}, (validate.rules).enum.defined_only = true];
+}
diff --git a/api/envoy/type/matcher/BUILD b/api/envoy/type/matcher/BUILD
new file mode 100644
index 0000000000000..eb261e6f7ddbb
--- /dev/null
+++ b/api/envoy/type/matcher/BUILD
@@ -0,0 +1,50 @@
+load("//bazel:api_build_system.bzl", "api_go_proto_library", "api_proto_library_internal")
+
+licenses(["notice"]) # Apache 2
+
+api_proto_library_internal(
+ name = "metadata",
+ srcs = ["metadata.proto"],
+ visibility = ["//visibility:public"],
+ deps = [
+ ":number",
+ ":string",
+ ],
+)
+
+api_go_proto_library(
+ name = "metadata",
+ proto = ":metadata",
+ deps = [
+ ":number_go_proto",
+ ":string_go_proto",
+ ],
+)
+
+api_proto_library_internal(
+ name = "number",
+ srcs = ["number.proto"],
+ visibility = ["//visibility:public"],
+ deps = [
+ "//envoy/type:range",
+ ],
+)
+
+api_go_proto_library(
+ name = "number",
+ proto = ":number",
+ deps = [
+ "//envoy/type:range_go_proto",
+ ],
+)
+
+api_proto_library_internal(
+ name = "string",
+ srcs = ["string.proto"],
+ visibility = ["//visibility:public"],
+)
+
+api_go_proto_library(
+ name = "string",
+ proto = ":string",
+)
diff --git a/api/envoy/type/matcher/metadata.proto b/api/envoy/type/matcher/metadata.proto
new file mode 100644
index 0000000000000..f899bc1305251
--- /dev/null
+++ b/api/envoy/type/matcher/metadata.proto
@@ -0,0 +1,123 @@
+syntax = "proto3";
+
+package envoy.type.matcher;
+option go_package = "matcher";
+
+import "envoy/type/matcher/string.proto";
+import "envoy/type/matcher/number.proto";
+
+import "validate/validate.proto";
+
+// [#protodoc-title: MetadataMatcher]
+
+// MetadataMatcher provides a general interface to check if a given value is matched in
+// :ref:`Metadata <envoy_api_msg_core.Metadata>`. It uses `filter` and `path` to retrieve the value
+// from the Metadata and then check if it's matched to the specified value.
+//
+// For example, for the following Metadata:
+//
+// .. code-block:: yaml
+//
+// filter_metadata:
+// envoy.filters.http.rbac:
+// fields:
+// a:
+// struct_value:
+// fields:
+// b:
+// struct_value:
+// fields:
+// c:
+// string_value: pro
+// t:
+// list_value:
+// values:
+// - string_value: m
+// - string_value: n
+//
+// The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro"
+// from the Metadata which is matched to the specified prefix match.
+//
+// .. code-block:: yaml
+//
+// filter: envoy.filters.http.rbac
+// path:
+// - key: a
+// - key: b
+// - key: c
+// value:
+// string_match:
+// prefix: pr
+//
+// The following MetadataMatcher is not matched as the path [a, t] is pointing to a list value in
+// the Metadata which is not supported for now.
+//
+// .. code-block:: yaml
+//
+// filter: envoy.filters.http.rbac
+// path:
+// - key: a
+// - key: t
+// value:
+// string_match:
+// exact: m
+//
+// An example use of MetadataMatcher is specifying additional metadata in envoy.filters.http.rbac to
+// enforce access control based on dynamic metadata in a request. See :ref:`Permission
+// ` and :ref:`Principal
+// `.
+message MetadataMatcher {
+ // Specifies the segment in a path to retrieve value from Metadata.
+ // Note: Currently it's not supported to retrieve a value from a list in Metadata. This means it
+ // will always be not matched if the associated value of the key is a list.
+ message PathSegment {
+ oneof segment {
+ option (validate.required) = true;
+
+ // If specified, use the key to retrieve the value in a Struct.
+ string key = 1 [(validate.rules).string.min_bytes = 1];
+ }
+ }
+
+  // Specifies the value to match. Only primitive values are supported. For non-primitive values, the
+ // result is always not matched.
+ message Value {
+ // NullMatch is an empty message to specify a null value.
+ message NullMatch {
+ }
+
+ // Specifies how to match a value.
+ oneof match_pattern {
+ option (validate.required) = true;
+
+ // If specified, a match occurs if and only if the target value is a NullValue.
+ NullMatch null_match = 1;
+
+ // If specified, a match occurs if and only if the target value is a double value and is
+ // matched to this field.
+ DoubleMatcher double_match = 2;
+
+ // If specified, a match occurs if and only if the target value is a string value and is
+ // matched to this field.
+ StringMatcher string_match = 3;
+
+ // If specified, a match occurs if and only if the target value is a bool value and is equal
+ // to this field.
+ bool bool_match = 4;
+
+ // If specified, value match will be performed based on whether the path is referring to a
+ // valid primitive value in the metadata. If the path is referring to a non-primitive value,
+ // the result is always not matched.
+ bool present_match = 5;
+ }
+ }
+
+ // The filter name to retrieve the Struct from the Metadata.
+ string filter = 1 [(validate.rules).string.min_bytes = 1];
+
+ // The path to retrieve the Value from the Struct.
+ repeated PathSegment path = 2 [(validate.rules).repeated .min_items = 1];
+
+ // The MetadataMatcher is matched if the value retrieved by path is matched to this value.
+ Value value = 3 [(validate.rules).message.required = true];
+}
diff --git a/api/envoy/type/matcher/number.proto b/api/envoy/type/matcher/number.proto
new file mode 100644
index 0000000000000..9cf4ff1f10458
--- /dev/null
+++ b/api/envoy/type/matcher/number.proto
@@ -0,0 +1,24 @@
+syntax = "proto3";
+
+package envoy.type.matcher;
+option go_package = "matcher";
+
+import "envoy/type/range.proto";
+
+import "validate/validate.proto";
+
+// [#protodoc-title: NumberMatcher]
+
+// Specifies the way to match a double value.
+message DoubleMatcher {
+ oneof match_pattern {
+ option (validate.required) = true;
+
+ // If specified, the input double value must be in the range specified here.
+ // Note: The range is using half-open interval semantics [start, end).
+ envoy.type.DoubleRange range = 1;
+
+ // If specified, the input double value must be equal to the value specified here.
+ double exact = 2;
+ }
+}
diff --git a/api/envoy/type/matcher/string.proto b/api/envoy/type/matcher/string.proto
new file mode 100644
index 0000000000000..afb419a613b39
--- /dev/null
+++ b/api/envoy/type/matcher/string.proto
@@ -0,0 +1,49 @@
+syntax = "proto3";
+
+package envoy.type.matcher;
+option go_package = "matcher";
+
+import "validate/validate.proto";
+
+// [#protodoc-title: StringMatcher]
+
+// Specifies the way to match a string.
+message StringMatcher {
+ oneof match_pattern {
+ option (validate.required) = true;
+
+ // The input string must match exactly the string specified here.
+ //
+ // Examples:
+ //
+ // * *abc* only matches the value *abc*.
+ string exact = 1;
+
+ // The input string must have the prefix specified here.
+ // Note: empty prefix is not allowed, please use regex instead.
+ //
+ // Examples:
+ //
+ // * *abc* matches the value *abc.xyz*
+ string prefix = 2 [(validate.rules).string.min_bytes = 1];
+
+ // The input string must have the suffix specified here.
+  // Note: empty suffix is not allowed, please use regex instead.
+ //
+ // Examples:
+ //
+ // * *abc* matches the value *xyz.abc*
+ string suffix = 3 [(validate.rules).string.min_bytes = 1];
+
+ // The input string must match the regular expression specified here.
+  // The regex grammar is defined `here
+  // <https://en.cppreference.com/w/cpp/regex/ecmascript>`_.
+ //
+ // Examples:
+ //
+ // * The regex *\d{3}* matches the value *123*
+ // * The regex *\d{3}* does not match the value *1234*
+ // * The regex *\d{3}* does not match the value *123.456*
+ string regex = 4;
+ }
+}
diff --git a/api/envoy/type/range.proto b/api/envoy/type/range.proto
index fd6045e7fd289..115091ddf9f69 100644
--- a/api/envoy/type/range.proto
+++ b/api/envoy/type/range.proto
@@ -18,3 +18,13 @@ message Int64Range {
// end of the range (exclusive)
int64 end = 2;
}
+
+// Specifies the double start and end of the range using half-open interval semantics [start,
+// end).
+message DoubleRange {
+ // start of the range (inclusive)
+ double start = 1;
+
+ // end of the range (exclusive)
+ double end = 2;
+}
diff --git a/api/envoy/type/string_match.proto b/api/envoy/type/string_match.proto
deleted file mode 100644
index c1e2468ad5899..0000000000000
--- a/api/envoy/type/string_match.proto
+++ /dev/null
@@ -1,30 +0,0 @@
-syntax = "proto3";
-
-package envoy.type;
-option go_package = "envoy_type";
-
-import "gogoproto/gogo.proto";
-
-option (gogoproto.equal_all) = true;
-
-// [#protodoc-title: StringMatch]
-
-// Specifies the way to match a string.
-message StringMatch {
- oneof match_pattern {
- // The input string must match exactly the string specified here.
- // Or it is a "*", which means that it matches any string.
- string simple = 1;
-
- // The input string must have the prefix specified here.
- string prefix = 2;
-
- // The input string must have the suffix specified here.
- string suffix = 3;
-
- // The input string must match the regular expression specified here.
- // The regex grammar is defined `here
- // `_.
- string regex = 4;
- }
-}
diff --git a/api/test/validate/BUILD b/api/test/validate/BUILD
index 2707e02cda541..2c98249c78859 100644
--- a/api/test/validate/BUILD
+++ b/api/test/validate/BUILD
@@ -1,4 +1,4 @@
-load("//bazel:api_build_system.bzl", "api_cc_test", "api_proto_library")
+load("//bazel:api_build_system.bzl", "api_cc_test", "api_proto_library_internal")
licenses(["notice"]) # Apache 2
diff --git a/bazel/BUILD b/bazel/BUILD
index 6a5258d5440b0..223d2d0b7ee19 100644
--- a/bazel/BUILD
+++ b/bazel/BUILD
@@ -35,6 +35,35 @@ genrule(
stamp = 1,
)
+config_setting(
+ name = "windows_x86_64",
+ values = {"cpu": "x64_windows"},
+)
+
+config_setting(
+ name = "windows_opt_build",
+ values = {
+ "cpu": "x64_windows",
+ "compilation_mode": "opt",
+ },
+)
+
+config_setting(
+ name = "windows_dbg_build",
+ values = {
+ "cpu": "x64_windows",
+ "compilation_mode": "dbg",
+ },
+)
+
+config_setting(
+ name = "windows_fastbuild_build",
+ values = {
+ "cpu": "x64_windows",
+ "compilation_mode": "fastbuild",
+ },
+)
+
config_setting(
name = "opt_build",
values = {"compilation_mode": "opt"},
diff --git a/bazel/README.md b/bazel/README.md
index 6b9631f327113..0b68e7ec16e85 100644
--- a/bazel/README.md
+++ b/bazel/README.md
@@ -27,7 +27,7 @@ up-to-date with the latest security patches. See
for how to update or override dependencies.
1. Install the latest version of [Bazel](https://bazel.build/versions/master/docs/install.html) in your environment.
-2. Install external dependencies libtool, cmake, and realpath libraries separately.
+2. Install external dependencies libtool, cmake, ninja, and realpath libraries separately.
On Ubuntu, run the following commands:
```
apt-get install libtool
@@ -35,6 +35,7 @@ On Ubuntu, run the following commands:
apt-get install realpath
apt-get install clang-format-5.0
apt-get install automake
+ apt-get install ninja-build
```
On Fedora (maybe also other red hat distros), run the following:
@@ -51,6 +52,7 @@ brew install libtool
brew install go
brew install bazel
brew install automake
+brew install ninja
```
Envoy compiles and passes tests with the version of clang installed by XCode 9.3.0:
@@ -353,6 +355,14 @@ then log back in and it should start working.
The latest coverage report for master is available
[here](https://s3.amazonaws.com/lyft-envoy/coverage/report-master/coverage.html).
+It's also possible to specialize the coverage build to a single test target. This is useful
+when doing things like exploring the coverage of a fuzzer over its corpus. This can be done with
+the `COVERAGE_TARGET` and `VALIDATE_COVERAGE` environment variables, e.g.:
+
+```
+COVERAGE_TARGET=//test/common/common:base64_fuzz_test VALIDATE_COVERAGE=false test/run_envoy_bazel_coverage.sh
+```
+
# Cleaning the build and test artifacts
`bazel clean` will nuke all the build/test artifacts from the Bazel cache for
diff --git a/bazel/cc_configure.bzl b/bazel/cc_configure.bzl
index 35b005557c6ea..eb1dead6b260a 100644
--- a/bazel/cc_configure.bzl
+++ b/bazel/cc_configure.bzl
@@ -5,86 +5,86 @@ load("@bazel_tools//tools/cpp:unix_cc_configure.bzl", "find_cc")
# Stub for `repository_ctx.which()` that always succeeds. See comments in
# `_find_cxx` for details.
def _quiet_fake_which(program):
- return struct(_envoy_fake_which = program)
+ return struct(_envoy_fake_which = program)
# Stub for `repository_ctx.which()` that always fails. See comments in
# `_find_cxx` for details.
def _noisy_fake_which(program):
- return None
+ return None
# Find a good path for the C++ compiler, by hooking into Bazel's C compiler
# detection. Uses `$CXX` if found, otherwise defaults to `g++` because Bazel
# defaults to `gcc`.
def _find_cxx(repository_ctx):
- # Bazel's `find_cc` helper uses the repository context to inspect `$CC`.
- # Replace this value with `$CXX` if set.
- environ_cxx = repository_ctx.os.environ.get("CXX", "g++")
- fake_os = struct(
- environ = {"CC": environ_cxx},
- )
+ # Bazel's `find_cc` helper uses the repository context to inspect `$CC`.
+ # Replace this value with `$CXX` if set.
+ environ_cxx = repository_ctx.os.environ.get("CXX", "g++")
+ fake_os = struct(
+ environ = {"CC": environ_cxx},
+ )
- # We can't directly assign `repository_ctx.which` to a struct attribute
- # because Skylark doesn't support bound method references. Instead, stub
- # out `which()` using a two-pass approach:
- #
- # * The first pass uses a stub that always succeeds, passing back a special
- # value containing the original parameter.
- # * If we detect the special value, we know that `find_cc` found a compiler
- # name but don't know if that name could be resolved to an executable path.
- # So do the `which()` call ourselves.
- # * If our `which()` failed, call `find_cc` again with a dummy which that
- # always fails. The error raised by `find_cc` will be identical to what Bazel
- # would generate for a missing C compiler.
- #
- # See https://github.com/bazelbuild/bazel/issues/4644 for more context.
- real_cxx = find_cc(struct(
- which = _quiet_fake_which,
- os = fake_os,
- ), {})
- if hasattr(real_cxx, "_envoy_fake_which"):
- real_cxx = repository_ctx.which(real_cxx._envoy_fake_which)
- if real_cxx == None:
- find_cc(struct(
- which = _noisy_fake_which,
+ # We can't directly assign `repository_ctx.which` to a struct attribute
+ # because Skylark doesn't support bound method references. Instead, stub
+ # out `which()` using a two-pass approach:
+ #
+ # * The first pass uses a stub that always succeeds, passing back a special
+ # value containing the original parameter.
+ # * If we detect the special value, we know that `find_cc` found a compiler
+ # name but don't know if that name could be resolved to an executable path.
+ # So do the `which()` call ourselves.
+ # * If our `which()` failed, call `find_cc` again with a dummy which that
+ # always fails. The error raised by `find_cc` will be identical to what Bazel
+ # would generate for a missing C compiler.
+ #
+ # See https://github.com/bazelbuild/bazel/issues/4644 for more context.
+ real_cxx = find_cc(struct(
+ which = _quiet_fake_which,
os = fake_os,
- ), {})
- return real_cxx
+ ), {})
+ if hasattr(real_cxx, "_envoy_fake_which"):
+ real_cxx = repository_ctx.which(real_cxx._envoy_fake_which)
+ if real_cxx == None:
+ find_cc(struct(
+ which = _noisy_fake_which,
+ os = fake_os,
+ ), {})
+ return real_cxx
def _build_envoy_cc_wrapper(repository_ctx):
- real_cc = find_cc(repository_ctx, {})
- real_cxx = _find_cxx(repository_ctx)
+ real_cc = find_cc(repository_ctx, {})
+ real_cxx = _find_cxx(repository_ctx)
- # Copy our CC wrapper script into @local_config_cc, with the true paths
- # to the C and C++ compiler injected in. The wrapper will use these paths
- # to invoke the compiler after deciding which one is correct for the current
- # invocation.
- #
- # Since the script is Python, we can inject values using `repr(str(value))`
- # and escaping will be handled correctly.
- repository_ctx.template("extra_tools/envoy_cc_wrapper", repository_ctx.attr._envoy_cc_wrapper, {
- "{ENVOY_REAL_CC}": repr(str(real_cc)),
- "{ENVOY_REAL_CXX}": repr(str(real_cxx)),
- })
- return repository_ctx.path("extra_tools/envoy_cc_wrapper")
+ # Copy our CC wrapper script into @local_config_cc, with the true paths
+ # to the C and C++ compiler injected in. The wrapper will use these paths
+ # to invoke the compiler after deciding which one is correct for the current
+ # invocation.
+ #
+ # Since the script is Python, we can inject values using `repr(str(value))`
+ # and escaping will be handled correctly.
+ repository_ctx.template("extra_tools/envoy_cc_wrapper", repository_ctx.attr._envoy_cc_wrapper, {
+ "{ENVOY_REAL_CC}": repr(str(real_cc)),
+ "{ENVOY_REAL_CXX}": repr(str(real_cxx)),
+ })
+ return repository_ctx.path("extra_tools/envoy_cc_wrapper")
def _needs_envoy_cc_wrapper(repository_ctx):
- # When building for Linux we set additional C++ compiler options that aren't
- # handled well by Bazel, so we need a wrapper around $CC to fix its
- # compiler invocations.
- cpu_value = get_cpu_value(repository_ctx)
- return cpu_value not in ["freebsd", "x64_windows", "darwin"]
+ # When building for Linux we set additional C++ compiler options that aren't
+ # handled well by Bazel, so we need a wrapper around $CC to fix its
+ # compiler invocations.
+ cpu_value = get_cpu_value(repository_ctx)
+ return cpu_value not in ["freebsd", "x64_windows", "darwin"]
def cc_autoconf_impl(repository_ctx):
- overriden_tools = {}
- if _needs_envoy_cc_wrapper(repository_ctx):
- # Bazel uses "gcc" as a generic name for all C and C++ compilers.
- overriden_tools["gcc"] = _build_envoy_cc_wrapper(repository_ctx)
- return _upstream_cc_autoconf_impl(repository_ctx, overriden_tools=overriden_tools)
+ overriden_tools = {}
+ if _needs_envoy_cc_wrapper(repository_ctx):
+ # Bazel uses "gcc" as a generic name for all C and C++ compilers.
+ overriden_tools["gcc"] = _build_envoy_cc_wrapper(repository_ctx)
+ return _upstream_cc_autoconf_impl(repository_ctx, overriden_tools = overriden_tools)
cc_autoconf = repository_rule(
implementation = cc_autoconf_impl,
attrs = {
- "_envoy_cc_wrapper": attr.label(default="@envoy//bazel:cc_wrapper.py"),
+ "_envoy_cc_wrapper": attr.label(default = "@envoy//bazel:cc_wrapper.py"),
},
environ = [
"ABI_LIBC_VERSION",
@@ -116,8 +116,10 @@ cc_autoconf = repository_rule(
"VS100COMNTOOLS",
"VS110COMNTOOLS",
"VS120COMNTOOLS",
- "VS140COMNTOOLS"])
+ "VS140COMNTOOLS",
+ ],
+)
def cc_configure():
- cc_autoconf(name="local_config_cc")
- native.bind(name="cc_toolchain", actual="@local_config_cc//:toolchain")
+ cc_autoconf(name = "local_config_cc")
+ native.bind(name = "cc_toolchain", actual = "@local_config_cc//:toolchain")
diff --git a/bazel/envoy_build_system.bzl b/bazel/envoy_build_system.bzl
index bf7885cc7d6c5..9f79cdf88565c 100644
--- a/bazel/envoy_build_system.bzl
+++ b/bazel/envoy_build_system.bzl
@@ -5,7 +5,7 @@ def envoy_package():
# Compute the final copts based on various options.
def envoy_copts(repository, test = False):
- return [
+ posix_options = [
"-Wall",
"-Wextra",
"-Werror",
@@ -13,81 +13,110 @@ def envoy_copts(repository, test = False):
"-Woverloaded-virtual",
"-Wold-style-cast",
"-std=c++14",
- ] + select({
- # Bazel adds an implicit -DNDEBUG for opt.
- repository + "//bazel:opt_build": [] if test else ["-ggdb3"],
- repository + "//bazel:fastbuild_build": [],
- repository + "//bazel:dbg_build": ["-ggdb3"],
- }) + select({
- repository + "//bazel:disable_tcmalloc": ["-DABSL_MALLOC_HOOK_MMAP_DISABLE"],
- "//conditions:default": ["-DTCMALLOC"],
- }) + select({
- repository + "//bazel:disable_signal_trace": [],
- "//conditions:default": ["-DENVOY_HANDLE_SIGNALS"],
- }) + select({
- # TCLAP command line parser needs this to support int64_t/uint64_t
- "@bazel_tools//tools/osx:darwin": ["-DHAVE_LONG_LONG"],
- "//conditions:default": [],
- }) + envoy_select_hot_restart(["-DENVOY_HOT_RESTART"], repository) + \
- envoy_select_perf_annotation(["-DENVOY_PERF_ANNOTATION"]) + \
- envoy_select_google_grpc(["-DENVOY_GOOGLE_GRPC"], repository)
+ ]
+
+ msvc_options = [
+ "-WX",
+ "-DWIN32",
+ "-DWIN32_LEAN_AND_MEAN",
+ # need win8 for ntohll
+ # https://msdn.microsoft.com/en-us/library/windows/desktop/aa383745(v=vs.85).aspx
+ "-D_WIN32_WINNT=0x0602",
+ "-DNTDDI_VERSION=0x06020000",
+ "-DCARES_STATICLIB",
+ "-DNGHTTP2_STATICLIB",
+ ]
+
+ return select({
+ repository + "//bazel:windows_x86_64": msvc_options,
+ "//conditions:default": posix_options,
+ }) + select({
+ # Bazel adds an implicit -DNDEBUG for opt.
+ repository + "//bazel:opt_build": [] if test else ["-ggdb3"],
+ repository + "//bazel:fastbuild_build": [],
+ repository + "//bazel:dbg_build": ["-ggdb3"],
+ repository + "//bazel:windows_opt_build": [],
+ repository + "//bazel:windows_fastbuild_build": [],
+ repository + "//bazel:windows_dbg_build": [],
+ }) + select({
+ repository + "//bazel:disable_tcmalloc": ["-DABSL_MALLOC_HOOK_MMAP_DISABLE"],
+ "//conditions:default": ["-DTCMALLOC"],
+ }) + select({
+ repository + "//bazel:disable_signal_trace": [],
+ "//conditions:default": ["-DENVOY_HANDLE_SIGNALS"],
+ }) + select({
+ # TCLAP command line parser needs this to support int64_t/uint64_t
+ "@bazel_tools//tools/osx:darwin": ["-DHAVE_LONG_LONG"],
+ "//conditions:default": [],
+ }) + envoy_select_hot_restart(["-DENVOY_HOT_RESTART"], repository) + \
+ envoy_select_perf_annotation(["-DENVOY_PERF_ANNOTATION"]) + \
+ envoy_select_google_grpc(["-DENVOY_GOOGLE_GRPC"], repository)
def envoy_static_link_libstdcpp_linkopts():
- return envoy_select_force_libcpp(["--stdlib=libc++"],
- ["-static-libstdc++", "-static-libgcc"])
+ return envoy_select_force_libcpp(
+ ["--stdlib=libc++"],
+ ["-static-libstdc++", "-static-libgcc"],
+ )
# Compute the final linkopts based on various options.
def envoy_linkopts():
return select({
- # The OSX system library transitively links common libraries (e.g., pthread).
+ # The OSX system library transitively links common libraries (e.g., pthread).
+ "@bazel_tools//tools/osx:darwin": [
+ # See note here: http://luajit.org/install.html
+ "-pagezero_size 10000",
+ "-image_base 100000000",
+ ],
+ "@envoy//bazel:windows_x86_64": [
+ "-DEFAULTLIB:advapi32.lib",
+ ],
+ "//conditions:default": [
+ "-pthread",
+ "-lrt",
+ "-ldl",
+ "-Wl,--hash-style=gnu",
+ ],
+ }) + envoy_static_link_libstdcpp_linkopts() + \
+ envoy_select_exported_symbols(["-Wl,-E"])
+
+def _envoy_stamped_linkopts():
+ return select({
+ # Coverage builds in CI are failing to link when setting a build ID.
+ #
+ # /usr/bin/ld.gold: internal error in write_build_id, at ../../gold/layout.cc:5419
+ "@envoy//bazel:coverage_build": [],
+ "@envoy//bazel:windows_x86_64": [],
+
+ # MacOS doesn't have an official equivalent to the `.note.gnu.build-id`
+ # ELF section, so just stuff the raw ID into a new text section.
"@bazel_tools//tools/osx:darwin": [
- # See note here: http://luajit.org/install.html
- "-pagezero_size 10000", "-image_base 100000000",
+ "-sectcreate __TEXT __build_id",
+ "$(location @envoy//bazel:raw_build_id.ldscript)",
],
+
+ # Note: assumes GNU GCC (or compatible) handling of `--build-id` flag.
"//conditions:default": [
- "-pthread",
- "-lrt",
- "-ldl",
- '-Wl,--hash-style=gnu',
+ "-Wl,@$(location @envoy//bazel:gnu_build_id.ldscript)",
],
- }) + envoy_static_link_libstdcpp_linkopts() \
- + envoy_select_exported_symbols(["-Wl,-E"])
-
-def _envoy_stamped_linkopts():
- return select({
- # Coverage builds in CI are failing to link when setting a build ID.
- #
- # /usr/bin/ld.gold: internal error in write_build_id, at ../../gold/layout.cc:5419
- "@envoy//bazel:coverage_build": [],
-
- # MacOS doesn't have an official equivalent to the `.note.gnu.build-id`
- # ELF section, so just stuff the raw ID into a new text section.
- "@bazel_tools//tools/osx:darwin": [
- "-sectcreate __TEXT __build_id", "$(location @envoy//bazel:raw_build_id.ldscript)"
- ],
-
- # Note: assumes GNU GCC (or compatible) handling of `--build-id` flag.
- "//conditions:default": [
- "-Wl,@$(location @envoy//bazel:gnu_build_id.ldscript)",
- ],
- })
+ })
def _envoy_stamped_deps():
- return select({
- "@bazel_tools//tools/osx:darwin": [
- "@envoy//bazel:raw_build_id.ldscript"
- ],
- "//conditions:default": [
- "@envoy//bazel:gnu_build_id.ldscript",
- ],
- })
+ return select({
+ "@bazel_tools//tools/osx:darwin": [
+ "@envoy//bazel:raw_build_id.ldscript",
+ ],
+ "//conditions:default": [
+ "@envoy//bazel:gnu_build_id.ldscript",
+ ],
+ })
# Compute the test linkopts based on various options.
def envoy_test_linkopts():
return select({
"@bazel_tools//tools/osx:darwin": [
# See note here: http://luajit.org/install.html
- "-pagezero_size 10000", "-image_base 100000000",
+ "-pagezero_size 10000",
+ "-image_base 100000000",
],
# TODO(mattklein123): It's not great that we universally link against the following libs.
@@ -119,8 +148,8 @@ def tcmalloc_external_deps(repository):
# exporting the package headers at (e.g. envoy/common). Source files can then
# include using this path scheme (e.g. #include "envoy/common/time.h").
def envoy_include_prefix(path):
- if path.startswith('source/') or path.startswith('include/'):
- return '/'.join(path.split('/')[1:])
+ if path.startswith("source/") or path.startswith("include/"):
+ return "/".join(path.split("/")[1:])
return None
# Envoy C++ library targets that need no transformations or additional dependencies before being
@@ -131,20 +160,22 @@ def envoy_basic_cc_library(name, **kargs):
native.cc_library(name = name, **kargs)
# Envoy C++ library targets should be specified with this function.
-def envoy_cc_library(name,
- srcs = [],
- hdrs = [],
- copts = [],
- visibility = None,
- external_deps = [],
- tcmalloc_dep = None,
- repository = "",
- linkstamp = None,
- tags = [],
- deps = [],
- strip_include_prefix = None):
+def envoy_cc_library(
+ name,
+ srcs = [],
+ hdrs = [],
+ copts = [],
+ visibility = None,
+ external_deps = [],
+ tcmalloc_dep = None,
+ repository = "",
+ linkstamp = None,
+ tags = [],
+ deps = [],
+ strip_include_prefix = None):
if tcmalloc_dep:
deps += tcmalloc_external_deps(repository)
+
native.cc_library(
name = name,
srcs = srcs,
@@ -155,29 +186,32 @@ def envoy_cc_library(name,
deps = deps + [envoy_external_dep_path(dep) for dep in external_deps] + [
repository + "//include/envoy/common:base_includes",
repository + "//source/common/common:fmt_lib",
- envoy_external_dep_path('abseil_strings'),
- envoy_external_dep_path('spdlog'),
- envoy_external_dep_path('fmtlib'),
+ envoy_external_dep_path("abseil_strings"),
+ envoy_external_dep_path("spdlog"),
+ envoy_external_dep_path("fmtlib"),
],
include_prefix = envoy_include_prefix(PACKAGE_NAME),
alwayslink = 1,
linkstatic = 1,
- linkstamp = linkstamp,
+ linkstamp = select({
+ repository + "//bazel:windows_x86_64": None,
+ "//conditions:default": linkstamp,
+ }),
strip_include_prefix = strip_include_prefix,
- )
+ )
# Envoy C++ binary targets should be specified with this function.
-def envoy_cc_binary(name,
- srcs = [],
- data = [],
- testonly = 0,
- visibility = None,
- external_deps = [],
- repository = "",
- stamped = False,
- deps = [],
- linkopts = []):
-
+def envoy_cc_binary(
+ name,
+ srcs = [],
+ data = [],
+ testonly = 0,
+ visibility = None,
+ external_deps = [],
+ repository = "",
+ stamped = False,
+ deps = [],
+ linkopts = []):
if not linkopts:
linkopts = envoy_linkopts()
if stamped:
@@ -233,20 +267,21 @@ def envoy_cc_fuzz_test(name, corpus, deps = [], **kwargs):
)
# Envoy C++ test targets should be specified with this function.
-def envoy_cc_test(name,
- srcs = [],
- data = [],
- # List of pairs (Bazel shell script target, shell script args)
- repository = "",
- external_deps = [],
- deps = [],
- tags = [],
- args = [],
- coverage = True,
- local = False):
+def envoy_cc_test(
+ name,
+ srcs = [],
+ data = [],
+ # List of pairs (Bazel shell script target, shell script args)
+ repository = "",
+ external_deps = [],
+ deps = [],
+ tags = [],
+ args = [],
+ coverage = True,
+ local = False):
test_lib_tags = []
if coverage:
- test_lib_tags.append("coverage_test_lib")
+ test_lib_tags.append("coverage_test_lib")
envoy_cc_test_library(
name = name + "_lib",
srcs = srcs,
@@ -264,7 +299,7 @@ def envoy_cc_test(name,
malloc = tcmalloc_external_dep(repository),
deps = [
":" + name + "_lib",
- repository + "//test:main"
+ repository + "//test:main",
],
# from https://github.com/google/googletest/blob/6e1970e2376c14bf658eb88f655a054030353f9f/googlemock/src/gmock.cc#L51
# 2 - by default, mocks act as StrictMocks.
@@ -275,14 +310,15 @@ def envoy_cc_test(name,
# Envoy C++ test related libraries (that want gtest, gmock) should be specified
# with this function.
-def envoy_cc_test_library(name,
- srcs = [],
- hdrs = [],
- data = [],
- external_deps = [],
- deps = [],
- repository = "",
- tags = []):
+def envoy_cc_test_library(
+ name,
+ srcs = [],
+ hdrs = [],
+ data = [],
+ external_deps = [],
+ deps = [],
+ repository = "",
+ tags = []):
native.cc_library(
name = name,
srcs = srcs,
@@ -291,7 +327,7 @@ def envoy_cc_test_library(name,
copts = envoy_copts(repository, test = True),
testonly = 1,
deps = deps + [envoy_external_dep_path(dep) for dep in external_deps] + [
- envoy_external_dep_path('googletest'),
+ envoy_external_dep_path("googletest"),
repository + "//test/test_common:printers_includes",
],
tags = tags,
@@ -300,18 +336,22 @@ def envoy_cc_test_library(name,
)
# Envoy test binaries should be specified with this function.
-def envoy_cc_test_binary(name,
- **kargs):
- envoy_cc_binary(name,
- testonly = 1,
- linkopts = envoy_test_linkopts() + envoy_static_link_libstdcpp_linkopts(),
- **kargs)
+def envoy_cc_test_binary(
+ name,
+ **kargs):
+ envoy_cc_binary(
+ name,
+ testonly = 1,
+ linkopts = envoy_test_linkopts() + envoy_static_link_libstdcpp_linkopts(),
+ **kargs
+ )
# Envoy Python test binaries should be specified with this function.
-def envoy_py_test_binary(name,
- external_deps = [],
- deps = [],
- **kargs):
+def envoy_py_test_binary(
+ name,
+ external_deps = [],
+ deps = [],
+ **kargs):
native.py_binary(
name = name,
deps = deps + [envoy_external_dep_path(dep) for dep in external_deps],
@@ -323,41 +363,46 @@ def envoy_cc_mock(name, **kargs):
envoy_cc_test_library(name = name, **kargs)
# Envoy shell tests that need to be included in coverage run should be specified with this function.
-def envoy_sh_test(name,
- srcs = [],
- data = [],
- **kargs):
- test_runner_cc = name + "_test_runner.cc"
- native.genrule(
- name = name + "_gen_test_runner",
- srcs = srcs,
- outs = [test_runner_cc],
- cmd = "$(location //bazel:gen_sh_test_runner.sh) $(SRCS) >> $@",
- tools = ["//bazel:gen_sh_test_runner.sh"],
- )
- envoy_cc_test_library(
- name = name + "_lib",
- srcs = [test_runner_cc],
- data = srcs + data,
- tags = ["coverage_test_lib"],
- deps = ["//test/test_common:environment_lib"],
- )
- native.sh_test(
- name = name,
- srcs = ["//bazel:sh_test_wrapper.sh"],
- data = srcs + data,
- args = srcs,
- **kargs
- )
+def envoy_sh_test(
+ name,
+ srcs = [],
+ data = [],
+ **kargs):
+ test_runner_cc = name + "_test_runner.cc"
+ native.genrule(
+ name = name + "_gen_test_runner",
+ srcs = srcs,
+ outs = [test_runner_cc],
+ cmd = "$(location //bazel:gen_sh_test_runner.sh) $(SRCS) >> $@",
+ tools = ["//bazel:gen_sh_test_runner.sh"],
+ )
+ envoy_cc_test_library(
+ name = name + "_lib",
+ srcs = [test_runner_cc],
+ data = srcs + data,
+ tags = ["coverage_test_lib"],
+ deps = ["//test/test_common:environment_lib"],
+ )
+ native.sh_test(
+ name = name,
+ srcs = ["//bazel:sh_test_wrapper.sh"],
+ data = srcs + data,
+ args = srcs,
+ **kargs
+ )
def _proto_header(proto_path):
- if proto_path.endswith(".proto"):
- return proto_path[:-5] + "pb.h"
- return None
+ if proto_path.endswith(".proto"):
+ return proto_path[:-5] + "pb.h"
+ return None
# Envoy proto targets should be specified with this function.
-def envoy_proto_library(name, srcs = [], deps = [], external_deps = [],
- generate_python = True):
+def envoy_proto_library(
+ name,
+ srcs = [],
+ deps = [],
+ external_deps = [],
+ generate_python = True):
# Ideally this would be native.{proto_library, cc_proto_library}.
# Unfortunately, this doesn't work with http_api_protos due to the PGV
# requirement to also use them in the non-native protobuf.bzl
@@ -366,6 +411,10 @@ def envoy_proto_library(name, srcs = [], deps = [], external_deps = [],
cc_proto_deps = []
py_proto_deps = ["@com_google_protobuf//:protobuf_python"]
+ if "api_httpbody_protos" in external_deps:
+ cc_proto_deps.append("@googleapis//:api_httpbody_protos")
+ py_proto_deps.append("@googleapis//:api_httpbody_protos_py")
+
if "http_api_protos" in external_deps:
cc_proto_deps.append("@googleapis//:http_api_protos")
py_proto_deps.append("@googleapis//:http_api_protos_py")
@@ -403,6 +452,10 @@ def envoy_proto_descriptor(name, out, srcs = [], external_deps = []):
input_files = ["$(location " + src + ")" for src in srcs]
include_paths = [".", PACKAGE_NAME]
+ if "api_httpbody_protos" in external_deps:
+ srcs.append("@googleapis//:api_httpbody_protos_src")
+ include_paths.append("external/googleapis")
+
if "http_api_protos" in external_deps:
srcs.append("@googleapis//:http_api_protos_src")
include_paths.append("external/googleapis")
@@ -432,13 +485,11 @@ def envoy_select_hot_restart(xs, repository = ""):
"//conditions:default": xs,
})
-
def envoy_select_perf_annotation(xs):
return select({
"@envoy//bazel:enable_perf_annotation": xs,
"//conditions:default": [],
- })
-
+ })
# Selects the given values if Google gRPC is enabled in the current build.
def envoy_select_google_grpc(xs, repository = ""):
@@ -458,5 +509,6 @@ def envoy_select_force_libcpp(if_libcpp, default = None):
return select({
"@envoy//bazel:force_libcpp": if_libcpp,
"@bazel_tools//tools/osx:darwin": [],
+ "@envoy//bazel:windows_x86_64": [],
"//conditions:default": default or [],
})
diff --git a/bazel/external/apache_thrift.BUILD b/bazel/external/apache_thrift.BUILD
new file mode 100644
index 0000000000000..8b296fc00672b
--- /dev/null
+++ b/bazel/external/apache_thrift.BUILD
@@ -0,0 +1,21 @@
+# The apache-thrift distribution does not keep the thrift files in a directory with the
+# expected package name (it uses src/Thrift.py vs src/thrift/Thrift.py), so we provide a
+# genrule to copy src/**/*.py to thrift/**/*.py.
+src_files = glob(["src/**/*.py"])
+
+genrule(
+ name = "thrift_files",
+ srcs = src_files,
+ outs = [f.replace("src/", "thrift/") for f in src_files],
+ cmd = '\n'.join(
+ ['mkdir -p $$(dirname $(location %s)) && cp $(location %s) $(location :%s)' % (f, f, f.replace('src/', 'thrift/')) for f in src_files]
+ ),
+ visibility = ["//visibility:private"],
+)
+
+py_library(
+ name = "apache_thrift",
+ srcs = [":thrift_files"],
+ visibility = ["//visibility:public"],
+ deps = ["@six_archive//:six"],
+)
diff --git a/bazel/external/libcircllhist.BUILD b/bazel/external/libcircllhist.BUILD
index 4e109f0b38d47..a937b65a382c6 100644
--- a/bazel/external/libcircllhist.BUILD
+++ b/bazel/external/libcircllhist.BUILD
@@ -6,4 +6,8 @@ cc_library(
],
includes = ["src"],
visibility = ["//visibility:public"],
+ copts = select({
+ "@envoy//bazel:windows_x86_64": ["-DWIN32"],
+ "//conditions:default": [],
+ }),
)
diff --git a/bazel/external/twitter_common_finagle_thrift.BUILD b/bazel/external/twitter_common_finagle_thrift.BUILD
new file mode 100644
index 0000000000000..1ca6af126c596
--- /dev/null
+++ b/bazel/external/twitter_common_finagle_thrift.BUILD
@@ -0,0 +1,7 @@
+py_library(
+ name = "twitter_common_finagle_thrift",
+ srcs = glob([
+ "gen/**/*.py",
+ ]),
+ visibility = ["//visibility:public"],
+)
diff --git a/bazel/external/twitter_common_lang.BUILD b/bazel/external/twitter_common_lang.BUILD
new file mode 100644
index 0000000000000..f4300b37b05d2
--- /dev/null
+++ b/bazel/external/twitter_common_lang.BUILD
@@ -0,0 +1,7 @@
+py_library(
+ name = "twitter_common_lang",
+ srcs = glob([
+ "twitter/**/*.py",
+ ]),
+ visibility = ["//visibility:public"],
+)
diff --git a/bazel/external/twitter_common_rpc.BUILD b/bazel/external/twitter_common_rpc.BUILD
new file mode 100644
index 0000000000000..7a13ec511a667
--- /dev/null
+++ b/bazel/external/twitter_common_rpc.BUILD
@@ -0,0 +1,11 @@
+py_library(
+ name = "twitter_common_rpc",
+ srcs = glob([
+ "twitter/**/*.py",
+ ]),
+ visibility = ["//visibility:public"],
+ deps = [
+ "@com_github_twitter_common_lang//:twitter_common_lang",
+ "@com_github_twitter_common_finagle_thrift//:twitter_common_finagle_thrift"
+ ],
+)
diff --git a/bazel/genrule_repository.bzl b/bazel/genrule_repository.bzl
index a72be286987dd..030cb9a3a38c1 100644
--- a/bazel/genrule_repository.bzl
+++ b/bazel/genrule_repository.bzl
@@ -1,9 +1,9 @@
def _genrule_repository(ctx):
ctx.download_and_extract(
ctx.attr.urls,
- "", # output
+ "", # output
ctx.attr.sha256,
- "", # type
+ "", # type
ctx.attr.strip_prefix,
)
for ii, patch in enumerate(ctx.attr.patches):
@@ -11,7 +11,7 @@ def _genrule_repository(ctx):
ctx.symlink(patch, patch_input)
patch_result = ctx.execute(["patch", "-p0", "--input", patch_input])
if patch_result.return_code != 0:
- fail("Failed to apply patch %r: %s" % (patch, patch_result.stderr))
+ fail("Failed to apply patch %r: %s" % (patch, patch_result.stderr))
# https://github.com/bazelbuild/bazel/issues/3766
genrule_cmd_file = Label("@envoy//bazel").relative(str(ctx.attr.genrule_cmd_file))
@@ -19,7 +19,9 @@ def _genrule_repository(ctx):
cat_genrule_cmd = ctx.execute(["cat", "_envoy_genrule_cmd.genrule_cmd"])
if cat_genrule_cmd.return_code != 0:
fail("Failed to read genrule command %r: %s" % (
- genrule_cmd_file, cat_genrule_cmd.stderr))
+ genrule_cmd_file,
+ cat_genrule_cmd.stderr,
+ ))
ctx.file("WORKSPACE", "workspace(name=%r)" % (ctx.name,))
ctx.symlink(ctx.attr.build_file, "BUILD.bazel")
@@ -58,10 +60,10 @@ genrule_repository = repository_rule(
)
def _genrule_cc_deps(ctx):
- outs = depset()
- for dep in ctx.attr.deps:
- outs = dep.cc.transitive_headers + dep.cc.libs + outs
- return DefaultInfo(files=outs)
+ outs = depset()
+ for dep in ctx.attr.deps:
+ outs = dep.cc.transitive_headers + dep.cc.libs + outs
+ return DefaultInfo(files = outs)
genrule_cc_deps = rule(
attrs = {
@@ -75,67 +77,67 @@ genrule_cc_deps = rule(
)
def _absolute_bin(path):
- # If the binary path looks like it's relative to the current directory,
- # transform it to be absolute by appending "${PWD}".
- if "/" in path and not path.startswith("/"):
- return '"${PWD}"/%r' % (path,)
- return '%r' % (path,)
+ # If the binary path looks like it's relative to the current directory,
+ # transform it to be absolute by appending "${PWD}".
+ if "/" in path and not path.startswith("/"):
+ return '"${PWD}"/%r' % (path,)
+ return "%r" % (path,)
def _genrule_environment(ctx):
- lines = []
-
- # Bazel uses the same command for C and C++ compilation.
- c_compiler = ctx.var['CC']
-
- # Bare minimum cflags to get included test binaries to link.
- #
- # See //tools:bazel.rc for the full set.
- asan_flags = ["-fsanitize=address,undefined"]
- tsan_flags = ["-fsanitize=thread"]
-
- # Older versions of GCC in Ubuntu, including GCC 5 used in CI images,
- # incorrectly invoke the older `/usr/bin/ld` with gold-specific options when
- # building with sanitizers enabled. Work around this by forcing use of gold
- # in sanitize mode.
- #
- # This is not a great solution because it doesn't detect GCC when Bazel has
- # wrapped it in an intermediate script, but it works well enough to keep CI
- # running.
- #
- # https://stackoverflow.com/questions/37603238/fsanitize-not-using-gold-linker-in-gcc-6-1
- force_ld_gold = []
- if "gcc" in c_compiler or "g++" in c_compiler:
- force_ld_gold = ["-fuse-ld=gold"]
-
- cc_flags = []
- ld_flags = []
- ld_libs = []
- if ctx.var.get('ENVOY_CONFIG_COVERAGE'):
- ld_libs += ["-lgcov"]
- if ctx.var.get('ENVOY_CONFIG_ASAN'):
- cc_flags += asan_flags
- ld_flags += asan_flags
- ld_flags += force_ld_gold
- if ctx.var.get('ENVOY_CONFIG_TSAN'):
- cc_flags += tsan_flags
- ld_flags += tsan_flags
- ld_flags += force_ld_gold
-
- lines.append("export CFLAGS=%r" % (" ".join(cc_flags),))
- lines.append("export LDFLAGS=%r" % (" ".join(ld_flags),))
- lines.append("export LIBS=%r" % (" ".join(ld_libs),))
- lines.append("export CC=%s" % (_absolute_bin(c_compiler),))
- lines.append("export CXX=%s" % (_absolute_bin(c_compiler),))
-
- # Some Autoconf helper binaries leak, which makes ./configure think the
- # system is unable to do anything. Turn off leak checking during part of
- # the build.
- lines.append("export ASAN_OPTIONS=detect_leaks=0")
-
- lines.append("")
- out = ctx.new_file(ctx.attr.name + ".sh")
- ctx.file_action(out, "\n".join(lines))
- return DefaultInfo(files=depset([out]))
+ lines = []
+
+ # Bazel uses the same command for C and C++ compilation.
+ c_compiler = ctx.var["CC"]
+
+ # Bare minimum cflags to get included test binaries to link.
+ #
+ # See //tools:bazel.rc for the full set.
+ asan_flags = ["-fsanitize=address,undefined"]
+ tsan_flags = ["-fsanitize=thread"]
+
+ # Older versions of GCC in Ubuntu, including GCC 5 used in CI images,
+ # incorrectly invoke the older `/usr/bin/ld` with gold-specific options when
+ # building with sanitizers enabled. Work around this by forcing use of gold
+ # in sanitize mode.
+ #
+ # This is not a great solution because it doesn't detect GCC when Bazel has
+ # wrapped it in an intermediate script, but it works well enough to keep CI
+ # running.
+ #
+ # https://stackoverflow.com/questions/37603238/fsanitize-not-using-gold-linker-in-gcc-6-1
+ force_ld_gold = []
+ if "gcc" in c_compiler or "g++" in c_compiler:
+ force_ld_gold = ["-fuse-ld=gold"]
+
+ cc_flags = []
+ ld_flags = []
+ ld_libs = []
+ if ctx.var.get("ENVOY_CONFIG_COVERAGE"):
+ ld_libs += ["-lgcov"]
+ if ctx.var.get("ENVOY_CONFIG_ASAN"):
+ cc_flags += asan_flags
+ ld_flags += asan_flags
+ ld_flags += force_ld_gold
+ if ctx.var.get("ENVOY_CONFIG_TSAN"):
+ cc_flags += tsan_flags
+ ld_flags += tsan_flags
+ ld_flags += force_ld_gold
+
+ lines.append("export CFLAGS=%r" % (" ".join(cc_flags),))
+ lines.append("export LDFLAGS=%r" % (" ".join(ld_flags),))
+ lines.append("export LIBS=%r" % (" ".join(ld_libs),))
+ lines.append("export CC=%s" % (_absolute_bin(c_compiler),))
+ lines.append("export CXX=%s" % (_absolute_bin(c_compiler),))
+
+ # Some Autoconf helper binaries leak, which makes ./configure think the
+ # system is unable to do anything. Turn off leak checking during part of
+ # the build.
+ lines.append("export ASAN_OPTIONS=detect_leaks=0")
+
+ lines.append("")
+ out = ctx.new_file(ctx.attr.name + ".sh")
+ ctx.file_action(out, "\n".join(lines))
+ return DefaultInfo(files = depset([out]))
genrule_environment = rule(
implementation = _genrule_environment,
diff --git a/bazel/patched_http_archive.bzl b/bazel/patched_http_archive.bzl
index 87b4be7737345..8a6d54881cdfe 100644
--- a/bazel/patched_http_archive.bzl
+++ b/bazel/patched_http_archive.bzl
@@ -1,9 +1,9 @@
def _patched_http_archive(ctx):
ctx.download_and_extract(
ctx.attr.urls,
- "", # output
+ "", # output
ctx.attr.sha256,
- "", # type
+ "", # type
ctx.attr.strip_prefix,
)
for ii, patch in enumerate(ctx.attr.patches):
@@ -11,7 +11,7 @@ def _patched_http_archive(ctx):
ctx.symlink(patch, patch_input)
patch_result = ctx.execute(["patch", "-p0", "--input", patch_input])
if patch_result.return_code != 0:
- fail("Failed to apply patch %r: %s" % (patch, patch_result.stderr))
+ fail("Failed to apply patch %r: %s" % (patch, patch_result.stderr))
patched_http_archive = repository_rule(
attrs = {
diff --git a/bazel/repositories.bat b/bazel/repositories.bat
new file mode 100644
index 0000000000000..7b66957105932
--- /dev/null
+++ b/bazel/repositories.bat
@@ -0,0 +1,4 @@
+echo "Start"
+@ECHO OFF
+%BAZEL_SH% -c "./repositories.sh %*"
+exit %ERRORLEVEL%
diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl
index b55e5c70fc554..3d231c259ff00 100644
--- a/bazel/repositories.bzl
+++ b/bazel/repositories.bzl
@@ -7,6 +7,12 @@ load(":genrule_repository.bzl", "genrule_repository")
load(":patched_http_archive.bzl", "patched_http_archive")
load(":repository_locations.bzl", "REPOSITORY_LOCATIONS")
load(":target_recipes.bzl", "TARGET_RECIPES")
+load(
+ "@bazel_tools//tools/cpp:windows_cc_configure.bzl",
+ "find_vc_path",
+ "setup_vc_env_vars",
+)
+load("@bazel_tools//tools/cpp:lib_cc_configure.bzl", "get_env_var")
def _repository_impl(name, **kwargs):
# `existing_rule_keys` contains the names of repositories that have already
@@ -25,8 +31,9 @@ def _repository_impl(name, **kwargs):
# user a useful error if they accidentally specify a tag.
if "tag" in location:
fail(
- "Refusing to depend on Git tag %r for external dependency %r: use 'commit' instead."
- % (location["tag"], name))
+ "Refusing to depend on Git tag %r for external dependency %r: use 'commit' instead." %
+ (location["tag"], name),
+ )
if "commit" in location:
# Git repository at given commit ID. Add a BUILD file if requested.
@@ -35,13 +42,15 @@ def _repository_impl(name, **kwargs):
name = name,
remote = location["remote"],
commit = location["commit"],
- **kwargs)
+ **kwargs
+ )
else:
git_repository(
name = name,
remote = location["remote"],
commit = location["commit"],
- **kwargs)
+ **kwargs
+ )
else: # HTTP
# HTTP tarball at a given URL. Add a BUILD file if requested.
if "build_file" in kwargs:
@@ -50,33 +59,54 @@ def _repository_impl(name, **kwargs):
urls = location["urls"],
sha256 = location["sha256"],
strip_prefix = location["strip_prefix"],
- **kwargs)
+ **kwargs
+ )
else:
native.http_archive(
name = name,
urls = location["urls"],
sha256 = location["sha256"],
strip_prefix = location["strip_prefix"],
- **kwargs)
+ **kwargs
+ )
def _build_recipe_repository_impl(ctxt):
# Setup the build directory with links to the relevant files.
ctxt.symlink(Label("//bazel:repositories.sh"), "repositories.sh")
- ctxt.symlink(Label("//ci/build_container:build_and_install_deps.sh"),
- "build_and_install_deps.sh")
+ ctxt.symlink(Label("//bazel:repositories.bat"), "repositories.bat")
+ ctxt.symlink(
+ Label("//ci/build_container:build_and_install_deps.sh"),
+ "build_and_install_deps.sh",
+ )
ctxt.symlink(Label("//ci/build_container:recipe_wrapper.sh"), "recipe_wrapper.sh")
ctxt.symlink(Label("//ci/build_container:Makefile"), "Makefile")
for r in ctxt.attr.recipes:
- ctxt.symlink(Label("//ci/build_container/build_recipes:" + r + ".sh"),
- "build_recipes/" + r + ".sh")
+ ctxt.symlink(
+ Label("//ci/build_container/build_recipes:" + r + ".sh"),
+ "build_recipes/" + r + ".sh",
+ )
ctxt.symlink(Label("//ci/prebuilt:BUILD"), "BUILD")
# Run the build script.
- environment = {}
+ command = []
+ env = {}
+ if ctxt.os.name.upper().startswith("WINDOWS"):
+ vc_path = find_vc_path(ctxt)
+ current_path = get_env_var(ctxt, "PATH", None, False)
+ env = setup_vc_env_vars(ctxt, vc_path)
+ env["PATH"] += (";%s" % current_path)
+ env["CC"] = "cl"
+ env["CXX"] = "cl"
+ env["CXXFLAGS"] = "-DNDEBUG"
+ env["CFLAGS"] = "-DNDEBUG"
+ command = ["./repositories.bat"] + ctxt.attr.recipes
+ else:
+ command = ["./repositories.sh"] + ctxt.attr.recipes
+
print("Fetching external dependencies...")
result = ctxt.execute(
- ["./repositories.sh"] + ctxt.attr.recipes,
- environment = environment,
+ command,
+ environment = env,
quiet = False,
)
print(result.stdout)
@@ -86,6 +116,7 @@ def _build_recipe_repository_impl(ctxt):
print("\033[31;1m\033[48;5;226m External dependency build failed, check above log " +
"for errors and ensure all prerequisites at " +
"https://github.com/envoyproxy/envoy/blob/master/bazel/README.md#quick-start-bazel-build-for-developers are met.")
+
# This error message doesn't appear to the user :( https://github.com/bazelbuild/bazel/issues/3683
fail("External dep build failed")
@@ -97,7 +128,7 @@ def _default_envoy_build_config_impl(ctx):
_default_envoy_build_config = repository_rule(
implementation = _default_envoy_build_config_impl,
attrs = {
- "config": attr.label(default="@envoy//source/extensions:extensions_build_config.bzl"),
+ "config": attr.label(default = "@envoy//source/extensions:extensions_build_config.bzl"),
},
)
@@ -113,12 +144,12 @@ def _default_envoy_api_impl(ctx):
"tools",
]
for d in api_dirs:
- ctx.symlink(ctx.path(ctx.attr.api).dirname.get_child(d), d)
+ ctx.symlink(ctx.path(ctx.attr.api).dirname.get_child(d), d)
_default_envoy_api = repository_rule(
implementation = _default_envoy_api_impl,
attrs = {
- "api": attr.label(default="@envoy//api:BUILD"),
+ "api": attr.label(default = "@envoy//api:BUILD"),
},
)
@@ -141,6 +172,22 @@ def _python_deps():
name = "jinja2",
actual = "@com_github_pallets_jinja//:jinja2",
)
+ _repository_impl(
+ name = "com_github_apache_thrift",
+ build_file = "@envoy//bazel/external:apache_thrift.BUILD",
+ )
+ _repository_impl(
+ name = "com_github_twitter_common_lang",
+ build_file = "@envoy//bazel/external:twitter_common_lang.BUILD",
+ )
+ _repository_impl(
+ name = "com_github_twitter_common_rpc",
+ build_file = "@envoy//bazel/external:twitter_common_rpc.BUILD",
+ )
+ _repository_impl(
+ name = "com_github_twitter_common_finagle_thrift",
+ build_file = "@envoy//bazel/external:twitter_common_finagle_thrift.BUILD",
+ )
# Bazel native C++ dependencies. For the depedencies that doesn't provide autoconf/automake builds.
def _cc_deps():
@@ -164,8 +211,12 @@ def _envoy_api_deps():
# Treat the data plane API as an external repo, this simplifies exporting the API to
# https://github.com/envoyproxy/data-plane-api.
if "envoy_api" not in native.existing_rules().keys():
- _default_envoy_api(name="envoy_api")
+ _default_envoy_api(name = "envoy_api")
+ native.bind(
+ name = "api_httpbody_protos",
+ actual = "@googleapis//:api_httpbody_protos",
+ )
native.bind(
name = "http_api_protos",
actual = "@googleapis//:http_api_protos",
@@ -187,7 +238,7 @@ def envoy_dependencies(path = "@envoy_deps//", skip_targets = []):
"CXX",
"CFLAGS",
"CXXFLAGS",
- "LD_LIBRARY_PATH"
+ "LD_LIBRARY_PATH",
],
# Don't pretend we're in the sandbox, we do some evil stuff with envoy_dep_cache.
local = True,
@@ -435,32 +486,32 @@ def _com_github_grpc_grpc():
# Rebind some stuff to match what the gRPC Bazel is expecting.
native.bind(
- name = "protobuf_headers",
- actual = "@com_google_protobuf//:protobuf_headers",
+ name = "protobuf_headers",
+ actual = "@com_google_protobuf//:protobuf_headers",
)
native.bind(
- name = "libssl",
- actual = "//external:ssl",
+ name = "libssl",
+ actual = "//external:ssl",
)
native.bind(
- name = "cares",
- actual = "//external:ares",
+ name = "cares",
+ actual = "//external:ares",
)
native.bind(
- name = "grpc",
- actual = "@com_github_grpc_grpc//:grpc++"
+ name = "grpc",
+ actual = "@com_github_grpc_grpc//:grpc++",
)
native.bind(
- name = "grpc_health_proto",
- actual = "@envoy//bazel:grpc_health_proto",
+ name = "grpc_health_proto",
+ actual = "@envoy//bazel:grpc_health_proto",
)
def _com_github_google_jwt_verify():
_repository_impl("com_github_google_jwt_verify")
native.bind(
- name = "jwt_verify_lib",
- actual = "@com_github_google_jwt_verify//:jwt_verify_lib",
+ name = "jwt_verify_lib",
+ actual = "@com_github_google_jwt_verify//:jwt_verify_lib",
)
diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl
index 097e76ad603b6..7a65f4c956f3a 100644
--- a/bazel/repository_locations.bzl
+++ b/bazel/repository_locations.bzl
@@ -1,19 +1,24 @@
REPOSITORY_LOCATIONS = dict(
boringssl = dict(
# Use commits from branch "chromium-stable-with-bazel"
- commit = "2a52ce799382c87cd3119f3b44fbbebf97061ab6", # chromium-67.0.3396.62
+ commit = "372daf7042ffe3da1335743e7c93d78f1399aba7", # chromium-68.0.3440.75
remote = "https://github.com/google/boringssl",
),
com_google_absl = dict(
commit = "92020a042c0cd46979db9f6f0cb32783dc07765e", # 2018-06-08
remote = "https://github.com/abseil/abseil-cpp",
),
+ com_github_apache_thrift = dict(
+ sha256 = "7d59ac4fdcb2c58037ebd4a9da5f9a49e3e034bf75b3f26d9fe48ba3d8806e6b",
+ urls = ["https://files.pythonhosted.org/packages/c6/b4/510617906f8e0c5660e7d96fbc5585113f83ad547a3989b80297ac72a74c/thrift-0.11.0.tar.gz"], # 0.11.0
+ strip_prefix = "thrift-0.11.0",
+ ),
com_github_bombela_backward = dict(
commit = "44ae9609e860e3428cd057f7052e505b4819eb84", # 2018-02-06
remote = "https://github.com/bombela/backward-cpp",
),
com_github_circonus_labs_libcircllhist = dict(
- commit = "476687ac9cc636fc92ac3070246d757ae6854547", # 2018-05-08
+ commit = "050da53a44dede7bda136b93a9aeef47bd91fa12", # 2018-07-02
remote = "https://github.com/circonus-labs/libcircllhist",
),
com_github_cyan4973_xxhash = dict(
@@ -43,16 +48,16 @@ REPOSITORY_LOCATIONS = dict(
remote = "https://github.com/google/libprotobuf-mutator",
),
com_github_grpc_grpc = dict(
- commit = "bec3b5ada2c5e5d782dff0b7b5018df646b65cb0", # v1.12.0
+ commit = "bec3b5ada2c5e5d782dff0b7b5018df646b65cb0", # v1.12.0
remote = "https://github.com/grpc/grpc.git",
),
io_opentracing_cpp = dict(
- commit = "3b36b084a4d7fffc196eac83203cf24dfb8696b3", # v1.4.2
+ commit = "3b36b084a4d7fffc196eac83203cf24dfb8696b3", # v1.4.2
remote = "https://github.com/opentracing/opentracing-cpp",
),
com_lightstep_tracer_cpp = dict(
commit = "ae6a6bba65f8c4d438a6a3ac855751ca8f52e1dc",
- remote = "https://github.com/lightstep/lightstep-tracer-cpp", # v0.7.1
+ remote = "https://github.com/lightstep/lightstep-tracer-cpp", # v0.7.1
),
lightstep_vendored_googleapis = dict(
commit = "d6f78d948c53f3b400bb46996eb3084359914f9b",
@@ -63,9 +68,11 @@ REPOSITORY_LOCATIONS = dict(
remote = "https://github.com/google/jwt_verify_lib",
),
com_github_nodejs_http_parser = dict(
- # 2018-05-30 snapshot to pick up a performance fix, nodejs/http-parser PR 422
+ # 2018-07-20 snapshot to pick up:
+ # A performance fix, nodejs/http-parser PR 422.
+ # A bug fix, nodejs/http-parser PR 432.
# TODO(brian-pane): Upgrade to the next http-parser release once it's available
- commit = "cf69c8eda9fe79e4682598a7b3d39338dea319a3",
+ commit = "77310eeb839c4251c07184a5db8885a572a08352",
remote = "https://github.com/nodejs/http-parser",
),
com_github_pallets_jinja = dict(
@@ -80,6 +87,21 @@ REPOSITORY_LOCATIONS = dict(
commit = "f54b0e47a08782a6131cc3d60f94d038fa6e0a51", # v1.1.0
remote = "https://github.com/tencent/rapidjson",
),
+ com_github_twitter_common_lang = dict(
+ sha256 = "56d1d266fd4767941d11c27061a57bc1266a3342e551bde3780f9e9eb5ad0ed1",
+ urls = ["https://files.pythonhosted.org/packages/08/bc/d6409a813a9dccd4920a6262eb6e5889e90381453a5f58938ba4cf1d9420/twitter.common.lang-0.3.9.tar.gz"], # 0.3.9
+ strip_prefix = "twitter.common.lang-0.3.9/src",
+ ),
+ com_github_twitter_common_rpc = dict(
+ sha256 = "0792b63fb2fb32d970c2e9a409d3d00633190a22eb185145fe3d9067fdaa4514",
+ urls = ["https://files.pythonhosted.org/packages/be/97/f5f701b703d0f25fbf148992cd58d55b4d08d3db785aad209255ee67e2d0/twitter.common.rpc-0.3.9.tar.gz"], # 0.3.9
+ strip_prefix = "twitter.common.rpc-0.3.9/src",
+ ),
+ com_github_twitter_common_finagle_thrift = dict(
+ sha256 = "1e3a57d11f94f58745e6b83348ecd4fa74194618704f45444a15bc391fde497a",
+ urls = ["https://files.pythonhosted.org/packages/f9/e7/4f80d582578f8489226370762d2cf6bc9381175d1929eba1754e03f70708/twitter.common.finagle-thrift-0.3.9.tar.gz"], # 0.3.9
+ strip_prefix = "twitter.common.finagle-thrift-0.3.9/src",
+ ),
com_google_googletest = dict(
commit = "43863938377a9ea1399c0596269e0890b5c5515a",
remote = "https://github.com/google/googletest",
diff --git a/bazel/target_recipes.bzl b/bazel/target_recipes.bzl
index 002780148a4e2..6260336887927 100644
--- a/bazel/target_recipes.bzl
+++ b/bazel/target_recipes.bzl
@@ -5,7 +5,6 @@ TARGET_RECIPES = {
"ares": "cares",
"benchmark": "benchmark",
"event": "libevent",
- "event_pthreads": "libevent",
"tcmalloc_and_profiler": "gperftools",
"luajit": "luajit",
"nghttp2": "nghttp2",
diff --git a/ci/build_container/build_container_centos.sh b/ci/build_container/build_container_centos.sh
index f26971230c3df..d416fddea6f7c 100755
--- a/ci/build_container/build_container_centos.sh
+++ b/ci/build_container/build_container_centos.sh
@@ -9,7 +9,7 @@ curl -L -o /etc/yum.repos.d/alonid-llvm-5.0.0-epel-7.repo \
# dependencies for bazel and build_recipes
yum install -y java-1.8.0-openjdk-devel unzip which openssl rpm-build \
- cmake3 devtoolset-4-gcc-c++ git golang libtool make patch rsync wget \
+ cmake3 devtoolset-4-gcc-c++ git golang libtool make ninja-build patch rsync wget \
clang-5.0.0 devtoolset-4-libatomic-devel llvm-5.0.0 python-virtualenv bc
yum clean all
diff --git a/ci/build_container/build_container_ubuntu.sh b/ci/build_container/build_container_ubuntu.sh
index ff37f0fe1e912..e107bd1d2deb2 100755
--- a/ci/build_container/build_container_ubuntu.sh
+++ b/ci/build_container/build_container_ubuntu.sh
@@ -6,7 +6,7 @@ set -e
apt-get update
export DEBIAN_FRONTEND=noninteractive
apt-get install -y wget software-properties-common make cmake git python python-pip \
- bc libtool automake zip time golang g++ gdb strace wireshark tshark
+ bc libtool ninja-build automake zip time golang g++ gdb strace wireshark tshark
# clang head (currently 5.0)
wget -O - http://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add -
apt-add-repository "deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-5.0 main"
@@ -23,5 +23,5 @@ rm -rf /var/lib/apt/lists/*
# virtualenv
pip install virtualenv
-EXPECTED_CXX_VERSION="g++ (Ubuntu 5.4.0-6ubuntu1~16.04.9) 5.4.0 20160609" ./build_container_common.sh
+EXPECTED_CXX_VERSION="g++ (Ubuntu 5.4.0-6ubuntu1~16.04.10) 5.4.0 20160609" ./build_container_common.sh
diff --git a/ci/build_container/build_recipes/benchmark.sh b/ci/build_container/build_recipes/benchmark.sh
index 5e8f2f41ab2c5..6817ea42a291e 100644
--- a/ci/build_container/build_recipes/benchmark.sh
+++ b/ci/build_container/build_recipes/benchmark.sh
@@ -8,11 +8,17 @@ git clone https://github.com/google/benchmark.git
mkdir build
cd build
-cmake -G "Unix Makefiles" ../benchmark \
+cmake -G "Ninja" ../benchmark \
-DCMAKE_BUILD_TYPE=RELEASE \
-DBENCHMARK_ENABLE_GTEST_TESTS=OFF
-make
-cp src/libbenchmark.a "$THIRDPARTY_BUILD"/lib
+ninja
+
+benchmark_lib="libbenchmark.a"
+if [[ "${OS}" == "Windows_NT" ]]; then
+ benchmark_lib="benchmark.lib"
+fi
+
+cp "src/$benchmark_lib" "$THIRDPARTY_BUILD"/lib
cd ../benchmark
INCLUDE_DIR="$THIRDPARTY_BUILD/include/testing/base/public"
diff --git a/ci/build_container/build_recipes/cares.sh b/ci/build_container/build_recipes/cares.sh
index b3797f432e99d..d4191ae7fadd8 100755
--- a/ci/build_container/build_recipes/cares.sh
+++ b/ci/build_container/build_recipes/cares.sh
@@ -10,10 +10,31 @@ VERSION=cares-1_14_0
CPPFLAGS="$(for f in $CXXFLAGS; do if [[ $f =~ -D.* ]]; then echo $f; fi; done | tr '\n' ' ')"
CFLAGS="$(for f in $CXXFLAGS; do if [[ ! $f =~ -D.* ]]; then echo $f; fi; done | tr '\n' ' ')"
-wget -O c-ares-"$VERSION".tar.gz https://github.com/c-ares/c-ares/archive/"$VERSION".tar.gz
+curl https://github.com/c-ares/c-ares/archive/"$VERSION".tar.gz -sLo c-ares-"$VERSION".tar.gz
tar xf c-ares-"$VERSION".tar.gz
cd c-ares-"$VERSION"
-./buildconf
-./configure --prefix="$THIRDPARTY_BUILD" --enable-shared=no --enable-lib-only \
- --enable-debug --enable-optimize
-make V=1 install
+
+mkdir build
+cd build
+
+build_type=RelWithDebInfo
+if [[ "${OS}" == "Windows_NT" ]]; then
+ # On Windows, every object file in the final executable needs to be compiled to use the
+ # same version of the C Runtime Library. If Envoy is built with '-c dbg', then it will
+ # use the Debug C Runtime Library. Setting CMAKE_BUILD_TYPE to Debug will cause c-ares
+ # to use the debug version as well
+ # TODO: when '-c fastbuild' and '-c opt' work for Windows builds, set this appropriately
+ build_type=Debug
+fi
+
+cmake -G "Ninja" -DCMAKE_INSTALL_PREFIX="$THIRDPARTY_BUILD" \
+ -DCARES_SHARED=no \
+ -DCARES_STATIC=on \
+ -DCMAKE_BUILD_TYPE="$build_type" \
+ ..
+ninja
+ninja install
+
+if [[ "${OS}" == "Windows_NT" ]]; then
+ cp "CMakeFiles/c-ares.dir/c-ares.pdb" "$THIRDPARTY_BUILD/lib/c-ares.pdb"
+fi
diff --git a/ci/build_container/build_recipes/gperftools.sh b/ci/build_container/build_recipes/gperftools.sh
index 7c0c72d9c6e6e..de18e91a526d3 100755
--- a/ci/build_container/build_recipes/gperftools.sh
+++ b/ci/build_container/build_recipes/gperftools.sh
@@ -2,9 +2,13 @@
set -e
+if [[ "${OS}" == "Windows_NT" ]]; then
+ exit 0
+fi
+
VERSION=2.7
-wget -O gperftools-"$VERSION".tar.gz https://github.com/gperftools/gperftools/releases/download/gperftools-"$VERSION"/gperftools-"$VERSION".tar.gz
+curl https://github.com/gperftools/gperftools/releases/download/gperftools-"$VERSION"/gperftools-"$VERSION".tar.gz -sLo gperftools-"$VERSION".tar.gz
tar xf gperftools-"$VERSION".tar.gz
cd gperftools-"$VERSION"
diff --git a/ci/build_container/build_recipes/libevent.sh b/ci/build_container/build_recipes/libevent.sh
index c88d5bb3a2ed3..0bd783cf4bdd0 100755
--- a/ci/build_container/build_recipes/libevent.sh
+++ b/ci/build_container/build_recipes/libevent.sh
@@ -4,8 +4,33 @@ set -e
VERSION=2.1.8-stable
-wget -O libevent-"$VERSION".tar.gz https://github.com/libevent/libevent/releases/download/release-"$VERSION"/libevent-"$VERSION".tar.gz
-tar xf libevent-"$VERSION".tar.gz
-cd libevent-"$VERSION"
-./configure --prefix="$THIRDPARTY_BUILD" --enable-shared=no --disable-libevent-regress --disable-openssl
-make V=1 install
+curl https://github.com/libevent/libevent/archive/release-"$VERSION".tar.gz -sLo libevent-release-"$VERSION".tar.gz
+tar xf libevent-release-"$VERSION".tar.gz
+cd libevent-release-"$VERSION"
+
+mkdir build
+cd build
+
+# libevent defaults CMAKE_BUILD_TYPE to Release
+build_type=Release
+if [[ "${OS}" == "Windows_NT" ]]; then
+ # On Windows, every object file in the final executable needs to be compiled to use the
+ # same version of the C Runtime Library. If Envoy is built with '-c dbg', then it will
+ # use the Debug C Runtime Library. Setting CMAKE_BUILD_TYPE to Debug will cause libevent
+ # to use the debug version as well
+ # TODO: when '-c fastbuild' and '-c opt' work for Windows builds, set this appropriately
+ build_type=Debug
+fi
+
+cmake -G "Ninja" \
+ -DCMAKE_INSTALL_PREFIX="$THIRDPARTY_BUILD" \
+ -DEVENT__DISABLE_OPENSSL:BOOL=on \
+ -DEVENT__DISABLE_REGRESS:BOOL=on \
+ -DCMAKE_BUILD_TYPE="$build_type" \
+ ..
+ninja
+ninja install
+
+if [[ "${OS}" == "Windows_NT" ]]; then
+ cp "CMakeFiles/event.dir/event.pdb" "$THIRDPARTY_BUILD/lib/event.pdb"
+fi
diff --git a/ci/build_container/build_recipes/luajit.sh b/ci/build_container/build_recipes/luajit.sh
index 4deba13a51d46..3b02133d34dc7 100644
--- a/ci/build_container/build_recipes/luajit.sh
+++ b/ci/build_container/build_recipes/luajit.sh
@@ -4,7 +4,7 @@ set -e
VERSION=2.0.5
-wget -O LuaJIT-"$VERSION".tar.gz https://github.com/LuaJIT/LuaJIT/archive/v"$VERSION".tar.gz
+curl https://github.com/LuaJIT/LuaJIT/archive/v"$VERSION".tar.gz -sLo LuaJIT-"$VERSION".tar.gz
tar xf LuaJIT-"$VERSION".tar.gz
cd LuaJIT-"$VERSION"
@@ -46,15 +46,26 @@ index f7f81a4..e698517 100644
# Disable the JIT compiler, i.e. turn LuaJIT into a pure interpreter.
#XCFLAGS+= -DLUAJIT_DISABLE_JIT
@@ -564,7 +564,7 @@ endif
-
+
Q= @
E= @echo
-#Q=
+Q=
#E= @:
-
+
##############################################################################
EOF
-patch -p1 < ../luajit_make.diff
-DEFAULT_CC=${CC} TARGET_CFLAGS=${CFLAGS} TARGET_LDFLAGS=${CFLAGS} CFLAGS="" make V=1 PREFIX="$THIRDPARTY_BUILD" install
+if [[ "${OS}" == "Windows_NT" ]]; then
+ cd src
+ ./msvcbuild.bat debug
+
+ mkdir -p "$THIRDPARTY_BUILD/include/luajit-2.0"
+ cp *.h* "$THIRDPARTY_BUILD/include/luajit-2.0"
+ cp luajit.lib "$THIRDPARTY_BUILD/lib"
+ cp *.pdb "$THIRDPARTY_BUILD/lib"
+else
+ patch -p1 < ../luajit_make.diff
+
+ DEFAULT_CC=${CC} TARGET_CFLAGS=${CFLAGS} TARGET_LDFLAGS=${CFLAGS} CFLAGS="" make V=1 PREFIX="$THIRDPARTY_BUILD" install
+fi
diff --git a/ci/build_container/build_recipes/nghttp2.sh b/ci/build_container/build_recipes/nghttp2.sh
index 1b380f3856d2e..cea6ab963292a 100755
--- a/ci/build_container/build_recipes/nghttp2.sh
+++ b/ci/build_container/build_recipes/nghttp2.sh
@@ -2,10 +2,47 @@
set -e
-VERSION=1.32.0
+# Use master branch, which contains a fix for the spurious limit of 100 concurrent streams:
+# https://github.com/nghttp2/nghttp2/commit/2ba1389993729fcb6ee5794ac512f2b67b29952e
+# TODO(PiotrSikora): switch back to releases once v1.33.0 is out.
+VERSION=e5b3f9addd49bca27e2f99c5c65a564eb5c0cf6d # 2018-06-09
-wget -O nghttp2-"$VERSION".tar.gz https://github.com/nghttp2/nghttp2/releases/download/v"$VERSION"/nghttp2-"$VERSION".tar.gz
+curl https://github.com/nghttp2/nghttp2/archive/"$VERSION".tar.gz -sLo nghttp2-"$VERSION".tar.gz
tar xf nghttp2-"$VERSION".tar.gz
cd nghttp2-"$VERSION"
-./configure --prefix="$THIRDPARTY_BUILD" --enable-shared=no --enable-lib-only
-make V=1 install
+
+# Allow nghttp2 to build as static lib on Windows
+# TODO: remove once https://github.com/nghttp2/nghttp2/pull/1198 is merged
+cat > nghttp2_cmakelists.diff << 'EOF'
+diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt
+index 17e422b2..e58070f5 100644
+--- a/lib/CMakeLists.txt
++++ b/lib/CMakeLists.txt
+@@ -56,6 +56,7 @@ if(HAVE_CUNIT OR ENABLE_STATIC_LIB)
+ COMPILE_FLAGS "${WARNCFLAGS}"
+ VERSION ${LT_VERSION} SOVERSION ${LT_SOVERSION}
+ ARCHIVE_OUTPUT_NAME nghttp2
++ ARCHIVE_OUTPUT_DIRECTORY static
+ )
+ target_compile_definitions(nghttp2_static PUBLIC "-DNGHTTP2_STATICLIB")
+ if(ENABLE_STATIC_LIB)
+EOF
+
+if [[ "${OS}" == "Windows_NT" ]]; then
+ git apply nghttp2_cmakelists.diff
+fi
+
+mkdir build
+cd build
+
+cmake -G "Ninja" -DCMAKE_INSTALL_PREFIX="$THIRDPARTY_BUILD" \
+ -DCMAKE_INSTALL_LIBDIR="$THIRDPARTY_BUILD/lib" \
+ -DENABLE_STATIC_LIB=on \
+ -DENABLE_LIB_ONLY=on \
+ ..
+ninja
+ninja install
+
+if [[ "${OS}" == "Windows_NT" ]]; then
+ cp "lib/CMakeFiles/nghttp2_static.dir/nghttp2_static.pdb" "$THIRDPARTY_BUILD/lib/nghttp2_static.pdb"
+fi
diff --git a/ci/build_container/build_recipes/yaml-cpp.sh b/ci/build_container/build_recipes/yaml-cpp.sh
index db63dcb3a11ef..2c565cfd1bf6c 100755
--- a/ci/build_container/build_recipes/yaml-cpp.sh
+++ b/ci/build_container/build_recipes/yaml-cpp.sh
@@ -4,11 +4,31 @@ set -e
VERSION=0.6.2
-wget -O yaml-cpp-"$VERSION".tar.gz https://github.com/jbeder/yaml-cpp/archive/yaml-cpp-"$VERSION".tar.gz
+curl https://github.com/jbeder/yaml-cpp/archive/yaml-cpp-"$VERSION".tar.gz -sLo yaml-cpp-"$VERSION".tar.gz
tar xf yaml-cpp-"$VERSION".tar.gz
cd yaml-cpp-yaml-cpp-"$VERSION"
-cmake -DCMAKE_INSTALL_PREFIX:PATH="$THIRDPARTY_BUILD" \
+
+mkdir build
+cd build
+
+build_type=RelWithDebInfo
+if [[ "${OS}" == "Windows_NT" ]]; then
+ # On Windows, every object file in the final executable needs to be compiled to use the
+ # same version of the C Runtime Library. If Envoy is built with '-c dbg', then it will
+ # use the Debug C Runtime Library. Setting CMAKE_BUILD_TYPE to Debug will cause yaml-cpp
+ # to use the debug version as well
+ # TODO: when '-c fastbuild' and '-c opt' work for Windows builds, set this appropriately
+ build_type=Debug
+fi
+
+cmake -G "Ninja" -DCMAKE_INSTALL_PREFIX:PATH="$THIRDPARTY_BUILD" \
-DCMAKE_CXX_FLAGS:STRING="${CXXFLAGS} ${CPPFLAGS}" \
-DCMAKE_C_FLAGS:STRING="${CFLAGS} ${CPPFLAGS}" \
- -DCMAKE_BUILD_TYPE=RelWithDebInfo .
-make VERBOSE=1 install
+ -DYAML_CPP_BUILD_TESTS=off \
+ -DCMAKE_BUILD_TYPE="$build_type" \
+ ..
+ninja install
+
+if [[ "${OS}" == "Windows_NT" ]]; then
+ cp "CMakeFiles/yaml-cpp.dir/yaml-cpp.pdb" "$THIRDPARTY_BUILD/lib/yaml-cpp.pdb"
+fi
diff --git a/ci/build_container/build_recipes/zlib.sh b/ci/build_container/build_recipes/zlib.sh
index fd22ea67f0af5..62997062f1491 100644
--- a/ci/build_container/build_recipes/zlib.sh
+++ b/ci/build_container/build_recipes/zlib.sh
@@ -4,8 +4,15 @@ set -e
VERSION=1.2.11
-wget -O zlib-"$VERSION".tar.gz https://github.com/madler/zlib/archive/v"$VERSION".tar.gz
+curl https://github.com/madler/zlib/archive/v"$VERSION".tar.gz -sLo zlib-"$VERSION".tar.gz
tar xf zlib-"$VERSION".tar.gz
cd zlib-"$VERSION"
-./configure --prefix="$THIRDPARTY_BUILD"
-make V=1 install
+mkdir build
+cd build
+cmake -G "Ninja" -DCMAKE_INSTALL_PREFIX:PATH="$THIRDPARTY_BUILD" ..
+ninja
+ninja install
+
+if [[ "${OS}" == "Windows_NT" ]]; then
+ cp "CMakeFiles/zlibstatic.dir/zlibstatic.pdb" "$THIRDPARTY_BUILD/lib/zlibstatic.pdb"
+fi
diff --git a/ci/build_setup.ps1 b/ci/build_setup.ps1
new file mode 100755
index 0000000000000..12a3aeff987f5
--- /dev/null
+++ b/ci/build_setup.ps1
@@ -0,0 +1,22 @@
+$ErrorActionPreference = "Stop";
+trap { $host.SetShouldExit(1) }
+
+if ("$env:NUM_CPUS" -eq "") {
+ $env:NUM_CPUS = (Get-WmiObject -class Win32_computersystem).NumberOfLogicalProcessors
+}
+
+if ("$env:ENVOY_BAZEL_ROOT" -eq "") {
+ Write-Host "ENVOY_BAZEL_ROOT must be set!"
+ throw
+}
+
+mkdir -force "$env:ENVOY_BAZEL_ROOT" > $nul
+
+$env:ENVOY_SRCDIR = [System.IO.Path]::GetFullPath("$PSScriptRoot\..")
+
+echo "ENVOY_BAZEL_ROOT: $env:ENVOY_BAZEL_ROOT"
+echo "ENVOY_SRCDIR: $env:ENVOY_SRCDIR"
+
+$env:BAZEL_BASE_OPTIONS="--nomaster_bazelrc --output_base=$env:ENVOY_BAZEL_ROOT --bazelrc=$env:ENVOY_SRCDIR\windows\tools\bazel.rc"
+$env:BAZEL_BUILD_OPTIONS="--strategy=Genrule=standalone --spawn_strategy=standalone --verbose_failures --jobs=$env:NUM_CPUS --show_task_finish $env:BAZEL_BUILD_EXTRA_OPTIONS"
+$env:BAZEL_TEST_OPTIONS="$env:BAZEL_BUILD_OPTIONS --cache_test_results=no --test_output=all $env:BAZEL_EXTRA_TEST_OPTIONS"
diff --git a/ci/build_setup.sh b/ci/build_setup.sh
index 6264b66929cce..dba93c0dd78d3 100755
--- a/ci/build_setup.sh
+++ b/ci/build_setup.sh
@@ -87,7 +87,7 @@ if [ "$1" != "-nofetch" ]; then
fi
# This is the hash on https://github.com/envoyproxy/envoy-filter-example.git we pin to.
- (cd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" && git fetch origin && git checkout -f 4b6c55b726eda8a1f99e6f4ca1a87f6ce670604f)
+ (cd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" && git fetch origin && git checkout -f 3e5b73305b961526ffcee7584251692a9a3ce4b3)
cp -f "${ENVOY_SRCDIR}"/ci/WORKSPACE.filter.example "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/WORKSPACE
fi
diff --git a/ci/do_ci.ps1 b/ci/do_ci.ps1
new file mode 100755
index 0000000000000..fa0aa691c1a7d
--- /dev/null
+++ b/ci/do_ci.ps1
@@ -0,0 +1,20 @@
+$ErrorActionPreference = "Stop";
+trap { $host.SetShouldExit(1) }
+
+. "$PSScriptRoot\build_setup.ps1"
+Write-Host "building using $env:NUM_CPUS CPUs"
+
+function bazel_debug_binary_build() {
+ echo "Building..."
+ pushd "$env:ENVOY_SRCDIR"
+ bazel $env:BAZEL_BASE_OPTIONS.Split(" ") build $env:BAZEL_BUILD_OPTIONS.Split(" ") -c dbg "//source/exe:envoy-static"
+ $exit = $LASTEXITCODE
+ if ($exit -ne 0) {
+ popd
+ exit $exit
+ }
+ popd
+}
+
+echo "bazel debug build..."
+bazel_debug_binary_build
diff --git a/ci/mac_ci_setup.sh b/ci/mac_ci_setup.sh
index e44bd7d5430f1..decdfd75f4d97 100755
--- a/ci/mac_ci_setup.sh
+++ b/ci/mac_ci_setup.sh
@@ -21,7 +21,7 @@ if ! brew update; then
exit 1
fi
-DEPS="automake bazel cmake coreutils go libtool wget"
+DEPS="automake bazel cmake coreutils go libtool wget ninja"
for DEP in ${DEPS}
do
is_installed "${DEP}" || install "${DEP}"
diff --git a/ci/prebuilt/BUILD b/ci/prebuilt/BUILD
index 691644568905c..8997736ea30ae 100644
--- a/ci/prebuilt/BUILD
+++ b/ci/prebuilt/BUILD
@@ -2,36 +2,47 @@ licenses(["notice"]) # Apache 2
package(default_visibility = ["//visibility:public"])
+config_setting(
+ name = "windows_x86_64",
+ values = {"cpu": "x64_windows"},
+)
+
cc_library(
name = "ares",
- srcs = ["thirdparty_build/lib/libcares.a"],
+ srcs = select({
+ ":windows_x86_64": ["thirdparty_build/lib/cares.lib"],
+ "//conditions:default": ["thirdparty_build/lib/libcares.a"],
+ }),
hdrs = glob(["thirdparty_build/include/ares*.h"]),
includes = ["thirdparty_build/include"],
)
cc_library(
name = "benchmark",
- srcs = ["thirdparty_build/lib/libbenchmark.a"],
+ srcs = select({
+ ":windows_x86_64": ["thirdparty_build/lib/benchmark.lib"],
+ "//conditions:default": ["thirdparty_build/lib/libbenchmark.a"],
+ }),
hdrs = ["thirdparty_build/include/testing/base/public/benchmark.h"],
includes = ["thirdparty_build/include"],
)
cc_library(
name = "event",
- srcs = ["thirdparty_build/lib/libevent.a"],
+ srcs = select({
+ ":windows_x86_64": ["thirdparty_build/lib/event.lib"],
+ "//conditions:default": ["thirdparty_build/lib/libevent.a"],
+ }),
hdrs = glob(["thirdparty_build/include/event2/**/*.h"]),
includes = ["thirdparty_build/include"],
)
-cc_library(
- name = "event_pthreads",
- srcs = ["thirdparty_build/lib/libevent_pthreads.a"],
- deps = [":event"],
-)
-
cc_library(
name = "luajit",
- srcs = ["thirdparty_build/lib/libluajit-5.1.a"],
+ srcs = select({
+ ":windows_x86_64": ["thirdparty_build/lib/luajit.lib"],
+ "//conditions:default": ["thirdparty_build/lib/libluajit-5.1.a"],
+ }),
hdrs = glob(["thirdparty_build/include/luajit-2.0/*"]),
includes = ["thirdparty_build/include"],
# TODO(mattklein123): We should strip luajit-2.0 here for consumers. However, if we do that
@@ -40,7 +51,10 @@ cc_library(
cc_library(
name = "nghttp2",
- srcs = ["thirdparty_build/lib/libnghttp2.a"],
+ srcs = select({
+ ":windows_x86_64": ["thirdparty_build/lib/nghttp2.lib"],
+ "//conditions:default": ["thirdparty_build/lib/libnghttp2.a"],
+ }),
hdrs = glob(["thirdparty_build/include/nghttp2/**/*.h"]),
includes = ["thirdparty_build/include"],
)
@@ -54,14 +68,20 @@ cc_library(
cc_library(
name = "yaml_cpp",
- srcs = ["thirdparty_build/lib/libyaml-cpp.a"],
+ srcs = select({
+ ":windows_x86_64": glob(["thirdparty_build/lib/libyaml-cpp*.lib"]),
+ "//conditions:default": ["thirdparty_build/lib/libyaml-cpp.a"],
+ }),
hdrs = glob(["thirdparty_build/include/yaml-cpp/**/*.h"]),
includes = ["thirdparty_build/include"],
)
cc_library(
name = "zlib",
- srcs = ["thirdparty_build/lib/libz.a"],
+ srcs = select({
+ ":windows_x86_64": glob(["thirdparty_build/lib/zlibstaticd.lib"]),
+ "//conditions:default": ["thirdparty_build/lib/libz.a"],
+ }),
hdrs = [
"thirdparty_build/include/zconf.h",
"thirdparty_build/include/zlib.h",
diff --git a/ci/run_envoy_docker.sh b/ci/run_envoy_docker.sh
index d52172671f73b..3fd04a01e283e 100755
--- a/ci/run_envoy_docker.sh
+++ b/ci/run_envoy_docker.sh
@@ -18,7 +18,7 @@ USER_GROUP=root
mkdir -p "${ENVOY_DOCKER_BUILD_DIR}"
# Since we specify an explicit hash, docker-run will pull from the remote repo if missing.
-docker run --rm -t -i -e http_proxy=${http_proxy} -e https_proxy=${https_proxy} \
+docker run --rm -t -i -e HTTP_PROXY=${http_proxy} -e HTTPS_PROXY=${https_proxy} \
-u "${USER}":"${USER_GROUP}" -v "${ENVOY_DOCKER_BUILD_DIR}":/build \
-v "$PWD":/source -e NUM_CPUS --cap-add SYS_PTRACE "${IMAGE_NAME}":"${IMAGE_ID}" \
/bin/bash -lc "groupadd --gid $(id -g) -f envoygroup && useradd -o --uid $(id -u) --gid $(id -g) \
diff --git a/configs/configgen.sh b/configs/configgen.sh
index 703d09b2c5494..ff8b006da31e6 100755
--- a/configs/configgen.sh
+++ b/configs/configgen.sh
@@ -22,4 +22,4 @@ for FILE in $*; do
done
# tar is having issues with -C for some reason so just cd into OUT_DIR.
-(cd "$OUT_DIR"; tar -cvf example_configs.tar *.json *.yaml certs/*.pem)
+(cd "$OUT_DIR"; tar -hcvf example_configs.tar *.json *.yaml certs/*.pem)
diff --git a/docs/build.sh b/docs/build.sh
index a93ecea6caaf8..69ee02ce3f6e6 100755
--- a/docs/build.sh
+++ b/docs/build.sh
@@ -49,7 +49,9 @@ bazel --batch build ${BAZEL_BUILD_OPTIONS} @envoy_api//docs:protos --aspects \
# These are the protos we want to put in docs, this list will grow.
# TODO(htuch): Factor this out of this script.
PROTO_RST="
+ /envoy/admin/v2alpha/clusters/envoy/admin/v2alpha/clusters.proto.rst
/envoy/admin/v2alpha/config_dump/envoy/admin/v2alpha/config_dump.proto.rst
+ /envoy/admin/v2alpha/metrics/envoy/admin/v2alpha/metrics.proto.rst
/envoy/api/v2/core/address/envoy/api/v2/core/address.proto.rst
/envoy/api/v2/core/base/envoy/api/v2/core/base.proto.rst
/envoy/api/v2/core/http_uri/envoy/api/v2/core/http_uri.proto.rst
@@ -102,12 +104,17 @@ PROTO_RST="
/envoy/config/rbac/v2alpha/rbac/envoy/config/rbac/v2alpha/rbac.proto.rst
/envoy/config/transport_socket/capture/v2alpha/capture/envoy/config/transport_socket/capture/v2alpha/capture.proto.rst
/envoy/data/accesslog/v2/accesslog/envoy/data/accesslog/v2/accesslog.proto.rst
+ /envoy/data/core/v2alpha/health_check_event/envoy/data/core/v2alpha/health_check_event.proto.rst
/envoy/data/tap/v2alpha/capture/envoy/data/tap/v2alpha/capture.proto.rst
/envoy/service/accesslog/v2/als/envoy/service/accesslog/v2/als.proto.rst
/envoy/service/auth/v2alpha/external_auth/envoy/service/auth/v2alpha/attribute_context.proto.rst
/envoy/service/auth/v2alpha/external_auth/envoy/service/auth/v2alpha/external_auth.proto.rst
+ /envoy/type/http_status/envoy/type/http_status.proto.rst
/envoy/type/percent/envoy/type/percent.proto.rst
/envoy/type/range/envoy/type/range.proto.rst
+ /envoy/type/matcher/metadata/envoy/type/matcher/metadata.proto.rst
+ /envoy/type/matcher/number/envoy/type/matcher/number.proto.rst
+ /envoy/type/matcher/string/envoy/type/matcher/string.proto.rst
"
# Dump all the generated RST so they can be added to PROTO_RST easily.
diff --git a/docs/root/api-v1/route_config/route.rst b/docs/root/api-v1/route_config/route.rst
index 7b320b90ac77b..5f11b1708079a 100644
--- a/docs/root/api-v1/route_config/route.rst
+++ b/docs/root/api-v1/route_config/route.rst
@@ -487,7 +487,7 @@ string map. Nested objects are not supported.
.. _config_http_conn_man_route_table_cors:
-Cors
+CORS
--------
Settings on a route take precedence over settings on the virtual host.
diff --git a/docs/root/api-v1/route_config/vhost.rst b/docs/root/api-v1/route_config/vhost.rst
index 2d4662124101e..a677b79c53499 100644
--- a/docs/root/api-v1/route_config/vhost.rst
+++ b/docs/root/api-v1/route_config/vhost.rst
@@ -15,6 +15,7 @@ upstream cluster to route to or whether to perform a redirect.
"name": "...",
"domains": [],
"routes": [],
+ "cors": {},
"require_ssl": "...",
"virtual_clusters": [],
"rate_limits": [],
diff --git a/docs/root/api-v2/admin/admin.rst b/docs/root/api-v2/admin/admin.rst
index 455ba12199f37..db0081b902492 100644
--- a/docs/root/api-v2/admin/admin.rst
+++ b/docs/root/api-v2/admin/admin.rst
@@ -6,3 +6,5 @@ Admin
:maxdepth: 2
../admin/v2alpha/config_dump.proto
+ ../admin/v2alpha/clusters.proto
+ ../admin/v2alpha/metrics.proto
diff --git a/docs/root/api-v2/data/core/core.rst b/docs/root/api-v2/data/core/core.rst
new file mode 100644
index 0000000000000..f9d7e77bf4d71
--- /dev/null
+++ b/docs/root/api-v2/data/core/core.rst
@@ -0,0 +1,8 @@
+Core data
+=========
+
+.. toctree::
+ :glob:
+ :maxdepth: 2
+
+ v2alpha/health_check_event.proto
diff --git a/docs/root/api-v2/data/data.rst b/docs/root/api-v2/data/data.rst
index e97e93bf879fc..fd7c5877e939f 100644
--- a/docs/root/api-v2/data/data.rst
+++ b/docs/root/api-v2/data/data.rst
@@ -6,4 +6,5 @@ Envoy data
:maxdepth: 2
accesslog/accesslog
+ core/core
tap/tap
diff --git a/docs/root/api-v2/types/types.rst b/docs/root/api-v2/types/types.rst
index 116d6c3cb519c..8b5750f9993f6 100644
--- a/docs/root/api-v2/types/types.rst
+++ b/docs/root/api-v2/types/types.rst
@@ -5,5 +5,9 @@ Types
:glob:
:maxdepth: 2
+ ../type/http_status.proto
../type/percent.proto
../type/range.proto
+ ../type/matcher/metadata.proto
+ ../type/matcher/number.proto
+ ../type/matcher/string.proto
diff --git a/docs/root/configuration/access_log.rst b/docs/root/configuration/access_log.rst
index 88946fa75f669..94392f404c69f 100644
--- a/docs/root/configuration/access_log.rst
+++ b/docs/root/configuration/access_log.rst
@@ -102,6 +102,16 @@ The following command operators are supported:
TCP
Total duration in milliseconds of the downstream connection.
+%RESPONSE_DURATION%
+ HTTP
+ Total duration in milliseconds of the request from the start time to the first byte read from the
+ upstream host.
+
+ TCP
+ Not implemented ("-").
+
+.. _config_access_log_format_response_flags:
+
%RESPONSE_FLAGS%
Additional details about the response or connection, if any. For TCP connections, the response codes mentioned in
the descriptions do not apply. Possible values are:
@@ -121,6 +131,14 @@ The following command operators are supported:
* **FI**: The request was aborted with a response code specified via :ref:`fault injection `.
* **RL**: The request was ratelimited locally by the :ref:`HTTP rate limit filter ` in addition to 429 response code.
+%RESPONSE_TX_DURATION%
+ HTTP
+ Total duration in milliseconds of the request from the first byte read from the upstream host to the last
+ byte sent downstream.
+
+ TCP
+ Not implemented ("-").
+
%UPSTREAM_HOST%
Upstream host URL (e.g., tcp://ip:port for TCP connections).
diff --git a/docs/root/configuration/cluster_manager/cluster_stats.rst b/docs/root/configuration/cluster_manager/cluster_stats.rst
index 83fe8a10b0340..bfeb164c795a5 100644
--- a/docs/root/configuration/cluster_manager/cluster_stats.rst
+++ b/docs/root/configuration/cluster_manager/cluster_stats.rst
@@ -19,6 +19,10 @@ statistics. Any ``:`` character in the stats name is replaced with ``_``.
cluster_added, Counter, Total clusters added (either via static config or CDS)
cluster_modified, Counter, Total clusters modified (via CDS)
cluster_removed, Counter, Total clusters removed (via CDS)
+ cluster_updated, Counter, Total cluster updates
+ cluster_updated_via_merge, Counter, Total cluster updates applied as merged updates
+ update_merge_cancelled, Counter, Total merged updates that got cancelled and delivered early
+ update_out_of_merge_window, Counter, Total updates which arrived out of a merge window
active_clusters, Gauge, Number of currently active (warmed) clusters
warming_clusters, Gauge, Number of currently warming (not active) clusters
diff --git a/docs/root/configuration/health_checkers/redis.rst b/docs/root/configuration/health_checkers/redis.rst
index 2439982ced8d2..1859c005adb1e 100644
--- a/docs/root/configuration/health_checkers/redis.rst
+++ b/docs/root/configuration/health_checkers/redis.rst
@@ -3,12 +3,22 @@
Redis
=====
-The Redis health checker is a custom health checker which checks Redis upstream hosts. It sends
-a Redis PING command and expect a PONG response. The upstream Redis server can respond with
-anything other than PONG to cause an immediate active health check failure. Optionally, Envoy can
-perform EXISTS on a user-specified key. If the key does not exist it is considered a passing healthcheck.
-This allows the user to mark a Redis instance for maintenance by setting the specified
-:ref:`key ` to any value and waiting for
-traffic to drain.
+The Redis health checker is a custom health checker (with :code:`envoy.health_checkers.redis` as the name)
+which checks Redis upstream hosts. It sends a Redis PING command and expects a PONG response. The upstream
+Redis server can respond with anything other than PONG to cause an immediate active health check failure.
+Optionally, Envoy can perform EXISTS on a user-specified key. If the key does not exist it is considered a
+passing healthcheck. This allows the user to mark a Redis instance for maintenance by setting the
+specified :ref:`key ` to any value and waiting
+for traffic to drain.
+
+An example setting for :ref:`custom_health_check ` as a
+Redis health checker is shown below:
+
+.. code-block:: yaml
+
+ custom_health_check:
+ name: envoy.health_checkers.redis
+ config:
+ key: foo
* :ref:`v2 API reference `
\ No newline at end of file
diff --git a/docs/root/configuration/http_conn_man/stats.rst b/docs/root/configuration/http_conn_man/stats.rst
index 1b9b13e1d63ec..9b32590f3362e 100644
--- a/docs/root/configuration/http_conn_man/stats.rst
+++ b/docs/root/configuration/http_conn_man/stats.rst
@@ -52,6 +52,7 @@ statistics:
downstream_rq_5xx, Counter, Total 5xx responses
downstream_rq_ws_on_non_ws_route, Counter, Total WebSocket upgrade requests rejected by non WebSocket routes
downstream_rq_time, Histogram, Request time milliseconds
+ downstream_rq_idle_timeout, Counter, Total requests closed due to idle timeout
rs_too_large, Counter, Total response errors due to buffering an overly large body
Per user agent statistics
diff --git a/docs/root/configuration/http_filters/cors_filter.rst b/docs/root/configuration/http_filters/cors_filter.rst
index 436999a1d18dd..f7e0018cc5f4d 100644
--- a/docs/root/configuration/http_filters/cors_filter.rst
+++ b/docs/root/configuration/http_filters/cors_filter.rst
@@ -8,5 +8,5 @@ For the meaning of the headers please refer to the pages below.
- https://developer.mozilla.org/en-US/docs/Web/HTTP/Access_control_CORS
- https://www.w3.org/TR/cors/
-- :ref:`v1 API reference `
-- :ref:`v2 API reference `
+- :ref:`v1 API reference `
+- :ref:`v2 API reference `
diff --git a/docs/root/configuration/http_filters/ext_authz_filter.rst b/docs/root/configuration/http_filters/ext_authz_filter.rst
index c9eabf8f94819..bd92156303970 100644
--- a/docs/root/configuration/http_filters/ext_authz_filter.rst
+++ b/docs/root/configuration/http_filters/ext_authz_filter.rst
@@ -5,9 +5,11 @@ External Authorization
* External authorization :ref:`architecture overview `
* :ref:`HTTP filter v2 API reference `
-The external authorization HTTP filter calls an external gRPC service to check if the incoming
+The external authorization HTTP filter calls an external gRPC or HTTP service to check if the incoming
HTTP request is authorized or not.
-If the request is deemed unauthorized then the request will be denied with 403 (Forbidden) response.
+If the request is deemed unauthorized then the request will be denied normally with 403 (Forbidden) response.
+Note that sending additional custom metadata from the authorization service to the upstream, or to the downstream is
+also possible. This is explained in more detail in :ref:`HTTP filter `.
.. tip::
It is recommended that this filter is configured first in the filter chain so that requests are
@@ -18,14 +20,14 @@ The content of the requests that are passed to an authorization service is speci
.. _config_http_filters_ext_authz_http_configuration:
-The HTTP filter, using a gRPC service, can be configured as follows. You can see all the
+The HTTP filter, using a gRPC/HTTP service, can be configured as follows. You can see all the
configuration options at
:ref:`HTTP filter `.
-Example
--------
+Configuration Examples
+-----------------------------
-A sample filter configuration could be:
+A sample filter configuration for a gRPC authorization server:
.. code-block:: yaml
@@ -36,6 +38,8 @@ A sample filter configuration could be:
envoy_grpc:
cluster_name: ext-authz
+.. code-block:: yaml
+
clusters:
- name: ext-authz
type: static
@@ -43,6 +47,30 @@ A sample filter configuration could be:
hosts:
- socket_address: { address: 127.0.0.1, port_value: 10003 }
+A sample filter configuration for a raw HTTP authorization server:
+
+.. code-block:: yaml
+
+ http_filters:
+ - name: envoy.ext_authz
+ config:
+ http_service:
+ server_uri:
+ uri: 127.0.0.1:10003
+ cluster: ext-authz
+ timeout: 0.25s
+ failure_mode_allow: false
+
+.. code-block:: yaml
+
+ clusters:
+ - name: ext-authz
+ connect_timeout: 0.25s
+ type: logical_dns
+ lb_policy: round_robin
+ hosts:
+ - socket_address: { address: 127.0.0.1, port_value: 10003 }
+
Statistics
----------
The HTTP filter outputs statistics in the *cluster..ext_authz.* namespace.
diff --git a/docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst b/docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst
index 0c77708dbac58..471297286a125 100644
--- a/docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst
+++ b/docs/root/configuration/http_filters/grpc_json_transcoder_filter.rst
@@ -62,3 +62,16 @@ match the incoming request path, set `match_incoming_request_route` to true.
};
}
}
+
+Sending arbitrary content
+-------------------------
+
+By default, when transcoding occurs, gRPC-JSON encodes the message output of a gRPC service method into
+JSON and sets the HTTP response `Content-Type` header to `application/json`. To send arbitrary content,
+a gRPC service method can use
+`google.api.HttpBody `_
+as its output message type. The implementation needs to set
+`content_type `_
+(which sets the value of the HTTP response `Content-Type` header) and
+`data `_
+(which sets the HTTP response body) accordingly.
\ No newline at end of file
diff --git a/docs/root/configuration/http_filters/lua_filter.rst b/docs/root/configuration/http_filters/lua_filter.rst
index 0f5fb6022d358..afb2970a57869 100644
--- a/docs/root/configuration/http_filters/lua_filter.rst
+++ b/docs/root/configuration/http_filters/lua_filter.rst
@@ -290,6 +290,28 @@ under the filter name i.e. *envoy.lua*. Below is an example of a *metadata* in a
Returns a :ref:`metadata object `.
+requestInfo()
+^^^^^^^^^^^^^
+
+.. code-block:: lua
+
+ requestInfo = handle:requestInfo()
+
+Returns :repo:`information ` related to the current request.
+
+Returns a :ref:`request info object `.
+
+connection()
+^^^^^^^^^^^^
+
+.. code-block:: lua
+
+ connection = handle:connection()
+
+Returns the current request's underlying :repo:`connection `.
+
+Returns a :ref:`connection object `.
+
.. _config_http_filters_lua_header_wrapper:
Header object API
@@ -390,7 +412,7 @@ get()
metadata:get(key)
Gets a metadata. *key* is a string that supplies the metadata key. Returns the corresponding
-value of the given metadata key. The type of the value can be: *null*, *boolean*, *number*,
+value of the given metadata key. The type of the value can be: *nil*, *boolean*, *number*,
*string* and *table*.
__pairs()
@@ -403,3 +425,89 @@ __pairs()
Iterates through every *metadata* entry. *key* is a string that supplies a *metadata*
key. *value* is *metadata* entry value.
+
+.. _config_http_filters_lua_request_info_wrapper:
+
+Request info object API
+-----------------------
+
+protocol()
+^^^^^^^^^^
+
+.. code-block:: lua
+
+ requestInfo:protocol()
+
+Returns the string representation of :repo:`HTTP protocol `
+used by the current request. The possible values are: *HTTP/1.0*, *HTTP/1.1*, and *HTTP/2*.
+
+dynamicMetadata()
+^^^^^^^^^^^^^^^^^
+
+.. code-block:: lua
+
+ requestInfo:dynamicMetadata()
+
+Returns a :ref:`dynamic metadata object `.
+
+.. _config_http_filters_lua_request_info_dynamic_metadata_wrapper:
+
+Dynamic metadata object API
+---------------------------
+
+get()
+^^^^^
+
+.. code-block:: lua
+
+ dynamicMetadata:get(filterName)
+
+ -- to get a value from a returned table.
+ dynamicMetadata:get(filterName)[key]
+
+Gets an entry in dynamic metadata struct. *filterName* is a string that supplies the filter name, e.g. *envoy.lb*.
+Returns the corresponding *table* of a given *filterName*.
+
+set()
+^^^^^
+
+.. code-block:: lua
+
+ dynamicMetadata:set(filterName, key, value)
+
+Sets key-value pair of a *filterName*'s metadata. *filterName* is a key specifying the target filter name,
+e.g. *envoy.lb*. The type of *key* and *value* is *string*.
+
+__pairs()
+^^^^^^^^^
+
+.. code-block:: lua
+
+ for key, value in pairs(dynamicMetadata) do
+ end
+
+Iterates through every *dynamicMetadata* entry. *key* is a string that supplies a *dynamicMetadata*
+key. *value* is *dynamicMetadata* entry value.
+
+.. _config_http_filters_lua_connection_wrapper:
+
+Connection object API
+---------------------
+
+ssl()
+^^^^^^^^
+
+.. code-block:: lua
+
+ if connection:ssl() == nil then
+ print("plain")
+ else
+ print("secure")
+ end
+
+Returns :repo:`SSL connection ` object when the connection is
+secured and *nil* when it is not.
+
+.. note::
+
+ Currently the SSL connection object has no exposed APIs.
diff --git a/docs/root/configuration/listener_filters/listener_filters.rst b/docs/root/configuration/listener_filters/listener_filters.rst
index 6c5a7857d5a4c..4f3e2f353398c 100644
--- a/docs/root/configuration/listener_filters/listener_filters.rst
+++ b/docs/root/configuration/listener_filters/listener_filters.rst
@@ -9,4 +9,5 @@ Envoy has the follow builtin listener filters.
:maxdepth: 2
original_dst_filter
+ proxy_protocol
tls_inspector
diff --git a/docs/root/configuration/listener_filters/proxy_protocol.rst b/docs/root/configuration/listener_filters/proxy_protocol.rst
new file mode 100644
index 0000000000000..2d196609fb004
--- /dev/null
+++ b/docs/root/configuration/listener_filters/proxy_protocol.rst
@@ -0,0 +1,26 @@
+.. _config_listener_filters_proxy_protocol:
+
+Proxy Protocol
+==============
+
+This listener filter adds support for
+`HAProxy Proxy Protocol `_.
+
+In this mode, the upstream connection is assumed to come from a proxy
+which places the original coordinates (IP, PORT) into a connection-string.
+Envoy then extracts these and uses them as the remote address.
+
+In Proxy Protocol v2 there exists the concept of extensions (TLV)
+tags that are optional. This implementation skips over these without
+using them.
+
+This implementation supports both version 1 and version 2; it
+automatically determines on a per-connection basis which of the two
+versions is present. Note: if the filter is enabled, the Proxy Protocol
+must be present on the connection (either version 1 or version 2),
+the standard does not allow parsing to determine if it is present or not.
+
+If there is a protocol error or an unsupported address family
+(e.g. AF_UNIX) the connection will be closed and an error thrown.
+
+* :ref:`v2 API reference `
diff --git a/docs/root/configuration/overview/v2_overview.rst b/docs/root/configuration/overview/v2_overview.rst
index 1e5629e8c80ea..3c8e0aff06c99 100644
--- a/docs/root/configuration/overview/v2_overview.rst
+++ b/docs/root/configuration/overview/v2_overview.rst
@@ -37,12 +37,9 @@ flag, i.e.:
.. code-block:: console
- ./envoy -c .{json,yaml,pb,pb_text} --v2-config-only
+ ./envoy -c .{json,yaml,pb,pb_text}
-where the extension reflects the underlying v2 config representation. The
-:option:`--v2-config-only` flag is not strictly required as Envoy will attempt
-to autodetect the config file version, but this option provides an enhanced
-debug experience when configuration parsing fails.
+where the extension reflects the underlying v2 config representation.
The :ref:`Bootstrap ` message is the root of the
configuration. A key concept in the :ref:`Bootstrap `
@@ -98,7 +95,14 @@ A minimal fully static bootstrap config is provided below:
connect_timeout: 0.25s
type: STATIC
lb_policy: ROUND_ROBIN
- hosts: [{ socket_address: { address: 127.0.0.2, port_value: 1234 }}]
+ load_assignment:
+ endpoints:
+ - lb_endpoints:
+ - endpoint:
+ address:
+ socket_address:
+ address: 127.0.0.1
+ port_value: 1234
Mostly static with dynamic EDS
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -145,13 +149,22 @@ on 127.0.0.3:5678 is provided below:
eds_config:
api_config_source:
api_type: GRPC
- cluster_names: [xds_cluster]
+ grpc_services:
+ envoy_grpc:
+ cluster_name: xds_cluster
- name: xds_cluster
connect_timeout: 0.25s
type: STATIC
lb_policy: ROUND_ROBIN
http2_protocol_options: {}
- hosts: [{ socket_address: { address: 127.0.0.3, port_value: 5678 }}]
+ load_assignment:
+ endpoints:
+ - lb_endpoints:
+ - endpoint:
+ address:
+ socket_address:
+ address: 127.0.0.1
+ port_value: 5678
Notice above that *xds_cluster* is defined to point Envoy at the management server. Even in
an otherwise completely dynamic configurations, some static resources need to
@@ -198,11 +211,15 @@ below:
lds_config:
api_config_source:
api_type: GRPC
- cluster_names: [xds_cluster]
+ grpc_services:
+ envoy_grpc:
+ cluster_name: xds_cluster
cds_config:
api_config_source:
api_type: GRPC
- cluster_names: [xds_cluster]
+ grpc_services:
+ envoy_grpc:
+ cluster_name: xds_cluster
static_resources:
clusters:
@@ -211,7 +228,14 @@ below:
type: STATIC
lb_policy: ROUND_ROBIN
http2_protocol_options: {}
- hosts: [{ socket_address: { address: 127.0.0.3, port_value: 5678 }}]
+ load_assignment:
+ endpoints:
+ - lb_endpoints:
+ - endpoint:
+ address:
+ socket_address:
+ address: 127.0.0.1
+ port_value: 5678
The management server could respond to LDS requests with:
@@ -236,7 +260,9 @@ The management server could respond to LDS requests with:
config_source:
api_config_source:
api_type: GRPC
- cluster_names: [xds_cluster]
+ grpc_services:
+ envoy_grpc:
+ cluster_name: xds_cluster
http_filters:
- name: envoy.router
@@ -270,7 +296,9 @@ The management server could respond to CDS requests with:
eds_config:
api_config_source:
api_type: GRPC
- cluster_names: [xds_cluster]
+ grpc_services:
+ envoy_grpc:
+ cluster_name: xds_cluster
The management server could respond to EDS requests with:
@@ -324,7 +352,9 @@ for the service definition. This is used by Envoy as a client when
cds_config:
api_config_source:
api_type: GRPC
- cluster_names: [some_xds_cluster]
+ grpc_services:
+ envoy_grpc:
+ cluster_name: some_xds_cluster
is set in the :ref:`dynamic_resources
` of the :ref:`Bootstrap
@@ -341,7 +371,9 @@ for the service definition. This is used by Envoy as a client when
eds_config:
api_config_source:
api_type: GRPC
- cluster_names: [some_xds_cluster]
+ grpc_services:
+ envoy_grpc:
+ cluster_name: some_xds_cluster
is set in the :ref:`eds_cluster_config
` field of the :ref:`Cluster
@@ -358,7 +390,9 @@ for the service definition. This is used by Envoy as a client when
lds_config:
api_config_source:
api_type: GRPC
- cluster_names: [some_xds_cluster]
+ grpc_services:
+ envoy_grpc:
+ cluster_name: some_xds_cluster
is set in the :ref:`dynamic_resources
` of the :ref:`Bootstrap
@@ -376,7 +410,9 @@ for the service definition. This is used by Envoy as a client when
config_source:
api_config_source:
api_type: GRPC
- cluster_names: [some_xds_cluster]
+ grpc_services:
+ envoy_grpc:
+ cluster_name: some_xds_cluster
is set in the :ref:`rds
` field of the :ref:`HttpConnectionManager
@@ -496,7 +532,9 @@ for the service definition. This is used by Envoy as a client when
ads_config:
api_type: GRPC
- cluster_names: [some_ads_cluster]
+ grpc_services:
+ envoy_grpc:
+ cluster_name: some_ads_cluster
is set in the :ref:`dynamic_resources
` of the :ref:`Bootstrap
@@ -526,11 +564,11 @@ the shared ADS channel.
Management Server Unreachability
--------------------------------
-When Envoy instance looses connectivity with the management server, Envoy will latch on to
-the previous configuration while actively retrying in the background to reestablish the
-connection with the management server.
+When an Envoy instance loses connectivity with the management server, Envoy will latch on to
+the previous configuration while actively retrying in the background to reestablish the
+connection with the management server.
-Envoy debug logs the fact that it is not able to establish a connection with the management server
+Envoy debug logs the fact that it is not able to establish a connection with the management server
every time it attempts a connection.
:ref:`upstream_cx_connect_fail ` a cluster level statistic
diff --git a/docs/root/intro/arch_overview/health_checking.rst b/docs/root/intro/arch_overview/health_checking.rst
index 2f44702a06bc4..08812c0b62e56 100644
--- a/docs/root/intro/arch_overview/health_checking.rst
+++ b/docs/root/intro/arch_overview/health_checking.rst
@@ -24,6 +24,44 @@ unhealthy, successes required before marking a host healthy, etc.):
maintenance by setting the specified key to any value and waiting for traffic to drain. See
:ref:`redis_key `.
+.. _arch_overview_per_cluster_health_check_config:
+
+Per cluster member health check config
+--------------------------------------
+
+If active health checking is configured for an upstream cluster, a specific additional configuration
+for each registered member can be specified by setting the
+:ref:`HealthCheckConfig`
+in the :ref:`Endpoint` of an :ref:`LbEndpoint`
+of each defined :ref:`LocalityLbEndpoints` in a
+:ref:`ClusterLoadAssignment`.
+
+An example of setting up :ref:`health check config`
+to set a :ref:`cluster member`'s alternative health check
+:ref:`port` is:
+
+.. code-block:: yaml
+
+ load_assignment:
+ endpoints:
+ - lb_endpoints:
+ - endpoint:
+ health_check_config:
+ port_value: 8080
+ address:
+ socket_address:
+ address: localhost
+ port_value: 80
+
+.. _arch_overview_health_check_logging:
+
+Health check event logging
+--------------------------
+
+A per-healthchecker log of ejection and addition events can optionally be produced by Envoy by
+specifying a log file path in `the HealthCheck config `.
+The log is structured as JSON dumps of `HealthCheckEvent messages `.
+
Passive health checking
-----------------------
diff --git a/docs/root/intro/arch_overview/http_connection_management.rst b/docs/root/intro/arch_overview/http_connection_management.rst
index 4f1d415b48e3d..40415481322fc 100644
--- a/docs/root/intro/arch_overview/http_connection_management.rst
+++ b/docs/root/intro/arch_overview/http_connection_management.rst
@@ -42,3 +42,27 @@ table `. The route table can be specified in one of
* Statically.
* Dynamically via the :ref:`RDS API `.
+
+Timeouts
+--------
+
+Various configurable timeouts apply to an HTTP connection and its constituent streams:
+
+* Connection-level :ref:`idle timeout
+ `:
+ this applies to the idle period where no streams are active.
+* Connection-level :ref:`drain timeout
+ `:
+ this spans between an Envoy originated GOAWAY and connection termination.
+* Stream-level idle timeout: this applies to each individual stream. It may be configured at both
+ the :ref:`connection manager
+ `
+ and :ref:`per-route ` granularity.
+ Header/data/trailer events on the stream reset the idle timeout.
+* Stream-level :ref:`per-route upstream timeout `: this
+ applies to the upstream response, i.e. a maximum bound on the time from the end of the downstream
+ request until the end of the upstream response. This may also be specified at the :ref:`per-retry
+ ` granularity.
+* Stream-level :ref:`per-route gRPC max timeout
+ `: this bounds the upstream timeout and allows
+ the timeout to be overridden via the *grpc-timeout* request header.
diff --git a/docs/root/intro/arch_overview/redis.rst b/docs/root/intro/arch_overview/redis.rst
index b93830edba161..ff7d4696a4abb 100644
--- a/docs/root/intro/arch_overview/redis.rst
+++ b/docs/root/intro/arch_overview/redis.rst
@@ -43,8 +43,10 @@ For filter configuration details, see the Redis proxy filter
The corresponding cluster definition should be configured with
:ref:`ring hash load balancing `.
-If active healthchecking is desired, the cluster should be configured with a
-:ref:`Redis healthcheck `.
+If :ref:`active health checking ` is desired, the
+cluster should be configured with a :ref:`custom health check
+` which is configured as a
+:ref:`Redis health checker `.
If passive healthchecking is desired, also configure
:ref:`outlier detection `.
diff --git a/docs/root/intro/arch_overview/websocket.rst b/docs/root/intro/arch_overview/websocket.rst
index 9d65b3b680e74..35aa8477fc535 100644
--- a/docs/root/intro/arch_overview/websocket.rst
+++ b/docs/root/intro/arch_overview/websocket.rst
@@ -1,7 +1,27 @@
.. _arch_overview_websocket:
-WebSocket support
-=================
+Envoy currently supports two modes of Upgrade behavior, the new generic upgrade mode, and
+the old WebSocket-only TCP proxy mode.
+
+
+New style Upgrade support
+=========================
+
+The new style Upgrade support is intended mainly for WebSocket but may be used for non-WebSocket
+upgrades as well. The new style of upgrades pass both the HTTP headers and the upgrade payload
+through an HTTP filter chain. One may configure the
+:ref:`upgrade_configs `
+in one of two ways. If only the
+`upgrade_type `
+is specified, both the upgrade headers, any request and response body, and WebSocket payload will
+pass through the default HTTP filter chain. To avoid the use of HTTP-only filters for upgrade payload,
+one can set up custom
+`filters `
+for the given upgrade type, up to and including only using the router filter to send the WebSocket
+data upstream.
+
+Old style WebSocket support
+===========================
Envoy supports upgrading a HTTP/1.1 connection to a WebSocket connection.
Connection upgrade will be allowed only if the downstream client
@@ -18,8 +38,8 @@ retries, rate limits and shadowing are not supported for WebSocket routes.
However, prefix rewriting, explicit and automatic host rewriting, traffic
shifting and splitting are supported.
-Connection semantics
---------------------
+Old style Connection semantics
+------------------------------
Even though WebSocket upgrades occur over HTTP/1.1 connections, WebSockets
proxying works similarly to plain TCP proxy, i.e., Envoy does not interpret
diff --git a/docs/root/intro/version_history.rst b/docs/root/intro/version_history.rst
index 3b81f97a9a046..62f92f2070040 100644
--- a/docs/root/intro/version_history.rst
+++ b/docs/root/intro/version_history.rst
@@ -3,15 +3,57 @@ Version history
1.8.0 (Pending)
===============
+* access log: added :ref:`response flag filter `
+ to filter based on the presence of Envoy response flags.
+* access log: added RESPONSE_DURATION and RESPONSE_TX_DURATION.
+* admin: added :http:get:`/hystrix_event_stream` as an endpoint for monitoring Envoy's statistics
+ through `Hystrix dashboard `_.
+* grpc-json: added support for building HTTP response from
+ `google.api.HttpBody `_.
+* cluster: added :ref:`option ` to merge
+ health check/weight/metadata updates within the given duration.
+* config: v1 disabled by default. v1 support remains available until October via flipping --v2-config-only=false.
+* config: v1 disabled by default. v1 support remains available until October via setting :option:`--allow-deprecated-v1-api`.
+* health check: added support for :ref:`custom health check `.
+* health check: added support for :ref:`specifying jitter as a percentage `.
+* health_check: added support for :ref:`health check event logging `.
+* health_check: added support for specifying :ref:`custom request headers `
+ to HTTP health checker requests.
+* http: added support for a per-stream idle timeout. This applies at both :ref:`connection manager
+ `
+ and :ref:`per-route granularity `. The timeout
+ defaults to 5 minutes; if you have other timeouts (e.g. connection idle timeout, upstream
+ response per-retry) that are longer than this in duration, you may want to consider setting a
+ non-default per-stream idle timeout.
+* http: added support for a :ref:`per-stream idle timeout
+ `. This defaults to 5 minutes; if you have
+ other timeouts (e.g. connection idle timeout, upstream response per-retry) that are longer than
+ this in duration, you may want to consider setting a non-default per-stream idle timeout.
+* http: added generic :ref:`Upgrade support
+ `.
+* http: better handling of HEAD requests. Now sending transfer-encoding: chunked rather than content-length: 0.
* http: response filters not applied to early error paths such as http_parser generated 400s.
+* http: :ref:`hpack_table_size ` now controls
+ dynamic table size of both: encoder and decoder.
+* listeners: added the ability to match :ref:`FilterChain ` using
+ :ref:`destination_port ` and
+ :ref:`prefix_ranges `.
+* lua: added :ref:`connection() ` wrapper and *ssl()* API.
+* lua: added :ref:`requestInfo() ` wrapper and *protocol()* API.
+* lua: added :ref:`requestInfo():dynamicMetadata() ` API.
+* proxy_protocol: added support for HAProxy Proxy Protocol v2 (AF_INET/AF_INET6 only).
* ratelimit: added support for :repo:`api/envoy/service/ratelimit/v2/rls.proto`.
Lyft's reference implementation of the `ratelimit `_ service also supports the data-plane-api proto as of v1.1.0.
Envoy can use either proto to send client requests to a ratelimit server with the use of the
:ref:`use_data_plane_proto`
boolean flag in the ratelimit configuration.
Support for the legacy proto :repo:`source/common/ratelimit/ratelimit.proto` is deprecated and will be removed at the start of the 1.9.0 release cycle.
+* router: added ability to set request/response headers at the :ref:`envoy_api_msg_route.Route` level.
* tracing: added support for configuration of :ref:`tracing sampling
`.
+* thrift_proxy: introduced thrift routing, moved configuration to correct location.
+* upstream: added configuration option to the subset load balancer to take locality weights into account when
+ selecting a host from a subset.
1.7.0
===============
@@ -26,6 +68,8 @@ Version history
* access log: improved WebSocket logging.
* admin: added :http:get:`/config_dump` for dumping the current configuration and associated xDS
version information (if applicable).
+* admin: added :http:get:`/clusters?format=json` for outputting a JSON-serialized proto detailing
+ the current status of all clusters.
* admin: added :http:get:`/stats/prometheus` as an alternative endpoint for getting stats in prometheus format.
* admin: added :ref:`/runtime_modify endpoint ` to add or change runtime values.
* admin: mutations must be sent as POSTs, rather than GETs. Mutations include:
@@ -43,8 +87,10 @@ Version history
to close tcp_proxy upstream connections when health checks fail.
* cluster: added :ref:`option ` to drain
connections from hosts after they are removed from service discovery, regardless of health status.
-* cluster: fixed bug preventing the deletion of all endpoints in a priority.
-* debug: added symbolized stack traces (where supported).
+* cluster: fixed bug preventing the deletion of all endpoints in a priority
+* debug: added symbolized stack traces (where supported)
+* ext-authz filter: added support to raw HTTP authorization.
+* ext-authz filter: added support to gRPC responses to carry HTTP attributes.
* grpc: support added for the full set of :ref:`Google gRPC call credentials
`.
* gzip filter: added :ref:`stats ` to the filter.
@@ -61,8 +107,7 @@ Version history
* health check: health check connections can now be configured to use http/2.
* health check http filter: added
:ref:`generic header matching `
- to trigger health check response. Deprecated the
- :ref:`endpoint option `.
+ to trigger health check response. Deprecated the endpoint option.
* http: filters can now optionally support
:ref:`virtual host `,
:ref:`route `, and
@@ -85,8 +130,7 @@ Version history
* listeners: added the ability to match :ref:`FilterChain ` using
:ref:`application_protocols `
(e.g. ALPN for TLS protocol).
-* listeners: :ref:`sni_domains ` has been deprecated/renamed to
- :ref:`server_names `.
+* listeners: `sni_domains` has been deprecated/renamed to :ref:`server_names `.
* listeners: removed restriction on all filter chains having identical filters.
* load balancer: added :ref:`weighted round robin
` support. The round robin
diff --git a/docs/root/operations/admin.rst b/docs/root/operations/admin.rst
index 8a9947b5c6334..38cc64f7fe182 100644
--- a/docs/root/operations/admin.rst
+++ b/docs/root/operations/admin.rst
@@ -32,8 +32,8 @@ modify different aspects of the server:
In the future additional security options will be added to the administration interface. This
work is tracked in `this `_ issue.
- All mutations should be sent as HTTP POST operations. For a limited time, they will continue
- to work with HTTP GET, with a warning logged.
+ All mutations must be sent as HTTP POST operations. When a mutation is requested via GET,
+ the request has no effect, and an HTTP 400 (Invalid Request) response is returned.
.. http:get:: /
@@ -110,6 +110,11 @@ modify different aspects of the server:
*/failed_outlier_check*: The host has failed an outlier detection check.
+.. http:get:: /clusters?format=json
+
+ Dump the */clusters* output in a JSON-serialized proto. See the
+ :ref:`definition ` for more information.
+
.. _operations_admin_interface_config_dump:
.. http:get:: /config_dump
@@ -314,3 +319,39 @@ The fields are:
Use the /runtime_modify endpoint with care. Changes are effectively immediately. It is
**critical** that the admin interface is :ref:`properly secured
`.
+
+ .. _operations_admin_interface_hystrix_event_stream:
+
+.. http:get:: /hystrix_event_stream
+
+ This endpoint is intended to be used as the stream source for
+ `Hystrix dashboard `_.
+ A GET to this endpoint will trigger a stream of statistics from Envoy in
+ `text/event-stream `_
+ format, as expected by the Hystrix dashboard.
+
+ If invoked from a browser or a terminal, the response will be shown as a continuous stream,
+ sent in intervals defined by the :ref:`Bootstrap `
+ :ref:`stats_flush_interval `.
+
+ This handler is enabled only when a Hystrix sink is enabled in the config file as documented
+ :ref:`here `.
+
+ As Envoy's and Hystrix resiliency mechanisms differ, some of the statistics shown in the dashboard
+ had to be adapted:
+
+ * **Thread pool rejections** - Generally similar to what's called short circuited in Envoy,
+ and counted by *upstream_rq_pending_overflow*, although the term thread pool is not accurate for
+ Envoy. Both in Hystrix and Envoy, the result is rejected requests which are not passed upstream.
+ * **circuit breaker status (closed or open)** - Since in Envoy, a circuit is opened based on the
+ current number of connections/requests in queue, there is no sleeping window for circuit breaker,
+ circuit open/closed is momentary. Hence, we set the circuit breaker status to "forced closed".
+ * **Short-circuited (rejected)** - The term exists in Envoy but refers to requests not sent because
+ of passing a limit (queue or connections), while in Hystrix it refers to requests not sent because
+ of high percentage of service unavailable responses during some time frame.
+ In Envoy, service unavailable response will cause **outlier detection** - removing a node off the
+ load balancer pool, but requests are not rejected as a result. Therefore, this counter is always
+ set to '0'.
+ * Latency information is currently unavailable.
+
+
diff --git a/docs/root/operations/cli.rst b/docs/root/operations/cli.rst
index e50e99462e32a..c6081452c6770 100644
--- a/docs/root/operations/cli.rst
+++ b/docs/root/operations/cli.rst
@@ -11,8 +11,8 @@ following are the command line options that Envoy supports.
*(optional)* The path to the v1 or v2 :ref:`JSON/YAML/proto3 configuration
file `. If this flag is missing, :option:`--config-yaml` is required.
This will be parsed as a :ref:`v2 bootstrap configuration file
- ` and on failure, subject to
- :option:`--v2-config-only`, will be considered as a :ref:`v1 JSON
+ `. On failure, if :option:`--allow-deprecated-v1-api`
+ is set, it will be considered as a :ref:`v1 JSON
configuration file `. For v2 configuration files, valid
extensions are ``.json``, ``.yaml``, ``.pb`` and ``.pb_text``, which indicate
JSON, YAML, `binary proto3
@@ -34,9 +34,14 @@ following are the command line options that Envoy supports.
.. option:: --v2-config-only
+ *(deprecated)* This flag used to allow opting into only using a
+ :ref:`v2 bootstrap configuration file `. This is now set by default.
+
+.. option:: --allow-deprecated-v1-api
+
*(optional)* This flag determines whether the configuration file should only
be parsed as a :ref:`v2 bootstrap configuration file
- `. If false (default), when a v2 bootstrap
+ `. If specified, when a v2 bootstrap
config parse fails, a second attempt to parse the config as a :ref:`v1 JSON
configuration file ` will be made.
diff --git a/examples/grpc-bridge/Dockerfile-python b/examples/grpc-bridge/Dockerfile-python
index a807a4d1c95ac..02aa308c2acb1 100644
--- a/examples/grpc-bridge/Dockerfile-python
+++ b/examples/grpc-bridge/Dockerfile-python
@@ -3,7 +3,7 @@ FROM envoyproxy/envoy:latest
RUN apt-get update
RUN apt-get -q install -y python-dev \
python-pip
-RUN pip install -q grpcio requests
+RUN pip install -q grpcio protobuf requests
ADD ./client /client
RUN chmod a+x /client/client.py
RUN mkdir /var/log/envoy/
diff --git a/examples/grpc-bridge/config/s2s-grpc-envoy.yaml b/examples/grpc-bridge/config/s2s-grpc-envoy.yaml
index 31ccd3ff0fe41..baaac35e57ea2 100644
--- a/examples/grpc-bridge/config/s2s-grpc-envoy.yaml
+++ b/examples/grpc-bridge/config/s2s-grpc-envoy.yaml
@@ -21,7 +21,7 @@ static_resources:
prefix: "/"
headers:
- name: content-type
- value: application/grpc
+ exact_match: application/grpc
route:
cluster: local_service_grpc
http_filters:
diff --git a/include/envoy/api/os_sys_calls.h b/include/envoy/api/os_sys_calls.h
index ae6a96c9e5588..9378b35e3cc05 100644
--- a/include/envoy/api/os_sys_calls.h
+++ b/include/envoy/api/os_sys_calls.h
@@ -1,5 +1,6 @@
#pragma once
+#include
#include // for mode_t
#include // for sockaddr
#include
@@ -13,6 +14,22 @@
namespace Envoy {
namespace Api {
+/**
+ * SysCallResult holds the rc and errno values resulting from a system call.
+ */
+struct SysCallResult {
+
+ /**
+ * The return code from the system call.
+ */
+ int rc_;
+
+ /**
+ * The errno value as captured after the system call.
+ */
+ int errno_;
+};
+
class OsSysCalls {
public:
virtual ~OsSysCalls() {}
@@ -22,6 +39,11 @@ class OsSysCalls {
*/
virtual int bind(int sockfd, const sockaddr* addr, socklen_t addrlen) PURE;
+ /**
+ * @see ioctl (man 2 ioctl)
+ */
+ virtual int ioctl(int sockfd, unsigned long int request, void* argp) PURE;
+
/**
* Open file by full_path with given flags and mode.
* @return file descriptor.
@@ -90,6 +112,11 @@ class OsSysCalls {
* @see man 2 getsockopt
*/
virtual int getsockopt(int sockfd, int level, int optname, void* optval, socklen_t* optlen) PURE;
+
+ /**
+ * @see man 2 socket
+ */
+ virtual int socket(int domain, int type, int protocol) PURE;
};
typedef std::unique_ptr OsSysCallsPtr;
diff --git a/include/envoy/buffer/BUILD b/include/envoy/buffer/BUILD
index 084ed91ebf24f..01dcb26234196 100644
--- a/include/envoy/buffer/BUILD
+++ b/include/envoy/buffer/BUILD
@@ -11,4 +11,5 @@ envoy_package()
envoy_cc_library(
name = "buffer_interface",
hdrs = ["buffer.h"],
+ deps = ["//include/envoy/api:os_sys_calls_interface"],
)
diff --git a/include/envoy/buffer/buffer.h b/include/envoy/buffer/buffer.h
index ae9cff3785ae7..cfe8611e0848a 100644
--- a/include/envoy/buffer/buffer.h
+++ b/include/envoy/buffer/buffer.h
@@ -5,6 +5,7 @@
#include
#include
+#include "envoy/api/os_sys_calls.h"
#include "envoy/common/pure.h"
namespace Envoy {
@@ -142,9 +143,10 @@ class Instance {
* Read from a file descriptor directly into the buffer.
* @param fd supplies the descriptor to read from.
* @param max_length supplies the maximum length to read.
- * @return the number of bytes read or -1 if there was an error.
+ * @return a Api::SysCallResult with rc_ = the number of bytes read if successful, or rc_ = -1
+ * for failure. If the call is successful, errno_ shouldn't be used.
*/
- virtual int read(int fd, uint64_t max_length) PURE;
+ virtual Api::SysCallResult read(int fd, uint64_t max_length) PURE;
/**
* Reserve space in the buffer.
@@ -164,12 +166,19 @@ class Instance {
*/
virtual ssize_t search(const void* data, uint64_t size, size_t start) const PURE;
+ /**
+ * Constructs a flattened string from a buffer.
+ * @return the flattened string.
+ */
+ virtual std::string toString() const PURE;
+
/**
* Write the buffer out to a file descriptor.
* @param fd supplies the descriptor to write to.
- * @return the number of bytes written or -1 if there was an error.
+ * @return a Api::SysCallResult with rc_ = the number of bytes written if successful, or rc_ = -1
+ * for failure. If the call is successful, errno_ shouldn't be used.
*/
- virtual int write(int fd) PURE;
+ virtual Api::SysCallResult write(int fd) PURE;
};
typedef std::unique_ptr InstancePtr;
diff --git a/include/envoy/common/BUILD b/include/envoy/common/BUILD
index dee9f10b4479b..507c15d284fe9 100644
--- a/include/envoy/common/BUILD
+++ b/include/envoy/common/BUILD
@@ -37,3 +37,8 @@ envoy_cc_library(
name = "callback",
hdrs = ["callback.h"],
)
+
+envoy_cc_library(
+ name = "backoff_strategy_interface",
+ hdrs = ["backoff_strategy.h"],
+)
diff --git a/include/envoy/common/backoff_strategy.h b/include/envoy/common/backoff_strategy.h
new file mode 100644
index 0000000000000..63114e047fc27
--- /dev/null
+++ b/include/envoy/common/backoff_strategy.h
@@ -0,0 +1,25 @@
+#pragma once
+
+#include "envoy/common/pure.h"
+
+namespace Envoy {
+/**
+ * Generic interface for all backoff strategy implementations.
+ */
+class BackOffStrategy {
+public:
+ virtual ~BackOffStrategy() {}
+
+ /**
+ * @return the next backoff interval in milliseconds.
+ */
+ virtual uint64_t nextBackOffMs() PURE;
+
+ /**
+ * Resets the intervals so that the back off intervals can start again.
+ */
+ virtual void reset() PURE;
+};
+
+typedef std::unique_ptr BackOffStrategyPtr;
+} // namespace Envoy
\ No newline at end of file
diff --git a/include/envoy/http/header_map.h b/include/envoy/http/header_map.h
index 7bdf89436a8d8..c85d77b53e061 100644
--- a/include/envoy/http/header_map.h
+++ b/include/envoy/http/header_map.h
@@ -7,6 +7,7 @@
#include
#include
#include
+#include
#include "envoy/common/pure.h"
@@ -106,31 +107,6 @@ class HeaderString {
*/
bool find(const char* str) const { return strstr(c_str(), str); }
- /**
- * HeaderString is in token list form, each token separated by commas or whitespace,
- * see https://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.1 for more information,
- * header field value's case sensitivity depends on each header.
- * @return whether contains token in case insensitive manner.
- */
- bool caseInsensitiveContains(const char* token) const {
- // Avoid dead loop if token argument is empty.
- const int n = strlen(token);
- if (n == 0) {
- return false;
- }
-
- // Find token substring, skip if it's partial of other token.
- const char* tokens = c_str();
- for (const char* p = tokens; (p = strcasestr(p, token)); p += n) {
- if ((p == tokens || *(p - 1) == ' ' || *(p - 1) == ',') &&
- (*(p + n) == '\0' || *(p + n) == ' ' || *(p + n) == ',')) {
- return true;
- }
- }
-
- return false;
- }
-
/**
* Set the value of the string by copying data into it. This overwrites any existing string.
*/
@@ -281,6 +257,7 @@ class HeaderEntry {
HEADER_FUNC(KeepAlive) \
HEADER_FUNC(LastModified) \
HEADER_FUNC(Method) \
+ HEADER_FUNC(NoChunks) \
HEADER_FUNC(Origin) \
HEADER_FUNC(OtSpanContext) \
HEADER_FUNC(Path) \
@@ -498,5 +475,10 @@ class HeaderMap {
typedef std::unique_ptr HeaderMapPtr;
+/**
+ * Convenient container type for storing Http::LowerCaseString and std::string key/value pairs.
+ */
+typedef std::vector> HeaderVector;
+
} // namespace Http
} // namespace Envoy
diff --git a/include/envoy/network/BUILD b/include/envoy/network/BUILD
index 31781824fa098..5f7e0ebd06acc 100644
--- a/include/envoy/network/BUILD
+++ b/include/envoy/network/BUILD
@@ -11,6 +11,7 @@ envoy_package()
envoy_cc_library(
name = "address_interface",
hdrs = ["address.h"],
+ deps = ["//include/envoy/api:os_sys_calls_interface"],
)
envoy_cc_library(
diff --git a/include/envoy/network/address.h b/include/envoy/network/address.h
index 949747484018f..13004a1f19b03 100644
--- a/include/envoy/network/address.h
+++ b/include/envoy/network/address.h
@@ -8,6 +8,7 @@
#include
#include
+#include "envoy/api/os_sys_calls.h"
#include "envoy/common/pure.h"
#include "absl/numeric/int128.h"
@@ -128,19 +129,19 @@ class Instance {
* Bind a socket to this address. The socket should have been created with a call to socket() on
* an Instance of the same address family.
* @param fd supplies the platform socket handle.
- * @return 0 for success and -1 for failure. The error code associated with a failure will
- * be accessible in a plaform dependent fashion (e.g. errno for Unix platforms).
+ * @return a Api::SysCallResult with rc_ = 0 for success and rc_ = -1 for failure. If the call
+ * is successful, errno_ shouldn't be used.
*/
- virtual int bind(int fd) const PURE;
+ virtual Api::SysCallResult bind(int fd) const PURE;
/**
* Connect a socket to this address. The socket should have been created with a call to socket()
* on this object.
* @param fd supplies the platform socket handle.
- * @return 0 for success and -1 for failure. The error code associated with a failure will
- * be accessible in a plaform dependent fashion (e.g. errno for Unix platforms).
+ * @return a Api::SysCallResult with rc_ = 0 for success and rc_ = -1 for failure. If the call
+ * is successful, errno_ shouldn't be used.
*/
- virtual int connect(int fd) const PURE;
+ virtual Api::SysCallResult connect(int fd) const PURE;
/**
* @return the IP address information IFF type() == Type::Ip, otherwise nullptr.
@@ -150,9 +151,8 @@ class Instance {
/**
* Create a socket for this address.
* @param type supplies the socket type to create.
- * @return the file descriptor naming the socket for success and -1 for failure. The error
- * code associated with a failure will be accessible in a plaform dependent fashion (e.g.
- * errno for Unix platforms).
+ * @return the file descriptor naming the socket. In case of a failure, the program would be
+ * aborted.
*/
virtual int socket(SocketType type) const PURE;
diff --git a/include/envoy/network/connection.h b/include/envoy/network/connection.h
index 84e0395b3cf47..b7c57f06e4c93 100644
--- a/include/envoy/network/connection.h
+++ b/include/envoy/network/connection.h
@@ -186,6 +186,11 @@ class Connection : public Event::DeferredDeletable, public FilterManager {
*/
virtual const Ssl::Connection* ssl() const PURE;
+ /**
+ * @return requested server name (e.g. SNI in TLS), if any.
+ */
+ virtual absl::string_view requestedServerName() const PURE;
+
/**
* @return State the current state of the connection.
*/
diff --git a/include/envoy/registry/registry.h b/include/envoy/registry/registry.h
index d9890a79456df..727934540c9a6 100644
--- a/include/envoy/registry/registry.h
+++ b/include/envoy/registry/registry.h
@@ -82,7 +82,7 @@ template class FactoryRegistry {
}
factories().emplace(factory.name(), &factory);
- RELEASE_ASSERT(getFactory(factory.name()) == &factory);
+ RELEASE_ASSERT(getFactory(factory.name()) == &factory, "");
return displaced;
}
@@ -92,7 +92,7 @@ template class FactoryRegistry {
*/
static void removeFactoryForTest(const std::string& name) {
auto result = factories().erase(name);
- RELEASE_ASSERT(result == 1);
+ RELEASE_ASSERT(result == 1, "");
}
/**
diff --git a/include/envoy/request_info/request_info.h b/include/envoy/request_info/request_info.h
index b3bb71248e721..ee21a3ae7e8b1 100644
--- a/include/envoy/request_info/request_info.h
+++ b/include/envoy/request_info/request_info.h
@@ -64,6 +64,13 @@ class RequestInfo {
*/
virtual void setResponseFlag(ResponseFlag response_flag) PURE;
+ /**
+ * @param response_flags the response_flags to intersect with.
+ * @return true if the intersection of the response_flags argument and the currently set response
+ * flags is non-empty.
+ */
+ virtual bool intersectResponseFlags(uint64_t response_flags) const PURE;
+
/**
* @param host the selected upstream host for the request.
*/
@@ -216,7 +223,12 @@ class RequestInfo {
/**
* @return whether response flag is set or not.
*/
- virtual bool getResponseFlag(ResponseFlag response_flag) const PURE;
+ virtual bool hasResponseFlag(ResponseFlag response_flag) const PURE;
+
+ /**
+ * @return whether any response flag is set or not.
+ */
+ virtual bool hasAnyResponseFlag() const PURE;
/**
* @return upstream host description.
diff --git a/include/envoy/router/rds.h b/include/envoy/router/rds.h
index 827740aee3fca..8ff43f213f4f5 100644
--- a/include/envoy/router/rds.h
+++ b/include/envoy/router/rds.h
@@ -45,7 +45,7 @@ class RouteConfigProvider {
virtual SystemTime lastUpdated() const PURE;
};
-typedef std::shared_ptr<RouteConfigProvider> RouteConfigProviderSharedPtr;
+typedef std::unique_ptr<RouteConfigProvider> RouteConfigProviderPtr;
} // namespace Router
} // namespace Envoy
diff --git a/include/envoy/router/route_config_provider_manager.h b/include/envoy/router/route_config_provider_manager.h
index 98b8db5a937ea..daacf2e8e6deb 100644
--- a/include/envoy/router/route_config_provider_manager.h
+++ b/include/envoy/router/route_config_provider_manager.h
@@ -27,43 +27,29 @@ class RouteConfigProviderManager {
virtual ~RouteConfigProviderManager() {}
/**
- * Get a RouteConfigProviderSharedPtr for a route from RDS. Ownership of the RouteConfigProvider
- * is shared by all the HttpConnectionManagers who own a RouteConfigProviderSharedPtr. The
- * RouteConfigProviderManager holds weak_ptrs to the RouteConfigProviders. Clean up of the weak
- * ptrs happen from the destructor of the RouteConfigProvider. This function creates a
- * RouteConfigProvider if there isn't one with the same (route_config_name, cluster) already.
- * Otherwise, it returns a RouteConfigProviderSharedPtr created from the manager held weak_ptr.
+ * Get a RouteConfigProviderPtr for a route from RDS. Ownership of the RouteConfigProvider belongs
+ * to the HttpConnectionManager that calls this function. The RouteConfigProviderManager holds raw
+ * pointers to the RouteConfigProviders. Clean up of the pointers happens from the destructor of
+ * the RouteConfigProvider. This method creates a RouteConfigProvider which may share the
+ * underlying RDS subscription with the same (route_config_name, cluster).
* @param rds supplies the proto configuration of an RDS-configured RouteConfigProvider.
* @param factory_context is the context to use for the route config provider.
* @param stat_prefix supplies the stat_prefix to use for the provider stats.
*/
- virtual RouteConfigProviderSharedPtr getRdsRouteConfigProvider(
+ virtual RouteConfigProviderPtr createRdsRouteConfigProvider(
const envoy::config::filter::network::http_connection_manager::v2::Rds& rds,
Server::Configuration::FactoryContext& factory_context, const std::string& stat_prefix) PURE;
/**
* Get a RouteConfigSharedPtr for a statically defined route. Ownership is as described for
- * getRdsRouteConfigProvider above. Unlike getRdsRouteConfigProvider(), this method always creates
- * a new RouteConfigProvider.
+ * createRdsRouteConfigProvider above. This method always creates a new RouteConfigProvider.
* @param route_config supplies the RouteConfiguration for this route
* @param runtime supplies the runtime loader.
* @param cm supplies the ClusterManager.
*/
- virtual RouteConfigProviderSharedPtr
- getStaticRouteConfigProvider(const envoy::api::v2::RouteConfiguration& route_config,
- Server::Configuration::FactoryContext& factory_context) PURE;
-
- /**
- * @return std::vector<RouteConfigProviderSharedPtr> a list of all the
- * dynamic (RDS) RouteConfigProviders currently loaded.
- */
- virtual std::vector<RouteConfigProviderSharedPtr> getRdsRouteConfigProviders() PURE;
-
- /**
- * @return std::vector<RouteConfigProviderSharedPtr> a list of all the
- * static RouteConfigProviders currently loaded.
- */
- virtual std::vector<RouteConfigProviderSharedPtr> getStaticRouteConfigProviders() PURE;
+ virtual RouteConfigProviderPtr
+ createStaticRouteConfigProvider(const envoy::api::v2::RouteConfiguration& route_config,
+ Server::Configuration::FactoryContext& factory_context) PURE;
};
} // namespace Router
diff --git a/include/envoy/router/router.h b/include/envoy/router/router.h
index 8699f95d3f137..a69c402251910 100644
--- a/include/envoy/router/router.h
+++ b/include/envoy/router/router.h
@@ -99,6 +99,11 @@ class CorsPolicy {
*/
virtual const std::list& allowOrigins() const PURE;
+ /*
+ * @return std::list<std::regex>& regexes that match allowed origins.
+ */
+ virtual const std::list<std::regex>& allowOriginRegexes() const PURE;
+
/**
* @return std::string access-control-allow-methods value.
*/
@@ -467,6 +472,12 @@ class RouteEntry : public ResponseEntry {
*/
virtual std::chrono::milliseconds timeout() const PURE;
+ /**
+ * @return absl::optional<std::chrono::milliseconds> the route's idle timeout. Zero indicates a
+ * disabled idle timeout, while nullopt indicates deference to the global timeout.
+ */
+ virtual absl::optional<std::chrono::milliseconds> idleTimeout() const PURE;
+
/**
* @return absl::optional the maximum allowed timeout value derived
* from 'grpc-timeout' header of a gRPC request. Non-present value disables use of 'grpc-timeout'
diff --git a/include/envoy/server/BUILD b/include/envoy/server/BUILD
index fae78b50ab2ad..42acd80d8f161 100644
--- a/include/envoy/server/BUILD
+++ b/include/envoy/server/BUILD
@@ -111,6 +111,7 @@ envoy_cc_library(
hdrs = ["options.h"],
deps = [
"//include/envoy/network:address_interface",
+ "//include/envoy/stats:stats_interface",
],
)
@@ -180,3 +181,27 @@ envoy_cc_library(
"//source/common/protobuf",
],
)
+
+envoy_cc_library(
+ name = "resource_monitor_interface",
+ hdrs = ["resource_monitor.h"],
+ deps = [
+ "//source/common/protobuf",
+ ],
+)
+
+envoy_cc_library(
+ name = "resource_monitor_config_interface",
+ hdrs = ["resource_monitor_config.h"],
+ deps = [
+ ":resource_monitor_interface",
+ "//include/envoy/event:dispatcher_interface",
+ ],
+)
+
+envoy_cc_library(
+ name = "overload_manager_interface",
+ hdrs = ["overload_manager.h"],
+ deps = [
+ ],
+)
diff --git a/include/envoy/server/admin.h b/include/envoy/server/admin.h
index 2ed9f3e45e61f..00dca9d31cf5f 100644
--- a/include/envoy/server/admin.h
+++ b/include/envoy/server/admin.h
@@ -37,7 +37,7 @@ class AdminStream {
* @return Http::StreamDecoderFilterCallbacks& to be used by the handler to get HTTP request data
* for streaming.
*/
- virtual const Http::StreamDecoderFilterCallbacks& getDecoderFilterCallbacks() const PURE;
+ virtual Http::StreamDecoderFilterCallbacks& getDecoderFilterCallbacks() const PURE;
/**
* @return Http::HeaderMap& to be used by handler to parse header information sent with the
@@ -54,7 +54,7 @@ class AdminStream {
*/
#define MAKE_ADMIN_HANDLER(X) \
[this](absl::string_view path_and_query, Http::HeaderMap& response_headers, \
- Buffer::Instance& data, AdminStream& admin_stream) -> Http::Code { \
+ Buffer::Instance& data, Server::AdminStream& admin_stream) -> Http::Code { \
return X(path_and_query, response_headers, data, admin_stream); \
}
diff --git a/include/envoy/server/filter_config.h b/include/envoy/server/filter_config.h
index 6fc02e5980bcd..46d1a265cb231 100644
--- a/include/envoy/server/filter_config.h
+++ b/include/envoy/server/filter_config.h
@@ -208,7 +208,7 @@ class NamedNetworkFilterConfigFactory {
FactoryContext& context) {
UNREFERENCED_PARAMETER(config);
UNREFERENCED_PARAMETER(context);
- NOT_IMPLEMENTED;
+ NOT_IMPLEMENTED_GCOVR_EXCL_LINE;
}
/**
@@ -260,7 +260,7 @@ class NamedHttpFilterConfigFactory {
UNREFERENCED_PARAMETER(config);
UNREFERENCED_PARAMETER(stat_prefix);
UNREFERENCED_PARAMETER(context);
- NOT_IMPLEMENTED;
+ NOT_IMPLEMENTED_GCOVR_EXCL_LINE;
}
/**
diff --git a/include/envoy/server/health_checker_config.h b/include/envoy/server/health_checker_config.h
index 68af121915baa..089c65f4d3977 100644
--- a/include/envoy/server/health_checker_config.h
+++ b/include/envoy/server/health_checker_config.h
@@ -31,6 +31,12 @@ class HealthCheckerFactoryContext {
* for all singleton processing.
*/
virtual Event::Dispatcher& dispatcher() PURE;
+
+ /*
+ * @return Upstream::HealthCheckEventLoggerPtr the health check event logger for the
+ * created health checkers. This function may not be idempotent.
+ */
+ virtual Upstream::HealthCheckEventLoggerPtr eventLogger() PURE;
};
/**
@@ -63,4 +69,4 @@ class CustomHealthCheckerFactory {
} // namespace Configuration
} // namespace Server
-} // namespace Envoy
\ No newline at end of file
+} // namespace Envoy
diff --git a/include/envoy/server/options.h b/include/envoy/server/options.h
index 0d5a62e084913..88d43d8c6bf19 100644
--- a/include/envoy/server/options.h
+++ b/include/envoy/server/options.h
@@ -6,6 +6,7 @@
#include "envoy/common/pure.h"
#include "envoy/network/address.h"
+#include "envoy/stats/stats.h"
#include "spdlog/spdlog.h"
@@ -149,10 +150,10 @@ class Options {
virtual uint64_t maxStats() const PURE;
/**
- * @return uint64_t the maximum name length of the name field in
+ * @return StatsOptions& the max stat name / suffix lengths for stats.
* router/cluster/listener.
*/
- virtual uint64_t maxObjNameLength() const PURE;
+ virtual const Stats::StatsOptions& statsOptions() const PURE;
/**
* @return bool indicating whether the hot restart functionality has been disabled via cli flags.
diff --git a/include/envoy/server/overload_manager.h b/include/envoy/server/overload_manager.h
new file mode 100644
index 0000000000000..40cf4cf3e969f
--- /dev/null
+++ b/include/envoy/server/overload_manager.h
@@ -0,0 +1,46 @@
+#pragma once
+
+#include "envoy/common/pure.h"
+
+namespace Envoy {
+namespace Server {
+
+enum class OverloadActionState {
+ /**
+ * Indicates that an overload action is active because at least one of its triggers has fired.
+ */
+ Active,
+ /**
+ * Indicates that an overload action is inactive because none of its triggers have fired.
+ */
+ Inactive
+};
+
+/**
+ * Callback invoked when an overload action changes state.
+ */
+typedef std::function<void(OverloadActionState)> OverloadActionCb;
+
+/**
+ * The OverloadManager protects the Envoy instance from being overwhelmed by client
+ * requests. It monitors a set of resources and notifies registered listeners if
+ * configured thresholds for those resources have been exceeded.
+ */
+class OverloadManager {
+public:
+ virtual ~OverloadManager() {}
+
+ /**
+ * Register a callback to be invoked when the specified overload action changes state
+ * (ie. becomes activated or inactivated). Must be called before the start method is called.
+ * @param action const std::string& the name of the overload action to register for
+ * @param dispatcher Event::Dispatcher& the dispatcher on which callbacks will be posted
+ * @param callback OverloadActionCb the callback to post when the overload action
+ * changes state
+ */
+ virtual void registerForAction(const std::string& action, Event::Dispatcher& dispatcher,
+ OverloadActionCb callback) PURE;
+};
+
+} // namespace Server
+} // namespace Envoy
diff --git a/include/envoy/server/resource_monitor.h b/include/envoy/server/resource_monitor.h
new file mode 100644
index 0000000000000..3fd01b52ac3b7
--- /dev/null
+++ b/include/envoy/server/resource_monitor.h
@@ -0,0 +1,52 @@
+#pragma once
+
+#include <memory>
+
+#include "envoy/common/exception.h"
+#include "envoy/common/pure.h"
+
+namespace Envoy {
+namespace Server {
+
+// Struct for reporting usage for a particular resource.
+struct ResourceUsage {
+ // Fraction of (resource usage)/(resource limit).
+ double resource_pressure_;
+};
+
+class ResourceMonitor {
+public:
+ virtual ~ResourceMonitor() {}
+
+ /**
+ * Notifies caller of updated resource usage.
+ */
+ class Callbacks {
+ public:
+ virtual ~Callbacks() {}
+
+ /**
+ * Called when the request for updated resource usage succeeds.
+ * @param usage the updated resource usage
+ */
+ virtual void onSuccess(const ResourceUsage& usage) PURE;
+
+ /**
+ * Called when the request for updated resource usage fails.
+ * @param error the exception caught when trying to get updated resource usage
+ */
+ virtual void onFailure(const EnvoyException& error) PURE;
+ };
+
+ /**
+ * Recalculate resource usage.
+ * This must be non-blocking so if RPCs need to be made they should be
+ * done asynchronously and invoke the callback when finished.
+ */
+ virtual void updateResourceUsage(Callbacks& callbacks) PURE;
+};
+
+typedef std::unique_ptr<ResourceMonitor> ResourceMonitorPtr;
+
+} // namespace Server
+} // namespace Envoy
diff --git a/include/envoy/server/resource_monitor_config.h b/include/envoy/server/resource_monitor_config.h
new file mode 100644
index 0000000000000..ceea1a685c9b1
--- /dev/null
+++ b/include/envoy/server/resource_monitor_config.h
@@ -0,0 +1,60 @@
+#pragma once
+
+#include "envoy/common/pure.h"
+#include "envoy/event/dispatcher.h"
+#include "envoy/server/resource_monitor.h"
+
+#include "common/protobuf/protobuf.h"
+
+namespace Envoy {
+namespace Server {
+namespace Configuration {
+
+class ResourceMonitorFactoryContext {
+public:
+ virtual ~ResourceMonitorFactoryContext() {}
+
+ /**
+ * @return Event::Dispatcher& the main thread's dispatcher. This dispatcher should be used
+ * for all singleton processing.
+ */
+ virtual Event::Dispatcher& dispatcher() PURE;
+};
+
+/**
+ * Implemented by each resource monitor and registered via Registry::registerFactory()
+ * or the convenience class RegistryFactory.
+ */
+class ResourceMonitorFactory {
+public:
+ virtual ~ResourceMonitorFactory() {}
+
+ /**
+ * Create a particular resource monitor implementation.
+ * @param config const ProtoBuf::Message& supplies the config for the resource monitor
+ * implementation.
+ * @param context ResourceMonitorFactoryContext& supplies the resource monitor's context.
+ * @return ResourceMonitorPtr the resource monitor instance. Should not be nullptr.
+ * @throw EnvoyException if the implementation is unable to produce an instance with
+ * the provided parameters.
+ */
+ virtual ResourceMonitorPtr createResourceMonitor(const Protobuf::Message& config,
+ ResourceMonitorFactoryContext& context) PURE;
+
+ /**
+ * @return ProtobufTypes::MessagePtr create empty config proto message. The resource monitor
+ * config, which arrives in an opaque google.protobuf.Struct message, will be converted
+ * to JSON and then parsed into this empty proto.
+ */
+ virtual ProtobufTypes::MessagePtr createEmptyConfigProto() PURE;
+
+ /**
+ * @return std::string the identifying name for a particular implementation of a resource
+ * monitor produced by the factory.
+ */
+ virtual std::string name() PURE;
+};
+
+} // namespace Configuration
+} // namespace Server
+} // namespace Envoy
diff --git a/include/envoy/ssl/connection.h b/include/envoy/ssl/connection.h
index cd8ebe55cf06a..98f8506497b8d 100644
--- a/include/envoy/ssl/connection.h
+++ b/include/envoy/ssl/connection.h
@@ -38,6 +38,12 @@ class Connection {
*/
virtual const std::string& sha256PeerCertificateDigest() const PURE;
+ /**
+ * @return std::string the serial number field of the peer certificate. Returns "" if
+ * there is no peer certificate, or no serial number.
+ **/
+ virtual std::string serialNumberPeerCertificate() const PURE;
+
/**
* @return std::string the subject field of the peer certificate in RFC 2253 format. Returns "" if
* there is no peer certificate, or no subject.
diff --git a/include/envoy/ssl/context.h b/include/envoy/ssl/context.h
index 5af2fa804fb8a..b3d63bfe45b78 100644
--- a/include/envoy/ssl/context.h
+++ b/include/envoy/ssl/context.h
@@ -32,12 +32,13 @@ class Context {
*/
virtual std::string getCertChainInformation() const PURE;
};
+typedef std::shared_ptr<Context> ContextSharedPtr;
class ClientContext : public virtual Context {};
-typedef std::unique_ptr<ClientContext> ClientContextPtr;
+typedef std::shared_ptr<ClientContext> ClientContextSharedPtr;
class ServerContext : public virtual Context {};
-typedef std::unique_ptr<ServerContext> ServerContextPtr;
+typedef std::shared_ptr<ServerContext> ServerContextSharedPtr;
} // namespace Ssl
} // namespace Envoy
diff --git a/include/envoy/ssl/context_manager.h b/include/envoy/ssl/context_manager.h
index 7489800c99caf..ea63ab9981f05 100644
--- a/include/envoy/ssl/context_manager.h
+++ b/include/envoy/ssl/context_manager.h
@@ -19,13 +19,13 @@ class ContextManager {
/**
* Builds a ClientContext from a ClientContextConfig.
*/
- virtual ClientContextPtr createSslClientContext(Stats::Scope& scope,
- const ClientContextConfig& config) PURE;
+ virtual ClientContextSharedPtr createSslClientContext(Stats::Scope& scope,
+ const ClientContextConfig& config) PURE;
/**
* Builds a ServerContext from a ServerContextConfig.
*/
- virtual ServerContextPtr
+ virtual ServerContextSharedPtr
createSslServerContext(Stats::Scope& scope, const ServerContextConfig& config,
const std::vector& server_names) PURE;
diff --git a/include/envoy/stats/stats.h b/include/envoy/stats/stats.h
index 76e88253548d5..c39e41749699a 100644
--- a/include/envoy/stats/stats.h
+++ b/include/envoy/stats/stats.h
@@ -24,6 +24,43 @@ class Instance;
namespace Stats {
+/**
+ * Struct stored under Server::Options to hold information about the maximum object name length and
+ * maximum stat suffix length of a stat. These have defaults in StatsOptionsImpl, and the maximum
+ * object name length can be overridden. The default initialization is used in IsolatedStatImpl, and
+ * the user-overridden struct is stored in Options.
+ *
+ * As noted in the comment above StatsOptionsImpl in source/common/stats/stats_impl.h, a stat name
+ * often contains both a string whose length is user-defined (cluster_name in the below example),
+ * and a specific statistic name generated by Envoy. To make room for growth on both fronts, we
+ * limit the max allowed length of each separately.
+ *
+ * name / stat name
+ * |----------------------------------------------------------------|
+ * cluster..outlier_detection.ejections_consecutive_5xx
+ * |--------------------------------------| |-----------------------|
+ * object name suffix
+ */
+class StatsOptions {
+public:
+ virtual ~StatsOptions() {}
+
+ /**
+ * The max allowed length of a complete stat name, including suffix.
+ */
+ virtual size_t maxNameLength() const PURE;
+
+ /**
+ * The max allowed length of the object part of a stat name.
+ */
+ virtual size_t maxObjNameLength() const PURE;
+
+ /**
+ * The max allowed length of a stat suffix.
+ */
+ virtual size_t maxStatSuffixLength() const PURE;
+};
+
/**
* General representation of a tag.
*/
@@ -329,6 +366,12 @@ class Scope {
* @return a histogram within the scope's namespace with a particular value type.
*/
virtual Histogram& histogram(const std::string& name) PURE;
+
+ /**
+ * @return a reference to the top-level StatsOptions struct, containing information about the
+ * maximum allowable object name length and stat suffix length.
+ */
+ virtual const Stats::StatsOptions& statsOptions() const PURE;
};
/**
@@ -424,7 +467,7 @@ class StatDataAllocator {
* @return CounterSharedPtr a counter, or nullptr if allocation failed, in which case
* tag_extracted_name and tags are not moved.
*/
- virtual CounterSharedPtr makeCounter(const std::string& name, std::string&& tag_extracted_name,
+ virtual CounterSharedPtr makeCounter(absl::string_view name, std::string&& tag_extracted_name,
std::vector&& tags) PURE;
/**
@@ -434,9 +477,14 @@ class StatDataAllocator {
* @return GaugeSharedPtr a gauge, or nullptr if allocation failed, in which case
* tag_extracted_name and tags are not moved.
*/
- virtual GaugeSharedPtr makeGauge(const std::string& name, std::string&& tag_extracted_name,
+ virtual GaugeSharedPtr makeGauge(absl::string_view name, std::string&& tag_extracted_name,
std::vector&& tags) PURE;
+ /**
+ * Determines whether this stats allocator requires bounded stat-name size.
+ */
+ virtual bool requiresBoundedStatNameSize() const PURE;
+
// TODO(jmarantz): create a parallel mechanism to instantiate histograms. At
// the moment, histograms don't fit the same pattern of counters and gaugaes
// as they are not actually created in the context of a stats allocator.
diff --git a/include/envoy/tcp/BUILD b/include/envoy/tcp/BUILD
new file mode 100644
index 0000000000000..3716bc5cb4f64
--- /dev/null
+++ b/include/envoy/tcp/BUILD
@@ -0,0 +1,19 @@
+licenses(["notice"]) # Apache 2
+
+load(
+ "//bazel:envoy_build_system.bzl",
+ "envoy_cc_library",
+ "envoy_package",
+)
+
+envoy_package()
+
+envoy_cc_library(
+ name = "conn_pool_interface",
+ hdrs = ["conn_pool.h"],
+ deps = [
+ "//include/envoy/buffer:buffer_interface",
+ "//include/envoy/event:deferred_deletable",
+ "//include/envoy/upstream:upstream_interface",
+ ],
+)
diff --git a/include/envoy/tcp/conn_pool.h b/include/envoy/tcp/conn_pool.h
new file mode 100644
index 0000000000000..8237af37fea31
--- /dev/null
+++ b/include/envoy/tcp/conn_pool.h
@@ -0,0 +1,161 @@
+#pragma once
+
+#include <functional>
+#include <memory>
+
+#include "envoy/buffer/buffer.h"
+#include "envoy/common/pure.h"
+#include "envoy/event/deferred_deletable.h"
+#include "envoy/upstream/upstream.h"
+
+namespace Envoy {
+namespace Tcp {
+namespace ConnectionPool {
+
+/**
+ * Handle that allows a pending connection request to be canceled before it is completed.
+ */
+class Cancellable {
+public:
+ virtual ~Cancellable() {}
+
+ /**
+ * Cancel the pending request.
+ */
+ virtual void cancel() PURE;
+};
+
+/**
+ * Reason that a pool connection could not be obtained.
+ */
+enum class PoolFailureReason {
+ // A resource overflowed and policy prevented a new connection from being created.
+ Overflow,
+ // A local connection failure took place while creating a new connection.
+ LocalConnectionFailure,
+ // A remote connection failure took place while creating a new connection.
+ RemoteConnectionFailure,
+ // A timeout occurred while creating a new connection.
+ Timeout,
+};
+
+/*
+ * UpstreamCallbacks for connection pool upstream connection callbacks and data. Note that
+ * onEvent(Connected) is never triggered since the event always occurs before a ConnectionPool
+ * caller is assigned a connection.
+ */
+class UpstreamCallbacks : public Network::ConnectionCallbacks {
+public:
+ virtual ~UpstreamCallbacks() {}
+
+ /*
+ * Invoked when data is delivered from the upstream connection while the connection is owned by a
+ * ConnectionPool::Instance caller.
+ * @param data supplies data from the upstream
+ * @param end_stream whether the data is the last data frame
+ */
+ virtual void onUpstreamData(Buffer::Instance& data, bool end_stream) PURE;
+};
+
+/*
+ * ConnectionData wraps a ClientConnection allocated to a caller. Open ClientConnections are
+ * released back to the pool for re-use when their containing ConnectionData is destroyed.
+ */
+class ConnectionData {
+public:
+ virtual ~ConnectionData() {}
+
+ /**
+ * @return the ClientConnection for the connection.
+ */
+ virtual Network::ClientConnection& connection() PURE;
+
+ /**
+ * Sets the ConnectionPool::UpstreamCallbacks for the connection. If no callback is attached,
+ * data from the upstream will cause the connection to be closed. Callbacks cease when the
+ * connection is released.
+ * @param callback the UpstreamCallbacks to invoke for upstream data
+ */
+ virtual void addUpstreamCallbacks(ConnectionPool::UpstreamCallbacks& callback) PURE;
+};
+
+typedef std::unique_ptr<ConnectionData> ConnectionDataPtr;
+
+/**
+ * Pool callbacks invoked in the context of a newConnection() call, either synchronously or
+ * asynchronously.
+ */
+class Callbacks {
+public:
+ virtual ~Callbacks() {}
+
+ /**
+ * Called when a pool error occurred and no connection could be acquired for making the request.
+ * @param reason supplies the failure reason.
+ * @param host supplies the description of the host that caused the failure. This may be nullptr
+ * if no host was involved in the failure (for example overflow).
+ */
+ virtual void onPoolFailure(PoolFailureReason reason,
+ Upstream::HostDescriptionConstSharedPtr host) PURE;
+
+ /**
+ * Called when a connection is available to process a request/response. Connections may be
+ * released back to the pool for re-use by resetting the ConnectionDataPtr. If the connection is
+ * no longer viable for reuse (e.g. due to some kind of protocol error), the underlying
+ * ClientConnection should be closed to prevent its reuse.
+ *
+ * @param conn supplies the connection data to use.
+ * @param host supplies the description of the host that will carry the request. For logical
+ * connection pools the description may be different each time this is called.
+ */
+ virtual void onPoolReady(ConnectionDataPtr&& conn,
+ Upstream::HostDescriptionConstSharedPtr host) PURE;
+};
+
+/**
+ * An instance of a generic connection pool.
+ */
+class Instance : public Event::DeferredDeletable {
+public:
+ virtual ~Instance() {}
+
+ /**
+ * Called when a connection pool has been drained of pending requests, busy connections, and
+ * ready connections.
+ */
+ typedef std::function<void()> DrainedCb;
+
+ /**
+ * Register a callback that gets called when the connection pool is fully drained. No actual
+ * draining is done. The owner of the connection pool is responsible for not creating any
+ * new connections.
+ */
+ virtual void addDrainedCallback(DrainedCb cb) PURE;
+
+ /**
+ * Actively drain all existing connection pool connections. This method can be used in cases
+ * where the connection pool is not being destroyed, but the caller wishes to make sure that
+ * all new requests take place on a new connection. For example, when a health check failure
+ * occurs.
+ */
+ virtual void drainConnections() PURE;
+
+ /**
+ * Create a new connection on the pool.
+ * @param cb supplies the callbacks to invoke when the connection is ready or has failed. The
+ * callbacks may be invoked immediately within the context of this call if there is a
+ * ready connection or an immediate failure. In this case, the routine returns nullptr.
+ * @return Cancellable* If no connection is ready, the callback is not invoked, and a handle
+ * is returned that can be used to cancel the request. Otherwise, one of the
+ * callbacks is called and the routine returns nullptr. NOTE: Once a callback
+ * is called, the handle is no longer valid and any further cancellation
+ * should be done by resetting the connection.
+ */
+ virtual Cancellable* newConnection(Callbacks& callbacks) PURE;
+};
+
+typedef std::unique_ptr<Instance> InstancePtr;
+
+} // namespace ConnectionPool
+} // namespace Tcp
+} // namespace Envoy
diff --git a/include/envoy/upstream/BUILD b/include/envoy/upstream/BUILD
index 60f3c7ba4bdc9..e59ddb8a392fa 100644
--- a/include/envoy/upstream/BUILD
+++ b/include/envoy/upstream/BUILD
@@ -12,6 +12,7 @@ envoy_cc_library(
name = "cluster_manager_interface",
hdrs = ["cluster_manager.h"],
deps = [
+ ":health_checker_interface",
":load_balancer_interface",
":thread_local_cluster_interface",
":upstream_interface",
@@ -24,6 +25,7 @@ envoy_cc_library(
"//include/envoy/runtime:runtime_interface",
"//include/envoy/secret:secret_manager_interface",
"//include/envoy/server:admin_interface",
+ "//include/envoy/tcp:conn_pool_interface",
"@envoy_api//envoy/api/v2:cds_cc",
"@envoy_api//envoy/config/bootstrap/v2:bootstrap_cc",
],
@@ -32,7 +34,10 @@ envoy_cc_library(
envoy_cc_library(
name = "health_checker_interface",
hdrs = ["health_checker.h"],
- deps = [":upstream_interface"],
+ deps = [
+ ":upstream_interface",
+ "@envoy_api//envoy/data/core/v2alpha:health_check_event_cc",
+ ],
)
envoy_cc_library(
@@ -101,6 +106,8 @@ envoy_cc_library(
"//include/envoy/http:codec_interface",
"//include/envoy/network:connection_interface",
"//include/envoy/network:transport_socket_interface",
+ "//include/envoy/runtime:runtime_interface",
"//include/envoy/ssl:context_interface",
+ "//include/envoy/ssl:context_manager_interface",
],
)
diff --git a/include/envoy/upstream/cluster_manager.h b/include/envoy/upstream/cluster_manager.h
index f2ec6e672953e..07c93b67e48a5 100644
--- a/include/envoy/upstream/cluster_manager.h
+++ b/include/envoy/upstream/cluster_manager.h
@@ -17,6 +17,9 @@
#include "envoy/runtime/runtime.h"
#include "envoy/secret/secret_manager.h"
#include "envoy/server/admin.h"
+#include "envoy/ssl/context_manager.h"
+#include "envoy/tcp/conn_pool.h"
+#include "envoy/upstream/health_checker.h"
#include "envoy/upstream/load_balancer.h"
#include "envoy/upstream/thread_local_cluster.h"
#include "envoy/upstream/upstream.h"
@@ -119,6 +122,18 @@ class ClusterManager {
Http::Protocol protocol,
LoadBalancerContext* context) PURE;
+ /**
+ * Allocate a load balanced TCP connection pool for a cluster. This is *per-thread* so that
+ * callers do not need to worry about per thread synchronization. The load balancing policy that
+ * is used is the one defined on the cluster when it was created.
+ *
+ * Can return nullptr if there is no host available in the cluster or if the cluster does not
+ * exist.
+ */
+ virtual Tcp::ConnectionPool::Instance* tcpConnPoolForCluster(const std::string& cluster,
+ ResourcePriority priority,
+ LoadBalancerContext* context) PURE;
+
/**
* Allocate a load balanced TCP connection for a cluster. The created connection is already
* bound to the correct *per-thread* dispatcher, so no further synchronization is needed. The
@@ -248,12 +263,22 @@ class ClusterManagerFactory {
ResourcePriority priority, Http::Protocol protocol,
const Network::ConnectionSocket::OptionsSharedPtr& options) PURE;
+ /**
+ * Allocate a TCP connection pool for the host. Pools are separated by 'priority' and
+ * 'options->hashKey()', if any.
+ */
+ virtual Tcp::ConnectionPool::InstancePtr
+ allocateTcpConnPool(Event::Dispatcher& dispatcher, HostConstSharedPtr host,
+ ResourcePriority priority,
+ const Network::ConnectionSocket::OptionsSharedPtr& options) PURE;
+
/**
* Allocate a cluster from configuration proto.
*/
virtual ClusterSharedPtr clusterFromProto(const envoy::api::v2::Cluster& cluster,
ClusterManager& cm,
Outlier::EventLoggerSharedPtr outlier_event_logger,
+ AccessLog::AccessLogManager& log_manager,
bool added_via_api) PURE;
/**
@@ -269,5 +294,31 @@ class ClusterManagerFactory {
virtual Secret::SecretManager& secretManager() PURE;
};
+/**
+ * Factory for creating ClusterInfo
+ */
+class ClusterInfoFactory {
+public:
+ virtual ~ClusterInfoFactory() {}
+
+ /**
+ * This method returns a Upstream::ClusterInfoConstSharedPtr
+ *
+ * @param runtime supplies the runtime loader.
+ * @param cluster supplies the owning cluster.
+ * @param bind_config supplies information on binding newly established connections.
+ * @param stats supplies a store for all known counters, gauges, and timers.
+ * @param ssl_context_manager supplies a manager for all SSL contexts.
+ * @param secret_manager supplies a manager for static secrets.
+ * @param added_via_api denotes whether this was added via API.
+ * @return Upstream::ClusterInfoConstSharedPtr
+ */
+ virtual Upstream::ClusterInfoConstSharedPtr
+ createClusterInfo(Runtime::Loader& runtime, const envoy::api::v2::Cluster& cluster,
+ const envoy::api::v2::core::BindConfig& bind_config, Stats::Store& stats,
+ Ssl::ContextManager& ssl_context_manager, Secret::SecretManager& secret_manager,
+ bool added_via_api) PURE;
+};
+
} // namespace Upstream
} // namespace Envoy
diff --git a/include/envoy/upstream/health_checker.h b/include/envoy/upstream/health_checker.h
index e0827eb80e845..e9fafe5cb69cb 100644
--- a/include/envoy/upstream/health_checker.h
+++ b/include/envoy/upstream/health_checker.h
@@ -3,6 +3,7 @@
#include
#include
+#include "envoy/data/core/v2alpha/health_check_event.pb.h"
#include "envoy/upstream/upstream.h"
namespace Envoy {
@@ -59,5 +60,36 @@ typedef std::shared_ptr<HealthChecker> HealthCheckerSharedPtr;
std::ostream& operator<<(std::ostream& out, HealthState state);
std::ostream& operator<<(std::ostream& out, HealthTransition changed_state);
+/**
+ * Sink for health check event logs.
+ */
+class HealthCheckEventLogger {
+public:
+ virtual ~HealthCheckEventLogger() {}
+
+ /**
+ * Log an unhealthy host ejection event.
+ * @param health_checker_type supplies the type of health checker that generated the event.
+ * @param host supplies the host that generated the event.
+ * @param failure_type supplies the type of health check failure.
+ */
+ virtual void
+ logEjectUnhealthy(envoy::data::core::v2alpha::HealthCheckerType health_checker_type,
+ const HostDescriptionConstSharedPtr& host,
+ envoy::data::core::v2alpha::HealthCheckFailureType failure_type) PURE;
+
+ /**
+ * Log a healthy host addition event.
+ * @param health_checker_type supplies the type of health checker that generated the event.
+ * @param host supplies the host that generated the event.
+ * @param healthy_threshold supplies the configured healthy threshold for this health check
+ * @param first_check whether this is a fast path success on the first health check for this host
+ */
+ virtual void logAddHealthy(envoy::data::core::v2alpha::HealthCheckerType health_checker_type,
+ const HostDescriptionConstSharedPtr& host, bool first_check) PURE;
+};
+
+typedef std::unique_ptr<HealthCheckEventLogger> HealthCheckEventLoggerPtr;
+
} // namespace Upstream
} // namespace Envoy
diff --git a/include/envoy/upstream/host_description.h b/include/envoy/upstream/host_description.h
index aff175735b9af..9f5eb67f7ba20 100644
--- a/include/envoy/upstream/host_description.h
+++ b/include/envoy/upstream/host_description.h
@@ -52,10 +52,20 @@ class HostDescription {
*/
virtual bool canary() const PURE;
+ /**
+ * Update the canary status of the host.
+ */
+ virtual void canary(bool is_canary) PURE;
+
/**
* @return the metadata associated with this host
*/
- virtual const envoy::api::v2::core::Metadata& metadata() const PURE;
+ virtual const std::shared_ptr<envoy::api::v2::core::Metadata> metadata() const PURE;
+
+ /**
+ * Set the current metadata.
+ */
+ virtual void metadata(const envoy::api::v2::core::Metadata& new_metadata) PURE;
/**
* @return the cluster the host is a member of.
diff --git a/include/envoy/upstream/load_balancer_type.h b/include/envoy/upstream/load_balancer_type.h
index 5a8e21828d1b4..cc2e1b3029e59 100644
--- a/include/envoy/upstream/load_balancer_type.h
+++ b/include/envoy/upstream/load_balancer_type.h
@@ -46,6 +46,11 @@ class LoadBalancerSubsetInfo {
* sorted keys used to define load balancer subsets.
*/
virtual const std::vector<std::set<std::string>>& subsetKeys() const PURE;
+
+ /**
+ * @return bool whether routing to subsets should take locality weights into account.
+ */
+ virtual bool localityWeightAware() const PURE;
};
} // namespace Upstream
diff --git a/source/common/access_log/BUILD b/source/common/access_log/BUILD
index 40896979eae9d..afbca0e51e2cf 100644
--- a/source/common/access_log/BUILD
+++ b/source/common/access_log/BUILD
@@ -51,7 +51,10 @@ envoy_cc_library(
"//source/common/http:header_utility_lib",
"//source/common/http:headers_lib",
"//source/common/http:utility_lib",
+ "//source/common/protobuf:utility_lib",
+ "//source/common/request_info:request_info_lib",
"//source/common/runtime:uuid_util_lib",
"//source/common/tracing:http_tracer_lib",
+ "@envoy_api//envoy/config/filter/accesslog/v2:accesslog_cc",
],
)
diff --git a/source/common/access_log/access_log_formatter.cc b/source/common/access_log/access_log_formatter.cc
index 82f8205200565..88ec956d9d558 100644
--- a/source/common/access_log/access_log_formatter.cc
+++ b/source/common/access_log/access_log_formatter.cc
@@ -35,14 +35,16 @@ FormatterPtr AccessLogFormatUtils::defaultAccessLogFormatter() {
std::string
AccessLogFormatUtils::durationToString(const absl::optional<std::chrono::nanoseconds>& time) {
if (time) {
- return fmt::FormatInt(
- std::chrono::duration_cast<std::chrono::milliseconds>(time.value()).count())
- .str();
+ return durationToString(time.value());
} else {
return UnspecifiedValueString;
}
}
+std::string AccessLogFormatUtils::durationToString(const std::chrono::nanoseconds& time) {
+ return fmt::FormatInt(std::chrono::duration_cast<std::chrono::milliseconds>(time).count()).str();
+}
+
const std::string&
AccessLogFormatUtils::protocolToString(const absl::optional<Http::Protocol>& protocol) {
if (protocol) {
@@ -221,6 +223,18 @@ RequestInfoFormatter::RequestInfoFormatter(const std::string& field_name) {
field_extractor_ = [](const RequestInfo::RequestInfo& request_info) {
return AccessLogFormatUtils::durationToString(request_info.firstUpstreamRxByteReceived());
};
+ } else if (field_name == "RESPONSE_TX_DURATION") {
+ field_extractor_ = [](const RequestInfo::RequestInfo& request_info) {
+ auto downstream = request_info.lastDownstreamTxByteSent();
+ auto upstream = request_info.firstUpstreamRxByteReceived();
+
+ if (downstream && upstream) {
+ auto val = downstream.value() - upstream.value();
+ return AccessLogFormatUtils::durationToString(val);
+ }
+
+ return UnspecifiedValueString;
+ };
} else if (field_name == "BYTES_RECEIVED") {
field_extractor_ = [](const RequestInfo::RequestInfo& request_info) {
return fmt::FormatInt(request_info.bytesReceived()).str();
@@ -388,7 +402,7 @@ std::string MetadataFormatter::format(const envoy::api::v2::core::Metadata& meta
}
ProtobufTypes::String json;
const auto status = Protobuf::util::MessageToJsonString(*data, &json);
- RELEASE_ASSERT(status.ok());
+ RELEASE_ASSERT(status.ok(), "");
if (max_length_ && json.length() > max_length_.value()) {
return json.substr(0, max_length_.value());
}
diff --git a/source/common/access_log/access_log_formatter.h b/source/common/access_log/access_log_formatter.h
index b0a1246345c06..b6cb0a8a2775d 100644
--- a/source/common/access_log/access_log_formatter.h
+++ b/source/common/access_log/access_log_formatter.h
@@ -73,6 +73,7 @@ class AccessLogFormatUtils {
static FormatterPtr defaultAccessLogFormatter();
static const std::string& protocolToString(const absl::optional<Http::Protocol>& protocol);
static std::string durationToString(const absl::optional<std::chrono::nanoseconds>& time);
+ static std::string durationToString(const std::chrono::nanoseconds& time);
private:
AccessLogFormatUtils();
diff --git a/source/common/access_log/access_log_impl.cc b/source/common/access_log/access_log_impl.cc
index 74f0f5b677fdb..163b695cf0253 100644
--- a/source/common/access_log/access_log_impl.cc
+++ b/source/common/access_log/access_log_impl.cc
@@ -4,6 +4,7 @@
#include
#include "envoy/common/time.h"
+#include "envoy/config/filter/accesslog/v2/accesslog.pb.validate.h"
#include "envoy/filesystem/filesystem.h"
#include "envoy/http/header_map.h"
#include "envoy/runtime/runtime.h"
@@ -17,6 +18,8 @@
#include "common/http/header_utility.h"
#include "common/http/headers.h"
#include "common/http/utility.h"
+#include "common/protobuf/utility.h"
+#include "common/request_info/utility.h"
#include "common/runtime/uuid_util.h"
#include "common/tracing/http_tracer_impl.h"
@@ -44,7 +47,7 @@ bool ComparisonFilter::compareAgainstValue(uint64_t lhs) {
case envoy::config::filter::accesslog::v2::ComparisonFilter::LE:
return lhs <= value;
default:
- NOT_REACHED;
+ NOT_REACHED_GCOVR_EXCL_LINE;
}
}
@@ -68,8 +71,11 @@ FilterFactory::fromProto(const envoy::config::filter::accesslog::v2::AccessLogFi
return FilterPtr{new OrFilter(config.or_filter(), runtime, random)};
case envoy::config::filter::accesslog::v2::AccessLogFilter::kHeaderFilter:
return FilterPtr{new HeaderFilter(config.header_filter())};
+ case envoy::config::filter::accesslog::v2::AccessLogFilter::kResponseFlagFilter:
+ MessageUtil::validate(config);
+ return FilterPtr{new ResponseFlagFilter(config.response_flag_filter())};
default:
- NOT_REACHED;
+ NOT_REACHED_GCOVR_EXCL_LINE;
}
}
@@ -174,6 +180,24 @@ bool HeaderFilter::evaluate(const RequestInfo::RequestInfo&,
return Http::HeaderUtility::matchHeaders(request_headers, header_data_);
}
+ResponseFlagFilter::ResponseFlagFilter(
+ const envoy::config::filter::accesslog::v2::ResponseFlagFilter& config) {
+ for (int i = 0; i < config.flags_size(); i++) {
+ absl::optional<RequestInfo::ResponseFlag> response_flag =
+ RequestInfo::ResponseFlagUtils::toResponseFlag(config.flags(i));
+ // The config has been validated. Therefore, every flag in the config will have a mapping.
+ ASSERT(response_flag.has_value());
+ configured_flags_ |= response_flag.value();
+ }
+}
+
+bool ResponseFlagFilter::evaluate(const RequestInfo::RequestInfo& info, const Http::HeaderMap&) {
+ if (configured_flags_ != 0) {
+ return info.intersectResponseFlags(configured_flags_);
+ }
+ return info.hasAnyResponseFlag();
+}
+
InstanceSharedPtr
AccessLogFactory::fromProto(const envoy::config::filter::accesslog::v2::AccessLog& config,
Server::Configuration::FactoryContext& context) {
diff --git a/source/common/access_log/access_log_impl.h b/source/common/access_log/access_log_impl.h
index 63c22735f996a..cad54fd633012 100644
--- a/source/common/access_log/access_log_impl.h
+++ b/source/common/access_log/access_log_impl.h
@@ -6,7 +6,6 @@
#include "envoy/access_log/access_log.h"
#include "envoy/config/filter/accesslog/v2/accesslog.pb.h"
-#include "envoy/request_info/request_info.h"
#include "envoy/runtime/runtime.h"
#include "envoy/server/access_log_config.h"
@@ -166,6 +165,21 @@ class HeaderFilter : public Filter {
std::vector<Http::HeaderUtility::HeaderData> header_data_;
};
+/**
+ * Filter requests that had a response with an Envoy response flag set.
+ */
+class ResponseFlagFilter : public Filter {
+public:
+ ResponseFlagFilter(const envoy::config::filter::accesslog::v2::ResponseFlagFilter& config);
+
+ // AccessLog::Filter
+ bool evaluate(const RequestInfo::RequestInfo& info,
+ const Http::HeaderMap& request_headers) override;
+
+private:
+ uint64_t configured_flags_{};
+};
+
/**
* Access log factory that reads the configuration from proto.
*/
diff --git a/source/common/api/os_sys_calls_impl.cc b/source/common/api/os_sys_calls_impl.cc
index 7012290927660..80cfd24bf602c 100644
--- a/source/common/api/os_sys_calls_impl.cc
+++ b/source/common/api/os_sys_calls_impl.cc
@@ -11,6 +11,10 @@ int OsSysCallsImpl::bind(int sockfd, const sockaddr* addr, socklen_t addrlen) {
return ::bind(sockfd, addr, addrlen);
}
+int OsSysCallsImpl::ioctl(int sockfd, unsigned long int request, void* argp) {
+ return ::ioctl(sockfd, request, argp);
+}
+
int OsSysCallsImpl::open(const std::string& full_path, int flags, int mode) {
return ::open(full_path.c_str(), flags, mode);
}
@@ -57,5 +61,9 @@ int OsSysCallsImpl::getsockopt(int sockfd, int level, int optname, void* optval,
return ::getsockopt(sockfd, level, optname, optval, optlen);
}
+int OsSysCallsImpl::socket(int domain, int type, int protocol) {
+ return ::socket(domain, type, protocol);
+}
+
} // namespace Api
} // namespace Envoy
diff --git a/source/common/api/os_sys_calls_impl.h b/source/common/api/os_sys_calls_impl.h
index d1985622615d5..db325862367df 100644
--- a/source/common/api/os_sys_calls_impl.h
+++ b/source/common/api/os_sys_calls_impl.h
@@ -11,6 +11,7 @@ class OsSysCallsImpl : public OsSysCalls {
public:
// Api::OsSysCalls
int bind(int sockfd, const sockaddr* addr, socklen_t addrlen) override;
+ int ioctl(int sockfd, unsigned long int request, void* argp) override;
int open(const std::string& full_path, int flags, int mode) override;
ssize_t write(int fd, const void* buffer, size_t num_bytes) override;
ssize_t writev(int fd, const iovec* iovec, int num_iovec) override;
@@ -24,6 +25,7 @@ class OsSysCallsImpl : public OsSysCalls {
int stat(const char* pathname, struct stat* buf) override;
int setsockopt(int sockfd, int level, int optname, const void* optval, socklen_t optlen) override;
int getsockopt(int sockfd, int level, int optname, void* optval, socklen_t* optlen) override;
+ int socket(int domain, int type, int protocol) override;
};
typedef ThreadSafeSingleton<OsSysCallsImpl> OsSysCallsSingleton;
diff --git a/source/common/buffer/buffer_impl.cc b/source/common/buffer/buffer_impl.cc
index f7bdfcd12aa46..888d87077376f 100644
--- a/source/common/buffer/buffer_impl.cc
+++ b/source/common/buffer/buffer_impl.cc
@@ -94,9 +94,9 @@ void OwnedImpl::move(Instance& rhs, uint64_t length) {
static_cast<LibEventInstance&>(rhs).postProcess();
}
-int OwnedImpl::read(int fd, uint64_t max_length) {
+Api::SysCallResult OwnedImpl::read(int fd, uint64_t max_length) {
if (max_length == 0) {
- return 0;
+ return {0, 0};
}
constexpr uint64_t MaxSlices = 2;
RawSlice slices[MaxSlices];
@@ -115,8 +115,9 @@ int OwnedImpl::read(int fd, uint64_t max_length) {
ASSERT(num_bytes_to_read <= max_length);
auto& os_syscalls = Api::OsSysCallsSingleton::get();
const ssize_t rc = os_syscalls.readv(fd, iov, static_cast<int>(num_slices_to_read));
+ const int error = errno;
if (rc < 0) {
- return rc;
+ return {static_cast<int>(rc), error};
}
uint64_t num_slices_to_commit = 0;
uint64_t bytes_to_commit = rc;
@@ -130,7 +131,7 @@ int OwnedImpl::read(int fd, uint64_t max_length) {
}
ASSERT(num_slices_to_commit <= num_slices);
commit(slices, num_slices_to_commit);
- return rc;
+ return {static_cast<int>(rc), error};
}
uint64_t OwnedImpl::reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) {
@@ -151,7 +152,7 @@ ssize_t OwnedImpl::search(const void* data, uint64_t size, size_t start) const {
return result_ptr.pos;
}
-int OwnedImpl::write(int fd) {
+Api::SysCallResult OwnedImpl::write(int fd) {
constexpr uint64_t MaxSlices = 16;
RawSlice slices[MaxSlices];
const uint64_t num_slices = std::min(getRawSlices(slices, MaxSlices), MaxSlices);
@@ -165,14 +166,15 @@ int OwnedImpl::write(int fd) {
}
}
if (num_slices_to_write == 0) {
- return 0;
+ return {0, 0};
}
auto& os_syscalls = Api::OsSysCallsSingleton::get();
const ssize_t rc = os_syscalls.writev(fd, iov, num_slices_to_write);
+ const int error = errno;
if (rc > 0) {
drain(static_cast<uint64_t>(rc));
}
- return static_cast<int>(rc);
+ return {static_cast<int>(rc), error};
}
OwnedImpl::OwnedImpl() : buffer_(evbuffer_new()) {}
diff --git a/source/common/buffer/buffer_impl.h b/source/common/buffer/buffer_impl.h
index 4d9a2f4aa7b6f..993f4990405d8 100644
--- a/source/common/buffer/buffer_impl.h
+++ b/source/common/buffer/buffer_impl.h
@@ -80,17 +80,12 @@ class OwnedImpl : public LibEventInstance {
void* linearize(uint32_t size) override;
void move(Instance& rhs) override;
void move(Instance& rhs, uint64_t length) override;
- int read(int fd, uint64_t max_length) override;
+ Api::SysCallResult read(int fd, uint64_t max_length) override;
uint64_t reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) override;
ssize_t search(const void* data, uint64_t size, size_t start) const override;
- int write(int fd) override;
+ Api::SysCallResult write(int fd) override;
void postProcess() override {}
-
- /**
- * Construct a flattened string from a buffer.
- * @return the flattened string.
- */
- std::string toString() const;
+ std::string toString() const override;
Event::Libevent::BufferPtr& buffer() override { return buffer_; }
diff --git a/source/common/buffer/watermark_buffer.cc b/source/common/buffer/watermark_buffer.cc
index 9eb32b1815ee7..fe2c1981e54f0 100644
--- a/source/common/buffer/watermark_buffer.cc
+++ b/source/common/buffer/watermark_buffer.cc
@@ -40,10 +40,10 @@ void WatermarkBuffer::move(Instance& rhs, uint64_t length) {
checkHighWatermark();
}
-int WatermarkBuffer::read(int fd, uint64_t max_length) {
- int bytes_read = OwnedImpl::read(fd, max_length);
+Api::SysCallResult WatermarkBuffer::read(int fd, uint64_t max_length) {
+ Api::SysCallResult result = OwnedImpl::read(fd, max_length);
checkHighWatermark();
- return bytes_read;
+ return result;
}
uint64_t WatermarkBuffer::reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) {
@@ -52,10 +52,10 @@ uint64_t WatermarkBuffer::reserve(uint64_t length, RawSlice* iovecs, uint64_t nu
return bytes_reserved;
}
-int WatermarkBuffer::write(int fd) {
- int bytes_written = OwnedImpl::write(fd);
+Api::SysCallResult WatermarkBuffer::write(int fd) {
+ Api::SysCallResult result = OwnedImpl::write(fd);
checkLowWatermark();
- return bytes_written;
+ return result;
}
void WatermarkBuffer::setWatermarks(uint32_t low_watermark, uint32_t high_watermark) {
diff --git a/source/common/buffer/watermark_buffer.h b/source/common/buffer/watermark_buffer.h
index 5be55409ef1e4..fb74ccde04f4c 100644
--- a/source/common/buffer/watermark_buffer.h
+++ b/source/common/buffer/watermark_buffer.h
@@ -28,9 +28,9 @@ class WatermarkBuffer : public OwnedImpl {
void drain(uint64_t size) override;
void move(Instance& rhs) override;
void move(Instance& rhs, uint64_t length) override;
- int read(int fd, uint64_t max_length) override;
+ Api::SysCallResult read(int fd, uint64_t max_length) override;
uint64_t reserve(uint64_t length, RawSlice* iovecs, uint64_t num_iovecs) override;
- int write(int fd) override;
+ Api::SysCallResult write(int fd) override;
void postProcess() override { checkLowWatermark(); }
void setWatermarks(uint32_t watermark) { setWatermarks(watermark / 2, watermark); }
diff --git a/source/common/buffer/zero_copy_input_stream_impl.cc b/source/common/buffer/zero_copy_input_stream_impl.cc
index f6030f69eec72..9159045b5c332 100644
--- a/source/common/buffer/zero_copy_input_stream_impl.cc
+++ b/source/common/buffer/zero_copy_input_stream_impl.cc
@@ -44,7 +44,7 @@ bool ZeroCopyInputStreamImpl::Next(const void** data, int* size) {
return false;
}
-bool ZeroCopyInputStreamImpl::Skip(int) { NOT_IMPLEMENTED; }
+bool ZeroCopyInputStreamImpl::Skip(int) { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; }
void ZeroCopyInputStreamImpl::BackUp(int count) {
ASSERT(count >= 0);
diff --git a/source/common/common/BUILD b/source/common/common/BUILD
index fc0e43d110f1b..b921a9d154449 100644
--- a/source/common/common/BUILD
+++ b/source/common/common/BUILD
@@ -16,6 +16,17 @@ envoy_cc_library(
deps = [":minimal_logger_lib"],
)
+envoy_cc_library(
+ name = "backoff_lib",
+ srcs = ["backoff_strategy.cc"],
+ hdrs = ["backoff_strategy.h"],
+ deps = [
+ ":assert_lib",
+ "//include/envoy/common:backoff_strategy_interface",
+ "//include/envoy/runtime:runtime_interface",
+ ],
+)
+
envoy_cc_library(
name = "base64_lib",
srcs = ["base64.cc"],
@@ -113,6 +124,21 @@ envoy_cc_library(
hdrs = ["macros.h"],
)
+envoy_cc_library(
+ name = "matchers_lib",
+ srcs = ["matchers.cc"],
+ hdrs = ["matchers.h"],
+ external_deps = ["abseil_optional"],
+ deps = [
+ ":utility_lib",
+ "//source/common/config:metadata_lib",
+ "//source/common/protobuf",
+ "@envoy_api//envoy/type/matcher:metadata_cc",
+ "@envoy_api//envoy/type/matcher:number_cc",
+ "@envoy_api//envoy/type/matcher:string_cc",
+ ],
+)
+
envoy_cc_library(
name = "non_copyable",
hdrs = ["non_copyable.h"],
@@ -209,6 +235,8 @@ envoy_cc_library(
deps = [
":assert_lib",
":logger_lib",
+ "//include/envoy/stats:stats_interface",
+ "//source/common/stats:stats_lib",
],
)
diff --git a/source/common/common/assert.h b/source/common/common/assert.h
index 64ca60856e20e..9d4f4c5275215 100644
--- a/source/common/common/assert.h
+++ b/source/common/common/assert.h
@@ -3,21 +3,33 @@
#include "common/common/logger.h"
namespace Envoy {
+
/**
* assert macro that uses our builtin logging which gives us thread ID and can log to various
* sinks.
+ *
+ * The old style release assert was of the form RELEASE_ASSERT(foo == bar);
+ * where it would log stack traces and the failed conditional and crash if the
+ * condition is not met. The are many legacy RELEASE_ASSERTS in Envoy which
+ * were converted to RELEASE_ASSERT(foo == bar, "");
+ *
+ * The new style of release assert is of the form
+ * RELEASE_ASSERT(foo == bar, "reason foo should actually be bar");
+ * new uses of RELEASE_ASSERT should supply a verbose explanation of what went wrong.
*/
-#define RELEASE_ASSERT(X) \
+#define RELEASE_ASSERT(X, DETAILS) \
do { \
if (!(X)) { \
+ const std::string& details = (DETAILS); \
ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::assert), critical, \
- "assert failure: {}", #X); \
+ "assert failure: {}.{}{}", #X, \
+ details.empty() ? "" : " Details: ", details); \
abort(); \
} \
} while (false)
#ifndef NDEBUG
-#define ASSERT(X) RELEASE_ASSERT(X)
+#define ASSERT(X) RELEASE_ASSERT(X, "")
#else
// This non-implementation ensures that its argument is a valid expression that can be statically
// casted to a bool, but the expression is never evaluated and will be compiled away.
@@ -36,10 +48,14 @@ namespace Envoy {
"panic: {}", X); \
abort();
-#define NOT_IMPLEMENTED PANIC("not implemented")
+// NOT_IMPLEMENTED_GCOVR_EXCL_LINE is for overridden functions that are expressly not implemented.
+// The macro name includes "GCOVR_EXCL_LINE" to exclude the macro's usage from code coverage
+// reports.
+#define NOT_IMPLEMENTED_GCOVR_EXCL_LINE PANIC("not implemented")
-// NOT_REACHED is for spots the compiler insists on having a return, but where we know that it
-// shouldn't be possible to arrive there, assuming no horrendous bugs. For example, after a
-// switch (some_enum) with all enum values included in the cases.
-#define NOT_REACHED PANIC("not reached")
+// NOT_REACHED_GCOVR_EXCL_LINE is for spots the compiler insists on having a return, but where we
+// know that it shouldn't be possible to arrive there, assuming no horrendous bugs. For example,
+// after a switch (some_enum) with all enum values included in the cases. The macro name includes
+// "GCOVR_EXCL_LINE" to exclude the macro's usage from code coverage reports.
+#define NOT_REACHED_GCOVR_EXCL_LINE PANIC("not reached")
} // Envoy
diff --git a/source/common/common/backoff_strategy.cc b/source/common/common/backoff_strategy.cc
new file mode 100644
index 0000000000000..d8a21be12c801
--- /dev/null
+++ b/source/common/common/backoff_strategy.cc
@@ -0,0 +1,22 @@
+#include "common/common/backoff_strategy.h"
+
+namespace Envoy {
+
+JitteredBackOffStrategy::JitteredBackOffStrategy(uint64_t base_interval, uint64_t max_interval,
+ Runtime::RandomGenerator& random)
+ : base_interval_(base_interval), max_interval_(max_interval), random_(random) {
+ ASSERT(base_interval_ <= max_interval_);
+}
+
+uint64_t JitteredBackOffStrategy::nextBackOffMs() {
+ const uint64_t multiplier = (1 << current_retry_) - 1;
+ const uint64_t base_backoff = multiplier * base_interval_;
+ if (base_backoff <= max_interval_) {
+ current_retry_++;
+ }
+ return std::min(random_.random() % base_backoff, max_interval_);
+}
+
+void JitteredBackOffStrategy::reset() { current_retry_ = 1; }
+
+} // namespace Envoy
\ No newline at end of file
diff --git a/source/common/common/backoff_strategy.h b/source/common/common/backoff_strategy.h
new file mode 100644
index 0000000000000..787320ecb5a98
--- /dev/null
+++ b/source/common/common/backoff_strategy.h
@@ -0,0 +1,38 @@
+#pragma once
+
+#include
+#include