diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index b55e5c70fc554..fb3f7ecde8f1b 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -142,6 +142,7 @@ def _python_deps(): actual = "@com_github_pallets_jinja//:jinja2", ) + # Bazel native C++ dependencies. For the depedencies that doesn't provide autoconf/automake builds. def _cc_deps(): _repository_impl("grpc_httpjson_transcoding") diff --git a/configs/BUILD b/configs/BUILD index 57584665ad1cf..113c199157fba 100644 --- a/configs/BUILD +++ b/configs/BUILD @@ -13,8 +13,8 @@ envoy_py_test_binary( name = "configgen", srcs = ["configgen.py"], data = glob([ - "*.json", "*.yaml", + "*.json", ]), external_deps = ["jinja2"], ) diff --git a/configs/access_log_format_helper.template.json b/configs/access_log_format_helper.template.json deleted file mode 100644 index 710f8aa90912c..0000000000000 --- a/configs/access_log_format_helper.template.json +++ /dev/null @@ -1,15 +0,0 @@ -{% macro ingress_sampled_log() %} - "format": "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n" -{% endmacro %} - -{% macro ingress_full() %} - "format": "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n" -{% endmacro %} - -{% macro egress_error_log() %} - "format": "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" 
\"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\"\n" -{% endmacro %} - -{% macro egress_error_amazon_service() %} - "format": "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\" \"%RESP(X-AMZN-RequestId)%\"\n" -{% endmacro %} diff --git a/configs/access_log_format_helper_v2.template.yaml b/configs/access_log_format_helper_v2.template.yaml new file mode 100644 index 0000000000000..7a5d711c088b4 --- /dev/null +++ b/configs/access_log_format_helper_v2.template.yaml @@ -0,0 +1,15 @@ +{% macro ingress_sampled_log() -%} + format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n" +{% endmacro %} + +{% macro ingress_full() -%} + format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n" +{% endmacro %} + +{% macro egress_error_log() -%} + format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\"\n" +{% endmacro %} + +{% macro egress_error_amazon_service() -%} + format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% 
%BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\" \"%RESP(X-AMZN-RequestId)%\"\n" +{% endmacro %} diff --git a/configs/configgen.py b/configs/configgen.py index 0d050820768d7..474ff1b10980e 100755 --- a/configs/configgen.py +++ b/configs/configgen.py @@ -49,11 +49,15 @@ external_virtual_hosts = [ { 'name': 'dynamodb_iad', - 'address': "tcp://127.0.0.1:9204", + 'address': "127.0.0.1", + 'protocol':"TCP", + 'port_value':"9204", 'hosts': [ { 'name': 'dynamodb_iad', 'domain': '*', - 'remote_address': 'dynamodb.us-east-1.amazonaws.com:443', + 'remote_address': 'dynamodb.us-east-1.amazonaws.com', + 'protocol': 'TCP', + 'port_value': '443', 'verify_subject_alt_name': [ 'dynamodb.us-east-1.amazonaws.com' ], 'ssl': True } @@ -69,12 +73,14 @@ # as it demonstrates how to setup TCP proxy and the network rate limit filter. mongos_servers = { 'somedb': { - 'address': "tcp://127.0.0.1:27019", + 'address': "127.0.0.1", + 'protocol': "TCP", + 'port_value': 27019, 'hosts': [ - "router1.yourcompany.net:27817", - "router2.yourcompany.net:27817", - "router3.yourcompany.net:27817", - "router4.yourcompany.net:27817", + {'port_value' : 27817, 'address':'router1.yourcompany.net' , 'protocol': 'TCP'}, + {'port_value' : 27817, 'address':'router2.yourcompany.net' , 'protocol': 'TCP'}, + {'port_value' : 27817, 'address':'router3.yourcompany.net' , 'protocol': 'TCP'}, + {'port_value' : 27817, 'address':'router4.yourcompany.net' , 'protocol': 'TCP'}, ], 'ratelimit': True } @@ -85,20 +91,18 @@ def generate_config(template_path, template, output_file, **context): env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_path, followlinks=True), undefined=jinja2.StrictUndefined) raw_output = env.get_template(template).render(**context) - # Verify valid JSON and then dump it nicely formatted to avoid jinja pain. 
- output = json.loads(raw_output, object_pairs_hook=OrderedDict) with open(output_file, 'w') as fh: - json.dump(output, fh, indent=2) + fh.write(raw_output) # Generate a demo config for the main front proxy. This sets up both HTTP and HTTPS listeners, # as well as a listener for the double proxy to connect to via SSL client authentication. -generate_config(SCRIPT_DIR, 'envoy_front_proxy.template.json', - '{}/envoy_front_proxy.json'.format(OUT_DIR), clusters=front_envoy_clusters) +generate_config(SCRIPT_DIR, 'envoy_front_proxy_v2.template.yaml', + '{}/envoy_front_proxy.v2.yaml'.format(OUT_DIR), clusters=front_envoy_clusters) # Generate a demo config for the double proxy. This sets up both an HTTP and HTTPS listeners, # and backhauls the traffic to the main front proxy. -generate_config(SCRIPT_DIR, 'envoy_double_proxy.template.json', - '{}/envoy_double_proxy.json'.format(OUT_DIR)) +generate_config(SCRIPT_DIR, 'envoy_double_proxy_v2.template.yaml', + '{}/envoy_double_proxy.v2.yaml'.format(OUT_DIR)) # Generate a demo config for the service to service (local) proxy. This sets up several different # listeners: @@ -108,11 +112,11 @@ def generate_config(template_path, template, output_file, **context): # optional external service ports: built from external_virtual_hosts above. Each external host # that Envoy proxies to listens on its own port. # optional mongo ports: built from mongos_servers above. 
-generate_config(SCRIPT_DIR, 'envoy_service_to_service.template.json', - '{}/envoy_service_to_service.json'.format(OUT_DIR), +generate_config(SCRIPT_DIR, 'envoy_service_to_service_v2.template.yaml', + '{}/envoy_service_to_service.yaml'.format(OUT_DIR), internal_virtual_hosts=service_to_service_envoy_clusters, external_virtual_hosts=external_virtual_hosts, mongos_servers=mongos_servers) -for google_ext in ['json', 'yaml', 'v2.yaml']: +for google_ext in ['v2.yaml']: shutil.copy(os.path.join(SCRIPT_DIR, 'google_com_proxy.%s' % google_ext), OUT_DIR) diff --git a/configs/envoy_double_proxy.template.json b/configs/envoy_double_proxy.template.json deleted file mode 100644 index f5eb5499eda0c..0000000000000 --- a/configs/envoy_double_proxy.template.json +++ /dev/null @@ -1,152 +0,0 @@ -{% macro listener(address,ssl,proxy_proto) %} - { - "address": "{{ address }}", - {% if ssl -%} - "ssl_context": { - "alpn_protocols": "h2,http/1.1", - "alt_alpn_protocols": "http/1.1", - "cert_chain_file": "certs/servercert.pem", - "private_key_file": "certs/serverkey.pem" - }, - {% endif -%} - {% if proxy_proto -%} - "use_proxy_proto": true, - {% endif -%} - "filters": [ - { - "name": "http_connection_manager", - "config": { - "codec_type": "auto", - "tracing": { - "operation_name": "ingress" - }, - "idle_timeout_s": 840, - "access_log": [ - { - "path": "/var/log/envoy/access_error.log", - "filter": {"type": "logical_or", "filters": [ - {"type": "status_code", "op": ">=", "value": 500}, - {"type": "duration", "op": ">=", "value": 1000}, - {"type": "traceable_request"} - ] - } - }, - { - "path": "/var/log/envoy/access.log" - }], - "stat_prefix": "router", - {% if proxy_proto -%} - "use_remote_address": true, - {% endif -%} - "route_config": - { - "virtual_hosts": [ - { - "name": "all", - "domains": ["*"], - "routes": [ - { - "prefix": "/", - "cluster": "backhaul", - {# Generally allow front proxy to control timeout and use this as a backstop #} - "timeout_ms": 20000 - } - ] - } - ] - }, - 
"filters": [ - { "name": "health_check", - "config": { - "pass_through_mode": false, "endpoint": "/healthcheck" - } - }, - { "name": "buffer", - "config": { - "max_request_bytes": 5242880, - "max_request_time_s": 120 - } - }, - { "name": "router", "config": {} } - ] - } - }] - } -{% endmacro %} - -{ - "listeners": [ - {# TCP listener for external port 443 (SSL). Assumes a TCP LB in front such as ELB which - supports proxy proto. #} - {{ listener("tcp://0.0.0.0:9300",True,True) }}, - - {# TCP listener for external port 80 (non-SSL). Assumes a TCP LB in front such as ELB which - supports proxy proto. #} - {{ listener("tcp://0.0.0.0:9301",False,True) }} - ], - - "admin": { "access_log_path": "/var/log/envoy/admin_access.log", - "address": "tcp://127.0.0.1:9901" }, - "flags_path": "/etc/envoy/flags", - "statsd_tcp_cluster_name": "statsd", - - "tracing": { - "http": { - "driver": { - "type": "lightstep", - "config": { - "access_token_file": "/etc/envoy/lightstep_access_token", - "collector_cluster": "lightstep_saas" - } - } - } - }, - - "runtime": { - "symlink_root": "/srv/runtime_data/current", - "subdirectory": "envoy", - "override_subdirectory": "envoy_override" - }, - - "cluster_manager": { - "clusters": [ - { - "name": "statsd", - "connect_timeout_ms": 250, - "type": "static", - "lb_type": "round_robin", - "hosts": [{"url": "tcp://127.0.0.1:8125"}] - }, - { - "name": "backhaul", - "connect_timeout_ms": 1000, - "type": "strict_dns", - "lb_type": "round_robin", - "features": "http2", - "max_requests_per_connection": 25000, {# There are so few connections going back - that we can get some imbalance. Until we can come - up with a better solution just limit the requests - so we can cycle and get better spread. 
#} - "ssl_context": { - "cert_chain_file": "certs/clientcert.pem", - "private_key_file": "certs/clientkey.pem", - "ca_cert_file": "certs/cacert.pem", - "verify_subject_alt_name": ["front-proxy.yourcompany.net"] - }, - "hosts": [{"url": "tcp://front-proxy.yourcompany.net:9400"}] - }, - { - "name": "lightstep_saas", - "features": "http2", - "ssl_context": { - "ca_cert_file": "certs/cacert.pem", - "verify_subject_alt_name": ["collector-grpc.lightstep.com"] - }, - "connect_timeout_ms": 1000, - "type": "logical_dns", - "lb_type": "round_robin", - "hosts": [{"url": "tcp://collector-grpc.lightstep.com:443"}] - } - ] - } -} diff --git a/configs/envoy_double_proxy_v2.template.yaml b/configs/envoy_double_proxy_v2.template.yaml new file mode 100644 index 0000000000000..a3205747661dd --- /dev/null +++ b/configs/envoy_double_proxy_v2.template.yaml @@ -0,0 +1,167 @@ +{%- macro listener(protocol, address, port_value, tls, proxy_proto) -%} +- name: listener_created_from_configgen + address: + socket_address: + protocol: {{protocol}} + address: {{address}} + port_value: {{port_value}} + filter_chains: + - filter_chain_match: {} + {% if tls %} + tls_context: + common_tls_context: + tls_certificates: + - certificate_chain: + filename: certs/servercert.pem + private_key: + filename: certs/serverkey.pem + validation_context: {} + alpn_protocols: + - h2 + - http/1.1 + deprecated_v1: + alt_alpn_protocols: http/1.1 + {% endif %} + {% if proxy_proto %} + use_proxy_proto: true + {%endif -%} + filters: + - name: envoy.http_connection_manager + config: + codec_type: AUTO + stat_prefix: router + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ["*"] + routes: + - match: + prefix: "/" + route: + cluster: backhaul + #Generally allow front proxy to control timeout and use this as a backstop + timeout: 20s + http_filters: + - name: envoy.health_check + config: + pass_through_mode: false + endpoint: /healthcheck + name: envoy.buffer + config: + 
max_request_bytes: 5242880 + max_request_time: 120s + name: envoy.router + config: {} + tracing: + operation_name: INGRESS + idle_timeout: 840s + access_log: + - name: envoy.file_access_log + filter: + or_filter: + filters: + - status_code_filter: + comparison: + op: GE + value: + default_value: 500 + runtime_key: access_log.access_error.status + - duration_filter: + comparison: + op: GE + value: + default_value: 1000 + runtime_key: access_log.access_error.duration + - traceable_filter: {} + config: + path: /var/log/envoy/access_error.log + format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%REQ(X-LYFT-USER-ID)%\" \"%RESP(GRPC-STATUS)%\"\n" + {% if proxy_proto %} + use_remote_address: true + {%endif -%} +{% endmacro -%} +static_resources: + listeners: + # TCP listener for external port 443 (TLS). Assumes a TCP LB in front such as ELB which + # supports proxy proto + {{ listener("TCP", "0.0.0.0",9300,True, True)|indent(2) }} + # TCP listener for external port 80 (non-TLS). Assumes a TCP LB in front such as ELB which + # supports proxy proto. + {{ listener("TCP", "0.0.0.0",9301,False, True)|indent(2) }} + clusters: + - name: statsd + type: STATIC + connect_timeout: 0.25s + lb_policy: ROUND_ROBIN + hosts: + - socket_address: + protocol: TCP + address: 127.0.0.1 + port_value: 8125 + - name: backhaul + type: STRICT_DNS + connect_timeout: 1s + lb_policy: ROUND_ROBIN + hosts: + - socket_address: + protocol: TCP + address: front-proxy.yourcompany.net + port_value: 9400 + # There are so few connections going back + # that we can get some imbalance. Until we come up + # with a better solution just limit the requests + # so we can cycle and get better spread. 
+ max_requests_per_connection: 25000 + tls_context: + common_tls_context: + tls_certificates: + - certificate_chain: + filename: certs/clientcert.pem + private_key: + filename: certs/clientkey.pem + validation_context: + trusted_ca: + filename: certs/cacert.pem + verify_subject_alt_name: + - front-proxy.yourcompany.net + http2_protocol_options: {} + - name: lightstep_saas + type: LOGICAL_DNS + connect_timeout: 1s + lb_policy: ROUND_ROBIN + hosts: + - socket_address: + protocol: TCP + address: collector-grpc.lightstep.com + port_value: 443 + http2_protocol_options: {} + tls_context: + common_tls_context: + validation_context: + trusted_ca: + filename: certs/cacert.pem + verify_subject_alt_name: + - collector-grpc.lightstep.com +flags_path: "/etc/envoy/flags" +stats_sinks: +- name: envoy.statsd + config: + tcp_cluster_name: statsd +tracing: + http: + name: envoy.lightstep + config: + access_token_file: "/etc/envoy/lightstep_access_token" + collector_cluster: lightstep_saas +runtime: + symlink_root: "/srv/runtime_data/current" + subdirectory: envoy + override_subdirectory: envoy_override +admin: + access_log_path: "var/log/envoy/admin_access.log" + address: + socket_address: + protocol: TCP + address: 127.0.0.1 + port_value: 9901 \ No newline at end of file diff --git a/configs/envoy_front_proxy.template.json b/configs/envoy_front_proxy.template.json deleted file mode 100644 index 81790f1d236c4..0000000000000 --- a/configs/envoy_front_proxy.template.json +++ /dev/null @@ -1,162 +0,0 @@ -{% import 'routing_helper.template.json' as helper -%} - -{% macro listener(address) %} - { - "address": "{{ address }}", - {% if kwargs['ssl'] -%} - "ssl_context": { - "alpn_protocols": "h2,http/1.1", - "alt_alpn_protocols": "http/1.1", - {% if kwargs.get('pin_double_proxy_client', False) -%} - "ca_cert_file": "certs/cacert.pem", - {# This should be the hash of the /etc/envoy/envoy-double-proxy.pem cert used in the - double proxy configuration. 
#} - "verify_certificate_hash": "0000000000000000000000000000000000000000000000000000000000000000", - {% endif -%} - "cert_chain_file": "certs/servercert.pem", - "private_key_file": "certs/serverkey.pem" - }, - {% endif -%} - {% if kwargs['proxy_proto'] -%} - "use_proxy_proto": true, - {% endif -%} - "filters": [ - { - "name": "http_connection_manager", - "config": { - "codec_type": "auto", - "add_user_agent": true, - "tracing": { - "operation_name": "ingress" - }, - "idle_timeout_s": 840, - "access_log": [ - { - "path": "/var/log/envoy/access_error.log", - "filter": {"type": "logical_or", "filters": [ - {"type": "status_code", "op": ">=", "value": 500}, - {"type": "duration", "op": ">=", "value": 1000}, - {"type": "traceable_request"} - ] - } - }, - { - "path": "/var/log/envoy/access.log" - }], - "stat_prefix": "router", - {% if kwargs['proxy_proto'] -%} - "use_remote_address": true, - {% endif -%} - "route_config": {% include kwargs['router_file'] %}, - "filters": [ - { "name": "health_check", - "config": { - "pass_through_mode": false, "endpoint": "/healthcheck" - } - }, - { "name": "buffer", - "config": { - "max_request_bytes": 5242880, - "max_request_time_s": 120 - } - }, - { "name": "rate_limit", - "config" : { - "domain": "envoy_front", - "request_type": "external" - } - }, - { "name": "router", "config": {} } - ] - } - }] - } -{% endmacro %} - -{ - "listeners": [ - {# TCP listeners for public HTTP/HTTPS endpoints. Assumes a TCP LB in front such as ELB which - supports proxy proto. #} - {{ listener("tcp://0.0.0.0:9300", ssl=True, proxy_proto=True, router_file='envoy_router.template.json') }}, - {{ listener("tcp://0.0.0.0:9301", ssl=False, proxy_proto=True, router_file='envoy_router.template.json') }}, - - {# TCP listener for backhaul traffic from the double proxy. 
- See envoy_double_proxy.template.json #} - {{ listener("tcp://0.0.0.0:9400", ssl=True, proxy_proto=False, pin_double_proxy_client=True, - router_file='envoy_router.template.json') }} - ], - - "admin": { "access_log_path": "/var/log/envoy/admin_access.log", - "address": "tcp://0.0.0.0:9901" }, - "flags_path": "/etc/envoy/flags", - "statsd_tcp_cluster_name": "statsd", - - "tracing": { - "http": { - "driver": { - "type": "lightstep", - "config": { - "access_token_file": "/etc/envoy/lightstep_access_token", - "collector_cluster": "lightstep_saas" - } - } - } - }, - - "runtime": { - "symlink_root": "/srv/runtime_data/current", - "subdirectory": "envoy", - "override_subdirectory": "envoy_override" - }, - - "rate_limit_service": { - "type": "grpc_service", - "config": { - "cluster_name": "ratelimit" - } - }, - - "cluster_manager": { - "outlier_detection": { - "event_log_path": "/var/log/envoy/outlier_events.log" - }, - - "sds": { - "cluster": { - "name": "sds", - "connect_timeout_ms": 250, - "type": "strict_dns", - "lb_type": "round_robin", - "hosts": [{"url": "tcp://discovery.yourcompany.net:80"}] - }, - "refresh_delay_ms": 30000 - }, - - "clusters": [ - { - "name": "statsd", - "connect_timeout_ms": 250, - "type": "static", - "lb_type": "round_robin", - "hosts": [{"url": "tcp://127.0.0.1:8125"}] - }, - { - "name": "lightstep_saas", - "features": "http2", - "ssl_context": { - "ca_cert_file": "certs/cacert.pem", - "verify_subject_alt_name": ["collector-grpc.lightstep.com"] - }, - "connect_timeout_ms": 1000, - "type": "logical_dns", - "lb_type": "round_robin", - "hosts": [{"url": "tcp://collector-grpc.lightstep.com:443"}] - }, - {% for service, options in clusters.iteritems() -%} - { - {{ helper.internal_cluster_definition(service, options) }} - }{% if not loop.last %},{% endif %} - {% endfor -%} - ] - } -} diff --git a/configs/envoy_front_proxy_v2.template.yaml b/configs/envoy_front_proxy_v2.template.yaml new file mode 100644 index 0000000000000..c24e4f883662b --- 
/dev/null +++ b/configs/envoy_front_proxy_v2.template.yaml @@ -0,0 +1,156 @@ +{% import 'routing_helper_v2.template.yaml' as helper -%} +{% macro router_file_content() -%}{% include kwargs['router_file'] -%}{% endmacro -%} +{% macro listener(protocol, address, port_value, proxy_proto, tls) -%} + name: not_required_for_static_listeners + address: + socket_address: + protocol: {{protocol}} + address: {{address}} + port_value: {{port_value}} + filter_chains: + {% if tls %} + - tls_context: + common_tls_context: + alpn_protocols: h2,http/1.1 + tls_certificates: + - certificate_chain: + filename: certs/servercert.pem + private_key: + filename: certs/serverkey.pem + {% if kwargs.get('pin_double_proxy_client', False) %} + validation_context: + trusted_ca: + filename: certs/cacert.pm + #This should be the hash of the /etc/envoy/envoy-double-proxy.pem cert used in the + #double proxy configuration. + verify_certificate_hash: "0000000000000000000000000000000000000000000000000000000000000000" + {% endif %} + {%if proxy_proto%} + use_proxy_proto: true + {%endif%} + {%endif %} + filters: + - name: envoy.http_connection_manager + config: + codec_type: AUTO + stat_prefix: router + {% if proxy_proto -%} + use_remote_address: true + {%endif-%} + stat_prefix: ingress_http + route_config: + {{ router_file_content(router_file='envoy_router_v2.template.yaml')|indent(10) }} + http_filters: + - name: envoy.health_check + config: + pass_through_mode: false + endpoint: "/healthcheck" + - name: envoy.buffer + config: + max_request_bytes: 5242880 + max_request_time: 120s + - name: envoy.rate_limit + config: + domain: envoy_front + request_type: external + - name: envoy.router + config: {} + add_user_agent: true + tracing: + operation_name: INGRESS + idle_timeout: 840s + access_log: + - name: envoy.file_access_log + filter: + or_filter: + filters: + - status_code_filter: + comparison: + op: GE + value: + default_value: 500 + runtime_key: access_log.access_error.status + - duration_filter: + 
comparison: + op: GE + value: + default_value: 1000 + runtime_key: access_log.access_error.duration + - traceable_filter: {} + config: + path: "/var/log/envoy/access_error.log" + format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%REQ(X-LYFT-USER-ID)%\" \"%RESP(GRPC-STATUS)%\"\n" +{% endmacro -%} +static_resources: + listeners: + # TCP listeners for public HTTP/HTTPS endpoints. Assumes a TCP LB in front such as ELB which + # supports proxy proto. + - {{ listener("TCP", "0.0.0.0", "9300", True, True)|indent(2) }} + - {{ listener("TCP", "0.0.0.0", "9301", True, True)|indent(2) }} + # TCP listener for backhaul traffic from the double proxy. + # See envoy_double_proxy.template.json + - {{ listener("TCP", "0.0.0.0", "9400", True, True, pin_double_proxy_client=True)|indent(2) }} + clusters: + - name: sds + type: STRICT_DNS + connect_timeout: 0.25s + lb_policy: ROUND_ROBIN + hosts: + - socket_address: + protocol: TCP + address: disccovery.yourcompany.net + port_value: 80 + - name: statsd + type: STATIC + connect_timeout: 0.25s + lb_policy: ROUND_ROBIN + hosts: + - socket_address: + protocol: TCP + address: 127.0.0.1 + port_value: 8125 + - name: lightstep_saas + type: LOGICAL_DNS + connect_timeout: 1s + lb_policy: ROUND_ROBIN + hosts: + - socket_address: + protocol: TCP + address: collector-grpc.lightstep.com + port_value: 443 + http2_protocol_options: {} + {% for service, options in clusters.iteritems() -%} + - {{ helper.internal_cluster_definition(service, options)|indent(2) }} + {% endfor %} +dynamic_resources: + deprecated_v1: + sds_config: + api_config_source: + cluster_names: + - sds + refresh_delay: 30s +cluster_manager: + outlier_detection: + event_log_path: /var/log/envoy/outlier_events.log +flags_path: /etc/envoy/flags 
+rate_limit_service: + grpc_service: + envoy_grpc: + cluster_name: ratelimit +tracing: + http: + name: envoy.lightstep + config: + collector_cluster: lightstep_saas + access_token_file: "/etc/envoy/lightstep_access_token" +runtime: + symlink_root: /srv/runtime_data/current + subdirectory: envoy + override_subdirectory: envoy_override +admin: + access_log_path: /var/log/envoy/admin_access.log + address: + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 9901 \ No newline at end of file diff --git a/configs/envoy_router.template.json b/configs/envoy_router.template.json deleted file mode 100644 index 5e4bbf3a58497..0000000000000 --- a/configs/envoy_router.template.json +++ /dev/null @@ -1,60 +0,0 @@ -{% import 'routing_helper.template.json' as helper with context -%} - -{ - "virtual_hosts": [ - { - "name": "www", - "require_ssl": "all", - "domains": ["www.yourcompany.net"], - "rate_limits": [ - { "actions": [ { "type": "remote_address" } ] } - ], - "routes": [ - { - "prefix": "/foo/bar", - "runtime": { - "key": "routing.www.use_service_2", - "default": 0 - }, - {{ helper.make_route('service2') }} - }, - { - "prefix": "/", - {{ helper.make_route('service1') }} - } - ] - }, - { - "name": "www_redirect", - "require_ssl": "all", - "domains": ["wwww.yourcompany.net"], - "rate_limits": [ - { "actions": [ { "type": "remote_address" } ] } - ], - "routes": [ - { - "prefix": "/", - "host_redirect": "www.yourcompany.net" - } - ] - }, - { - "name": "api", - "require_ssl": "external_only", - "domains": ["api.yourcompany.net"], - "rate_limits": [ - { "actions": [ { "type": "remote_address" } ] } - ], - "routes": [ - { - "path": "/foo/bar", - {{ helper.make_route('service3') }} - }, - { - "prefix": "/", - {{ helper.make_route('service1') }} - } - ] - } - ] -} diff --git a/configs/envoy_router_v2.template.yaml b/configs/envoy_router_v2.template.yaml new file mode 100644 index 0000000000000..e097b80395426 --- /dev/null +++ b/configs/envoy_router_v2.template.yaml @@ 
-0,0 +1,50 @@ +{% import 'routing_helper_v2.template.yaml' as helper with context -%} +name: local_route +virtual_hosts: +- name: www + domains: + - www.yourcompany.com + routes: + - match: + prefix: "/foo/bar" + runtime: + default_value: 0 + runtime_key: routing.www.use_service_2 + route: + {{ helper.make_route('service2')|indent(4) }} + - match: + prefix: "/" + route: + {{ helper.make_route('service1')|indent(4) }} + require_tls: ALL + rate_limits: + actions: + remote_address: {} +- name: www_redirect + domains: + - wwww.yourcompany.net + routes: + - match: + prefix: "/" + redirect: + host_redirect: www.yourcompany.net + require_tls: ALL + rate_limits: + - actions: + remote_address: {} +- name: api + domains: + - api.yourcompany.net + routes: + - match: + path: "/foo/bar" + route: + {{ helper.make_route('service3')|indent(4) }} + - match: + prefix: "/" + route: + {{ helper.make_route('service1')|indent(4) }} + require_tls: EXTERNAL_ONLY + rate_limits: + - actions: + remote_address: {} \ No newline at end of file diff --git a/configs/envoy_service_to_service.template.json b/configs/envoy_service_to_service.template.json deleted file mode 100644 index ebfee83c9bdcd..0000000000000 --- a/configs/envoy_service_to_service.template.json +++ /dev/null @@ -1,459 +0,0 @@ -{% import 'routing_helper.template.json' as helper -%} -{% import 'access_log_format_helper.template.json' as access_log_helper -%} - -{% macro ingress_listener(address) %} -{ - "address": "{{ address }}", - "filters": [ - { - "name": "http_connection_manager", - "config": { - "codec_type": "auto", - "tracing": { - "operation_name": "ingress" - }, - "idle_timeout_s": 840, - "access_log": [ - { - "path": "/var/log/envoy/ingress_http.log", - "filter": {"type": "not_healthcheck"}, - {{ access_log_helper.ingress_full() }} - }, - { - "path": "/var/log/envoy/ingress_http_error.log", - "filter": {"type": "logical_and", "filters": [ - {"type": "logical_or", "filters": [ - {"type": "status_code", "op": ">=", 
"value": 400}, - {"type": "status_code", "op": "=", "value": 0}, - {"type": "duration", "op": ">=", "value": 2000}, - {"type": "traceable_request"} - ] - }, - {"type": "not_healthcheck"} - ] - }, - {{ access_log_helper.ingress_sampled_log() }} - }, - { - "path": "/var/log/envoy/ingress_http_sampled.log", - "filter": {"type": "logical_and", "filters": [ - {"type": "not_healthcheck"}, - {"type": "runtime", "key": "access_log.ingress_http"} - ] - }, - {{ access_log_helper.ingress_sampled_log() }} - }], - "stat_prefix": "ingress_http", - "route_config": - { - "virtual_hosts": [ - { - "name": "local_service", - "domains": ["*"], - "routes": [ - { - "timeout_ms": 0, - "prefix": "/", - "headers": [ - {"name": "content-type", "value": "application/grpc"} - ], - "cluster": "local_service_grpc" - }, - { - "timeout_ms": 0, - "prefix": "/", - "cluster": "local_service" - }] - } - ] - }, - "filters": [ - { "name": "health_check", - "config": { - "pass_through_mode": true, "cache_time_ms": 2500, "endpoint": "/healthcheck" - } - }, - { "name": "buffer", - "config": { - "max_request_bytes": 5242880, - "max_request_time_s": 120 - } - }, - { "name": "router", "config": {} } - ] - } - }] -} -{% endmacro %} - -{ - "listeners": [ - {{ ingress_listener("tcp://0.0.0.0:9211") }}, - { - "address": "tcp://127.0.0.1:9001", - "filters": [ - { - "name": "http_connection_manager", - "config": { - "codec_type": "auto", - "tracing": { - "operation_name": "egress" - }, - "add_user_agent": true, - "idle_timeout_s": 840, - "access_log": [ - { - "path": "/var/log/envoy/egress_http_error.log", - "filter": {"type": "logical_or", "filters": [ - {"type": "status_code", "op": ">=", "value": 400}, - {"type": "duration", "op": ">=", "value": 2000} - ] - }, - {{ access_log_helper.egress_error_log() }} - }], - "stat_prefix": "egress_http", - "use_remote_address": true, - "route_config": - { - "virtual_hosts": [ - {% for service, options in internal_virtual_hosts.iteritems() -%} - { - "name": "{{ service }}", 
- {# NOTE: The following domain is synthetic and is used so that envoy deals with - devbox vs. prod, etc. #} - "domains": ["{{ service }}"], - "routes": [ - { - "prefix": "/", - {{ helper.make_route_internal(service, options) }} - {% if options.get('service_to_service_rate_limit', False) %} - , - "rate_limits":[ - { - "actions": [ - { - "type": "destination_cluster" - }, - { - "type" : "source_cluster" - } - ] - } - ] - {% endif %} - } - ] - }{% if not loop.last %},{% endif -%} - {% endfor -%} - ] - }, - "filters": [ - {"name": "rate_limit", - "config": { - "domain": "envoy_service_to_service" - } - }, - {"name": "grpc_http1_bridge", "config": {}}, - {"name": "router", "config": {}} - ] - } - }] - }, - {# The following listener is configured to use route discovery service. #} - { - "address": "tcp://127.0.0.1:9002", - "filters": [ - { - "name": "http_connection_manager", - "config": { - "codec_type": "auto", - "tracing": { - "operation_name": "egress" - }, - "add_user_agent": true, - "idle_timeout_s": 840, - "access_log": [ - { - "path": "/var/log/envoy/egress_http_error.log", - "filter": {"type": "logical_or", "filters": [ - {"type": "status_code", "op": ">=", "value": 400}, - {"type": "duration", "op": ">=", "value": 2000} - ] - }, - {{ access_log_helper.egress_error_log() }} - }], - "stat_prefix": "egress_http", - "use_remote_address": true, - "rds" : { - "cluster" : "rds", - "route_config_name": "9002_http_conn_man" - }, - "filters": [ - {"name": "rate_limit", - "config": { - "domain": "envoy_service_to_service" - } - }, - {"name": "grpc_http1_bridge", "config": {}}, - {"name": "router", "config": {}} - ] - } - }] - }{% if external_virtual_hosts|length > 0 or mongos_servers|length > 0 %},{% endif -%} - - {% for mapping in external_virtual_hosts -%} - { - "address": "{{ mapping['address'] }}", - "filters": [ - { - "name": "http_connection_manager", - "config": { - "codec_type": "auto", - "idle_timeout_s": 840, - "access_log": [ - { - "path": 
"/var/log/envoy/egress_{{ mapping['name'] }}_http_error.log", - "filter": {"type": "logical_or", "filters": [ - {"type": "status_code", "op": ">=", "value": 400}, - {"type": "status_code", "op": "=", "value": 0} - {% if mapping.get('log_high_latency_requests', True) %} - ,{"type": "duration", "op": ">=", "value": 2000} - {% endif %} - ] - } - {% if mapping.get('is_amzn_service', False) -%} - ,{{ access_log_helper.egress_error_amazon_service() }} - {% else -%} - ,{{ access_log_helper.egress_error_log() }} - {% endif %} - }], - "stat_prefix": "egress_{{ mapping['name'] }}", - "route_config": - { - "virtual_hosts": [ - {% for host in mapping['hosts'] -%} - { - "name": "egress_{{ host['name'] }}", - "domains": ["{{ host['domain'] }}"], - "routes": [ - { - "prefix": "/", - "cluster": "egress_{{ host['name'] }}", - "retry_policy": { "retry_on": "connect-failure" } - {% if host.get('host_rewrite', False) -%} - ,"host_rewrite": "{{host['host_rewrite']}}" - {% endif -%} - }] - }{% if not loop.last %},{% endif -%} - {% endfor -%} - ] - }, - "filters": [ - {% if mapping['name'] in ['dynamodb_iad', 'dynamodb_legacy'] %} - { "name": "http_dynamo_filter", "config": {}}, - {% endif %} - { "name": "router", "config": {} } - ] - } - }] - }{% if (mongos_servers|length > 0) or (mongos_servers|length == 0 and not loop.last ) %},{% endif -%} - {% endfor -%} - - {% for key, value in mongos_servers.iteritems() -%} - { - "address": "{{ value['address'] }}", - "filters": [ - {% if value.get('ratelimit', False) %} - { - "name": "ratelimit", - "config": { - "stat_prefix": "{{ key }}", - "domain": "envoy_mongo_cps", - "descriptors": [[{"key": "database", "value": "{{ key }}"}]] - } - }, - {% endif %} - { - "name": "mongo_proxy", - "config": { - "stat_prefix": "{{ key }}", - "access_log": "/var/log/envoy/mongo_{{ key }}.log" - } - }, - { - "name": "tcp_proxy", - "config": { - "stat_prefix": "mongo_{{ key }}", - "route_config": { - "routes": [ - { - "cluster": "mongo_{{ key }}" - } - ] - } - } 
- }] - }{% if not loop.last %},{% endif -%} - {% endfor -%} - ], - - "admin": { "access_log_path": "/var/log/envoy/admin_access.log", - "address": "tcp://0.0.0.0:9901" }, - "flags_path": "/etc/envoy/flags", - "statsd_tcp_cluster_name": "statsd", - - "tracing": { - "http": { - "driver": { - "type": "lightstep", - "config": { - "access_token_file": "/etc/envoy/lightstep_access_token", - "collector_cluster": "lightstep_saas" - } - } - } - }, - - "rate_limit_service": { - "type": "grpc_service", - "config": { - "cluster_name": "ratelimit" - } - }, - - "runtime": { - "symlink_root": "/srv/runtime_data/current", - "subdirectory": "envoy", - "override_subdirectory": "envoy_override" - }, - - "cluster_manager": { - "sds": { - "cluster": { - "name": "sds", - "connect_timeout_ms": 250, - "type": "strict_dns", - "lb_type": "round_robin", - "hosts": [{"url": "tcp://discovery.yourcompany.net:80"}] - }, - "refresh_delay_ms": 30000 - }, - - "cds": { - "cluster": { - "name": "cds", - "connect_timeout_ms": 250, - "type": "strict_dns", - "lb_type": "round_robin", - "hosts": [{"url": "tcp://cds.yourcompany.net:80"}] - } - }, - - "clusters": [ - {% for service, options in internal_virtual_hosts.iteritems() -%} - { - {{ helper.internal_cluster_definition(service, options) }} - }, - {% endfor -%} - - {% for mapping in external_virtual_hosts -%} - {% for host in mapping['hosts'] -%} - { - "name": "egress_{{ host['name'] }}", - {% if host.get('ssl', False) -%} - "ssl_context": { - "ca_cert_file": "certs/cacert.pem" - {% if host.get('sni', False) -%} - ,"sni": "{{ host['sni'] }}" - {% endif -%} - {% if host.get('verify_subject_alt_name', False) -%} - ,"verify_subject_alt_name": ["{{ host['verify_subject_alt_name'] }}"] - {% endif -%} - }, - "connect_timeout_ms": 1000, - {% else -%} - "connect_timeout_ms": 250, - {% endif -%} - "type": "{{ mapping.get("cluster_type", "strict_dns") }}", - "lb_type": "round_robin", - "hosts": [{"url": "tcp://{{ host['remote_address'] }}"}] - }, - {% endfor 
-%} - {% endfor -%} - - {% for key, value in mongos_servers.iteritems() -%} - { - "name": "mongo_{{ key }}", - "connect_timeout_ms": 250, - "type": "strict_dns", - "lb_type": "random", {# We use random LB policy here because we don't HC mongo routers. If - a router drops, we want to converge on an even distribution. - Without HC, least connection would perform terribly as we would - continue to hit the bad router. #} - "hosts": [ - {% for server in value['hosts'] -%} - {% set host = server.split(':')[0] -%} - {% set port = server.split(':')[1] -%} - {"url": "tcp://{{ host }}:{{ port }}"}{% if not loop.last %},{% endif %} - {% endfor -%} - ] - }, - {% endfor -%} - { - "name": "local_service", - "connect_timeout_ms": 250, - "type": "static", - "lb_type": "round_robin", - "circuit_breakers": { - "default": { - "max_pending_requests": 30, {# Apply back pressure quickly at the local host level. NOTE: - This only is applicable with the HTTP/1.1 connection - pool. #} - "max_connections": 100 - } - }, - "hosts": [{"url": "tcp://127.0.0.1:8080"}] - - }, - { - "name": "local_service_grpc", - "connect_timeout_ms": 250, - "type": "static", - "lb_type": "round_robin", - "features": "http2", - "circuit_breakers": { - "default": { - "max_requests": 200 - } - }, - "hosts": [{"url": "tcp://127.0.0.1:8081"}] - }, - { - "name": "rds", - "connect_timeout_ms": 250, - "type": "strict_dns", - "lb_type": "round_robin", - "hosts": [{"url": "tcp://rds.yourcompany.net:80"}] - }, - { - "name": "statsd", - "connect_timeout_ms": 250, - "type": "static", - "lb_type": "round_robin", - "hosts": [{"url": "tcp://127.0.0.1:8125"}] - }, - { - "name": "lightstep_saas", - "features": "http2", - "ssl_context": { - "ca_cert_file": "certs/cacert.pem", - "verify_subject_alt_name": ["collector-grpc.lightstep.com"] - }, - "connect_timeout_ms": 1000, - "type": "logical_dns", - "lb_type": "round_robin", - "hosts": [{"url": "tcp://collector-grpc.lightstep.com:443"}] - } - ] - } -} diff --git 
a/configs/envoy_service_to_service_v2.template.yaml b/configs/envoy_service_to_service_v2.template.yaml new file mode 100644 index 0000000000000..e91f08d157a91 --- /dev/null +++ b/configs/envoy_service_to_service_v2.template.yaml @@ -0,0 +1,482 @@ +{% import 'routing_helper_v2.template.yaml' as helper -%} +{% import 'access_log_format_helper_v2.template.yaml' as access_log_helper -%} +{% macro ingress_listener(protocol, address, port_value) -%} +- address: + socket_address: + protocol: {{protocol}} + address: {{address}} + port_value: {{port_value}} + filter_chains: + - filters: + - name: envoy.http_connection_manager + config: + codec_type: AUTO + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: + - "*" + routes: + - match: + prefix: "/" + headers: + - name: content-type + value: application/grpc + route: + cluster: local_service_grpc + - match: + prefix: "/" + route: + cluster: local_service + http_filters: + - name: envoy.health_check + config: + pass_through_mode: true + endpoint: "/healthcheck" + cache_time: 2.5s + - name: envoy.buffer + config: + max_request_bytes: 5242880 + max_request_time: 120s + - name: envoy.router + config: {} + access_log: + - name: envoy.file_access_log + filter: + not_health_check_filter: {} + config: + path: "/var/log/envoy/ingress_http.log" + {{ access_log_helper.ingress_full()|indent(10)}} + - name: envoy.file_access_log + filter: + and_filter: + filters: + - or_filter: + filters: + - status_code_filter: + comparison: + op: GE + value: + default_value: 400 + runtime_key: access_log.access_error.status + - status_code_filter: + comparison: + op: EQ + value: + default_value: 0 + runtime_key: access_log.access_error.status + - duration_filter: + comparison: + op: GE + value: + default_value: 2000 + runtime_key: access_log.access_error.duration + - not_health_check_filter: {} + config: + path: "/var/log/envoy/ingress_http_error.log" + {{ 
access_log_helper.ingress_sampled_log()|indent(10)}} + - name: envoy.file_access_log + filter: + and_filter: + filters: + - not_health_check_filter: {} + - runtime_filter: + runtime_key: access_log.ingress_http + config: + path: "/var/log/envoy/ingress_http_sampled.log" + {{ access_log_helper.ingress_sampled_log()|indent(10)}} + idle_timeout: 840s + tracing: + operation_name: INGRESS +{% endmacro -%} +static_resources: + listeners: + {{ ingress_listener("tcp", "0.0.0.0", 9211) | indent(2)}} + - address: + socket_address: + protocol: TCP + port_value: 9001 + address: 127.0.0.1 + filter_chains: + - filters: + - name: envoy.http_connection_manager + config: + codec_type: AUTO + stat_prefix: egress_http + route_config: + name: local_route + virtual_hosts: + {% for service, options in internal_virtual_hosts.iteritems() %} + - name: {{ service }} + domains: + - {{ service }} + routes: + - match: + prefix: "/" + route: + {{ helper.make_route_internal(service, options)|indent(16) }} + {% endfor %} + add_user_agent: true + tracing: + operation_name: EGRESS + idle_timeout: 840s + access_log: + - name: envoy.file_access_log + filter: + or_filter: + filters: + - status_code_filter: + comparison: + op: GE + value: + default_value: 400 + runtime_key: access_log.access_error.status + - duration_filter: + comparison: + op: GE + value: + default_value: 2000 + runtime_key: access_log.access_error.duration + - traceable_filter: {} + config: + path: "/var/log/envoy/egress_http_error.log" + {{ access_log_helper.egress_error_log()|indent(10)}} + use_remote_address: true + http_filters: + - name: envoy.rate_limit + config: + domain: envoy_service_to_service + - name: envoy.grpc_http1_bridge + config: {} + - name: envoy.router + config: {} + + - address: + socket_address: + protocol: TCP + port_value: 9002 + address: 127.0.0.1 + filter_chains: + - filters: + - name: envoy.http_connection_manager + config: + codec_type: AUTO + stat_prefix: egress_http + rds: + config_source: + 
api_config_source: + api_type: GRPC + cluster_names: + - rds + route_config_name: rds_config_for_listener_1 + add_user_agent: true + tracing: + operation_name: EGRESS + idle_timeout: 840s + access_log: + - name: envoy.file_access_log + filter: + or_filter: + filters: + - status_code_filter: + comparison: + op: GE + value: + default_value: 400 + runtime_key: access_log.access_error.status + - duration_filter: + comparison: + op: GE + value: + default_value: 2000 + runtime_key: access_log.access_error.duration + - traceable_filter: {} + config: + path: "/var/log/envoy/egress_http_error.log" + {{ access_log_helper.egress_error_log()|indent(10) }} + use_remote_address: true + http_filters: + - name: envoy.rate_limit + config: + domain: envoy_service_to_service + - name: envoy.grpc_http1_bridge + config: {} + - name: envoy.router + config: {} + {% if external_virtual_hosts|length > 0 or mongos_servers|length > 0 %}{% endif -%} + {% for mapping in external_virtual_hosts -%} + - name: "{{ mapping['address']}}" + address: + socket_address: + address: "{{ mapping['address'] }}" + protocol: TCP + port_value: 9901 + filter_chains: + - filters: + - name: envoy.http_connection_manager + config: + codec_type: AUTO + idle_timeout: 840s + stat_prefix: egress_{{ mapping['name'] }} + #update access_logs here + route_config: + virtual_hosts: + {% for host in mapping['hosts'] %} + - name: egress_{{ host['name'] }} + domains: + - "{{ host['domain'] }}" + routes: + - match: + prefix: "/" + route: + cluster: egress_{{ host['name'] }} + retry_policy: + retry_on: connect-failure + {% if host.get('host_rewrite', False) %} + host_rewrite: "{{host['host_rewrite']}}" + {% endif %} + {% endfor %} + http_filters: + {% if mapping['name'] in ['dynamodb_iad', 'dynamodb_legacy'] -%} + - name: envoy.http_dynamo_filter + config: {} + {% endif -%} + - name: envoy.router + config: {} + access_log: + - name: envoy.file_access_log + filter: + or_filter: + filters: + - status_code_filter: + comparison: + 
op: GE + value: + default_value: 400 + runtime_key: access_log.access_error.status + - status_code_filter: + comparison: + op: EQ + value: + default_value: 0 + runtime_key: access_log.access_error.status + {% if mapping.get('log_high_latency_requests', True) %} + - duration_filter: + comparison: + op: GE + value: + default_value: 2000 + runtime_key: access_log.access_error.duration + {% endif %} + config: + path: "/var/log/envoy/egress_{{ mapping['name'] }}_http_error.log" + {% if mapping.get('is_amzn_service', False) -%} + {{ access_log_helper.egress_error_amazon_service()|indent(10) }} + {% else -%} + {{ access_log_helper.egress_error_log()|indent(10) }} + {% endif %} + {% if (mongos_servers|length > 0) or (mongos_servers|length == 0 and not loop.last ) %}{% endif -%} + {% endfor -%} + {% for key, value in mongos_servers.iteritems() -%} + - name : "{{ value['address'] }}" + address: + socket_address: + address: "{{ value['address'] }}" + protocol: TCP + port_value: 9003 + filter_chains: + - filters: + - name: envoy.tcp_proxy + config: + stat_prefix: mongo_{{ key }} + cluster: mongo_{{ key }} + - name: envoy.mongo_proxy + config: + stat_prefix: "{{ key }}" + access_log: "/var/log/envoy/mongo_{{ key }}.log" + {% if value.get('ratelimit', False) %} + - name: envoy.ratelimit + config: + stat_prefix: "{{ key }}" + domain: envoy_mongo_cps + descriptors: + entries: + - key: database + value: "{{ key }}" + {% endif %} + {% endfor -%} + clusters: + {% for service, options in internal_virtual_hosts.iteritems() -%} + - {{ helper.internal_cluster_definition(service, options)|indent(2)}} + {% endfor -%} + {% for mapping in external_virtual_hosts -%} + {% for host in mapping['hosts'] -%} + - name: egress_{{ host['name'] }} + {% if host.get('ssl', False) %} + tls_context: + common_tls_context: + validation_context: + trusted_ca: + filename: certs/cacert.pem + {% if host.get('verify_subject_alt_name', False) %} + verify_subject_alt_name: "{{host['verify_subject_alt_name'] }}" + 
{% endif %} + {% if host.get('sni', False) %} + sni: "{{ host['sni'] }}" + {% endif %} + connect_timeout: 1s + {% else %} + connect_timeout: 0.25s + {% endif %} + type: LOGICAL_DNS + lb_policy: ROUND_ROBIN + hosts: + - socket_address: + address: {{ host['remote_address'] }} + port_value: {{ host['port_value'] }} + protocol: {{ host['protocol'] }} + {% endfor -%} + {% endfor -%} + {% for key, value in mongos_servers.iteritems() -%} + - name: mongo_{{ key }} + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: RANDOM + hosts: + {% for server in value['hosts'] -%} + - socket_address: + protocol: {{ server['protocol'] }} + port_value: {{ server['port_value'] }} + address: {{ server['address'] }} + {% endfor -%} + {% endfor %} + - name: main_website + connect_timeout: 0.25s + type: LOGICAL_DNS + # Comment out the following line to test on v6 networks + dns_lookup_family: V4_ONLY + lb_policy: ROUND_ROBIN + hosts: + - socket_address: + address: main_website.com + port_value: 443 + tls_context: { sni: www.main_website.com } + - name: local_service + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + hosts: + - socket_address: + protocol: TCP + address: 127.0.0.1 + port_value: 8080 + circuit_breakers: + thresholds: + max_pending_requests: 30 + max_connections: 100 + - name: local_service_grpc + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + http2_protocol_options: {} + hosts: + - socket_address: + protocol: TCP + address: 127.0.0.1 + port_value: 8081 + circuit_breakers: + thresholds: + max_requests: 200 + dns_lookup_family: V4_ONLY + - name: rds + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + hosts: + - socket_address: + protocol: TCP + address: rds.yourcompany.net + port_value: 80 + dns_lookup_family: V4_ONLY + - name: statsd + connect_timeout: 0.25s + type: STATIC + lb_policy: ROUND_ROBIN + hosts: + - socket_address: + protocol: TCP + address: 127.0.0.1 + port_value: 8125 + dns_lookup_family: V4_ONLY + - name: 
lightstep_saas + connect_timeout: 1s + type: LOGICAL_DNS + lb_policy: ROUND_ROBIN + hosts: + - socket_address: + protocol: TCP + address: collector-grpc.lightstep.com + port_value: 443 + http2_protocol_options: + max_concurrent_streams: 100 + tls_context: + common_tls_context: + validation_context: + trusted_ca: + filename: certs/cacert.pem + verify_subject_alt_name: + - collector-grpc.lightstep.com + - name: cds_cluster + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + hosts: + - socket_address: + protocol: TCP + address: cds.yourcompany.net + port_value: 80 + - name: sds + connect_timeout: 0.25s + type: STRICT_DNS + lb_policy: ROUND_ROBIN + hosts: + - socket_address: + protocol: TCP + address: discovery.yourcompany.net + port_value: 80 +dynamic_resources: + cds_config: + api_config_source: + cluster_names: + - cds_cluster + refresh_delay: 30s +cluster_manager: {} +flags_path: "/etc/envoy/flags" +stats_sinks: + - name: envoy.statsd + config: + tcp_cluster_name: statsd +watchdog: {} +tracing: + http: + name: envoy.lightstep + config: + access_token_file: "/etc/envoy/lightstep_access_token" + collector_cluster: lightstep_saas +rate_limit_service: + grpc_service: + envoy_grpc: + cluster_name: ratelimit +runtime: + symlink_root: "/srv/runtime_data/current" + subdirectory: envoy + override_subdirectory: envoy_override +admin: + access_log_path: /var/log/envoy/admin_access.log + address: + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 9901 \ No newline at end of file diff --git a/configs/google_com_proxy.v2.yaml b/configs/google_com_proxy.v2.yaml index d0c36bc68c9ed..b97092748b9ec 100644 --- a/configs/google_com_proxy.v2.yaml +++ b/configs/google_com_proxy.v2.yaml @@ -1,13 +1,18 @@ admin: access_log_path: /tmp/admin_access.log address: - socket_address: { address: 127.0.0.1, port_value: 9901 } - + socket_address: + protocol: TCP + address: 127.0.0.1 + port_value: 9901 static_resources: listeners: - name: listener_0 address: - 
socket_address: { address: 0.0.0.0, port_value: 10000 } + socket_address: + protocol: TCP + address: 0.0.0.0 + port_value: 10000 filter_chains: - filters: - name: envoy.http_connection_manager @@ -19,8 +24,11 @@ static_resources: - name: local_service domains: ["*"] routes: - - match: { prefix: "/" } - route: { host_rewrite: www.google.com, cluster: service_google } + - match: + prefix: "/" + route: + host_rewrite: www.google.com + cluster: service_google http_filters: - name: envoy.router clusters: @@ -30,5 +38,8 @@ static_resources: # Comment out the following line to test on v6 networks dns_lookup_family: V4_ONLY lb_policy: ROUND_ROBIN - hosts: [{ socket_address: { address: google.com, port_value: 443 }}] + hosts: + - socket_address: + address: google.com + port_value: 443 tls_context: { sni: www.google.com } diff --git a/configs/routing_helper.template.json b/configs/routing_helper.template.json deleted file mode 100644 index 9d505071b7f24..0000000000000 --- a/configs/routing_helper.template.json +++ /dev/null @@ -1,42 +0,0 @@ -{% macro make_route_internal(cluster, options) %} - {% if 'timeout_ms' in options %} - "timeout_ms": {{ options['timeout_ms'] }}, - {% endif %} - "retry_policy": { - "retry_on": "{{ options.get('retry_on', 'connect-failure') }}" - }, - "cluster": "{{ cluster }}" -{% endmacro %} - -{% macro make_route(cluster) %} - {{ make_route_internal(cluster, clusters.get(cluster, {})) }} -{% endmacro %} - -{% macro internal_cluster_definition(service, options) %} - "name": "{{ service }}", - "connect_timeout_ms": 250, - "type": "sds", - "lb_type": "least_request", - "features": "http2", - "service_name": "{{ service }}", - {% if 'max_requests' in options %} - "circuit_breakers": { - "default": { - "max_requests": {{ options['max_requests'] }} - } - }, - {% endif %} - "health_check": { - "type": "http", - "timeout_ms": 2000, - "interval_ms": 5000, - "interval_jitter_ms": 5000, - "unhealthy_threshold": 2, - "healthy_threshold": 2, - "path": 
"/healthcheck", - "service_name": "{{ service }}" - }, - "outlier_detection": { - "success_rate_stdev_factor": 1900 - } -{% endmacro %} diff --git a/configs/routing_helper_v2.template.yaml b/configs/routing_helper_v2.template.yaml new file mode 100644 index 0000000000000..ab16769bfd0d3 --- /dev/null +++ b/configs/routing_helper_v2.template.yaml @@ -0,0 +1,43 @@ +{%- macro make_route_internal(cluster, options) %} + cluster: {{ cluster }} + {%- if 'timeout' in options -%} + timeout: {{ options['timeout'] }} + {% endif %} + retry_policy: + retry_on: "{{ options.get('retry_on', 'connect-failure') }}" +{%- endmacro %} +{%- macro make_route(cluster) -%} + {{ make_route_internal(cluster, clusters.get(cluster, {})) }} +{%- endmacro -%} +{%- macro internal_cluster_definition(service, options) -%} + name: {{ service }} + connect_timeout: 0.250s + type: EDS + eds_cluster_config: + eds_config: + api_config_source: + api_type: REST + cluster_names: + - sds + refresh_delay: 30s + service_name: {{ service }} + lb_policy: LEAST_REQUEST + {% if 'max_requests' in options -%} + circuit_breakers: + thresholds: + - priority: DEFAULT + max_requests: {{ options['max_requests'] }} + {% endif -%} + health_checks: + - http_health_check: + path: /healthcheck + service_name: {{ service }} + timeout: 2s + interval: 5s + interval_jitter: 5s + unhealthy_threshold: 2 + healthy_threshold: 2 + outlier_detection: + success_rate_stdev_factor: 1900 + http2_protocol_options: {} +{% endmacro -%} \ No newline at end of file diff --git a/test/config_test/example_configs_test.cc b/test/config_test/example_configs_test.cc index 3b9c101b96d19..b83cf63b8e5c4 100644 --- a/test/config_test/example_configs_test.cc +++ b/test/config_test/example_configs_test.cc @@ -17,10 +17,11 @@ TEST(ExampleConfigsTest, All) { #ifdef __APPLE__ // freebind/freebind.yaml is not supported on OS X and disabled via Bazel.
- EXPECT_EQ(29UL, ConfigTest::run(directory)); + EXPECT_EQ(27UL, ConfigTest::run(directory)); #else - EXPECT_EQ(30UL, ConfigTest::run(directory)); + EXPECT_EQ(28UL, ConfigTest::run(directory)); #endif + ConfigTest::testMerge(); ConfigTest::testIncompatibleMerge(); diff --git a/test/server/config_validation/server_test.cc b/test/server/config_validation/server_test.cc index 00150ffc2e691..e05f1a659023b 100644 --- a/test/server/config_validation/server_test.cc +++ b/test/server/config_validation/server_test.cc @@ -58,8 +58,7 @@ TEST_P(ValidationServerTest, Validate) { // the filesystem for TLS certs, etc. In the meantime, these are the example configs that work // as-is. INSTANTIATE_TEST_CASE_P(ValidConfigs, ValidationServerTest, - ::testing::Values("front-envoy.yaml", "google_com_proxy.json", - "google_com_proxy.yaml", "google_com_proxy.v2.yaml", + ::testing::Values("front-envoy.yaml", "google_com_proxy.v2.yaml", "s2s-grpc-envoy.yaml", "service-envoy.yaml")); // Just make sure that all configs can be ingested without a crash. Processing of config files