diff --git a/.circleci/config.yml b/.circleci/config.yml index 4374d770e..e26d70eb6 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -41,6 +41,16 @@ jobs: - store_artifacts: path: /root/project/generated destination: / + benchmark: + docker: + - image: *envoy-build-image + resource_class: xlarge + steps: + - checkout + - run: ci/do_ci.sh benchmark_with_own_binaries + - store_artifacts: + path: /root/project/generated + destination: / asan: docker: - image: *envoy-build-image @@ -83,5 +93,6 @@ workflows: - coverage - asan - tsan + - benchmark - format - docker diff --git a/WORKSPACE b/WORKSPACE index a13e6dad8..5d24c1556 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -25,13 +25,13 @@ load("@envoy//bazel:dependency_imports.bzl", "envoy_dependency_imports") envoy_dependency_imports() # For PIP support: -load("@rules_python//python:pip.bzl", "pip_import", "pip_repositories") +load("@rules_python//python:pip.bzl", "pip3_import", "pip_repositories") pip_repositories() # This rule translates the specified requirements.txt into # @my_deps//:requirements.bzl, which itself exposes a pip_install method. 
-pip_import( +pip3_import( name = "python_pip_deps", requirements = "//:requirements.txt", ) diff --git a/benchmarks/.gitignore b/benchmarks/.gitignore new file mode 100644 index 000000000..e540f5b38 --- /dev/null +++ b/benchmarks/.gitignore @@ -0,0 +1 @@ +tmp/* \ No newline at end of file diff --git a/benchmarks/BUILD b/benchmarks/BUILD index 820d7ac03..9b6ed35b1 100644 --- a/benchmarks/BUILD +++ b/benchmarks/BUILD @@ -1,21 +1,38 @@ -load("@rules_python//python:defs.bzl", "py_test") -load( - "@envoy//bazel:envoy_build_system.bzl", - "envoy_package", -) +load("@rules_python//python:defs.bzl", "py_binary", "py_library", "py_test") licenses(["notice"]) # Apache 2 -envoy_package() +py_binary( + name = "benchmarks", + srcs = [ + "benchmarks.py", + ], + deps = [ + ":benchmarks_envoy_proxy_lib", + "//test/integration:integration_test_base_lean", + ], +) py_test( - name = "benchmarks", + name = "test_benchmarks", srcs = [ "benchmarks.py", - "test_benchmark.py", + "test/test_discovery.py", ], + main = "benchmarks.py", deps = [ + ":benchmarks_envoy_proxy_lib", "//test/integration:integration_test_base", - "//test/integration:test_integration_basics_lib", + ], +) + +py_library( + name = "benchmarks_envoy_proxy_lib", + srcs = [ + "envoy_proxy.py", + ], + data = [ + "configurations/envoy_proxy.yaml", + "test/templates/simple_plot.html", ], ) diff --git a/benchmarks/README.md b/benchmarks/README.md new file mode 100644 index 000000000..94a145be7 --- /dev/null +++ b/benchmarks/README.md @@ -0,0 +1,130 @@ +# Benchmarking testsuite + +The NH benchmark test suite builds on top Nighthawk's integration test framework, and +can be used to scaffold tests and obtain latency reports as well as flamegraphs. 
+ +## Immediate goals + +The goal is to be able to: + +- facilitate integration into CI flows (via fully dockerized flow) +- facilitate A/B testing by developers +- run the suite against arbitrary Envoy revisions +- persist profile dumps, flamegraphs, and latency numbers per test +- offer stock tests, but also allow scaffolding consumer-specific tests + +## Example output visualization + +The benchmark will drop a visual in each test directory. + + + +## Example: Docker based execution, scavaging benchmark/ + +This scripts shows how to use the benchmarking suite. +It will run a selection of an example [benchmarks](test/test_discovery.py) +extracted from `/benchmarks`, which injects Envoy between the benchmark client and test server. + +```bash +git clone https://github.com/oschaaf/nighthawk.git benchmark-test +cd benchmark-test +bazel build //benchmarks:benchmarks + +# Specify the ip address family we'll be using. [v4only|v6only|all] +export ENVOY_IP_TEST_VERSIONS=v4only +# Explicit tmpdir for OSX Docker, to make sure we'll use a volume that works when +export TMPDIR="$(pwd)/benchmarks/tmp" +# Nighthawk tools will be sourced from this Docker image +export NH_DOCKER_IMAGE="envoyproxy/nighthawk-dev:latest" +# Envoy Docker image that we'll use to inject the Envoy proxy +export ENVOY_DOCKER_IMAGE_TO_TEST="envoyproxy/envoy-dev:74290ef76a76fbbf50f072dc33438791f93f68c7" +# Envoy is called 'Envoy' in the Envoy Docker image. +export ENVOY_PATH="envoy" + +# run all tests starting with test_http_h1_small in benchmarks/ +bazel-bin/benchmarks/benchmarks --log-cli-level=info -vvvv -k test_http_h1_small benchmarks/ +``` + +## Example: running with binaries + +This will build the Nighthawk binaries from the C++ code, and use those to +execute the benchmarks. Environment variable `ENVOY_PATH` can be used to +specify a custom Envoy binary to use to inject as a proxy between the test +client and server. 
If not set, the benchmark suite will fall back to configuring +Nighthawk's test server for that. Note that the build can be a lengthy process. + +```bash +git clone https://github.com/oschaaf/nighthawk.git benchmark-test +cd benchmark-test +bazel test \ + --test_summary=detailed \ + --test_output=all \ + --test_arg=--log-cli-level=info \ + --test_env=ENVOY_IP_TEST_VERSIONS=v4only \ + --test_env=HEAPPROFILE= \ + --test_env=HEAPCHECK= \ + --cache_test_results=no \ + --compilation_mode=opt \ + --cxxopt=-g \ + --cxxopt=-ggdb3 \ + //benchmarks:* +``` + +## Example: fully dockerized flow + +The framework can be run via Docker and used that way to execute +Python benchmarks scripts not sourced from this repository, but +elsewhere. An example: + +```bash +# This script runs the dockerized benchmarking framework, which in +# turn will pull Nighthawk and Envoy in via Docker. + +set -eo pipefail +set +x +set -u + +# The benchmark logs and artifacts will be dropped here +OUTDIR="/my-artifacts-dir/" +# Used to map the test that we want to see executed into the Docker container +# Note: the contents could be fetched via http, for example. +TEST_DIR="/dir-that-has-my-tests/" + +# Rebuild the Docker in case something changed. +./docker_build.sh && +docker run -it --rm \ + -v "/var/run/docker.sock:/var/run/docker.sock:rw" \ + -v "${OUTDIR}:${OUTDIR}:rw" \ + -v "${TEST_DIR}:/usr/local/bin/benchmarks/benchmarks.runfiles/nighthawk/benchmarks/external_tests/" \ + --network=host \ + --env NH_DOCKER_IMAGE="envoyproxy/nighthawk-dev:latest" \ + --env ENVOY_DOCKER_IMAGE_TO_TEST="envoyproxy/envoy-dev:f61b096f6a2dd3a9c74b9a9369a6ea398dbe1f0f" \ + --env TMPDIR="${OUTDIR}" \ + oschaaf/benchmark-dev:latest ./benchmarks --log-cli-level=info -vvvv +``` + +# TODOs + +- Copy out the artifacts and push those to a gcp bucket. Current status: + - cpu profiles are dumped to tmp per test (named according to the test). ideally we'd + also dump flamegraph svg's +- Enhance the UI. 
Currently we rely on CircleCI to directory-list our artifacts. This is + is pretty bare-bones, ideally we'd have something more fancy and we would be able to + visualize test data historically. +- Use taskset/cpuset when starting processes. +- Offer a docker image with binaries pre-built for profiling + the python test framework. +- Multi-origin support. The integration tests support it, but this isn't fitted into + the benchmark framework yet. This _might_ just be a doc issue. + +# FUTURE + +- The current status can collect CPU profiles, but these are useless + without access to the binary & libs that have been involved in producing them. +- profiling / flamegraphing via perf/bcc tools +- Allow injection of other proxies: nginx, haproxy +- Allow using alt clients, like Fortio & wrk2 +- An app that integrates fortios UI, pprof's web UI +- Have a mode where nighthawk_test_server provides high-res control timings in its + access logs +- The ability to repeat the runs multiple times and obtain stats, e.g. how much variance there is, mean, etc. +- The ability to do A/B testing, similar to https://github.com/envoyproxy/envoy-perf/blob/master/siege/siege.py#L3. diff --git a/benchmarks/benchmark-visual-example.png b/benchmarks/benchmark-visual-example.png new file mode 100644 index 000000000..be829fd69 Binary files /dev/null and b/benchmarks/benchmark-visual-example.png differ diff --git a/benchmarks/benchmarks.py b/benchmarks/benchmarks.py index 599ec6ba0..654c5c324 100644 --- a/benchmarks/benchmarks.py +++ b/benchmarks/benchmarks.py @@ -1,24 +1,13 @@ #!/usr/bin/env python3 -"""@package integration_test.py -Entry point for our integration testing +"""@package benchmarks. + +Entry point for benchmark execution. 
""" -import logging import os import sys import pytest if __name__ == '__main__': path = os.path.dirname(os.path.realpath(__file__)) - test_selection_arg = sys.argv[1] if len(sys.argv) > 1 else "" - r = pytest.main([ - "--rootdir=" + path, - "-vvvv", - "--showlocals", # Don't abbreviate/truncate long values in asserts. - "-p", - "no:cacheprovider", # Avoid a bunch of warnings on readonly filesystems - "-x", - path, - "--log-cli-level", - "info" - ]) + r = pytest.main(["--rootdir=" + path, "-x", path, "-p", "no:cacheprovider", *sys.argv]) exit(r) diff --git a/benchmarks/configurations/envoy_proxy.yaml b/benchmarks/configurations/envoy_proxy.yaml new file mode 100644 index 000000000..a7e984dc3 --- /dev/null +++ b/benchmarks/configurations/envoy_proxy.yaml @@ -0,0 +1,42 @@ +admin: + access_log_path: $tmpdir/envoyproxy-admin-access.log + profile_path: $tmpdir/envoyproxy.prof + address: + socket_address: { address: $proxy_ip, port_value: 0 } +static_resources: + listeners: + - address: + socket_address: + address: $proxy_ip + port_value: 0 + filter_chains: + - filters: + - name: envoy.http_connection_manager + config: + generate_request_id: false + codec_type: auto + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: service + domains: + - "*" + routes: + - match: + prefix: / + route: + cluster: local_service + http_filters: + - name: envoy.router + config: + dynamic_stats: false + clusters: + - name: local_service + connect_timeout: 0.25s + type: strict_dns + lb_policy: round_robin + hosts: + - socket_address: + address: $server_ip + port_value: $server_port diff --git a/benchmarks/docker/Dockerfile-benchmark b/benchmarks/docker/Dockerfile-benchmark new file mode 100644 index 000000000..2a250db8a --- /dev/null +++ b/benchmarks/docker/Dockerfile-benchmark @@ -0,0 +1,14 @@ +FROM frolvlad/alpine-python3 + +RUN apk add docker openrc +RUN rc-update add docker boot + +ADD benchmarks /usr/local/bin/benchmarks + +WORKDIR 
/usr/local/bin/benchmarks + +ENV ENVOY_PATH="envoy" \ + RUNFILES_DIR="/usr/local/bin/benchmarks/benchmarks.runfiles/" \ + ENVOY_IP_TEST_VERSIONS="v4only" + +CMD ["./benchmarks", "--help"] \ No newline at end of file diff --git a/benchmarks/docker/docker_build.sh b/benchmarks/docker/docker_build.sh new file mode 100755 index 000000000..e31ed6b80 --- /dev/null +++ b/benchmarks/docker/docker_build.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# Builds a docker image nighthawk-dev:latest containing the stripped binaries +# based on a pre-build bazel-bin directory (with "-c opt" set). + +set -eo pipefail +set +x +set -u + +# NOTE: explicit no -x for verbose commands. Because this is run in CI, doing so may result in +# publishing sensitive information into public CI logs if someone makes a change in a +# consuming script that is off guard. + +DOCKER_NAME="benchmark" +DOCKER_IMAGE_PREFIX="${USER}/${DOCKER_NAME}" +BAZEL_BIN="$(bazel info bazel-bin)" +WORKSPACE="$(bazel info workspace)" +bazel build //benchmarks:benchmarks +TMP_DIR="$(mktemp -d)" +PUSH=${PUSH:-0} + +echo "Preparing docker build context in ${TMP_DIR}" +# We flatten any symlinks to make this work on Linux (OSX doesn't need this) +cp -Lr "${WORKSPACE}/benchmarks/docker/" "${TMP_DIR}/" +cp -Lr "${BAZEL_BIN}/benchmarks" "${TMP_DIR}/" + + +cd "${TMP_DIR}" +echo "running docker build ... " +docker build -f "${TMP_DIR}/docker/Dockerfile-${DOCKER_NAME}" -t "${DOCKER_IMAGE_PREFIX}-dev:latest" . +rm -rf "${TMP_DIR}" +echo "docker build finished" + +if [[ $PUSH == "1" ]]; then + echo "pushing ${DOCKER_IMAGE_PREFIX}-dev:latest .." 
+ docker tag "${DOCKER_IMAGE_PREFIX}-dev:latest" "${DOCKER_IMAGE_PREFIX}-dev:latest" + docker push "${DOCKER_IMAGE_PREFIX}-dev:latest" + echo "docker image pushed" +fi \ No newline at end of file diff --git a/benchmarks/envoy_proxy.py b/benchmarks/envoy_proxy.py new file mode 100644 index 000000000..9e7fd1909 --- /dev/null +++ b/benchmarks/envoy_proxy.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python3 +"""@package envoy_proxy. + +Contains customized fixture & EnvoyProxyServer abstraction for use in tests. +""" + +import logging +import os +import pytest + +from test.integration.integration_test_fixtures import (HttpIntegrationTestBase, + determineIpVersionsFromEnvironment) +from test.integration.nighthawk_test_server import NighthawkTestServer + + +class EnvoyProxyServer(NighthawkTestServer): + """Envoy proxy server abstraction. + + Note that it derives from NighthawkTestServer, as that is implemented as a customized + Envoy, which is convenient here: the CLI and admin interface mechanics that we rely on + are the same. So all we do here, is specialize so we can override the docker image and + binary name. + + Attributes: + See base class + + Example: + See InjectHttpProxyIntegrationTestBase below for usage. + """ + + def __init__(self, config_template_path, server_ip, ip_version, parameters=dict(), tag=""): + """Initialize an EnvoyProxyServer instance. + + Arguments: + config_template_path: Configuration template for the proxy. + server_ip: IP address for the proxy to use. + ip_version: IP version that the proxy should use when listening. + parameters: Dictionary. Supply this to provide template parameter replacement values (optional). + tag: String. Supply this to get recognizeable output locations (optional). + """ + # If no explicit envoy path is passed, we'll use nighthawk_test_server. 
+ super(EnvoyProxyServer, self).__init__( + os.getenv("ENVOY_PATH", "nighthawk_test_server"), + config_template_path, + server_ip, + ip_version, + parameters=parameters, + tag=tag) + self.docker_image = os.getenv("ENVOY_DOCKER_IMAGE_TO_TEST", "") + + +@pytest.fixture() +def proxy_config(): + """Yield the stock Envoy proxy configuration.""" + yield "nighthawk/benchmarks/configurations/envoy_proxy.yaml" + + +class InjectHttpProxyIntegrationTestBase(HttpIntegrationTestBase): + """Proxy and Test server fixture. + + Fixture which spins up a Nighthawk test server as well as an Envoy proxy + which directs traffic to that. Both will be listing for plain http traffic. + """ + + def __init__(self, ip_version, server_config, proxy_config): + """Initialize an InjectHttpProxyIntegrationTestBase. + + Arguments: + ip_version: Use ipv4 or ipv6 + server_config: Path to the server configuration. + proxy_config: Path to the proxy configuration. + """ + super(InjectHttpProxyIntegrationTestBase, self).__init__(ip_version, server_config) + self._proxy_config = proxy_config + + def setUp(self): + """Set up the injected Envoy proxy as well as the test server. + + Assert that both started successfully, and return afterwards. + """ + super(InjectHttpProxyIntegrationTestBase, self).setUp() + logging.info("injecting envoy proxy ...") + # TODO(oschaaf): how should this interact with multiple backends? + self.parameters["proxy_ip"] = self.test_server.server_ip + self.parameters["server_port"] = self.test_server.server_port + proxy_server = EnvoyProxyServer( + self._proxy_config, + self.server_ip, + self.ip_version, + parameters=self.parameters, + tag=self.tag) + assert (proxy_server.start()) + logging.info("envoy proxy listening at {ip}:{port}".format( + ip=proxy_server.server_ip, port=proxy_server.server_port)) + self.proxy_server = proxy_server + + def tearDown(self): + """Tear down the proxy and test server. 
Assert that both exit succesfully.""" + super(InjectHttpProxyIntegrationTestBase, self).tearDown() + assert (self.proxy_server.stop() == 0) + + def getTestServerRootUri(self): + """Get the root uri, pointing to the proxy address and port.""" + root_uri = super(InjectHttpProxyIntegrationTestBase, self).getTestServerRootUri() + root_uri = root_uri.replace(":%s" % self.test_server.server_port, + ":%s" % self.proxy_server.server_port) + return root_uri + + +@pytest.fixture(params=determineIpVersionsFromEnvironment()) +def inject_envoy_http_proxy_fixture(request, server_config, proxy_config): + """Injects an Envoy proxy in front of the test server. + + NOTE: Depends on the proxy_config fixture, which must be explicitly imported + into the consuming module when using this fixture. + + Arguments: + request: supplies the ip version. + server_config: path to the server configuration template. + proxy_config: path to the proxy configuration template. + + Yields: a successfully set up InjectHttpProxyIntegrationTestBase instance. + """ + fixture = InjectHttpProxyIntegrationTestBase(request.param, server_config, proxy_config) + fixture.setUp() + yield fixture + fixture.tearDown() diff --git a/benchmarks/run_benchmark.sh b/benchmarks/run_benchmark.sh new file mode 100755 index 000000000..9518fa519 --- /dev/null +++ b/benchmarks/run_benchmark.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +set -eo pipefail +set +x +set -u + +BAZEL="bazel" + +if [[ "$OSTYPE" == "darwin"* ]]; then + # On OSX we update the docker env vars to the latest + eval "$(docker-machine env default)" + # We also update the output location bazel uses, to make sure + # that we will be able to map paths. 
+ # TODO(oschaaf): does this work on Linux?: + export TEST_SERVER_EXTERNAL_IP="$(docker-machine ip)" +fi + +pushd $("${BAZEL}" info workspace) +"${BAZEL}" build //benchmarks:benchmarks + +export ENVOY_IP_TEST_VERSIONS=v4only +export ENVOY_PATH="envoy" +export TMPDIR="$(pwd)/benchmarks/tmp" +export NH_DOCKER_IMAGE="envoyproxy/nighthawk-dev:latest" +export ENVOY_DOCKER_IMAGE_TO_TEST="envoyproxy/envoy-dev:latest" + +# run all tests +bazel-bin/benchmarks/benchmarks --log-cli-level=info -vvvv benchmarks/ diff --git a/benchmarks/test/README.md b/benchmarks/test/README.md new file mode 100644 index 000000000..56d5e2fc9 --- /dev/null +++ b/benchmarks/test/README.md @@ -0,0 +1,4 @@ +The dockerized version of the python benchmarking framework +will not include this. It is used to test during development +as well as ensure executing consumed tests by mapping them +into a docker container works correctly. \ No newline at end of file diff --git a/benchmarks/test/templates/simple_plot.html b/benchmarks/test/templates/simple_plot.html new file mode 100644 index 000000000..72c0363b1 --- /dev/null +++ b/benchmarks/test/templates/simple_plot.html @@ -0,0 +1,211 @@ + + + +
+ + + + + + + + + + + \ No newline at end of file diff --git a/benchmarks/test/test_discovery.py b/benchmarks/test/test_discovery.py new file mode 100644 index 000000000..09f5e1636 --- /dev/null +++ b/benchmarks/test/test_discovery.py @@ -0,0 +1,120 @@ +#!/usr/bin/env python3 +"""@package integration_test. + +Just a demo for now. Shows how to tap into Nighthawk's +integration test framework to run benchmark executions. +""" + +import logging +import json +import pytest +import os +from test.integration.integration_test_fixtures import (http_test_server_fixture, + https_test_server_fixture) +from test.integration import utility +from envoy_proxy import (inject_envoy_http_proxy_fixture, proxy_config) +from rules_python.python.runfiles import runfiles +from shutil import copyfile + + +def _run_benchmark(fixture, + rps=1000, + duration=30, + max_connections=1, + max_active_requests=100, + request_body_size=0, + response_size=1024, + concurrency=1): + if hasattr(fixture, "proxy_server"): + assert (fixture.proxy_server.enableCpuProfiler()) + assert (fixture.test_server.enableCpuProfiler()) + args = [ + fixture.getTestServerRootUri(), "--rps", + str(rps), "--duration", + str(duration), "--connections", + str(max_connections), "--max-active-requests", + str(max_active_requests), "--concurrency", + str(concurrency), "--request-header", + "x-nighthawk-test-server-config:{response_body_size:%s}" % response_size, + "--experimental-h1-connection-reuse-strategy", "lru", "--prefetch-connections" + ] + if request_body_size > 0: + args.append("--request-body-size") + args.append(str(request_body_size)) + + parsed_json, _ = fixture.runNighthawkClient(args) + counters = fixture.getNighthawkCounterMapFromJson(parsed_json) + response_count = counters["benchmark.http_2xx"] + request_count = counters["upstream_rq_total"] + connection_counter = "upstream_cx_http1_total" + + # Some arbitrary sanity checks + utility.assertCounterGreaterEqual(counters, "benchmark.http_2xx", + (concurrency * 
rps * duration) * 0.99) + utility.assertGreater(counters["upstream_cx_rx_bytes_total"], response_count * response_size) + utility.assertGreater(counters["upstream_cx_tx_bytes_total"], request_count * request_body_size) + utility.assertCounterEqual(counters, connection_counter, concurrency * max_connections) + + # Could potentially set thresholds on acceptable latency here. + + # dump human readable output to logs + json_as_string = json.dumps(parsed_json) + human_output = fixture.transformNighthawkJson(json_as_string, "human") + logging.info(human_output) + + with open(os.path.join(fixture.test_server.tmpdir, "nighthawk-human.txt"), "w") as f: + f.write(human_output) + with open(os.path.join(fixture.test_server.tmpdir, "nighthawk.json"), "w") as f: + f.write(json_as_string) + with open(os.path.join(fixture.test_server.tmpdir, "nighthawk.yaml"), "w") as f: + f.write(fixture.transformNighthawkJson(json_as_string, "yaml")) + with open(os.path.join(fixture.test_server.tmpdir, "fortio.json"), "w") as f: + f.write(fixture.transformNighthawkJson(json_as_string, "fortio")) + with open(os.path.join(fixture.test_server.tmpdir, "server_version.txt"), "w") as f: + f.write(fixture.test_server.getCliVersionString()) + if hasattr(fixture, "proxy_server"): + with open(os.path.join(fixture.test_server.tmpdir, "proxy_version.txt"), "w") as f: + f.write(fixture.proxy_server.getCliVersionString()) + r = runfiles.Create() + copyfile( + r.Rlocation("nighthawk/benchmarks/test/templates/simple_plot.html"), + os.path.join(fixture.test_server.tmpdir, "simple_plot.html")) + + +# Test via injected Envoy +@pytest.mark.parametrize('proxy_config', ["nighthawk/benchmarks/configurations/envoy_proxy.yaml"]) +@pytest.mark.parametrize('server_config', + ["nighthawk/test/integration/configurations/nighthawk_http_origin.yaml"]) +def test_http_h1_small_request_small_reply_via(inject_envoy_http_proxy_fixture, + proxy_config): # noqa + _run_benchmark(inject_envoy_http_proxy_fixture) + + +# via Envoy, 4 
workers. global targets: 1000 qps / 4 connections. +@pytest.mark.parametrize('proxy_config', ["nighthawk/benchmarks/configurations/envoy_proxy.yaml"]) +@pytest.mark.parametrize('server_config', + ["nighthawk/test/integration/configurations/nighthawk_http_origin.yaml"]) +def test_http_h1_small_request_small_reply_via_multiple_workers(inject_envoy_http_proxy_fixture, + proxy_config): # noqa + _run_benchmark(inject_envoy_http_proxy_fixture, rps=250, concurrency=4) + + +# Test the origin directly, using a stock fixture +@pytest.mark.parametrize('server_config', + ["nighthawk/test/integration/configurations/nighthawk_http_origin.yaml"]) +def test_http_h1_small_request_small_reply_direct(http_test_server_fixture): # noqa + _run_benchmark(http_test_server_fixture) + + +# Direct, 4 workers. global targets: 1000 qps / 4 connections. +@pytest.mark.parametrize('server_config', + ["nighthawk/test/integration/configurations/nighthawk_http_origin.yaml"]) +def test_http_h1_small_request_small_reply_direct_multiple_workers( + http_test_server_fixture): # noqa + _run_benchmark(http_test_server_fixture, rps=250, concurrency=4) + + +@pytest.mark.parametrize('server_config', + ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) +def test_https_h1_small_request_small_reply_direct_s(https_test_server_fixture): # noqa + _run_benchmark(https_test_server_fixture) diff --git a/benchmarks/test_benchmark.py b/benchmarks/test_benchmark.py deleted file mode 100644 index 4629d3d96..000000000 --- a/benchmarks/test_benchmark.py +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env python3 - -import logging -import os -import sys -import pytest - -from test.integration.common import IpVersion -from test.integration.integration_test_fixtures import (http_test_server_fixture, - https_test_server_fixture) -from test.integration.utility import * - - -def test_http_h1_maxrps_no_client_side_queueing(http_test_server_fixture): - assert 
(http_test_server_fixture.test_server.enableCpuProfiler()) - MIN_EXPECTED_REQUESTS = 100 - parsed_json, _ = http_test_server_fixture.runNighthawkClient( - [http_test_server_fixture.getTestServerRootUri(), "--rps", "999999", "--duration", "30"]) - counters = http_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) - # We expect to have executed a certain amount of requests - assertCounterGreater(counters, "benchmark.http_2xx", MIN_EXPECTED_REQUESTS) - # We expect to have created only a single connection - assertCounterEqual(counters, "upstream_cx_http1_total", 1) - global_histograms = http_test_server_fixture.getNighthawkGlobalHistogramsbyIdFromJson(parsed_json) - assertGreater(int(global_histograms["sequencer.blocking"]["count"]), MIN_EXPECTED_REQUESTS) - assertGreater( - int(global_histograms["benchmark_http_client.request_to_response"]["count"]), - MIN_EXPECTED_REQUESTS) - # dump output - logging.info(str(parsed_json)) diff --git a/ci/do_ci.sh b/ci/do_ci.sh index e6a43419e..39c618052 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -1,9 +1,17 @@ #!/bin/bash -set -e +set -eo pipefail +set +x +set -u export BUILDIFIER_BIN="${BUILDIFIER_BIN:=/usr/local/bin/buildifier}" export BUILDOZER_BIN="${BUILDOZER_BIN:=/usr/local/bin/buildozer}" +export NUM_CPUS=${NUM_CPUS:=$(grep -c ^processor /proc/cpuinfo)} +export CIRCLECI=${CIRCLECI:="")} +export BAZEL_EXTRA_TEST_OPTIONS=${BAZEL_EXTRA_TEST_OPTIONS:=""} +export BAZEL_OPTIONS=${BAZEL_OPTIONS:=""} +export BAZEL_BUILD_EXTRA_OPTIONS=${BAZEL_BUILD_EXTRA_OPTIONS:=""} +export SRCDIR=${SRCDIR:="${PWD}"} function do_build () { bazel build $BAZEL_BUILD_OPTIONS --verbose_failures=true //:nighthawk @@ -31,14 +39,13 @@ function do_clang_tidy() { } function do_coverage() { + export TEST_TARGETS="//test/..." echo "bazel coverage build with tests ${TEST_TARGETS}" # Reduce the amount of memory Bazel tries to use to prevent it from launching too many subprocesses. 
# This should prevent the system from running out of memory and killing tasks. See discussion on # https://github.com/envoyproxy/envoy/pull/5611. [ -z "$CIRCLECI" ] || export BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS} --local_ram_resources=12288" - - export TEST_TARGETS="//test/..." test/run_nighthawk_bazel_coverage.sh ${TEST_TARGETS} exit 0 } @@ -90,6 +97,36 @@ function do_tsan() { run_bazel test ${BAZEL_TEST_OPTIONS} -c dbg --config=clang-tsan //test/... } +function cleanup_benchmark_artifacts { + # TODO(oschaaf): we clean the tmp dir above from uninteresting stuff + # that crept into the tmp/output directory. The cruft gets in there because + # other tooling also responds to the TMPDIR environment variable, which in retrospect + # was a bad choice. + # Consider using a different environment variable for the benchmark tooling + # to use for this. + size=${#TMPDIR} + if [ $size -gt 4 ] && [ -d "${TMPDIR}" ]; then + rm -rf ${TMPDIR}/tmp.* + fi +} + +function do_benchmark_with_own_binaries() { + echo "Running benchmark framework with own binaries" + cd "${SRCDIR}" + # Benchmark artifacts will be dropped into this directory: + export TMPDIR="${SRCDIR}/generated" + mkdir -p "${TMPDIR}" + trap cleanup_benchmark_artifacts EXIT + run_bazel test ${BAZEL_TEST_OPTIONS} --test_summary=detailed \ + --test_arg=--log-cli-level=info \ + --test_env=HEAPPROFILE= \ + --test_env=HEAPCHECK= \ + --compilation_mode=opt \ + --cxxopt=-g \ + --cxxopt=-ggdb3 \ + //benchmarks:* +} + function do_check_format() { echo "check_format..." 
cd "${SRCDIR}" @@ -113,8 +150,6 @@ function do_fix_format() { ./tools/format_python_tools.sh fix } -[ -z "${NUM_CPUS}" ] && export NUM_CPUS=`grep -c ^processor /proc/cpuinfo` - if [ -n "$CIRCLECI" ]; then if [[ -f "${HOME:-/root}/.gitconfig" ]]; then mv "${HOME:-/root}/.gitconfig" "${HOME:-/root}/.gitconfig_save" @@ -159,7 +194,6 @@ export BAZEL_BUILD_OPTIONS=" \ export BAZEL_TEST_OPTIONS="${BAZEL_BUILD_OPTIONS} --test_env=HOME --test_env=PYTHONUSERBASE \ --test_env=UBSAN_OPTIONS=print_stacktrace=1 \ --cache_test_results=no --test_output=all ${BAZEL_EXTRA_TEST_OPTIONS}" -[[ -z "${SRCDIR}" ]] && SRCDIR="${PWD}" setup_clang_toolchain export CLANG_FORMAT=clang-format @@ -209,8 +243,12 @@ case "$1" in do_fix_format exit 0 ;; + benchmark_with_own_binaries) + do_benchmark_with_own_binaries + exit 0 + ;; *) - echo "must be one of [build,test,clang_tidy,test_with_valgrind,coverage,asan,tsan,docker,check_format,fix_format]" + echo "must be one of [build,test,clang_tidy,test_with_valgrind,coverage,asan,tsan,benchmark_with_own_binaries,docker,check_format,fix_format]" exit 1 ;; esac diff --git a/requirements.txt b/requirements.txt index 8c23bd88d..4ce0db090 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,3 +2,4 @@ requests pytest pytest-dependency pytest-xdist +pyyaml diff --git a/test/integration/BUILD b/test/integration/BUILD index 720e83f00..2b16b3d18 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -11,6 +11,23 @@ envoy_package() py_library( name = "integration_test_base", + data = [ + "configurations/nighthawk_http_origin.yaml", + "configurations/nighthawk_https_origin.yaml", + "configurations/sni_origin.yaml", + "//:nighthawk_client", + "//:nighthawk_output_transform", + "//:nighthawk_service", + "//:nighthawk_test_server", + "@envoy//test/config/integration/certs", + ], + deps = [ + ":integration_test_base_lean", + ], +) + +py_library( + name = "integration_test_base_lean", srcs = [ "common.py", "integration_test_fixtures.py", @@ -22,13 
+39,10 @@ py_library( "configurations/nighthawk_http_origin.yaml", "configurations/nighthawk_https_origin.yaml", "configurations/sni_origin.yaml", - "//:nighthawk_client", - "//:nighthawk_output_transform", - "//:nighthawk_service", - "//:nighthawk_test_server", "@envoy//test/config/integration/certs", ], deps = [ + "@rules_python//python/runfiles", requirement("requests"), # The following are implied by 'request'. requirement("urllib3"), @@ -42,15 +56,15 @@ py_library( requirement("pluggy"), requirement("zipp"), requirement("six"), - requirement("atomicwrites"), requirement("py"), requirement("more_itertools"), requirement("importlib_metadata"), - requirement("pathlib2"), requirement("packaging"), requirement("pytest-xdist"), requirement("execnet"), requirement("apipkg"), + requirement("wcwidth"), + requirement("pyyaml"), ], ) @@ -99,8 +113,6 @@ py_binary( ], "//conditions:default": [], }), - python_version = "PY3", - srcs_version = "PY3ONLY", deps = [ ":test_connection_management_lib", ":test_grpc_service_lib", diff --git a/test/integration/configurations/nighthawk_http_origin.yaml b/test/integration/configurations/nighthawk_http_origin.yaml index b8b0ca26e..cf39a6ffc 100644 --- a/test/integration/configurations/nighthawk_http_origin.yaml +++ b/test/integration/configurations/nighthawk_http_origin.yaml @@ -1,6 +1,6 @@ admin: - access_log_path: /tmp/admin_access-$test_id.log - profile_path: /tmp/envoy-$test_id.prof + access_log_path: $tmpdir/nighthawk-test-server-admin-access.log + profile_path: $tmpdir/nighthawk-test-server.prof address: socket_address: { address: $server_ip, port_value: 0 } static_resources: diff --git a/test/integration/configurations/nighthawk_https_origin.yaml b/test/integration/configurations/nighthawk_https_origin.yaml index 997cfbd4f..708dbdb67 100644 --- a/test/integration/configurations/nighthawk_https_origin.yaml +++ b/test/integration/configurations/nighthawk_https_origin.yaml @@ -1,40 +1,42 @@ admin: - access_log_path: 
/tmp/admin_access-$test_id.log - profile_path: /tmp/envoy-$test_id.prof + access_log_path: $tmpdir/nighthawk-test-server-admin-access.log + profile_path: $tmpdir/nighthawk-test-server.prof address: socket_address: { address: $server_ip, port_value: 0 } static_resources: listeners: - - address: - socket_address: - address: $server_ip - port_value: 0 - filter_chains: - - filters: - - name: envoy.http_connection_manager - config: - generate_request_id: false - codec_type: auto - stat_prefix: ingress_http - route_config: - name: local_route - virtual_hosts: - - name: service - domains: - - "*" - http_filters: - - name: test-server - config: - response_body_size: 10 - response_headers: - - { header: { key: "x-nh", value: "1"}} - - name: envoy.router - config: - dynamic_stats: false - tls_context: - common_tls_context: - tls_certificates: - - certificate_chain: - filename: $ssl_cert_path - private_key: - filename: $ssl_key_path \ No newline at end of file + - address: + socket_address: + address: $server_ip + port_value: 0 + filter_chains: + - filters: + - name: envoy.http_connection_manager + config: + generate_request_id: false + codec_type: auto + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: service + domains: + - "*" + http_filters: + - name: test-server + config: + response_body_size: 10 + response_headers: + - { header: { key: "x-nh", value: "1" } } + - name: envoy.router + config: + dynamic_stats: false + tls_context: + common_tls_context: + tls_certificates: + - certificate_chain: + inline_string: | + @inject-runfile:nighthawk/external/envoy/test/config/integration/certs/servercert.pem + private_key: + inline_string: | + @inject-runfile:nighthawk/external/envoy/test/config/integration/certs/serverkey.pem diff --git a/test/integration/configurations/sni_origin.yaml b/test/integration/configurations/sni_origin.yaml index 827bc9d3a..9d49bae35 100644 --- a/test/integration/configurations/sni_origin.yaml +++ 
b/test/integration/configurations/sni_origin.yaml @@ -1,6 +1,6 @@ admin: - access_log_path: /tmp/admin_access-$test_id.log - profile_path: /tmp/envoy-$test_id.prof + access_log_path: $tmpdir/nighthawk-test-server-admin-access.log + profile_path: $tmpdir/nighthawk-test-server.prof address: socket_address: { address: $server_ip, port_value: 0 } static_resources: @@ -21,8 +21,12 @@ static_resources: "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext common_tls_context: tls_certificates: - - certificate_chain: { filename: "$ssl_cert_path" } - private_key: { filename: "$ssl_key_path" } + - certificate_chain: + inline_string: | + @inject-runfile:nighthawk/external/envoy/test/config/integration/certs/servercert.pem + private_key: + inline_string: | + @inject-runfile:nighthawk/external/envoy/test/config/integration/certs/serverkey.pem listener_filters: - name: "envoy.listener.tls_inspector" typed_config: {} @@ -35,8 +39,12 @@ static_resources: "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext common_tls_context: tls_certificates: - - certificate_chain: { filename: "$ssl_cert_path" } - private_key: { filename: "$ssl_key_path" } + - certificate_chain: + inline_string: | + @inject-runfile:nighthawk/external/envoy/test/config/integration/certs/servercert.pem + private_key: + inline_string: | + @inject-runfile:nighthawk/external/envoy/test/config/integration/certs/serverkey.pem filters: - name: envoy.http_connection_manager config: diff --git a/test/integration/integration_test_fixtures.py b/test/integration/integration_test_fixtures.py index 0d1583c48..2e73ce908 100644 --- a/test/integration/integration_test_fixtures.py +++ b/test/integration/integration_test_fixtures.py @@ -17,6 +17,8 @@ from test.integration.nighthawk_test_server import NighthawkTestServer from test.integration.nighthawk_grpc_service import NighthawkGrpcService +_TIMESTAMP = time.strftime('%Y-%m-%d-%H-%M-%S') + def determineIpVersionsFromEnvironment(): env_versions = 
os.environ.get("ENVOY_IP_TEST_VERSIONS", "all") @@ -38,33 +40,46 @@ class IntegrationTestBase(): Support for multiple test servers has been added in a way that minimizes impact to existing tests. self.test_server always points to the first test server, and methods assuming a single backend such - as getTestServerRootUri were left intact. self.test_servers contains all test servers, including the + as getTestServerRootUri were left intact. self._test_servers contains all test servers, including the first. Methods such as getTestServerRootUris that are aware of multiple test servers will also work when there is only one test server. This class will be refactored (https://github.com/envoyproxy/nighthawk/issues/258). """ - def __init__(self, ip_version, backend_count=1): + def __init__(self, ip_version, server_config, backend_count=1): """ Args: ip_version: a single IP mode that this instance will test: IpVersion.IPV4 or IpVersion.IPV6 + server_config: path to the server configuration backend_count: number of Nighthawk Test Server backends to run, to allow testing MultiTarget mode + Attributes: + ip_version: IP version that the proxy should use when listening. + server_ip: string containing the server ip that will be used to listen + tag: String. Supply this to get recognizeable output locations. + parameters: Dictionary. Supply this to provide template parameter replacement values. + grpc_service: NighthawkGrpcService instance or None. Set by startNighthawkGrpcService(). + test_server: NighthawkTestServer instance, set during setUp(). + nighthawk_client_path: String, path to the nighthawk_client binary. 
""" super(IntegrationTestBase, self).__init__() - self.test_rundir = os.path.join(os.environ["TEST_SRCDIR"], os.environ["TEST_WORKSPACE"]) - self.nighthawk_test_server_path = os.path.join(self.test_rundir, "nighthawk_test_server") - self.nighthawk_test_config_path = None - self.nighthawk_client_path = os.path.join(self.test_rundir, "nighthawk_client") assert ip_version != IpVersion.UNKNOWN - self.server_ip = "::1" if ip_version == IpVersion.IPV6 else "127.0.0.1" - self.socket_type = socket.AF_INET6 if ip_version == IpVersion.IPV6 else socket.AF_INET - self.test_server = None - self.test_servers = [] - self.backend_count = backend_count - self.parameters = {} self.ip_version = ip_version + self.server_ip = "::/0" if ip_version == IpVersion.IPV6 else "0.0.0.0" + self.server_ip = os.getenv("TEST_SERVER_EXTERNAL_IP", self.server_ip) + self.tag = "" + self.parameters = {} self.grpc_service = None + self.test_server = None + self.nighthawk_client_path = "nighthawk_client" + self._nighthawk_test_server_path = "nighthawk_test_server" + self._nighthawk_test_config_path = server_config + self._nighthawk_service_path = "nighthawk_service" + self._nighthawk_output_transform_path = "nighthawk_output_transform" + self._socket_type = socket.AF_INET6 if ip_version == IpVersion.IPV6 else socket.AF_INET + self._test_servers = [] + self._backend_count = backend_count + self._test_id = "" # TODO(oschaaf): For the NH test server, add a way to let it determine a port by itself and pull that # out. @@ -75,7 +90,7 @@ def getFreeListenerPortForAddress(self, address): The upside is that we can push the port upon the server we are about to start through configuration which is compatible accross servers. 
""" - with socket.socket(self.socket_type, socket.SOCK_STREAM) as sock: + with socket.socket(self._socket_type, socket.SOCK_STREAM) as sock: sock.bind((address, 0)) port = sock.getsockname()[1] return port @@ -84,19 +99,17 @@ def setUp(self): """ Performs sanity checks and starts up the server. Upon exit the server is ready to accept connections. """ - assert (os.path.exists(self.nighthawk_test_server_path)) - assert (os.path.exists(self.nighthawk_client_path)) - test_id = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0].replace( - "[", "_").replace("]", "") - self.parameters["test_id"] = test_id - for i in range(self.backend_count): - test_server = NighthawkTestServer(self.nighthawk_test_server_path, - self.nighthawk_test_config_path, self.server_ip, - self.ip_version, self.parameters) - assert (test_server.start()) - self.test_servers.append(test_server) - if i == 0: - self.test_server = test_server + if os.getenv("NH_DOCKER_IMAGE", "") == "": + assert os.path.exists( + self._nighthawk_test_server_path + ), "Test server binary not found: '%s'" % self._nighthawk_test_server_path + assert os.path.exists(self.nighthawk_client_path + ), "Nighthawk client binary not found: '%s'" % self.nighthawk_client_path + + self._test_id = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0].replace( + "[", "_").replace("]", "").replace("/", "_")[5:] + self.tag = "{timestamp}/{test_id}".format(timestamp=_TIMESTAMP, test_id=self._test_id) + assert self._tryStartTestServers(), "Test server(s) failed to start" def tearDown(self): """ @@ -106,11 +119,27 @@ def tearDown(self): assert (self.grpc_service.stop() == 0) any_failed = False - for test_server in self.test_servers: + for test_server in self._test_servers: if test_server.stop() != 0: any_failed = True assert (not any_failed) + def _tryStartTestServers(self): + for i in range(self._backend_count): + test_server = NighthawkTestServer( + self._nighthawk_test_server_path, + 
self._nighthawk_test_config_path, + self.server_ip, + self.ip_version, + parameters=self.parameters, + tag=self.tag) + if not test_server.start(): + return False + self._test_servers.append(test_server) + if i == 0: + self.test_server = test_server + return True + def getGlobalResults(self, parsed_json): """ Utility to find the global/aggregated result in the json output @@ -158,7 +187,7 @@ def getAllTestServerRootUris(self, https=False): return [ "%s://%s:%s/" % ("https" if https else "http", uri_host, test_server.server_port) - for test_server in self.test_servers + for test_server in self._test_servers ] def getTestServerStatisticsJson(self): @@ -173,7 +202,7 @@ def getAllTestServerStatisticsJsons(self): """ return [ test_server.fetchJsonFromAdminInterface("/stats?format=json") - for test_server in self.test_servers + for test_server in self._test_servers ] def getServerStatFromJson(self, server_stats_json, name): @@ -193,19 +222,25 @@ def runNighthawkClient(self, args, expect_failure=False, timeout=30, as_json=Tru """ # Copy the args so our modifications to it stay local. 
args = args.copy() + if os.getenv("NH_DOCKER_IMAGE", "") != "": + args = [ + "docker", "run", "--network=host", "--rm", + os.getenv("NH_DOCKER_IMAGE"), self.nighthawk_client_path + ] + args + else: + args = [self.nighthawk_client_path] + args if self.ip_version == IpVersion.IPV6: - args.insert(0, "--address-family v6") + args.append("--address-family v6") if as_json: - args.insert(0, "--output-format json") - args.insert(0, self.nighthawk_client_path) + args.append("--output-format json") logging.info("Nighthawk client popen() args: [%s]" % args) client_process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = client_process.communicate() logs = stderr.decode('utf-8') output = stdout.decode('utf-8') - logging.info("Nighthawk client stdout: [%s]" % output) + logging.debug("Nighthawk client stdout: [%s]" % output) if logs: - logging.warning("Nighthawk client stderr: [%s]" % logs) + logging.debug("Nighthawk client stderr: [%s]" % logs) if as_json: output = json.loads(output) if expect_failure: @@ -214,26 +249,50 @@ def runNighthawkClient(self, args, expect_failure=False, timeout=30, as_json=Tru assert (client_process.returncode == 0) return output, logs + def transformNighthawkJson(self, json, format="human"): + """Use to obtain one of the supported output from Nighthawk's raw json output. + + Arguments: + json: String containing raw json output obtained via nighthawk_client --output-format=json + format: String that specifies the desired output format. Must be one of [human|yaml|dotted-string|fortio]. Optional, defaults to "human". + """ + + # TODO(oschaaf): validate format arg. 
+ args = [] + if os.getenv("NH_DOCKER_IMAGE", "") != "": + args = ["docker", "run", "--rm", "-i", os.getenv("NH_DOCKER_IMAGE")] + args = args + [self._nighthawk_output_transform_path, "--output-format", format] + logging.info("Nighthawk output transform popen() args: %s" % args) + client_process = subprocess.Popen( + args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + logging.info("Nighthawk client popen() args: [%s]" % args) + stdout, stderr = client_process.communicate(input=json.encode()) + logs = stderr.decode('utf-8') + output = stdout.decode('utf-8') + assert (client_process.returncode == 0) + return stdout.decode('utf-8') + def assertIsSubset(self, subset, superset): self.assertLessEqual(subset.items(), superset.items()) def startNighthawkGrpcService(self, service_name="traffic-generator-service"): host = self.server_ip if self.ip_version == IpVersion.IPV4 else "[%s]" % self.server_ip - self.grpc_service = NighthawkGrpcService( - os.path.join(self.test_rundir, "nighthawk_service"), host, self.ip_version, service_name) + self.grpc_service = NighthawkGrpcService(self._nighthawk_service_path, host, self.ip_version, + service_name) assert (self.grpc_service.start()) class HttpIntegrationTestBase(IntegrationTestBase): """ Base for running plain http tests against the Nighthawk test server + NOTE: any script that consumes derivations of this, needs to needs also explictly + import server_config, to avoid errors caused by the server_config not being found + by pytest. 
""" - def __init__(self, ip_version): + def __init__(self, ip_version, server_config): """See base class.""" - super(HttpIntegrationTestBase, self).__init__(ip_version) - self.nighthawk_test_config_path = os.path.join( - self.test_rundir, "test/integration/configurations/nighthawk_http_origin.yaml") + super(HttpIntegrationTestBase, self).__init__(ip_version, server_config) def getTestServerRootUri(self): """See base class.""" @@ -245,11 +304,10 @@ class MultiServerHttpIntegrationTestBase(IntegrationTestBase): Base for running plain http tests against multiple Nighthawk test servers """ - def __init__(self, ip_version, backend_count): + def __init__(self, ip_version, server_config, backend_count): """See base class.""" - super(MultiServerHttpIntegrationTestBase, self).__init__(ip_version, backend_count) - self.nighthawk_test_config_path = os.path.join( - self.test_rundir, "test/integration/configurations/nighthawk_http_origin.yaml") + super(MultiServerHttpIntegrationTestBase, self).__init__(ip_version, server_config, + backend_count) def getTestServerRootUri(self): """See base class.""" @@ -265,15 +323,9 @@ class HttpsIntegrationTestBase(IntegrationTestBase): Base for https tests against the Nighthawk test server """ - def __init__(self, ip_version): + def __init__(self, ip_version, server_config): """See base class.""" - super(HttpsIntegrationTestBase, self).__init__(ip_version) - self.parameters["ssl_key_path"] = os.path.join( - self.test_rundir, "external/envoy/test/config/integration/certs/serverkey.pem") - self.parameters["ssl_cert_path"] = os.path.join( - self.test_rundir, "external/envoy/test/config/integration/certs/servercert.pem") - self.nighthawk_test_config_path = os.path.join( - self.test_rundir, "test/integration/configurations/nighthawk_https_origin.yaml") + super(HttpsIntegrationTestBase, self).__init__(ip_version, server_config) def getTestServerRootUri(self): """See base class.""" @@ -285,10 +337,8 @@ class 
SniIntegrationTestBase(HttpsIntegrationTestBase): Base for https/sni tests against the Nighthawk test server """ - def __init__(self, ip_version): - super(SniIntegrationTestBase, self).__init__(ip_version) - self.nighthawk_test_config_path = os.path.join( - self.test_rundir, "test/integration/configurations/sni_origin.yaml") + def __init__(self, ip_version, server_config): + super(SniIntegrationTestBase, self).__init__(ip_version, server_config) def getTestServerRootUri(self): """See base class.""" @@ -300,14 +350,9 @@ class MultiServerHttpsIntegrationTestBase(IntegrationTestBase): Base for https tests against multiple Nighthawk test servers """ - def __init__(self, ip_version, backend_count): - super(MultiServerHttpsIntegrationTestBase, self).__init__(ip_version, backend_count) - self.parameters["ssl_key_path"] = os.path.join( - self.test_rundir, "external/envoy/test/config/integration/certs/serverkey.pem") - self.parameters["ssl_cert_path"] = os.path.join( - self.test_rundir, "external/envoy/test/config/integration/certs/servercert.pem") - self.nighthawk_test_config_path = os.path.join( - self.test_rundir, "test/integration/configurations/nighthawk_https_origin.yaml") + def __init__(self, ip_version, server_config, backend_count): + super(MultiServerHttpsIntegrationTestBase, self).__init__(ip_version, server_config, + backend_count) def getTestServerRootUri(self): """See base class.""" @@ -318,41 +363,38 @@ def getAllTestServerRootUris(self): return super(MultiServerHttpsIntegrationTestBase, self).getAllTestServerRootUris(True) -@pytest.fixture(params=determineIpVersionsFromEnvironment()) -def http_test_server_fixture(request): - f = HttpIntegrationTestBase(request.param) - f.setUp() - yield f - f.tearDown() +@pytest.fixture() +def server_config(): + yield "nighthawk/test/integration/configurations/nighthawk_http_origin.yaml" @pytest.fixture(params=determineIpVersionsFromEnvironment()) -def https_test_server_fixture(request): - f = 
HttpsIntegrationTestBase(request.param) +def http_test_server_fixture(request, server_config): + f = HttpIntegrationTestBase(request.param, server_config) f.setUp() yield f f.tearDown() @pytest.fixture(params=determineIpVersionsFromEnvironment()) -def multi_http_test_server_fixture(request): - f = MultiServerHttpIntegrationTestBase(request.param, backend_count=3) +def https_test_server_fixture(request, server_config): + f = HttpsIntegrationTestBase(request.param, server_config) f.setUp() yield f f.tearDown() @pytest.fixture(params=determineIpVersionsFromEnvironment()) -def multi_https_test_server_fixture(request): - f = MultiServerHttpsIntegrationTestBase(request.param, backend_count=3) +def multi_http_test_server_fixture(request, server_config): + f = MultiServerHttpIntegrationTestBase(request.param, server_config, backend_count=3) f.setUp() yield f f.tearDown() @pytest.fixture(params=determineIpVersionsFromEnvironment()) -def sni_test_server_fixture(request): - f = SniIntegrationTestBase(request.param) +def multi_https_test_server_fixture(request, server_config): + f = MultiServerHttpsIntegrationTestBase(request.param, server_config, backend_count=3) f.setUp() yield f f.tearDown() diff --git a/test/integration/nighthawk_grpc_service.py b/test/integration/nighthawk_grpc_service.py index 37d69d7e9..7af686ca3 100644 --- a/test/integration/nighthawk_grpc_service.py +++ b/test/integration/nighthawk_grpc_service.py @@ -5,7 +5,7 @@ import threading import time -from common import IpVersion +from test.integration.common import IpVersion # TODO(oschaaf): unify some of this code with the test server wrapper. 
diff --git a/test/integration/nighthawk_test_server.py b/test/integration/nighthawk_test_server.py index 28ecb359c..4f3ddb202 100644 --- a/test/integration/nighthawk_test_server.py +++ b/test/integration/nighthawk_test_server.py @@ -10,61 +10,126 @@ import tempfile import threading import time +import yaml from string import Template +from pathlib import Path +from rules_python.python.runfiles import runfiles from test.integration.common import IpVersion, NighthawkException +def _substitute_yaml_values(runfiles_instance, obj, params): + if isinstance(obj, dict): + for k, v in obj.items(): + obj[k] = _substitute_yaml_values(runfiles_instance, v, params) + elif isinstance(obj, list): + for i in range(len(obj)): + obj[i] = _substitute_yaml_values(runfiles_instance, obj[i], params) + else: + if isinstance(obj, str): + # Inspect string values and substitute where applicable. + INJECT_RUNFILE_MARKER = '@inject-runfile:' + if obj[0] == '$': + return Template(obj).substitute(params) + elif obj.startswith(INJECT_RUNFILE_MARKER): + with open(runfiles_instance.Rlocation(obj[len(INJECT_RUNFILE_MARKER):].strip()), + 'r') as file: + return file.read() + return obj + + class TestServerBase(object): """ Base class for running a server in a separate process. + + Arguments: + server_binary_path: String, specify the path to the test server binary. + config_template_path: String, specify the path to the test server configuration template. + server_ip: String, specify the ip address the test server should use to listen for traffic. + server_binary_config_path_arg: String, specify the name of the CLI argument the test server binary uses to accept a configuration path. + parameters: Dictionary. Supply this to provide configuration template parameter replacement values. + tag: String. Supply this to get recognizeable output locations. + + Attributes: + ip_version: IP version that the proxy should use when listening. 
+ server_ip: string containing the server ip that will be used to listen + server_port: Integer, get the port used by the server to listen for traffic. + docker_image: String, supplies a docker image for execution of the test server binary. Sourced from environment variable NH_DOCKER_IMAGE. + tmpdir: String, indicates the location used to store outputs like logs. """ def __init__(self, server_binary_path, config_template_path, server_ip, ip_version, - server_binary_config_path_arg, parameters): + server_binary_config_path_arg, parameters, tag): assert ip_version != IpVersion.UNKNOWN self.ip_version = ip_version - self.server_binary_path = server_binary_path - self.config_template_path = config_template_path - self.server_thread = threading.Thread(target=self.serverThreadRunner) - self.server_process = None self.server_ip = server_ip - self.socket_type = socket.AF_INET6 if ip_version == IpVersion.IPV6 else socket.AF_INET self.server_port = -1 - self.admin_port = -1 - self.admin_address_path = "" - self.parameterized_config_path = "" - self.instance_id = str(random.randint(1, 1024 * 1024 * 1024)) - self.parameters = parameters - self.server_binary_config_path_arg = server_binary_config_path_arg - - self.parameters["server_ip"] = self.server_ip - with open(self.config_template_path) as f: - config = Template(f.read()) - config = config.substitute(self.parameters) - logging.info("Parameterized server configuration: %s", config) - - with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".yaml") as tmp: - self.parameterized_config_path = tmp.name - tmp.write(config) - with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".adminpath") as tmp: - self.admin_address_path = tmp.name + self.docker_image = os.getenv("NH_DOCKER_IMAGE", "") + self.tmpdir = os.path.join(os.getenv("TMPDIR", "/tmp/nighthawk_benchmark/"), tag + "/") + self._server_binary_path = server_binary_path + self._config_template_path = config_template_path + self._parameters = 
dict(parameters) + self._parameters["server_ip"] = self.server_ip + self._parameters["tmpdir"] = self.tmpdir + self._parameters["tag"] = tag + self._server_process = None + self._server_thread = threading.Thread(target=self.serverThreadRunner) + self._admin_address_path = "" + self._parameterized_config_path = "" + self._instance_id = str(random.randint(1, 1024 * 1024 * 1024)) + self._server_binary_config_path_arg = server_binary_config_path_arg + self._prepareForExecution() + + def _prepareForExecution(self): + runfiles_instance = runfiles.Create() + with open(runfiles_instance.Rlocation(self._config_template_path)) as f: + data = yaml.load(f, Loader=yaml.FullLoader) + data = _substitute_yaml_values(runfiles_instance, data, self._parameters) + + Path(self.tmpdir).mkdir(parents=True, exist_ok=True) + + with tempfile.NamedTemporaryFile( + mode="w", delete=False, suffix=".config.yaml", dir=self.tmpdir) as tmp: + self._parameterized_config_path = tmp.name + yaml.safe_dump( + data, + tmp, + default_flow_style=False, + explicit_start=True, + allow_unicode=True, + encoding='utf-8') + + with tempfile.NamedTemporaryFile( + mode="w", delete=False, suffix=".adminport", dir=self.tmpdir) as tmp: + self._admin_address_path = tmp.name def serverThreadRunner(self): - args = [ - self.server_binary_path, self.server_binary_config_path_arg, self.parameterized_config_path, - "-l", "error", "--base-id", self.instance_id, "--admin-address-path", - self.admin_address_path + args = [] + if self.docker_image != "": + # TODO(#383): As of https://github.com/envoyproxy/envoy/commit/e8a2d1e24dc9a0da5273442204ec3cdfad1e7ca8 + # we need to have ENVOY_UID=0 in the environment, or this will break on docker runs, as Envoy + # will not be able to read the configuration files we stub here in docker runs. 
+ args = [ + "docker", "run", "--network=host", "--rm", "-v", "{t}:{t}".format(t=self.tmpdir), "-e", + "ENVOY_UID=0", self.docker_image + ] + args = args + [ + self._server_binary_path, self._server_binary_config_path_arg, + self._parameterized_config_path, "-l", "debug", "--base-id", self._instance_id, + "--admin-address-path", self._admin_address_path, "--concurrency", "1" ] - logging.info("Test server popen() args: [%s]" % args) - self.server_process = subprocess.Popen(args) - self.server_process.communicate() + logging.info("Test server popen() args: %s" % str.join(" ", args)) + self._server_process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = self._server_process.communicate() + logging.debug(stdout.decode("utf-8")) + logging.debug(stderr.decode("utf-8")) def fetchJsonFromAdminInterface(self, path): uri_host = self.server_ip if self.ip_version == IpVersion.IPV6: uri_host = "[%s]" % self.server_ip uri = "http://%s:%s%s" % (uri_host, self.admin_port, path) + logging.info("Fetch listeners via %s" % uri) r = requests.get(uri) if r.status_code != 200: raise NighthawkException("Bad status code wile fetching json from admin interface: %s", @@ -72,7 +137,7 @@ def fetchJsonFromAdminInterface(self, path): return r.json() def tryUpdateFromAdminInterface(self): - with open(self.admin_address_path) as admin_address_file: + with open(self._admin_address_path) as admin_address_file: admin_address = admin_address_file.read() tmp = admin_address.split(":") # we expect at least two elements (host:port). 
This might still be an empty file @@ -95,7 +160,7 @@ def enableCpuProfiler(self): uri_host = "[%s]" % self.server_ip uri = "http://%s:%s%s" % (uri_host, self.admin_port, "/cpuprofiler?enable=y") r = requests.post(uri) - logging.info("Enabled CPU profiling: %s", r.status_code == 200) + logging.info("Enabled CPU profiling via %s: %s", uri, r.status_code == 200) return r.status_code == 200 def waitUntilServerListening(self): @@ -110,14 +175,15 @@ def waitUntilServerListening(self): return False def start(self): - self.server_thread.daemon = True - self.server_thread.start() + self._server_thread.daemon = True + self._server_thread.start() return self.waitUntilServerListening() def stop(self): - self.server_process.terminate() - self.server_thread.join() - return self.server_process.returncode + os.remove(self._admin_address_path) + self._server_process.terminate() + self._server_thread.join() + return self._server_process.returncode class NighthawkTestServer(TestServerBase): @@ -131,6 +197,21 @@ def __init__(self, config_template_path, server_ip, ip_version, - parameters=dict()): + parameters=dict(), + tag=""): super(NighthawkTestServer, self).__init__(server_binary_path, config_template_path, server_ip, - ip_version, "--config-path", parameters) + ip_version, "--config-path", parameters, tag) + + def getCliVersionString(self): + """ Get the version string as written to the output by the CLI. 
+ """ + + args = [] + if self.docker_image != "": + args = ["docker", "run", "--rm", self.docker_image] + args = args + [self._server_binary_path, "--base-id", self._instance_id, "--version"] + + process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = process.communicate() + assert process.wait() == 0 + return stdout.decode("utf-8").strip() diff --git a/test/integration/test_connection_management.py b/test/integration/test_connection_management.py index 362519d45..9f1fd5459 100644 --- a/test/integration/test_connection_management.py +++ b/test/integration/test_connection_management.py @@ -5,7 +5,7 @@ import sys import pytest -from test.integration.integration_test_fixtures import (http_test_server_fixture) +from test.integration.integration_test_fixtures import (http_test_server_fixture, server_config) from test.integration.utility import * diff --git a/test/integration/test_grpc_service.py b/test/integration/test_grpc_service.py index f0624caad..1271cd5b0 100644 --- a/test/integration/test_grpc_service.py +++ b/test/integration/test_grpc_service.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 import pytest -from test.integration.integration_test_fixtures import http_test_server_fixture +from test.integration.integration_test_fixtures import (http_test_server_fixture, server_config) from test.integration import utility diff --git a/test/integration/test_integration_basics.py b/test/integration/test_integration_basics.py index d82fdb478..2a50a31ca 100644 --- a/test/integration/test_integration_basics.py +++ b/test/integration/test_integration_basics.py @@ -12,7 +12,7 @@ from test.integration.common import IpVersion from test.integration.integration_test_fixtures import ( http_test_server_fixture, https_test_server_fixture, multi_http_test_server_fixture, - multi_https_test_server_fixture, sni_test_server_fixture) + multi_https_test_server_fixture, server_config) from test.integration.utility import * # TODO(oschaaf): we mostly 
verify stats observed from the client-side. Add expectations @@ -34,7 +34,7 @@ def test_http_h1(http_test_server_fixture): assertCounterEqual(counters, "upstream_cx_rx_bytes_total", 3400) assertCounterEqual(counters, "upstream_cx_total", 1) assertCounterEqual(counters, "upstream_cx_tx_bytes_total", - 1400 if http_test_server_fixture.ip_version == IpVersion.IPV6 else 1500) + 1400 if http_test_server_fixture.ip_version == IpVersion.IPV6 else 1450) assertCounterEqual(counters, "upstream_rq_pending_total", 1) assertCounterEqual(counters, "upstream_rq_total", 25) assertCounterEqual(counters, "default.total_match_count", 1) @@ -204,6 +204,8 @@ def test_http_concurrency(http_test_server_fixture): assertCounterEqual(counters, "upstream_cx_http1_total", 4) +@pytest.mark.parametrize('server_config', + ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_h1(https_test_server_fixture): """ Runs the CLI configured to use HTTP/1 over https against our test server, and sanity @@ -219,7 +221,7 @@ def test_https_h1(https_test_server_fixture): assertCounterEqual(counters, "upstream_cx_rx_bytes_total", 3400) assertCounterEqual(counters, "upstream_cx_total", 1) assertCounterEqual(counters, "upstream_cx_tx_bytes_total", - 1400 if https_test_server_fixture.ip_version == IpVersion.IPV6 else 1500) + 1400 if https_test_server_fixture.ip_version == IpVersion.IPV6 else 1450) assertCounterEqual(counters, "upstream_rq_pending_total", 1) assertCounterEqual(counters, "upstream_rq_total", 25) assertCounterEqual(counters, "ssl.ciphers.ECDHE-RSA-AES128-GCM-SHA256", 1) @@ -236,6 +238,8 @@ def test_https_h1(https_test_server_fixture): "http.ingress_http.downstream_rq_2xx"), 25) +@pytest.mark.parametrize('server_config', + ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_h2(https_test_server_fixture): """ Runs the CLI configured to use HTTP/2 (using https) against our test server, and sanity @@ -265,6 +269,8 @@ def 
test_https_h2(https_test_server_fixture): assertEqual(len(counters), 17) +@pytest.mark.parametrize('server_config', + ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_h2_multiple_connections(https_test_server_fixture): """ Test that the experimental h2 pool uses multiple connections. @@ -317,6 +323,8 @@ def _do_tls_configuration_test(https_test_server_fixture, cli_parameter, use_h2) assertCounterGreaterEqual(counters, "ssl.ciphers.%s" % cipher, 1) +@pytest.mark.parametrize('server_config', + ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_h1_tls_context_configuration(https_test_server_fixture): """ Verifies specifying tls cipher suites works with the h1 pool @@ -324,6 +332,8 @@ def test_https_h1_tls_context_configuration(https_test_server_fixture): _do_tls_configuration_test(https_test_server_fixture, "--tls-context", use_h2=False) +@pytest.mark.parametrize('server_config', + ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_h1_transport_socket_configuration(https_test_server_fixture): """ Verifies specifying tls cipher suites via transport socket works with the h1 pool @@ -332,6 +342,8 @@ def test_https_h1_transport_socket_configuration(https_test_server_fixture): _do_tls_configuration_test(https_test_server_fixture, "--transport-socket", use_h2=False) +@pytest.mark.parametrize('server_config', + ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_h2_tls_context_configuration(https_test_server_fixture): """ Verifies specifying tls cipher suites works with the h2 pool @@ -339,6 +351,8 @@ def test_https_h2_tls_context_configuration(https_test_server_fixture): _do_tls_configuration_test(https_test_server_fixture, "--tls-context", use_h2=True) +@pytest.mark.parametrize('server_config', + ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def 
test_https_h2_transport_socket_configuration(https_test_server_fixture): """ Verifies specifying tls cipher suites via transport socket works with the h2 pool @@ -346,6 +360,8 @@ def test_https_h2_transport_socket_configuration(https_test_server_fixture): _do_tls_configuration_test(https_test_server_fixture, "--transport-socket", use_h2=True) +@pytest.mark.parametrize('server_config', + ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_prefetching(https_test_server_fixture): """ Test we prefetch connections. We test for 1 second at 1 rps, which should @@ -360,6 +376,8 @@ def test_https_prefetching(https_test_server_fixture): assertCounterEqual(counters, "upstream_cx_http1_total", 50) +@pytest.mark.parametrize('server_config', + ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_https_log_verbosity(https_test_server_fixture): """ Test that that the specified log verbosity level is respected. @@ -515,6 +533,8 @@ def test_multiple_backends_http_h1(multi_http_test_server_fixture): assertBetweenInclusive(total_2xx, 24, 25) +@pytest.mark.parametrize('server_config', + ["nighthawk/test/integration/configurations/nighthawk_https_origin.yaml"]) def test_multiple_backends_https_h1(multi_https_test_server_fixture): """ Runs the CLI configured to use HTTP/1 with TLS against multiple test servers, and sanity @@ -548,74 +568,79 @@ def test_multiple_backends_https_h1(multi_https_test_server_fixture): assertBetweenInclusive(total_2xx, 24, 25) -def test_https_h1_sni(sni_test_server_fixture): +@pytest.mark.parametrize('server_config', + ["nighthawk/test/integration/configurations/sni_origin.yaml"]) +def test_https_h1_sni(https_test_server_fixture): """ Tests SNI indication works on https/h1 """ # Verify success when we set the right host - parsed_json, _ = sni_test_server_fixture.runNighthawkClient([ - sni_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100", + parsed_json, _ = 
https_test_server_fixture.runNighthawkClient([ + https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:2", "--request-header", "host: sni.com" ]) - counters = sni_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) + counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1) assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1) assertCounterGreaterEqual(counters, "ssl.handshake", 1) # Verify failure when we set no host (will get plain http) - parsed_json, _ = sni_test_server_fixture.runNighthawkClient( - [sni_test_server_fixture.getTestServerRootUri(), "--rps", "20", "--duration", "100"], + parsed_json, _ = https_test_server_fixture.runNighthawkClient( + [https_test_server_fixture.getTestServerRootUri(), "--rps", "20", "--duration", "100"], expect_failure=True) # Verify success when we use plain http and don't request the sni host - parsed_json, _ = sni_test_server_fixture.runNighthawkClient([ - sni_test_server_fixture.getTestServerRootUri().replace("https://", "http://"), "--rps", "100", - "--duration", "20", "--termination-predicate", "benchmark.http_2xx:2" + parsed_json, _ = https_test_server_fixture.runNighthawkClient([ + https_test_server_fixture.getTestServerRootUri().replace("https://", "http://"), "--rps", + "100", "--duration", "20", "--termination-predicate", "benchmark.http_2xx:2" ], - expect_failure=False) + expect_failure=False) - counters = sni_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) + counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1) assertCounterGreaterEqual(counters, "upstream_cx_http1_total", 1) assertNotIn("ssl.handshake", counters) -def test_https_h2_sni(sni_test_server_fixture): +@pytest.mark.parametrize('server_config', + 
["nighthawk/test/integration/configurations/sni_origin.yaml"]) +def test_https_h2_sni(https_test_server_fixture): """ Tests SNI indication works on https/h1 """ # Verify success when we set the right host - parsed_json, _ = sni_test_server_fixture.runNighthawkClient([ - sni_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100", + parsed_json, _ = https_test_server_fixture.runNighthawkClient([ + https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:2", "--request-header", ":authority: sni.com", "--h2" ]) - counters = sni_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) + counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1) assertCounterGreaterEqual(counters, "upstream_cx_http2_total", 1) assertCounterEqual(counters, "ssl.handshake", 1) # Verify success when we set the right host - parsed_json, _ = sni_test_server_fixture.runNighthawkClient([ - sni_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100", + parsed_json, _ = https_test_server_fixture.runNighthawkClient([ + https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100", "--termination-predicate", "benchmark.http_2xx:2", "--request-header", "host: sni.com", "--h2" ]) - counters = sni_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) + counters = https_test_server_fixture.getNighthawkCounterMapFromJson(parsed_json) assertCounterGreaterEqual(counters, "benchmark.http_2xx", 1) assertCounterGreaterEqual(counters, "upstream_cx_http2_total", 1) assertCounterEqual(counters, "ssl.handshake", 1) # Verify failure when we set no host (will get plain http) - parsed_json, _ = sni_test_server_fixture.runNighthawkClient( - [sni_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100", "--h2"], - expect_failure=True) + parsed_json, 
_ = https_test_server_fixture.runNighthawkClient([ + https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100", "--h2" + ], + expect_failure=True) # Verify failure when we provide both host and :authority: (will get plain http) - parsed_json, _ = sni_test_server_fixture.runNighthawkClient([ - sni_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100", "--h2", + parsed_json, _ = https_test_server_fixture.runNighthawkClient([ + https_test_server_fixture.getTestServerRootUri(), "--rps", "100", "--duration", "100", "--h2", "--request-header", "host: sni.com", "--request-header", ":authority: sni.com" ], - expect_failure=True) + expect_failure=True) @pytest.fixture(scope="function", params=[1, 25]) diff --git a/test/integration/test_integration_zipkin.py b/test/integration/test_integration_zipkin.py index 347aec714..420861507 100644 --- a/test/integration/test_integration_zipkin.py +++ b/test/integration/test_integration_zipkin.py @@ -1,7 +1,9 @@ #!/usr/bin/env python3 import pytest -from integration_test_fixtures import (http_test_server_fixture) +# server_config needs to be explicitly imported to avoid an error, as http_test_server_fixture +# relies on it. 
+from integration_test_fixtures import (http_test_server_fixture, server_config) from utility import * diff --git a/test/integration/test_remote_execution.py b/test/integration/test_remote_execution.py index a61208020..b4937a9c7 100644 --- a/test/integration/test_remote_execution.py +++ b/test/integration/test_remote_execution.py @@ -2,7 +2,7 @@ import pytest -from test.integration.integration_test_fixtures import http_test_server_fixture +from test.integration.integration_test_fixtures import (http_test_server_fixture, server_config) from test.integration.utility import * diff --git a/tools/format_python_tools.sh b/tools/format_python_tools.sh index c58dccfb7..337a178cf 100755 --- a/tools/format_python_tools.sh +++ b/tools/format_python_tools.sh @@ -15,4 +15,11 @@ echo "Running Python format check..." python format_python_tools.py $1 echo "Running Python3 flake8 check..." -flake8 . --exclude=*/venv/* --count --select=E901,E999,F821,F822,F823 --show-source --statistics +EXCLUDE="--exclude=../benchmarks/tmp/*,*/venv/*" +flake8 . ${EXCLUDE} --count --select=E901,E999,F821,F822,F823 --show-source --statistics +# We raise the bar higher for benchmarks/ overall, but especially when it comes to docstrings. +# Check everything, except indentation and line length for now. +# Also, we ignore unused imports and redefinitions of unused, as those seem to raise false flags in test definitions. +flake8 ../benchmarks/ ${EXCLUDE} --docstring-convention pep257 --ignore=E114,E111,E501,F401,F811 --count --show-source --statistics +# Additional docstring checking based on Google's convention. +flake8 ../benchmarks/ ${EXCLUDE} --docstring-convention google --select=D --count --show-source --statistics diff --git a/tools/requirements.txt b/tools/requirements.txt index 8c4405010..c053a08de 100644 --- a/tools/requirements.txt +++ b/tools/requirements.txt @@ -1,2 +1,3 @@ flake8==3.6.0 -yapf==0.25.0 \ No newline at end of file +yapf==0.25.0 +flake8-docstrings==1.5.0