From 9d71e69b25b06c4b07d395c996c7b85e02e2fd37 Mon Sep 17 00:00:00 2001 From: phlax Date: Mon, 19 Jul 2021 06:33:36 +0100 Subject: [PATCH 01/57] tooling: Improve runner/checker log/error handling (#17388) Signed-off-by: Ryan Northey --- .github/dependabot.yml | 5 + bazel/repositories_extra.bzl | 5 + tools/base/BUILD | 11 +- tools/base/checker.py | 110 +++++-- tools/base/requirements.txt | 28 ++ tools/base/runner.py | 55 +++- tools/base/tests/test_checker.py | 307 +++++++++++++++---- tools/base/tests/test_runner.py | 74 +++-- tools/code_format/python_check.py | 6 +- tools/code_format/tests/test_python_check.py | 6 +- tools/dependency/pip_check.py | 11 +- tools/dependency/tests/test_pip_check.py | 13 +- tools/testing/all_pytests.py | 4 +- tools/testing/tests/test_all_pytests.py | 14 +- 14 files changed, 498 insertions(+), 151 deletions(-) create mode 100644 tools/base/requirements.txt diff --git a/.github/dependabot.yml b/.github/dependabot.yml index d750b52010f8d..758bb344b9774 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -21,6 +21,11 @@ updates: schedule: interval: "daily" +- package-ecosystem: "pip" + directory: "/tools/base" + schedule: + interval: "daily" + - package-ecosystem: "pip" directory: "/tools/docs" schedule: diff --git a/bazel/repositories_extra.bzl b/bazel/repositories_extra.bzl index e7c34c4951cb1..93d442f7f5859 100644 --- a/bazel/repositories_extra.bzl +++ b/bazel/repositories_extra.bzl @@ -7,6 +7,11 @@ load("//bazel/external/cargo:crates.bzl", "raze_fetch_remote_crates") def _python_deps(): py_repositories() + pip_install( + name = "base_pip3", + requirements = "@envoy//tools/base:requirements.txt", + extra_pip_args = ["--require-hashes"], + ) pip_install( name = "config_validation_pip3", requirements = "@envoy//tools/config_validation:requirements.txt", diff --git a/tools/base/BUILD b/tools/base/BUILD index d38233b15eea3..a6a9934c7b83f 100644 --- a/tools/base/BUILD +++ b/tools/base/BUILD @@ -1,3 +1,4 @@ 
+load("@base_pip3//:requirements.bzl", "requirement") load("//bazel:envoy_build_system.bzl", "envoy_package") load("//tools/base:envoy_python.bzl", "envoy_py_library") @@ -5,7 +6,15 @@ licenses(["notice"]) # Apache 2 envoy_package() -envoy_py_library("tools.base.runner") +envoy_py_library( + "tools.base.runner", + deps = [ + requirement("colorama"), + requirement("coloredlogs"), + requirement("frozendict"), + requirement("verboselogs"), + ], +) envoy_py_library("tools.base.utils") diff --git a/tools/base/checker.py b/tools/base/checker.py index aae4fc00a29d1..bcde297ba6f42 100644 --- a/tools/base/checker.py +++ b/tools/base/checker.py @@ -69,7 +69,9 @@ def paths(self) -> list: @property def show_summary(self) -> bool: """Show a summary at the end or not""" - return bool(self.args.summary or self.error_count or self.warning_count) + return bool( + not "exiting" in self.errors + and (self.args.summary or self.error_count or self.warning_count)) @property def status(self) -> dict: @@ -163,23 +165,38 @@ def add_arguments(self, parser: argparse.ArgumentParser) -> None: "Paths to check. 
At least one path must be specified, or the `path` argument should be provided" ) - def error(self, name: str, errors: list, log: bool = True) -> int: + def error(self, name: str, errors: list, log: bool = True, log_type: str = "error") -> int: """Record (and log) errors for a check type""" self.errors[name] = self.errors.get(name, []) self.errors[name].extend(errors) - if log: - self.log.error("\n".join(errors)) + if not log: + return 1 + for message in errors: + getattr(self.log, log_type)(f"[{name}] {message}") return 1 + def exiting(self): + return self.error("exiting", ["Keyboard exit"], log_type="fatal") + def get_checks(self) -> Sequence[str]: """Get list of checks for this checker class filtered according to user args""" return ( self.checks if not self.args.check else [check for check in self.args.check if check in self.checks]) + def on_check_begin(self, check: str) -> None: + self.log.notice(f"[{check}] Running check") + def on_check_run(self, check: str) -> None: """Callback hook called after each check run""" - pass + if check in self.errors: + self.log.error(f"[{check}] Check failed") + elif "exiting" in self.errors: + pass + elif check in self.warnings: + self.log.warning(f"[{check}] Check has warnings") + else: + self.log.success(f"[{check}] Check completed successfully") def on_checks_begin(self) -> None: """Callback hook called before all checks""" @@ -194,28 +211,35 @@ def on_checks_complete(self) -> int: def run(self) -> int: """Run all configured checks and return the sum of their error counts""" checks = self.get_checks() - self.on_checks_begin() - for check in checks: - self.log.info(f"[CHECKS:{self.name}] {check}") - getattr(self, f"check_{check}")() - self.on_check_run(check) - return self.on_checks_complete() + try: + self.on_checks_begin() + for check in checks: + self.on_check_begin(check) + getattr(self, f"check_{check}")() + self.on_check_run(check) + except KeyboardInterrupt as e: + self.exiting() + finally: + result = 
self.on_checks_complete() + return result def succeed(self, name: str, success: list, log: bool = True) -> None: """Record (and log) success for a check type""" - self.success[name] = self.success.get(name, []) self.success[name].extend(success) - if log: - self.log.info("\n".join(success)) + if not log: + return + for message in success: + self.log.success(f"[{name}] {message}") def warn(self, name: str, warnings: list, log: bool = True) -> None: """Record (and log) warnings for a check type""" - self.warnings[name] = self.warnings.get(name, []) self.warnings[name].extend(warnings) - if log: - self.log.warning("\n".join(warnings)) + if not log: + return + for message in warnings: + self.log.warning(f"[{name}] {message}") class ForkingChecker(runner.ForkingRunner, Checker): @@ -245,19 +269,26 @@ def print_failed(self, problem_type): _out = [] _max = getattr(self, f"max_{problem_type}") for check, problems in getattr(self.checker, problem_type).items(): - _msg = f"[{problem_type.upper()}:{self.checker.name}] {check}" + _msg = f"{self.checker.name} {check}" _max = (min(len(problems), _max) if _max >= 0 else len(problems)) msg = ( f"{_msg}: (showing first {_max} of {len(problems)})" if (len(problems) > _max and _max > 0) else (f"{_msg}:" if _max != 0 else _msg)) _out.extend(self._section(msg, problems[:_max])) - if _out: - self.checker.log.error("\n".join(_out)) + if not _out: + return + output = ( + self.checker.log.warning if problem_type == "warnings" else self.checker.log.error) + output("\n".join(_out + [""])) def print_status(self) -> None: """Print summary status to stderr""" - self.checker.log.warning( - "\n".join(self._section(f"[SUMMARY:{self.checker.name}] {self.checker.status}"))) + if self.checker.errors: + self.checker.log.error(f"{self.checker.status}") + elif self.checker.warnings: + self.checker.log.warning(f"{self.checker.status}") + else: + self.checker.log.info(f"{self.checker.status}") def print_summary(self) -> None: """Write summary to stderr""" 
@@ -267,9 +298,9 @@ def print_summary(self) -> None: def _section(self, message: str, lines: list = None) -> list: """Print a summary section""" - section = ["", "-" * 80, "", f"{message}"] + section = ["Summary", "-" * 80, f"{message}"] if lines: - section += lines + section += [line.split("\n")[0] for line in lines] return section @@ -278,21 +309,34 @@ class AsyncChecker(Checker): async def _run(self) -> int: checks = self.get_checks() - await self.on_checks_begin() - for check in checks: - self.log.info(f"[CHECKS:{self.name}] {check}") - await getattr(self, f"check_{check}")() - await self.on_check_run(check) - return await self.on_checks_complete() + try: + await self.on_checks_begin() + for check in checks: + await self.on_check_begin(check) + await getattr(self, f"check_{check}")() + await self.on_check_run(check) + finally: + result = await self.on_checks_complete() + return result def run(self) -> int: - return asyncio.get_event_loop().run_until_complete(self._run()) + try: + return asyncio.get_event_loop().run_until_complete(self._run()) + except KeyboardInterrupt as e: + # This needs to be outside the loop to catch the a keyboard interrupt + # This means that a new loop has to be created to cleanup + result = self.exiting() + result = asyncio.get_event_loop().run_until_complete(self.on_checks_complete()) + return result + + async def on_check_begin(self, check: str) -> None: + super().on_check_begin(check) async def on_check_run(self, check: str) -> None: - pass + super().on_check_run(check) async def on_checks_begin(self) -> None: - pass + super().on_checks_begin() async def on_checks_complete(self) -> int: return super().on_checks_complete() diff --git a/tools/base/requirements.txt b/tools/base/requirements.txt new file mode 100644 index 0000000000000..f6de56917c002 --- /dev/null +++ b/tools/base/requirements.txt @@ -0,0 +1,28 @@ +# +# This file is autogenerated by pip-compile +# To update, run: +# +# pip-compile --generate-hashes 
tools/base/requirements.txt +# +colorama==0.4.4 \ + --hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b \ + --hash=sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2 + # via -r tools/base/requirements.txt +coloredlogs==15.0.1 \ + --hash=sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934 \ + --hash=sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0 + # via -r tools/base/requirements.txt +frozendict==2.0.3 \ + --hash=sha256:163c616188beb97fdc8ef6e73ec2ebd70a844d4cf19d2e383aa94d1b8376653d \ + --hash=sha256:58143e2d3d11699bc295d9e7e05f10dde99a727e2295d7f43542ecdc42c5ec70 + # via -r tools/base/requirements.txt +humanfriendly==9.2 \ + --hash=sha256:332da98c24cc150efcc91b5508b19115209272bfdf4b0764a56795932f854271 \ + --hash=sha256:f7dba53ac7935fd0b4a2fc9a29e316ddd9ea135fb3052d3d0279d10c18ff9c48 + # via + # -r tools/base/requirements.txt + # coloredlogs +verboselogs==1.7 \ + --hash=sha256:d63f23bf568295b95d3530c6864a0b580cec70e7ff974177dead1e4ffbc6ff49 \ + --hash=sha256:e33ddedcdfdafcb3a174701150430b11b46ceb64c2a9a26198c76a156568e427 + # via -r tools/base/requirements.txt diff --git a/tools/base/runner.py b/tools/base/runner.py index 58d542e02b5b1..300b9ca647a8b 100644 --- a/tools/base/runner.py +++ b/tools/base/runner.py @@ -10,8 +10,26 @@ from functools import cached_property, wraps from typing import Callable, Tuple, Optional, Union +from frozendict import frozendict + +import coloredlogs +import verboselogs + LOG_LEVELS = (("debug", logging.DEBUG), ("info", logging.INFO), ("warn", logging.WARN), ("error", logging.ERROR)) +LOG_FIELD_STYLES = frozendict( + name=frozendict(color="blue"), levelname=frozendict(color="cyan", bold=True)) +LOG_FMT = "%(name)s %(levelname)s %(message)s" +LOG_LEVEL_STYLES = frozendict( + critical=frozendict(bold=True, color="red"), + debug=frozendict(color="green"), + error=frozendict(color="red"), + info=frozendict(), + notice=frozendict(color="magenta", 
bold=True), + spam=frozendict(color="green", faint=True), + success=frozendict(bold=True, color="green"), + verbose=frozendict(color="blue"), + warning=frozendict(color="yellow")) def catches(errors: Union[Tuple[Exception], Exception]) -> Callable: @@ -72,18 +90,31 @@ def extra_args(self) -> list: """Unparsed args""" return self.parser.parse_known_args(self._args)[1] + @property + def log_field_styles(self): + return LOG_FIELD_STYLES + + @property + def log_fmt(self): + return LOG_FMT + + @property + def log_level_styles(self): + return LOG_LEVEL_STYLES + @cached_property def log(self) -> logging.Logger: """Instantiated logger""" + verboselogs.install() logger = logging.getLogger(self.name) logger.setLevel(self.log_level) - stdout_handler = logging.StreamHandler(sys.stdout) - stdout_handler.setLevel(logging.DEBUG) - stdout_handler.addFilter(LogFilter()) - stderr_handler = logging.StreamHandler(sys.stderr) - stderr_handler.setLevel(logging.WARN) - logger.addHandler(stdout_handler) - logger.addHandler(stderr_handler) + coloredlogs.install( + field_styles=self.log_field_styles, + level_styles=self.log_level_styles, + fmt=self.log_fmt, + level='DEBUG', + logger=logger, + isatty=True) return logger @cached_property @@ -107,6 +138,16 @@ def parser(self) -> argparse.ArgumentParser: def path(self) -> str: return os.getcwd() + @cached_property + def stdout(self) -> logging.Logger: + """Log to stdout""" + logger = logging.getLogger("stdout") + logger.setLevel(self.log_level) + handler = logging.StreamHandler(sys.stdout) + handler.setFormatter(logging.Formatter("%(message)s")) + logger.addHandler(handler) + return logger + def add_arguments(self, parser: argparse.ArgumentParser) -> None: """Override this method to add custom arguments to the arg parser""" parser.add_argument( diff --git a/tools/base/tests/test_checker.py b/tools/base/tests/test_checker.py index 696fbb024da19..9afee0a858f01 100644 --- a/tools/base/tests/test_checker.py +++ b/tools/base/tests/test_checker.py @@ 
-335,8 +335,9 @@ def test_checker_add_arguments(patches): @pytest.mark.parametrize("log", [True, False]) +@pytest.mark.parametrize("log_type", [None, "fatal"]) @pytest.mark.parametrize("errors", TEST_ERRORS) -def test_checker_error(patches, log, errors): +def test_checker_error(log, log_type, errors): checker = Checker("path1", "path2", "path3") log_mock = patch( "tools.base.checker.Checker.log", @@ -344,7 +345,10 @@ def test_checker_error(patches, log, errors): checker.errors = errors.copy() with log_mock as m_log: - assert checker.error("mycheck", ["err1", "err2", "err3"], log) == 1 + if log_type: + assert checker.error("mycheck", ["err1", "err2", "err3"], log, log_type=log_type) == 1 + else: + assert checker.error("mycheck", ["err1", "err2", "err3"], log) == 1 assert checker.errors["mycheck"] == errors.get("mycheck", []) + ["err1", "err2", "err3"] for k, v in errors.items(): @@ -352,10 +356,24 @@ def test_checker_error(patches, log, errors): assert checker.errors[k] == v if log: assert ( - list(m_log.return_value.error.call_args) - == [('err1\nerr2\nerr3',), {}]) + list(list(c) for c in getattr(m_log.return_value, log_type or "error").call_args_list) + == [[(f'[mycheck] err{i}',), {}] for i in range(1, 4)]) else: - assert not m_log.return_value.error.called + assert not getattr(m_log.return_value, log_type or "error").called + + +def test_checker_exiting(patches): + checker = Checker("path1", "path2", "path3") + patched = patches( + "Checker.error", + prefix="tools.base.checker") + + with patched as (m_error, ): + assert checker.exiting() == m_error.return_value + + assert ( + list(m_error.call_args) + == [('exiting', ['Keyboard exit']), {'log_type': 'fatal'}]) TEST_CHECKS = ( @@ -385,9 +403,56 @@ def test_checker_get_checks(checks): assert checker.get_checks() == checker.checks -def test_checker_on_check_run(): +def test_checker_on_check_begin(patches): checker = Checker("path1", "path2", "path3") - assert not checker.on_check_run("checkname") + patched = 
patches( + ("Checker.log", dict(new_callable=PropertyMock)), + prefix="tools.base.checker") + + with patched as (m_log, ): + assert not checker.on_check_begin("checkname") + + assert ( + list(m_log.return_value.notice.call_args) + == [('[checkname] Running check',), {}]) + + +@pytest.mark.parametrize("errors", [[], ["CHECK1", "CHECK2", "CHECK3"], ["CHECK2", "CHECK3"]]) +@pytest.mark.parametrize("warnings", [[], ["CHECK1", "CHECK2", "CHECK3"], ["CHECK2", "CHECK3"]]) +def test_checker_on_check_run(patches, errors, warnings): + checker = Checker("path1", "path2", "path3") + patched = patches( + ("Checker.log", dict(new_callable=PropertyMock)), + prefix="tools.base.checker") + + check = "CHECK1" + checker.errors = errors + checker.warnings = warnings + + with patched as (m_log, ): + assert not checker.on_check_run(check) + + if check in errors: + assert ( + list(m_log.return_value.error.call_args) + == [('[CHECK1] Check failed',), {}]) + assert not m_log.return_value.warning.called + assert not m_log.return_value.success.called + return + + if check in warnings: + assert ( + list(m_log.return_value.warning.call_args) + == [('[CHECK1] Check has warnings',), {}]) + assert not m_log.return_value.error.called + assert not m_log.return_value.info.called + return + + assert ( + list(m_log.return_value.success.call_args) + == [(f'[{check}] Check completed successfully',), {}]) + assert not m_log.return_value.warning.called + assert not m_log.return_value.error.called def test_checker_on_checks_begin(): @@ -418,33 +483,62 @@ def test_checker_on_checks_complete(patches, failed, show_summary): assert not m_summary.return_value.print_summary.called -def test_checker_run(patches): +@pytest.mark.parametrize("raises", [None, KeyboardInterrupt, Exception]) +def test_checker_run(patches, raises): checker = DummyCheckerWithChecks("path1", "path2", "path3") patched = patches( + "Checker.exiting", "Checker.get_checks", + "Checker.on_check_begin", + "Checker.on_check_run", 
"Checker.on_checks_begin", "Checker.on_checks_complete", ("Checker.log", dict(new_callable=PropertyMock)), ("Checker.name", dict(new_callable=PropertyMock)), prefix="tools.base.checker") - with patched as (m_get, m_begin, m_complete, m_log, m_name): + with patched as (m_exit, m_get, m_check, m_run, m_begin, m_complete, m_log, m_name): m_get.return_value = ("check1", "check2") - assert checker.run() == m_complete.return_value - assert ( - list(m_get.call_args) - == [(), {}]) + if raises: + m_begin.side_effect = raises() + + if raises == KeyboardInterrupt: + result = checker.run() + + else: + with pytest.raises(raises): + checker.run() + else: + assert checker.run() == m_complete.return_value + assert ( list(m_begin.call_args) == [(), {}]) assert ( list(m_complete.call_args) == [(), {}]) + + if raises == KeyboardInterrupt: + assert ( + list(m_exit.call_args) + == [(), {}]) + return + + assert not m_exit.called + + if raises: + return + + assert ( + list(m_get.call_args) + == [(), {}]) assert ( - list(list(c) for c in m_log.return_value.info.call_args_list) - == [[(f"[CHECKS:{m_name.return_value}] check1",), {}], - [(f"[CHECKS:{m_name.return_value}] check2",), {}]]) + list(list(c) for c in m_check.call_args_list) + == [[(f'check{i}',), {}] for i in range(1, 3)]) + assert ( + list(list(c) for c in m_run.call_args_list) + == [[(f'check{i}',), {}] for i in range(1, 3)]) assert ( list(checker.check1.call_args) == [(), {}]) @@ -479,8 +573,8 @@ def test_checker_warn(patches, log, warns): assert checker.warnings[k] == v if log: assert ( - list(m_log.return_value.warning.call_args) - == [('warn1\nwarn2\nwarn3',), {}]) + list(list(c) for c in m_log.return_value.warning.call_args_list) + == [[(f'[mycheck] warn{i}',), {}] for i in range(1, 4)]) else: assert not m_log.return_value.warn.called @@ -511,10 +605,10 @@ def test_checker_succeed(patches, log, success): assert checker.success[k] == v if log: assert ( - list(m_log.return_value.info.call_args) - == 
[('success1\nsuccess2\nsuccess3',), {}]) + list(list(c) for c in m_log.return_value.success.call_args_list) + == [[(f'[mycheck] success{i}',), {}] for i in range(1, 4)]) else: - assert not m_log.return_value.info.called + assert not m_log.return_value.success.called # CheckerSummary tests @@ -569,32 +663,46 @@ def test_checker_summary_section(section): summary = CheckerSummary(checker) message, lines = section expected = [ - "", + "Summary", "-" * 80, - "", f"{message}"] if lines: expected += lines assert summary._section(message, lines) == expected -def test_checker_summary_print_status(patches): +@pytest.mark.parametrize("errors", (True, False)) +@pytest.mark.parametrize("warnings", (True, False)) +def test_checker_summary_print_status(patches, errors, warnings): checker = DummyChecker() summary = CheckerSummary(checker) - patched = patches( - "CheckerSummary._section", - prefix="tools.base.checker") - summary.checker = MagicMock() - with patched as (m_section, ): - m_section.return_value = ["A", "B", "C"] - summary.print_status() - assert ( - list(m_section.call_args) - == [(f"[SUMMARY:{summary.checker.name}] {summary.checker.status}",), {}]) + summary.checker.errors = errors + summary.checker.warnings = warnings + + assert not summary.print_status() + + if errors: + assert ( + list(summary.checker.log.error.call_args) + == [(f"{summary.checker.status}",), {}]) + assert not summary.checker.log.warning.called + assert not summary.checker.log.info.called + return + + if warnings: + assert ( + list(summary.checker.log.warning.call_args) + == [(f"{summary.checker.status}",), {}]) + assert not summary.checker.log.error.called + assert not summary.checker.log.info.called + return + assert ( - list(summary.checker.log.warning.call_args) - == [('A\nB\nC',), {}]) + list(summary.checker.log.info.call_args) + == [(f"{summary.checker.status}",), {}]) + assert not summary.checker.log.error.called + assert not summary.checker.log.warning.called 
@pytest.mark.parametrize("problem_type", ("errors", "warnings")) @@ -619,12 +727,17 @@ def test_checker_summary_print_failed(patches, problem_type, max_display, proble assert not summary.checker.log.error.called assert not m_section.called return + + output = ( + summary.checker.log.warning if problem_type == "warnings" else summary.checker.log.error) + assert ( - list(summary.checker.log.error.call_args) - == [("\n".join(['A\nB\nC'] * len(problems)),), {}]) + list(output.call_args) + == [("".join(['A\nB\nC\n'] * len(problems)),), {}]) + if max_display == 0: expected = [ - [(f"[{problem_type.upper()}:{summary.checker.name}] {prob}", []), {}] + [(f"{summary.checker.name} {prob}", []), {}] for prob in problems] else: def _problems(prob): @@ -640,7 +753,7 @@ def _extra(prob): if max_display != 0 else "")) expected = [ - [(f"[{problem_type.upper()}:{summary.checker.name}] {prob}{_extra(prob)}", _problems(prob)), {}] + [(f"{summary.checker.name} {prob}{_extra(prob)}", _problems(prob)), {}] for prob in problems] assert ( list(list(c) for c in m_section.call_args_list) @@ -670,19 +783,49 @@ def test_asynchecker_constructor(): assert isinstance(checker, Checker) -def test_asynchecker_run(patches): +@pytest.mark.parametrize("raises", [None, KeyboardInterrupt, Exception]) +def test_asynchecker_run(patches, raises): checker = AsyncChecker() patched = patches( "asyncio", + "Checker.exiting", ("AsyncChecker._run", dict(new_callable=MagicMock)), + ("AsyncChecker.on_checks_complete", dict(new_callable=MagicMock)), prefix="tools.base.checker") - with patched as (m_async, m_run): + with patched as (m_async, m_exit, m_run, m_complete): + if raises: + m_run.side_effect = raises + + if raises == KeyboardInterrupt: + result = checker.run() + else: + with pytest.raises(raises): + checker.run() + return + else: + assert ( + checker.run() + == m_async.get_event_loop.return_value.run_until_complete.return_value) + + if raises == KeyboardInterrupt: assert ( - checker.run() - == 
m_async.get_event_loop.return_value.run_until_complete.return_value) + list(m_exit.call_args) + == [(), {}]) + assert ( + list(m_async.get_event_loop.call_args_list[1]) + == [(), {}]) + assert ( + list(m_async.get_event_loop.return_value.run_until_complete.call_args) + == [(m_complete.return_value,), {}]) + assert ( + list(m_complete.call_args) + == [(), {}]) + assert result == m_async.get_event_loop.return_value.run_until_complete.return_value + return + assert not m_exit.called assert ( list(m_async.get_event_loop.call_args) == [(), {}]) @@ -695,17 +838,48 @@ def test_asynchecker_run(patches): @pytest.mark.asyncio -async def test_asynchecker_on_check_run(): +async def test_asynchecker_on_check_begin(patches): checker = AsyncChecker() + patched = patches( + "Checker.on_check_begin", + prefix="tools.base.checker") - assert not await checker.on_check_run("CHECKNAME") + with patched as (m_super, ): + assert not await checker.on_check_begin("CHECKNAME") + + assert ( + list(m_super.call_args) + == [('CHECKNAME',), {}]) @pytest.mark.asyncio -async def test_asynchecker_on_checks_begin(): +async def test_asynchecker_on_check_run(patches): checker = AsyncChecker() + patched = patches( + "Checker.on_check_run", + prefix="tools.base.checker") - assert not await checker.on_checks_begin() + with patched as (m_super, ): + assert not await checker.on_check_run("CHECKNAME") + + assert ( + list(m_super.call_args) + == [('CHECKNAME',), {}]) + + +@pytest.mark.asyncio +async def test_asynchecker_on_checks_begin(patches): + checker = AsyncChecker() + patched = patches( + "Checker.on_checks_begin", + prefix="tools.base.checker") + + with patched as (m_super, ): + assert not await checker.on_checks_begin() + + assert ( + list(m_super.call_args) + == [(), {}]) @pytest.mark.asyncio @@ -727,7 +901,8 @@ async def test_asynchecker_on_checks_complete(patches): @pytest.mark.asyncio -async def test_asynchecker__run(patches): +@pytest.mark.parametrize("raises", [True, False]) +async def 
test_asynchecker__run(patches, raises): _check1 = MagicMock() _check2 = MagicMock() _check3 = MagicMock() @@ -743,31 +918,46 @@ async def check_check2(self): async def check_check3(self): return _check3() + class SomeError(Exception): + pass + checker = AsyncCheckerWithChecks() patched = patches( "Checker.log", "Checker.get_checks", "AsyncChecker.on_checks_begin", + "AsyncChecker.on_check_begin", "AsyncChecker.on_check_run", "AsyncChecker.on_checks_complete", prefix="tools.base.checker") - with patched as (m_log, m_checks, m_begin, m_run, m_complete): + with patched as (m_log, m_checks, m_begin, m_check, m_run, m_complete): m_checks.return_value = ["check1", "check2", "check3"] - assert await checker._run() == m_complete.return_value + if raises: + m_begin.side_effect = SomeError("AN ERROR OCCURRED") + + with pytest.raises(SomeError): + await checker._run() + else: + assert await checker._run() == m_complete.return_value assert ( - list(m_checks.call_args) + list(m_begin.call_args) == [(), {}]) assert ( - list(m_begin.call_args) + list(m_complete.call_args) + == [(), {}]) + + if raises: + return + + assert ( + list(m_checks.call_args) == [(), {}]) assert ( - list(list(c) for c in m_log.info.call_args_list) - == [[('[CHECKS:AsyncCheckerWithChecks] check1',), {}], - [('[CHECKS:AsyncCheckerWithChecks] check2',), {}], - [('[CHECKS:AsyncCheckerWithChecks] check3',), {}]]) + list(list(c) for c in m_check.call_args_list) + == [[(f'check{i}',), {}] for i in range(1, 4)]) for check in [_check1, _check2, _check3]: assert ( list(check.call_args) @@ -775,6 +965,3 @@ async def check_check3(self): assert ( list(list(c) for c in m_run.call_args_list) == [[('check1',), {}], [('check2',), {}], [('check3',), {}]]) - assert ( - list(m_complete.call_args) - == [(), {}]) diff --git a/tools/base/tests/test_runner.py b/tools/base/tests/test_runner.py index c9d3018f1e6a8..a1e4e4fd434e8 100644 --- a/tools/base/tests/test_runner.py +++ b/tools/base/tests/test_runner.py @@ -111,6 +111,9 @@ 
def test_catches(errors, raises, args, kwargs): def test_runner_constructor(): run = runner.Runner("path1", "path2", "path3") assert run._args == ("path1", "path2", "path3") + assert run.log_field_styles == runner.LOG_FIELD_STYLES + assert run.log_level_styles == runner.LOG_LEVEL_STYLES + assert run.log_fmt == runner.LOG_FMT def test_runner_args(): @@ -153,35 +156,39 @@ def test_runner_log(patches): run = runner.Runner("path1", "path2", "path3") patched = patches( "logging.getLogger", - "logging.StreamHandler", "LogFilter", + "coloredlogs", + "verboselogs", ("Runner.log_level", dict(new_callable=PropertyMock)), + ("Runner.log_level_styles", dict(new_callable=PropertyMock)), + ("Runner.log_field_styles", dict(new_callable=PropertyMock)), + ("Runner.log_fmt", dict(new_callable=PropertyMock)), ("Runner.name", dict(new_callable=PropertyMock)), prefix="tools.base.runner") - with patched as (m_logger, m_stream, m_filter, m_level, m_name): - loggers = (MagicMock(), MagicMock()) - m_stream.side_effect = loggers + with patched as patchy: + (m_logger, m_filter, m_color, m_verb, + m_level, m_lstyle, m_fstyle, m_fmt, m_name) = patchy assert run.log == m_logger.return_value + + assert ( + list(m_verb.install.call_args) + == [(), {}]) assert ( list(m_logger.return_value.setLevel.call_args) == [(m_level.return_value,), {}]) assert ( - list(list(c) for c in m_stream.call_args_list) - == [[(sys.stdout,), {}], - [(sys.stderr,), {}]]) - assert ( - list(loggers[0].setLevel.call_args) - == [(logging.DEBUG,), {}]) - assert ( - list(loggers[0].addFilter.call_args) - == [(m_filter.return_value,), {}]) - assert ( - list(loggers[1].setLevel.call_args) - == [(logging.WARN,), {}]) + list(m_logger.return_value.setLevel.call_args) + == [(m_level.return_value,), {}]) assert ( - list(list(c) for c in m_logger.return_value.addHandler.call_args_list) - == [[(loggers[0],), {}], [(loggers[1],), {}]]) + list(m_color.install.call_args) + == [(), + {'fmt': m_fmt.return_value, + 'isatty': True, + 
'field_styles': m_fstyle.return_value, + 'level': 'DEBUG', + 'level_styles': m_lstyle.return_value, + 'logger': m_logger.return_value}]) assert "log" in run.__dict__ @@ -239,6 +246,37 @@ def test_checker_path(): == [(), {}]) +def test_checker_stdout(patches): + run = runner.Runner("path1", "path2", "path3") + + patched = patches( + "logging", + ("Runner.log_level", dict(new_callable=PropertyMock)), + prefix="tools.base.runner") + + with patched as (m_log, m_level): + assert run.stdout == m_log.getLogger.return_value + + assert ( + list(m_log.getLogger.call_args) + == [('stdout',), {}]) + assert ( + list(m_log.getLogger.return_value.setLevel.call_args) + == [(m_level.return_value,), {}]) + assert ( + list(m_log.StreamHandler.call_args) + == [(sys.stdout,), {}]) + assert ( + list(m_log.Formatter.call_args) + == [('%(message)s',), {}]) + assert ( + list(m_log.StreamHandler.return_value.setFormatter.call_args) + == [(m_log.Formatter.return_value,), {}]) + assert ( + list(m_log.getLogger.return_value.addHandler.call_args) + == [(m_log.StreamHandler.return_value,), {}]) + + def test_runner_add_arguments(): run = runner.Runner("path1", "path2", "path3") parser = MagicMock() diff --git a/tools/code_format/python_check.py b/tools/code_format/python_check.py index 512576bfec7cb..64f1e296f9560 100755 --- a/tools/code_format/python_check.py +++ b/tools/code_format/python_check.py @@ -94,7 +94,7 @@ def check_yapf(self) -> None: def on_check_run(self, check: str) -> None: if check not in self.failed and check not in self.warned: - self.succeed(check, [f"[CHECKS:{self.name}] {check}: success"]) + self.succeed(check, [check]) def on_checks_complete(self) -> int: if self.diff_file_path and self.has_failed: @@ -113,11 +113,11 @@ def yapf_format(self, python_file: str) -> tuple: def yapf_run(self, python_file: str) -> None: reformatted_source, encoding, changed = self.yapf_format(python_file) if not changed: - return self.succeed("yapf", [f"{python_file}: success"]) + return 
self.succeed("yapf", [python_file]) if self.fix: return self.warn("yapf", [f"{python_file}: reformatted"]) if reformatted_source: - return self.warn("yapf", [reformatted_source]) + return self.warn("yapf", [f"{python_file}: diff\n{reformatted_source}"]) self.error("yapf", [python_file]) def _strip_line(self, line) -> str: diff --git a/tools/code_format/tests/test_python_check.py b/tools/code_format/tests/test_python_check.py index 0241c32fa45eb..ab22c8f9089e6 100644 --- a/tools/code_format/tests/test_python_check.py +++ b/tools/code_format/tests/test_python_check.py @@ -223,7 +223,7 @@ def test_python_on_check_run(patches, results): else: assert ( list(m_succeed.call_args) - == [(checkname, [f"[CHECKS:{m_name.return_value}] {checkname}: success"]), {}]) + == [(checkname, [checkname]), {}]) TEST_CHECKS_COMPLETE = ( @@ -321,7 +321,7 @@ def test_python_yapf_run(patches, fix, format_results): if not changed: assert ( list(m_succeed.call_args) - == [('yapf', ['FILENAME: success']), {}]) + == [('yapf', ['FILENAME']), {}]) assert not m_warn.called assert not m_error.called assert not m_fix.called @@ -339,7 +339,7 @@ def test_python_yapf_run(patches, fix, format_results): assert len(m_warn.call_args_list) == 1 assert ( list(m_warn.call_args) - == [('yapf', [reformat]), {}]) + == [('yapf', [f'FILENAME: diff\n{reformat}']), {}]) return assert not m_warn.called assert ( diff --git a/tools/dependency/pip_check.py b/tools/dependency/pip_check.py index f0347e372990d..c7165778cbf12 100755 --- a/tools/dependency/pip_check.py +++ b/tools/dependency/pip_check.py @@ -80,15 +80,12 @@ def check_dependabot(self) -> None: def dependabot_success(self, correct: list) -> None: self.succeed( - "dependabot", ([ - f"Correct dependabot config for {self.requirements_filename} in dir: {dirname}" - for dirname in sorted(correct) - ])) + "dependabot", + ([f"{self.requirements_filename}: {dirname}" for dirname in sorted(correct)])) def dependabot_errors(self, missing: list, msg: str) -> None: - 
self.error( - "dependabot", - ([f"[ERROR:{self.name}] (dependabot) {msg}: {dirname}" for dirname in sorted(missing)])) + for dirname in sorted(missing): + self.error("dependabot", [f"{msg}: {dirname}"]) def main(*args) -> int: diff --git a/tools/dependency/tests/test_pip_check.py b/tools/dependency/tests/test_pip_check.py index 0cdf07987f49e..0e659676d8965 100644 --- a/tools/dependency/tests/test_pip_check.py +++ b/tools/dependency/tests/test_pip_check.py @@ -148,10 +148,7 @@ def test_pip_checker_dependabot_success(patches): assert ( list(m_succeed.call_args) == [('dependabot', - [f'Correct dependabot config for {m_fname.return_value} in dir: A', - f'Correct dependabot config for {m_fname.return_value} in dir: B', - f'Correct dependabot config for {m_fname.return_value} in dir: C', - f'Correct dependabot config for {m_fname.return_value} in dir: D']), {}]) + [f"{m_fname.return_value}: {x}" for x in sorted(success)]), {}]) def test_pip_checker_dependabot_errors(patches): @@ -169,12 +166,8 @@ def test_pip_checker_dependabot_errors(patches): checker.dependabot_errors(errors, MSG) assert ( - list(m_error.call_args) - == [('dependabot', - [f"[ERROR:{m_name.return_value}] (dependabot) {MSG}: A", - f"[ERROR:{m_name.return_value}] (dependabot) {MSG}: B", - f"[ERROR:{m_name.return_value}] (dependabot) {MSG}: C", - f"[ERROR:{m_name.return_value}] (dependabot) {MSG}: D"]), {}]) + list(list(c) for c in list(m_error.call_args_list)) + == [[('dependabot', [f'ERROR MESSAGE: {x}']), {}] for x in sorted(errors)]) def test_pip_checker_main(): diff --git a/tools/testing/all_pytests.py b/tools/testing/all_pytests.py index 058e2227a59ce..df94b8175f871 100644 --- a/tools/testing/all_pytests.py +++ b/tools/testing/all_pytests.py @@ -55,9 +55,9 @@ def check_pytests(self) -> int: for target in self.pytest_targets: try: self.bazel.run(target, *self.pytest_bazel_args) - self.succeed("pytest", [target]) + self.succeed("pytests", [target]) except runner.BazelRunError: - self.error("pytest", 
[f"{target} failed"]) + self.error("pytests", [f"{target} failed"]) def on_checks_begin(self): if self.cov_path and os.path.exists(self.cov_path): diff --git a/tools/testing/tests/test_all_pytests.py b/tools/testing/tests/test_all_pytests.py index a7272dd8b0ad5..453d80247af2a 100644 --- a/tools/testing/tests/test_all_pytests.py +++ b/tools/testing/tests/test_all_pytests.py @@ -165,15 +165,15 @@ def _run_bazel(target): [('check7',), {}]]) assert ( list(list(c) for c in m_error.call_args_list) - == [[('pytest', ['check3 failed']), {}], - [('pytest', ['check4 failed']), {}], - [('pytest', ['check6 failed']), {}]]) + == [[('pytests', ['check3 failed']), {}], + [('pytests', ['check4 failed']), {}], + [('pytests', ['check6 failed']), {}]]) assert ( list(list(c) for c in m_succeed.call_args_list) - == [[('pytest', ['check1']), {}], - [('pytest', ['check2']), {}], - [('pytest', ['check5']), {}], - [('pytest', ['check7']), {}]]) + == [[('pytests', ['check1']), {}], + [('pytests', ['check2']), {}], + [('pytests', ['check5']), {}], + [('pytests', ['check7']), {}]]) @pytest.mark.parametrize("exists", [True, False]) From 364c821f85aff8a42b0bf7fc0e3e5e4b6c98c86c Mon Sep 17 00:00:00 2001 From: Manish Kumar Date: Mon, 19 Jul 2021 20:19:13 +0530 Subject: [PATCH 02/57] Added max_requests_per_connection for downstream connection. 
(#14936) Signed-off-by: Manish Kumar --- api/envoy/config/cluster/v3/cluster.proto | 6 ++- .../config/cluster/v4alpha/cluster.proto | 15 +++---- api/envoy/config/core/v3/protocol.proto | 8 +++- api/envoy/config/core/v4alpha/protocol.proto | 8 +++- .../http/http_conn_man/stats.rst | 1 + docs/root/version_history/current.rst | 3 ++ .../envoy/config/cluster/v3/cluster.proto | 6 ++- .../config/cluster/v4alpha/cluster.proto | 6 ++- .../envoy/config/core/v3/protocol.proto | 8 +++- .../envoy/config/core/v4alpha/protocol.proto | 8 +++- source/common/http/conn_manager_config.h | 5 +++ source/common/http/conn_manager_impl.cc | 14 +++++++ source/common/http/conn_manager_impl.h | 2 + source/common/upstream/upstream_impl.cc | 11 ++++- .../network/http_connection_manager/config.cc | 4 +- .../network/http_connection_manager/config.h | 2 + source/server/admin/admin.h | 1 + .../http/conn_manager_impl_fuzz_test.cc | 1 + .../common/http/conn_manager_impl_test_base.h | 1 + test/common/upstream/upstream_impl_test.cc | 32 ++++++++++++++ test/config/utility.cc | 11 +++++ test/config/utility.h | 3 ++ .../http_connection_manager/config_test.cc | 36 ++++++++++++++++ test/integration/integration_test.cc | 8 +++- test/integration/protocol_integration_test.cc | 42 +++++++++++++++++++ test/mocks/http/mocks.h | 1 + 26 files changed, 222 insertions(+), 21 deletions(-) diff --git a/api/envoy/config/cluster/v3/cluster.proto b/api/envoy/config/cluster/v3/cluster.proto index 83a2a7e582e51..35bfa93ea352a 100644 --- a/api/envoy/config/cluster/v3/cluster.proto +++ b/api/envoy/config/cluster/v3/cluster.proto @@ -746,7 +746,11 @@ message Cluster { // is respected by both the HTTP/1.1 and HTTP/2 connection pool // implementations. If not specified, there is no limit. Setting this // parameter to 1 will effectively disable keep alive. - google.protobuf.UInt32Value max_requests_per_connection = 9; + // + // .. 
attention:: + // This field has been deprecated in favor of the :ref:`max_requests_per_connection ` field. + google.protobuf.UInt32Value max_requests_per_connection = 9 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; // Optional :ref:`circuit breaking ` for the cluster. CircuitBreakers circuit_breakers = 10; diff --git a/api/envoy/config/cluster/v4alpha/cluster.proto b/api/envoy/config/cluster/v4alpha/cluster.proto index 4a52adb8babb9..ebd007bca5780 100644 --- a/api/envoy/config/cluster/v4alpha/cluster.proto +++ b/api/envoy/config/cluster/v4alpha/cluster.proto @@ -631,11 +631,12 @@ message Cluster { [(validate.rules).double = {lte: 3.0 gte: 1.0}]; } - reserved 12, 15, 7, 11, 35, 46, 29, 13, 14, 18, 45, 26, 47; + reserved 12, 15, 7, 11, 35, 9, 46, 29, 13, 14, 18, 45, 26, 47; - reserved "hosts", "tls_context", "extension_protocol_options", "upstream_http_protocol_options", - "common_http_protocol_options", "http_protocol_options", "http2_protocol_options", - "dns_resolvers", "use_tcp_for_dns_lookups", "protocol_selection", "track_timeout_budgets"; + reserved "hosts", "tls_context", "extension_protocol_options", "max_requests_per_connection", + "upstream_http_protocol_options", "common_http_protocol_options", "http_protocol_options", + "http2_protocol_options", "dns_resolvers", "use_tcp_for_dns_lookups", "protocol_selection", + "track_timeout_budgets"; // Configuration to use different transport sockets for different endpoints. // The entry of *envoy.transport_socket_match* in the @@ -751,12 +752,6 @@ message Cluster { // members will be considered healthy at all times. repeated core.v4alpha.HealthCheck health_checks = 8; - // Optional maximum requests for a single upstream connection. This parameter - // is respected by both the HTTP/1.1 and HTTP/2 connection pool - // implementations. If not specified, there is no limit. Setting this - // parameter to 1 will effectively disable keep alive. 
- google.protobuf.UInt32Value max_requests_per_connection = 9; - // Optional :ref:`circuit breaking ` for the cluster. CircuitBreakers circuit_breakers = 10; diff --git a/api/envoy/config/core/v3/protocol.proto b/api/envoy/config/core/v3/protocol.proto index d3b56a5ed7680..6589a3ed3a1a4 100644 --- a/api/envoy/config/core/v3/protocol.proto +++ b/api/envoy/config/core/v3/protocol.proto @@ -93,7 +93,7 @@ message AlternateProtocolsCacheOptions { google.protobuf.UInt32Value max_entries = 2 [(validate.rules).uint32 = {gt: 0}]; } -// [#next-free-field: 6] +// [#next-free-field: 7] message HttpProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HttpProtocolOptions"; @@ -157,6 +157,12 @@ message HttpProtocolOptions { // If this setting is not specified, the value defaults to ALLOW. // Note: upstream responses are not affected by this setting. HeadersWithUnderscoresAction headers_with_underscores_action = 5; + + // Optional maximum requests for both upstream and downstream connections. + // If not specified, there is no limit. + // Setting this parameter to 1 will effectively disable keep alive. + // For HTTP/2 and HTTP/3, due to concurrent stream processing, the limit is approximate. 
+ google.protobuf.UInt32Value max_requests_per_connection = 6; } // [#next-free-field: 8] diff --git a/api/envoy/config/core/v4alpha/protocol.proto b/api/envoy/config/core/v4alpha/protocol.proto index 50c47f006938f..37e5af0c72bd1 100644 --- a/api/envoy/config/core/v4alpha/protocol.proto +++ b/api/envoy/config/core/v4alpha/protocol.proto @@ -98,7 +98,7 @@ message AlternateProtocolsCacheOptions { google.protobuf.UInt32Value max_entries = 2 [(validate.rules).uint32 = {gt: 0}]; } -// [#next-free-field: 6] +// [#next-free-field: 7] message HttpProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HttpProtocolOptions"; @@ -162,6 +162,12 @@ message HttpProtocolOptions { // If this setting is not specified, the value defaults to ALLOW. // Note: upstream responses are not affected by this setting. HeadersWithUnderscoresAction headers_with_underscores_action = 5; + + // Optional maximum requests for both upstream and downstream connections. + // If not specified, there is no limit. + // Setting this parameter to 1 will effectively disable keep alive. + // For HTTP/2 and HTTP/3, due to concurrent stream processing, the limit is approximate. 
+ google.protobuf.UInt32Value max_requests_per_connection = 6; } // [#next-free-field: 8] diff --git a/docs/root/configuration/http/http_conn_man/stats.rst b/docs/root/configuration/http/http_conn_man/stats.rst index 8df5877557b62..3a4d7a568c82f 100644 --- a/docs/root/configuration/http/http_conn_man/stats.rst +++ b/docs/root/configuration/http/http_conn_man/stats.rst @@ -37,6 +37,7 @@ statistics: downstream_cx_drain_close, Counter, Total connections closed due to draining downstream_cx_idle_timeout, Counter, Total connections closed due to idle timeout downstream_cx_max_duration_reached, Counter, Total connections closed due to max connection duration + downstream_cx_max_requests_reached, Counter, Total connections closed due to max requests per connection downstream_cx_overload_disable_keepalive, Counter, Total connections for which HTTP 1.x keepalive has been disabled due to Envoy overload downstream_flow_control_paused_reading_total, Counter, Total number of times reads were disabled due to flow control downstream_flow_control_resumed_reading_total, Counter, Total number of times reads were enabled on the connection due to flow control diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 80eb72a7049ff..cc6faa5ec0913 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -30,9 +30,12 @@ Removed Config or Runtime New Features ------------ * http: added :ref:`string_match ` in the header matcher. +* http: added support for :ref:`max_requests_per_connection ` for both upstream and downstream connections. Deprecated ---------- +* cluster: :ref:`max_requests_per_connection ` is deprecated in favor of :ref:`max_requests_per_connection `. * http: the HeaderMatcher fields :ref:`exact_match `, :ref:`safe_regex_match `, :ref:`prefix_match `, :ref:`suffix_match ` and :ref:`contains_match ` are deprecated by :ref:`string_match `. 
+ diff --git a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto index 42e86227156fe..137300708e375 100644 --- a/generated_api_shadow/envoy/config/cluster/v3/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v3/cluster.proto @@ -747,7 +747,11 @@ message Cluster { // is respected by both the HTTP/1.1 and HTTP/2 connection pool // implementations. If not specified, there is no limit. Setting this // parameter to 1 will effectively disable keep alive. - google.protobuf.UInt32Value max_requests_per_connection = 9; + // + // .. attention:: + // This field has been deprecated in favor of the :ref:`max_requests_per_connection ` field. + google.protobuf.UInt32Value max_requests_per_connection = 9 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; // Optional :ref:`circuit breaking ` for the cluster. CircuitBreakers circuit_breakers = 10; diff --git a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto index afdee8bc0f55f..3baa5c7ec0ac9 100644 --- a/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto +++ b/generated_api_shadow/envoy/config/cluster/v4alpha/cluster.proto @@ -755,7 +755,11 @@ message Cluster { // is respected by both the HTTP/1.1 and HTTP/2 connection pool // implementations. If not specified, there is no limit. Setting this // parameter to 1 will effectively disable keep alive. - google.protobuf.UInt32Value max_requests_per_connection = 9; + // + // .. attention:: + // This field has been deprecated in favor of the :ref:`max_requests_per_connection ` field. + google.protobuf.UInt32Value hidden_envoy_deprecated_max_requests_per_connection = 9 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; // Optional :ref:`circuit breaking ` for the cluster. 
CircuitBreakers circuit_breakers = 10; diff --git a/generated_api_shadow/envoy/config/core/v3/protocol.proto b/generated_api_shadow/envoy/config/core/v3/protocol.proto index d3b56a5ed7680..6589a3ed3a1a4 100644 --- a/generated_api_shadow/envoy/config/core/v3/protocol.proto +++ b/generated_api_shadow/envoy/config/core/v3/protocol.proto @@ -93,7 +93,7 @@ message AlternateProtocolsCacheOptions { google.protobuf.UInt32Value max_entries = 2 [(validate.rules).uint32 = {gt: 0}]; } -// [#next-free-field: 6] +// [#next-free-field: 7] message HttpProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HttpProtocolOptions"; @@ -157,6 +157,12 @@ message HttpProtocolOptions { // If this setting is not specified, the value defaults to ALLOW. // Note: upstream responses are not affected by this setting. HeadersWithUnderscoresAction headers_with_underscores_action = 5; + + // Optional maximum requests for both upstream and downstream connections. + // If not specified, there is no limit. + // Setting this parameter to 1 will effectively disable keep alive. + // For HTTP/2 and HTTP/3, due to concurrent stream processing, the limit is approximate. 
+ google.protobuf.UInt32Value max_requests_per_connection = 6; } // [#next-free-field: 8] diff --git a/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto b/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto index 2017020b3d946..f99ae27f14392 100644 --- a/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto +++ b/generated_api_shadow/envoy/config/core/v4alpha/protocol.proto @@ -99,7 +99,7 @@ message AlternateProtocolsCacheOptions { google.protobuf.UInt32Value max_entries = 2 [(validate.rules).uint32 = {gt: 0}]; } -// [#next-free-field: 6] +// [#next-free-field: 7] message HttpProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HttpProtocolOptions"; @@ -163,6 +163,12 @@ message HttpProtocolOptions { // If this setting is not specified, the value defaults to ALLOW. // Note: upstream responses are not affected by this setting. HeadersWithUnderscoresAction headers_with_underscores_action = 5; + + // Optional maximum requests for both upstream and downstream connections. + // If not specified, there is no limit. + // Setting this parameter to 1 will effectively disable keep alive. + // For HTTP/2 and HTTP/3, due to concurrent stream processing, the limit is approximate. + google.protobuf.UInt32Value max_requests_per_connection = 6; } // [#next-free-field: 8] diff --git a/source/common/http/conn_manager_config.h b/source/common/http/conn_manager_config.h index b56821b665d12..cfcb9884eec0a 100644 --- a/source/common/http/conn_manager_config.h +++ b/source/common/http/conn_manager_config.h @@ -35,6 +35,7 @@ namespace Http { COUNTER(downstream_cx_http3_total) \ COUNTER(downstream_cx_idle_timeout) \ COUNTER(downstream_cx_max_duration_reached) \ + COUNTER(downstream_cx_max_requests_reached) \ COUNTER(downstream_cx_overload_disable_keepalive) \ COUNTER(downstream_cx_protocol_error) \ COUNTER(downstream_cx_rx_bytes_total) \ @@ -495,6 +496,10 @@ class ConnectionManagerConfig { * header. 
*/ virtual bool shouldStripTrailingHostDot() const PURE; + /** + * @return maximum requests for downstream. + */ + virtual uint64_t maxRequestsPerConnection() const PURE; }; } // namespace Http } // namespace Envoy diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 92cfb00102756..8050bb884077f 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -284,6 +284,20 @@ RequestDecoder& ConnectionManagerImpl::newStream(ResponseEncoder& response_encod ActiveStreamPtr new_stream(new ActiveStream(*this, response_encoder.getStream().bufferLimit(), std::move(downstream_request_account))); + accumulated_requests_++; + if (config_.maxRequestsPerConnection() > 0 && + accumulated_requests_ >= config_.maxRequestsPerConnection()) { + if (codec_->protocol() < Protocol::Http2) { + new_stream->state_.saw_connection_close_ = true; + // Prevent erroneous debug log of closing due to incoming connection close header. + drain_state_ = DrainState::Closing; + } else { + startDrainSequence(); + } + ENVOY_CONN_LOG(debug, "max requests per connection reached", read_callbacks_->connection()); + stats_.named_.downstream_cx_max_requests_reached_.inc(); + } + new_stream->state_.is_internally_created_ = is_internally_created; new_stream->response_encoder_ = &response_encoder; new_stream->response_encoder_->getStream().addCallbacks(*new_stream); diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index 5b544890469f2..b83e0aa264a03 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -459,6 +459,8 @@ class ConnectionManagerImpl : Logger::Loggable, // Hop by hop headers should always be cleared for Envoy-as-a-proxy but will // not be for Envoy-mobile. bool clear_hop_by_hop_response_headers_{true}; + // The number of requests accumulated on the current connection. 
+ uint64_t accumulated_requests_{}; }; } // namespace Http diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 01ced1e6fe2f3..b5ab427aa988d 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -713,8 +713,9 @@ ClusterInfoImpl::ClusterInfoImpl( extensionProtocolOptionsTyped( "envoy.extensions.upstreams.http.v3.HttpProtocolOptions"), factory_context.messageValidationVisitor())), - max_requests_per_connection_( - PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, max_requests_per_connection, 0)), + max_requests_per_connection_(PROTOBUF_GET_WRAPPED_OR_DEFAULT( + http_protocol_options_->common_http_protocol_options_, max_requests_per_connection, + config.max_requests_per_connection().value())), max_response_headers_count_(PROTOBUF_GET_WRAPPED_OR_DEFAULT( http_protocol_options_->common_http_protocol_options_, max_headers_count, runtime_.snapshot().getInteger(Http::MaxResponseHeadersCountOverrideKey, @@ -767,6 +768,12 @@ ClusterInfoImpl::ClusterInfoImpl( : absl::nullopt), factory_context_( std::make_unique(*stats_scope_, runtime, factory_context)) { + if (config.has_max_requests_per_connection() && + http_protocol_options_->common_http_protocol_options_.has_max_requests_per_connection()) { + throw EnvoyException("Only one of max_requests_per_connection from Cluster or " + "HttpProtocolOptions can be specified"); + } + switch (config.lb_policy()) { case envoy::config::cluster::v3::Cluster::ROUND_ROBIN: lb_type_ = LoadBalancerType::RoundRobin; diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 49b6bf8bb0208..4d9c4128fc8ff 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -330,7 +330,9 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( 
config.common_http_protocol_options().headers_with_underscores_action()), local_reply_(LocalReply::Factory::create(config.local_reply_config(), context)), path_with_escaped_slashes_action_(getPathWithEscapedSlashesAction(config, context)), - strip_trailing_host_dot_(config.strip_trailing_host_dot()) { + strip_trailing_host_dot_(config.strip_trailing_host_dot()), + max_requests_per_connection_(PROTOBUF_GET_WRAPPED_OR_DEFAULT( + config.common_http_protocol_options(), max_requests_per_connection, 0)) { // If idle_timeout_ was not configured in common_http_protocol_options, use value in deprecated // idle_timeout field. // TODO(asraa): Remove when idle_timeout is removed. diff --git a/source/extensions/filters/network/http_connection_manager/config.h b/source/extensions/filters/network/http_connection_manager/config.h index 26b2417ddba0a..b23fd8194c49f 100644 --- a/source/extensions/filters/network/http_connection_manager/config.h +++ b/source/extensions/filters/network/http_connection_manager/config.h @@ -217,6 +217,7 @@ class HttpConnectionManagerConfig : Logger::Loggable, originalIpDetectionExtensions() const override { return original_ip_detection_extensions_; } + uint64_t maxRequestsPerConnection() const override { return max_requests_per_connection_; } private: enum class CodecType { HTTP1, HTTP2, HTTP3, AUTO }; @@ -311,6 +312,7 @@ class HttpConnectionManagerConfig : Logger::Loggable, const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager:: PathWithEscapedSlashesAction path_with_escaped_slashes_action_; const bool strip_trailing_host_dot_; + const uint64_t max_requests_per_connection_; }; /** diff --git a/source/server/admin/admin.h b/source/server/admin/admin.h index 95db2bba4f8c7..2dbe42b200bfa 100644 --- a/source/server/admin/admin.h +++ b/source/server/admin/admin.h @@ -205,6 +205,7 @@ class AdminImpl : public Admin, return runCallback(path_and_query, response_headers, response, filter); }; } + uint64_t 
maxRequestsPerConnection() const override { return 0; } private: /** diff --git a/test/common/http/conn_manager_impl_fuzz_test.cc b/test/common/http/conn_manager_impl_fuzz_test.cc index 928289062dcef..484389ab62360 100644 --- a/test/common/http/conn_manager_impl_fuzz_test.cc +++ b/test/common/http/conn_manager_impl_fuzz_test.cc @@ -216,6 +216,7 @@ class FuzzConfig : public ConnectionManagerConfig { originalIpDetectionExtensions() const override { return ip_detection_extensions_; } + uint64_t maxRequestsPerConnection() const override { return 0; } const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager config_; diff --git a/test/common/http/conn_manager_impl_test_base.h b/test/common/http/conn_manager_impl_test_base.h index 53a19ce2ecd57..6225b61d854d2 100644 --- a/test/common/http/conn_manager_impl_test_base.h +++ b/test/common/http/conn_manager_impl_test_base.h @@ -153,6 +153,7 @@ class HttpConnectionManagerImplTest : public testing::Test, public ConnectionMan originalIpDetectionExtensions() const override { return ip_detection_extensions_; } + uint64_t maxRequestsPerConnection() const override { return 0; } Envoy::Event::SimulatedTimeSystem test_time_; NiceMock route_config_provider_; diff --git a/test/common/upstream/upstream_impl_test.cc b/test/common/upstream/upstream_impl_test.cc index 82f34bcc3ba24..969fd4211e966 100644 --- a/test/common/upstream/upstream_impl_test.cc +++ b/test/common/upstream/upstream_impl_test.cc @@ -3794,6 +3794,38 @@ TEST(HostPartitionTest, PartitionHostsImmediateFailureExcludeDisabled) { EXPECT_EQ(hosts[2], update_hosts_params.excluded_hosts_per_locality->get()[1][0]); } +TEST_F(ClusterInfoImplTest, MaxRequestsPerConnectionValidation) { + const std::string yaml = R"EOF( + name: cluster1 + type: STRICT_DNS + lb_policy: ROUND_ROBIN + max_requests_per_connection: 3 + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": 
type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + common_http_protocol_options: + max_requests_per_connection: 3 + use_downstream_protocol_config: {} +)EOF"; + + EXPECT_THROW_WITH_MESSAGE(makeCluster(yaml), EnvoyException, + "Only one of max_requests_per_connection from Cluster or " + "HttpProtocolOptions can be specified"); +} + +TEST_F(ClusterInfoImplTest, DeprecatedMaxRequestsPerConnection) { + const std::string yaml = R"EOF( + name: cluster1 + type: STRICT_DNS + lb_policy: ROUND_ROBIN + max_requests_per_connection: 3 +)EOF"; + + auto cluster = makeCluster(yaml); + + EXPECT_EQ(3U, cluster->info()->maxRequestsPerConnection()); +} + } // namespace } // namespace Upstream } // namespace Envoy diff --git a/test/config/utility.cc b/test/config/utility.cc index 70a3986632510..0ac40d6d3fffd 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -1017,6 +1017,17 @@ void ConfigHelper::setConnectTimeout(std::chrono::milliseconds timeout) { connect_timeout_set_ = true; } +void ConfigHelper::setDownstreamMaxRequestsPerConnection(uint64_t max_requests_per_connection) { + addConfigModifier( + [max_requests_per_connection]( + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { + hcm.mutable_common_http_protocol_options() + ->mutable_max_requests_per_connection() + ->set_value(max_requests_per_connection); + }); +} + envoy::config::route::v3::VirtualHost ConfigHelper::createVirtualHost(const char* domain, const char* prefix, const char* cluster) { envoy::config::route::v3::VirtualHost virtual_host; diff --git a/test/config/utility.h b/test/config/utility.h index 1c764f11ee21e..55dba9b7768c0 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -211,6 +211,9 @@ class ConfigHelper { // Set the connect timeout on upstream connections. void setConnectTimeout(std::chrono::milliseconds timeout); + // Set the max_requests_per_connection for downstream through the HttpConnectionManager. 
+ void setDownstreamMaxRequestsPerConnection(uint64_t max_requests_per_connection); + envoy::config::route::v3::VirtualHost createVirtualHost(const char* host, const char* route = "/", const char* cluster = "cluster_0"); diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index 67e2347038b79..bf073cd25a2a2 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -886,6 +886,42 @@ TEST_F(HttpConnectionManagerConfigTest, MaxRequestHeaderCountConfigurable) { EXPECT_EQ(200, config.maxRequestHeadersCount()); } +// Checking that default max_requests_per_connection is 0. +TEST_F(HttpConnectionManagerConfigTest, DefaultMaxRequestPerConnection) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + route_config: + name: local_route + http_filters: + - name: envoy.filters.http.router + )EOF"; + + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); + EXPECT_EQ(0, config.maxRequestsPerConnection()); +} + +// Check that max_requests_per_connection is configured. 
+TEST_F(HttpConnectionManagerConfigTest, MaxRequestPerConnectionConfigurable) { + const std::string yaml_string = R"EOF( + stat_prefix: ingress_http + common_http_protocol_options: + max_requests_per_connection: 5 + route_config: + name: local_route + http_filters: + - name: envoy.filters.http.router + )EOF"; + + HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string), context_, + date_provider_, route_config_provider_manager_, + scoped_routes_config_provider_manager_, http_tracer_manager_, + filter_config_provider_manager_); + EXPECT_EQ(5, config.maxRequestsPerConnection()); +} + TEST_F(HttpConnectionManagerConfigTest, ServerOverwrite) { const std::string yaml_string = R"EOF( stat_prefix: ingress_http diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 10c57fa1fac6d..6038ca7fae297 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -440,9 +440,15 @@ TEST_P(IntegrationTest, UpstreamDisconnectWithTwoRequests) { auto* static_resources = bootstrap.mutable_static_resources(); auto* cluster = static_resources->mutable_clusters(0); // Ensure we only have one connection upstream, one request active at a time. 
- cluster->mutable_max_requests_per_connection()->set_value(1); + ConfigHelper::HttpProtocolOptions protocol_options; + protocol_options.mutable_common_http_protocol_options() + ->mutable_max_requests_per_connection() + ->set_value(1); + protocol_options.mutable_use_downstream_protocol_config(); auto* circuit_breakers = cluster->mutable_circuit_breakers(); circuit_breakers->add_thresholds()->mutable_max_connections()->set_value(1); + ConfigHelper::setProtocolOptions(*bootstrap.mutable_static_resources()->mutable_clusters(0), + protocol_options); }); initialize(); diff --git a/test/integration/protocol_integration_test.cc b/test/integration/protocol_integration_test.cc index 39a95383d33d3..8a31ce5dda486 100644 --- a/test/integration/protocol_integration_test.cc +++ b/test/integration/protocol_integration_test.cc @@ -2445,6 +2445,48 @@ TEST_P(DownstreamProtocolIntegrationTest, BasicMaxStreamTimeoutLegacy) { EXPECT_THAT(waitForAccessLog(access_log_name_), HasSubstr("max_duration_timeout")); } +TEST_P(DownstreamProtocolIntegrationTest, MaxRequestsPerConnectionReached) { + config_helper_.setDownstreamMaxRequestsPerConnection(2); + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + // Sending first request and waiting to complete the response. + auto encoder_decoder = codec_client_->startRequest(default_request_headers_); + request_encoder_ = &encoder_decoder.first; + auto response = std::move(encoder_decoder.second); + codec_client_->sendData(*request_encoder_, 1, true); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(default_response_headers_, true); + + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ(test_server_->counter("http.config_test.downstream_cx_max_requests_reached")->value(), + 0); + + // Sending second request and waiting to complete the response. 
+ auto encoder_decoder_2 = codec_client_->startRequest(default_request_headers_); + request_encoder_ = &encoder_decoder_2.first; + auto response_2 = std::move(encoder_decoder_2.second); + codec_client_->sendData(*request_encoder_, 1, true); + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(default_response_headers_, true); + + ASSERT_TRUE(response_2->waitForEndStream()); + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_TRUE(response_2->complete()); + EXPECT_EQ(test_server_->counter("http.config_test.downstream_cx_max_requests_reached")->value(), + 1); + + if (downstream_protocol_ == Http::CodecType::HTTP1) { + EXPECT_EQ(nullptr, response->headers().Connection()); + EXPECT_EQ("close", response_2->headers().getConnectionValue()); + } else { + EXPECT_TRUE(codec_client_->sawGoAway()); + } + ASSERT_TRUE(codec_client_->waitForDisconnect()); +} + // Make sure that invalid authority headers get blocked at or before the HCM. TEST_P(DownstreamProtocolIntegrationTest, InvalidAuthority) { initialize(); diff --git a/test/mocks/http/mocks.h b/test/mocks/http/mocks.h index 21c17e4f7ad9d..83cf041e07020 100644 --- a/test/mocks/http/mocks.h +++ b/test/mocks/http/mocks.h @@ -600,6 +600,7 @@ class MockConnectionManagerConfig : public ConnectionManagerConfig { pathWithEscapedSlashesAction, (), (const)); MOCK_METHOD(const std::vector&, originalIpDetectionExtensions, (), (const)); + MOCK_METHOD(uint64_t, maxRequestsPerConnection, (), (const)); std::unique_ptr internal_address_config_ = std::make_unique(); From 396892c737e5eb3d5d4bf0d6ba56b0c70e7b2a93 Mon Sep 17 00:00:00 2001 From: Xie Zhihao Date: Tue, 20 Jul 2021 01:31:31 +0800 Subject: [PATCH 03/57] docs: fix typo in listener filter chain match configuration example (#17398) Signed-off-by: Xie Zhihao --- api/envoy/api/v2/listener/listener_components.proto | 2 +- api/envoy/config/listener/v3/listener_components.proto | 2 +- api/envoy/config/listener/v4alpha/listener_components.proto | 2 +- 
.../envoy/api/v2/listener/listener_components.proto | 2 +- .../envoy/config/listener/v3/listener_components.proto | 2 +- .../envoy/config/listener/v4alpha/listener_components.proto | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/api/envoy/api/v2/listener/listener_components.proto b/api/envoy/api/v2/listener/listener_components.proto index a6791c86cd0be..08738962c5eee 100644 --- a/api/envoy/api/v2/listener/listener_components.proto +++ b/api/envoy/api/v2/listener/listener_components.proto @@ -230,7 +230,7 @@ message FilterChain { // rules: // - destination_port_range: // start: 3306 -// end: 3306 +// end: 3307 // - destination_port_range: // start: 15000 // end: 15001 diff --git a/api/envoy/config/listener/v3/listener_components.proto b/api/envoy/config/listener/v3/listener_components.proto index 93acc4c94a666..e737b14b17456 100644 --- a/api/envoy/config/listener/v3/listener_components.proto +++ b/api/envoy/config/listener/v3/listener_components.proto @@ -290,7 +290,7 @@ message FilterChain { // rules: // - destination_port_range: // start: 3306 -// end: 3306 +// end: 3307 // - destination_port_range: // start: 15000 // end: 15001 diff --git a/api/envoy/config/listener/v4alpha/listener_components.proto b/api/envoy/config/listener/v4alpha/listener_components.proto index 103fa23484f81..6fc16227542f9 100644 --- a/api/envoy/config/listener/v4alpha/listener_components.proto +++ b/api/envoy/config/listener/v4alpha/listener_components.proto @@ -280,7 +280,7 @@ message FilterChain { // rules: // - destination_port_range: // start: 3306 -// end: 3306 +// end: 3307 // - destination_port_range: // start: 15000 // end: 15001 diff --git a/generated_api_shadow/envoy/api/v2/listener/listener_components.proto b/generated_api_shadow/envoy/api/v2/listener/listener_components.proto index a6791c86cd0be..08738962c5eee 100644 --- a/generated_api_shadow/envoy/api/v2/listener/listener_components.proto +++ 
b/generated_api_shadow/envoy/api/v2/listener/listener_components.proto @@ -230,7 +230,7 @@ message FilterChain { // rules: // - destination_port_range: // start: 3306 -// end: 3306 +// end: 3307 // - destination_port_range: // start: 15000 // end: 15001 diff --git a/generated_api_shadow/envoy/config/listener/v3/listener_components.proto b/generated_api_shadow/envoy/config/listener/v3/listener_components.proto index 5de8b265d8806..1e7e205bfded9 100644 --- a/generated_api_shadow/envoy/config/listener/v3/listener_components.proto +++ b/generated_api_shadow/envoy/config/listener/v3/listener_components.proto @@ -293,7 +293,7 @@ message FilterChain { // rules: // - destination_port_range: // start: 3306 -// end: 3306 +// end: 3307 // - destination_port_range: // start: 15000 // end: 15001 diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto b/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto index e4db4367d1c6a..48e068e4ae59f 100644 --- a/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto +++ b/generated_api_shadow/envoy/config/listener/v4alpha/listener_components.proto @@ -294,7 +294,7 @@ message FilterChain { // rules: // - destination_port_range: // start: 3306 -// end: 3306 +// end: 3307 // - destination_port_range: // start: 15000 // end: 15001 From ef1237fe2cef2976dad6b20a1dd8b4f8a3d7ba5d Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Mon, 19 Jul 2021 15:01:31 -0400 Subject: [PATCH 04/57] tools: exempting dependabot from unassigned warnings (#17401) Signed-off-by: Alyssa Wilk --- .github/actions/pr_notifier/pr_notifier.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/actions/pr_notifier/pr_notifier.py b/.github/actions/pr_notifier/pr_notifier.py index dcd7cca2c88d4..54bf3e9fc568e 100644 --- a/.github/actions/pr_notifier/pr_notifier.py +++ b/.github/actions/pr_notifier/pr_notifier.py @@ -128,8 +128,12 @@ def track_prs(): # If the PR is waiting, continue. 
if is_waiting(labels): continue + # Drafts are not covered by our SLO (repokitteh warns of this) if pr_info.draft: continue + # Don't warn for dependabot. + if pr_info.user.login == 'dependabot[bot]': + continue # Update the time based on the time zone delta from github's pr_age = pr_info.updated_at - datetime.timedelta(hours=4) From a1705b4179105141a4e76e186c0d8df22c602790 Mon Sep 17 00:00:00 2001 From: pradeepcrao <84025829+pradeepcrao@users.noreply.github.com> Date: Mon, 19 Jul 2021 20:41:34 +0000 Subject: [PATCH 05/57] Add local_end_stream_ to crash dump for H1 (#17367) Risk Level: low Testing: unit test improvements Docs Changes: n/a Release Notes: n/a Signed-off-by: Pradeep Rao --- source/common/http/http1/codec_impl.cc | 14 +++++++++----- source/common/http/http1/codec_impl.h | 1 + test/common/http/http1/codec_impl_test.cc | 3 ++- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index d26328488f4d5..cb0af92e83bc9 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -899,11 +899,8 @@ void ConnectionImpl::dumpState(std::ostream& os, int indent_level) const { void ServerConnectionImpl::dumpAdditionalState(std::ostream& os, int indent_level) const { const char* spaces = spacesForLevel(indent_level); - os << DUMP_MEMBER_AS(active_request_.request_url_, - active_request_.has_value() && - !active_request_.value().request_url_.getStringView().empty() - ? 
active_request_.value().request_url_.getStringView() - : "null"); + + DUMP_DETAILS(active_request_); os << '\n'; // Dump header map, it may be null if it was moved to the request, and @@ -1228,6 +1225,13 @@ Status ServerConnectionImpl::checkHeaderNameForUnderscores() { return okStatus(); } +void ServerConnectionImpl::ActiveRequest::dumpState(std::ostream& os, int indent_level) const { + (void)indent_level; + os << DUMP_MEMBER_AS( + request_url_, !request_url_.getStringView().empty() ? request_url_.getStringView() : "null"); + os << DUMP_MEMBER(response_encoder_.local_end_stream_); +} + ClientConnectionImpl::ClientConnectionImpl(Network::Connection& connection, CodecStats& stats, ConnectionCallbacks&, const Http1Settings& settings, const uint32_t max_response_headers_count) diff --git a/source/common/http/http1/codec_impl.h b/source/common/http/http1/codec_impl.h index aa8125b440d26..d67412706a712 100644 --- a/source/common/http/http1/codec_impl.h +++ b/source/common/http/http1/codec_impl.h @@ -440,6 +440,7 @@ class ServerConnectionImpl : public ServerConnection, public ConnectionImpl { : response_encoder_(connection, connection.codec_settings_.stream_error_on_invalid_http_message_) {} + void dumpState(std::ostream& os, int indent_level) const; HeaderString request_url_; RequestDecoder* request_decoder_{}; ResponseEncoderImpl response_encoder_; diff --git a/test/common/http/http1/codec_impl_test.cc b/test/common/http/http1/codec_impl_test.cc index 59f8e1dc0dd1e..433acb4473a11 100644 --- a/test/common/http/http1/codec_impl_test.cc +++ b/test/common/http/http1/codec_impl_test.cc @@ -2159,7 +2159,8 @@ TEST_F(Http1ServerConnectionImplTest, ShouldDumpDispatchBufferWithoutAllocatingM // Check dump contents EXPECT_THAT(ostream.contents(), HasSubstr("buffered_body_.length(): 5, header_parsing_state_: " "Done, current_header_field_: , current_header_value_: " - "\n, active_request_.request_url_: null")); + "\nactive_request_: \n, request_url_: null" + ", 
response_encoder_.local_end_stream_: 0")); EXPECT_THAT(ostream.contents(), HasSubstr("current_dispatching_buffer_ front_slice length: 43 contents: \"POST / " "HTTP/1.1\\r\\nContent-Length: 5\\r\\n\\r\\nHello\"\n")); From 6acfb40bb877fc93483173c1d7ee6591632e8206 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Mon, 19 Jul 2021 16:51:13 -0400 Subject: [PATCH 06/57] thrift proxy: move more of Router into RequestOwner (#17376) This is PR 2/4 towards implementing shadow requests for thrift. This moves all the stats into the common RequestOwner interface so that they can be reused. The next, similar sized, PR will decouple UpstreamRequest from the Router so that it can be reused by the upcoming ShadowRequest class. The final PR should introduce the ShadowWriter and ShadowRequest classes and make use of RequestOwner to tie them together. Risk Level: low, refactor Testing: existing tests pass Docs Changes: n/a Release Notes: n/a Signed-off-by: Raul Gutierrez Segales --- .../network/thrift_proxy/router/router.h | 157 ++++++++++++++++++ .../thrift_proxy/router/router_impl.cc | 32 ++-- .../network/thrift_proxy/router/router_impl.h | 68 +------- 3 files changed, 176 insertions(+), 81 deletions(-) diff --git a/source/extensions/filters/network/thrift_proxy/router/router.h b/source/extensions/filters/network/thrift_proxy/router/router.h index 50135fe023402..603a08c4c0159 100644 --- a/source/extensions/filters/network/thrift_proxy/router/router.h +++ b/source/extensions/filters/network/thrift_proxy/router/router.h @@ -88,11 +88,37 @@ class Config { using ConfigConstSharedPtr = std::shared_ptr; +#define ALL_THRIFT_ROUTER_STATS(COUNTER, GAUGE, HISTOGRAM) \ + COUNTER(route_missing) \ + COUNTER(unknown_cluster) \ + COUNTER(upstream_rq_maintenance_mode) \ + COUNTER(no_healthy_upstream) + +struct RouterStats { + ALL_THRIFT_ROUTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT) +}; + /** * This interface is used 
by an upstream request to communicate its state. */ class RequestOwner : public ProtocolConverter { public: + RequestOwner(Upstream::ClusterManager& cluster_manager, const std::string& stat_prefix, + Stats::Scope& scope) + : cluster_manager_(cluster_manager), stats_(generateStats(stat_prefix, scope)), + stat_name_set_(scope.symbolTable().makeSet("thrift_proxy")), + symbol_table_(scope.symbolTable()), + upstream_rq_call_(stat_name_set_->add("thrift.upstream_rq_call")), + upstream_rq_oneway_(stat_name_set_->add("thrift.upstream_rq_oneway")), + upstream_rq_invalid_type_(stat_name_set_->add("thrift.upstream_rq_invalid_type")), + upstream_resp_reply_(stat_name_set_->add("thrift.upstream_resp_reply")), + upstream_resp_reply_success_(stat_name_set_->add("thrift.upstream_resp_success")), + upstream_resp_reply_error_(stat_name_set_->add("thrift.upstream_resp_error")), + upstream_resp_exception_(stat_name_set_->add("thrift.upstream_resp_exception")), + upstream_resp_invalid_type_(stat_name_set_->add("thrift.upstream_resp_invalid_type")), + upstream_rq_time_(stat_name_set_->add("thrift.upstream_rq_time")), + upstream_rq_size_(stat_name_set_->add("thrift.upstream_rq_size")), + upstream_resp_size_(stat_name_set_->add("thrift.upstream_resp_size")) {} ~RequestOwner() override = default; /** @@ -149,6 +175,137 @@ class RequestOwner : public ProtocolConverter { * @param unit Unit the unit of the duration. */ virtual void recordResponseDuration(uint64_t value, Stats::Histogram::Unit unit) PURE; + + /** + * @return Upstream::ClusterManager& the cluster manager. + */ + Upstream::ClusterManager& clusterManager() { return cluster_manager_; } + + /** + * Common stats. + */ + RouterStats& stats() { return stats_; } + + /** + * Increment counter for received responses that are replies. + */ + void incResponseReply(const Upstream::ClusterInfo& cluster) { + incClusterScopeCounter(cluster, {upstream_resp_reply_}); + } + + /** + * Increment counter for request calls. 
+ */ + void incRequestCall(const Upstream::ClusterInfo& cluster) { + incClusterScopeCounter(cluster, {upstream_rq_call_}); + } + + /** + * Increment counter for requests that are one way only. + */ + void incRequestOneWay(const Upstream::ClusterInfo& cluster) { + incClusterScopeCounter(cluster, {upstream_rq_oneway_}); + } + + /** + * Increment counter for requests that are invalid. + */ + void incRequestInvalid(const Upstream::ClusterInfo& cluster) { + incClusterScopeCounter(cluster, {upstream_rq_invalid_type_}); + } + + /** + * Increment counter for received responses that are replies that are successful. + */ + void incResponseReplySuccess(const Upstream::ClusterInfo& cluster) { + incClusterScopeCounter(cluster, {upstream_resp_reply_success_}); + } + + /** + * Increment counter for received responses that are replies that are an error. + */ + void incResponseReplyError(const Upstream::ClusterInfo& cluster) { + incClusterScopeCounter(cluster, {upstream_resp_reply_error_}); + } + + /** + * Increment counter for received responses that are exceptions. + */ + void incResponseException(const Upstream::ClusterInfo& cluster) { + incClusterScopeCounter(cluster, {upstream_resp_exception_}); + } + + /** + * Increment counter for received responses that are invalid. + */ + void incResponseInvalidType(const Upstream::ClusterInfo& cluster) { + incClusterScopeCounter(cluster, {upstream_resp_invalid_type_}); + } + + /** + * Record a value for the request size histogram. + */ + void recordUpstreamRequestSize(const Upstream::ClusterInfo& cluster, uint64_t value) { + recordClusterScopeHistogram(cluster, {upstream_rq_size_}, Stats::Histogram::Unit::Bytes, value); + } + + /** + * Record a value for the response size histogram. 
+ */ + void recordUpstreamResponseSize(const Upstream::ClusterInfo& cluster, uint64_t value) { + recordClusterScopeHistogram(cluster, {upstream_resp_size_}, Stats::Histogram::Unit::Bytes, + value); + } + + /** + * Records the duration of the request for a given cluster. + * + * @param cluster ClusterInfo the cluster to record the duration for. + * @param value uint64_t the value of the duration. + * @param unit Unit the unit of the duration. + */ + void recordClusterResponseDuration(const Upstream::ClusterInfo& cluster, uint64_t value, + Stats::Histogram::Unit unit) { + recordClusterScopeHistogram(cluster, {upstream_rq_time_}, unit, value); + } + +private: + void incClusterScopeCounter(const Upstream::ClusterInfo& cluster, + const Stats::StatNameVec& names) const { + const Stats::SymbolTable::StoragePtr stat_name_storage = symbol_table_.join(names); + cluster.statsScope().counterFromStatName(Stats::StatName(stat_name_storage.get())).inc(); + } + + void recordClusterScopeHistogram(const Upstream::ClusterInfo& cluster, + const Stats::StatNameVec& names, Stats::Histogram::Unit unit, + uint64_t value) const { + const Stats::SymbolTable::StoragePtr stat_name_storage = symbol_table_.join(names); + cluster.statsScope() + .histogramFromStatName(Stats::StatName(stat_name_storage.get()), unit) + .recordValue(value); + } + + RouterStats generateStats(const std::string& prefix, Stats::Scope& scope) { + return RouterStats{ALL_THRIFT_ROUTER_STATS(POOL_COUNTER_PREFIX(scope, prefix), + POOL_GAUGE_PREFIX(scope, prefix), + POOL_HISTOGRAM_PREFIX(scope, prefix))}; + } + + Upstream::ClusterManager& cluster_manager_; + RouterStats stats_; + Stats::StatNameSetPtr stat_name_set_; + Stats::SymbolTable& symbol_table_; + const Stats::StatName upstream_rq_call_; + const Stats::StatName upstream_rq_oneway_; + const Stats::StatName upstream_rq_invalid_type_; + const Stats::StatName upstream_resp_reply_; + const Stats::StatName upstream_resp_reply_success_; + const Stats::StatName 
upstream_resp_reply_error_; + const Stats::StatName upstream_resp_exception_; + const Stats::StatName upstream_resp_invalid_type_; + const Stats::StatName upstream_rq_time_; + const Stats::StatName upstream_rq_size_; + const Stats::StatName upstream_resp_size_; }; } // namespace Router diff --git a/source/extensions/filters/network/thrift_proxy/router/router_impl.cc b/source/extensions/filters/network/thrift_proxy/router/router_impl.cc index 66acf6ca0786e..77e2e18970304 100644 --- a/source/extensions/filters/network/thrift_proxy/router/router_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/router/router_impl.cc @@ -214,7 +214,7 @@ FilterStatus Router::messageBegin(MessageMetadataSharedPtr metadata) { route_ = callbacks_->route(); if (!route_) { ENVOY_STREAM_LOG(debug, "no route match for method '{}'", *callbacks_, metadata->methodName()); - stats_.route_missing_.inc(); + stats().route_missing_.inc(); callbacks_->sendLocalReply( AppException(AppExceptionType::UnknownMethod, fmt::format("no route for method '{}'", metadata->methodName())), @@ -225,10 +225,10 @@ FilterStatus Router::messageBegin(MessageMetadataSharedPtr metadata) { route_entry_ = route_->routeEntry(); const std::string& cluster_name = route_entry_->clusterName(); - Upstream::ThreadLocalCluster* cluster = cluster_manager_.getThreadLocalCluster(cluster_name); + Upstream::ThreadLocalCluster* cluster = clusterManager().getThreadLocalCluster(cluster_name); if (!cluster) { ENVOY_STREAM_LOG(debug, "unknown cluster '{}'", *callbacks_, cluster_name); - stats_.unknown_cluster_.inc(); + stats().unknown_cluster_.inc(); callbacks_->sendLocalReply(AppException(AppExceptionType::InternalError, fmt::format("unknown cluster '{}'", cluster_name)), true); @@ -240,20 +240,20 @@ FilterStatus Router::messageBegin(MessageMetadataSharedPtr metadata) { metadata->methodName()); switch (metadata->messageType()) { case MessageType::Call: - incClusterScopeCounter({upstream_rq_call_}); + incRequestCall(*cluster_); 
break; case MessageType::Oneway: - incClusterScopeCounter({upstream_rq_oneway_}); + incRequestOneWay(*cluster_); break; default: - incClusterScopeCounter({upstream_rq_invalid_type_}); + incRequestInvalid(*cluster_); break; } if (cluster_->maintenanceMode()) { - stats_.upstream_rq_maintenance_mode_.inc(); + stats().upstream_rq_maintenance_mode_.inc(); callbacks_->sendLocalReply( AppException(AppExceptionType::InternalError, fmt::format("maintenance mode for cluster '{}'", cluster_name)), @@ -282,7 +282,7 @@ FilterStatus Router::messageBegin(MessageMetadataSharedPtr metadata) { auto conn_pool_data = cluster->tcpConnPool(Upstream::ResourcePriority::Default, this); if (!conn_pool_data) { - stats_.no_healthy_upstream_.inc(); + stats().no_healthy_upstream_.inc(); callbacks_->sendLocalReply( AppException(AppExceptionType::InternalError, fmt::format("no healthy upstream for '{}'", cluster_name)), @@ -316,7 +316,7 @@ FilterStatus Router::messageEnd() { upstream_request_buffer_); request_size_ += transport_buffer.length(); - recordClusterScopeHistogram({upstream_rq_size_}, Stats::Histogram::Unit::Bytes, request_size_); + recordUpstreamRequestSize(*cluster_, request_size_); upstream_request_->conn_data_->connection().write(transport_buffer, false); upstream_request_->onRequestComplete(); @@ -355,31 +355,31 @@ void Router::onUpstreamData(Buffer::Instance& data, bool end_stream) { ThriftFilters::ResponseStatus status = callbacks_->upstreamData(data); if (status == ThriftFilters::ResponseStatus::Complete) { ENVOY_STREAM_LOG(debug, "response complete", *callbacks_); - recordClusterScopeHistogram({upstream_resp_size_}, Stats::Histogram::Unit::Bytes, - response_size_); + + recordUpstreamResponseSize(*cluster_, response_size_); switch (callbacks_->responseMetadata()->messageType()) { case MessageType::Reply: - incClusterScopeCounter({upstream_resp_reply_}); + incResponseReply(*cluster_); if (callbacks_->responseSuccess()) { 
upstream_request_->upstream_host_->outlierDetector().putResult( Upstream::Outlier::Result::ExtOriginRequestSuccess); - incClusterScopeCounter({upstream_resp_reply_success_}); + incResponseReplySuccess(*cluster_); } else { upstream_request_->upstream_host_->outlierDetector().putResult( Upstream::Outlier::Result::ExtOriginRequestFailed); - incClusterScopeCounter({upstream_resp_reply_error_}); + incResponseReplyError(*cluster_); } break; case MessageType::Exception: upstream_request_->upstream_host_->outlierDetector().putResult( Upstream::Outlier::Result::ExtOriginRequestFailed); - incClusterScopeCounter({upstream_resp_exception_}); + incResponseException(*cluster_); break; default: - incClusterScopeCounter({upstream_resp_invalid_type_}); + incResponseInvalidType(*cluster_); break; } upstream_request_->onResponseComplete(); diff --git a/source/extensions/filters/network/thrift_proxy/router/router_impl.h b/source/extensions/filters/network/thrift_proxy/router/router_impl.h index ab916d5bc29e3..502a2659cff6b 100644 --- a/source/extensions/filters/network/thrift_proxy/router/router_impl.h +++ b/source/extensions/filters/network/thrift_proxy/router/router_impl.h @@ -159,16 +159,6 @@ class RouteMatcher { std::vector routes_; }; -#define ALL_THRIFT_ROUTER_STATS(COUNTER, GAUGE, HISTOGRAM) \ - COUNTER(route_missing) \ - COUNTER(unknown_cluster) \ - COUNTER(upstream_rq_maintenance_mode) \ - COUNTER(no_healthy_upstream) - -struct RouterStats { - ALL_THRIFT_ROUTER_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_HISTOGRAM_STRUCT) -}; - class Router : public Tcp::ConnectionPool::UpstreamCallbacks, public Upstream::LoadBalancerContextBase, public RequestOwner, @@ -177,21 +167,7 @@ class Router : public Tcp::ConnectionPool::UpstreamCallbacks, public: Router(Upstream::ClusterManager& cluster_manager, const std::string& stat_prefix, Stats::Scope& scope) - : cluster_manager_(cluster_manager), stats_(generateStats(stat_prefix, scope)), - 
stat_name_set_(scope.symbolTable().makeSet("thrift_proxy")), - symbol_table_(scope.symbolTable()), - upstream_rq_call_(stat_name_set_->add("thrift.upstream_rq_call")), - upstream_rq_oneway_(stat_name_set_->add("thrift.upstream_rq_oneway")), - upstream_rq_invalid_type_(stat_name_set_->add("thrift.upstream_rq_invalid_type")), - upstream_resp_reply_(stat_name_set_->add("thrift.upstream_resp_reply")), - upstream_resp_reply_success_(stat_name_set_->add("thrift.upstream_resp_success")), - upstream_resp_reply_error_(stat_name_set_->add("thrift.upstream_resp_error")), - upstream_resp_exception_(stat_name_set_->add("thrift.upstream_resp_exception")), - upstream_resp_invalid_type_(stat_name_set_->add("thrift.upstream_resp_invalid_type")), - upstream_rq_time_(stat_name_set_->add("thrift.upstream_rq_time")), - upstream_rq_size_(stat_name_set_->add("thrift.upstream_rq_size")), - upstream_resp_size_(stat_name_set_->add("thrift.upstream_resp_size")), - passthrough_supported_(false) {} + : RequestOwner(cluster_manager, stat_prefix, scope), passthrough_supported_(false) {} ~Router() override = default; @@ -211,7 +187,7 @@ class Router : public Tcp::ConnectionPool::UpstreamCallbacks, callbacks_->sendLocalReply(response, end_stream); } void recordResponseDuration(uint64_t value, Stats::Histogram::Unit unit) override { - recordClusterScopeHistogram({upstream_rq_time_}, unit, value); + recordClusterResponseDuration(*cluster_, value, unit); } // RequestOwner::ProtocolConverter @@ -223,10 +199,7 @@ class Router : public Tcp::ConnectionPool::UpstreamCallbacks, // Upstream::LoadBalancerContext const Network::Connection* downstreamConnection() const override; const Envoy::Router::MetadataMatchCriteria* metadataMatchCriteria() override { - if (route_entry_) { - return route_entry_->metadataMatchCriteria(); - } - return nullptr; + return route_entry_ ? 
route_entry_->metadataMatchCriteria() : nullptr; } // Tcp::ConnectionPool::UpstreamCallbacks @@ -280,42 +253,7 @@ class Router : public Tcp::ConnectionPool::UpstreamCallbacks, MonotonicTime downstream_request_complete_time_; }; - // Stats - void incClusterScopeCounter(const Stats::StatNameVec& names) const { - const Stats::SymbolTable::StoragePtr stat_name_storage = symbol_table_.join(names); - cluster_->statsScope().counterFromStatName(Stats::StatName(stat_name_storage.get())).inc(); - } - - void recordClusterScopeHistogram(const Stats::StatNameVec& names, Stats::Histogram::Unit unit, - uint64_t count) const { - const Stats::SymbolTable::StoragePtr stat_name_storage = symbol_table_.join(names); - cluster_->statsScope() - .histogramFromStatName(Stats::StatName(stat_name_storage.get()), unit) - .recordValue(count); - } - void cleanup(); - RouterStats generateStats(const std::string& prefix, Stats::Scope& scope) { - return RouterStats{ALL_THRIFT_ROUTER_STATS(POOL_COUNTER_PREFIX(scope, prefix), - POOL_GAUGE_PREFIX(scope, prefix), - POOL_HISTOGRAM_PREFIX(scope, prefix))}; - } - - Upstream::ClusterManager& cluster_manager_; - RouterStats stats_; - Stats::StatNameSetPtr stat_name_set_; - Stats::SymbolTable& symbol_table_; - const Stats::StatName upstream_rq_call_; - const Stats::StatName upstream_rq_oneway_; - const Stats::StatName upstream_rq_invalid_type_; - const Stats::StatName upstream_resp_reply_; - const Stats::StatName upstream_resp_reply_success_; - const Stats::StatName upstream_resp_reply_error_; - const Stats::StatName upstream_resp_exception_; - const Stats::StatName upstream_resp_invalid_type_; - const Stats::StatName upstream_rq_time_; - const Stats::StatName upstream_rq_size_; - const Stats::StatName upstream_resp_size_; ThriftFilters::DecoderFilterCallbacks* callbacks_{}; RouteConstSharedPtr route_{}; From e0380add8104a83c0ca893d34a46df43b0e02eb2 Mon Sep 17 00:00:00 2001 From: tyxia <72890320+tyxia@users.noreply.github.com> Date: Mon, 19 Jul 2021 
21:12:33 -0400 Subject: [PATCH 07/57] The ORIGINAL_DST_LB load balancing policy is deprecated, use CLUSTER_PROVIDED policy instead when configuring an original destination cluster. (#17230) Risk Level: low Testing: bazel test //test/... Signed-off-by: Tianyu Xia --- .../common/upstream/original_dst_cluster.cc | 14 ++--- source/common/upstream/upstream_impl.cc | 16 ----- .../upstream/cluster_manager_impl_test.cc | 59 +++---------------- .../proxy_proto_integration_test.cc | 31 ---------- 4 files changed, 13 insertions(+), 107 deletions(-) diff --git a/source/common/upstream/original_dst_cluster.cc b/source/common/upstream/original_dst_cluster.cc index 27f6f7887de9d..25e8ba1908873 100644 --- a/source/common/upstream/original_dst_cluster.cc +++ b/source/common/upstream/original_dst_cluster.cc @@ -183,14 +183,12 @@ OriginalDstClusterFactory::createClusterImpl( const envoy::config::cluster::v3::Cluster& cluster, ClusterFactoryContext& context, Server::Configuration::TransportSocketFactoryContextImpl& socket_factory_context, Stats::ScopePtr&& stats_scope) { - if (cluster.lb_policy() != - envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB && - cluster.lb_policy() != envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED) { - throw EnvoyException(fmt::format( - "cluster: LB policy {} is not valid for Cluster type {}. Only 'CLUSTER_PROVIDED' or " - "'ORIGINAL_DST_LB' is allowed with cluster type 'ORIGINAL_DST'", - envoy::config::cluster::v3::Cluster::LbPolicy_Name(cluster.lb_policy()), - envoy::config::cluster::v3::Cluster::DiscoveryType_Name(cluster.type()))); + if (cluster.lb_policy() != envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED) { + throw EnvoyException( + fmt::format("cluster: LB policy {} is not valid for Cluster type {}. 
Only " + "'CLUSTER_PROVIDED' is allowed with cluster type 'ORIGINAL_DST'", + envoy::config::cluster::v3::Cluster::LbPolicy_Name(cluster.lb_policy()), + envoy::config::cluster::v3::Cluster::DiscoveryType_Name(cluster.type()))); } // TODO(mattklein123): The original DST load balancer type should be deprecated and instead diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index b5ab427aa988d..63c68663ee0f3 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -787,22 +787,6 @@ ClusterInfoImpl::ClusterInfoImpl( case envoy::config::cluster::v3::Cluster::RING_HASH: lb_type_ = LoadBalancerType::RingHash; break; - case envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB: - if (config.type() != envoy::config::cluster::v3::Cluster::ORIGINAL_DST) { - throw EnvoyException( - fmt::format("cluster: LB policy {} is not valid for Cluster type {}. 'ORIGINAL_DST_LB' " - "is allowed only with cluster type 'ORIGINAL_DST'", - envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy()), - envoy::config::cluster::v3::Cluster::DiscoveryType_Name(config.type()))); - } - if (config.has_lb_subset_config()) { - throw EnvoyException( - fmt::format("cluster: LB policy {} cannot be combined with lb_subset_config", - envoy::config::cluster::v3::Cluster::LbPolicy_Name(config.lb_policy()))); - } - - lb_type_ = LoadBalancerType::ClusterProvided; - break; case envoy::config::cluster::v3::Cluster::MAGLEV: lb_type_ = LoadBalancerType::Maglev; break; diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index fa5f863ae53ff..0596f6348f6d4 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -502,32 +502,7 @@ TEST_F(ClusterManagerImplTest, OriginalDstLbRestriction) { EXPECT_THROW_WITH_MESSAGE( create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, 
"cluster: LB policy ROUND_ROBIN is not valid for Cluster type ORIGINAL_DST. Only " - "'CLUSTER_PROVIDED' or 'ORIGINAL_DST_LB' is allowed with cluster type 'ORIGINAL_DST'"); -} - -TEST_F(ClusterManagerImplTest, DEPRECATED_FEATURE_TEST(OriginalDstLbRestriction2)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string yaml = R"EOF( - static_resources: - clusters: - - name: cluster_1 - connect_timeout: 0.250s - type: static - lb_policy: original_dst_lb - load_assignment: - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 127.0.0.1 - port_value: 11001 - )EOF"; - - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml, false)), EnvoyException, - "cluster: LB policy hidden_envoy_deprecated_ORIGINAL_DST_LB is not " - "valid for Cluster type STATIC. " - "'ORIGINAL_DST_LB' is allowed only with cluster type 'ORIGINAL_DST'"); + "'CLUSTER_PROVIDED' is allowed with cluster type 'ORIGINAL_DST'"); } class ClusterManagerSubsetInitializationTest @@ -545,7 +520,9 @@ class ClusterManagerSubsetInitializationTest for (int i = first; i <= last; i++) { if (envoy::config::cluster::v3::Cluster::LbPolicy_IsValid(i)) { auto policy = static_cast(i); - if (policy != envoy::config::cluster::v3::Cluster::LOAD_BALANCING_POLICY_CONFIG) { + if (policy != + envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB && + policy != envoy::config::cluster::v3::Cluster::LOAD_BALANCING_POLICY_CONFIG) { policies.push_back(policy); } } @@ -591,16 +568,14 @@ TEST_P(ClusterManagerSubsetInitializationTest, SubsetLoadBalancerInitialization) const std::string& policy_name = envoy::config::cluster::v3::Cluster::LbPolicy_Name(GetParam()); std::string cluster_type = "type: STATIC"; - if (GetParam() == envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB) { - cluster_type = "type: ORIGINAL_DST"; - } else if (GetParam() == envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED) { + + if (GetParam() == 
envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED) { // This custom cluster type is registered by linking test/integration/custom/static_cluster.cc. cluster_type = "cluster_type: { name: envoy.clusters.custom_static_with_lb }"; } const std::string yaml = fmt::format(yamlPattern, cluster_type, policy_name); - if (GetParam() == envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB || - GetParam() == envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED) { + if (GetParam() == envoy::config::cluster::v3::Cluster::CLUSTER_PROVIDED) { EXPECT_THROW_WITH_MESSAGE( create(parseBootstrapFromV3Yaml(yaml)), EnvoyException, fmt::format("cluster: LB policy {} cannot be combined with lb_subset_config", @@ -627,26 +602,6 @@ INSTANTIATE_TEST_SUITE_P(ClusterManagerSubsetInitializationTest, testing::ValuesIn(ClusterManagerSubsetInitializationTest::lbPolicies()), ClusterManagerSubsetInitializationTest::paramName); -TEST_F(ClusterManagerImplTest, DEPRECATED_FEATURE_TEST(SubsetLoadBalancerOriginalDstRestriction)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string yaml = R"EOF( - static_resources: - clusters: - - name: cluster_1 - connect_timeout: 0.250s - type: original_dst - lb_policy: original_dst_lb - lb_subset_config: - fallback_policy: ANY_ENDPOINT - subset_selectors: - - keys: [ "x" ] - )EOF"; - - EXPECT_THROW_WITH_MESSAGE(create(parseBootstrapFromV3Yaml(yaml, false)), EnvoyException, - "cluster: LB policy hidden_envoy_deprecated_ORIGINAL_DST_LB cannot be " - "combined with lb_subset_config"); -} - TEST_F(ClusterManagerImplTest, SubsetLoadBalancerClusterProvidedLbRestriction) { const std::string yaml = R"EOF( static_resources: diff --git a/test/integration/proxy_proto_integration_test.cc b/test/integration/proxy_proto_integration_test.cc index 83d983edbc069..118f48296f878 100644 --- a/test/integration/proxy_proto_integration_test.cc +++ b/test/integration/proxy_proto_integration_test.cc @@ -160,37 +160,6 @@ TEST_P(ProxyProtoIntegrationTest, 
AccessLog) { EXPECT_EQ(tokens[1], "1.2.3.4:12345"); } -TEST_P(ProxyProtoIntegrationTest, DEPRECATED_FEATURE_TEST(OriginalDst)) { - // Change the cluster to an original destination cluster. An original destination cluster - // ignores the configured hosts, and instead uses the restored destination address from the - // incoming (server) connection as the destination address for the outgoing (client) connection. - config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { - auto* cluster = bootstrap.mutable_static_resources()->mutable_clusters(0); - cluster->clear_load_assignment(); - cluster->set_type(envoy::config::cluster::v3::Cluster::ORIGINAL_DST); - cluster->set_lb_policy( - envoy::config::cluster::v3::Cluster::hidden_envoy_deprecated_ORIGINAL_DST_LB); - }); - - ConnectionCreationFunction creator = [&]() -> Network::ClientConnectionPtr { - Network::ClientConnectionPtr conn = makeClientConnection(lookupPort("http")); - // Create proxy protocol line that has the fake upstream address as the destination address. - // This address will become the "restored" address for the server connection and will - // be used as the destination address by the original destination cluster. - std::string proxyLine = fmt::format( - "PROXY {} {} 65535 {}\r\n", - GetParam() == Network::Address::IpVersion::v4 ? "TCP4 1.2.3.4" : "TCP6 1:2:3::4", - Network::Test::getLoopbackAddressString(GetParam()), - fake_upstreams_[0]->localAddress()->ip()->port()); - - Buffer::OwnedImpl buf(proxyLine); - conn->write(buf, false); - return conn; - }; - - testRouterRequestAndResponseWithBody(1024, 512, false, false, &creator); -} - TEST_P(ProxyProtoIntegrationTest, ClusterProvided) { // Change the cluster to an original destination cluster. 
An original destination cluster // ignores the configured hosts, and instead uses the restored destination address from the From ba474ac375418d5529a30a9510a8ff5f0a5ada9a Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Mon, 19 Jul 2021 20:03:48 -0600 Subject: [PATCH 08/57] listener: make reuse port the default (#17259) 1) Deprecate existing reuse_port field 2) Add new enable_reuse_port field which uses a WKT 3) Make the new default hot restart aware so the default is not changed during hot restart. 4) Allow the default to be reverted using the "envoy.reloadable_features.listener_reuse_port_default_enabled" feature flag. 5) Change listener init so that almost all error handling occurs on the main thread. This a) vastly simplifies error handling and b) makes it so that we pre-create all sockets on the main thread and can use them all during hot restart. 6) Change hot restart to pass reuse port sockets by socket/worker index. This works around a race condition in which a draining listener has a new connection on its accept queue, but it's never accepted by the old process worker. It will be dropped. By passing all sockets (even reuse port sockets) we make sure the accept queue is fully processed. Fixes https://github.com/envoyproxy/envoy/issues/15794 Risk Level: High, scary stuff involving hot restart and listener init Testing: New and existing tests. It was very hard to get the tests to pass which gives me more confidence. 
Docs Changes: N/A Release Notes: Added Platform Specific Features: N/A Signed-off-by: Matt Klein --- api/envoy/config/listener/v3/listener.proto | 29 +- .../config/listener/v4alpha/listener.proto | 30 +- .../envoyproxy_io_proxy_http3_downstream.yaml | 1 - .../udp_filters/_include/udp-proxy.yaml | 1 - .../arch_overview/operations/hot_restart.rst | 15 + docs/root/version_history/current.rst | 13 + envoy/event/dispatcher.h | 5 +- envoy/network/connection_handler.h | 5 + envoy/network/exception.h | 10 +- envoy/network/listener.h | 26 +- envoy/server/hot_restart.h | 13 +- envoy/server/instance.h | 9 + envoy/server/listener_manager.h | 30 +- envoy/server/worker.h | 4 +- examples/udp/envoy.yaml | 1 - .../envoy/config/listener/v3/listener.proto | 29 +- .../config/listener/v4alpha/listener.proto | 30 +- source/common/event/dispatcher_impl.cc | 6 +- source/common/event/dispatcher_impl.h | 4 +- source/common/network/listen_socket_impl.cc | 10 +- source/common/network/listen_socket_impl.h | 9 +- source/common/network/tcp_listener_impl.cc | 30 +- source/common/network/tcp_listener_impl.h | 6 +- source/common/network/udp_listener_impl.cc | 7 - source/common/quic/active_quic_listener.cc | 87 ++- source/common/quic/active_quic_listener.h | 17 +- source/common/runtime/runtime_features.cc | 1 + source/docs/listener.md | 27 + .../server/active_raw_udp_listener_config.h | 3 +- source/server/active_tcp_listener.cc | 12 +- source/server/active_tcp_listener.h | 3 +- source/server/active_udp_listener.cc | 25 +- source/server/active_udp_listener.h | 3 +- source/server/admin/admin.cc | 4 +- source/server/admin/admin.h | 11 +- source/server/config_validation/dispatcher.cc | 3 +- source/server/config_validation/dispatcher.h | 2 +- source/server/config_validation/server.h | 6 +- source/server/connection_handler_impl.cc | 4 +- source/server/hot_restart.proto | 4 + source/server/hot_restart_impl.cc | 8 +- source/server/hot_restart_impl.h | 4 +- source/server/hot_restart_nop_impl.h | 6 +- 
source/server/hot_restarting_child.cc | 14 +- source/server/hot_restarting_child.h | 4 +- source/server/hot_restarting_parent.cc | 11 +- source/server/listener_impl.cc | 203 ++++--- source/server/listener_impl.h | 79 ++- source/server/listener_manager_impl.cc | 235 ++++---- source/server/listener_manager_impl.h | 18 +- source/server/server.cc | 26 +- source/server/server.h | 10 +- source/server/worker_impl.cc | 19 +- test/common/http/codec_client_test.cc | 7 +- test/common/network/connection_impl_test.cc | 25 +- test/common/network/dns_impl_test.cc | 6 +- test/common/network/listener_impl_test.cc | 81 +-- test/common/network/udp_fuzz.cc | 2 + test/common/network/udp_listener_impl_test.cc | 28 +- test/common/quic/active_quic_listener_test.cc | 23 +- test/config/integration/server.yaml | 2 +- test/config/utility.cc | 7 +- test/config/utility.h | 2 +- .../proxy_protocol_regression_test.cc | 6 +- .../proxy_protocol/proxy_protocol_test.cc | 12 +- .../dns_filter/dns_filter_integration_test.cc | 1 - .../udp_proxy/udp_proxy_integration_test.cc | 8 +- .../transport_sockets/tls/ssl_socket_test.cc | 107 ++-- test/integration/fake_upstream.cc | 13 +- test/integration/fake_upstream.h | 10 +- test/integration/hotrestart_test.sh | 15 +- test/integration/http_integration.cc | 2 +- test/integration/http_integration.h | 1 - test/integration/integration_test.cc | 26 +- .../listener_lds_integration_test.cc | 3 - .../integration/quic_http_integration_test.cc | 3 - test/integration/xds_integration_test.cc | 28 + test/mocks/event/mocks.h | 8 +- test/mocks/event/wrapped_dispatcher.h | 6 +- test/mocks/network/mocks.cc | 7 +- test/mocks/network/mocks.h | 7 +- test/mocks/server/hot_restart.cc | 3 + test/mocks/server/hot_restart.h | 5 +- test/mocks/server/instance.cc | 1 + test/mocks/server/instance.h | 1 + .../server/listener_component_factory.cc | 5 +- .../mocks/server/listener_component_factory.h | 4 +- test/mocks/server/worker.cc | 2 +- test/mocks/server/worker.h | 4 +- 
test/server/connection_handler_test.cc | 176 +++--- .../listener_manager_impl_quic_only_test.cc | 6 +- test/server/listener_manager_impl_test.cc | 522 +++++++++--------- test/server/listener_manager_impl_test.h | 36 +- test/server/server_test.cc | 2 + .../test_data/server/runtime_bootstrap.yaml | 1 + test/server/worker_impl_test.cc | 28 +- test/test_common/network_utility.h | 15 + 97 files changed, 1299 insertions(+), 1130 deletions(-) create mode 100644 source/docs/listener.md diff --git a/api/envoy/config/listener/v3/listener.proto b/api/envoy/config/listener/v3/listener.proto index b5bda9562cee8..a5cd4bfe976f7 100644 --- a/api/envoy/config/listener/v3/listener.proto +++ b/api/envoy/config/listener/v3/listener.proto @@ -35,7 +35,7 @@ message ListenerCollection { repeated xds.core.v3.CollectionEntry entries = 1; } -// [#next-free-field: 29] +// [#next-free-field: 30] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener"; @@ -255,17 +255,30 @@ message Listener { // enable the balance config in Y1 and Y2 to balance the connections among the workers. ConnectionBalanceConfig connection_balance_config = 20; + // Deprecated. Use `enable_reuse_port` instead. + bool reuse_port = 21 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and // create one socket for each worker thread. This makes inbound connections // distribute among worker threads roughly evenly in cases where there are a high number - // of connections. When this flag is set to false, all worker threads share one socket. + // of connections. When this flag is set to false, all worker threads share one socket. This field + // defaults to true. + // + // .. attention:: + // + // Although this field defaults to true, it has different behavior on different platforms. See + // the following text for more information. 
// - // Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart - // (see `3rd paragraph in 'soreuseport' commit message - // `_). - // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket - // `_. - bool reuse_port = 21; + // * On Linux, reuse_port is respected for both TCP and UDP listeners. It also works correctly + // with hot restart. + // * On macOS, reuse_port for TCP does not do what it does on Linux. Instead of load balancing, + // the last socket wins and receives all connections/packets. For TCP, reuse_port is force + // disabled and the user is warned. For UDP, it is enabled, but only one worker will receive + // packets. For QUIC/H3, SW routing will send packets to other workers. For "raw" UDP, only + // a single worker will currently receive packets. + // * On Windows, reuse_port for TCP has undefined behavior. It is force disabled and the user + // is warned similar to macOS. It is left enabled for UDP with undefined behavior currently. + google.protobuf.BoolValue enable_reuse_port = 29; // Configuration for :ref:`access logs ` // emitted by this listener. diff --git a/api/envoy/config/listener/v4alpha/listener.proto b/api/envoy/config/listener/v4alpha/listener.proto index b4c353db9f267..e26160cb2a4ae 100644 --- a/api/envoy/config/listener/v4alpha/listener.proto +++ b/api/envoy/config/listener/v4alpha/listener.proto @@ -37,7 +37,7 @@ message ListenerCollection { repeated xds.core.v3.CollectionEntry entries = 1; } -// [#next-free-field: 29] +// [#next-free-field: 30] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Listener"; @@ -97,9 +97,9 @@ message Listener { "envoy.config.listener.v3.Listener.InternalListenerConfig"; } - reserved 14, 23, 7; + reserved 14, 23, 7, 21; - reserved "deprecated_v1"; + reserved "deprecated_v1", "reuse_port"; // The unique name by which this listener is known. 
If no name is provided, // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically @@ -260,14 +260,24 @@ message Listener { // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and // create one socket for each worker thread. This makes inbound connections // distribute among worker threads roughly evenly in cases where there are a high number - // of connections. When this flag is set to false, all worker threads share one socket. + // of connections. When this flag is set to false, all worker threads share one socket. This field + // defaults to true. // - // Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart - // (see `3rd paragraph in 'soreuseport' commit message - // `_). - // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket - // `_. - bool reuse_port = 21; + // .. attention:: + // + // Although this field defaults to true, it has different behavior on different platforms. See + // the following text for more information. + // + // * On Linux, reuse_port is respected for both TCP and UDP listeners. It also works correctly + // with hot restart. + // * On macOS, reuse_port for TCP does not do what it does on Linux. Instead of load balancing, + // the last socket wins and receives all connections/packets. For TCP, reuse_port is force + // disabled and the user is warned. For UDP, it is enabled, but only one worker will receive + // packets. For QUIC/H3, SW routing will send packets to other workers. For "raw" UDP, only + // a single worker will currently receive packets. + // * On Windows, reuse_port for TCP has undefined behavior. It is force disabled and the user + // is warned similar to macOS. It is left enabled for UDP with undefined behavior currently. + google.protobuf.BoolValue enable_reuse_port = 29; // Configuration for :ref:`access logs ` // emitted by this listener. 
diff --git a/configs/envoyproxy_io_proxy_http3_downstream.yaml b/configs/envoyproxy_io_proxy_http3_downstream.yaml index bb643dda3573e..a42ae5010a45e 100644 --- a/configs/envoyproxy_io_proxy_http3_downstream.yaml +++ b/configs/envoyproxy_io_proxy_http3_downstream.yaml @@ -56,7 +56,6 @@ static_resources: protocol: UDP address: 0.0.0.0 port_value: 10000 - reuse_port: true udp_listener_config: quic_options: {} downstream_socket_config: diff --git a/docs/root/configuration/listeners/udp_filters/_include/udp-proxy.yaml b/docs/root/configuration/listeners/udp_filters/_include/udp-proxy.yaml index bcffbb1404df5..0bb16425f370b 100644 --- a/docs/root/configuration/listeners/udp_filters/_include/udp-proxy.yaml +++ b/docs/root/configuration/listeners/udp_filters/_include/udp-proxy.yaml @@ -7,7 +7,6 @@ admin: static_resources: listeners: - name: listener_0 - reuse_port: true address: socket_address: protocol: UDP diff --git a/docs/root/intro/arch_overview/operations/hot_restart.rst b/docs/root/intro/arch_overview/operations/hot_restart.rst index 25b5ef29a38f3..9802609fca2a7 100644 --- a/docs/root/intro/arch_overview/operations/hot_restart.rst +++ b/docs/root/intro/arch_overview/operations/hot_restart.rst @@ -37,3 +37,18 @@ independently. .. note:: This feature is not supported on Windows. + +Socket handling +--------------- + +By default, Envoy uses :ref:`reuse_port +` sockets on Linux for better +performance. This feature works correctly during hot restart because Envoy passes each socket +to the new process by worker index. Thus, no connections are dropped in the accept queues of +the draining process. + +.. attention:: + + In the uncommon case in which concurrency changes during hot restart, no connections will be + dropped if concurrency increases. However, if concurrency decreases some connections may be + dropped in the accept queues of the old process workers. 
diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index cc6faa5ec0913..ef1983f2996d7 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -14,6 +14,16 @@ Minor Behavior Changes which defines the minimal number of headers in a request/response/trailers required for using a dictionary in addition to the list. Setting the `envoy.http.headermap.lazy_map_min_size` runtime feature to a non-negative number will override the default value. +* listener: added the :ref:`enable_reuse_port ` + field and changed the default for reuse_port from false to true, as the feature is now well + supported on the majority of production Linux kernels in use. The default change is aware of hot + restart, as otherwise the change would not be backwards compatible between restarts. This means + that hot restarting on to a new binary will retain the default of false until the binary undergoes + a full restart. To retain the previous behavior, either explicitly set the new configuration + field to false, or set the runtime feature flag `envoy.reloadable_features.listener_reuse_port_default_enabled` + to false. As part of this change, the use of reuse_port for TCP listeners on both macOS and + Windows has been disabled due to suboptimal behavior. See the field documentation for more + information. Bug Fixes --------- @@ -38,4 +48,7 @@ Deprecated * http: the HeaderMatcher fields :ref:`exact_match `, :ref:`safe_regex_match `, :ref:`prefix_match `, :ref:`suffix_match ` and :ref:`contains_match ` are deprecated by :ref:`string_match `. +* listener: :ref:`reuse_port ` has been + deprecated in favor of :ref:`enable_reuse_port `. + At the same time, the default has been changed from false to true. See above for more information. 
diff --git a/envoy/event/dispatcher.h b/envoy/event/dispatcher.h index 95220830c5567..9b887a0c4b268 100644 --- a/envoy/event/dispatcher.h +++ b/envoy/event/dispatcher.h @@ -241,12 +241,11 @@ class Dispatcher : public DispatcherBase, public ScopeTracker { * @param socket supplies the socket to listen on. * @param cb supplies the callbacks to invoke for listener events. * @param bind_to_port controls whether the listener binds to a transport port or not. - * @param backlog_size controls listener pending connections backlog * @return Network::ListenerPtr a new listener that is owned by the caller. */ virtual Network::ListenerPtr createListener(Network::SocketSharedPtr&& socket, - Network::TcpListenerCallbacks& cb, bool bind_to_port, - uint32_t backlog_size) PURE; + Network::TcpListenerCallbacks& cb, + bool bind_to_port) PURE; /** * Creates a logical udp listener on a specific port. diff --git a/envoy/network/connection_handler.h b/envoy/network/connection_handler.h index e5c48a359ed6a..beabccddf8cf2 100644 --- a/envoy/network/connection_handler.h +++ b/envoy/network/connection_handler.h @@ -218,6 +218,11 @@ class ActiveUdpListenerFactory { * @return true if the UDP passing through listener doesn't form stateful connections. */ virtual bool isTransportConnectionless() const PURE; + + /** + * @return socket options specific to this factory that should be applied to all sockets. + */ + virtual const Network::Socket::OptionsSharedPtr& socketOptions() const PURE; }; using ActiveUdpListenerFactoryPtr = std::unique_ptr; diff --git a/envoy/network/exception.h b/envoy/network/exception.h index 45fef6b2a3326..7226216f499e2 100644 --- a/envoy/network/exception.h +++ b/envoy/network/exception.h @@ -6,20 +6,20 @@ namespace Envoy { namespace Network { /** - * Thrown when there is a runtime error creating/binding a listener. + * Thrown when a socket option cannot be applied. 
*/ -class CreateListenerException : public EnvoyException { +class SocketOptionException : public EnvoyException { public: - CreateListenerException(const std::string& what) : EnvoyException(what) {} + SocketOptionException(const std::string& what) : EnvoyException(what) {} }; /** * Thrown when there is a runtime error binding a socket. */ -class SocketBindException : public CreateListenerException { +class SocketBindException : public EnvoyException { public: SocketBindException(const std::string& what, int error_number) - : CreateListenerException(what), error_number_(error_number) {} + : EnvoyException(what), error_number_(error_number) {} // This can't be called errno because otherwise the standard errno macro expansion replaces it. int errorNumber() const { return error_number_; } diff --git a/envoy/network/listener.h b/envoy/network/listener.h index d4377278d59a2..ad2b69a6c7502 100644 --- a/envoy/network/listener.h +++ b/envoy/network/listener.h @@ -25,6 +25,9 @@ namespace Network { class ActiveUdpListenerFactory; class UdpListenerWorkerRouter; +class ListenSocketFactory; +using ListenSocketFactoryPtr = std::unique_ptr; + /** * ListenSocketFactory is a member of ListenConfig to provide listen socket. * Listeners created from the same ListenConfig instance have listening sockets @@ -36,10 +39,12 @@ class ListenSocketFactory { /** * Called during actual listener creation. + * @param worker_index supplies the worker index to get the socket for. All sockets are created + * ahead of time. * @return the socket to be used for a certain listener, which might be shared * with other listeners of the same config on other worker threads. */ - virtual SocketSharedPtr getListenSocket() PURE; + virtual SocketSharedPtr getListenSocket(uint32_t worker_index) PURE; /** * @return the type of the socket getListenSocket() returns. 
@@ -53,12 +58,23 @@ class ListenSocketFactory { virtual const Address::InstanceConstSharedPtr& localAddress() const PURE; /** - * @return the socket shared by worker threads if any; otherwise return null. + * Clone this socket factory so it can be used by a new listener (e.g., if the address is shared). */ - virtual SocketOptRef sharedSocket() const PURE; -}; + virtual ListenSocketFactoryPtr clone() const PURE; + + /** + * Close all sockets. This is used during draining scenarios. + */ + virtual void closeAllSockets() PURE; -using ListenSocketFactorySharedPtr = std::shared_ptr; + /** + * Perform any initialization that must occur immediately prior to using the listen socket on + * workers. For example, the actual listen() call, post listen socket options, etc. This is done + * so that all error handling can occur on the main thread and the gap between performing these + * actions and using the socket is minimized. + */ + virtual void doFinalPreWorkerInit() PURE; +}; /** * Configuration for a UDP listener. diff --git a/envoy/server/hot_restart.h b/envoy/server/hot_restart.h index 7525fa6a0f4fb..aecf84a0c1b69 100644 --- a/envoy/server/hot_restart.h +++ b/envoy/server/hot_restart.h @@ -28,6 +28,11 @@ class HotRestart { uint64_t parent_connections_ = 0; }; + struct AdminShutdownResponse { + time_t original_start_time_; + bool enable_reuse_port_default_; + }; + virtual ~HotRestart() = default; /** @@ -40,9 +45,11 @@ class HotRestart { * Retrieve a listening socket on the specified address from the parent process. The socket will * be duplicated across process boundaries. * @param address supplies the address of the socket to duplicate, e.g. tcp://127.0.0.1:5000. + * @param worker_index supplies the socket/worker index to fetch. When using reuse_port sockets + * each socket is fetched individually to ensure no connection loss. * @return int the fd or -1 if there is no bound listen port in the parent. 
*/ - virtual int duplicateParentListenSocket(const std::string& address) PURE; + virtual int duplicateParentListenSocket(const std::string& address, uint32_t worker_index) PURE; /** * Initialize the parent logic of our restarter. Meant to be called after initialization of a @@ -54,9 +61,9 @@ class HotRestart { /** * Shutdown admin processing in the parent process if applicable. This allows admin processing * to start up in the new process. - * @param original_start_time will be filled with information from our parent, if retrieved. + * @return response if the parent is alive. */ - virtual void sendParentAdminShutdownRequest(time_t& original_start_time) PURE; + virtual absl::optional sendParentAdminShutdownRequest() PURE; /** * Tell our parent process to gracefully terminate itself. diff --git a/envoy/server/instance.h b/envoy/server/instance.h index 90886faf0013f..880aa4889b376 100644 --- a/envoy/server/instance.h +++ b/envoy/server/instance.h @@ -260,6 +260,15 @@ class Instance { */ virtual void setDefaultTracingConfig(const envoy::config::trace::v3::Tracing& tracing_config) PURE; + + /** + * Return the default for whether reuse_port is enabled or not. This was added as part of + * fixing https://github.com/envoyproxy/envoy/issues/15794. It is required to know what the + * default of parent processes during hot restart was, because otherwise switching the + * default on the fly will break existing deployments. + * TODO(mattklein123): This can be removed when version 1.20.0 is no longer supported. 
+ */ + virtual bool enableReusePortDefault() PURE; }; } // namespace Server diff --git a/envoy/server/listener_manager.h b/envoy/server/listener_manager.h index 5fa8c2503e67a..329afe168e643 100644 --- a/envoy/server/listener_manager.h +++ b/envoy/server/listener_manager.h @@ -34,20 +34,6 @@ class LdsApi { using LdsApiPtr = std::unique_ptr; -struct ListenSocketCreationParams { - ListenSocketCreationParams(bool bind_to_port, bool duplicate_parent_socket = true) - : bind_to_port(bind_to_port), duplicate_parent_socket(duplicate_parent_socket) {} - - // For testing. - bool operator==(const ListenSocketCreationParams& rhs) const; - bool operator!=(const ListenSocketCreationParams& rhs) const; - - // whether to actually bind the socket. - bool bind_to_port; - // whether to duplicate socket from hot restart parent. - bool duplicate_parent_socket; -}; - /** * Factory for creating listener components. */ @@ -63,19 +49,29 @@ class ListenerComponentFactory { virtual LdsApiPtr createLdsApi(const envoy::config::core::v3::ConfigSource& lds_config, const xds::core::v3::ResourceLocator* lds_resources_locator) PURE; + enum class BindType { + // The listener will not bind. + NoBind, + // The listener will bind a socket shared by all workers. + NoReusePort, + // The listener will use reuse_port sockets independently on each worker. + ReusePort + }; + /** * Creates a socket. * @param address supplies the socket's address. * @param socket_type the type of socket (stream or datagram) to create. * @param options to be set on the created socket just before calling 'bind()'. - * @param params used to control how a socket being created. + * @param bind_type supplies the bind type of the listen socket. + * @param worker_index supplies the socket/worker index of the new socket. * @return Network::SocketSharedPtr an initialized and potentially bound socket. 
*/ virtual Network::SocketSharedPtr createListenSocket(Network::Address::InstanceConstSharedPtr address, Network::Socket::Type socket_type, - const Network::Socket::OptionsSharedPtr& options, - const ListenSocketCreationParams& params) PURE; + const Network::Socket::OptionsSharedPtr& options, BindType bind_type, + uint32_t worker_index) PURE; /** * Creates a list of filter factories. diff --git a/envoy/server/worker.h b/envoy/server/worker.h index 911cde97f9f90..36701a8eac326 100644 --- a/envoy/server/worker.h +++ b/envoy/server/worker.h @@ -19,10 +19,8 @@ class Worker { /** * Completion called when a listener has been added on a worker and is listening for new * connections. - * @param success supplies whether the addition was successful or not. FALSE can be returned - * when there is a race condition between bind() and listen(). */ - using AddListenerCompletion = std::function; + using AddListenerCompletion = std::function; /** * Add a listener to the worker and replace the previous listener if any. 
If the previous listener diff --git a/examples/udp/envoy.yaml b/examples/udp/envoy.yaml index 9937bba45f1f9..e516eff5fd94b 100644 --- a/examples/udp/envoy.yaml +++ b/examples/udp/envoy.yaml @@ -1,7 +1,6 @@ static_resources: listeners: - name: listener_0 - reuse_port: true address: socket_address: protocol: UDP diff --git a/generated_api_shadow/envoy/config/listener/v3/listener.proto b/generated_api_shadow/envoy/config/listener/v3/listener.proto index b5bda9562cee8..a5cd4bfe976f7 100644 --- a/generated_api_shadow/envoy/config/listener/v3/listener.proto +++ b/generated_api_shadow/envoy/config/listener/v3/listener.proto @@ -35,7 +35,7 @@ message ListenerCollection { repeated xds.core.v3.CollectionEntry entries = 1; } -// [#next-free-field: 29] +// [#next-free-field: 30] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener"; @@ -255,17 +255,30 @@ message Listener { // enable the balance config in Y1 and Y2 to balance the connections among the workers. ConnectionBalanceConfig connection_balance_config = 20; + // Deprecated. Use `enable_reuse_port` instead. + bool reuse_port = 21 [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and // create one socket for each worker thread. This makes inbound connections // distribute among worker threads roughly evenly in cases where there are a high number - // of connections. When this flag is set to false, all worker threads share one socket. + // of connections. When this flag is set to false, all worker threads share one socket. This field + // defaults to true. + // + // .. attention:: + // + // Although this field defaults to true, it has different behavior on different platforms. See + // the following text for more information. 
// - // Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart - // (see `3rd paragraph in 'soreuseport' commit message - // `_). - // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket - // `_. - bool reuse_port = 21; + // * On Linux, reuse_port is respected for both TCP and UDP listeners. It also works correctly + // with hot restart. + // * On macOS, reuse_port for TCP does not do what it does on Linux. Instead of load balancing, + // the last socket wins and receives all connections/packets. For TCP, reuse_port is force + // disabled and the user is warned. For UDP, it is enabled, but only one worker will receive + // packets. For QUIC/H3, SW routing will send packets to other workers. For "raw" UDP, only + // a single worker will currently receive packets. + // * On Windows, reuse_port for TCP has undefined behavior. It is force disabled and the user + // is warned similar to macOS. It is left enabled for UDP with undefined behavior currently. + google.protobuf.BoolValue enable_reuse_port = 29; // Configuration for :ref:`access logs ` // emitted by this listener. diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto b/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto index 7618d5a229f4f..ccd900b6f4d34 100644 --- a/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto +++ b/generated_api_shadow/envoy/config/listener/v4alpha/listener.proto @@ -38,7 +38,7 @@ message ListenerCollection { repeated xds.core.v3.CollectionEntry entries = 1; } -// [#next-free-field: 29] +// [#next-free-field: 30] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Listener"; @@ -260,17 +260,31 @@ message Listener { // enable the balance config in Y1 and Y2 to balance the connections among the workers. ConnectionBalanceConfig connection_balance_config = 20; + // Deprecated. Use `enable_reuse_port` instead. 
+ bool hidden_envoy_deprecated_reuse_port = 21 + [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; + // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and // create one socket for each worker thread. This makes inbound connections // distribute among worker threads roughly evenly in cases where there are a high number - // of connections. When this flag is set to false, all worker threads share one socket. + // of connections. When this flag is set to false, all worker threads share one socket. This field + // defaults to true. + // + // .. attention:: + // + // Although this field defaults to true, it has different behavior on different platforms. See + // the following text for more information. // - // Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart - // (see `3rd paragraph in 'soreuseport' commit message - // `_). - // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket - // `_. - bool reuse_port = 21; + // * On Linux, reuse_port is respected for both TCP and UDP listeners. It also works correctly + // with hot restart. + // * On macOS, reuse_port for TCP does not do what it does on Linux. Instead of load balancing, + // the last socket wins and receives all connections/packets. For TCP, reuse_port is force + // disabled and the user is warned. For UDP, it is enabled, but only one worker will receive + // packets. For QUIC/H3, SW routing will send packets to other workers. For "raw" UDP, only + // a single worker will currently receive packets. + // * On Windows, reuse_port for TCP has undefined behavior. It is force disabled and the user + // is warned similar to macOS. It is left enabled for UDP with undefined behavior currently. + google.protobuf.BoolValue enable_reuse_port = 29; // Configuration for :ref:`access logs ` // emitted by this listener. 
diff --git a/source/common/event/dispatcher_impl.cc b/source/common/event/dispatcher_impl.cc index f12399d2df666..c0c8118895872 100644 --- a/source/common/event/dispatcher_impl.cc +++ b/source/common/event/dispatcher_impl.cc @@ -198,10 +198,10 @@ Filesystem::WatcherPtr DispatcherImpl::createFilesystemWatcher() { Network::ListenerPtr DispatcherImpl::createListener(Network::SocketSharedPtr&& socket, Network::TcpListenerCallbacks& cb, - bool bind_to_port, uint32_t backlog_size) { + bool bind_to_port) { ASSERT(isThreadSafe()); - return std::make_unique( - *this, api_.randomGenerator(), std::move(socket), cb, bind_to_port, backlog_size); + return std::make_unique(*this, api_.randomGenerator(), + std::move(socket), cb, bind_to_port); } Network::UdpListenerPtr diff --git a/source/common/event/dispatcher_impl.h b/source/common/event/dispatcher_impl.h index 601bc206c1c31..d98b5267c1e5c 100644 --- a/source/common/event/dispatcher_impl.h +++ b/source/common/event/dispatcher_impl.h @@ -73,8 +73,8 @@ class DispatcherImpl : Logger::Loggable, uint32_t events) override; Filesystem::WatcherPtr createFilesystemWatcher() override; Network::ListenerPtr createListener(Network::SocketSharedPtr&& socket, - Network::TcpListenerCallbacks& cb, bool bind_to_port, - uint32_t backlog_size) override; + Network::TcpListenerCallbacks& cb, + bool bind_to_port) override; Network::UdpListenerPtr createUdpListener(Network::SocketSharedPtr socket, Network::UdpListenerCallbacks& cb, const envoy::config::core::v3::UdpSocketConfig& config) override; diff --git a/source/common/network/listen_socket_impl.cc b/source/common/network/listen_socket_impl.cc index 6f00446460ee0..283ef3fac76de 100644 --- a/source/common/network/listen_socket_impl.cc +++ b/source/common/network/listen_socket_impl.cc @@ -35,17 +35,13 @@ Api::SysCallIntResult ListenSocketImpl::bind(Network::Address::InstanceConstShar void ListenSocketImpl::setListenSocketOptions(const Network::Socket::OptionsSharedPtr& options) { if 
(!Network::Socket::applyOptions(options, *this, envoy::config::core::v3::SocketOption::STATE_PREBIND)) { - throw CreateListenerException("ListenSocket: Setting socket options failed"); + throw SocketOptionException("ListenSocket: Setting socket options failed"); } } -void ListenSocketImpl::setupSocket(const Network::Socket::OptionsSharedPtr& options, - bool bind_to_port) { +void ListenSocketImpl::setupSocket(const Network::Socket::OptionsSharedPtr& options) { setListenSocketOptions(options); - - if (bind_to_port) { - bind(address_provider_->localAddress()); - } + bind(address_provider_->localAddress()); } UdsListenSocket::UdsListenSocket(const Address::InstanceConstSharedPtr& address) diff --git a/source/common/network/listen_socket_impl.h b/source/common/network/listen_socket_impl.h index a237e2d5b7e4d..b3f10a13c14d7 100644 --- a/source/common/network/listen_socket_impl.h +++ b/source/common/network/listen_socket_impl.h @@ -30,7 +30,7 @@ class ListenSocketImpl : public SocketImpl { address_provider_->localAddress())); } - void setupSocket(const Network::Socket::OptionsSharedPtr& options, bool bind_to_port); + void setupSocket(const Network::Socket::OptionsSharedPtr& options); void setListenSocketOptions(const Network::Socket::OptionsSharedPtr& options); Api::SysCallIntResult bind(Network::Address::InstanceConstSharedPtr address) override; }; @@ -58,18 +58,17 @@ template class NetworkListenSocket : public ListenSocketImpl { if (bind_to_port) { RELEASE_ASSERT(io_handle_->isOpen(), ""); setPrebindSocketOptions(); + setupSocket(options); } else { // If the tcp listener does not bind to port, we test that the ip family is supported. if (auto ip = address->ip(); ip != nullptr) { RELEASE_ASSERT( Network::SocketInterfaceSingleton::get().ipFamilySupported(ip->ipv4() ? 
AF_INET : AF_INET6), - fmt::format( - "Creating listen socket address {} but the address familiy is not supported", - address->asStringView())); + fmt::format("Creating listen socket address {} but the address family is not supported", + address->asStringView())); } } - setupSocket(options, bind_to_port); } NetworkListenSocket(IoHandlePtr&& io_handle, const Address::InstanceConstSharedPtr& address, diff --git a/source/common/network/tcp_listener_impl.cc b/source/common/network/tcp_listener_impl.cc index 8760e5f65eb84..a3f79f72df6b6 100644 --- a/source/common/network/tcp_listener_impl.cc +++ b/source/common/network/tcp_listener_impl.cc @@ -96,31 +96,17 @@ void TcpListenerImpl::onSocketEvent(short flags) { } } -void TcpListenerImpl::setupServerSocket(Event::DispatcherImpl& dispatcher, Socket& socket) { - ASSERT(bind_to_port_); - - socket.ioHandle().listen(backlog_size_); - - // Although onSocketEvent drains to completion, use level triggered mode to avoid potential - // loss of the trigger due to transient accept errors. 
- socket.ioHandle().initializeFileEvent( - dispatcher, [this](uint32_t events) -> void { onSocketEvent(events); }, - Event::FileTriggerType::Level, Event::FileReadyType::Read); - - if (!Network::Socket::applyOptions(socket.options(), socket, - envoy::config::core::v3::SocketOption::STATE_LISTENING)) { - throw CreateListenerException(fmt::format("cannot set post-listen socket option on socket: {}", - socket.addressProvider().localAddress()->asString())); - } -} - TcpListenerImpl::TcpListenerImpl(Event::DispatcherImpl& dispatcher, Random::RandomGenerator& random, SocketSharedPtr socket, TcpListenerCallbacks& cb, - bool bind_to_port, uint32_t backlog_size) - : BaseListenerImpl(dispatcher, std::move(socket)), cb_(cb), backlog_size_(backlog_size), - random_(random), bind_to_port_(bind_to_port), reject_fraction_(0.0) { + bool bind_to_port) + : BaseListenerImpl(dispatcher, std::move(socket)), cb_(cb), random_(random), + bind_to_port_(bind_to_port), reject_fraction_(0.0) { if (bind_to_port) { - setupServerSocket(dispatcher, *socket_); + // Although onSocketEvent drains to completion, use level triggered mode to avoid potential + // loss of the trigger due to transient accept errors. 
+ socket_->ioHandle().initializeFileEvent( + dispatcher, [this](uint32_t events) -> void { onSocketEvent(events); }, + Event::FileTriggerType::Level, Event::FileReadyType::Read); } } diff --git a/source/common/network/tcp_listener_impl.h b/source/common/network/tcp_listener_impl.h index bf5826038c844..27ecfeec949b6 100644 --- a/source/common/network/tcp_listener_impl.h +++ b/source/common/network/tcp_listener_impl.h @@ -17,8 +17,7 @@ namespace Network { class TcpListenerImpl : public BaseListenerImpl { public: TcpListenerImpl(Event::DispatcherImpl& dispatcher, Random::RandomGenerator& random, - SocketSharedPtr socket, TcpListenerCallbacks& cb, bool bind_to_port, - uint32_t backlog_size); + SocketSharedPtr socket, TcpListenerCallbacks& cb, bool bind_to_port); ~TcpListenerImpl() override { if (bind_to_port_) { socket_->ioHandle().resetFileEvents(); @@ -31,10 +30,7 @@ class TcpListenerImpl : public BaseListenerImpl { static const absl::string_view GlobalMaxCxRuntimeKey; protected: - void setupServerSocket(Event::DispatcherImpl& dispatcher, Socket& socket); - TcpListenerCallbacks& cb_; - const uint32_t backlog_size_; private: void onSocketEvent(short flags); diff --git a/source/common/network/udp_listener_impl.cc b/source/common/network/udp_listener_impl.cc index 44179ec4b0e52..d21ebb0add8a8 100644 --- a/source/common/network/udp_listener_impl.cc +++ b/source/common/network/udp_listener_impl.cc @@ -37,13 +37,6 @@ UdpListenerImpl::UdpListenerImpl(Event::DispatcherImpl& dispatcher, SocketShared socket_->ioHandle().initializeFileEvent( dispatcher, [this](uint32_t events) -> void { onSocketEvent(events); }, Event::PlatformDefaultTriggerType, Event::FileReadyType::Read | Event::FileReadyType::Write); - - if (!Network::Socket::applyOptions(socket_->options(), *socket_, - envoy::config::core::v3::SocketOption::STATE_BOUND)) { - throw CreateListenerException( - fmt::format("cannot set post-bound socket option on socket: {}", - 
socket_->addressProvider().localAddress()->asString())); - } } UdpListenerImpl::~UdpListenerImpl() { socket_->ioHandle().resetFileEvents(); } diff --git a/source/common/quic/active_quic_listener.cc b/source/common/quic/active_quic_listener.cc index f8389c5a54b64..840224c2a4829 100644 --- a/source/common/quic/active_quic_listener.cc +++ b/source/common/quic/active_quic_listener.cc @@ -1,13 +1,12 @@ #include "source/common/quic/active_quic_listener.h" -#include "envoy/network/exception.h" - -#if defined(__linux__) -#include -#endif - #include +#include "envoy/extensions/quic/crypto_stream/v3/crypto_stream.pb.h" +#include "envoy/extensions/quic/proof_source/v3/proof_source.pb.h" +#include "envoy/network/exception.h" + +#include "source/common/config/utility.h" #include "source/common/http/utility.h" #include "source/common/network/socket_option_impl.h" #include "source/common/quic/envoy_quic_alarm_factory.h" @@ -16,12 +15,8 @@ #include "source/common/quic/envoy_quic_packet_writer.h" #include "source/common/quic/envoy_quic_proof_source.h" #include "source/common/quic/envoy_quic_utils.h" -#include "source/common/quic/envoy_quic_utils.h" -#include "source/common/config/utility.h" #include "source/common/quic/quic_network_connection.h" #include "source/common/runtime/runtime_features.h" -#include "envoy/extensions/quic/crypto_stream/v3/crypto_stream.pb.h" -#include "envoy/extensions/quic/proof_source/v3/proof_source.pb.h" namespace Envoy { namespace Quic { @@ -29,14 +24,14 @@ namespace Quic { ActiveQuicListener::ActiveQuicListener( uint32_t worker_index, uint32_t concurrency, Event::Dispatcher& dispatcher, Network::UdpConnectionHandler& parent, Network::ListenerConfig& listener_config, - const quic::QuicConfig& quic_config, Network::Socket::OptionsSharedPtr options, - bool kernel_worker_routing, const envoy::config::core::v3::RuntimeFeatureFlag& enabled, - QuicStatNames& quic_stat_names, uint32_t packets_received_to_connection_count_ratio, + const quic::QuicConfig& 
quic_config, bool kernel_worker_routing, + const envoy::config::core::v3::RuntimeFeatureFlag& enabled, QuicStatNames& quic_stat_names, + uint32_t packets_received_to_connection_count_ratio, EnvoyQuicCryptoServerStreamFactoryInterface& crypto_server_stream_factory, EnvoyQuicProofSourceFactoryInterface& proof_source_factory) : ActiveQuicListener(worker_index, concurrency, dispatcher, parent, - listener_config.listenSocketFactory().getListenSocket(), listener_config, - quic_config, std::move(options), kernel_worker_routing, enabled, + listener_config.listenSocketFactory().getListenSocket(worker_index), + listener_config, quic_config, kernel_worker_routing, enabled, quic_stat_names, packets_received_to_connection_count_ratio, crypto_server_stream_factory, proof_source_factory) {} @@ -44,9 +39,8 @@ ActiveQuicListener::ActiveQuicListener( uint32_t worker_index, uint32_t concurrency, Event::Dispatcher& dispatcher, Network::UdpConnectionHandler& parent, Network::SocketSharedPtr listen_socket, Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config, - Network::Socket::OptionsSharedPtr options, bool kernel_worker_routing, - const envoy::config::core::v3::RuntimeFeatureFlag& enabled, QuicStatNames& quic_stat_names, - uint32_t packets_to_read_to_connection_count_ratio, + bool kernel_worker_routing, const envoy::config::core::v3::RuntimeFeatureFlag& enabled, + QuicStatNames& quic_stat_names, uint32_t packets_to_read_to_connection_count_ratio, EnvoyQuicCryptoServerStreamFactoryInterface& crypto_server_stream_factory, EnvoyQuicProofSourceFactoryInterface& proof_source_factory) : Server::ActiveUdpListenerBase( @@ -68,17 +62,6 @@ ActiveQuicListener::ActiveQuicListener( if (Runtime::LoaderSingleton::getExisting()) { enabled_.emplace(Runtime::FeatureFlag(enabled, Runtime::LoaderSingleton::get())); } - if (options != nullptr) { - const bool ok = Network::Socket::applyOptions( - options, listen_socket_, envoy::config::core::v3::SocketOption::STATE_BOUND); - if 
(!ok) { - // TODO(fcoras): consider removing the fd from the log message - ENVOY_LOG(warn, "Failed to apply socket options to socket {} on listener {} after binding", - listen_socket_.ioHandle().fdDoNotUse(), listener_config.name()); - throw Network::CreateListenerException("Failed to apply socket options."); - } - listen_socket_.addOptions(options); - } quic::QuicRandom* const random = quic::QuicRandom::GetInstance(); random->RandBytes(random_seed_, sizeof(random_seed_)); @@ -285,13 +268,6 @@ ActiveQuicListenerFactory::ActiveQuicListenerFactory( } proof_source_factory_ = Config::Utility::getAndCheckFactory( proof_source_config); -} - -Network::ConnectionHandler::ActiveUdpListenerPtr ActiveQuicListenerFactory::createActiveUdpListener( - uint32_t worker_index, Network::UdpConnectionHandler& parent, Event::Dispatcher& disptacher, - Network::ListenerConfig& config) { - bool kernel_worker_routing = false; - std::unique_ptr options = std::make_unique(); #if defined(SO_ATTACH_REUSEPORT_CBPF) && defined(__linux__) // This BPF filter reads the 1st word of QUIC connection id in the UDP payload and mods it by the @@ -306,7 +282,7 @@ Network::ConnectionHandler::ActiveUdpListenerPtr ActiveQuicListenerFactory::crea // Any packet that doesn't belong to any of the three packet header types are dispatched // based on 5-tuple source/destination addresses. // SPELLCHECKER(off) - std::vector filter = { + filter_ = { {0x80, 0, 0, 0000000000}, // ld len {0x35, 0, 9, 0x00000009}, // jlt #0x9, packet_too_short {0x30, 0, 0, 0000000000}, // ldb [0] @@ -324,24 +300,21 @@ Network::ConnectionHandler::ActiveUdpListenerPtr ActiveQuicListenerFactory::crea {0x16, 0, 0, 0000000000}, // ret a }; // SPELLCHECKER(on) - sock_fprog prog; - // This option only needs to be applied once to any one of the sockets in SO_REUSEPORT socket - // group. One of the listener will be created with this socket option. 
if (Runtime::runtimeFeatureEnabled( "envoy.reloadable_features.prefer_quic_kernel_bpf_packet_routing")) { - absl::call_once(install_bpf_once_, [&]() { - if (concurrency_ > 1) { - prog.len = filter.size(); - prog.filter = filter.data(); - options->push_back(std::make_shared( - envoy::config::core::v3::SocketOption::STATE_BOUND, ENVOY_ATTACH_REUSEPORT_CBPF, - absl::string_view(reinterpret_cast(&prog), sizeof(prog)))); - } else { - ENVOY_LOG(info, "Not applying BPF because concurrency is 1"); - } - }); - - kernel_worker_routing = true; + if (concurrency_ > 1) { + // Note that this option refers to the BPF program data above, which must live until the + // option is used. The program is kept as a member variable for this purpose. + prog_.len = filter_.size(); + prog_.filter = filter_.data(); + options_->push_back(std::make_shared( + envoy::config::core::v3::SocketOption::STATE_BOUND, ENVOY_ATTACH_REUSEPORT_CBPF, + absl::string_view(reinterpret_cast(&prog_), sizeof(prog_)))); + } else { + ENVOY_LOG(info, "Not applying BPF because concurrency is 1"); + } + + kernel_worker_routing_ = true; }; #else @@ -350,11 +323,15 @@ Network::ConnectionHandler::ActiveUdpListenerPtr ActiveQuicListenerFactory::crea "not implemented by Envoy on this platform. 
QUIC performance may be degraded."); } #endif +} +Network::ConnectionHandler::ActiveUdpListenerPtr ActiveQuicListenerFactory::createActiveUdpListener( + uint32_t worker_index, Network::UdpConnectionHandler& parent, Event::Dispatcher& disptacher, + Network::ListenerConfig& config) { ASSERT(crypto_server_stream_factory_.has_value()); return std::make_unique( - worker_index, concurrency_, disptacher, parent, config, quic_config_, std::move(options), - kernel_worker_routing, enabled_, quic_stat_names_, packets_to_read_to_connection_count_ratio_, + worker_index, concurrency_, disptacher, parent, config, quic_config_, kernel_worker_routing_, + enabled_, quic_stat_names_, packets_to_read_to_connection_count_ratio_, crypto_server_stream_factory_.value(), proof_source_factory_.value()); } diff --git a/source/common/quic/active_quic_listener.h b/source/common/quic/active_quic_listener.h index 39ff028a22fd2..d4c558a3728d0 100644 --- a/source/common/quic/active_quic_listener.h +++ b/source/common/quic/active_quic_listener.h @@ -13,6 +13,10 @@ #include "source/server/active_udp_listener.h" #include "source/server/connection_handler_impl.h" +#if defined(__linux__) +#include +#endif + namespace Envoy { namespace Quic { @@ -27,7 +31,7 @@ class ActiveQuicListener : public Envoy::Server::ActiveUdpListenerBase, ActiveQuicListener(uint32_t worker_index, uint32_t concurrency, Event::Dispatcher& dispatcher, Network::UdpConnectionHandler& parent, Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config, - Network::Socket::OptionsSharedPtr options, bool kernel_worker_routing, + bool kernel_worker_routing, const envoy::config::core::v3::RuntimeFeatureFlag& enabled, QuicStatNames& quic_stat_names, uint32_t packets_to_read_to_connection_count_ratio, @@ -37,7 +41,7 @@ class ActiveQuicListener : public Envoy::Server::ActiveUdpListenerBase, ActiveQuicListener(uint32_t worker_index, uint32_t concurrency, Event::Dispatcher& dispatcher, Network::UdpConnectionHandler& parent, 
Network::SocketSharedPtr listen_socket, Network::ListenerConfig& listener_config, const quic::QuicConfig& quic_config, - Network::Socket::OptionsSharedPtr options, bool kernel_worker_routing, + bool kernel_worker_routing, const envoy::config::core::v3::RuntimeFeatureFlag& enabled, QuicStatNames& quic_stat_names, uint32_t packets_to_read_to_connection_count_ratio, @@ -100,6 +104,7 @@ class ActiveQuicListenerFactory : public Network::ActiveUdpListenerFactory, createActiveUdpListener(uint32_t worker_index, Network::UdpConnectionHandler& parent, Event::Dispatcher& disptacher, Network::ListenerConfig& config) override; bool isTransportConnectionless() const override { return false; } + const Network::Socket::OptionsSharedPtr& socketOptions() const override { return options_; } private: friend class ActiveQuicListenerFactoryPeer; @@ -110,10 +115,16 @@ class ActiveQuicListenerFactory : public Network::ActiveUdpListenerFactory, proof_source_factory_; quic::QuicConfig quic_config_; const uint32_t concurrency_; - absl::once_flag install_bpf_once_; envoy::config::core::v3::RuntimeFeatureFlag enabled_; QuicStatNames& quic_stat_names_; const uint32_t packets_to_read_to_connection_count_ratio_; + const Network::Socket::OptionsSharedPtr options_{std::make_shared()}; + bool kernel_worker_routing_{}; + +#if defined(SO_ATTACH_REUSEPORT_CBPF) && defined(__linux__) + sock_fprog prog_; + std::vector filter_; +#endif }; } // namespace Quic diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 9d7782377a43b..f497453df0cb9 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -73,6 +73,7 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.http_transport_failure_reason_in_body", "envoy.reloadable_features.improved_stream_limit_handling", "envoy.reloadable_features.internal_redirects_with_body", + "envoy.reloadable_features.listener_reuse_port_default_enabled", 
"envoy.reloadable_features.listener_wildcard_match_ip_family", "envoy.reloadable_features.new_tcp_connection_pool", "envoy.reloadable_features.no_chunked_encoding_header_for_304", diff --git a/source/docs/listener.md b/source/docs/listener.md new file mode 100644 index 0000000000000..0815c82957745 --- /dev/null +++ b/source/docs/listener.md @@ -0,0 +1,27 @@ +# Listener implementation + +## Socket handling + +Envoy uses the following procedure for creating sockets and assigning them to workers. + +1. When a listener is created, a socket is pre-created for every worker on the main thread. This + allows most errors to be caught early on in the listener creation process (e.g., bad socket + option, unable to bind, etc.). + * If using reuse_port, a unique socket is created for every worker. + * If not using reuse_port, a unique socket is created for worker 0, and then that socket + is duplicated for all other workers. +2. In the case of a listener update in which the address/socket is to be shared (i.e., a named + listener that has configuration settings changed but listens on the same address will share + sockets with the draining listener), all sockets are duplicated regardless of whether using + reuse_port or not. This keeps the logic equivalent, and also makes sure that when using + reuse_port no TCP connections are dropped during the drain process (in the race condition between + assigning a queue and closing the listening socket on the old listener). +3. In the case of hot restart, all sockets are requested by worker index. If the old process has + a match, the socket is duplicated and sent to the new process. Thus, hot restart and a listener + update effectively share the same update process. This is so that no connections are dropped + when using reuse_port. +4. A consequence of every listener having dedicated sockets (whether duplicated or not) is that + a listener can close() its sockets when removed without concern for other listeners. 
+ +For more information, see `ListenSocketFactoryImpl` and its use within `ListenerImpl` and +`ListenerManagerImpl`. diff --git a/source/server/active_raw_udp_listener_config.h b/source/server/active_raw_udp_listener_config.h index b24b08897685f..7b9bcf890acfa 100644 --- a/source/server/active_raw_udp_listener_config.h +++ b/source/server/active_raw_udp_listener_config.h @@ -12,11 +12,12 @@ class ActiveRawUdpListenerFactory : public Network::ActiveUdpListenerFactory { Network::ConnectionHandler::ActiveUdpListenerPtr createActiveUdpListener(uint32_t worker_index, Network::UdpConnectionHandler& parent, Event::Dispatcher& disptacher, Network::ListenerConfig& config) override; - bool isTransportConnectionless() const override { return true; } + const Network::Socket::OptionsSharedPtr& socketOptions() const override { return options_; } private: const uint32_t concurrency_; + const Network::Socket::OptionsSharedPtr options_{std::make_shared()}; }; } // namespace Server diff --git a/source/server/active_tcp_listener.cc b/source/server/active_tcp_listener.cc index d48ae15f26244..bed9854998b76 100644 --- a/source/server/active_tcp_listener.cc +++ b/source/server/active_tcp_listener.cc @@ -26,12 +26,12 @@ void emitLogs(Network::ListenerConfig& config, StreamInfo::StreamInfo& stream_in } // namespace ActiveTcpListener::ActiveTcpListener(Network::TcpConnectionHandler& parent, - Network::ListenerConfig& config) - : ActiveTcpListener( - parent, - parent.dispatcher().createListener(config.listenSocketFactory().getListenSocket(), *this, - config.bindToPort(), config.tcpBacklogSize()), - config) {} + Network::ListenerConfig& config, uint32_t worker_index) + : ActiveTcpListener(parent, + parent.dispatcher().createListener( + config.listenSocketFactory().getListenSocket(worker_index), *this, + config.bindToPort()), + config) {} ActiveTcpListener::ActiveTcpListener(Network::TcpConnectionHandler& parent, Network::ListenerPtr&& listener, diff --git a/source/server/active_tcp_listener.h 
b/source/server/active_tcp_listener.h index 469e1677c999e..9898f3ae45909 100644 --- a/source/server/active_tcp_listener.h +++ b/source/server/active_tcp_listener.h @@ -34,7 +34,8 @@ class ActiveTcpListener final : public Network::TcpListenerCallbacks, public Network::BalancedConnectionHandler, Logger::Loggable { public: - ActiveTcpListener(Network::TcpConnectionHandler& parent, Network::ListenerConfig& config); + ActiveTcpListener(Network::TcpConnectionHandler& parent, Network::ListenerConfig& config, + uint32_t worker_index); ActiveTcpListener(Network::TcpConnectionHandler& parent, Network::ListenerPtr&& listener, Network::ListenerConfig& config); ~ActiveTcpListener() override; diff --git a/source/server/active_udp_listener.cc b/source/server/active_udp_listener.cc index c20d2809aa07f..9cb3fc4d4bd81 100644 --- a/source/server/active_udp_listener.cc +++ b/source/server/active_udp_listener.cc @@ -68,7 +68,8 @@ ActiveRawUdpListener::ActiveRawUdpListener(uint32_t worker_index, uint32_t concu Event::Dispatcher& dispatcher, Network::ListenerConfig& config) : ActiveRawUdpListener(worker_index, concurrency, parent, - config.listenSocketFactory().getListenSocket(), dispatcher, config) {} + config.listenSocketFactory().getListenSocket(worker_index), dispatcher, + config) {} ActiveRawUdpListener::ActiveRawUdpListener(uint32_t worker_index, uint32_t concurrency, Network::UdpConnectionHandler& parent, @@ -96,16 +97,14 @@ ActiveRawUdpListener::ActiveRawUdpListener(uint32_t worker_index, uint32_t concu Network::UdpListenerPtr&& listener, Network::ListenerConfig& config) : ActiveUdpListenerBase(worker_index, concurrency, parent, listen_socket, std::move(listener), - &config), - read_filter_(nullptr) { - // Create the filter chain on creating a new udp listener + &config) { + // Create the filter chain on creating a new udp listener. 
config_->filterChainFactory().createUdpListenerFilterChain(*this, *this); - // If filter is nullptr, fail the creation of the listener + // If filter is nullptr warn that we will be dropping packets. This is an edge case and should + // only happen due to a bad factory. It's not worth adding per-worker error handling for this. if (read_filter_ == nullptr) { - throw Network::CreateListenerException( - fmt::format("Cannot create listener as no read filter registered for the udp listener: {} ", - config_->name())); + ENVOY_LOG(warn, "UDP listener has no filters. Packets will be dropped."); } // Create udp_packet_writer @@ -113,7 +112,11 @@ ActiveRawUdpListener::ActiveRawUdpListener(uint32_t worker_index, uint32_t concu listen_socket_.ioHandle(), config.listenerScope()); } -void ActiveRawUdpListener::onDataWorker(Network::UdpRecvData&& data) { read_filter_->onData(data); } +void ActiveRawUdpListener::onDataWorker(Network::UdpRecvData&& data) { + if (read_filter_ != nullptr) { + read_filter_->onData(data); + } +} void ActiveRawUdpListener::onReadReady() {} @@ -127,7 +130,9 @@ void ActiveRawUdpListener::onWriteReady(const Network::Socket&) { } void ActiveRawUdpListener::onReceiveError(Api::IoError::IoErrorCode error_code) { - read_filter_->onReceiveError(error_code); + if (read_filter_ != nullptr) { + read_filter_->onReceiveError(error_code); + } } void ActiveRawUdpListener::addReadFilter(Network::UdpListenerReadFilterPtr&& filter) { diff --git a/source/server/active_udp_listener.h b/source/server/active_udp_listener.h index 520663ac4ddf8..eef7ca228e738 100644 --- a/source/server/active_udp_listener.h +++ b/source/server/active_udp_listener.h @@ -61,7 +61,8 @@ class ActiveUdpListenerBase : public ActiveListenerImplBase, */ class ActiveRawUdpListener : public ActiveUdpListenerBase, public Network::UdpListenerFilterManager, - public Network::UdpReadFilterCallbacks { + public Network::UdpReadFilterCallbacks, + Logger::Loggable { public: ActiveRawUdpListener(uint32_t 
worker_index, uint32_t concurrency, Network::UdpConnectionHandler& parent, Event::Dispatcher& dispatcher, diff --git a/source/server/admin/admin.cc b/source/server/admin/admin.cc index 4e3ee65c2813a..314e2c298bf86 100644 --- a/source/server/admin/admin.cc +++ b/source/server/admin/admin.cc @@ -127,7 +127,9 @@ void AdminImpl::startHttpListener(const std::list& } null_overload_manager_.start(); socket_ = std::make_shared(address, socket_options, true); - socket_factory_ = std::make_shared(socket_); + // TODO(mattklein123): We lost error handling along the way for the listen() call. Add it back. + socket_->ioHandle().listen(ENVOY_TCP_BACKLOG_SIZE); + socket_factory_ = std::make_unique(socket_); listener_ = std::make_unique(*this, std::move(listener_scope)); ENVOY_LOG(info, "admin address: {}", socket().addressProvider().localAddress()->asString()); if (!address_out_path.empty()) { diff --git a/source/server/admin/admin.h b/source/server/admin/admin.h index 2dbe42b200bfa..de0cf7a55e2ab 100644 --- a/source/server/admin/admin.h +++ b/source/server/admin/admin.h @@ -318,19 +318,18 @@ class AdminImpl : public Admin, // Network::ListenSocketFactory Network::Socket::Type socketType() const override { return socket_->socketType(); } - const Network::Address::InstanceConstSharedPtr& localAddress() const override { return socket_->addressProvider().localAddress(); } - - Network::SocketSharedPtr getListenSocket() override { + Network::SocketSharedPtr getListenSocket(uint32_t) override { // This is only supposed to be called once. 
RELEASE_ASSERT(!socket_create_, "AdminListener's socket shouldn't be shared."); socket_create_ = true; return socket_; } - - Network::SocketOptRef sharedSocket() const override { return absl::nullopt; } + Network::ListenSocketFactoryPtr clone() const override { return nullptr; } + void closeAllSockets() override {} + void doFinalPreWorkerInit() override {} private: Network::SocketSharedPtr socket_; @@ -448,7 +447,7 @@ class AdminImpl : public Admin, ConfigTrackerImpl config_tracker_; const Network::FilterChainSharedPtr admin_filter_chain_; Network::SocketSharedPtr socket_; - Network::ListenSocketFactorySharedPtr socket_factory_; + Network::ListenSocketFactoryPtr socket_factory_; AdminListenerPtr listener_; const AdminInternalAddressConfig internal_address_config_; const LocalReply::LocalReplyPtr local_reply_; diff --git a/source/server/config_validation/dispatcher.cc b/source/server/config_validation/dispatcher.cc index a4aacf9dd25e7..36010fd3107ae 100644 --- a/source/server/config_validation/dispatcher.cc +++ b/source/server/config_validation/dispatcher.cc @@ -22,8 +22,7 @@ Network::DnsResolverSharedPtr ValidationDispatcher::createDnsResolver( } Network::ListenerPtr ValidationDispatcher::createListener(Network::SocketSharedPtr&&, - Network::TcpListenerCallbacks&, bool, - uint32_t) { + Network::TcpListenerCallbacks&, bool) { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } diff --git a/source/server/config_validation/dispatcher.h b/source/server/config_validation/dispatcher.h index 5857df0811d7e..2d2f1a21416b2 100644 --- a/source/server/config_validation/dispatcher.h +++ b/source/server/config_validation/dispatcher.h @@ -27,7 +27,7 @@ class ValidationDispatcher : public DispatcherImpl { const std::vector& resolvers, const envoy::config::core::v3::DnsResolverOptions& dns_resolver_options) override; Network::ListenerPtr createListener(Network::SocketSharedPtr&&, Network::TcpListenerCallbacks&, - bool bind_to_port, uint32_t backlog_size) override; + bool bind_to_port) override; 
protected: std::shared_ptr dns_resolver_{ diff --git a/source/server/config_validation/server.h b/source/server/config_validation/server.h index a25cfb7573b57..a569f76a1fdf1 100644 --- a/source/server/config_validation/server.h +++ b/source/server/config_validation/server.h @@ -108,6 +108,7 @@ class ValidationInstance final : Logger::Loggable, ProtobufMessage::ValidationContext& messageValidationContext() override { return validation_context_; } + bool enableReusePortDefault() override { return true; } Configuration::StatsConfig& statsConfig() override { return config_.statsConfig(); } envoy::config::bootstrap::v3::Bootstrap& bootstrap() override { NOT_IMPLEMENTED_GCOVR_EXCL_LINE; } @@ -145,9 +146,12 @@ class ValidationInstance final : Logger::Loggable, Network::SocketSharedPtr createListenSocket(Network::Address::InstanceConstSharedPtr, Network::Socket::Type, const Network::Socket::OptionsSharedPtr&, - const ListenSocketCreationParams&) override { + ListenerComponentFactory::BindType, + uint32_t) override { // Returned sockets are not currently used so we can return nothing here safely vs. a // validation mock. + // TODO(mattklein123): The fact that this returns nullptr makes the production code more + // convoluted than it needs to be. Fix this to return a mock in a follow up. return nullptr; } DrainManagerPtr createDrainManager(envoy::config::listener::v3::Listener::DrainType) override { diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index 14961a5c18747..c79387f8c2033 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -38,7 +38,9 @@ void ConnectionHandlerImpl::addListener(absl::optional overridden_list } NOT_REACHED_GCOVR_EXCL_LINE; } - auto tcp_listener = std::make_unique(*this, config); + // worker_index_ doesn't have a value on the main thread for the admin server. + auto tcp_listener = std::make_unique( + *this, config, worker_index_.has_value() ? 
*worker_index_ : 0); details.typed_listener_ = *tcp_listener; details.listener_ = std::move(tcp_listener); } else { diff --git a/source/server/hot_restart.proto b/source/server/hot_restart.proto index 9630b0566ea2f..2439eacdf4631 100644 --- a/source/server/hot_restart.proto +++ b/source/server/hot_restart.proto @@ -7,6 +7,7 @@ message HotRestartMessage { message Request { message PassListenSocket { string address = 1; + uint32 worker_index = 2; } message ShutdownAdmin { } @@ -32,6 +33,9 @@ message HotRestartMessage { } message ShutdownAdmin { uint64 original_start_time_unix_seconds = 1; + // See the comments on Server::Instance::enableReusePortDefault() for why this exists. The + // default is false for backwards compatibility. + bool enable_reuse_port_default = 2; } message Span { uint32 first = 1; diff --git a/source/server/hot_restart_impl.cc b/source/server/hot_restart_impl.cc index ab9e28f38618a..3b2160c9eb63e 100644 --- a/source/server/hot_restart_impl.cc +++ b/source/server/hot_restart_impl.cc @@ -114,16 +114,16 @@ void HotRestartImpl::drainParentListeners() { shmem_->flags_ &= ~SHMEM_FLAGS_INITIALIZING; } -int HotRestartImpl::duplicateParentListenSocket(const std::string& address) { - return as_child_.duplicateParentListenSocket(address); +int HotRestartImpl::duplicateParentListenSocket(const std::string& address, uint32_t worker_index) { + return as_child_.duplicateParentListenSocket(address, worker_index); } void HotRestartImpl::initialize(Event::Dispatcher& dispatcher, Server::Instance& server) { as_parent_.initialize(dispatcher, server); } -void HotRestartImpl::sendParentAdminShutdownRequest(time_t& original_start_time) { - as_child_.sendParentAdminShutdownRequest(original_start_time); +absl::optional HotRestartImpl::sendParentAdminShutdownRequest() { + return as_child_.sendParentAdminShutdownRequest(); } void HotRestartImpl::sendParentTerminateRequest() { as_child_.sendParentTerminateRequest(); } diff --git a/source/server/hot_restart_impl.h 
b/source/server/hot_restart_impl.h index 63545c8fd1b0c..e097e675694fb 100644 --- a/source/server/hot_restart_impl.h +++ b/source/server/hot_restart_impl.h @@ -102,9 +102,9 @@ class HotRestartImpl : public HotRestart { // Server::HotRestart void drainParentListeners() override; - int duplicateParentListenSocket(const std::string& address) override; + int duplicateParentListenSocket(const std::string& address, uint32_t worker_index) override; void initialize(Event::Dispatcher& dispatcher, Server::Instance& server) override; - void sendParentAdminShutdownRequest(time_t& original_start_time) override; + absl::optional sendParentAdminShutdownRequest() override; void sendParentTerminateRequest() override; ServerStatsFromParent mergeParentStatsIfAny(Stats::StoreRoot& stats_store) override; void shutdown() override; diff --git a/source/server/hot_restart_nop_impl.h b/source/server/hot_restart_nop_impl.h index 28369b3b1ac6e..6e8b9a4ace44d 100644 --- a/source/server/hot_restart_nop_impl.h +++ b/source/server/hot_restart_nop_impl.h @@ -17,9 +17,11 @@ class HotRestartNopImpl : public Server::HotRestart { public: // Server::HotRestart void drainParentListeners() override {} - int duplicateParentListenSocket(const std::string&) override { return -1; } + int duplicateParentListenSocket(const std::string&, uint32_t) override { return -1; } void initialize(Event::Dispatcher&, Server::Instance&) override {} - void sendParentAdminShutdownRequest(time_t&) override {} + absl::optional sendParentAdminShutdownRequest() override { + return absl::nullopt; + } void sendParentTerminateRequest() override {} ServerStatsFromParent mergeParentStatsIfAny(Stats::StoreRoot&) override { return {}; } void shutdown() override {} diff --git a/source/server/hot_restarting_child.cc b/source/server/hot_restarting_child.cc index ff49a173d5e0e..c060113c29cae 100644 --- a/source/server/hot_restarting_child.cc +++ b/source/server/hot_restarting_child.cc @@ -18,13 +18,15 @@ 
HotRestartingChild::HotRestartingChild(int base_id, int restart_epoch, bindDomainSocket(restart_epoch_, "child", socket_path, socket_mode); } -int HotRestartingChild::duplicateParentListenSocket(const std::string& address) { +int HotRestartingChild::duplicateParentListenSocket(const std::string& address, + uint32_t worker_index) { if (restart_epoch_ == 0 || parent_terminated_) { return -1; } HotRestartMessage wrapped_request; wrapped_request.mutable_request()->mutable_pass_listen_socket()->set_address(address); + wrapped_request.mutable_request()->mutable_pass_listen_socket()->set_worker_index(worker_index); sendHotRestartMessage(parent_address_, wrapped_request); std::unique_ptr wrapped_reply = receiveHotRestartMessage(Blocking::Yes); @@ -59,9 +61,10 @@ void HotRestartingChild::drainParentListeners() { sendHotRestartMessage(parent_address_, wrapped_request); } -void HotRestartingChild::sendParentAdminShutdownRequest(time_t& original_start_time) { +absl::optional +HotRestartingChild::sendParentAdminShutdownRequest() { if (restart_epoch_ == 0 || parent_terminated_) { - return; + return absl::nullopt; } HotRestartMessage wrapped_request; @@ -71,7 +74,10 @@ void HotRestartingChild::sendParentAdminShutdownRequest(time_t& original_start_t std::unique_ptr wrapped_reply = receiveHotRestartMessage(Blocking::Yes); RELEASE_ASSERT(replyIsExpectedType(wrapped_reply.get(), HotRestartMessage::Reply::kShutdownAdmin), "Hot restart parent did not respond as expected to ShutdownParentAdmin."); - original_start_time = wrapped_reply->reply().shutdown_admin().original_start_time_unix_seconds(); + return HotRestart::AdminShutdownResponse{ + static_cast( + wrapped_reply->reply().shutdown_admin().original_start_time_unix_seconds()), + wrapped_reply->reply().shutdown_admin().enable_reuse_port_default()}; } void HotRestartingChild::sendParentTerminateRequest() { diff --git a/source/server/hot_restarting_child.h b/source/server/hot_restarting_child.h index f2a7056874d2d..b2b4b1bbf8324 100644 
--- a/source/server/hot_restarting_child.h +++ b/source/server/hot_restarting_child.h @@ -14,10 +14,10 @@ class HotRestartingChild : HotRestartingBase { HotRestartingChild(int base_id, int restart_epoch, const std::string& socket_path, mode_t socket_mode); - int duplicateParentListenSocket(const std::string& address); + int duplicateParentListenSocket(const std::string& address, uint32_t worker_index); std::unique_ptr getParentStats(); void drainParentListeners(); - void sendParentAdminShutdownRequest(time_t& original_start_time); + absl::optional sendParentAdminShutdownRequest(); void sendParentTerminateRequest(); void mergeParentStats(Stats::Store& stats_store, const envoy::HotRestartMessage::Reply::Stats& stats_proto); diff --git a/source/server/hot_restarting_parent.cc b/source/server/hot_restarting_parent.cc index d399bc7b16ac9..14c012b70cbca 100644 --- a/source/server/hot_restarting_parent.cc +++ b/source/server/hot_restarting_parent.cc @@ -95,6 +95,8 @@ HotRestartMessage HotRestartingParent::Internal::shutdownAdmin() { HotRestartMessage wrapped_reply; wrapped_reply.mutable_reply()->mutable_shutdown_admin()->set_original_start_time_unix_seconds( server_->startTimeFirstEpoch()); + wrapped_reply.mutable_reply()->mutable_shutdown_admin()->set_enable_reuse_port_default( + server_->enableReusePortDefault()); return wrapped_reply; } @@ -107,10 +109,13 @@ HotRestartingParent::Internal::getListenSocketsForChild(const HotRestartMessage: for (const auto& listener : server_->listenerManager().listeners()) { Network::ListenSocketFactory& socket_factory = listener.get().listenSocketFactory(); if (*socket_factory.localAddress() == *addr && listener.get().bindToPort()) { - if (socket_factory.sharedSocket().has_value()) { - // Pass the socket to the new process if it is already shared across workers. + // worker_index() will default to 0 if not set which is the behavior before this field + // was added. Thus, this should be safe for both roll forward and roll back. 
+ if (request.pass_listen_socket().worker_index() < server_->options().concurrency()) { wrapped_reply.mutable_reply()->mutable_pass_listen_socket()->set_fd( - socket_factory.sharedSocket()->get().ioHandle().fdDoNotUse()); + socket_factory.getListenSocket(request.pass_listen_socket().worker_index()) + ->ioHandle() + .fdDoNotUse()); } break; } diff --git a/source/server/listener_impl.cc b/source/server/listener_impl.cc index c4ed9ca729a31..a01633616315d 100644 --- a/source/server/listener_impl.cc +++ b/source/server/listener_impl.cc @@ -61,50 +61,77 @@ bool shouldBindToPort(const envoy::config::listener::v3::Listener& config) { } } // namespace -ListenSocketFactoryImpl::ListenSocketFactoryImpl(ListenerComponentFactory& factory, - Network::Address::InstanceConstSharedPtr address, - Network::Socket::Type socket_type, - const Network::Socket::OptionsSharedPtr& options, - bool bind_to_port, - const std::string& listener_name, bool reuse_port) +ListenSocketFactoryImpl::ListenSocketFactoryImpl( + ListenerComponentFactory& factory, Network::Address::InstanceConstSharedPtr address, + Network::Socket::Type socket_type, const Network::Socket::OptionsSharedPtr& options, + const std::string& listener_name, uint32_t tcp_backlog_size, + ListenerComponentFactory::BindType bind_type, uint32_t num_sockets) : factory_(factory), local_address_(address), socket_type_(socket_type), options_(options), - bind_to_port_(bind_to_port), listener_name_(listener_name), reuse_port_(reuse_port) { + listener_name_(listener_name), tcp_backlog_size_(tcp_backlog_size), bind_type_(bind_type) { - bool create_socket = false; if (local_address_->type() == Network::Address::Type::Ip) { - if (socket_type_ == Network::Socket::Type::Datagram) { - ASSERT(reuse_port_ == true); - } - - if (reuse_port_ == false) { - // create a socket which will be used by all worker threads - create_socket = true; - } else if (local_address_->ip()->port() == 0) { - // port is 0, need to create a socket here for reserving a real 
port number, - // then all worker threads should use same port. - create_socket = true; + if (socket_type == Network::Socket::Type::Datagram) { + ASSERT(bind_type_ == ListenerComponentFactory::BindType::ReusePort); } } else { ASSERT(local_address_->type() == Network::Address::Type::Pipe); // Listeners with Unix domain socket always use shared socket. - create_socket = true; + // TODO(mattklein123): This should be blocked at the config parsing layer instead of getting + // here and disabling reuse_port. + if (bind_type_ == ListenerComponentFactory::BindType::ReusePort) { + bind_type_ = ListenerComponentFactory::BindType::NoReusePort; + } } - if (create_socket) { - socket_ = createListenSocketAndApplyOptions(); - } + sockets_.push_back(createListenSocketAndApplyOptions(factory, socket_type, 0)); - if (socket_ && local_address_->ip() && local_address_->ip()->port() == 0) { - local_address_ = socket_->addressProvider().localAddress(); + if (sockets_[0] != nullptr && local_address_->ip() && local_address_->ip()->port() == 0) { + local_address_ = sockets_[0]->addressProvider().localAddress(); } - ENVOY_LOG(debug, "Set listener {} socket factory local address to {}", listener_name_, + ENVOY_LOG(debug, "Set listener {} socket factory local address to {}", listener_name, local_address_->asString()); -} -Network::SocketSharedPtr ListenSocketFactoryImpl::createListenSocketAndApplyOptions() { - // socket might be nullptr depending on factory_ implementation. - Network::SocketSharedPtr socket = factory_.createListenSocket( - local_address_, socket_type_, options_, {bind_to_port_, !reuse_port_}); + // Now create the remainder of the sockets that will be used by the rest of the workers. 
+ for (uint32_t i = 1; i < num_sockets; i++) { + if (bind_type_ != ListenerComponentFactory::BindType::ReusePort && sockets_[0] != nullptr) { + sockets_.push_back(sockets_[0]->duplicate()); + } else { + sockets_.push_back(createListenSocketAndApplyOptions(factory, socket_type, i)); + } + } + ASSERT(sockets_.size() == num_sockets); +} + +ListenSocketFactoryImpl::ListenSocketFactoryImpl(const ListenSocketFactoryImpl& factory_to_clone) + : factory_(factory_to_clone.factory_), local_address_(factory_to_clone.local_address_), + socket_type_(factory_to_clone.socket_type_), options_(factory_to_clone.options_), + listener_name_(factory_to_clone.listener_name_), + tcp_backlog_size_(factory_to_clone.tcp_backlog_size_), + bind_type_(factory_to_clone.bind_type_) { + for (auto& socket : factory_to_clone.sockets_) { + // In the cloning case we always duplicate() the socket. This makes sure that during listener + // update/drain we don't lose any incoming connections when using reuse_port. Specifically on + // Linux the use of SO_REUSEPORT causes the kernel to allocate a separate queue for each socket + // on the same address. Incoming connections are immediately assigned to one of these queues. + // If connections are in the queue when the socket is closed, they are closed/reset, not sent to + // another queue. So avoid making extra queues in the kernel, even temporarily. + // + // TODO(mattklein123): In the current code as long as the address matches, the socket factory + // will be cloned, effectively ignoring any changed socket options. The code should probably + // block any updates to listeners that use the same address but different socket options. + // (It's possible we could handle changing some socket options, but this would be tricky and + // probably not worth the difficulty.) 
+ sockets_.push_back(socket->duplicate()); + } +} + +Network::SocketSharedPtr ListenSocketFactoryImpl::createListenSocketAndApplyOptions( + ListenerComponentFactory& factory, Network::Socket::Type socket_type, uint32_t worker_index) { + // Socket might be nullptr when doing server validation. + // TODO(mattklein123): See the comment in the validation code. Make that code not return nullptr + // so this code can be simpler. + Network::SocketSharedPtr socket = + factory.createListenSocket(local_address_, socket_type, options_, bind_type_, worker_index); // Binding is done by now. ENVOY_LOG(debug, "Create listen socket for listener {} on address {}", listener_name_, @@ -116,45 +143,44 @@ Network::SocketSharedPtr ListenSocketFactoryImpl::createListenSocketAndApplyOpti fmt::format("{}: Setting socket options {}", listener_name_, ok ? "succeeded" : "failed"); if (!ok) { ENVOY_LOG(warn, "{}", message); - throw Network::CreateListenerException(message); + throw Network::SocketOptionException(message); } else { ENVOY_LOG(debug, "{}", message); } // Add the options to the socket_ so that STATE_LISTENING options can be - // set in the worker after listen()/evconnlistener_new() is called. + // set after listen() is called and immediately before the workers start running. socket->addOptions(options_); } return socket; } -Network::SocketSharedPtr ListenSocketFactoryImpl::getListenSocket() { - if (!reuse_port_) { - // We want to maintain the invariance that listeners do not share the same - // underlying socket. For that reason we return a socket based on a duplicated - // file descriptor. - return socket_->duplicate(); - } - - Network::SocketSharedPtr socket; - absl::call_once(steal_once_, [this, &socket]() { - if (socket_) { - // If a listener's port is set to 0, socket_ should be created for reserving a port - // number, it is handed over to the first worker thread came here. 
- // There are several reasons for doing this: - // - for UDP, once a socket being bound, it begins to receive packets, it can't be - // left unused, and closing it will lost packets received by it. - // - port number should be reserved before adding listener to active_listeners_ list, - // otherwise admin API /listeners might return 0 as listener's port. - socket = std::move(socket_); - } - }); +Network::SocketSharedPtr ListenSocketFactoryImpl::getListenSocket(uint32_t worker_index) { + // Per the TODO above, sockets at this point can never be null. That only happens in the + // config validation path. + ASSERT(worker_index < sockets_.size() && sockets_[worker_index] != nullptr); + return sockets_[worker_index]; +} - if (socket) { - return socket; +void ListenSocketFactoryImpl::doFinalPreWorkerInit() { + if (bind_type_ == ListenerComponentFactory::BindType::NoBind || + socket_type_ != Network::Socket::Type::Stream) { + return; } - return createListenSocketAndApplyOptions(); + for (auto& socket : sockets_) { + // TODO(mattklein123): At some point we lost error handling on this call which I think can + // technically fail (at least according to lingering code comments). Add error handling on this + // in a follow up. 
+ socket->ioHandle().listen(tcp_backlog_size_); + + if (!Network::Socket::applyOptions(socket->options(), *socket, + envoy::config::core::v3::SocketOption::STATE_LISTENING)) { + throw Network::SocketOptionException( + fmt::format("cannot set post-listen socket option on socket: {}", + socket->addressProvider().localAddress()->asString())); + } + } } ListenerFactoryContextBaseImpl::ListenerFactoryContextBaseImpl( @@ -261,6 +287,7 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, parent.factory_.createDrainManager(config.drain_type()))), filter_chain_manager_(address_, listener_factory_context_->parentFactoryContext(), initManager()), + reuse_port_(getReusePortOrDefault(parent_.server_, config_)), cx_limit_runtime_key_("envoy.resource_limits.listener." + config_.name() + ".connection_limit"), open_connections_(std::make_shared( @@ -289,8 +316,10 @@ ListenerImpl::ListenerImpl(const envoy::config::listener::v3::Listener& config, buildAccessLog(); auto socket_type = Network::Utility::protobufAddressSocketType(config.address()); - buildListenSocketOptions(socket_type); + // buildUdpListenerFactory() must come before buildListenSocketOptions() because the UDP + // listener factory can provide additional options. 
buildUdpListenerFactory(socket_type, concurrency); + buildListenSocketOptions(socket_type); createListenerFilterFactories(socket_type); validateFilterChains(socket_type); buildFilterChains(); @@ -338,6 +367,7 @@ ListenerImpl::ListenerImpl(ListenerImpl& origin, origin.listener_factory_context_->listener_factory_context_base_, this, *this)), filter_chain_manager_(address_, origin.listener_factory_context_->parentFactoryContext(), initManager(), origin.filter_chain_manager_), + reuse_port_(origin.reuse_port_), local_init_watcher_(fmt::format("Listener-local-init-watcher {}", name), [this] { ASSERT(workers_started_); @@ -346,8 +376,10 @@ ListenerImpl::ListenerImpl(ListenerImpl& origin, quic_stat_names_(parent_.quicStatNames()) { buildAccessLog(); auto socket_type = Network::Utility::protobufAddressSocketType(config.address()); - buildListenSocketOptions(socket_type); + // buildUdpListenerFactory() must come before buildListenSocketOptions() because the UDP + // listener factory can provide additional options. buildUdpListenerFactory(socket_type, concurrency); + buildListenSocketOptions(socket_type); createListenerFilterFactories(socket_type); validateFilterChains(socket_type); buildFilterChains(); @@ -371,7 +403,7 @@ void ListenerImpl::buildUdpListenerFactory(Network::Socket::Type socket_type, if (socket_type != Network::Socket::Type::Datagram) { return; } - if (!config_.reuse_port() && concurrency > 1) { + if (!reuse_port_ && concurrency > 1) { throw EnvoyException("Listening on UDP when concurrency is > 1 without the SO_REUSEPORT " "socket option results in " "unstable packet proxying. 
Configure the reuse_port listener option or " @@ -419,7 +451,7 @@ void ListenerImpl::buildListenSocketOptions(Network::Socket::Type socket_type) { if (PROTOBUF_GET_WRAPPED_OR_DEFAULT(config_, freebind, false)) { addListenSocketOptions(Network::SocketOptionFactory::buildIpFreebindOptions()); } - if (config_.reuse_port()) { + if (reuse_port_) { addListenSocketOptions(Network::SocketOptionFactory::buildReusePortOptions()); } if (!config_.socket_options().empty()) { @@ -436,6 +468,11 @@ void ListenerImpl::buildListenSocketOptions(Network::Socket::Type socket_type) { // Needed to receive gso_size option addListenSocketOptions(Network::SocketOptionFactory::buildUdpGroOptions()); } + + // Additional factory specific options. + ASSERT(udp_listener_config_->listener_factory_ != nullptr, + "buildUdpListenerFactory() must run first"); + addListenSocketOptions(udp_listener_config_->listener_factory_->socketOptions()); } } @@ -670,9 +707,9 @@ ListenerImpl::~ListenerImpl() { Init::Manager& ListenerImpl::initManager() { return *dynamic_init_manager_; } -void ListenerImpl::setSocketFactory(const Network::ListenSocketFactorySharedPtr& socket_factory) { +void ListenerImpl::setSocketFactory(Network::ListenSocketFactoryPtr&& socket_factory) { ASSERT(!socket_factory_); - socket_factory_ = socket_factory; + socket_factory_ = std::move(socket_factory); } bool ListenerImpl::supportUpdateFilterChain(const envoy::config::listener::v3::Listener& config, @@ -736,6 +773,46 @@ void ListenerImpl::diffFilterChain(const ListenerImpl& another_listener, } } +bool ListenerImpl::getReusePortOrDefault(Server::Instance& server, + const envoy::config::listener::v3::Listener& config) { + bool initial_reuse_port_value = [&server, &config]() { + // If someone set the new field, adhere to it. 
+ if (config.has_enable_reuse_port()) { + if (config.reuse_port()) { + ENVOY_LOG(warn, + "both enable_reuse_port and reuse_port set on listener '{}', preferring " + "enable_reuse_port.", + config.name()); + } + + return config.enable_reuse_port().value(); + } + + // If someone set the old field to true, adhere to it. + if (config.reuse_port()) { + return true; + } + + // Otherwise use the server default which depends on hot restart. + return server.enableReusePortDefault(); + }(); + +#ifndef __linux__ + const auto socket_type = Network::Utility::protobufAddressSocketType(config.address()); + if (initial_reuse_port_value && socket_type == Network::Socket::Type::Stream) { + // reuse_port is the default on Linux for TCP. On other platforms even if set it is disabled + // and the user is warned. For UDP it's always the default even if not effective. + ENVOY_LOG(warn, + "reuse_port was configured for TCP listener '{}' and is being force disabled because " + "Envoy is not running on Linux. See the documentation for more information.", + config.name()); + initial_reuse_port_value = false; + } +#endif + + return initial_reuse_port_value; +} + bool ListenerMessageUtil::filterChainOnlyChange(const envoy::config::listener::v3::Listener& lhs, const envoy::config::listener::v3::Listener& rhs) { Protobuf::util::MessageDifferencer differencer; diff --git a/source/server/listener_impl.h b/source/server/listener_impl.h index 934d85a963d24..d2706136e60f2 100644 --- a/source/server/listener_impl.h +++ b/source/server/listener_impl.h @@ -20,8 +20,6 @@ #include "source/common/quic/quic_stat_names.h" #include "source/server/filter_chain_manager_impl.h" -#include "absl/base/call_once.h" - namespace Envoy { namespace Server { @@ -42,49 +40,51 @@ class ListenSocketFactoryImpl : public Network::ListenSocketFactory, ListenSocketFactoryImpl(ListenerComponentFactory& factory, Network::Address::InstanceConstSharedPtr address, Network::Socket::Type socket_type, - const 
Network::Socket::OptionsSharedPtr& options, bool bind_to_port, - const std::string& listener_name, bool reuse_port); + const Network::Socket::OptionsSharedPtr& options, + const std::string& listener_name, uint32_t tcp_backlog_size, + ListenerComponentFactory::BindType bind_type, uint32_t num_sockets); // Network::ListenSocketFactory Network::Socket::Type socketType() const override { return socket_type_; } const Network::Address::InstanceConstSharedPtr& localAddress() const override { return local_address_; } - - Network::SocketSharedPtr getListenSocket() override; - - /** - * @return the socket shared by worker threads; otherwise return nullopt. - */ - Network::SocketOptRef sharedSocket() const override { - // If listen socket doesn't bind to port, consider it not shared. - if (!bind_to_port_) { - return absl::nullopt; - } - if (!reuse_port_) { - ASSERT(socket_ != nullptr); - return *socket_; + Network::SocketSharedPtr getListenSocket(uint32_t worker_index) override; + Network::ListenSocketFactoryPtr clone() const override { + return absl::WrapUnique(new ListenSocketFactoryImpl(*this)); + } + void closeAllSockets() override { + for (auto& socket : sockets_) { + socket->close(); } - // If reuse_port is true, always return nullopt, even socket_ is created for reserving - // port number. - return absl::nullopt; } - -protected: - Network::SocketSharedPtr createListenSocketAndApplyOptions(); + void doFinalPreWorkerInit() override; private: + ListenSocketFactoryImpl(const ListenSocketFactoryImpl& factory_to_clone); + + Network::SocketSharedPtr createListenSocketAndApplyOptions(ListenerComponentFactory& factory, + Network::Socket::Type socket_type, + uint32_t worker_index); + ListenerComponentFactory& factory_; // Initially, its port number might be 0. Once a socket is created, its port // will be set to the binding port. 
Network::Address::InstanceConstSharedPtr local_address_; - Network::Socket::Type socket_type_; + const Network::Socket::Type socket_type_; const Network::Socket::OptionsSharedPtr options_; - bool bind_to_port_; const std::string listener_name_; - const bool reuse_port_; - Network::SocketSharedPtr socket_; - absl::once_flag steal_once_; + const uint32_t tcp_backlog_size_; + ListenerComponentFactory::BindType bind_type_; + // One socket for each worker, pre-created before the workers fetch the sockets. There are + // 3 different cases: + // 1) All are null when doing config validation. + // 2) A single socket has been duplicated for each worker (no reuse_port). + // 3) A unique socket for each worker (reuse_port). + // + // TODO(mattklein123): If a listener does not bind, it still has a socket. This is confusing + // and not needed and can be cleaned up. + std::vector sockets_; }; // TODO(mattklein123): Consider getting rid of pre-worker start and post-worker start code by @@ -266,28 +266,21 @@ class ListenerImpl final : public Network::ListenerConfig, bool blockUpdate(uint64_t new_hash) { return new_hash == hash_ || !added_via_api_; } bool blockRemove() { return !added_via_api_; } - /** - * Called when a listener failed to be actually created on a worker. - * @return TRUE if we have seen more than one worker failure. 
- */ - bool onListenerCreateFailure() { - bool ret = saw_listener_create_failure_; - saw_listener_create_failure_ = true; - return ret; - } - Network::Address::InstanceConstSharedPtr address() const { return address_; } const envoy::config::listener::v3::Listener& config() const { return config_; } - const Network::ListenSocketFactorySharedPtr& getSocketFactory() const { return socket_factory_; } + const Network::ListenSocketFactory& getSocketFactory() const { return *socket_factory_; } void debugLog(const std::string& message); void initialize(); DrainManager& localDrainManager() const { return listener_factory_context_->listener_factory_context_base_->drainManager(); } - void setSocketFactory(const Network::ListenSocketFactorySharedPtr& socket_factory); + void setSocketFactory(Network::ListenSocketFactoryPtr&& socket_factory); void setSocketAndOptions(const Network::SocketSharedPtr& socket); const Network::Socket::OptionsSharedPtr& listenSocketOptions() { return listen_socket_options_; } const std::string& versionInfo() const { return version_info_; } + bool reusePort() const { return reuse_port_; } + static bool getReusePortOrDefault(Server::Instance& server, + const envoy::config::listener::v3::Listener& config); // Network::ListenerConfig Network::FilterChainManager& filterChainManager() override { return filter_chain_manager_; } @@ -386,7 +379,7 @@ class ListenerImpl final : public Network::ListenerConfig, ListenerManagerImpl& parent_; Network::Address::InstanceConstSharedPtr address_; - Network::ListenSocketFactorySharedPtr socket_factory_; + Network::ListenSocketFactoryPtr socket_factory_; const bool bind_to_port_; const bool hand_off_restored_destination_connections_; const uint32_t per_connection_buffer_limit_bytes_; @@ -408,7 +401,6 @@ class ListenerImpl final : public Network::ListenerConfig, std::vector udp_listener_filter_factories_; std::vector access_logs_; DrainManagerPtr local_drain_manager_; - bool saw_listener_create_failure_{}; const 
envoy::config::listener::v3::Listener config_; const std::string version_info_; Network::Socket::OptionsSharedPtr listen_socket_options_; @@ -418,6 +410,7 @@ class ListenerImpl final : public Network::ListenerConfig, Network::ConnectionBalancerSharedPtr connection_balancer_; std::shared_ptr listener_factory_context_; FilterChainManagerImpl filter_chain_manager_; + const bool reuse_port_; // Per-listener connection limits are only specified via runtime. // diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index 1ce0a0aba68df..6d837c407eb42 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -77,15 +77,6 @@ void fillState(envoy::admin::v3::ListenersConfigDump::DynamicListenerState& stat } } // namespace -bool ListenSocketCreationParams::operator==(const ListenSocketCreationParams& rhs) const { - return (bind_to_port == rhs.bind_to_port) && - (duplicate_parent_socket == rhs.duplicate_parent_socket); -} - -bool ListenSocketCreationParams::operator!=(const ListenSocketCreationParams& rhs) const { - return !operator==(rhs); -} - std::vector ProdListenerComponentFactory::createNetworkFilterFactoryList_( const Protobuf::RepeatedPtrField& filters, Server::Configuration::FilterChainFactoryContext& filter_chain_factory_context) { @@ -187,14 +178,13 @@ Network::ListenerFilterMatcherSharedPtr ProdListenerComponentFactory::createList Network::SocketSharedPtr ProdListenerComponentFactory::createListenSocket( Network::Address::InstanceConstSharedPtr address, Network::Socket::Type socket_type, - const Network::Socket::OptionsSharedPtr& options, const ListenSocketCreationParams& params) { + const Network::Socket::OptionsSharedPtr& options, BindType bind_type, uint32_t worker_index) { ASSERT(address->type() == Network::Address::Type::Ip || address->type() == Network::Address::Type::Pipe); ASSERT(socket_type == Network::Socket::Type::Stream || socket_type == Network::Socket::Type::Datagram); - 
// For each listener config we share a single socket among all threaded listeners. - // First we try to get the socket from our parent if applicable. + // First we try to get the socket from our parent if applicable in each case below. if (address->type() == Network::Address::Type::Pipe) { if (socket_type != Network::Socket::Type::Stream) { // This could be implemented in the future, since Unix domain sockets @@ -204,7 +194,7 @@ Network::SocketSharedPtr ProdListenerComponentFactory::createListenSocket( fmt::format("socket type {} not supported for pipes", toString(socket_type))); } const std::string addr = fmt::format("unix://{}", address->asString()); - const int fd = server_.hotRestart().duplicateParentListenSocket(addr); + const int fd = server_.hotRestart().duplicateParentListenSocket(addr, worker_index); Network::IoHandlePtr io_handle = std::make_unique(fd); if (io_handle->isOpen()) { ENVOY_LOG(debug, "obtained socket for address {} from parent", addr); @@ -218,8 +208,8 @@ Network::SocketSharedPtr ProdListenerComponentFactory::createListenSocket( : std::string(Network::Utility::UDP_SCHEME); const std::string addr = absl::StrCat(scheme, address->asString()); - if (params.bind_to_port && params.duplicate_parent_socket) { - const int fd = server_.hotRestart().duplicateParentListenSocket(addr); + if (bind_type != BindType::NoBind) { + const int fd = server_.hotRestart().duplicateParentListenSocket(addr, worker_index); if (fd != -1) { ENVOY_LOG(debug, "obtained socket for address {} from parent", addr); Network::IoHandlePtr io_handle = std::make_unique(fd); @@ -232,9 +222,11 @@ Network::SocketSharedPtr ProdListenerComponentFactory::createListenSocket( } if (socket_type == Network::Socket::Type::Stream) { - return std::make_shared(address, options, params.bind_to_port); + return std::make_shared(address, options, + bind_type != BindType::NoBind); } else { - return std::make_shared(address, options, params.bind_to_port); + return std::make_shared(address, options, + 
bind_type != BindType::NoBind); } } @@ -457,18 +449,18 @@ bool ListenerManagerImpl::addOrUpdateListenerInternal( ASSERT(workers_started_); new_listener->debugLog("update warming listener"); if (*(*existing_warming_listener)->address() != *new_listener->address()) { - setNewOrDrainingSocketFactory(name, config.address(), *new_listener, config.reuse_port()); + setNewOrDrainingSocketFactory(name, config.address(), *new_listener); } else { - new_listener->setSocketFactory((*existing_warming_listener)->getSocketFactory()); + new_listener->setSocketFactory((*existing_warming_listener)->getSocketFactory().clone()); } *existing_warming_listener = std::move(new_listener); } else if (existing_active_listener != active_listeners_.end()) { // In this case we have no warming listener, so what we do depends on whether workers // have been started or not. if (*(*existing_active_listener)->address() != *new_listener->address()) { - setNewOrDrainingSocketFactory(name, config.address(), *new_listener, config.reuse_port()); + setNewOrDrainingSocketFactory(name, config.address(), *new_listener); } else { - new_listener->setSocketFactory((*existing_active_listener)->getSocketFactory()); + new_listener->setSocketFactory((*existing_active_listener)->getSocketFactory().clone()); } if (workers_started_) { new_listener->debugLog("add warming listener"); @@ -480,7 +472,7 @@ bool ListenerManagerImpl::addOrUpdateListenerInternal( } else { // We have no warming or active listener so we need to make a new one. What we do depends on // whether workers have been started or not. 
- setNewOrDrainingSocketFactory(name, config.address(), *new_listener, config.reuse_port()); + setNewOrDrainingSocketFactory(name, config.address(), *new_listener); if (workers_started_) { new_listener->debugLog("add warming listener"); warming_listeners_.emplace_back(std::move(new_listener)); @@ -513,17 +505,6 @@ bool ListenerManagerImpl::hasListenerWithAddress(const ListenerList& list, return false; } -bool ListenerManagerImpl::shareSocketWithOtherListener( - const ListenerList& list, const Network::ListenSocketFactorySharedPtr& socket_factory) { - ASSERT(socket_factory->sharedSocket().has_value()); - for (const auto& listener : list) { - if (listener->getSocketFactory() == socket_factory) { - return true; - } - } - return false; -} - void ListenerManagerImpl::drainListener(ListenerImplPtr&& listener) { // First add the listener to the draining list. std::list::iterator draining_it = draining_listeners_.emplace( @@ -536,30 +517,13 @@ void ListenerManagerImpl::drainListener(ListenerImplPtr&& listener) { // Tell all workers to stop accepting new connections on this listener. draining_it->listener_->debugLog("draining listener"); const uint64_t listener_tag = draining_it->listener_->listenerTag(); - stopListener( - *draining_it->listener_, - [this, - share_socket = draining_it->listener_->listenSocketFactory().sharedSocket().has_value(), - listener_tag]() { - if (!share_socket) { - // Each listener has its individual socket and closes the socket on its own. - return; - } - for (auto& listener : draining_listeners_) { - if (listener.listener_->listenerTag() == listener_tag) { - // Handle the edge case when new listener is added for the same address as the drained - // one. In this case the socket is shared between both listeners so one should avoid - // closing it. 
- const auto& socket_factory = listener.listener_->getSocketFactory(); - if (!shareSocketWithOtherListener(active_listeners_, socket_factory) && - !shareSocketWithOtherListener(warming_listeners_, socket_factory)) { - // Close the socket iff it is not used anymore. - ASSERT(listener.listener_->listenSocketFactory().sharedSocket().has_value()); - listener.listener_->listenSocketFactory().sharedSocket()->get().close(); - } - } - } - }); + stopListener(*draining_it->listener_, [this, listener_tag]() { + for (auto& listener : draining_listeners_) { + if (listener.listener_->listenerTag() == listener_tag) { + listener.listener_->listenSocketFactory().closeAllSockets(); + } + } + }); // Start the drain sequence which completes when the listener's drain manager has completed // draining at whatever the server configured drain times are. @@ -628,57 +592,48 @@ ListenerManagerImpl::listeners(ListenerState state) { return ret; } +bool ListenerManagerImpl::doFinalPreWorkerListenerInit(ListenerImpl& listener) { + TRY_ASSERT_MAIN_THREAD { + listener.listenSocketFactory().doFinalPreWorkerInit(); + return true; + } + END_TRY + catch (EnvoyException& e) { + ENVOY_LOG(error, "final pre-worker listener init for listener '{}' failed: {}", listener.name(), + e.what()); + return false; + } +} + void ListenerManagerImpl::addListenerToWorker(Worker& worker, absl::optional overridden_listener, ListenerImpl& listener, ListenerCompletionCallback completion_callback) { if (overridden_listener.has_value()) { ENVOY_LOG(debug, "replacing existing listener {}", overridden_listener.value()); - worker.addListener(overridden_listener, listener, [this, completion_callback](bool) -> void { - server_.dispatcher().post([this, completion_callback]() -> void { - stats_.listener_create_success_.inc(); - if (completion_callback) { - completion_callback(); - } - }); - }); - return; } - worker.addListener( - overridden_listener, listener, [this, &listener, completion_callback](bool success) -> void { - // The 
add listener completion runs on the worker thread. Post back to the main thread to - // avoid locking. - server_.dispatcher().post([this, success, &listener, completion_callback]() -> void { - // It is possible for a listener to get added on 1 worker but not the others. The below - // check with onListenerCreateFailure() is there to ensure we execute the - // removal/logging/stats at most once on failure. Note also that drain/removal can race - // with addition. It's guaranteed that workers process remove after add so this should be - // fine. - // - // TODO(mattklein123): We should consider rewriting how listener sockets are added to - // workers, especially in the case of reuse port. If we were to create all needed - // listener sockets on the main thread (even in the case of reuse port) we could catch - // almost all socket errors here. This would both greatly simplify the logic and allow - // for xDS NACK in most cases. - if (!success && !listener.onListenerCreateFailure()) { - ENVOY_LOG(error, "listener '{}' failed to listen on address '{}' on worker", - listener.name(), listener.listenSocketFactory().localAddress()->asString()); - stats_.listener_create_failure_.inc(); - removeListenerInternal(listener.name(), false); - } - if (success) { - stats_.listener_create_success_.inc(); - } - if (completion_callback) { - completion_callback(); - } - }); - }); + worker.addListener(overridden_listener, listener, [this, completion_callback]() -> void { + // The add listener completion runs on the worker thread. Post back to the main thread to + // avoid locking. + server_.dispatcher().post([this, completion_callback]() -> void { + stats_.listener_create_success_.inc(); + if (completion_callback) { + completion_callback(); + } + }); + }); } void ListenerManagerImpl::onListenerWarmed(ListenerImpl& listener) { // The warmed listener should be added first so that the worker will accept new connections // when it stops listening on the old listener. 
+ if (!doFinalPreWorkerListenerInit(listener)) { + incListenerCreateFailureStat(); + // TODO(mattklein123): Technically we don't need to remove the active listener if one exists. + // The following call will remove both. + removeListenerInternal(listener.name(), true); + return; + } for (const auto& worker : workers_) { addListenerToWorker(*worker, absl::nullopt, listener, nullptr); } @@ -846,10 +801,18 @@ void ListenerManagerImpl::startWorkers(GuardDog& guard_dog, std::function>(workers_.size() * active_listeners_.size()); - for (const auto& worker : workers_) { - ENVOY_LOG(debug, "starting worker {}", i); - ASSERT(warming_listeners_.empty()); - for (const auto& listener : active_listeners_) { + ASSERT(warming_listeners_.empty()); + // We need to protect against inline deletion so have to use iterators directly. + for (auto listener_it = active_listeners_.begin(); listener_it != active_listeners_.end();) { + auto& listener = *listener_it; + listener_it++; + + if (!doFinalPreWorkerListenerInit(*listener)) { + incListenerCreateFailureStat(); + removeListenerInternal(listener->name(), false); + continue; + } + for (const auto& worker : workers_) { addListenerToWorker(*worker, absl::nullopt, *listener, [this, listeners_pending_init, callback]() { if (--(*listeners_pending_init) == 0) { @@ -858,6 +821,9 @@ void ListenerManagerImpl::startWorkers(GuardDog& guard_dog, std::functionstart(guard_dog, worker_started_running); if (enable_dispatcher_stats_) { worker->initializeStats(*scope_); @@ -901,21 +867,14 @@ void ListenerManagerImpl::stopListeners(StopListenersType stop_listeners_type) { // Close the socket once all workers stopped accepting its connections. // This allows clients to fast fail instead of waiting in the accept queue. 
const uint64_t listener_tag = listener.listenerTag(); - stopListener(listener, - [this, share_socket = listener.listenSocketFactory().sharedSocket().has_value(), - listener_tag]() { - stats_.listener_stopped_.inc(); - if (!share_socket) { - // Each listener has its own socket and closes the socket - // on its own. - return; - } - for (auto& listener : active_listeners_) { - if (listener->listenerTag() == listener_tag) { - listener->listenSocketFactory().sharedSocket()->get().close(); - } - } - }); + stopListener(listener, [this, listener_tag]() { + stats_.listener_stopped_.inc(); + for (auto& listener : active_listeners_) { + if (listener->listenerTag() == listener_tag) { + listener->listenSocketFactory().closeAllSockets(); + } + } + }); } } } @@ -997,7 +956,7 @@ Network::DrainableFilterChainSharedPtr ListenerFilterChainFactoryBuilder::buildF void ListenerManagerImpl::setNewOrDrainingSocketFactory( const std::string& name, const envoy::config::core::v3::Address& proto_address, - ListenerImpl& listener, bool reuse_port) { + ListenerImpl& listener) { // Typically we catch address issues when we try to bind to the same address multiple times. // However, for listeners that do not bind we must check to make sure we are not duplicating. This // is an edge case and nothing will explicitly break, but there is no possibility that two @@ -1013,39 +972,47 @@ void ListenerManagerImpl::setNewOrDrainingSocketFactory( } // Search through draining listeners to see if there is a listener that has a socket factory for - // the same address we are configured for and doesn't use SO_REUSEPORT. This is an edge case, but + // the same address we are configured for. This is an edge case, but // may happen if a listener is removed and then added back with a same or different name and // intended to listen on the same address. This should work and not fail. 
- Network::ListenSocketFactorySharedPtr draining_listen_socket_factory; + const Network::ListenSocketFactory* draining_listen_socket_factory = nullptr; auto existing_draining_listener = std::find_if( draining_listeners_.cbegin(), draining_listeners_.cend(), [&listener](const DrainingListener& draining_listener) { - return draining_listener.listener_->listenSocketFactory().sharedSocket().has_value() && - draining_listener.listener_->listenSocketFactory().sharedSocket()->get().isOpen() && + return draining_listener.listener_->listenSocketFactory().getListenSocket(0)->isOpen() && *listener.address() == *draining_listener.listener_->listenSocketFactory().localAddress(); }); if (existing_draining_listener != draining_listeners_.cend()) { - draining_listen_socket_factory = existing_draining_listener->listener_->getSocketFactory(); + draining_listen_socket_factory = &existing_draining_listener->listener_->getSocketFactory(); } - Network::Socket::Type socket_type = Network::Utility::protobufAddressSocketType(proto_address); - listener.setSocketFactory( - draining_listen_socket_factory - ? draining_listen_socket_factory - : createListenSocketFactory(proto_address, listener, - (socket_type == Network::Socket::Type::Datagram) || - reuse_port)); + listener.setSocketFactory(draining_listen_socket_factory != nullptr + ? 
draining_listen_socket_factory->clone() + : createListenSocketFactory(proto_address, listener)); } -Network::ListenSocketFactorySharedPtr ListenerManagerImpl::createListenSocketFactory( - const envoy::config::core::v3::Address& proto_address, ListenerImpl& listener, - bool reuse_port) { +Network::ListenSocketFactoryPtr ListenerManagerImpl::createListenSocketFactory( + const envoy::config::core::v3::Address& proto_address, ListenerImpl& listener) { Network::Socket::Type socket_type = Network::Utility::protobufAddressSocketType(proto_address); - return std::make_shared( - factory_, listener.address(), socket_type, listener.listenSocketOptions(), - listener.bindToPort(), listener.name(), reuse_port); + ListenerComponentFactory::BindType bind_type = ListenerComponentFactory::BindType::NoBind; + if (listener.bindToPort()) { + bind_type = listener.reusePort() ? ListenerComponentFactory::BindType::ReusePort + : ListenerComponentFactory::BindType::NoReusePort; + } + TRY_ASSERT_MAIN_THREAD { + return std::make_unique( + factory_, listener.address(), socket_type, listener.listenSocketOptions(), listener.name(), + listener.tcpBacklogSize(), bind_type, server_.options().concurrency()); + } + END_TRY + catch (const EnvoyException& e) { + ENVOY_LOG(error, "listener '{}' failed to bind or apply socket options: {}", listener.name(), + e.what()); + incListenerCreateFailureStat(); + throw e; + } } ApiListenerOptRef ListenerManagerImpl::apiListener() { diff --git a/source/server/listener_manager_impl.h b/source/server/listener_manager_impl.h index eefde4c3af893..2484a23fe2efd 100644 --- a/source/server/listener_manager_impl.h +++ b/source/server/listener_manager_impl.h @@ -94,7 +94,7 @@ class ProdListenerComponentFactory : public ListenerComponentFactory, Network::SocketSharedPtr createListenSocket(Network::Address::InstanceConstSharedPtr address, Network::Socket::Type socket_type, const Network::Socket::OptionsSharedPtr& options, - const ListenSocketCreationParams& params) override; 
+ BindType bind_type, uint32_t worker_index) override; DrainManagerPtr createDrainManager(envoy::config::listener::v3::Listener::DrainType drain_type) override; @@ -228,6 +228,7 @@ class ListenerManagerImpl : public ListenerManager, Logger::Loggable overridden_listener, ListenerImpl& listener, ListenerCompletionCallback completion_callback); @@ -235,9 +236,6 @@ class ListenerManagerImpl : public ListenerManager, Logger::Loggable(initial_config.admin().profilePath(), *this); loadServerFlags(initial_config.flagsPath()); @@ -929,5 +938,20 @@ ProtobufTypes::MessagePtr InstanceImpl::dumpBootstrapConfig() { return config_dump; } +bool InstanceImpl::enableReusePortDefault() { + ASSERT(enable_reuse_port_default_.has_value()); + switch (enable_reuse_port_default_.value()) { + case ReusePortDefault::True: + return true; + case ReusePortDefault::False: + return false; + case ReusePortDefault::Runtime: + return Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.listener_reuse_port_default_enabled"); + } + + NOT_REACHED_GCOVR_EXCL_LINE; +} + } // namespace Server } // namespace Envoy diff --git a/source/server/server.h b/source/server/server.h index ee714ef688a46..945567740bb7e 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -274,23 +274,19 @@ class InstanceImpl final : Logger::Loggable, LocalInfo::LocalInfo& localInfo() const override { return *local_info_; } TimeSource& timeSource() override { return time_source_; } void flushStats() override; - Configuration::StatsConfig& statsConfig() override { return config_.statsConfig(); } envoy::config::bootstrap::v3::Bootstrap& bootstrap() override { return bootstrap_; } - Configuration::ServerFactoryContext& serverFactoryContext() override { return server_contexts_; } - Configuration::TransportSocketFactoryContext& transportSocketFactoryContext() override { return server_contexts_; } - ProtobufMessage::ValidationContext& messageValidationContext() override { return validation_context_; } - void 
setDefaultTracingConfig(const envoy::config::trace::v3::Tracing& tracing_config) override { http_context_.setDefaultTracingConfig(tracing_config); } + bool enableReusePortDefault() override; // ServerLifecycleNotifier ServerLifecycleNotifier::HandlePtr registerCallback(Stage stage, StageCallback callback) override; @@ -298,6 +294,8 @@ class InstanceImpl final : Logger::Loggable, registerCallback(Stage stage, StageCallbackWithCompletion callback) override; private: + enum class ReusePortDefault { True, False, Runtime }; + ProtobufTypes::MessagePtr dumpBootstrapConfig(); void flushStatsInternal(); void updateServerStats(); @@ -384,8 +382,8 @@ class InstanceImpl final : Logger::Loggable, // whenever we have support for histogram merge across hot restarts. Stats::TimespanPtr initialization_timer_; ListenerHooks& hooks_; - ServerFactoryContextImpl server_contexts_; + absl::optional enable_reuse_port_default_; bool stats_flush_in_progress_ : 1; diff --git a/source/server/worker_impl.cc b/source/server/worker_impl.cc index ae15ced3c0856..55fc17a281490 100644 --- a/source/server/worker_impl.cc +++ b/source/server/worker_impl.cc @@ -39,22 +39,10 @@ WorkerImpl::WorkerImpl(ThreadLocal::Instance& tls, ListenerHooks& hooks, void WorkerImpl::addListener(absl::optional overridden_listener, Network::ListenerConfig& listener, AddListenerCompletion completion) { - // All listener additions happen via post. However, we must deal with the case where the listener - // can not be created on the worker. There is a race condition where 2 processes can successfully - // bind to an address, but then fail to listen() with `EADDRINUSE`. During initial startup, we - // want to surface this. dispatcher_->post([this, overridden_listener, &listener, completion]() -> void { - // TODO(chaoqin-li1123): Make add listener return a error status instead of catching an - // exception. 
- TRY_NEEDS_AUDIT { - handler_->addListener(overridden_listener, listener); - hooks_.onWorkerListenerAdded(); - completion(true); - } - catch (const Network::CreateListenerException& e) { - ENVOY_LOG(error, "failed to add listener on worker: {}", e.what()); - completion(false); - } + handler_->addListener(overridden_listener, listener); + hooks_.onWorkerListenerAdded(); + completion(); }); } @@ -117,7 +105,6 @@ void WorkerImpl::stop() { } void WorkerImpl::stopListener(Network::ListenerConfig& listener, std::function completion) { - ASSERT(thread_); const uint64_t listener_tag = listener.listenerTag(); dispatcher_->post([this, listener_tag, completion]() -> void { handler_->stopListeners(listener_tag); diff --git a/test/common/http/codec_client_test.cc b/test/common/http/codec_client_test.cc index 25a854defb3bc..202fbc7ab71ea 100644 --- a/test/common/http/codec_client_test.cc +++ b/test/common/http/codec_client_test.cc @@ -297,13 +297,12 @@ class CodecNetworkTest : public Event::TestUsingSimulatedTime, public: CodecNetworkTest() : api_(Api::createApiForTest()), stream_info_(api_->timeSource(), nullptr) { dispatcher_ = api_->allocateDispatcher("test_thread"); - auto socket = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam())); Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection( socket->addressProvider().localAddress(), source_address_, Network::Test::createRawBufferSocket(), nullptr); - upstream_listener_ = dispatcher_->createListener(std::move(socket), listener_callbacks_, true, - ENVOY_TCP_BACKLOG_SIZE); + upstream_listener_ = dispatcher_->createListener(std::move(socket), listener_callbacks_, true); client_connection_ = client_connection.get(); client_connection_->addConnectionCallbacks(client_callbacks_); diff --git a/test/common/network/connection_impl_test.cc 
b/test/common/network/connection_impl_test.cc index 25bf57e1f8921..39262ba7164e8 100644 --- a/test/common/network/connection_impl_test.cc +++ b/test/common/network/connection_impl_test.cc @@ -134,10 +134,9 @@ class ConnectionImplTest : public testing::TestWithParam { if (dispatcher_ == nullptr) { dispatcher_ = api_->allocateDispatcher("test_thread"); } - socket_ = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); - listener_ = - dispatcher_->createListener(socket_, listener_callbacks_, true, ENVOY_TCP_BACKLOG_SIZE); + socket_ = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam())); + listener_ = dispatcher_->createListener(socket_, listener_callbacks_, true); client_connection_ = std::make_unique( *dispatcher_, socket_->addressProvider().localAddress(), source_address_, Network::Test::createRawBufferSocket(), socket_options_); @@ -376,8 +375,8 @@ TEST_P(ConnectionImplTest, ImmediateConnectError) { // Using a broadcast/multicast address as the connection destinations address causes an // immediate error return from connect(). 
Address::InstanceConstSharedPtr broadcast_address; - socket_ = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); + socket_ = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam())); if (socket_->addressProvider().localAddress()->ip()->version() == Address::IpVersion::v4) { broadcast_address = std::make_shared("224.0.0.1", 0); } else { @@ -1340,10 +1339,9 @@ TEST_P(ConnectionImplTest, BindFailureTest) { new Network::Address::Ipv6Instance(address_string, 0, nullptr)}; } dispatcher_ = api_->allocateDispatcher("test_thread"); - socket_ = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); - listener_ = - dispatcher_->createListener(socket_, listener_callbacks_, true, ENVOY_TCP_BACKLOG_SIZE); + socket_ = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam())); + listener_ = dispatcher_->createListener(socket_, listener_callbacks_, true); client_connection_ = dispatcher_->createClientConnection( socket_->addressProvider().localAddress(), source_address_, @@ -2811,10 +2809,9 @@ class ReadBufferLimitTest : public ConnectionImplTest { void readBufferLimitTest(uint32_t read_buffer_limit, uint32_t expected_chunk_size) { const uint32_t buffer_size = 256 * 1024; dispatcher_ = api_->allocateDispatcher("test_thread"); - socket_ = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); - listener_ = - dispatcher_->createListener(socket_, listener_callbacks_, true, ENVOY_TCP_BACKLOG_SIZE); + socket_ = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam())); + listener_ = dispatcher_->createListener(socket_, listener_callbacks_, true); client_connection_ = dispatcher_->createClientConnection( socket_->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), diff --git a/test/common/network/dns_impl_test.cc b/test/common/network/dns_impl_test.cc index 11a6cfb2001fb..ff513d7128108 100644 
--- a/test/common/network/dns_impl_test.cc +++ b/test/common/network/dns_impl_test.cc @@ -437,9 +437,9 @@ class DnsImplTest : public testing::TestWithParam { void SetUp() override { // Instantiate TestDnsServer and listen on a random port on the loopback address. server_ = std::make_unique(*dispatcher_); - socket_ = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); - listener_ = dispatcher_->createListener(socket_, *server_, true, ENVOY_TCP_BACKLOG_SIZE); + socket_ = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam())); + listener_ = dispatcher_->createListener(socket_, *server_, true); updateDnsResolverOptions(); if (setResolverInConstructor()) { resolver_ = dispatcher_->createDnsResolver({socket_->addressProvider().localAddress()}, diff --git a/test/common/network/listener_impl_test.cc b/test/common/network/listener_impl_test.cc index 15deae2a43210..562b5cba4ec68 100644 --- a/test/common/network/listener_impl_test.cc +++ b/test/common/network/listener_impl_test.cc @@ -32,11 +32,10 @@ static void errorCallbackTest(Address::IpVersion version) { Api::ApiPtr api = Api::createApiForTest(); Event::DispatcherPtr dispatcher(api->allocateDispatcher("test_thread")); - auto socket = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(version), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(version)); Network::MockTcpListenerCallbacks listener_callbacks; - Network::ListenerPtr listener = - dispatcher->createListener(socket, listener_callbacks, true, ENVOY_TCP_BACKLOG_SIZE); + Network::ListenerPtr listener = dispatcher->createListener(socket, listener_callbacks, true); Network::ClientConnectionPtr client_connection = dispatcher->createClientConnection( socket->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), @@ -67,10 +66,8 @@ TEST_P(ListenerImplDeathTest, ErrorCallback) { class TestTcpListenerImpl : public TcpListenerImpl { 
public: TestTcpListenerImpl(Event::DispatcherImpl& dispatcher, Random::RandomGenerator& random_generator, - SocketSharedPtr socket, TcpListenerCallbacks& cb, bool bind_to_port, - uint32_t tcp_backlog = ENVOY_TCP_BACKLOG_SIZE) - : TcpListenerImpl(dispatcher, random_generator, std::move(socket), cb, bind_to_port, - tcp_backlog) {} + SocketSharedPtr socket, TcpListenerCallbacks& cb, bool bind_to_port) + : TcpListenerImpl(dispatcher, random_generator, std::move(socket), cb, bind_to_port) {} MOCK_METHOD(Address::InstanceConstSharedPtr, getLocalAddress, (os_fd_t fd)); }; @@ -80,42 +77,9 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, TcpListenerImplTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); -// Test that socket options are set after the listener is setup. -TEST_P(TcpListenerImplTest, SetListeningSocketOptionsSuccess) { - Network::MockTcpListenerCallbacks listener_callbacks; - Random::MockRandomGenerator random_generator; - - auto socket = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true); - std::shared_ptr option = std::make_shared(); - socket->addOption(option); - EXPECT_CALL(*option, setOption(_, envoy::config::core::v3::SocketOption::STATE_LISTENING)) - .WillOnce(Return(true)); - TestTcpListenerImpl listener(dispatcherImpl(), random_generator, socket, listener_callbacks, - true); -} - -// Test that an exception is thrown if there is an error setting socket options. 
-TEST_P(TcpListenerImplTest, SetListeningSocketOptionsError) { - Network::MockTcpListenerCallbacks listener_callbacks; - Random::MockRandomGenerator random_generator; - - auto socket = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true); - std::shared_ptr option = std::make_shared(); - socket->addOption(option); - EXPECT_CALL(*option, setOption(_, envoy::config::core::v3::SocketOption::STATE_LISTENING)) - .WillOnce(Return(false)); - EXPECT_THROW_WITH_MESSAGE( - TestTcpListenerImpl(dispatcherImpl(), random_generator, socket, listener_callbacks, true), - CreateListenerException, - fmt::format("cannot set post-listen socket option on socket: {}", - socket->addressProvider().localAddress()->asString())); -} - TEST_P(TcpListenerImplTest, UseActualDst) { - auto socket = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(version_)); auto socketDst = std::make_shared(alt_address_, nullptr, false); Network::MockTcpListenerCallbacks listener_callbacks1; Random::MockRandomGenerator random_generator; @@ -155,11 +119,10 @@ TEST_P(TcpListenerImplTest, GlobalConnectionLimitEnforcement) { Runtime::LoaderSingleton::getExisting()->mergeValues( {{"overload.global_downstream_max_connections", "2"}}); - auto socket = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(version_)); Network::MockTcpListenerCallbacks listener_callbacks; - Network::ListenerPtr listener = - dispatcher_->createListener(socket, listener_callbacks, true, ENVOY_TCP_BACKLOG_SIZE); + Network::ListenerPtr listener = dispatcher_->createListener(socket, listener_callbacks, true); std::vector client_connections; std::vector server_connections; @@ -218,8 +181,8 @@ TEST_P(TcpListenerImplTest, GlobalConnectionLimitEnforcement) { } 
TEST_P(TcpListenerImplTest, WildcardListenerUseActualDst) { - auto socket = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(version_)); Network::MockTcpListenerCallbacks listener_callbacks; Random::MockRandomGenerator random_generator; // Do not redirect since use_original_dst is false. @@ -259,8 +222,8 @@ TEST_P(TcpListenerImplTest, WildcardListenerIpv4Compat) { .WillOnce(Return(true)); options->emplace_back(std::move(option)); - auto socket = std::make_shared(Network::Test::getAnyAddress(version_, true), - options, true); + auto socket = std::make_shared( + Network::Test::getAnyAddress(version_, true), options); Network::MockTcpListenerCallbacks listener_callbacks; Random::MockRandomGenerator random_generator; @@ -302,8 +265,8 @@ TEST_P(TcpListenerImplTest, WildcardListenerIpv4Compat) { TEST_P(TcpListenerImplTest, DisableAndEnableListener) { testing::InSequence s1; - auto socket = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(version_)); MockTcpListenerCallbacks listener_callbacks; MockConnectionCallbacks connection_callbacks; Random::MockRandomGenerator random_generator; @@ -343,8 +306,8 @@ TEST_P(TcpListenerImplTest, DisableAndEnableListener) { } TEST_P(TcpListenerImplTest, SetListenerRejectFractionZero) { - auto socket = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(version_)); MockTcpListenerCallbacks listener_callbacks; MockConnectionCallbacks connection_callbacks; Random::MockRandomGenerator random_generator; @@ -374,8 +337,8 @@ TEST_P(TcpListenerImplTest, SetListenerRejectFractionZero) { } TEST_P(TcpListenerImplTest, SetListenerRejectFractionIntermediate) { - auto socket = std::make_shared( - 
Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(version_)); MockTcpListenerCallbacks listener_callbacks; MockConnectionCallbacks connection_callbacks; Random::MockRandomGenerator random_generator; @@ -437,8 +400,8 @@ TEST_P(TcpListenerImplTest, SetListenerRejectFractionIntermediate) { } TEST_P(TcpListenerImplTest, SetListenerRejectFractionAll) { - auto socket = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(version_), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(version_)); MockTcpListenerCallbacks listener_callbacks; MockConnectionCallbacks connection_callbacks; Random::MockRandomGenerator random_generator; diff --git a/test/common/network/udp_fuzz.cc b/test/common/network/udp_fuzz.cc index f1768976306fc..67534ef927e70 100644 --- a/test/common/network/udp_fuzz.cc +++ b/test/common/network/udp_fuzz.cc @@ -61,6 +61,8 @@ class UdpFuzz { server_socket_ = createServerSocket(true, ip_version_); server_socket_->addOptions(Network::SocketOptionFactory::buildIpPacketInfoOptions()); server_socket_->addOptions(Network::SocketOptionFactory::buildRxQueueOverFlowOptions()); + EXPECT_TRUE(Network::Socket::applyOptions(server_socket_->options(), *server_socket_, + envoy::config::core::v3::SocketOption::STATE_BOUND)); // Create packet writer udp_packet_writer_ = std::make_unique(server_socket_->ioHandle()); diff --git a/test/common/network/udp_listener_impl_test.cc b/test/common/network/udp_listener_impl_test.cc index 7c22ca61c626a..1561d4ba05b6d 100644 --- a/test/common/network/udp_listener_impl_test.cc +++ b/test/common/network/udp_listener_impl_test.cc @@ -71,6 +71,8 @@ class UdpListenerImplTest : public UdpListenerImplTestBase { envoy::config::core::v3::SocketOption::STATE_BOUND, ENVOY_MAKE_SOCKET_OPTION_NAME(SOL_SOCKET, SO_RCVBUF), 4 * 1024 * 1024)); server_socket_->addOptions(std::move(options)); + 
ASSERT_TRUE(Network::Socket::applyOptions(server_socket_->options(), *server_socket_, + envoy::config::core::v3::SocketOption::STATE_BOUND)); envoy::config::core::v3::UdpSocketConfig config; if (prefer_gro) { config.mutable_prefer_gro()->set_value(prefer_gro); @@ -101,32 +103,6 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, UdpListenerImplTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); -// Test that socket options are set after the listener is setup. -TEST_P(UdpListenerImplTest, UdpSetListeningSocketOptionsSuccess) { - setup(); - - MockUdpListenerCallbacks listener_callbacks; - auto socket = std::make_shared(Network::Test::getAnyAddress(version_), - nullptr, true); - std::shared_ptr option = std::make_shared(); - socket->addOption(option); - EXPECT_CALL(*option, setOption(_, envoy::config::core::v3::SocketOption::STATE_BOUND)) - .WillOnce(Return(true)); - UdpListenerImpl listener(dispatcherImpl(), socket, listener_callbacks, - dispatcherImpl().timeSource(), - envoy::config::core::v3::UdpSocketConfig()); - -#ifdef SO_RXQ_OVFL - // Verify that overflow detection is enabled. - int get_overflow = 0; - socklen_t int_size = static_cast(sizeof(get_overflow)); - const Api::SysCallIntResult result = - server_socket_->getSocketOption(SOL_SOCKET, SO_RXQ_OVFL, &get_overflow, &int_size); - EXPECT_EQ(0, result.rc_); - EXPECT_EQ(1, get_overflow); -#endif -} - /** * Tests UDP listener for actual destination and data. 
*/ diff --git a/test/common/quic/active_quic_listener_test.cc b/test/common/quic/active_quic_listener_test.cc index edc1bb8fee0c7..5313ba2513f14 100644 --- a/test/common/quic/active_quic_listener_test.cc +++ b/test/common/quic/active_quic_listener_test.cc @@ -106,9 +106,11 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { std::make_shared(local_address_, nullptr, /*bind*/ true); listen_socket_->addOptions(Network::SocketOptionFactory::buildIpPacketInfoOptions()); listen_socket_->addOptions(Network::SocketOptionFactory::buildRxQueueOverFlowOptions()); + ASSERT_TRUE(Network::Socket::applyOptions(listen_socket_->options(), *listen_socket_, + envoy::config::core::v3::SocketOption::STATE_BOUND)); ON_CALL(listener_config_, listenSocketFactory()).WillByDefault(ReturnRef(socket_factory_)); - ON_CALL(socket_factory_, getListenSocket()).WillByDefault(Return(listen_socket_)); + ON_CALL(socket_factory_, getListenSocket(_)).WillByDefault(Return(listen_socket_)); // Use UdpGsoBatchWriter to perform non-batched writes for the purpose of this test, if it is // supported. 
@@ -332,25 +334,6 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { INSTANTIATE_TEST_SUITE_P(ActiveQuicListenerTests, ActiveQuicListenerTest, testing::ValuesIn(generateTestParam()), testParamsToString); -TEST_P(ActiveQuicListenerTest, FailSocketOptionUponCreation) { - initialize(); - auto option = std::make_unique(); - EXPECT_CALL(*option, setOption(_, envoy::config::core::v3::SocketOption::STATE_BOUND)) - .WillOnce(Return(false)); - auto options = std::make_shared>(); - options->emplace_back(std::move(option)); - quic_listener_.reset(); - EnvoyQuicCryptoServerStreamFactoryImpl crypto_stream_factory; - EnvoyQuicProofSourceFactoryImpl proof_source_factory; - EXPECT_THROW_WITH_REGEX((void)std::make_unique( - 0, 1, *dispatcher_, connection_handler_, listen_socket_, - listener_config_, quic_config_, options, false, - ActiveQuicListenerFactoryPeer::runtimeEnabled( - static_cast(listener_factory_.get())), - quic_stat_names_, 32u, crypto_stream_factory, proof_source_factory), - Network::CreateListenerException, "Failed to apply socket options."); -} - TEST_P(ActiveQuicListenerTest, ReceiveCHLO) { initialize(); quic::QuicBufferedPacketStore* const buffered_packets = diff --git a/test/config/integration/server.yaml b/test/config/integration/server.yaml index 85bc2fac1f7c9..c874d5a09afe9 100644 --- a/test/config/integration/server.yaml +++ b/test/config/integration/server.yaml @@ -4,7 +4,7 @@ static_resources: socket_address: address: "{{ ip_loopback_address }}" port_value: 0 - reuse_port: "{{ reuse_port }}" + enable_reuse_port: {{ enable_reuse_port }} filter_chains: - filters: - name: http diff --git a/test/config/utility.cc b/test/config/utility.cc index 0ac40d6d3fffd..ac21e0fc31644 100644 --- a/test/config/utility.cc +++ b/test/config/utility.cc @@ -1116,12 +1116,7 @@ void ConfigHelper::addSslConfig(const ServerSslOptions& options) { filter_chain->mutable_transport_socket()->mutable_typed_config()->PackFrom(tls_context); } -void 
ConfigHelper::addQuicDownstreamTransportSocketConfig(bool reuse_port) { - for (auto& listener : *bootstrap_.mutable_static_resources()->mutable_listeners()) { - if (listener.udp_listener_config().has_quic_options()) { - listener.set_reuse_port(reuse_port); - } - } +void ConfigHelper::addQuicDownstreamTransportSocketConfig() { configDownstreamTransportSocketWithTls( bootstrap_, [](envoy::extensions::transport_sockets::tls::v3::CommonTlsContext& common_tls_context) { diff --git a/test/config/utility.h b/test/config/utility.h index 55dba9b7768c0..cdfdc80eeb8b3 100644 --- a/test/config/utility.h +++ b/test/config/utility.h @@ -246,7 +246,7 @@ class ConfigHelper { void addSslConfig() { addSslConfig({}); } // Add the default SSL configuration for QUIC downstream. - void addQuicDownstreamTransportSocketConfig(bool reuse_port); + void addQuicDownstreamTransportSocketConfig(); // Set the HTTP access log for the first HCM (if present) to a given file. The default is // the platform's null device. 
diff --git a/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc b/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc index f739c187f0da0..aea16f0433be4 100644 --- a/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc +++ b/test/extensions/common/proxy_protocol/proxy_protocol_regression_test.cc @@ -41,15 +41,15 @@ class ProxyProtocolRegressionTest : public testing::TestWithParamallocateDispatcher("test_thread")), - socket_(std::make_shared( - Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true)), + socket_(std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam()))), connection_handler_(new Server::ConnectionHandlerImpl(*dispatcher_, absl::nullopt)), name_("proxy"), filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()), init_manager_(nullptr) { EXPECT_CALL(socket_factory_, socketType()).WillOnce(Return(Network::Socket::Type::Stream)); EXPECT_CALL(socket_factory_, localAddress()) .WillOnce(ReturnRef(socket_->addressProvider().localAddress())); - EXPECT_CALL(socket_factory_, getListenSocket()).WillOnce(Return(socket_)); + EXPECT_CALL(socket_factory_, getListenSocket(_)).WillOnce(Return(socket_)); connection_handler_->addListener(absl::nullopt, *this); conn_ = dispatcher_->createClientConnection(socket_->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), diff --git a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc index ff3082ed5d042..e9cc8f02ec022 100644 --- a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc +++ b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc @@ -55,15 +55,15 @@ class ProxyProtocolTest : public testing::TestWithParamallocateDispatcher("test_thread")), - socket_(std::make_shared( - Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true)), + 
socket_(std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam()))), connection_handler_(new Server::ConnectionHandlerImpl(*dispatcher_, absl::nullopt)), name_("proxy"), filter_chain_(Network::Test::createEmptyFilterChainWithRawBufferSockets()), init_manager_(nullptr) { EXPECT_CALL(socket_factory_, socketType()).WillOnce(Return(Network::Socket::Type::Stream)); EXPECT_CALL(socket_factory_, localAddress()) .WillOnce(ReturnRef(socket_->addressProvider().localAddress())); - EXPECT_CALL(socket_factory_, getListenSocket()).WillOnce(Return(socket_)); + EXPECT_CALL(socket_factory_, getListenSocket(_)).WillOnce(Return(socket_)); connection_handler_->addListener(absl::nullopt, *this); conn_ = dispatcher_->createClientConnection(socket_->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), @@ -1281,8 +1281,8 @@ class WildcardProxyProtocolTest : public testing::TestWithParamallocateDispatcher("test_thread")), - socket_(std::make_shared(Network::Test::getAnyAddress(GetParam()), - nullptr, true)), + socket_(std::make_shared( + Network::Test::getAnyAddress(GetParam()))), local_dst_address_(Network::Utility::getAddressWithPort( *Network::Test::getCanonicalLoopbackAddress(GetParam()), socket_->addressProvider().localAddress()->ip()->port())), @@ -1292,7 +1292,7 @@ class WildcardProxyProtocolTest : public testing::TestWithParamaddressProvider().localAddress())); - EXPECT_CALL(socket_factory_, getListenSocket()).WillOnce(Return(socket_)); + EXPECT_CALL(socket_factory_, getListenSocket(_)).WillOnce(Return(socket_)); connection_handler_->addListener(absl::nullopt, *this); conn_ = dispatcher_->createClientConnection(local_dst_address_, Network::Address::InstanceConstSharedPtr(), diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc index e520062aba748..ddf42cf751850 100644 --- 
a/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc +++ b/test/extensions/filters/udp/dns_filter/dns_filter_integration_test.cc @@ -75,7 +75,6 @@ class DnsFilterIntegrationTest : public testing::TestWithParam 1. TEST_P(UdpProxyIntegrationTest, NoReusePort) { concurrency_ = 2; + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + bootstrap.mutable_static_resources() + ->mutable_listeners(0) + ->mutable_enable_reuse_port() + ->set_value(false); + }); // Do not wait for listeners to start as the listener will fail. defer_listener_finalization_ = true; setup(1); diff --git a/test/extensions/transport_sockets/tls/ssl_socket_test.cc b/test/extensions/transport_sockets/tls/ssl_socket_test.cc index 1c4cb541fae78..0e501606c5658 100644 --- a/test/extensions/transport_sockets/tls/ssl_socket_test.cc +++ b/test/extensions/transport_sockets/tls/ssl_socket_test.cc @@ -313,11 +313,10 @@ void testUtil(const TestUtilOptions& options) { server_stats_store, std::vector{}); Event::DispatcherPtr dispatcher = server_api->allocateDispatcher("test_thread"); - auto socket = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(options.version()), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(options.version())); Network::MockTcpListenerCallbacks callbacks; - Network::ListenerPtr listener = - dispatcher->createListener(socket, callbacks, true, ENVOY_TCP_BACKLOG_SIZE); + Network::ListenerPtr listener = dispatcher->createListener(socket, callbacks, true); envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client_tls_context; TestUtility::loadFromYaml(TestEnvironment::substitute(options.clientCtxYaml()), @@ -630,11 +629,10 @@ void testUtilV2(const TestUtilOptionsV2& options) { server_stats_store, server_names); Event::DispatcherPtr dispatcher(server_api->allocateDispatcher("test_thread")); - auto socket = std::make_shared( - 
Network::Test::getCanonicalLoopbackAddress(options.version()), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(options.version())); NiceMock callbacks; - Network::ListenerPtr listener = - dispatcher->createListener(socket, callbacks, true, ENVOY_TCP_BACKLOG_SIZE); + Network::ListenerPtr listener = dispatcher->createListener(socket, callbacks, true); Stats::TestUtil::TestStore client_stats_store; Api::ApiPtr client_api = Api::createApiForTest(client_stats_store, time_system); @@ -2419,11 +2417,10 @@ TEST_P(SslSocketTest, FlushCloseDuringHandshake) { ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager, server_stats_store, std::vector{}); - auto socket = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam())); Network::MockTcpListenerCallbacks callbacks; - Network::ListenerPtr listener = - dispatcher_->createListener(socket, callbacks, true, ENVOY_TCP_BACKLOG_SIZE); + Network::ListenerPtr listener = dispatcher_->createListener(socket, callbacks, true); Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection( socket->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), @@ -2475,11 +2472,10 @@ TEST_P(SslSocketTest, HalfClose) { ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager, server_stats_store, std::vector{}); - auto socket = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam())); Network::MockTcpListenerCallbacks listener_callbacks; - Network::ListenerPtr listener = - dispatcher_->createListener(socket, listener_callbacks, true, ENVOY_TCP_BACKLOG_SIZE); + Network::ListenerPtr listener = dispatcher_->createListener(socket, listener_callbacks, true); 
std::shared_ptr server_read_filter(new Network::MockReadFilter()); std::shared_ptr client_read_filter(new Network::MockReadFilter()); @@ -2557,11 +2553,10 @@ TEST_P(SslSocketTest, ShutdownWithCloseNotify) { ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager, server_stats_store, std::vector{}); - auto socket = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam())); Network::MockTcpListenerCallbacks listener_callbacks; - Network::ListenerPtr listener = - dispatcher_->createListener(socket, listener_callbacks, true, ENVOY_TCP_BACKLOG_SIZE); + Network::ListenerPtr listener = dispatcher_->createListener(socket, listener_callbacks, true); std::shared_ptr server_read_filter(new Network::MockReadFilter()); std::shared_ptr client_read_filter(new Network::MockReadFilter()); @@ -2645,11 +2640,10 @@ TEST_P(SslSocketTest, ShutdownWithoutCloseNotify) { ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager, server_stats_store, std::vector{}); - auto socket = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam())); Network::MockTcpListenerCallbacks listener_callbacks; - Network::ListenerPtr listener = - dispatcher_->createListener(socket, listener_callbacks, true, ENVOY_TCP_BACKLOG_SIZE); + Network::ListenerPtr listener = dispatcher_->createListener(socket, listener_callbacks, true); std::shared_ptr server_read_filter(new Network::MockReadFilter()); std::shared_ptr client_read_filter(new Network::MockReadFilter()); @@ -2749,11 +2743,10 @@ TEST_P(SslSocketTest, ClientAuthMultipleCAs) { ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager, server_stats_store, std::vector{}); - auto socket = std::make_shared( - 
Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam())); Network::MockTcpListenerCallbacks callbacks; - Network::ListenerPtr listener = - dispatcher_->createListener(socket, callbacks, true, ENVOY_TCP_BACKLOG_SIZE); + Network::ListenerPtr listener = dispatcher_->createListener(socket, callbacks, true); const std::string client_ctx_yaml = R"EOF( common_tls_context: @@ -2844,16 +2837,14 @@ void testTicketSessionResumption(const std::string& server_ctx_yaml1, ServerSslSocketFactory server_ssl_socket_factory2(std::move(server_cfg2), manager, server_stats_store, server_names2); - auto socket1 = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(ip_version), nullptr, true); - auto socket2 = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(ip_version), nullptr, true); + auto socket1 = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(ip_version)); + auto socket2 = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(ip_version)); NiceMock callbacks; Event::DispatcherPtr dispatcher(server_api->allocateDispatcher("test_thread")); - Network::ListenerPtr listener1 = - dispatcher->createListener(socket1, callbacks, true, ENVOY_TCP_BACKLOG_SIZE); - Network::ListenerPtr listener2 = - dispatcher->createListener(socket2, callbacks, true, ENVOY_TCP_BACKLOG_SIZE); + Network::ListenerPtr listener1 = dispatcher->createListener(socket1, callbacks, true); + Network::ListenerPtr listener2 = dispatcher->createListener(socket2, callbacks, true); envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client_tls_context; TestUtility::loadFromYaml(TestEnvironment::substitute(client_ctx_yaml), client_tls_context); @@ -2985,12 +2976,11 @@ void testSupportForStatelessSessionResumption(const std::string& server_ctx_yaml ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager, server_stats_store, {}); - auto 
tcp_socket = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(ip_version), nullptr, true); + auto tcp_socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(ip_version)); NiceMock callbacks; Event::DispatcherPtr dispatcher(server_api->allocateDispatcher("test_thread")); - Network::ListenerPtr listener = - dispatcher->createListener(tcp_socket, callbacks, true, ENVOY_TCP_BACKLOG_SIZE); + Network::ListenerPtr listener = dispatcher->createListener(tcp_socket, callbacks, true); envoy::extensions::transport_sockets::tls::v3::UpstreamTlsContext client_tls_context; TestUtility::loadFromYaml(TestEnvironment::substitute(client_ctx_yaml), client_tls_context); @@ -3426,15 +3416,13 @@ TEST_P(SslSocketTest, ClientAuthCrossListenerSessionResumption) { ServerSslSocketFactory server2_ssl_socket_factory(std::move(server2_cfg), manager, server_stats_store, std::vector{}); - auto socket = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); - auto socket2 = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam())); + auto socket2 = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam())); Network::MockTcpListenerCallbacks callbacks; - Network::ListenerPtr listener = - dispatcher_->createListener(socket, callbacks, true, ENVOY_TCP_BACKLOG_SIZE); - Network::ListenerPtr listener2 = - dispatcher_->createListener(socket2, callbacks, true, ENVOY_TCP_BACKLOG_SIZE); + Network::ListenerPtr listener = dispatcher_->createListener(socket, callbacks, true); + Network::ListenerPtr listener2 = dispatcher_->createListener(socket2, callbacks, true); const std::string client_ctx_yaml = R"EOF( common_tls_context: tls_certificates: @@ -3544,13 +3532,12 @@ void SslSocketTest::testClientSessionResumption(const std::string& server_ctx_ya ServerSslSocketFactory 
server_ssl_socket_factory(std::move(server_cfg), manager, server_stats_store, std::vector{}); - auto socket = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(version), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(version)); NiceMock callbacks; Api::ApiPtr api = Api::createApiForTest(server_stats_store, time_system_); Event::DispatcherPtr dispatcher(server_api->allocateDispatcher("test_thread")); - Network::ListenerPtr listener = - dispatcher->createListener(socket, callbacks, true, ENVOY_TCP_BACKLOG_SIZE); + Network::ListenerPtr listener = dispatcher->createListener(socket, callbacks, true); Network::ConnectionPtr server_connection; Network::MockConnectionCallbacks server_connection_callbacks; @@ -3806,11 +3793,10 @@ TEST_P(SslSocketTest, SslError) { ServerSslSocketFactory server_ssl_socket_factory(std::move(server_cfg), manager, server_stats_store, std::vector{}); - auto socket = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); + auto socket = std::make_shared( + Network::Test::getCanonicalLoopbackAddress(GetParam())); Network::MockTcpListenerCallbacks callbacks; - Network::ListenerPtr listener = - dispatcher_->createListener(socket, callbacks, true, ENVOY_TCP_BACKLOG_SIZE); + Network::ListenerPtr listener = dispatcher_->createListener(socket, callbacks, true); Network::ClientConnectionPtr client_connection = dispatcher_->createClientConnection( socket->addressProvider().localAddress(), Network::Address::InstanceConstSharedPtr(), @@ -4806,10 +4792,9 @@ class SslReadBufferLimitTest : public SslSocketTest { server_ssl_socket_factory_ = std::make_unique( std::move(server_cfg), *manager_, server_stats_store_, std::vector{}); - socket_ = std::make_shared( - Network::Test::getCanonicalLoopbackAddress(GetParam()), nullptr, true); - listener_ = - dispatcher_->createListener(socket_, listener_callbacks_, true, ENVOY_TCP_BACKLOG_SIZE); + socket_ = std::make_shared( + 
Network::Test::getCanonicalLoopbackAddress(GetParam())); + listener_ = dispatcher_->createListener(socket_, listener_callbacks_, true); TestUtility::loadFromYaml(TestEnvironment::substitute(client_ctx_yaml_), upstream_tls_context_); auto client_cfg = @@ -4952,7 +4937,7 @@ class SslReadBufferLimitTest : public SslSocketTest { Stats::TestUtil::TestStore server_stats_store_; Stats::TestUtil::TestStore client_stats_store_; - std::shared_ptr socket_; + std::shared_ptr socket_; Network::MockTcpListenerCallbacks listener_callbacks_; const std::string server_ctx_yaml_ = R"EOF( common_tls_context: diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index a945b00471128..52ee01dd3dc89 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -508,7 +508,7 @@ FakeUpstream::FakeUpstream(Network::TransportSocketFactoryPtr&& transport_socket : http_type_(config.upstream_protocol_), http2_options_(config.http2_options_), http3_options_(config.http3_options_), socket_(Network::SocketSharedPtr(listen_socket.release())), - socket_factory_(std::make_shared(socket_)), + socket_factory_(std::make_unique(socket_)), api_(Api::createApiForTest(stats_store_)), time_system_(config.time_system_), dispatcher_(api_->allocateDispatcher("fake_upstream")), handler_(new Server::ConnectionHandlerImpl(*dispatcher_, 0)), config_(config), @@ -571,6 +571,7 @@ void FakeUpstream::createUdpListenerFilterChain(Network::UdpListenerFilterManage } void FakeUpstream::threadRoutine() { + socket_factory_->doFinalPreWorkerInit(); handler_->addListener(absl::nullopt, listener_); server_initialized_.setReady(); dispatcher_->run(Event::Dispatcher::RunType::Block); @@ -767,6 +768,16 @@ testing::AssertionResult FakeUpstream::rawWriteConnection(uint32_t index, const timeout); } +void FakeUpstream::FakeListenSocketFactory::doFinalPreWorkerInit() { + if (socket_->socketType() == Network::Socket::Type::Stream) { + ASSERT_EQ(0, 
socket_->ioHandle().listen(ENVOY_TCP_BACKLOG_SIZE).rc_); + } else { + ASSERT(socket_->socketType() == Network::Socket::Type::Datagram); + ASSERT_TRUE(Network::Socket::applyOptions(socket_->options(), *socket_, + envoy::config::core::v3::SocketOption::STATE_BOUND)); + } +} + FakeRawConnection::~FakeRawConnection() { // If the filter was already deleted, it means the shared_connection_ was too, so don't try to // access it. diff --git a/test/integration/fake_upstream.h b/test/integration/fake_upstream.h index 580da4db816b3..2758cbad861c2 100644 --- a/test/integration/fake_upstream.h +++ b/test/integration/fake_upstream.h @@ -701,13 +701,13 @@ class FakeUpstream : Logger::Loggable, // Network::ListenSocketFactory Network::Socket::Type socketType() const override { return socket_->socketType(); } - const Network::Address::InstanceConstSharedPtr& localAddress() const override { return socket_->addressProvider().localAddress(); } - - Network::SocketSharedPtr getListenSocket() override { return socket_; } - Network::SocketOptRef sharedSocket() const override { return *socket_; } + Network::SocketSharedPtr getListenSocket(uint32_t) override { return socket_; } + Network::ListenSocketFactoryPtr clone() const override { return nullptr; } + void closeAllSockets() override {} + void doFinalPreWorkerInit() override; private: Network::SocketSharedPtr socket_; @@ -814,7 +814,7 @@ class FakeUpstream : Logger::Loggable, const envoy::config::core::v3::Http2ProtocolOptions http2_options_; const envoy::config::core::v3::Http3ProtocolOptions http3_options_; Network::SocketSharedPtr socket_; - Network::ListenSocketFactorySharedPtr socket_factory_; + Network::ListenSocketFactoryPtr socket_factory_; ConditionalInitializer server_initialized_; // Guards any objects which can be altered both in the upstream thread and the // main test thread. 
diff --git a/test/integration/hotrestart_test.sh b/test/integration/hotrestart_test.sh index d1e5764a38037..5c26beaae5299 100755 --- a/test/integration/hotrestart_test.sh +++ b/test/integration/hotrestart_test.sh @@ -1,6 +1,11 @@ #!/bin/bash -# For this test we use a slightly modiified test binary, based on +# In order to get core dumps that can be debugged, uncomment the following line and then run +# the test using --spawn_strategy=local. (There may be a better way of doing this but this worked +# for Matt Klein.) +# ulimit -c unlimited + +# For this test we use a slightly modified test binary, based on # source/exe/envoy-static. If this starts failing to run or build, ensure that # source/exe/main.cc and ./hotrestart_main.cc have not diverged except for # adding the new gauge. @@ -26,7 +31,7 @@ if [[ -z "${ENVOY_IP_TEST_VERSIONS}" ]] || [[ "${ENVOY_IP_TEST_VERSIONS}" == "al sed -e "s#{{ test_rundir }}#$TEST_SRCDIR/envoy#" | \ sed -e "s#{{ test_tmpdir }}#$TEST_TMPDIR#" | \ sed -e "s#{{ ip_loopback_address }}#127.0.0.1#" | \ - sed -e "s#{{ reuse_port }}#false#" | \ + sed -e "s#{{ enable_reuse_port }}#false#" | \ sed -e "s#{{ dns_lookup_family }}#V4_ONLY#" | \ sed -e "s#{{ null_device_path }}#/dev/null#" | \ cat > "${HOT_RESTART_JSON_V4}" @@ -40,7 +45,7 @@ if [[ -z "${ENVOY_IP_TEST_VERSIONS}" ]] || [[ "${ENVOY_IP_TEST_VERSIONS}" == "al sed -e "s#{{ test_rundir }}#$TEST_SRCDIR/envoy#" | \ sed -e "s#{{ test_tmpdir }}#$TEST_TMPDIR#" | \ sed -e "s#{{ ip_loopback_address }}#::1#" | \ - sed -e "s#{{ reuse_port }}#false#" | \ + sed -e "s#{{ enable_reuse_port }}#false#" | \ sed -e "s#{{ dns_lookup_family }}#v6_only#" | \ sed -e "s#{{ null_device_path }}#/dev/null#" | \ cat > "${HOT_RESTART_JSON_V6}" @@ -57,14 +62,14 @@ sed -e "s#{{ socket_dir }}#${SOCKET_DIR}#" "${TEST_SRCDIR}/envoy"/test/config/in cat > "${HOT_RESTART_JSON_UDS}" JSON_TEST_ARRAY+=("${HOT_RESTART_JSON_UDS}") -# Test reuse port listener. +# Test reuse_port listener. 
HOT_RESTART_JSON_REUSE_PORT="${TEST_TMPDIR}"/hot_restart_v4.yaml echo "building ${HOT_RESTART_JSON_V4} ..." sed -e "s#{{ upstream_. }}#0#g" "${TEST_SRCDIR}/envoy"/test/config/integration/server.yaml | \ sed -e "s#{{ test_rundir }}#$TEST_SRCDIR/envoy#" | \ sed -e "s#{{ test_tmpdir }}#$TEST_TMPDIR#" | \ sed -e "s#{{ ip_loopback_address }}#127.0.0.1#" | \ - sed -e "s#{{ reuse_port }}#true#" | \ + sed -e "s#{{ enable_reuse_port }}#true#" | \ sed -e "s#{{ dns_lookup_family }}#V4_ONLY#" | \ sed -e "s#{{ null_device_path }}#/dev/null#" | \ cat > "${HOT_RESTART_JSON_REUSE_PORT}" diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index c1e3328f9d5cf..3f0f219e005eb 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -330,7 +330,7 @@ void HttpIntegrationTest::initialize() { // Needed to config QUIC transport socket factory, and needs to be added before base class calls // initialize(). - config_helper_.addQuicDownstreamTransportSocketConfig(set_reuse_port_); + config_helper_.addQuicDownstreamTransportSocketConfig(); BaseIntegrationTest::initialize(); registerTestServerPorts({"http"}); diff --git a/test/integration/http_integration.h b/test/integration/http_integration.h index d8a15756bc09f..2d795cddba0a7 100644 --- a/test/integration/http_integration.h +++ b/test/integration/http_integration.h @@ -269,7 +269,6 @@ class HttpIntegrationTest : public BaseIntegrationTest { std::string access_log_name_; testing::NiceMock random_; - bool set_reuse_port_{false}; std::string san_to_match_{"spiffe://lyft.com/backend-team"}; }; diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 6038ca7fae297..8dd52277396b2 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -57,9 +57,9 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, IntegrationTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); 
-// Verify that we gracefully handle an invalid pre-bind socket option when using reuse port. +// Verify that we gracefully handle an invalid pre-bind socket option when using reuse_port. TEST_P(IntegrationTest, BadPrebindSocketOptionWithReusePort) { - // Reserve a port that we can then use on the integration listener with reuse port. + // Reserve a port that we can then use on the integration listener with reuse_port. auto addr_socket = Network::Test::bindFreeLoopbackPort(version_, Network::Socket::Type::Stream, true); // Do not wait for listeners to start as the listener will fail. @@ -67,7 +67,6 @@ TEST_P(IntegrationTest, BadPrebindSocketOptionWithReusePort) { config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); - listener->set_reuse_port(true); listener->mutable_address()->mutable_socket_address()->set_port_value( addr_socket.second->addressProvider().localAddress()->ip()->port()); auto socket_option = listener->add_socket_options(); @@ -79,9 +78,9 @@ TEST_P(IntegrationTest, BadPrebindSocketOptionWithReusePort) { test_server_->waitForCounterGe("listener_manager.listener_create_failure", 1); } -// Verify that we gracefully handle an invalid post-bind socket option when using reuse port. +// Verify that we gracefully handle an invalid post-bind socket option when using reuse_port. TEST_P(IntegrationTest, BadPostbindSocketOptionWithReusePort) { - // Reserve a port that we can then use on the integration listener with reuse port. + // Reserve a port that we can then use on the integration listener with reuse_port. auto addr_socket = Network::Test::bindFreeLoopbackPort(version_, Network::Socket::Type::Stream, true); // Do not wait for listeners to start as the listener will fail. 
@@ -89,7 +88,6 @@ TEST_P(IntegrationTest, BadPostbindSocketOptionWithReusePort) { config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); - listener->set_reuse_port(true); listener->mutable_address()->mutable_socket_address()->set_port_value( addr_socket.second->addressProvider().localAddress()->ip()->port()); auto socket_option = listener->add_socket_options(); @@ -101,6 +99,22 @@ TEST_P(IntegrationTest, BadPostbindSocketOptionWithReusePort) { test_server_->waitForCounterGe("listener_manager.listener_create_failure", 1); } +// Verify that we gracefully handle an invalid post-listen socket option. +TEST_P(IntegrationTest, BadPostListenSocketOption) { + // Do not wait for listeners to start as the listener will fail. + defer_listener_finalization_ = true; + + config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + auto socket_option = listener->add_socket_options(); + socket_option->set_state(envoy::config::core::v3::SocketOption::STATE_LISTENING); + socket_option->set_level(10000); // Invalid level. + socket_option->set_int_value(10000); // Invalid value. + }); + initialize(); + test_server_->waitForCounterGe("listener_manager.listener_create_failure", 1); +} + // Make sure we have correctly specified per-worker performance stats. 
TEST_P(IntegrationTest, PerWorkerStatsAndBalancing) { concurrency_ = 2; diff --git a/test/integration/listener_lds_integration_test.cc b/test/integration/listener_lds_integration_test.cc index 498f9f2a22bba..55a2c00a797f8 100644 --- a/test/integration/listener_lds_integration_test.cc +++ b/test/integration/listener_lds_integration_test.cc @@ -426,9 +426,6 @@ TEST_P(ListenerIntegrationTest, BasicSuccess) { TEST_P(ListenerIntegrationTest, MultipleLdsUpdatesSharingListenSocketFactory) { on_server_init_function_ = [&]() { createLdsStream(); - // Set reuse_port so that a new socket is created by the - // ListenSocketFactory. - listener_config_.set_reuse_port(true); sendLdsResponse({MessageUtil::getYamlStringFromMessage(listener_config_)}, "1"); createRdsStream(route_table_name_); }; diff --git a/test/integration/quic_http_integration_test.cc b/test/integration/quic_http_integration_test.cc index 9ef03170c68e2..eb8abe377c482 100644 --- a/test/integration/quic_http_integration_test.cc +++ b/test/integration/quic_http_integration_test.cc @@ -158,7 +158,6 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, public QuicMultiVers void testMultipleQuicConnections() { concurrency_ = 8; - set_reuse_port_ = true; initialize(); std::vector codec_clients; for (size_t i = 1; i <= concurrency_; ++i) { @@ -301,7 +300,6 @@ TEST_P(QuicHttpIntegrationTest, MultipleQuicConnectionsNoBPF) { // worker. TEST_P(QuicHttpIntegrationTest, MultiWorkerWithLongConnectionId) { concurrency_ = 8; - set_reuse_port_ = true; initialize(); // Setup 9-byte CID for the next connection. 
designated_connection_ids_.push_back(quic::test::TestConnectionIdNineBytesLong(2u)); @@ -310,7 +308,6 @@ TEST_P(QuicHttpIntegrationTest, MultiWorkerWithLongConnectionId) { TEST_P(QuicHttpIntegrationTest, PortMigration) { concurrency_ = 2; - set_reuse_port_ = true; initialize(); uint32_t old_port = lookupPort("http"); codec_client_ = makeHttpConnection(old_port); diff --git a/test/integration/xds_integration_test.cc b/test/integration/xds_integration_test.cc index 114359d2bda31..6a0cadb551b49 100644 --- a/test/integration/xds_integration_test.cc +++ b/test/integration/xds_integration_test.cc @@ -569,6 +569,34 @@ TEST_P(LdsIntegrationTest, ReloadConfig) { EXPECT_THAT(response2, HasSubstr("HTTP/1.0 200 OK\r\n")); } +// Verify that a listener that goes through the individual warming path (not server init) is +// failed and removed correctly if there are issues with final pre-worker init. +TEST_P(LdsIntegrationTest, NewListenerWithBadPostListenSocketOption) { + autonomous_upstream_ = true; + initialize(); + // Given we're using LDS in this test, initialize() will not complete until + // the initial LDS file has loaded. + EXPECT_EQ(1, test_server_->counter("listener_manager.lds.update_success")->value()); + + // Reserve a port that we can then use on the integration listener with reuse_port. 
+ auto addr_socket = + Network::Test::bindFreeLoopbackPort(version_, Network::Socket::Type::Stream, true); + ConfigHelper new_config_helper( + version_, *api_, MessageUtil::getJsonStringFromMessageOrDie(config_helper_.bootstrap())); + new_config_helper.addConfigModifier( + [&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + listener->mutable_address()->mutable_socket_address()->set_port_value( + addr_socket.second->addressProvider().localAddress()->ip()->port()); + auto socket_option = listener->add_socket_options(); + socket_option->set_state(envoy::config::core::v3::SocketOption::STATE_LISTENING); + socket_option->set_level(10000); // Invalid level. + socket_option->set_int_value(10000); // Invalid value. + }); + new_config_helper.setLds("1"); + test_server_->waitForCounterGe("listener_manager.listener_create_failure", 1); +} + // Sample test making sure our config framework informs on listener failure. 
TEST_P(LdsIntegrationTest, FailConfigLoad) { config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { diff --git a/test/mocks/event/mocks.h b/test/mocks/event/mocks.h index 7943db8e9db44..26b1559e5ff95 100644 --- a/test/mocks/event/mocks.h +++ b/test/mocks/event/mocks.h @@ -67,9 +67,9 @@ class MockDispatcher : public Dispatcher { } Network::ListenerPtr createListener(Network::SocketSharedPtr&& socket, - Network::TcpListenerCallbacks& cb, bool bind_to_port, - uint32_t backlog_size) override { - return Network::ListenerPtr{createListener_(std::move(socket), cb, bind_to_port, backlog_size)}; + Network::TcpListenerCallbacks& cb, + bool bind_to_port) override { + return Network::ListenerPtr{createListener_(std::move(socket), cb, bind_to_port)}; } Network::UdpListenerPtr @@ -138,7 +138,7 @@ class MockDispatcher : public Dispatcher { MOCK_METHOD(Filesystem::Watcher*, createFilesystemWatcher_, ()); MOCK_METHOD(Network::Listener*, createListener_, (Network::SocketSharedPtr && socket, Network::TcpListenerCallbacks& cb, - bool bind_to_port, uint32_t backlog_size)); + bool bind_to_port)); MOCK_METHOD(Network::UdpListener*, createUdpListener_, (Network::SocketSharedPtr socket, Network::UdpListenerCallbacks& cb, const envoy::config::core::v3::UdpSocketConfig& config)); diff --git a/test/mocks/event/wrapped_dispatcher.h b/test/mocks/event/wrapped_dispatcher.h index 56f5270e05ab9..de0949358f5d7 100644 --- a/test/mocks/event/wrapped_dispatcher.h +++ b/test/mocks/event/wrapped_dispatcher.h @@ -66,9 +66,9 @@ class WrappedDispatcher : public Dispatcher { } Network::ListenerPtr createListener(Network::SocketSharedPtr&& socket, - Network::TcpListenerCallbacks& cb, bool bind_to_port, - uint32_t backlog_size) override { - return impl_.createListener(std::move(socket), cb, bind_to_port, backlog_size); + Network::TcpListenerCallbacks& cb, + bool bind_to_port) override { + return impl_.createListener(std::move(socket), cb, bind_to_port); } 
Network::UdpListenerPtr diff --git a/test/mocks/network/mocks.cc b/test/mocks/network/mocks.cc index 71a61eb674eea..f07685c37f246 100644 --- a/test/mocks/network/mocks.cc +++ b/test/mocks/network/mocks.cc @@ -38,9 +38,7 @@ MockListenerConfig::MockListenerConfig() ON_CALL(*this, listenSocketFactory()).WillByDefault(ReturnRef(socket_factory_)); ON_CALL(socket_factory_, localAddress()) .WillByDefault(ReturnRef(socket_->addressProvider().localAddress())); - ON_CALL(socket_factory_, getListenSocket()).WillByDefault(Return(socket_)); - ON_CALL(socket_factory_, sharedSocket()) - .WillByDefault(Return(std::reference_wrapper(*socket_))); + ON_CALL(socket_factory_, getListenSocket(_)).WillByDefault(Return(socket_)); ON_CALL(*this, listenerScope()).WillByDefault(ReturnRef(scope_)); ON_CALL(*this, name()).WillByDefault(ReturnRef(name_)); } @@ -146,6 +144,9 @@ MockListenSocket::MockListenSocket() })); ON_CALL(*this, ipVersion()) .WillByDefault(Return(address_provider_->localAddress()->ip()->version())); + ON_CALL(*this, duplicate()).WillByDefault(Invoke([]() { + return std::make_unique>(); + })); } MockSocketOption::MockSocketOption() { diff --git a/test/mocks/network/mocks.h b/test/mocks/network/mocks.h index ef31f4f74aa99..1145db10d1460 100644 --- a/test/mocks/network/mocks.h +++ b/test/mocks/network/mocks.h @@ -359,8 +359,11 @@ class MockListenSocketFactory : public ListenSocketFactory { MOCK_METHOD(Network::Socket::Type, socketType, (), (const)); MOCK_METHOD(const Network::Address::InstanceConstSharedPtr&, localAddress, (), (const)); - MOCK_METHOD(Network::SocketSharedPtr, getListenSocket, ()); - MOCK_METHOD(SocketOptRef, sharedSocket, (), (const)); + MOCK_METHOD(Network::SocketSharedPtr, getListenSocket, (uint32_t)); + MOCK_METHOD(bool, reusePort, (), (const)); + MOCK_METHOD(Network::ListenSocketFactoryPtr, clone, (), (const)); + MOCK_METHOD(void, closeAllSockets, ()); + MOCK_METHOD(void, doFinalPreWorkerInit, ()); }; class MockUdpPacketWriterFactory : public 
UdpPacketWriterFactory { diff --git a/test/mocks/server/hot_restart.cc b/test/mocks/server/hot_restart.cc index 8a11dbe8011da..6d8a32ea236eb 100644 --- a/test/mocks/server/hot_restart.cc +++ b/test/mocks/server/hot_restart.cc @@ -8,12 +8,15 @@ namespace Envoy { namespace Server { +using ::testing::_; +using ::testing::Return; using ::testing::ReturnRef; MockHotRestart::MockHotRestart() : stats_allocator_(*symbol_table_) { ON_CALL(*this, logLock()).WillByDefault(ReturnRef(log_lock_)); ON_CALL(*this, accessLogLock()).WillByDefault(ReturnRef(access_log_lock_)); ON_CALL(*this, statsAllocator()).WillByDefault(ReturnRef(stats_allocator_)); + ON_CALL(*this, duplicateParentListenSocket(_, _)).WillByDefault(Return(-1)); } MockHotRestart::~MockHotRestart() = default; diff --git a/test/mocks/server/hot_restart.h b/test/mocks/server/hot_restart.h index 02d219fa90abb..09049b87fdb10 100644 --- a/test/mocks/server/hot_restart.h +++ b/test/mocks/server/hot_restart.h @@ -15,10 +15,11 @@ class MockHotRestart : public HotRestart { // Server::HotRestart MOCK_METHOD(void, drainParentListeners, ()); - MOCK_METHOD(int, duplicateParentListenSocket, (const std::string& address)); + MOCK_METHOD(int, duplicateParentListenSocket, + (const std::string& address, uint32_t worker_index)); MOCK_METHOD(std::unique_ptr, getParentStats, ()); MOCK_METHOD(void, initialize, (Event::Dispatcher & dispatcher, Server::Instance& server)); - MOCK_METHOD(void, sendParentAdminShutdownRequest, (time_t & original_start_time)); + MOCK_METHOD(absl::optional, sendParentAdminShutdownRequest, ()); MOCK_METHOD(void, sendParentTerminateRequest, ()); MOCK_METHOD(ServerStatsFromParent, mergeParentStatsIfAny, (Stats::StoreRoot & stats_store)); MOCK_METHOD(void, shutdown, ()); diff --git a/test/mocks/server/instance.cc b/test/mocks/server/instance.cc index ebd079c44b96b..92be91025b9ed 100644 --- a/test/mocks/server/instance.cc +++ b/test/mocks/server/instance.cc @@ -50,6 +50,7 @@ MockInstance::MockInstance() ON_CALL(*this, 
serverFactoryContext()).WillByDefault(ReturnRef(*server_factory_context_)); ON_CALL(*this, transportSocketFactoryContext()) .WillByDefault(ReturnRef(*transport_socket_factory_context_)); + ON_CALL(*this, enableReusePortDefault()).WillByDefault(Return(true)); } MockInstance::~MockInstance() = default; diff --git a/test/mocks/server/instance.h b/test/mocks/server/instance.h index 21788e4900316..fe3f22fcc2305 100644 --- a/test/mocks/server/instance.h +++ b/test/mocks/server/instance.h @@ -85,6 +85,7 @@ class MockInstance : public Instance { MOCK_METHOD(ProtobufMessage::ValidationContext&, messageValidationContext, ()); MOCK_METHOD(Configuration::ServerFactoryContext&, serverFactoryContext, ()); MOCK_METHOD(Configuration::TransportSocketFactoryContext&, transportSocketFactoryContext, ()); + MOCK_METHOD(bool, enableReusePortDefault, ()); void setDefaultTracingConfig(const envoy::config::trace::v3::Tracing& tracing_config) override { http_context_.setDefaultTracingConfig(tracing_config); diff --git a/test/mocks/server/listener_component_factory.cc b/test/mocks/server/listener_component_factory.cc index a0b00b7bc756a..540754339eb31 100644 --- a/test/mocks/server/listener_component_factory.cc +++ b/test/mocks/server/listener_component_factory.cc @@ -13,10 +13,11 @@ using ::testing::Invoke; MockListenerComponentFactory::MockListenerComponentFactory() : socket_(std::make_shared>()) { - ON_CALL(*this, createListenSocket(_, _, _, _)) + ON_CALL(*this, createListenSocket(_, _, _, _, _)) .WillByDefault(Invoke([&](Network::Address::InstanceConstSharedPtr, Network::Socket::Type, const Network::Socket::OptionsSharedPtr& options, - const ListenSocketCreationParams&) -> Network::SocketSharedPtr { + ListenerComponentFactory::BindType, + uint32_t) -> Network::SocketSharedPtr { if (!Network::Socket::applyOptions(options, *socket_, envoy::config::core::v3::SocketOption::STATE_PREBIND)) { throw EnvoyException("MockListenerComponentFactory: Setting socket options failed"); diff --git 
a/test/mocks/server/listener_component_factory.h b/test/mocks/server/listener_component_factory.h index 4f8b1213840b5..1234437ee02b6 100644 --- a/test/mocks/server/listener_component_factory.h +++ b/test/mocks/server/listener_component_factory.h @@ -38,8 +38,8 @@ class MockListenerComponentFactory : public ListenerComponentFactory { Configuration::ListenerFactoryContext& context)); MOCK_METHOD(Network::SocketSharedPtr, createListenSocket, (Network::Address::InstanceConstSharedPtr address, Network::Socket::Type socket_type, - const Network::Socket::OptionsSharedPtr& options, - const ListenSocketCreationParams& params)); + const Network::Socket::OptionsSharedPtr& options, BindType bind_type, + uint32_t worker_index)); MOCK_METHOD(DrainManager*, createDrainManager_, (envoy::config::listener::v3::Listener::DrainType drain_type)); MOCK_METHOD(uint64_t, nextListenerTag, ()); diff --git a/test/mocks/server/worker.cc b/test/mocks/server/worker.cc index b4f1e5e8d9ee4..7286c4e4dc546 100644 --- a/test/mocks/server/worker.cc +++ b/test/mocks/server/worker.cc @@ -17,7 +17,7 @@ MockWorker::MockWorker() { Invoke([this](absl::optional overridden_listener, Network::ListenerConfig& config, AddListenerCompletion completion) -> void { UNREFERENCED_PARAMETER(overridden_listener); - config.listenSocketFactory().getListenSocket(); + config.listenSocketFactory().getListenSocket(0); EXPECT_EQ(nullptr, add_listener_completion_); add_listener_completion_ = completion; })); diff --git a/test/mocks/server/worker.h b/test/mocks/server/worker.h index b72f38c5c2c1f..8ed1d7b3760f4 100644 --- a/test/mocks/server/worker.h +++ b/test/mocks/server/worker.h @@ -12,9 +12,9 @@ class MockWorker : public Worker { MockWorker(); ~MockWorker() override; - void callAddCompletion(bool success) { + void callAddCompletion() { EXPECT_NE(nullptr, add_listener_completion_); - add_listener_completion_(success); + add_listener_completion_(); add_listener_completion_ = nullptr; } diff --git 
a/test/server/connection_handler_test.cc b/test/server/connection_handler_test.cc index a7df85f9333b8..c911742fa469e 100644 --- a/test/server/connection_handler_test.cc +++ b/test/server/connection_handler_test.cc @@ -45,8 +45,7 @@ namespace { class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable { public: ConnectionHandlerTest() - : socket_factory_(std::make_shared()), - handler_(new ConnectionHandlerImpl(dispatcher_, 0)), + : handler_(new ConnectionHandlerImpl(dispatcher_, 0)), filter_chain_(std::make_shared>()), listener_filter_matcher_(std::make_shared>()), access_log_(std::make_shared()) { @@ -65,14 +64,12 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable access_log, std::shared_ptr> filter_chain_manager = nullptr, uint32_t tcp_backlog_size = ENVOY_TCP_BACKLOG_SIZE, Network::ConnectionBalancerSharedPtr connection_balancer = nullptr) : parent_(parent), socket_(std::make_shared>()), - socket_factory_(std::move(socket_factory)), tag_(tag), bind_to_port_(bind_to_port), - tcp_backlog_size_(tcp_backlog_size), + tag_(tag), bind_to_port_(bind_to_port), tcp_backlog_size_(tcp_backlog_size), hand_off_restored_destination_connections_(hand_off_restored_destination_connections), name_(name), listener_filters_timeout_(listener_filters_timeout), continue_on_listener_filters_timeout_(continue_on_listener_filters_timeout), @@ -113,7 +110,7 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable> socket_; - Network::ListenSocketFactorySharedPtr socket_factory_; + Network::MockListenSocketFactory socket_factory_; uint64_t tag_; bool bind_to_port_; const uint32_t tcp_backlog_size_; @@ -162,6 +159,7 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable> inline_filter_chain_manager_; std::unique_ptr init_manager_; envoy::config::core::v3::TrafficDirection direction_; + Network::UdpListenerCallbacks* udp_listener_callbacks_{}; }; using TestListenerPtr = std::unique_ptr; @@ 
-216,21 +214,22 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable( *this, tag, bind_to_port, hand_off_restored_destination_connections, name, socket_type, - listener_filters_timeout, continue_on_listener_filters_timeout, socket_factory_, - access_log_, overridden_filter_chain_manager, tcp_backlog_size, connection_balancer)); - EXPECT_CALL(*socket_factory_, socketType()).WillOnce(Return(socket_type)); + listener_filters_timeout, continue_on_listener_filters_timeout, access_log_, + overridden_filter_chain_manager, tcp_backlog_size, connection_balancer)); + EXPECT_CALL(listeners_.back()->socket_factory_, socketType()).WillOnce(Return(socket_type)); if (listener == nullptr) { // Expecting listener config in place update. // If so, dispatcher would not create new network listener. return listeners_.back().get(); } - EXPECT_CALL(*socket_factory_, getListenSocket()).WillOnce(Return(listeners_.back()->socket_)); + EXPECT_CALL(listeners_.back()->socket_factory_, getListenSocket(_)) + .WillOnce(Return(listeners_.back()->socket_)); if (socket_type == Network::Socket::Type::Stream) { - EXPECT_CALL(dispatcher_, createListener_(_, _, _, _)) + EXPECT_CALL(dispatcher_, createListener_(_, _, _)) .WillOnce(Invoke([listener, listener_callbacks](Network::SocketSharedPtr&&, - Network::TcpListenerCallbacks& cb, bool, - uint32_t) -> Network::Listener* { + Network::TcpListenerCallbacks& cb, + bool) -> Network::Listener* { if (listener_callbacks != nullptr) { *listener_callbacks = &cb; } @@ -239,8 +238,10 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable Network::UdpListener* { + [listener, &test_listener = listeners_.back()]( + Network::SocketSharedPtr&&, Network::UdpListenerCallbacks& udp_listener_callbacks, + const envoy::config::core::v3::UdpSocketConfig&) -> Network::UdpListener* { + test_listener->udp_listener_callbacks_ = &udp_listener_callbacks; return dynamic_cast(listener); })); 
listeners_.back()->udp_listener_config_->listener_worker_router_ = @@ -264,7 +265,8 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggableip()->port()); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(any_address)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(any_address)); handler_->addListener(absl::nullopt, *test_listener); Network::MockListenerFilter* test_filter = new Network::MockListenerFilter(); @@ -294,7 +296,6 @@ class ConnectionHandlerTest : public testing::Test, protected Logger::Loggable socket_factory_; Network::Address::InstanceConstSharedPtr local_address_{ new Network::Address::Ipv4Instance("127.0.0.1", 10001)}; NiceMock dispatcher_{"test"}; @@ -327,7 +328,7 @@ TEST_F(ConnectionHandlerTest, RemoveListenerDuringRebalance) { TestListener* test_listener = addListener(1, true, false, "test_listener", listener, &listener_callbacks, connection_balancer, ¤t_handler); - EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); // Fake a balancer posting a connection to us. @@ -367,7 +368,8 @@ TEST_F(ConnectionHandlerTest, ListenerConnectionLimitEnforced) { addListener(1, false, false, "test_listener1", listener1, &listener_callbacks1); Network::Address::InstanceConstSharedPtr normal_address( new Network::Address::Ipv4Instance("127.0.0.1", 10001)); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(normal_address)); + EXPECT_CALL(test_listener1->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(normal_address)); // Only allow a single connection on this listener. 
test_listener1->setMaxConnections(1); handler_->addListener(absl::nullopt, *test_listener1); @@ -378,7 +380,8 @@ TEST_F(ConnectionHandlerTest, ListenerConnectionLimitEnforced) { addListener(2, false, false, "test_listener2", listener2, &listener_callbacks2); Network::Address::InstanceConstSharedPtr alt_address( new Network::Address::Ipv4Instance("127.0.0.2", 20002)); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(alt_address)); + EXPECT_CALL(test_listener2->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(alt_address)); // Do not allow any connections on this listener. test_listener2->setMaxConnections(0); handler_->addListener(absl::nullopt, *test_listener2); @@ -450,7 +453,7 @@ TEST_F(ConnectionHandlerTest, RemoveListener) { auto listener = new NiceMock(); TestListener* test_listener = addListener(1, true, false, "test_listener", listener, &listener_callbacks); - EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); Network::MockConnectionSocket* connection = new NiceMock(); @@ -481,7 +484,7 @@ TEST_F(ConnectionHandlerTest, DisableListener) { auto listener = new NiceMock(); TestListener* test_listener = addListener(1, false, false, "test_listener", listener, &listener_callbacks); - EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); EXPECT_CALL(*listener, disable()); @@ -498,7 +501,7 @@ TEST_F(ConnectionHandlerTest, AddDisabledListener) { TestListener* test_listener = addListener(1, false, false, "test_listener", listener, &listener_callbacks); EXPECT_CALL(*listener, disable()); - EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); + 
EXPECT_CALL(test_listener->socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); EXPECT_CALL(*listener, onDestroy()); handler_->disableListeners(); @@ -512,7 +515,7 @@ TEST_F(ConnectionHandlerTest, SetListenerRejectFraction) { auto listener = new NiceMock(); TestListener* test_listener = addListener(1, false, false, "test_listener", listener, &listener_callbacks); - EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); EXPECT_CALL(*listener, setRejectFraction(UnitFloat(0.1234f))); @@ -529,7 +532,7 @@ TEST_F(ConnectionHandlerTest, AddListenerSetRejectFraction) { TestListener* test_listener = addListener(1, false, false, "test_listener", listener, &listener_callbacks); EXPECT_CALL(*listener, setRejectFraction(UnitFloat(0.12345f))); - EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); EXPECT_CALL(*listener, onDestroy()); handler_->setListenerRejectFraction(UnitFloat(0.12345f)); @@ -544,7 +547,7 @@ TEST_F(ConnectionHandlerTest, SetsTransportSocketConnectTimeout) { TestListener* test_listener = addListener(1, false, false, "test_listener", listener, &listener_callbacks); - EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); auto server_connection = new NiceMock(); @@ -569,7 +572,7 @@ TEST_F(ConnectionHandlerTest, DestroyCloseConnections) { auto listener = new NiceMock(); TestListener* test_listener = addListener(1, true, false, "test_listener", listener, &listener_callbacks); - EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); + 
EXPECT_CALL(test_listener->socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); Network::MockConnectionSocket* connection = new NiceMock(); @@ -589,7 +592,7 @@ TEST_F(ConnectionHandlerTest, CloseDuringFilterChainCreate) { auto listener = new NiceMock(); TestListener* test_listener = addListener(1, true, false, "test_listener", listener, &listener_callbacks); - EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(filter_chain_.get())); @@ -613,7 +616,7 @@ TEST_F(ConnectionHandlerTest, CloseConnectionOnEmptyFilterChain) { auto listener = new NiceMock(); TestListener* test_listener = addListener(1, true, false, "test_listener", listener, &listener_callbacks); - EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); EXPECT_CALL(manager_, findFilterChain(_)).WillOnce(Return(filter_chain_.get())); @@ -637,7 +640,8 @@ TEST_F(ConnectionHandlerTest, NormalRedirect) { addListener(1, true, true, "test_listener1", listener1, &listener_callbacks1); Network::Address::InstanceConstSharedPtr normal_address( new Network::Address::Ipv4Instance("127.0.0.1", 10001)); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(normal_address)); + EXPECT_CALL(test_listener1->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(normal_address)); handler_->addListener(absl::nullopt, *test_listener1); Network::TcpListenerCallbacks* listener_callbacks2; @@ -646,7 +650,8 @@ TEST_F(ConnectionHandlerTest, NormalRedirect) { addListener(1, false, false, "test_listener2", listener2, 
&listener_callbacks2); Network::Address::InstanceConstSharedPtr alt_address( new Network::Address::Ipv4Instance("127.0.0.2", 20002)); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(alt_address)); + EXPECT_CALL(test_listener2->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(alt_address)); handler_->addListener(absl::nullopt, *test_listener2); auto* test_filter = new NiceMock(); @@ -702,7 +707,8 @@ TEST_F(ConnectionHandlerTest, FallbackToWildcardListener) { addListener(1, true, true, "test_listener1", listener1, &listener_callbacks1); Network::Address::InstanceConstSharedPtr normal_address( new Network::Address::Ipv4Instance("127.0.0.1", 10001)); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(normal_address)); + EXPECT_CALL(test_listener1->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(normal_address)); handler_->addListener(absl::nullopt, *test_listener1); Network::TcpListenerCallbacks* listener_callbacks2; @@ -710,7 +716,8 @@ TEST_F(ConnectionHandlerTest, FallbackToWildcardListener) { TestListener* test_listener2 = addListener(1, false, false, "test_listener2", listener2, &listener_callbacks2); Network::Address::InstanceConstSharedPtr any_address = Network::Utility::getIpv4AnyAddress(); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(any_address)); + EXPECT_CALL(test_listener2->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(any_address)); handler_->addListener(absl::nullopt, *test_listener2); Network::MockListenerFilter* test_filter = new Network::MockListenerFilter(); @@ -759,7 +766,8 @@ TEST_F(ConnectionHandlerTest, OldBehaviorMatchFirstWildcardListener) { addListener(1, true, true, "test_listener1", listener1, &listener_callbacks1); Network::Address::InstanceConstSharedPtr normal_address( new Network::Address::Ipv4Instance("127.0.0.1", 10001)); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(normal_address)); + 
EXPECT_CALL(test_listener1->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(normal_address)); handler_->addListener(absl::nullopt, *test_listener1); auto ipv4_overridden_filter_chain_manager = @@ -772,7 +780,8 @@ TEST_F(ConnectionHandlerTest, OldBehaviorMatchFirstWildcardListener) { std::chrono::milliseconds(15000), false, ipv4_overridden_filter_chain_manager); Network::Address::InstanceConstSharedPtr any_address( new Network::Address::Ipv4Instance("0.0.0.0", 80)); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(any_address)); + EXPECT_CALL(ipv4_any_listener->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(any_address)); handler_->addListener(absl::nullopt, *ipv4_any_listener); auto ipv6_overridden_filter_chain_manager = @@ -785,7 +794,8 @@ TEST_F(ConnectionHandlerTest, OldBehaviorMatchFirstWildcardListener) { std::chrono::milliseconds(15000), false, ipv6_overridden_filter_chain_manager); Network::Address::InstanceConstSharedPtr any_address_ipv6( new Network::Address::Ipv6Instance("::", 80)); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(any_address_ipv6)); + EXPECT_CALL(ipv6_any_listener->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(any_address_ipv6)); handler_->addListener(absl::nullopt, *ipv6_any_listener); Network::MockListenerFilter* test_filter = new Network::MockListenerFilter(); @@ -835,7 +845,8 @@ TEST_F(ConnectionHandlerTest, MatchIPv6WildcardListener) { addListener(1, true, true, "test_listener1", listener1, &listener_callbacks1); Network::Address::InstanceConstSharedPtr normal_address( new Network::Address::Ipv4Instance("127.0.0.1", 10001)); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(normal_address)); + EXPECT_CALL(test_listener1->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(normal_address)); handler_->addListener(absl::nullopt, *test_listener1); auto ipv4_overridden_filter_chain_manager = @@ -849,7 +860,8 @@ 
TEST_F(ConnectionHandlerTest, MatchIPv6WildcardListener) { Network::Address::InstanceConstSharedPtr any_address( new Network::Address::Ipv4Instance("0.0.0.0", 80)); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(any_address)); + EXPECT_CALL(ipv4_any_listener->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(any_address)); handler_->addListener(absl::nullopt, *ipv4_any_listener); auto ipv6_overridden_filter_chain_manager = @@ -862,7 +874,8 @@ TEST_F(ConnectionHandlerTest, MatchIPv6WildcardListener) { std::chrono::milliseconds(15000), false, ipv6_overridden_filter_chain_manager); Network::Address::InstanceConstSharedPtr any_address_ipv6( new Network::Address::Ipv6Instance("::", 80)); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(any_address_ipv6)); + EXPECT_CALL(ipv6_any_listener->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(any_address_ipv6)); handler_->addListener(absl::nullopt, *ipv6_any_listener); Network::MockListenerFilter* test_filter = new Network::MockListenerFilter(); @@ -931,7 +944,8 @@ TEST_F(ConnectionHandlerTest, WildcardListenerWithNoOriginalDst) { new Network::Address::Ipv4Instance("127.0.0.1", 80)); Network::Address::InstanceConstSharedPtr any_address = Network::Utility::getAddressWithPort( *Network::Utility::getIpv4AnyAddress(), normal_address->ip()->port()); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(any_address)); + EXPECT_CALL(test_listener1->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(any_address)); handler_->addListener(absl::nullopt, *test_listener1); Network::MockListenerFilter* test_filter = new Network::MockListenerFilter(); @@ -960,7 +974,8 @@ TEST_F(ConnectionHandlerTest, TransportProtocolDefault) { auto listener = new NiceMock(); TestListener* test_listener = addListener(1, true, false, "test_listener", listener, &listener_callbacks); - EXPECT_CALL(*socket_factory_, 
localAddress()).WillRepeatedly(ReturnRef(local_address_)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); Network::MockConnectionSocket* accepted_socket = new NiceMock(); @@ -979,7 +994,8 @@ TEST_F(ConnectionHandlerTest, TransportProtocolCustom) { auto listener = new NiceMock(); TestListener* test_listener = addListener(1, true, false, "test_listener", listener, &listener_callbacks); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); Network::MockListenerFilter* test_filter = new Network::MockListenerFilter(); @@ -1013,7 +1029,8 @@ TEST_F(ConnectionHandlerTest, ListenerFilterTimeout) { auto listener = new NiceMock(); TestListener* test_listener = addListener(1, true, false, "test_listener", listener, &listener_callbacks); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); Network::MockListenerFilter* test_filter = new Network::MockListenerFilter(); @@ -1059,7 +1076,8 @@ TEST_F(ConnectionHandlerTest, ContinueOnListenerFilterTimeout) { TestListener* test_listener = addListener(1, true, false, "test_listener", listener, &listener_callbacks, nullptr, nullptr, Network::Socket::Type::Stream, std::chrono::milliseconds(15000), true); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); Network::MockListenerFilter* test_filter = new NiceMock(); @@ -1112,7 +1130,8 @@ 
TEST_F(ConnectionHandlerTest, ListenerFilterTimeoutResetOnSuccess) { auto listener = new NiceMock(); TestListener* test_listener = addListener(1, true, false, "test_listener", listener, &listener_callbacks); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); Network::MockListenerFilter* test_filter = new Network::MockListenerFilter(); @@ -1159,7 +1178,8 @@ TEST_F(ConnectionHandlerTest, ListenerFilterDisabledTimeout) { TestListener* test_listener = addListener(1, true, false, "test_listener", listener, &listener_callbacks, nullptr, nullptr, Network::Socket::Type::Stream, std::chrono::milliseconds()); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); Network::MockListenerFilter* test_filter = new Network::MockListenerFilter(); @@ -1189,7 +1209,8 @@ TEST_F(ConnectionHandlerTest, ListenerFilterReportError) { auto listener = new NiceMock(); TestListener* test_listener = addListener(1, true, false, "test_listener", listener, &listener_callbacks); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); Network::MockListenerFilter* first_filter = new Network::MockListenerFilter(); @@ -1223,8 +1244,8 @@ TEST_F(ConnectionHandlerTest, ListenerFilterReportError) { EXPECT_CALL(*listener, onDestroy()); } -// Ensure an exception is thrown if there are no filters registered for a UDP listener -TEST_F(ConnectionHandlerTest, UdpListenerNoFilterThrowsException) { +// Ensure no filters 
registered for a UDP listener is handled correctly. +TEST_F(ConnectionHandlerTest, UdpListenerNoFilter) { InSequence s; auto listener = new NiceMock(); @@ -1234,17 +1255,17 @@ TEST_F(ConnectionHandlerTest, UdpListenerNoFilterThrowsException) { EXPECT_CALL(factory_, createUdpListenerFilterChain(_, _)) .WillOnce(Invoke([&](Network::UdpListenerFilterManager&, Network::UdpReadFilterCallbacks&) -> bool { return true; })); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_)); - EXPECT_CALL(*listener, onDestroy()); + EXPECT_CALL(test_listener->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(local_address_)); - try { - handler_->addListener(absl::nullopt, *test_listener); - FAIL(); - } catch (const Network::CreateListenerException& e) { - EXPECT_THAT( - e.what(), - HasSubstr("Cannot create listener as no read filter registered for the udp listener")); - } + handler_->addListener(absl::nullopt, *test_listener); + + // Make sure these calls don't crash. 
+ Network::UdpRecvData data; + test_listener->udp_listener_callbacks_->onData(std::move(data)); + test_listener->udp_listener_callbacks_->onReceiveError(Api::IoError::IoErrorCode::UnknownError); + + EXPECT_CALL(*listener, onDestroy()); } TEST_F(ConnectionHandlerTest, TcpListenerInplaceUpdate) { @@ -1260,7 +1281,8 @@ TEST_F(ConnectionHandlerTest, TcpListenerInplaceUpdate) { TestListener* old_test_listener = addListener(old_listener_tag, true, false, "test_listener", old_listener, &old_listener_callbacks, mock_connection_balancer, ¤t_handler); - EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); + EXPECT_CALL(old_test_listener->socket_factory_, localAddress()) + .WillOnce(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *old_test_listener); ASSERT_NE(old_test_listener, nullptr); @@ -1297,7 +1319,7 @@ TEST_F(ConnectionHandlerTest, TcpListenerRemoveFilterChain) { auto listener = new NiceMock(); TestListener* test_listener = addListener(listener_tag, true, false, "test_listener", listener, &listener_callbacks); - EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); Network::MockConnectionSocket* connection = new NiceMock(); @@ -1345,7 +1367,7 @@ TEST_F(ConnectionHandlerTest, TcpListenerRemoveFilterChainCalledAfterListenerIsR auto listener = new NiceMock(); TestListener* test_listener = addListener(listener_tag, true, false, "test_listener", listener, &listener_callbacks); - EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); Network::MockConnectionSocket* connection = new NiceMock(); @@ -1407,7 +1429,7 @@ TEST_F(ConnectionHandlerTest, TcpListenerRemoveListener) { 
auto listener = new NiceMock(); TestListener* test_listener = addListener(1, true, false, "test_listener", listener, &listener_callbacks); - EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); Network::MockConnectionSocket* connection = new NiceMock(); @@ -1436,7 +1458,7 @@ TEST_F(ConnectionHandlerTest, TcpListenerGlobalCxLimitReject) { auto listener = new NiceMock(); TestListener* test_listener = addListener(1, true, false, "test_listener", listener, &listener_callbacks); - EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); listener_callbacks->onReject(Network::TcpListenerCallbacks::RejectCause::GlobalCxLimit); @@ -1451,7 +1473,7 @@ TEST_F(ConnectionHandlerTest, TcpListenerOverloadActionReject) { auto listener = new NiceMock(); TestListener* test_listener = addListener(1, true, false, "test_listener", listener, &listener_callbacks); - EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, *test_listener); listener_callbacks->onReject(Network::TcpListenerCallbacks::RejectCause::OverloadAction); @@ -1467,7 +1489,8 @@ TEST_F(ConnectionHandlerTest, ListenerFilterWorks) { auto listener = new NiceMock(); TestListener* test_listener = addListener(1, true, false, "test_listener", listener, &listener_callbacks); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(local_address_)); handler_->addListener(absl::nullopt, 
*test_listener); auto all_matcher = std::make_shared(); @@ -1512,7 +1535,8 @@ TEST_F(ConnectionHandlerTest, ShutdownUdpListener) { udp_listener.addReadFilter(std::move(filter)); return true; })); - EXPECT_CALL(*socket_factory_, localAddress()).WillRepeatedly(ReturnRef(local_address_)); + EXPECT_CALL(test_listener->socket_factory_, localAddress()) + .WillRepeatedly(ReturnRef(local_address_)); EXPECT_CALL(dummy_callbacks.udp_listener_, onDestroy()); handler_->addListener(absl::nullopt, *test_listener); @@ -1522,22 +1546,6 @@ TEST_F(ConnectionHandlerTest, ShutdownUdpListener) { << "The read_filter_ should be deleted before the udp_listener_ is deleted."; } -TEST_F(ConnectionHandlerTest, TcpBacklogCustom) { - uint32_t custom_backlog = 100; - TestListener* test_listener = addListener( - 1, true, false, "test_tcp_backlog", nullptr, nullptr, nullptr, nullptr, - Network::Socket::Type::Stream, std::chrono::milliseconds(), false, nullptr, custom_backlog); - EXPECT_CALL(*socket_factory_, getListenSocket()).WillOnce(Return(listeners_.back()->socket_)); - EXPECT_CALL(*socket_factory_, localAddress()).WillOnce(ReturnRef(local_address_)); - EXPECT_CALL(dispatcher_, createListener_(_, _, _, _)) - .WillOnce(Invoke([custom_backlog](Network::SocketSharedPtr&&, Network::TcpListenerCallbacks&, - bool, uint32_t backlog) -> Network::Listener* { - EXPECT_EQ(custom_backlog, backlog); - return nullptr; - })); - handler_->addListener(absl::nullopt, *test_listener); -} - } // namespace } // namespace Server } // namespace Envoy diff --git a/test/server/listener_manager_impl_quic_only_test.cc b/test/server/listener_manager_impl_quic_only_test.cc index 60b923887514e..6f1f08fe4fa78 100644 --- a/test/server/listener_manager_impl_quic_only_test.cc +++ b/test/server/listener_manager_impl_quic_only_test.cc @@ -54,7 +54,6 @@ TEST_F(ListenerManagerImplQuicOnlyTest, QuicListenerFactoryAndSslContext) { match_subject_alt_names: - exact: localhost - exact: 127.0.0.1 -reuse_port: true udp_listener_config: 
quic_options: {} )EOF", @@ -72,7 +71,7 @@ reuse_port: true /* expected_num_options */ Api::OsSysCallsSingleton::get().supportsUdpGro() ? 3 : 2, #endif - /* expected_creation_params */ {true, false}); + ListenerComponentFactory::BindType::ReusePort); expectSetsockopt(/* expected_sockopt_level */ IPPROTO_IP, /* expected_sockopt_name */ ENVOY_IP_PKTINFO, @@ -105,7 +104,7 @@ reuse_port: true ->listenerFactory() .isTransportConnectionless()); Network::SocketSharedPtr listen_socket = - manager_->listeners().front().get().listenSocketFactory().getListenSocket(); + manager_->listeners().front().get().listenSocketFactory().getListenSocket(0); Network::UdpPacketWriterPtr udp_packet_writer = manager_->listeners() @@ -156,7 +155,6 @@ TEST_F(ListenerManagerImplQuicOnlyTest, QuicListenerFactoryWithWrongTransportSoc match_subject_alt_names: - exact: localhost - exact: 127.0.0.1 -reuse_port: true udp_listener_config: quic_options: {} )EOF", diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index ced855714febb..5608db746f14c 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -42,6 +42,7 @@ namespace Server { namespace { using testing::AtLeast; +using testing::ByMove; using testing::InSequence; using testing::Return; using testing::ReturnRef; @@ -77,9 +78,10 @@ class ListenerManagerImplWithRealFiltersTest : public ListenerManagerImplTest { const envoy::config::core::v3::SocketOption::SocketState& expected_state, const Network::SocketOptionName& expected_option, int expected_value, uint32_t expected_num_options = 1, - ListenSocketCreationParams expected_creation_params = {true, true}) { + ListenerComponentFactory::BindType bind_type = + ListenerComponentFactory::BindType::NoReusePort) { if (expected_option.hasValue()) { - expectCreateListenSocket(expected_state, expected_num_options, expected_creation_params); + expectCreateListenSocket(expected_state, expected_num_options, 
bind_type); expectSetsockopt(expected_option.level(), expected_option.option(), expected_value, expected_num_options); manager_->addOrUpdateListener(listener, "", true); @@ -113,16 +115,20 @@ class ListenerManagerImplForInPlaceFilterChainUpdateTest : public Event::Simulat void expectAddListener(const envoy::config::listener::v3::Listener& listener_proto, ListenerHandle*) { - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(*worker_, addListener(_, _, _)); manager_->addOrUpdateListener(listener_proto, "", true); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); EXPECT_EQ(1UL, manager_->listeners().size()); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); } - void expectUpdateToThenDrain(const envoy::config::listener::v3::Listener& new_listener_proto, - ListenerHandle* old_listener_handle) { + Network::MockListenSocket* + expectUpdateToThenDrain(const envoy::config::listener::v3::Listener& new_listener_proto, + ListenerHandle* old_listener_handle, Network::MockListenSocket& socket) { + auto duplicated_socket = new NiceMock(); + EXPECT_CALL(socket, duplicate()) + .WillOnce(Return(ByMove(std::unique_ptr(duplicated_socket)))); EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_CALL(*worker_, stopListener(_, _)); EXPECT_CALL(*old_listener_handle->drain_manager_, startDrainSequence(_)); @@ -134,13 +140,14 @@ class ListenerManagerImplForInPlaceFilterChainUpdateTest : public Event::Simulat EXPECT_CALL(*old_listener_handle, onDestroy()); worker_->callRemovalCompletion(); + return duplicated_socket; } void expectRemove(const envoy::config::listener::v3::Listener& listener_proto, - ListenerHandle* listener_handle) { + ListenerHandle* listener_handle, Network::MockListenSocket& socket) { EXPECT_CALL(*worker_, stopListener(_, _)); - EXPECT_CALL(*listener_factory_.socket_, close()); + EXPECT_CALL(socket, close()); 
EXPECT_CALL(*listener_handle->drain_manager_, startDrainSequence(_)); EXPECT_TRUE(manager_->removeListener(listener_proto.name())); @@ -168,7 +175,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, EmptyFilter) { )EOF"; EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(&manager_->httpContext(), &server_.httpContext()); EXPECT_EQ(1U, manager_->listeners().size()); @@ -186,7 +193,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, DefaultListenerPerConnectionBuffe - filters: [] )EOF"; - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1024 * 1024U, manager_->listeners().back().get().perConnectionBufferLimitBytes()); } @@ -202,7 +209,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SetListenerPerConnectionBufferLim per_connection_buffer_limit_bytes: 8192 )EOF"; - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(8192U, manager_->listeners().back().get().perConnectionBufferLimitBytes()); } @@ -234,7 +241,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsTransportSocket) { )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -272,7 +279,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, )EOF", 
Network::Address::IpVersion::v4); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -292,7 +299,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TransportSocketConnectTimeout) { transport_socket_connect_timeout: 3s )EOF"; - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); auto filter_chain = findFilterChain(1234, "127.0.0.1", "", "", {}, "8.8.8.8", 111); ASSERT_NE(filter_chain, nullptr); @@ -328,12 +335,12 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, UdpAddress) { EXPECT_CALL(server_.api_.random_, uuid()); EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_CALL(listener_factory_, - createListenSocket(_, Network::Socket::Type::Datagram, _, {{true, false}})) - .WillOnce(Invoke([this](const Network::Address::InstanceConstSharedPtr&, - Network::Socket::Type, const Network::Socket::OptionsSharedPtr&, - const ListenSocketCreationParams&) -> Network::SocketSharedPtr { - return listener_factory_.socket_; - })); + createListenSocket(_, Network::Socket::Type::Datagram, _, + ListenerComponentFactory::BindType::ReusePort, 0)) + .WillOnce(Invoke( + [this](const Network::Address::InstanceConstSharedPtr&, Network::Socket::Type, + const Network::Socket::OptionsSharedPtr&, ListenerComponentFactory::BindType, + uint32_t) -> Network::SocketSharedPtr { return listener_factory_.socket_; })); EXPECT_CALL(*listener_factory_.socket_, setSocketOption(_, _, _, _)).Times(testing::AtLeast(1)); EXPECT_CALL(os_sys_calls_, close(_)).WillRepeatedly(Return(Api::SysCallIntResult{0, errno})); manager_->addOrUpdateListener(listener_proto, "", true); @@ -532,7 +539,8 @@ bind_to_port: false - name: 
stats_test )EOF"; - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, ListenSocketCreationParams(false))); + EXPECT_CALL(listener_factory_, + createListenSocket(_, _, _, ListenerComponentFactory::BindType::NoBind, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); manager_->listeners().front().get().listenerScope().counterFromString("foo").inc(); @@ -562,7 +570,7 @@ TEST_F(ListenerManagerImplTest, NotDefaultListenerFiltersTimeout) { listener_filters_timeout: 0s )EOF"; - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true)); EXPECT_EQ(std::chrono::milliseconds(), manager_->listeners().front().get().listenerFiltersTimeout()); @@ -583,7 +591,7 @@ TEST_F(ListenerManagerImplTest, ModifyOnlyDrainType) { ListenerHandle* listener_foo = expectListenerCreate(false, true, envoy::config::listener::v3::Listener::MODIFY_ONLY); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); @@ -611,7 +619,7 @@ filter_chains: {} )EOF"; ListenerHandle* listener_foo = expectListenerCreate(false, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "version1", true)); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); @@ -650,7 +658,7 @@ filter_chains: {} ListenerHandle* listener_foo_different_address = expectListenerCreate(false, true); // Another socket should be created. 
- EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(*listener_foo, onDestroy()); EXPECT_TRUE(manager_->addOrUpdateListener( parseListenerFromV3Yaml(listener_foo_different_address_yaml), "version2", true)); @@ -679,7 +687,7 @@ version_info: version2 EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_CALL(*worker_, start(_, _)); manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); time_system_.setSystemTime(std::chrono::milliseconds(3003003003003)); @@ -694,7 +702,7 @@ filter_chains: {} )EOF"; ListenerHandle* listener_baz = expectListenerCreate(true, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(listener_baz->target_, initialize()); EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_baz_yaml), "version3", true)); @@ -747,7 +755,7 @@ filter_chains: {} // Modify the address of a warming listener. ListenerHandle* listener_baz_different_address = expectListenerCreate(true, true); // Another socket should be created. 
- EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(*listener_baz, onDestroy()).WillOnce(Invoke([listener_baz]() -> void { listener_baz->target_.ready(); })); @@ -791,7 +799,7 @@ version_info: version4 EXPECT_CALL(*worker_, addListener(_, _, _)); listener_baz_different_address->target_.ready(); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); EXPECT_CALL(*listener_foo_different_address, onDestroy()); EXPECT_CALL(*listener_baz_different_address, onDestroy()); @@ -822,7 +830,7 @@ drain_type: default .WillByDefault(Return(Api::SysCallSocketResult{INVALID_SOCKET, 0})); ON_CALL(os_sys_calls_, close(_)).WillByDefault(Return(Api::SysCallIntResult{0, 0})); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); @@ -854,7 +862,7 @@ drain_type: default .WillByDefault(Return(Api::SysCallSocketResult{5, 0})); ON_CALL(os_sys_calls_, close(_)).WillByDefault(Return(Api::SysCallIntResult{0, 0})); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); @@ -879,7 +887,7 @@ name: foo )EOF"; ListenerHandle* listener_foo = expectListenerCreate(false, false); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", false)); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); checkConfigDump(R"EOF( @@ 
-959,7 +967,7 @@ filter_chains: {} { // Add and remove a listener before starting workers. ListenerHandle* listener_foo = expectListenerCreate(true, true); EXPECT_CALL(server_, initManager()).WillOnce(ReturnRef(server_init_mgr)); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "version1", true)); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); @@ -1010,7 +1018,7 @@ version_info: version1 server_init_watcher.expectReady().Times(0); { ListenerHandle* listener_foo2 = expectListenerCreate(true, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); // Version 2 listener will be initialized by listener manager directly. EXPECT_CALL(listener_foo2->target_, initialize()); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), @@ -1064,7 +1072,7 @@ filter_chains: {} )EOF"; ListenerHandle* listener_foo = expectListenerCreate(false, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "version1", true)); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); @@ -1097,6 +1105,7 @@ name: foo )EOF"; ListenerHandle* listener_foo_update1 = expectListenerOverridden(false); + EXPECT_CALL(*listener_factory_.socket_, duplicate()); EXPECT_CALL(*worker_, addListener(_, _, _)); auto* timer = new Event::MockTimer(dynamic_cast(&server_.dispatcher())); EXPECT_CALL(*timer, enableTimer(_, _)); @@ -1104,7 +1113,7 @@ name: foo manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); EXPECT_EQ(1UL, manager_->listeners().size()); - 
worker_->callAddCompletion(true); + worker_->callAddCompletion(); checkStats(__LINE__, 1, 1, 0, 0, 1, 0, 1); EXPECT_CALL(*worker_, removeFilterChains(_, _, _)); @@ -1144,7 +1153,7 @@ filter_chains: {} )EOF"; ListenerHandle* listener_foo = expectListenerCreate(false, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "version1", true)); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); @@ -1180,7 +1189,7 @@ version_info: version1 EXPECT_FALSE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); - // Update foo listener. Should share socket. + // Update foo listener. const std::string listener_foo_update1_yaml = R"EOF( name: "foo" address: @@ -1194,6 +1203,9 @@ per_connection_buffer_limit_bytes: 10 time_system_.setSystemTime(std::chrono::milliseconds(2002002002002)); ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true); + auto duplicated_socket = new NiceMock(); + EXPECT_CALL(*listener_factory_.socket_, duplicate()) + .WillOnce(Return(ByMove(std::unique_ptr(duplicated_socket)))); EXPECT_CALL(*listener_foo, onDestroy()); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "version2", true)); @@ -1241,7 +1253,7 @@ version_info: version2 EXPECT_EQ(0, server_.stats_store_ .gauge("listener_manager.workers_started", Stats::Gauge::ImportMode::NeverImport) .value()); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); // Validate that workers_started stat is set to 1 after workers have responded with initialization // status. @@ -1259,12 +1271,13 @@ version_info: version2 // Update foo. Should go into warming, have an immediate warming callback, and start immediate // removal. 
ListenerHandle* listener_foo_update2 = expectListenerCreate(false, true); + EXPECT_CALL(*duplicated_socket, duplicate()); EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_CALL(*worker_, stopListener(_, _)); EXPECT_CALL(*listener_foo_update1->drain_manager_, startDrainSequence(_)); EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "version3", true)); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); checkStats(__LINE__, 1, 2, 0, 0, 1, 1, 0); EXPECT_CALL(*lds_api, versionInfo()).WillOnce(Return("version3")); checkConfigDump(R"EOF( @@ -1329,12 +1342,12 @@ filter_chains: {} )EOF"; ListenerHandle* listener_bar = expectListenerCreate(false, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_bar_yaml), "version4", true)); EXPECT_EQ(2UL, manager_->listeners().size()); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); checkStats(__LINE__, 2, 2, 0, 0, 2, 0, 0); time_system_.setSystemTime(std::chrono::milliseconds(5005005005005)); @@ -1350,7 +1363,7 @@ filter_chains: {} )EOF"; ListenerHandle* listener_baz = expectListenerCreate(true, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(listener_baz->target_, initialize()); EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_baz_yaml), "version5", true)); @@ -1429,6 +1442,7 @@ name: baz )EOF"; ListenerHandle* listener_baz_update1 = expectListenerCreate(true, true); + EXPECT_CALL(*listener_factory_.socket_, duplicate()); EXPECT_CALL(*listener_baz, onDestroy()).WillOnce(Invoke([listener_baz]() -> void { // Call the initialize callback during destruction like RDS will. 
listener_baz->target_.ready(); @@ -1443,7 +1457,7 @@ name: baz EXPECT_CALL(*worker_, addListener(_, _, _)); listener_baz_update1->target_.ready(); EXPECT_EQ(3UL, manager_->listeners().size()); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); checkStats(__LINE__, 3, 3, 0, 0, 3, 0, 0); EXPECT_CALL(*listener_foo_update2, onDestroy()); @@ -1469,13 +1483,13 @@ name: foo )EOF"; ListenerHandle* listener_foo = expectListenerCreate(true, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); EXPECT_EQ(1UL, manager_->listeners().size()); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); @@ -1492,6 +1506,7 @@ per_connection_buffer_limit_bytes: 999 )EOF"; ListenerHandle* listener_foo_update1 = expectListenerCreate(true, true); + EXPECT_CALL(*listener_factory_.socket_, duplicate()); EXPECT_CALL(listener_foo_update1->target_, initialize()); EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); @@ -1534,10 +1549,10 @@ name: foo listener_factory_.socket_->address_provider_->setLocalAddress(local_address); ListenerHandle* listener_foo = expectListenerCreate(false, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); // Remove 
foo into draining. @@ -1555,14 +1570,14 @@ name: foo listener_foo->drain_manager_->drain_sequence_completion_(); checkStats(__LINE__, 1, 0, 1, 0, 0, 1, 0); - // Add foo again. We should use the socket from draining. + // Add foo again. ListenerHandle* listener_foo2 = expectListenerCreate(false, true); + EXPECT_CALL(*listener_factory_.socket_, duplicate()); EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); checkStats(__LINE__, 2, 0, 1, 0, 1, 1, 0); - EXPECT_CALL(*listener_factory_.socket_, close()).Times(0); stop_completion(); EXPECT_CALL(*listener_foo, onDestroy()); @@ -1594,10 +1609,10 @@ name: foo listener_factory_.socket_->address_provider_->setLocalAddress(local_address); ListenerHandle* listener_foo = expectListenerCreate(false, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); // Remove foo into draining. @@ -1612,10 +1627,10 @@ name: foo // Add foo again. We should use the socket from draining. 
ListenerHandle* listener_foo2 = expectListenerCreate(false, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); checkStats(__LINE__, 2, 0, 1, 0, 1, 1, 0); EXPECT_CALL(*listener_foo, onDestroy()); @@ -1641,23 +1656,24 @@ name: foo address: 127.0.0.1 port_value: 1234 bind_to_port: false -reuse_port: false filter_chains: - filters: [] )EOF"; ListenerHandle* listener_foo = expectListenerCreate(true, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, ListenSocketCreationParams(false))) - .WillOnce(Invoke([this, &real_listener_factory]( - const Network::Address::InstanceConstSharedPtr& address, - Network::Socket::Type socket_type, - const Network::Socket::OptionsSharedPtr& options, - const ListenSocketCreationParams& params) -> Network::SocketSharedPtr { - EXPECT_CALL(server_, hotRestart).Times(0); - // When bind_to_port is equal to false, the BSD socket is not created at main thread. - EXPECT_CALL(os_sys_calls_, socket(AF_INET, _, 0)).Times(0); - return real_listener_factory.createListenSocket(address, socket_type, options, params); - })); + EXPECT_CALL(listener_factory_, + createListenSocket(_, _, _, ListenerComponentFactory::BindType::NoBind, 0)) + .WillOnce(Invoke( + [this, &real_listener_factory](const Network::Address::InstanceConstSharedPtr& address, + Network::Socket::Type socket_type, + const Network::Socket::OptionsSharedPtr& options, + ListenerComponentFactory::BindType bind_type, + uint32_t worker_index) -> Network::SocketSharedPtr { + // When bind_to_port is equal to false, the BSD socket is not created at main thread. 
+ EXPECT_CALL(os_sys_calls_, socket(AF_INET, _, 0)).Times(0); + return real_listener_factory.createListenSocket(address, socket_type, options, + bind_type, worker_index); + })); EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_CALL(*listener_foo, onDestroy()); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); @@ -1679,27 +1695,28 @@ name: foo address: 127.0.0.1 port_value: 1234 bind_to_port: false -reuse_port: false filter_chains: - filters: [] )EOF"; ListenerHandle* listener_foo = expectListenerCreate(false, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, ListenSocketCreationParams(false))) - .WillOnce(Invoke([this, &real_listener_factory]( - const Network::Address::InstanceConstSharedPtr& address, - Network::Socket::Type socket_type, - const Network::Socket::OptionsSharedPtr& options, - const ListenSocketCreationParams& params) -> Network::SocketSharedPtr { - EXPECT_CALL(server_, hotRestart).Times(0); - // When bind_to_port is equal to false, the BSD socket is not created at main thread. - EXPECT_CALL(os_sys_calls_, socket(AF_INET, _, 0)).Times(0); - return real_listener_factory.createListenSocket(address, socket_type, options, params); - })); + EXPECT_CALL(listener_factory_, + createListenSocket(_, _, _, ListenerComponentFactory::BindType::NoBind, 0)) + .WillOnce(Invoke( + [this, &real_listener_factory](const Network::Address::InstanceConstSharedPtr& address, + Network::Socket::Type socket_type, + const Network::Socket::OptionsSharedPtr& options, + ListenerComponentFactory::BindType bind_type, + uint32_t worker_index) -> Network::SocketSharedPtr { + // When bind_to_port is equal to false, the BSD socket is not created at main thread. 
+ EXPECT_CALL(os_sys_calls_, socket(AF_INET, _, 0)).Times(0); + return real_listener_factory.createListenSocket(address, socket_type, options, + bind_type, worker_index); + })); EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); EXPECT_CALL(*listener_foo->drain_manager_, drainClose()).WillOnce(Return(false)); EXPECT_CALL(server_.drain_manager_, drainClose()).WillOnce(Return(false)); @@ -1738,17 +1755,20 @@ name: foo ASSERT_TRUE(SOCKET_VALID(syscall_result.rc_)); ListenerHandle* listener_foo = expectListenerCreate(true, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, ListenSocketCreationParams(false))) + EXPECT_CALL(listener_factory_, + createListenSocket(_, _, _, ListenerComponentFactory::BindType::NoBind, 0)) .WillOnce(Invoke([this, &syscall_result, &real_listener_factory]( const Network::Address::InstanceConstSharedPtr& address, Network::Socket::Type socket_type, const Network::Socket::OptionsSharedPtr& options, - const ListenSocketCreationParams& params) -> Network::SocketSharedPtr { + ListenerComponentFactory::BindType bind_type, + uint32_t worker_index) -> Network::SocketSharedPtr { EXPECT_CALL(server_, hotRestart).Times(0); // When bind_to_port is equal to false, create socket fd directly, and do not get socket // fd through hot restart. 
ON_CALL(os_sys_calls_, socket(AF_INET, _, 0)).WillByDefault(Return(syscall_result)); - return real_listener_factory.createListenSocket(address, socket_type, options, params); + return real_listener_factory.createListenSocket(address, socket_type, options, bind_type, + worker_index); })); EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_CALL(*listener_foo, onDestroy()); @@ -1766,7 +1786,6 @@ name: foo socket_address: address: 127.0.0.1 port_value: 0 -reuse_port: true filter_chains: - filters: [] )EOF"; @@ -1785,15 +1804,16 @@ reuse_port: true return result; })); ListenerHandle* listener_foo = expectListenerCreate(true, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {{true, false}})) + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)) .WillOnce(Invoke([this, &syscall_result, &real_listener_factory]( const Network::Address::InstanceConstSharedPtr& address, Network::Socket::Type socket_type, const Network::Socket::OptionsSharedPtr& options, - const ListenSocketCreationParams& params) -> Network::SocketSharedPtr { - EXPECT_CALL(server_, hotRestart).Times(0); + ListenerComponentFactory::BindType bind_type, + uint32_t worker_index) -> Network::SocketSharedPtr { ON_CALL(os_sys_calls_, socket(AF_INET, _, 0)).WillByDefault(Return(syscall_result)); - return real_listener_factory.createListenSocket(address, socket_type, options, params); + return real_listener_factory.createListenSocket(address, socket_type, options, bind_type, + worker_index); })); EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_CALL(*listener_foo, onDestroy()); @@ -1804,7 +1824,7 @@ TEST_F(ListenerManagerImplTest, NotSupportedDatagramUds) { ProdListenerComponentFactory real_listener_factory(server_); EXPECT_THROW_WITH_MESSAGE(real_listener_factory.createListenSocket( std::make_shared("/foo"), - Network::Socket::Type::Datagram, nullptr, {true}), + Network::Socket::Type::Datagram, nullptr, default_bind_type, 0), EnvoyException, "socket type 
SocketType::Datagram not supported for pipes"); } @@ -1822,16 +1842,21 @@ name: foo socket_address: address: 127.0.0.1 port_value: 1234 +enable_reuse_port: false filter_chains: - filters: [] )EOF"; ListenerHandle* listener_foo = expectListenerCreate(true, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})) + EXPECT_CALL(listener_factory_, + createListenSocket(_, _, _, ListenerComponentFactory::BindType::NoReusePort, 0)) .WillOnce(Throw(EnvoyException("can't bind"))); EXPECT_CALL(*listener_foo, onDestroy()); EXPECT_THROW(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true), EnvoyException); + EXPECT_EQ( + 1UL, + server_.stats_store_.counterFromString("listener_manager.listener_create_failure").value()); checkConfigDump(R"EOF( dynamic_listeners: - name: foo @@ -1843,6 +1868,7 @@ name: foo socket_address: address: 127.0.0.1 port_value: 1234 + enable_reuse_port: false filter_chains: - {} last_update_attempt: @@ -1916,10 +1942,10 @@ name: foo )EOF"; ListenerHandle* listener_foo = expectListenerCreate(false, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); EXPECT_CALL(*listener_foo->drain_manager_, drainClose()).WillOnce(Return(false)); @@ -1970,7 +1996,7 @@ name: foo )EOF"; ListenerHandle* listener_foo = expectListenerCreate(true, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); EXPECT_EQ(0UL, 
manager_->listeners().size()); @@ -1984,13 +2010,13 @@ name: foo // Add foo again and initialize it. listener_foo = expectListenerCreate(true, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); checkStats(__LINE__, 2, 0, 1, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); EXPECT_EQ(1UL, manager_->listeners().size()); checkStats(__LINE__, 2, 0, 1, 0, 1, 0, 0); @@ -2007,6 +2033,7 @@ per_connection_buffer_limit_bytes: 999 )EOF"; ListenerHandle* listener_foo_update1 = expectListenerCreate(true, true); + EXPECT_CALL(*listener_factory_.socket_, duplicate()); EXPECT_CALL(listener_foo_update1->target_, initialize()); EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); @@ -2050,14 +2077,14 @@ traffic_direction: INBOUND )EOF"; ListenerHandle* listener_foo = expectListenerCreate(true, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(listener_foo->target_, initialize()); auto foo_inbound_proto = parseListenerFromV3Yaml(listener_foo_yaml); EXPECT_TRUE(manager_->addOrUpdateListener(foo_inbound_proto, "", true)); checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); EXPECT_EQ(1UL, manager_->listeners().size()); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); @@ -2074,13 +2101,13 @@ traffic_direction: OUTBOUND )EOF"; ListenerHandle* listener_foo_outbound = expectListenerCreate(true, true); - 
EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(listener_foo_outbound->target_, initialize()); EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_outbound_yaml), "", true)); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo_outbound->target_.ready(); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); EXPECT_EQ(2UL, manager_->listeners().size()); // Validate that stop listener is only called once - for inbound listeners. @@ -2102,12 +2129,12 @@ traffic_direction: OUTBOUND )EOF"; ListenerHandle* listener_bar_outbound = expectListenerCreate(false, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_bar_outbound_yaml), "", true)); EXPECT_EQ(3UL, manager_->listeners().size()); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); // Validate that adding a listener in stopped listener's traffic direction is not allowed. 
const std::string listener_bar_yaml = R"EOF( @@ -2154,13 +2181,13 @@ name: foo )EOF"; ListenerHandle* listener_foo = expectListenerCreate(true, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); EXPECT_EQ(1UL, manager_->listeners().size()); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); @@ -2203,13 +2230,13 @@ traffic_direction: INBOUND )EOF"; ListenerHandle* listener_foo = expectListenerCreate(true, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); EXPECT_EQ(1UL, manager_->listeners().size()); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); @@ -2227,6 +2254,7 @@ per_connection_buffer_limit_bytes: 999 )EOF"; ListenerHandle* listener_foo_update1 = expectListenerCreate(true, true); + EXPECT_CALL(*listener_factory_.socket_, duplicate()); EXPECT_CALL(listener_foo_update1->target_, initialize()); EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); @@ -2241,81 +2269,6 @@ per_connection_buffer_limit_bytes: 999 EXPECT_EQ(1, server_.stats_store_.counterFromString("listener_manager.listener_stopped").value()); } -TEST_F(ListenerManagerImplTest, 
AddListenerFailure) { - InSequence s; - - EXPECT_CALL(*worker_, start(_, _)); - manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); - - // Add foo listener into active. - const std::string listener_foo_yaml = R"EOF( -name: foo -address: - socket_address: - address: 0.0.0.0 - port_value: 1234 -filter_chains: -- filters: [] - )EOF"; - - ListenerHandle* listener_foo = expectListenerCreate(false, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - EXPECT_CALL(*worker_, addListener(_, _, _)); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); - - EXPECT_CALL(*worker_, stopListener(_, _)); - EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_)); - worker_->callAddCompletion(false); - - EXPECT_CALL(*worker_, removeListener(_, _)); - listener_foo->drain_manager_->drain_sequence_completion_(); - - EXPECT_CALL(*listener_foo, onDestroy()); - worker_->callRemovalCompletion(); - - EXPECT_EQ( - 1UL, - server_.stats_store_.counterFromString("listener_manager.listener_create_failure").value()); -} - -TEST_F(ListenerManagerImplTest, StaticListenerAddFailure) { - InSequence s; - - EXPECT_CALL(*worker_, start(_, _)); - manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); - - // Add foo listener into active. 
- const std::string listener_foo_yaml = R"EOF( -name: foo -address: - socket_address: - address: 0.0.0.0 - port_value: 1234 -filter_chains: -- filters: [] - )EOF"; - - ListenerHandle* listener_foo = expectListenerCreate(false, false); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); - EXPECT_CALL(*worker_, addListener(_, _, _)); - EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", false)); - - EXPECT_CALL(*worker_, stopListener(_, _)); - EXPECT_CALL(*listener_foo->drain_manager_, startDrainSequence(_)); - worker_->callAddCompletion(false); - - EXPECT_CALL(*worker_, removeListener(_, _)); - listener_foo->drain_manager_->drain_sequence_completion_(); - - EXPECT_CALL(*listener_foo, onDestroy()); - worker_->callRemovalCompletion(); - - EXPECT_EQ( - 1UL, - server_.stats_store_.counterFromString("listener_manager.listener_create_failure").value()); - EXPECT_EQ(0, manager_->listeners().size()); -} - TEST_F(ListenerManagerImplTest, StatsNameValidCharacterTest) { const std::string yaml = R"EOF( address: @@ -2368,7 +2321,8 @@ bind_to_port: false )EOF"; ListenerHandle* listener_foo = expectListenerCreate(true, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, ListenSocketCreationParams(false))); + EXPECT_CALL(listener_factory_, + createListenSocket(_, _, _, ListenerComponentFactory::BindType::NoBind, 0)); EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); @@ -2394,7 +2348,7 @@ bind_to_port: false // Move foo to active and then try to add again. This should still fail. 
EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); listener_bar = expectListenerCreate(true, true); EXPECT_CALL(*listener_bar, onDestroy()); @@ -2435,7 +2389,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDestinationP Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2481,7 +2435,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDirectSource Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2526,7 +2480,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithDestinationI Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2571,7 +2525,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithServerNamesM Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", 
true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2617,7 +2571,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithTransportPro Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2659,7 +2613,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithApplicationP Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2704,7 +2658,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceTypeMa Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2764,7 +2718,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceIpMatc Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2823,7 +2777,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourceIpv6Ma 
Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2861,7 +2815,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, SingleFilterChainWithSourcePortMa Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -2927,7 +2881,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainWithSourceType Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3013,7 +2967,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDestinati Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3098,7 +3052,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDestinati Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, 
createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3193,7 +3147,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDirectSou Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3287,7 +3241,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithServerNam Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3358,7 +3312,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithTransport Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3402,7 +3356,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithApplicati Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3451,7 +3405,7 
@@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithMultipleR Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -3525,7 +3479,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, MultipleFilterChainsWithDifferent Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -3565,7 +3519,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -3696,7 +3650,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateInline) { )EOF"); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -3721,7 +3675,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TlsCertificateChainInlinePrivateK Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + 
EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -3957,7 +3911,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstFilter) { )EOF", Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -4041,7 +3995,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilterOutbound) { Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -4096,7 +4050,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstFilterStopsIteration) Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -4146,7 +4100,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilterInbound) { Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, 
manager_->listeners().size()); @@ -4227,7 +4181,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, OriginalDstTestFilterIPv6) { Network::Address::IpVersion::v6); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); @@ -4266,17 +4220,20 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TransparentFreebindListenerDisabl socket_address: { address: 127.0.0.1, port_value: 1111 } transparent: false freebind: false + enable_reuse_port: false filter_chains: - filters: )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})) - .WillOnce(Invoke([&](Network::Address::InstanceConstSharedPtr, Network::Socket::Type, - const Network::Socket::OptionsSharedPtr& options, - const ListenSocketCreationParams&) -> Network::SocketSharedPtr { - EXPECT_EQ(options, nullptr); - return listener_factory_.socket_; - })); + EXPECT_CALL(listener_factory_, + createListenSocket(_, _, _, ListenerComponentFactory::BindType::NoReusePort, 0)) + .WillOnce( + Invoke([&](Network::Address::InstanceConstSharedPtr, Network::Socket::Type, + const Network::Socket::OptionsSharedPtr& options, + ListenerComponentFactory::BindType, uint32_t) -> Network::SocketSharedPtr { + EXPECT_EQ(options, nullptr); + return listener_factory_.socket_; + })); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -4290,6 +4247,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TransparentFreebindListenerDisabl TEST_F(ListenerManagerImplWithRealFiltersTest, TransparentListenerEnabled) { auto listener = createIPv4Listener("TransparentListener"); listener.mutable_transparent()->set_value(true); + listener.mutable_enable_reuse_port()->set_value(false); 
testSocketOption(listener, envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_SOCKET_IP_TRANSPARENT, /* expected_value */ 1, /* expected_num_options */ 2); @@ -4304,6 +4262,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, TransparentListenerEnabled) { TEST_F(ListenerManagerImplWithRealFiltersTest, FreebindListenerEnabled) { auto listener = createIPv4Listener("FreebindListener"); listener.mutable_freebind()->set_value(true); + listener.mutable_enable_reuse_port()->set_value(false); testSocketOption(listener, envoy::config::core::v3::SocketOption::STATE_PREBIND, ENVOY_SOCKET_IP_FREEBIND, /* expected_value */ 1); @@ -4318,6 +4277,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, FreebindListenerEnabled) { TEST_F(ListenerManagerImplWithRealFiltersTest, FastOpenListenerEnabled) { auto listener = createIPv4Listener("FastOpenListener"); listener.mutable_tcp_fast_open_queue_length()->set_value(1); + listener.mutable_enable_reuse_port()->set_value(false); testSocketOption(listener, envoy::config::core::v3::SocketOption::STATE_LISTENING, ENVOY_SOCKET_TCP_FASTOPEN, /* expected_value */ 1); @@ -4327,22 +4287,22 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, FastOpenListenerEnabled) { // propagated to setsockopt(). TEST_F(ListenerManagerImplWithRealFiltersTest, ReusePortListenerEnabledForTcp) { auto listener = createIPv4Listener("ReusePortListener"); - listener.set_reuse_port(true); - // when reuse_port is true, port should be 0 for creating the shared socket, + // When reuse_port is true, port should be 0 for creating the shared socket, // otherwise socket creation will be done on worker thread. 
listener.mutable_address()->mutable_socket_address()->set_port_value(0); - testSocketOption(listener, envoy::config::core::v3::SocketOption::STATE_PREBIND, - ENVOY_SOCKET_SO_REUSEPORT, /* expected_value */ 1, - /* expected_num_options */ 1, - /* expected_creation_params */ {true, false}); + if (default_bind_type == ListenerComponentFactory::BindType::ReusePort) { + testSocketOption(listener, envoy::config::core::v3::SocketOption::STATE_PREBIND, + ENVOY_SOCKET_SO_REUSEPORT, /* expected_value */ 1, + /* expected_num_options */ 1, default_bind_type); + } } TEST_F(ListenerManagerImplWithRealFiltersTest, ReusePortListenerDisabled) { auto listener = createIPv4Listener("UdpListener"); listener.mutable_address()->mutable_socket_address()->set_protocol( envoy::config::core::v3::SocketAddress::UDP); - // For UDP, verify that we fail if reuse port is false and concurrency is > 1. - listener.set_reuse_port(false); + // For UDP, verify that we fail if reuse_port is false and concurrency is > 1. + listener.mutable_enable_reuse_port()->set_value(false); server_.options_.concurrency_ = 2; EXPECT_THROW_WITH_MESSAGE(manager_->addOrUpdateListener(listener, "", true), EnvoyException, @@ -4358,6 +4318,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, LiteralSockoptListenerEnabled) { name: SockoptsListener address: socket_address: { address: 127.0.0.1, port_value: 1111 } + enable_reuse_port: false filter_chains: - filters: socket_options: [ @@ -4370,7 +4331,8 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, LiteralSockoptListenerEnabled) { )EOF"); expectCreateListenSocket(envoy::config::core::v3::SocketOption::STATE_PREBIND, - /* expected_num_options */ 3); + /* expected_num_options */ 3, + ListenerComponentFactory::BindType::NoReusePort); expectSetsockopt( /* expected_sockopt_level */ 1, /* expected_sockopt_name */ 2, @@ -4401,7 +4363,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, AddressResolver) { .WillRepeatedly(Return(Network::Utility::parseInternetAddress("127.0.0.1", 
1111, false))); Registry::InjectFactory register_resolver(mock_resolver); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -4426,7 +4388,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, CRLFilename) { Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -4454,7 +4416,7 @@ TEST_F(ListenerManagerImplWithRealFiltersTest, CRLInline) { Network::Address::IpVersion::v4); EXPECT_CALL(server_.api_.random_, uuid()); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); } @@ -4725,7 +4687,7 @@ traffic_direction: INBOUND )EOF"; ListenerHandle* listener_foo = expectListenerCreate(true, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); @@ -4733,7 +4695,7 @@ traffic_direction: INBOUND checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); EXPECT_EQ(1UL, 
manager_->listeners().size()); checkStats(__LINE__, 1, 0, 0, 0, 1, 0, 0); @@ -4752,6 +4714,7 @@ traffic_direction: INBOUND )EOF"; ListenerHandle* listener_foo_update1 = expectListenerOverridden(true); + EXPECT_CALL(*listener_factory_.socket_, duplicate()); EXPECT_CALL(listener_foo_update1->target_, initialize()); EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); @@ -4787,7 +4750,7 @@ traffic_direction: INBOUND )EOF"; ListenerHandle* listener_foo = expectListenerCreate(true, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); @@ -4795,7 +4758,7 @@ traffic_direction: INBOUND checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); EXPECT_EQ(1UL, manager_->listeners().size()); // Update foo into warming. 
@@ -4813,6 +4776,7 @@ traffic_direction: INBOUND )EOF"; ListenerHandle* listener_foo_update1 = expectListenerOverridden(true); + EXPECT_CALL(*listener_factory_.socket_, duplicate()); EXPECT_CALL(listener_foo_update1->target_, initialize()); EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); @@ -4856,7 +4820,7 @@ traffic_direction: INBOUND )EOF"; ListenerHandle* listener_foo = expectListenerCreate(true, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); @@ -4864,7 +4828,7 @@ traffic_direction: INBOUND checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); EXPECT_EQ(1UL, manager_->listeners().size()); // Update foo into warming. 
@@ -4882,6 +4846,7 @@ traffic_direction: INBOUND )EOF"; ListenerHandle* listener_foo_update1 = expectListenerOverridden(true); + EXPECT_CALL(*listener_factory_.socket_, duplicate()); EXPECT_CALL(listener_foo_update1->target_, initialize()); EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); @@ -4894,7 +4859,7 @@ traffic_direction: INBOUND EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_CALL(*listener_foo, onDestroy()); listener_foo_update1->target_.ready(); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); EXPECT_EQ(1UL, manager_->listeners().size()); EXPECT_CALL(*listener_foo_update1, onDestroy()); @@ -4919,13 +4884,13 @@ traffic_direction: INBOUND )EOF"; ListenerHandle* listener_foo = expectListenerCreate(true, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); EXPECT_EQ(1UL, manager_->listeners().size()); // Update foo into warming. 
@@ -4943,6 +4908,9 @@ traffic_direction: INBOUND )EOF"); ListenerHandle* listener_foo_update1 = expectListenerOverridden(true, listener_foo); + auto duplicated_socket = new NiceMock(); + EXPECT_CALL(*listener_factory_.socket_, duplicate()) + .WillOnce(Return(ByMove(std::unique_ptr(duplicated_socket)))); EXPECT_CALL(listener_foo_update1->target_, initialize()); EXPECT_TRUE(manager_->addOrUpdateListener(listener_foo_update1_proto, "", true)); EXPECT_EQ(1UL, manager_->listeners().size()); @@ -4957,17 +4925,18 @@ traffic_direction: INBOUND listener_foo_update1->target_.ready(); // Sub warming and bump draining filter chain. checkStats(__LINE__, 1, 1, 0, 0, 1, 0, 1); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); auto listener_foo_update2_proto = listener_foo_update1_proto; listener_foo_update2_proto.set_traffic_direction( ::envoy::config::core::v3::TrafficDirection::OUTBOUND); ListenerHandle* listener_foo_update2 = expectListenerCreate(false, true); - expectUpdateToThenDrain(listener_foo_update2_proto, listener_foo_update1); + auto duplicated_socket2 = + expectUpdateToThenDrain(listener_foo_update2_proto, listener_foo_update1, *duplicated_socket); // Bump modified. checkStats(__LINE__, 1, 2, 0, 0, 1, 0, 1); - expectRemove(listener_foo_update2_proto, listener_foo_update2); + expectRemove(listener_foo_update2_proto, listener_foo_update2, *duplicated_socket2); // Bump removed and sub active. 
checkStats(__LINE__, 1, 2, 1, 0, 0, 0, 1); @@ -5001,13 +4970,13 @@ traffic_direction: INBOUND )EOF"; ListenerHandle* listener_foo = expectListenerCreate(true, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); EXPECT_CALL(listener_foo->target_, initialize()); EXPECT_TRUE(manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_yaml), "", true)); checkStats(__LINE__, 1, 0, 0, 1, 0, 0, 0); EXPECT_CALL(*worker_, addListener(_, _, _)); listener_foo->target_.ready(); - worker_->callAddCompletion(true); + worker_->callAddCompletion(); EXPECT_EQ(1UL, manager_->listeners().size()); // Update foo into warming. @@ -5025,6 +4994,7 @@ traffic_direction: INBOUND )EOF"; ListenerHandle* listener_foo_update1 = expectListenerOverridden(true); + EXPECT_CALL(*listener_factory_.socket_, duplicate()); EXPECT_CALL(listener_foo_update1->target_, initialize()); EXPECT_TRUE( manager_->addOrUpdateListener(parseListenerFromV3Yaml(listener_foo_update1_yaml), "", true)); @@ -5116,7 +5086,7 @@ TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfWo // Worker is not started yet. 
auto listener_proto = createDefaultListener(); ListenerHandle* listener_foo = expectListenerCreate(false, true); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, {true})); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, default_bind_type, 0)); manager_->addOrUpdateListener(listener_proto, "", true); EXPECT_EQ(1u, manager_->listeners().size()); @@ -5129,6 +5099,7 @@ TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfWo EXPECT_CALL(*listener_foo, onDestroy()); ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true); + EXPECT_CALL(*listener_factory_.socket_, duplicate()); manager_->addOrUpdateListener(new_listener_proto, "", true); EXPECT_CALL(*listener_foo_update1, onDestroy()); EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); @@ -5149,9 +5120,10 @@ TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfAn envoy::config::core::v3::SocketAddress_Protocol::SocketAddress_Protocol_UDP); ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true); - expectUpdateToThenDrain(new_listener_proto, listener_foo); + auto duplicated_socket = + expectUpdateToThenDrain(new_listener_proto, listener_foo, *listener_factory_.socket_); - expectRemove(new_listener_proto, listener_foo_update1); + expectRemove(new_listener_proto, listener_foo_update1, *duplicated_socket); EXPECT_EQ(0UL, manager_->listeners().size()); EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); @@ -5159,7 +5131,6 @@ TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfAn TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, DEPRECATED_FEATURE_TEST(TraditionalUpdateIfImplicitProxyProtocolChanges)) { - EXPECT_CALL(*worker_, start(_, _)); manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); @@ -5173,8 +5144,9 @@ TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, auto 
new_listener_proto = listener_proto; new_listener_proto.mutable_filter_chains(0)->mutable_use_proxy_proto()->set_value(true); - expectUpdateToThenDrain(new_listener_proto, listener_foo); - expectRemove(new_listener_proto, listener_foo_update1); + auto duplicated_socket = + expectUpdateToThenDrain(new_listener_proto, listener_foo, *listener_factory_.socket_); + expectRemove(new_listener_proto, listener_foo_update1, *duplicated_socket); EXPECT_EQ(0UL, manager_->listeners().size()); EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); } @@ -5197,7 +5169,7 @@ TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateOnZe EnvoyException, "error adding listener '127.0.0.1:1234': no filter chains specified"); - expectRemove(listener_proto, listener_foo); + expectRemove(listener_proto, listener_foo, *listener_factory_.socket_); EXPECT_EQ(0UL, manager_->listeners().size()); EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); } @@ -5216,9 +5188,10 @@ TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, auto new_listener_proto = listener_proto; new_listener_proto.set_traffic_direction(::envoy::config::core::v3::TrafficDirection::INBOUND); - expectUpdateToThenDrain(new_listener_proto, listener_foo); + auto duplicated_socket = + expectUpdateToThenDrain(new_listener_proto, listener_foo, *listener_factory_.socket_); - expectRemove(new_listener_proto, listener_foo_update1); + expectRemove(new_listener_proto, listener_foo_update1, *duplicated_socket); EXPECT_EQ(0UL, manager_->listeners().size()); EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); @@ -5239,7 +5212,7 @@ TEST_F(ListenerManagerImplTest, UdpDefaultWriterConfig) { manager_->addOrUpdateListener(listener, "", true); EXPECT_EQ(1U, manager_->listeners().size()); Network::SocketSharedPtr listen_socket = - 
manager_->listeners().front().get().listenSocketFactory().getListenSocket(); + manager_->listeners().front().get().listenSocketFactory().getListenSocket(0); Network::UdpPacketWriterPtr udp_packet_writer = manager_->listeners() .front() @@ -5262,7 +5235,7 @@ TEST_F(ListenerManagerImplTest, TcpBacklogCustomConfig) { )EOF", Network::Address::IpVersion::v4); - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, _)); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, _, _)); manager_->addOrUpdateListener(parseListenerFromV3Yaml(yaml), "", true); EXPECT_EQ(1U, manager_->listeners().size()); EXPECT_EQ(100U, manager_->listeners().back().get().tcpBacklogSize()); @@ -5276,6 +5249,49 @@ TEST_F(ListenerManagerImplTest, WorkersStartedCallbackCalled) { manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); } +TEST(ListenerEnableReusePortTest, All) { + Server::MockInstance server; + const bool expected_reuse_port = + ListenerManagerImplTest::default_bind_type == ListenerComponentFactory::BindType::ReusePort; + + { + envoy::config::listener::v3::Listener config; + config.mutable_enable_reuse_port()->set_value(false); + config.mutable_address()->mutable_socket_address()->set_protocol( + envoy::config::core::v3::SocketAddress::TCP); + EXPECT_FALSE(ListenerImpl::getReusePortOrDefault(server, config)); + } + { + envoy::config::listener::v3::Listener config; + config.mutable_enable_reuse_port()->set_value(true); + config.mutable_address()->mutable_socket_address()->set_protocol( + envoy::config::core::v3::SocketAddress::TCP); + EXPECT_EQ(expected_reuse_port, ListenerImpl::getReusePortOrDefault(server, config)); + } + { + envoy::config::listener::v3::Listener config; + config.set_reuse_port(true); + config.mutable_address()->mutable_socket_address()->set_protocol( + envoy::config::core::v3::SocketAddress::TCP); + EXPECT_EQ(expected_reuse_port, ListenerImpl::getReusePortOrDefault(server, config)); + } + { + envoy::config::listener::v3::Listener config; + 
config.mutable_enable_reuse_port()->set_value(false); + config.set_reuse_port(true); + config.mutable_address()->mutable_socket_address()->set_protocol( + envoy::config::core::v3::SocketAddress::TCP); + EXPECT_FALSE(ListenerImpl::getReusePortOrDefault(server, config)); + } + { + envoy::config::listener::v3::Listener config; + config.mutable_address()->mutable_socket_address()->set_protocol( + envoy::config::core::v3::SocketAddress::TCP); + EXPECT_CALL(server, enableReusePortDefault()); + EXPECT_EQ(expected_reuse_port, ListenerImpl::getReusePortOrDefault(server, config)); + } +} + } // namespace } // namespace Server } // namespace Envoy diff --git a/test/server/listener_manager_impl_test.h b/test/server/listener_manager_impl_test.h index 26caf727ecb4d..856436d9c2423 100644 --- a/test/server/listener_manager_impl_test.h +++ b/test/server/listener_manager_impl_test.h @@ -53,6 +53,16 @@ class ListenerHandle { }; class ListenerManagerImplTest : public testing::Test { +public: + // reuse_port is the default on Linux for TCP. On other platforms even if set it is disabled + // and the user is warned. For UDP it's always the default even if not effective. 
+ static constexpr ListenerComponentFactory::BindType default_bind_type = +#ifdef __linux__ + ListenerComponentFactory::BindType::ReusePort; +#else + ListenerComponentFactory::BindType::NoReusePort; +#endif + protected: ListenerManagerImplTest() : api_(Api::createApiForTest(server_.api_.random_)) {} @@ -219,18 +229,20 @@ class ListenerManagerImplTest : public testing::Test { void expectCreateListenSocket(const envoy::config::core::v3::SocketOption::SocketState& expected_state, Network::Socket::Options::size_type expected_num_options, - ListenSocketCreationParams expected_creation_params = {true, true}) { - EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, expected_creation_params)) - .WillOnce(Invoke([this, expected_num_options, &expected_state]( - const Network::Address::InstanceConstSharedPtr&, Network::Socket::Type, - const Network::Socket::OptionsSharedPtr& options, - const ListenSocketCreationParams&) -> Network::SocketSharedPtr { - EXPECT_NE(options.get(), nullptr); - EXPECT_EQ(options->size(), expected_num_options); - EXPECT_TRUE( - Network::Socket::applyOptions(options, *listener_factory_.socket_, expected_state)); - return listener_factory_.socket_; - })); + ListenerComponentFactory::BindType bind_type = default_bind_type, + uint32_t worker_index = 0) { + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, bind_type, worker_index)) + .WillOnce( + Invoke([this, expected_num_options, &expected_state]( + const Network::Address::InstanceConstSharedPtr&, Network::Socket::Type, + const Network::Socket::OptionsSharedPtr& options, + ListenerComponentFactory::BindType, uint32_t) -> Network::SocketSharedPtr { + EXPECT_NE(options.get(), nullptr); + EXPECT_EQ(options->size(), expected_num_options); + EXPECT_TRUE(Network::Socket::applyOptions(options, *listener_factory_.socket_, + expected_state)); + return listener_factory_.socket_; + })); } /** diff --git a/test/server/server_test.cc b/test/server/server_test.cc index f2c005356bcdf..5081f1a05315c 
100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -347,6 +347,7 @@ TEST_P(ServerInstanceImplTest, StatsFlushWhenServerIsStillInitializing) { EXPECT_EQ(3L, TestUtility::findGauge(stats_store_, "server.state")->value()); EXPECT_EQ(Init::Manager::State::Initializing, server_->initManager().state()); + EXPECT_TRUE(server_->enableReusePortDefault()); server_->dispatcher().post([&] { server_->shutdown(); }); server_thread->join(); } @@ -1135,6 +1136,7 @@ TEST_P(ServerInstanceImplTest, BootstrapNodeWithOptionsOverride) { TEST_P(ServerInstanceImplTest, BootstrapRuntime) { options_.service_cluster_name_ = "some_service"; initialize("test/server/test_data/server/runtime_bootstrap.yaml"); + EXPECT_FALSE(server_->enableReusePortDefault()); EXPECT_EQ("bar", server_->runtime().snapshot().get("foo").value().get()); // This should access via the override/some_service overlay. EXPECT_EQ("fozz", server_->runtime().snapshot().get("fizz").value().get()); diff --git a/test/server/test_data/server/runtime_bootstrap.yaml b/test/server/test_data/server/runtime_bootstrap.yaml index 9d14d094e7d8d..6329649b0f497 100644 --- a/test/server/test_data/server/runtime_bootstrap.yaml +++ b/test/server/test_data/server/runtime_bootstrap.yaml @@ -3,6 +3,7 @@ layered_runtime: - name: some_static_layer static_layer: foo: bar + envoy.reloadable_features.listener_reuse_port_default_enabled: false - name: base_disk_layer disk_layer: {symlink_root: "{{ test_rundir }}/test/server/test_data/runtime/primary"} - name: overlay_disk_layer diff --git a/test/server/worker_impl_test.cc b/test/server/worker_impl_test.cc index f506f20c39896..e83aa447c6ee4 100644 --- a/test/server/worker_impl_test.cc +++ b/test/server/worker_impl_test.cc @@ -73,10 +73,7 @@ TEST_F(WorkerImplTest, BasicFlow) { EXPECT_EQ(config.listenerTag(), 1UL); EXPECT_NE(current_thread_id, std::this_thread::get_id()); })); - worker_.addListener(absl::nullopt, listener, [&ci](bool success) -> void { - EXPECT_TRUE(success); - 
ci.setReady(); - }); + worker_.addListener(absl::nullopt, listener, [&ci]() -> void { ci.setReady(); }); NiceMock store; worker_.start(guard_dog_, emptyCallback); @@ -92,10 +89,7 @@ TEST_F(WorkerImplTest, BasicFlow) { EXPECT_EQ(config.listenerTag(), 2UL); EXPECT_NE(current_thread_id, std::this_thread::get_id()); })); - worker_.addListener(absl::nullopt, listener2, [&ci](bool success) -> void { - EXPECT_TRUE(success); - ci.setReady(); - }); + worker_.addListener(absl::nullopt, listener2, [&ci]() -> void { ci.setReady(); }); ci.waitReady(); EXPECT_CALL(*handler_, stopListeners(2)) @@ -132,10 +126,7 @@ TEST_F(WorkerImplTest, BasicFlow) { EXPECT_EQ(config.listenerTag(), 3UL); EXPECT_NE(current_thread_id, std::this_thread::get_id()); })); - worker_.addListener(absl::nullopt, listener3, [&ci](bool success) -> void { - EXPECT_TRUE(success); - ci.setReady(); - }); + worker_.addListener(absl::nullopt, listener3, [&ci]() -> void { ci.setReady(); }); ci.waitReady(); EXPECT_CALL(*handler_, removeListeners(3)) @@ -149,19 +140,6 @@ TEST_F(WorkerImplTest, BasicFlow) { worker_.stop(); } -TEST_F(WorkerImplTest, ListenerException) { - InSequence s; - - NiceMock listener; - ON_CALL(listener, listenerTag()).WillByDefault(Return(1UL)); - EXPECT_CALL(*handler_, addListener(_, _)) - .WillOnce(Throw(Network::CreateListenerException("failed"))); - worker_.addListener(absl::nullopt, listener, [](bool success) -> void { EXPECT_FALSE(success); }); - - worker_.start(guard_dog_, emptyCallback); - worker_.stop(); -} - TEST_F(WorkerImplTest, WorkerInvokesProvidedCallback) { absl::Notification callback_ran; auto cb = [&callback_ran]() { callback_ran.Notify(); }; diff --git a/test/test_common/network_utility.h b/test/test_common/network_utility.h index 47cea59e53ba7..2eb6f82b2b180 100644 --- a/test/test_common/network_utility.h +++ b/test/test_common/network_utility.h @@ -8,8 +8,11 @@ #include "envoy/network/io_handle.h" #include "envoy/network/transport_socket.h" +#include 
"source/common/network/listen_socket_impl.h" #include "source/common/network/utility.h" +#include "gtest/gtest.h" + namespace Envoy { namespace Network { namespace Test { @@ -209,6 +212,18 @@ class UdpSyncPeer { std::list received_datagrams_; }; +/** + * A test version of TcpListenSocket that immediately listens which is a common pattern in tests. + */ +class TcpListenSocketImmediateListen : public Network::TcpListenSocket { +public: + TcpListenSocketImmediateListen(const Address::InstanceConstSharedPtr& address, + const Network::Socket::OptionsSharedPtr& options = nullptr) + : TcpListenSocket(address, options, true) { + EXPECT_EQ(0, io_handle_->listen(ENVOY_TCP_BACKLOG_SIZE).rc_); + } +}; + } // namespace Test } // namespace Network } // namespace Envoy From b145180d17cac80aa5f9a7801429d52017fea6d1 Mon Sep 17 00:00:00 2001 From: Takeshi Yoneda Date: Tue, 20 Jul 2021 15:51:53 +0900 Subject: [PATCH 09/57] doc: put 1.19 release date/EOL in RELEASES.md (#17416) Signed-off-by: Takeshi Yoneda --- RELEASES.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/RELEASES.md b/RELEASES.md index d9096b2439930..c72eeb63805a8 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -71,6 +71,7 @@ deadline of 3 weeks. 
| 1.16.0 | 2020/09/30 | 2020/10/08 | +8 days | 2021/10/08 | | 1.17.0 | 2020/12/31 | 2021/01/11 | +11 days | 2022/01/11 | | 1.18.0 | 2021/03/31 | 2021/04/15 | +15 days | 2022/04/15 | -| 1.19.0 | 2021/06/30 | | | | +| 1.19.0 | 2021/06/30 | 2021/07/13 | +13 days | 2022/07/13 | +| 1.20.0 | 2021/09/30 | | | | [repokitteh]: https://github.com/repokitteh From 1f752f04dda4af49d5d99372bdcc448e1921bc73 Mon Sep 17 00:00:00 2001 From: Long Dai Date: Tue, 20 Jul 2021 23:33:28 +0800 Subject: [PATCH 10/57] stat_sinks: remove well_known_names (#16794) Signed-off-by: Long Dai --- source/extensions/stat_sinks/BUILD | 11 ------ source/extensions/stat_sinks/dog_statsd/BUILD | 1 - .../stat_sinks/dog_statsd/config.cc | 3 +- .../extensions/stat_sinks/dog_statsd/config.h | 3 ++ source/extensions/stat_sinks/hystrix/BUILD | 1 - .../extensions/stat_sinks/hystrix/config.cc | 3 +- source/extensions/stat_sinks/hystrix/config.h | 3 ++ .../stat_sinks/metrics_service/BUILD | 1 - .../stat_sinks/metrics_service/config.cc | 3 +- .../stat_sinks/metrics_service/config.h | 3 ++ source/extensions/stat_sinks/statsd/BUILD | 1 - source/extensions/stat_sinks/statsd/config.cc | 3 +- source/extensions/stat_sinks/statsd/config.h | 3 ++ source/extensions/stat_sinks/wasm/BUILD | 1 - source/extensions/stat_sinks/wasm/config.cc | 3 +- source/extensions/stat_sinks/wasm/config.h | 3 ++ .../extensions/stat_sinks/well_known_names.h | 35 ------------------- .../stats_sinks/dog_statsd/config_test.cc | 21 +++-------- .../stats_sinks/hystrix/config_test.cc | 5 +-- .../metrics_service/config_test.cc | 4 +-- .../stats_sinks/statsd/config_test.cc | 23 ++++-------- .../stats_sinks/wasm/config_test.cc | 5 ++- test/server/configuration_impl_test.cc | 4 +-- 23 files changed, 37 insertions(+), 106 deletions(-) delete mode 100644 source/extensions/stat_sinks/well_known_names.h diff --git a/source/extensions/stat_sinks/BUILD b/source/extensions/stat_sinks/BUILD index 40a5e79b39d3b..90e061ad8da39 100644 --- 
a/source/extensions/stat_sinks/BUILD +++ b/source/extensions/stat_sinks/BUILD @@ -1,19 +1,8 @@ load( "//bazel:envoy_build_system.bzl", - "envoy_cc_library", "envoy_extension_package", ) licenses(["notice"]) # Apache 2 envoy_extension_package() - -envoy_cc_library( - name = "well_known_names", - hdrs = ["well_known_names.h"], - # well known names files are public as long as they exist. - visibility = ["//visibility:public"], - deps = [ - "//source/common/singleton:const_singleton", - ], -) diff --git a/source/extensions/stat_sinks/dog_statsd/BUILD b/source/extensions/stat_sinks/dog_statsd/BUILD index ce75b191dedde..5c62fcc58f0a1 100644 --- a/source/extensions/stat_sinks/dog_statsd/BUILD +++ b/source/extensions/stat_sinks/dog_statsd/BUILD @@ -19,7 +19,6 @@ envoy_cc_extension( "//envoy/registry", "//source/common/network:address_lib", "//source/common/network:resolver_lib", - "//source/extensions/stat_sinks:well_known_names", "//source/extensions/stat_sinks/common/statsd:statsd_lib", "//source/server:configuration_lib", "@envoy_api//envoy/config/metrics/v3:pkg_cc_proto", diff --git a/source/extensions/stat_sinks/dog_statsd/config.cc b/source/extensions/stat_sinks/dog_statsd/config.cc index e31ac22f68e12..58dbc9e57c379 100644 --- a/source/extensions/stat_sinks/dog_statsd/config.cc +++ b/source/extensions/stat_sinks/dog_statsd/config.cc @@ -8,7 +8,6 @@ #include "source/common/network/resolver_impl.h" #include "source/extensions/stat_sinks/common/statsd/statsd.h" -#include "source/extensions/stat_sinks/well_known_names.h" #include "absl/types/optional.h" @@ -38,7 +37,7 @@ ProtobufTypes::MessagePtr DogStatsdSinkFactory::createEmptyConfigProto() { return std::make_unique(); } -std::string DogStatsdSinkFactory::name() const { return StatsSinkNames::get().DogStatsd; } +std::string DogStatsdSinkFactory::name() const { return DogStatsdName; } /** * Static registration for the this sink factory. @see RegisterFactory. 
diff --git a/source/extensions/stat_sinks/dog_statsd/config.h b/source/extensions/stat_sinks/dog_statsd/config.h index 69806524bbe6a..3735f4c596ba2 100644 --- a/source/extensions/stat_sinks/dog_statsd/config.h +++ b/source/extensions/stat_sinks/dog_statsd/config.h @@ -9,6 +9,9 @@ namespace Extensions { namespace StatSinks { namespace DogStatsd { +// DogStatsD compatible statsd sink +constexpr char DogStatsdName[] = "envoy.stat_sinks.dog_statsd"; + /** * Config registration for the DogStatsD compatible statsd sink. @see StatsSinkFactory. */ diff --git a/source/extensions/stat_sinks/hystrix/BUILD b/source/extensions/stat_sinks/hystrix/BUILD index 3908e49d32724..25a1ffa4a49f1 100644 --- a/source/extensions/stat_sinks/hystrix/BUILD +++ b/source/extensions/stat_sinks/hystrix/BUILD @@ -20,7 +20,6 @@ envoy_cc_extension( "//envoy/registry", "//source/common/network:address_lib", "//source/common/network:resolver_lib", - "//source/extensions/stat_sinks:well_known_names", "//source/server:configuration_lib", "@envoy_api//envoy/config/metrics/v3:pkg_cc_proto", ], diff --git a/source/extensions/stat_sinks/hystrix/config.cc b/source/extensions/stat_sinks/hystrix/config.cc index b9ae021fb9af3..c6a5912d49861 100644 --- a/source/extensions/stat_sinks/hystrix/config.cc +++ b/source/extensions/stat_sinks/hystrix/config.cc @@ -8,7 +8,6 @@ #include "source/common/network/resolver_impl.h" #include "source/extensions/stat_sinks/hystrix/hystrix.h" -#include "source/extensions/stat_sinks/well_known_names.h" namespace Envoy { namespace Extensions { @@ -28,7 +27,7 @@ ProtobufTypes::MessagePtr HystrixSinkFactory::createEmptyConfigProto() { return std::make_unique(); } -std::string HystrixSinkFactory::name() const { return StatsSinkNames::get().Hystrix; } +std::string HystrixSinkFactory::name() const { return HystrixName; } /** * Static registration for the statsd sink factory. @see RegisterFactory. 
diff --git a/source/extensions/stat_sinks/hystrix/config.h b/source/extensions/stat_sinks/hystrix/config.h index 1bf144323381b..59a3c14364f45 100644 --- a/source/extensions/stat_sinks/hystrix/config.h +++ b/source/extensions/stat_sinks/hystrix/config.h @@ -11,6 +11,9 @@ namespace Extensions { namespace StatSinks { namespace Hystrix { +// Hystrix sink +constexpr char HystrixName[] = "envoy.stat_sinks.hystrix"; + class HystrixSinkFactory : Logger::Loggable, public Server::Configuration::StatsSinkFactory { public: diff --git a/source/extensions/stat_sinks/metrics_service/BUILD b/source/extensions/stat_sinks/metrics_service/BUILD index 11ffe31b018a1..97d79e032f030 100644 --- a/source/extensions/stat_sinks/metrics_service/BUILD +++ b/source/extensions/stat_sinks/metrics_service/BUILD @@ -47,7 +47,6 @@ envoy_cc_extension( "//envoy/registry", "//source/common/common:assert_lib", "//source/common/config:utility_lib", - "//source/extensions/stat_sinks:well_known_names", "//source/extensions/stat_sinks/metrics_service:metrics_proto_descriptors_lib", "//source/extensions/stat_sinks/metrics_service:metrics_service_grpc_lib", "//source/server:configuration_lib", diff --git a/source/extensions/stat_sinks/metrics_service/config.cc b/source/extensions/stat_sinks/metrics_service/config.cc index d467546bfeb75..716e62fd34a7d 100644 --- a/source/extensions/stat_sinks/metrics_service/config.cc +++ b/source/extensions/stat_sinks/metrics_service/config.cc @@ -10,7 +10,6 @@ #include "source/common/network/resolver_impl.h" #include "source/extensions/stat_sinks/metrics_service/grpc_metrics_proto_descriptors.h" #include "source/extensions/stat_sinks/metrics_service/grpc_metrics_service_impl.h" -#include "source/extensions/stat_sinks/well_known_names.h" namespace Envoy { namespace Extensions { @@ -48,7 +47,7 @@ ProtobufTypes::MessagePtr MetricsServiceSinkFactory::createEmptyConfigProto() { std::make_unique()); } -std::string MetricsServiceSinkFactory::name() const { return 
StatsSinkNames::get().MetricsService; } +std::string MetricsServiceSinkFactory::name() const { return MetricsServiceName; } /** * Static registration for the this sink factory. @see RegisterFactory. diff --git a/source/extensions/stat_sinks/metrics_service/config.h b/source/extensions/stat_sinks/metrics_service/config.h index aff8e19fa2fc6..72d2188ba553a 100644 --- a/source/extensions/stat_sinks/metrics_service/config.h +++ b/source/extensions/stat_sinks/metrics_service/config.h @@ -10,6 +10,9 @@ namespace Extensions { namespace StatSinks { namespace MetricsService { +// MetricsService sink +constexpr char MetricsServiceName[] = "envoy.stat_sinks.metrics_service"; + /** * Config registration for the MetricsService stats sink. @see StatsSinkFactory. */ diff --git a/source/extensions/stat_sinks/statsd/BUILD b/source/extensions/stat_sinks/statsd/BUILD index 8ac0182eba9dd..1443316f71519 100644 --- a/source/extensions/stat_sinks/statsd/BUILD +++ b/source/extensions/stat_sinks/statsd/BUILD @@ -19,7 +19,6 @@ envoy_cc_extension( "//envoy/registry", "//source/common/network:address_lib", "//source/common/network:resolver_lib", - "//source/extensions/stat_sinks:well_known_names", "//source/extensions/stat_sinks/common/statsd:statsd_lib", "//source/server:configuration_lib", "@envoy_api//envoy/config/metrics/v3:pkg_cc_proto", diff --git a/source/extensions/stat_sinks/statsd/config.cc b/source/extensions/stat_sinks/statsd/config.cc index e9656bfdb8e01..a0efae013a106 100644 --- a/source/extensions/stat_sinks/statsd/config.cc +++ b/source/extensions/stat_sinks/statsd/config.cc @@ -8,7 +8,6 @@ #include "source/common/network/resolver_impl.h" #include "source/extensions/stat_sinks/common/statsd/statsd.h" -#include "source/extensions/stat_sinks/well_known_names.h" namespace Envoy { namespace Extensions { @@ -45,7 +44,7 @@ ProtobufTypes::MessagePtr StatsdSinkFactory::createEmptyConfigProto() { return std::make_unique(); } -std::string StatsdSinkFactory::name() const { return 
StatsSinkNames::get().Statsd; } +std::string StatsdSinkFactory::name() const { return StatsdName; } /** * Static registration for the statsd sink factory. @see RegisterFactory. diff --git a/source/extensions/stat_sinks/statsd/config.h b/source/extensions/stat_sinks/statsd/config.h index dbce694b7a79c..eb55ab09f38b8 100644 --- a/source/extensions/stat_sinks/statsd/config.h +++ b/source/extensions/stat_sinks/statsd/config.h @@ -9,6 +9,9 @@ namespace Extensions { namespace StatSinks { namespace Statsd { +// Statsd sink +constexpr char StatsdName[] = "envoy.stat_sinks.statsd"; + /** * Config registration for the tcp statsd sink. @see StatsSinkFactory. */ diff --git a/source/extensions/stat_sinks/wasm/BUILD b/source/extensions/stat_sinks/wasm/BUILD index 1747789d6c7a0..ce5f63bfb6168 100644 --- a/source/extensions/stat_sinks/wasm/BUILD +++ b/source/extensions/stat_sinks/wasm/BUILD @@ -21,7 +21,6 @@ envoy_cc_extension( "//envoy/server:factory_context_interface", "//envoy/server:instance_interface", "//source/extensions/common/wasm:wasm_lib", - "//source/extensions/stat_sinks:well_known_names", "//source/server:configuration_lib", "@envoy_api//envoy/extensions/stat_sinks/wasm/v3:pkg_cc_proto", ], diff --git a/source/extensions/stat_sinks/wasm/config.cc b/source/extensions/stat_sinks/wasm/config.cc index 8e1ffd9274d47..1eec57ece7efa 100644 --- a/source/extensions/stat_sinks/wasm/config.cc +++ b/source/extensions/stat_sinks/wasm/config.cc @@ -8,7 +8,6 @@ #include "source/extensions/common/wasm/wasm.h" #include "source/extensions/stat_sinks/wasm/wasm_stat_sink_impl.h" -#include "source/extensions/stat_sinks/well_known_names.h" namespace Envoy { namespace Extensions { @@ -56,7 +55,7 @@ ProtobufTypes::MessagePtr WasmSinkFactory::createEmptyConfigProto() { return std::make_unique(); } -std::string WasmSinkFactory::name() const { return StatsSinkNames::get().Wasm; } +std::string WasmSinkFactory::name() const { return WasmName; } /** * Static registration for the wasm access log. 
@see RegisterFactory. diff --git a/source/extensions/stat_sinks/wasm/config.h b/source/extensions/stat_sinks/wasm/config.h index 8887e7b6ab45d..5df22470829f1 100644 --- a/source/extensions/stat_sinks/wasm/config.h +++ b/source/extensions/stat_sinks/wasm/config.h @@ -13,6 +13,9 @@ namespace Extensions { namespace StatSinks { namespace Wasm { +// WebAssembly sink +constexpr char WasmName[] = "envoy.stat_sinks.wasm"; + /** * Config registration for the Wasm statsd sink. @see StatSinkFactory. */ diff --git a/source/extensions/stat_sinks/well_known_names.h b/source/extensions/stat_sinks/well_known_names.h deleted file mode 100644 index ab253da7acb81..0000000000000 --- a/source/extensions/stat_sinks/well_known_names.h +++ /dev/null @@ -1,35 +0,0 @@ -#pragma once - -#include - -#include "source/common/singleton/const_singleton.h" - -namespace Envoy { -namespace Extensions { -namespace StatSinks { - -/** - * Well-known stats sink names. - * NOTE: New sinks should use the well known name: envoy.stat_sinks.name. 
- */ -class StatsSinkNameValues { -public: - // Statsd sink - const std::string Statsd = "envoy.stat_sinks.statsd"; - // DogStatsD compatible statsd sink - const std::string DogStatsd = "envoy.stat_sinks.dog_statsd"; - // Graphite Statsd sink (with Graphite-formatted tags) - const std::string GraphiteStatsd = "envoy.stat_sinks.graphite_statsd"; - // MetricsService sink - const std::string MetricsService = "envoy.stat_sinks.metrics_service"; - // Hystrix sink - const std::string Hystrix = "envoy.stat_sinks.hystrix"; - // WebAssembly sink - const std::string Wasm = "envoy.stat_sinks.wasm"; -}; - -using StatsSinkNames = ConstSingleton; - -} // namespace StatSinks -} // namespace Extensions -} // namespace Envoy diff --git a/test/extensions/stats_sinks/dog_statsd/config_test.cc b/test/extensions/stats_sinks/dog_statsd/config_test.cc index 6a7e285bf5f11..98b02ba931a9a 100644 --- a/test/extensions/stats_sinks/dog_statsd/config_test.cc +++ b/test/extensions/stats_sinks/dog_statsd/config_test.cc @@ -6,7 +6,6 @@ #include "source/common/protobuf/utility.h" #include "source/extensions/stat_sinks/common/statsd/statsd.h" #include "source/extensions/stat_sinks/dog_statsd/config.h" -#include "source/extensions/stat_sinks/well_known_names.h" #include "test/mocks/server/instance.h" #include "test/test_common/environment.h" @@ -30,8 +29,6 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, DogStatsdConfigLoopbackTest, TestUtility::ipTestParamsToString); TEST_P(DogStatsdConfigLoopbackTest, ValidUdpIp) { - const std::string name = StatsSinkNames::get().DogStatsd; - envoy::config::metrics::v3::DogStatsdSink sink_config; envoy::config::core::v3::Address& address = *sink_config.mutable_address(); envoy::config::core::v3::SocketAddress& socket_address = *address.mutable_socket_address(); @@ -42,7 +39,7 @@ TEST_P(DogStatsdConfigLoopbackTest, ValidUdpIp) { socket_address.set_port_value(8125); Server::Configuration::StatsSinkFactory* factory = - Registry::FactoryRegistry::getFactory(name); + 
Registry::FactoryRegistry::getFactory(DogStatsdName); ASSERT_NE(factory, nullptr); ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); @@ -66,8 +63,6 @@ TEST(DogStatsdConfigTest, ValidateFail) { } TEST_P(DogStatsdConfigLoopbackTest, CustomBufferSize) { - const std::string name = StatsSinkNames::get().DogStatsd; - envoy::config::metrics::v3::DogStatsdSink sink_config; sink_config.mutable_max_bytes_per_datagram()->set_value(128); envoy::config::core::v3::Address& address = *sink_config.mutable_address(); @@ -79,7 +74,7 @@ TEST_P(DogStatsdConfigLoopbackTest, CustomBufferSize) { socket_address.set_port_value(8125); Server::Configuration::StatsSinkFactory* factory = - Registry::FactoryRegistry::getFactory(name); + Registry::FactoryRegistry::getFactory(DogStatsdName); ASSERT_NE(factory, nullptr); ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); @@ -94,8 +89,6 @@ TEST_P(DogStatsdConfigLoopbackTest, CustomBufferSize) { } TEST_P(DogStatsdConfigLoopbackTest, DefaultBufferSize) { - const std::string name = StatsSinkNames::get().DogStatsd; - envoy::config::metrics::v3::DogStatsdSink sink_config; envoy::config::core::v3::Address& address = *sink_config.mutable_address(); envoy::config::core::v3::SocketAddress& socket_address = *address.mutable_socket_address(); @@ -106,7 +99,7 @@ TEST_P(DogStatsdConfigLoopbackTest, DefaultBufferSize) { socket_address.set_port_value(8125); Server::Configuration::StatsSinkFactory* factory = - Registry::FactoryRegistry::getFactory(name); + Registry::FactoryRegistry::getFactory(DogStatsdName); ASSERT_NE(factory, nullptr); ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); @@ -122,8 +115,6 @@ TEST_P(DogStatsdConfigLoopbackTest, DefaultBufferSize) { } TEST_P(DogStatsdConfigLoopbackTest, WithCustomPrefix) { - const std::string name = StatsSinkNames::get().DogStatsd; - envoy::config::metrics::v3::DogStatsdSink sink_config; envoy::config::core::v3::Address& address = 
*sink_config.mutable_address(); envoy::config::core::v3::SocketAddress& socket_address = *address.mutable_socket_address(); @@ -137,7 +128,7 @@ TEST_P(DogStatsdConfigLoopbackTest, WithCustomPrefix) { sink_config.set_prefix(customPrefix); Server::Configuration::StatsSinkFactory* factory = - Registry::FactoryRegistry::getFactory(name); + Registry::FactoryRegistry::getFactory(DogStatsdName); ASSERT_NE(factory, nullptr); ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); @@ -153,10 +144,8 @@ TEST_P(DogStatsdConfigLoopbackTest, WithCustomPrefix) { // Test that the deprecated extension name still functions. TEST(DogStatsdConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { - const std::string deprecated_name = "envoy.dog_statsd"; - ASSERT_NE(nullptr, Registry::FactoryRegistry::getFactory( - deprecated_name)); + DogStatsdName)); } } // namespace diff --git a/test/extensions/stats_sinks/hystrix/config_test.cc b/test/extensions/stats_sinks/hystrix/config_test.cc index 9a5b78b7024f9..90d928a528e21 100644 --- a/test/extensions/stats_sinks/hystrix/config_test.cc +++ b/test/extensions/stats_sinks/hystrix/config_test.cc @@ -4,7 +4,6 @@ #include "source/common/protobuf/utility.h" #include "source/extensions/stat_sinks/hystrix/config.h" #include "source/extensions/stat_sinks/hystrix/hystrix.h" -#include "source/extensions/stat_sinks/well_known_names.h" #include "test/mocks/server/instance.h" #include "test/test_common/environment.h" @@ -23,12 +22,10 @@ namespace Hystrix { namespace { TEST(StatsConfigTest, ValidHystrixSink) { - const std::string name = StatsSinkNames::get().Hystrix; - envoy::config::metrics::v3::HystrixSink sink_config; Server::Configuration::StatsSinkFactory* factory = - Registry::FactoryRegistry::getFactory(name); + Registry::FactoryRegistry::getFactory(HystrixName); ASSERT_NE(factory, nullptr); ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); diff --git 
a/test/extensions/stats_sinks/metrics_service/config_test.cc b/test/extensions/stats_sinks/metrics_service/config_test.cc index a9d557639e077..739e49ba5ae2f 100644 --- a/test/extensions/stats_sinks/metrics_service/config_test.cc +++ b/test/extensions/stats_sinks/metrics_service/config_test.cc @@ -14,10 +14,8 @@ namespace { // Test that the deprecated extension name still functions. TEST(MetricsServiceConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { - const std::string deprecated_name = "envoy.metrics_service"; - ASSERT_NE(nullptr, Registry::FactoryRegistry::getFactory( - deprecated_name)); + MetricsServiceName)); } } // namespace diff --git a/test/extensions/stats_sinks/statsd/config_test.cc b/test/extensions/stats_sinks/statsd/config_test.cc index cbf1c02d6030c..af575796eb848 100644 --- a/test/extensions/stats_sinks/statsd/config_test.cc +++ b/test/extensions/stats_sinks/statsd/config_test.cc @@ -7,7 +7,6 @@ #include "source/common/protobuf/utility.h" #include "source/extensions/stat_sinks/common/statsd/statsd.h" #include "source/extensions/stat_sinks/statsd/config.h" -#include "source/extensions/stat_sinks/well_known_names.h" #include "test/mocks/server/instance.h" #include "test/test_common/environment.h" @@ -26,13 +25,11 @@ namespace Statsd { namespace { TEST(StatsConfigTest, ValidTcpStatsd) { - const std::string name = StatsSinkNames::get().Statsd; - envoy::config::metrics::v3::StatsdSink sink_config; sink_config.set_tcp_cluster_name("fake_cluster"); Server::Configuration::StatsSinkFactory* factory = - Registry::FactoryRegistry::getFactory(name); + Registry::FactoryRegistry::getFactory(StatsdName); ASSERT_NE(factory, nullptr); ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); @@ -60,7 +57,6 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, StatsConfigParameterizedTest, TestUtility::ipTestParamsToString); TEST_P(StatsConfigParameterizedTest, UdpSinkDefaultPrefix) { - const std::string name = StatsSinkNames::get().Statsd; const 
auto& defaultPrefix = Common::Statsd::getDefaultPrefix(); envoy::config::metrics::v3::StatsdSink sink_config; @@ -76,7 +72,7 @@ TEST_P(StatsConfigParameterizedTest, UdpSinkDefaultPrefix) { EXPECT_EQ(sink_config.prefix(), ""); Server::Configuration::StatsSinkFactory* factory = - Registry::FactoryRegistry::getFactory(name); + Registry::FactoryRegistry::getFactory(StatsdName); ASSERT_NE(factory, nullptr); ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); TestUtility::jsonConvert(sink_config, *message); @@ -91,7 +87,6 @@ TEST_P(StatsConfigParameterizedTest, UdpSinkDefaultPrefix) { } TEST_P(StatsConfigParameterizedTest, UdpSinkCustomPrefix) { - const std::string name = StatsSinkNames::get().Statsd; const std::string customPrefix = "prefix.test"; envoy::config::metrics::v3::StatsdSink sink_config; @@ -108,7 +103,7 @@ TEST_P(StatsConfigParameterizedTest, UdpSinkCustomPrefix) { EXPECT_NE(sink_config.prefix(), ""); Server::Configuration::StatsSinkFactory* factory = - Registry::FactoryRegistry::getFactory(name); + Registry::FactoryRegistry::getFactory(StatsdName); ASSERT_NE(factory, nullptr); ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); TestUtility::jsonConvert(sink_config, *message); @@ -123,14 +118,12 @@ TEST_P(StatsConfigParameterizedTest, UdpSinkCustomPrefix) { } TEST(StatsConfigTest, TcpSinkDefaultPrefix) { - const std::string name = StatsSinkNames::get().Statsd; - envoy::config::metrics::v3::StatsdSink sink_config; const auto& defaultPrefix = Common::Statsd::getDefaultPrefix(); sink_config.set_tcp_cluster_name("fake_cluster"); Server::Configuration::StatsSinkFactory* factory = - Registry::FactoryRegistry::getFactory(name); + Registry::FactoryRegistry::getFactory(StatsdName); ASSERT_NE(factory, nullptr); EXPECT_EQ(sink_config.prefix(), ""); ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); @@ -147,8 +140,6 @@ TEST(StatsConfigTest, TcpSinkDefaultPrefix) { } TEST(StatsConfigTest, TcpSinkCustomPrefix) { - 
const std::string name = StatsSinkNames::get().Statsd; - envoy::config::metrics::v3::StatsdSink sink_config; std::string prefix = "prefixTest"; sink_config.set_tcp_cluster_name("fake_cluster"); @@ -156,7 +147,7 @@ TEST(StatsConfigTest, TcpSinkCustomPrefix) { sink_config.set_prefix(prefix); EXPECT_EQ(sink_config.prefix(), prefix); Server::Configuration::StatsSinkFactory* factory = - Registry::FactoryRegistry::getFactory(name); + Registry::FactoryRegistry::getFactory(StatsdName); ASSERT_NE(factory, nullptr); ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); @@ -178,8 +169,6 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, StatsConfigLoopbackTest, TestUtility::ipTestParamsToString); TEST_P(StatsConfigLoopbackTest, ValidUdpIpStatsd) { - const std::string name = StatsSinkNames::get().Statsd; - envoy::config::metrics::v3::StatsdSink sink_config; envoy::config::core::v3::Address& address = *sink_config.mutable_address(); envoy::config::core::v3::SocketAddress& socket_address = *address.mutable_socket_address(); @@ -189,7 +178,7 @@ TEST_P(StatsConfigLoopbackTest, ValidUdpIpStatsd) { socket_address.set_port_value(8125); Server::Configuration::StatsSinkFactory* factory = - Registry::FactoryRegistry::getFactory(name); + Registry::FactoryRegistry::getFactory(StatsdName); ASSERT_NE(factory, nullptr); ProtobufTypes::MessagePtr message = factory->createEmptyConfigProto(); diff --git a/test/extensions/stats_sinks/wasm/config_test.cc b/test/extensions/stats_sinks/wasm/config_test.cc index 0887d1b1ecb14..64a7d4333832d 100644 --- a/test/extensions/stats_sinks/wasm/config_test.cc +++ b/test/extensions/stats_sinks/wasm/config_test.cc @@ -5,7 +5,6 @@ #include "source/extensions/common/wasm/wasm.h" #include "source/extensions/stat_sinks/wasm/config.h" #include "source/extensions/stat_sinks/wasm/wasm_stat_sink_impl.h" -#include "source/extensions/stat_sinks/well_known_names.h" #include "test/extensions/common/wasm/wasm_runtime.h" #include "test/mocks/server/mocks.h" @@ -42,8 
+41,8 @@ class WasmStatSinkConfigTest : public testing::TestWithParam { } void initializeWithConfig(const envoy::extensions::stat_sinks::wasm::v3::Wasm& config) { - auto factory = Registry::FactoryRegistry::getFactory( - StatsSinkNames::get().Wasm); + auto factory = + Registry::FactoryRegistry::getFactory(WasmName); ASSERT_NE(factory, nullptr); api_ = Api::createApiForTest(stats_store_); EXPECT_CALL(context_, api()).WillRepeatedly(testing::ReturnRef(*api_)); diff --git a/test/server/configuration_impl_test.cc b/test/server/configuration_impl_test.cc index 143b39fd08c1d..4e7de3b88dc1f 100644 --- a/test/server/configuration_impl_test.cc +++ b/test/server/configuration_impl_test.cc @@ -12,7 +12,7 @@ #include "source/common/json/json_loader.h" #include "source/common/protobuf/utility.h" #include "source/common/upstream/cluster_manager_impl.h" -#include "source/extensions/stat_sinks/well_known_names.h" +#include "source/extensions/stat_sinks/statsd/config.h" #include "source/server/configuration_impl.h" #include "test/common/upstream/utility.h" @@ -467,7 +467,7 @@ TEST_F(ConfigurationImplTest, ProtoSpecifiedStatsSink) { auto bootstrap = Upstream::parseBootstrapFromV3Json(json); auto& sink = *bootstrap.mutable_stats_sinks()->Add(); - sink.set_name(Extensions::StatSinks::StatsSinkNames::get().Statsd); + sink.set_name(Extensions::StatSinks::Statsd::StatsdName); addStatsdFakeClusterConfig(sink); server_.server_factory_context_->cluster_manager_.initializeClusters({"fake_cluster"}, {}); From eae33664a52f972788a2876c70eefdefa6046019 Mon Sep 17 00:00:00 2001 From: phlax Date: Tue, 20 Jul 2021 18:31:58 +0100 Subject: [PATCH 11/57] dist: Add GPG identity util (#17399) Signed-off-by: Ryan Northey --- .github/dependabot.yml | 5 + bazel/repositories_extra.bzl | 5 + tools/gpg/BUILD | 12 + tools/gpg/identity.py | 143 +++++++++++ tools/gpg/requirements.txt | 10 + tools/gpg/tests/test_identity.py | 405 +++++++++++++++++++++++++++++++ tools/testing/BUILD | 4 +- 7 files changed, 582 
insertions(+), 2 deletions(-) create mode 100644 tools/gpg/BUILD create mode 100644 tools/gpg/identity.py create mode 100644 tools/gpg/requirements.txt create mode 100644 tools/gpg/tests/test_identity.py diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 758bb344b9774..2a85dba110736 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -61,6 +61,11 @@ updates: schedule: interval: "daily" +- package-ecosystem: "pip" + directory: "/tools/gpg" + schedule: + interval: "daily" + - package-ecosystem: "pip" directory: "/tools/protodoc" schedule: diff --git a/bazel/repositories_extra.bzl b/bazel/repositories_extra.bzl index 93d442f7f5859..76e44809c964c 100644 --- a/bazel/repositories_extra.bzl +++ b/bazel/repositories_extra.bzl @@ -62,6 +62,11 @@ def _python_deps(): requirements = "@envoy//tools/git:requirements.txt", extra_pip_args = ["--require-hashes"], ) + pip_install( + name = "gpg_pip3", + requirements = "@envoy//tools/gpg:requirements.txt", + extra_pip_args = ["--require-hashes"], + ) pip_install( name = "kafka_pip3", requirements = "@envoy//source/extensions/filters/network/kafka:requirements.txt", diff --git a/tools/gpg/BUILD b/tools/gpg/BUILD new file mode 100644 index 0000000000000..50a3dd91ff14e --- /dev/null +++ b/tools/gpg/BUILD @@ -0,0 +1,12 @@ +load("//bazel:envoy_build_system.bzl", "envoy_package") +load("@gpg_pip3//:requirements.bzl", "requirement") +load("//tools/base:envoy_python.bzl", "envoy_py_library") + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_py_library( + name = "tools.gpg.identity", + deps = [requirement("python-gnupg")], +) diff --git a/tools/gpg/identity.py b/tools/gpg/identity.py new file mode 100644 index 0000000000000..179adece3874e --- /dev/null +++ b/tools/gpg/identity.py @@ -0,0 +1,143 @@ +import logging +import os +import pwd +from functools import cached_property +from email.utils import formataddr, parseaddr +from typing import Optional + +import gnupg + + +class GPGError(Exception): + 
pass + + +class GPGIdentity(object): + """A GPG identity with a signing key + + The signing key is found either by matching provided name/email, + or by retrieving the first private key. + """ + + def __init__( + self, + name: Optional[str] = None, + email: Optional[str] = None, + log: Optional[logging.Logger] = None): + self._provided_name = name + self._provided_email = email + self._log = log + + def __str__(self) -> str: + return self.uid + + @cached_property + def email(self) -> str: + """Email parsed from the signing key""" + return parseaddr(self.uid)[1] + + @property + def fingerprint(self) -> str: + """GPG key fingerprint""" + return self.signing_key["fingerprint"] + + @cached_property + def gpg(self) -> gnupg.GPG: + return gnupg.GPG() + + @property + def gnupg_home(self) -> str: + return os.path.join(self.home, ".gnupg") + + @cached_property + def home(self) -> str: + """Gets *and sets if required* the `HOME` env var""" + os.environ["HOME"] = os.environ.get("HOME", pwd.getpwuid(os.getuid()).pw_dir) + return os.environ["HOME"] + + @cached_property + def log(self) -> logging.Logger: + return self._log or logging.getLogger(self.__class__.__name__) + + @property + def provided_email(self) -> Optional[str]: + """Provided email for the identity""" + return self._provided_email + + @cached_property + def provided_id(self) -> Optional[str]: + """Provided name and/or email for the identity""" + if not (self.provided_name or self.provided_email): + return + return ( + formataddr(self.provided_name, self.provided_email) if + (self.provided_name and self.provided_email) else + (self.provided_name or self.provided_email)) + + @property + def provided_name(self) -> Optional[str]: + """Provided name for the identity""" + return self._provided_name + + @cached_property + def name(self) -> str: + """Name parsed from the signing key""" + return parseaddr(self.uid)[0] + + @cached_property + def signing_key(self) -> dict: + """A `dict` representing the GPG key to sign 
with""" + # if name and/or email are provided the list of keys is pre-filtered + # but we still need to figure out which uid matched for the found key + for key in self.gpg.list_keys(True, keys=self.provided_id): + key = self.match(key) + if key: + return key + raise GPGError( + f"No key found for '{self.provided_id}'" if self.provided_id else "No available key") + + @property + def uid(self) -> str: + """UID of the identity's signing key""" + return self.signing_key["uid"] + + def match(self, key: dict) -> Optional[dict]: + """Match a signing key + + The key is found either by matching provided name/email + or the first available private key + + the matching `uid` (or first) is added as `uid` to the dict + """ + if self.provided_id: + key["uid"] = self._match_key(key["uids"]) + return key if key["uid"] else None + if self.log: + self.log.warning("No GPG name/email supplied, signing with first available key") + key["uid"] = key["uids"][0] + return key + + def _match_email(self, uids: list) -> Optional[str]: + """Match only the email""" + for uid in uids: + if parseaddr(uid)[1] == self.provided_email: + return uid + + def _match_key(self, uids: dict) -> Optional[str]: + """If either/both name or email are supplied it tries to match either/both""" + if self.provided_name and self.provided_email: + return self._match_uid(uids) + elif self.provided_name: + return self._match_name(uids) + elif self.provided_email: + return self._match_email(uids) + + def _match_name(self, uids: list) -> Optional[str]: + """Match only the name""" + for uid in uids: + if parseaddr(uid)[0] == self.provided_name: + return uid + + def _match_uid(self, uids: list) -> Optional[str]: + """Match the whole uid - ie `Name `""" + return self.provided_id if self.provided_id in uids else None diff --git a/tools/gpg/requirements.txt b/tools/gpg/requirements.txt new file mode 100644 index 0000000000000..f405a325f70be --- /dev/null +++ b/tools/gpg/requirements.txt @@ -0,0 +1,10 @@ +# +# This file is 
autogenerated by pip-compile +# To update, run: +# +# pip-compile --generate-hashes tools/gpg/requirements.txt +# +python-gnupg==0.4.7 \ + --hash=sha256:2061f56b1942c29b92727bf9aecbd3cea3893acc9cccbdc7eb4604285efe4ac7 \ + --hash=sha256:3ff5b1bf5e397de6e1fe41a7c0f403dad4e242ac92b345f440eaecfb72a7ebae + # via -r tools/gpg/requirements.txt diff --git a/tools/gpg/tests/test_identity.py b/tools/gpg/tests/test_identity.py new file mode 100644 index 0000000000000..d8ccc8280207a --- /dev/null +++ b/tools/gpg/tests/test_identity.py @@ -0,0 +1,405 @@ +from unittest.mock import MagicMock, PropertyMock + +import pytest + +from tools.gpg import identity + + +@pytest.mark.parametrize("name", ["NAME", None]) +@pytest.mark.parametrize("email", ["EMAIL", None]) +@pytest.mark.parametrize("log", ["LOG", None]) +def test_identity_constructor(name, email, log): + gpg = identity.GPGIdentity(name, email, log) + assert gpg.provided_name == name + assert gpg.provided_email == email + assert gpg._log == log + + +def test_identity_dunder_str(patches): + gpg = identity.GPGIdentity() + patched = patches( + ("GPGIdentity.uid", dict(new_callable=PropertyMock)), + prefix="tools.gpg.identity") + + with patched as (m_uid, ): + m_uid.return_value = "SOME BODY" + assert str(gpg) == "SOME BODY" + + +def test_identity_email(patches): + gpg = identity.GPGIdentity() + patched = patches( + "parseaddr", + ("GPGIdentity.uid", dict(new_callable=PropertyMock)), + prefix="tools.gpg.identity") + + with patched as (m_parse, m_uid): + assert gpg.email == m_parse.return_value.__getitem__.return_value + + assert ( + list(m_parse.return_value.__getitem__.call_args) + == [(1,), {}]) + assert ( + list(m_parse.call_args) + == [(m_uid.return_value,), {}]) + assert "email" in gpg.__dict__ + + +def test_identity_fingerprint(patches): + gpg = identity.GPGIdentity() + patched = patches( + ("GPGIdentity.signing_key", dict(new_callable=PropertyMock)), + prefix="tools.gpg.identity") + + with patched as (m_key, ): + assert 
gpg.fingerprint == m_key.return_value.__getitem__.return_value + + assert ( + list(m_key.return_value.__getitem__.call_args) + == [('fingerprint',), {}]) + + assert "fingerprint" not in gpg.__dict__ + + +def test_identity_gpg(patches): + gpg = identity.GPGIdentity() + patched = patches( + "gnupg.GPG", + prefix="tools.gpg.identity") + + with patched as (m_gpg, ): + assert gpg.gpg == m_gpg.return_value + + assert ( + list(m_gpg.call_args) + == [(), {}]) + + assert "gpg" in gpg.__dict__ + + +def test_identity_gnupg_home(patches): + gpg = identity.GPGIdentity() + patched = patches( + "os", + ("GPGIdentity.home", dict(new_callable=PropertyMock)), + prefix="tools.gpg.identity") + + with patched as (m_os, m_home): + assert gpg.gnupg_home == m_os.path.join.return_value + + assert ( + list(m_os.path.join.call_args) + == [(m_home.return_value, '.gnupg'), {}]) + + assert "gnupg_home" not in gpg.__dict__ + + +def test_identity_home(patches): + gpg = identity.GPGIdentity() + patched = patches( + "os", + "pwd", + prefix="tools.gpg.identity") + + with patched as (m_os, m_pwd): + assert gpg.home == m_os.environ.__getitem__.return_value + + assert ( + list(m_os.environ.__getitem__.call_args) + == [('HOME', ), {}]) + assert ( + list(m_os.environ.__setitem__.call_args) + == [('HOME', m_os.environ.get.return_value), {}]) + assert ( + list(m_os.environ.get.call_args) + == [('HOME', m_pwd.getpwuid.return_value.pw_dir), {}]) + assert ( + list(m_pwd.getpwuid.call_args) + == [(m_os.getuid.return_value,), {}]) + assert ( + list(m_os.getuid.call_args) + == [(), {}]) + + assert "home" in gpg.__dict__ + + +@pytest.mark.parametrize("log", ["LOGGER", None]) +def test_identity_log(patches, log): + gpg = identity.GPGIdentity() + patched = patches( + "logging", + prefix="tools.gpg.identity") + + gpg._log = log + + with patched as (m_log, ): + if log: + assert gpg.log == log + assert not m_log.getLogger.called + else: + assert gpg.log == m_log.getLogger.return_value + assert ( + 
list(m_log.getLogger.call_args) + == [(gpg.__class__.__name__, ), {}]) + + +@pytest.mark.parametrize("name", ["NAME", None]) +@pytest.mark.parametrize("email", ["EMAIL", None]) +def test_identity_identity_id(patches, name, email): + gpg = identity.GPGIdentity() + patched = patches( + "formataddr", + ("GPGIdentity.provided_name", dict(new_callable=PropertyMock)), + ("GPGIdentity.provided_email", dict(new_callable=PropertyMock)), + prefix="tools.gpg.identity") + + with patched as (m_format, m_name, m_email): + m_name.return_value = name + m_email.return_value = email + result = gpg.provided_id + + assert "provided_id" in gpg.__dict__ + + if name and email: + assert ( + list(m_format.call_args) + == [('NAME', 'EMAIL'), {}]) + assert result == m_format.return_value + return + + assert not m_format.called + assert result == name or email + + +def test_identity_name(patches): + gpg = identity.GPGIdentity() + patched = patches( + "parseaddr", + ("GPGIdentity.uid", dict(new_callable=PropertyMock)), + prefix="tools.gpg.identity") + + with patched as (m_parse, m_uid): + assert gpg.name == m_parse.return_value.__getitem__.return_value + + assert ( + list(m_parse.return_value.__getitem__.call_args) + == [(0,), {}]) + assert ( + list(m_parse.call_args) + == [(m_uid.return_value,), {}]) + assert "name" in gpg.__dict__ + + +@pytest.mark.parametrize("key", ["KEY1", "KEY2", "KEY5"]) +@pytest.mark.parametrize("name", ["NAME", None]) +@pytest.mark.parametrize("email", ["EMAIL", None]) +def test_identity_signing_key(patches, key, name, email): + packager = MagicMock() + gpg = identity.GPGIdentity() + _keys = ["KEY1", "KEY2", "KEY3"] + patched = patches( + "GPGIdentity.match", + ("GPGIdentity.gpg", dict(new_callable=PropertyMock)), + ("GPGIdentity.provided_id", dict(new_callable=PropertyMock)), + prefix="tools.gpg.identity") + + with patched as (m_match, m_gpg, m_id): + if not name and not email: + m_id.return_value = None + m_match.side_effect = lambda k: (k == key and f"MATCH {k}") + 
m_gpg.return_value.list_keys.return_value = _keys + if key in _keys: + assert gpg.signing_key == f"MATCH {key}" + _match_attempts = _keys[:_keys.index(key) + 1] + else: + with pytest.raises(identity.GPGError) as e: + gpg.signing_key + if name or email: + assert ( + e.value.args[0] + == f"No key found for '{m_id.return_value}'") + else: + assert ( + e.value.args[0] + == 'No available key') + _match_attempts = _keys + + assert ( + list(m_gpg.return_value.list_keys.call_args) + == [(True, ), dict(keys=m_id.return_value)]) + assert ( + list(list(c) for c in m_match.call_args_list) + == [[(k,), {}] for k in _match_attempts]) + + +def test_identity_uid(patches): + gpg = identity.GPGIdentity() + patched = patches( + ("GPGIdentity.signing_key", dict(new_callable=PropertyMock)), + prefix="tools.gpg.identity") + + with patched as (m_key, ): + assert gpg.uid == m_key.return_value.__getitem__.return_value + + assert ( + list(m_key.return_value.__getitem__.call_args) + == [('uid',), {}]) + + assert "uid" not in gpg.__dict__ + + +@pytest.mark.parametrize("name", ["NAME", None]) +@pytest.mark.parametrize("email", ["EMAIL", None]) +@pytest.mark.parametrize("match", ["MATCH", None]) +@pytest.mark.parametrize("log", [True, False]) +def test_identity_match(patches, name, email, match, log): + gpg = identity.GPGIdentity() + _keys = ["KEY1", "KEY2", "KEY3"] + patched = patches( + "GPGIdentity._match_key", + ("GPGIdentity.provided_id", dict(new_callable=PropertyMock)), + ("GPGIdentity.log", dict(new_callable=PropertyMock)), + prefix="tools.gpg.identity") + key = dict(uids=["UID1", "UID2"]) + + with patched as (m_match, m_id, m_log): + if not log: + m_log.return_value = None + m_match.return_value = match + m_id.return_value = name or email + result = gpg.match(key) + + if not name and not email: + assert not m_match.called + if log: + assert ( + list(m_log.return_value.warning.call_args) + == [('No GPG name/email supplied, signing with first available key',), {}]) + assert ( + result + 
== {'uids': ['UID1', 'UID2'], 'uid': 'UID1'}) + return + assert ( + list(m_match.call_args) + == [(key["uids"],), {}]) + if log: + assert not m_log.return_value.warning.called + if match: + assert ( + result + == {'uids': ['UID1', 'UID2'], 'uid': 'MATCH'}) + else: + assert not result + + +@pytest.mark.parametrize("uids", [[], ["UID1"], ["UID1", "UID2"]]) +@pytest.mark.parametrize("email", [None, "UID1", "UID1", "UID2", "UID3"]) +def test_identity__match_email(patches, uids, email): + gpg = identity.GPGIdentity() + patched = patches( + "parseaddr", + ("GPGIdentity.provided_email", dict(new_callable=PropertyMock)), + prefix="tools.gpg.identity") + + with patched as (m_parse, m_email): + m_parse.side_effect = lambda _email: ("NAME", _email) + m_email.return_value = email + result = gpg._match_email(uids) + + if email in uids: + assert result == email + assert ( + list(list(c) for c in m_parse.call_args_list) + == [[(uid,), {}] for uid in uids[:uids.index(email) + 1]]) + return + + assert not result + assert ( + list(list(c) for c in m_parse.call_args_list) + == [[(uid,), {}] for uid in uids]) + + +@pytest.mark.parametrize("name", ["NAME", None]) +@pytest.mark.parametrize("email", ["EMAIL", None]) +def test_identity__match_key(patches, name, email): + gpg = identity.GPGIdentity() + _keys = ["KEY1", "KEY2", "KEY3"] + patched = patches( + "GPGIdentity._match_email", + "GPGIdentity._match_name", + "GPGIdentity._match_uid", + ("GPGIdentity.provided_email", dict(new_callable=PropertyMock)), + ("GPGIdentity.provided_name", dict(new_callable=PropertyMock)), + prefix="tools.gpg.identity") + key = dict(uids=["UID1", "UID2"]) + + with patched as (m_email, m_name, m_uid, m_pemail, m_pname): + m_pemail.return_value = email + m_pname.return_value = name + result = gpg._match_key(key) + + if name and email: + assert ( + list(m_uid.call_args) + == [(dict(uids=key["uids"]),), {}]) + assert not m_email.called + assert not m_name.called + assert result == m_uid.return_value + elif name: 
+ assert ( + list(m_name.call_args) + == [(dict(uids=key["uids"]),), {}]) + assert not m_email.called + assert not m_uid.called + assert result == m_name.return_value + elif email: + assert ( + list(m_email.call_args) + == [(dict(uids=key["uids"]),), {}]) + assert not m_name.called + assert not m_uid.called + assert result == m_email.return_value + + +@pytest.mark.parametrize("uids", [[], ["UID1"], ["UID1", "UID2"]]) +@pytest.mark.parametrize("name", [None, "UID1", "UID1", "UID2", "UID3"]) +def test_identity__match_name(patches, uids, name): + gpg = identity.GPGIdentity() + patched = patches( + "parseaddr", + ("GPGIdentity.provided_name", dict(new_callable=PropertyMock)), + prefix="tools.gpg.identity") + + with patched as (m_parse, m_name): + m_parse.side_effect = lambda _name: (_name, "EMAIL") + m_name.return_value = name + result = gpg._match_name(uids) + + if name in uids: + assert result == name + assert ( + list(list(c) for c in m_parse.call_args_list) + == [[(uid,), {}] for uid in uids[:uids.index(name) + 1]]) + return + + assert not result + assert ( + list(list(c) for c in m_parse.call_args_list) + == [[(uid,), {}] for uid in uids]) + + +@pytest.mark.parametrize("uid", ["UID1", "UID7"]) +def test_identity__match_uid(patches, uid): + gpg = identity.GPGIdentity() + uids = [f"UID{i}" for i in range(5)] + matches = uid in uids + patched = patches( + ("GPGIdentity.provided_id", dict(new_callable=PropertyMock)), + prefix="tools.gpg.identity") + + with patched as (m_id, ): + m_id.return_value = uid + if matches: + assert gpg._match_uid(uids) == uid + else: + assert not gpg._match_uid(uids) diff --git a/tools/testing/BUILD b/tools/testing/BUILD index 2f8bd93d6105f..ab36b91e2212c 100644 --- a/tools/testing/BUILD +++ b/tools/testing/BUILD @@ -19,13 +19,13 @@ envoy_py_binary( ":plugin", "//:.coveragerc", "//:pytest.ini", - "//tools/base:runner", - "//tools/base:utils", ], deps = [ requirement("pytest"), requirement("pytest-asyncio"), requirement("pytest-cov"), + 
"//tools/base:runner", + "//tools/base:utils", ], ) From b651e3fb54c4f1c7e5e6ec2442a4c5f7980d5596 Mon Sep 17 00:00:00 2001 From: phlax Date: Tue, 20 Jul 2021 18:32:58 +0100 Subject: [PATCH 12/57] dist: Add docker build util (#17384) Signed-off-by: Ryan Northey --- .github/dependabot.yml | 5 + bazel/repositories_extra.bzl | 5 + tools/docker/BUILD | 14 +++ tools/docker/requirements.txt | 152 +++++++++++++++++++++++++++++++ tools/docker/tests/test_utils.py | 145 +++++++++++++++++++++++++++++ tools/docker/utils.py | 102 +++++++++++++++++++++ 6 files changed, 423 insertions(+) create mode 100644 tools/docker/BUILD create mode 100644 tools/docker/requirements.txt create mode 100644 tools/docker/tests/test_utils.py create mode 100644 tools/docker/utils.py diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 2a85dba110736..11e168f63a854 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -46,6 +46,11 @@ updates: schedule: interval: "daily" +- package-ecosystem: "pip" + directory: "/tools/docker" + schedule: + interval: "daily" + - package-ecosystem: "pip" directory: "/tools/dependency" schedule: diff --git a/bazel/repositories_extra.bzl b/bazel/repositories_extra.bzl index 76e44809c964c..d7d0a869a0c95 100644 --- a/bazel/repositories_extra.bzl +++ b/bazel/repositories_extra.bzl @@ -47,6 +47,11 @@ def _python_deps(): requirements = "@envoy//tools/docs:requirements.txt", extra_pip_args = ["--require-hashes"], ) + pip_install( + name = "docker_pip3", + requirements = "@envoy//tools/docker:requirements.txt", + extra_pip_args = ["--require-hashes"], + ) pip_install( name = "deps_pip3", requirements = "@envoy//tools/dependency:requirements.txt", diff --git a/tools/docker/BUILD b/tools/docker/BUILD new file mode 100644 index 0000000000000..9fd20eedd3b1f --- /dev/null +++ b/tools/docker/BUILD @@ -0,0 +1,14 @@ +load("//bazel:envoy_build_system.bzl", "envoy_package") +load("@docker_pip3//:requirements.bzl", "requirement") 
+load("//tools/base:envoy_python.bzl", "envoy_py_library") + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_py_library( + name = "tools.docker.utils", + deps = [ + requirement("aiodocker"), + ], +) diff --git a/tools/docker/requirements.txt b/tools/docker/requirements.txt new file mode 100644 index 0000000000000..6e65c176c2389 --- /dev/null +++ b/tools/docker/requirements.txt @@ -0,0 +1,152 @@ +# +# This file is autogenerated by pip-compile +# To update, run: +# +# pip-compile --generate-hashes tools/docker/requirements.txt +# +aiodocker==0.19.1 \ + --hash=sha256:59dfae91b5acbfa953baf4a3553b7c5ff375346b0f3bbfd8cae11c3b93adce04 \ + --hash=sha256:bfbb44dbee185dbc8943be68d1f51358af3ec473c463bdee68a25e33d70ae3ad + # via -r tools/docker/requirements.txt +aiohttp==3.7.4.post0 \ + --hash=sha256:02f46fc0e3c5ac58b80d4d56eb0a7c7d97fcef69ace9326289fb9f1955e65cfe \ + --hash=sha256:0563c1b3826945eecd62186f3f5c7d31abb7391fedc893b7e2b26303b5a9f3fe \ + --hash=sha256:114b281e4d68302a324dd33abb04778e8557d88947875cbf4e842c2c01a030c5 \ + --hash=sha256:14762875b22d0055f05d12abc7f7d61d5fd4fe4642ce1a249abdf8c700bf1fd8 \ + --hash=sha256:15492a6368d985b76a2a5fdd2166cddfea5d24e69eefed4630cbaae5c81d89bd \ + --hash=sha256:17c073de315745a1510393a96e680d20af8e67e324f70b42accbd4cb3315c9fb \ + --hash=sha256:209b4a8ee987eccc91e2bd3ac36adee0e53a5970b8ac52c273f7f8fd4872c94c \ + --hash=sha256:230a8f7e24298dea47659251abc0fd8b3c4e38a664c59d4b89cca7f6c09c9e87 \ + --hash=sha256:2e19413bf84934d651344783c9f5e22dee452e251cfd220ebadbed2d9931dbf0 \ + --hash=sha256:393f389841e8f2dfc86f774ad22f00923fdee66d238af89b70ea314c4aefd290 \ + --hash=sha256:3cf75f7cdc2397ed4442594b935a11ed5569961333d49b7539ea741be2cc79d5 \ + --hash=sha256:3d78619672183be860b96ed96f533046ec97ca067fd46ac1f6a09cd9b7484287 \ + --hash=sha256:40eced07f07a9e60e825554a31f923e8d3997cfc7fb31dbc1328c70826e04cde \ + --hash=sha256:493d3299ebe5f5a7c66b9819eacdcfbbaaf1a8e84911ddffcdc48888497afecf \ + 
--hash=sha256:4b302b45040890cea949ad092479e01ba25911a15e648429c7c5aae9650c67a8 \ + --hash=sha256:515dfef7f869a0feb2afee66b957cc7bbe9ad0cdee45aec7fdc623f4ecd4fb16 \ + --hash=sha256:547da6cacac20666422d4882cfcd51298d45f7ccb60a04ec27424d2f36ba3eaf \ + --hash=sha256:5df68496d19f849921f05f14f31bd6ef53ad4b00245da3195048c69934521809 \ + --hash=sha256:64322071e046020e8797117b3658b9c2f80e3267daec409b350b6a7a05041213 \ + --hash=sha256:7615dab56bb07bff74bc865307aeb89a8bfd9941d2ef9d817b9436da3a0ea54f \ + --hash=sha256:79ebfc238612123a713a457d92afb4096e2148be17df6c50fb9bf7a81c2f8013 \ + --hash=sha256:7b18b97cf8ee5452fa5f4e3af95d01d84d86d32c5e2bfa260cf041749d66360b \ + --hash=sha256:932bb1ea39a54e9ea27fc9232163059a0b8855256f4052e776357ad9add6f1c9 \ + --hash=sha256:a00bb73540af068ca7390e636c01cbc4f644961896fa9363154ff43fd37af2f5 \ + --hash=sha256:a5ca29ee66f8343ed336816c553e82d6cade48a3ad702b9ffa6125d187e2dedb \ + --hash=sha256:af9aa9ef5ba1fd5b8c948bb11f44891968ab30356d65fd0cc6707d989cd521df \ + --hash=sha256:bb437315738aa441251214dad17428cafda9cdc9729499f1d6001748e1d432f4 \ + --hash=sha256:bdb230b4943891321e06fc7def63c7aace16095be7d9cf3b1e01be2f10fba439 \ + --hash=sha256:c6e9dcb4cb338d91a73f178d866d051efe7c62a7166653a91e7d9fb18274058f \ + --hash=sha256:cffe3ab27871bc3ea47df5d8f7013945712c46a3cc5a95b6bee15887f1675c22 \ + --hash=sha256:d012ad7911653a906425d8473a1465caa9f8dea7fcf07b6d870397b774ea7c0f \ + --hash=sha256:d9e13b33afd39ddeb377eff2c1c4f00544e191e1d1dee5b6c51ddee8ea6f0cf5 \ + --hash=sha256:e4b2b334e68b18ac9817d828ba44d8fcb391f6acb398bcc5062b14b2cbeac970 \ + --hash=sha256:e54962802d4b8b18b6207d4a927032826af39395a3bd9196a5af43fc4e60b009 \ + --hash=sha256:f705e12750171c0ab4ef2a3c76b9a4024a62c4103e3a55dd6f99265b9bc6fcfc \ + --hash=sha256:f881853d2643a29e643609da57b96d5f9c9b93f62429dcc1cbb413c7d07f0e1a \ + --hash=sha256:fe60131d21b31fd1a14bd43e6bb88256f69dfc3188b3a89d736d6c71ed43ec95 + # via aiodocker +async-timeout==3.0.1 \ + 
--hash=sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f \ + --hash=sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3 + # via aiohttp +attrs==21.2.0 \ + --hash=sha256:149e90d6d8ac20db7a955ad60cf0e6881a3f20d37096140088356da6c716b0b1 \ + --hash=sha256:ef6aaac3ca6cd92904cdd0d83f629a15f18053ec84e6432106f7a4d04ae4f5fb + # via aiohttp +chardet==4.0.0 \ + --hash=sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa \ + --hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5 + # via aiohttp +idna==3.2 \ + --hash=sha256:14475042e284991034cb48e06f6851428fb14c4dc953acd9be9a5e95c7b6dd7a \ + --hash=sha256:467fbad99067910785144ce333826c71fb0e63a425657295239737f7ecd125f3 + # via yarl +multidict==5.1.0 \ + --hash=sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a \ + --hash=sha256:051012ccee979b2b06be928a6150d237aec75dd6bf2d1eeeb190baf2b05abc93 \ + --hash=sha256:05c20b68e512166fddba59a918773ba002fdd77800cad9f55b59790030bab632 \ + --hash=sha256:07b42215124aedecc6083f1ce6b7e5ec5b50047afa701f3442054373a6deb656 \ + --hash=sha256:0e3c84e6c67eba89c2dbcee08504ba8644ab4284863452450520dad8f1e89b79 \ + --hash=sha256:0e929169f9c090dae0646a011c8b058e5e5fb391466016b39d21745b48817fd7 \ + --hash=sha256:1ab820665e67373de5802acae069a6a05567ae234ddb129f31d290fc3d1aa56d \ + --hash=sha256:25b4e5f22d3a37ddf3effc0710ba692cfc792c2b9edfb9c05aefe823256e84d5 \ + --hash=sha256:2e68965192c4ea61fff1b81c14ff712fc7dc15d2bd120602e4a3494ea6584224 \ + --hash=sha256:2f1a132f1c88724674271d636e6b7351477c27722f2ed789f719f9e3545a3d26 \ + --hash=sha256:37e5438e1c78931df5d3c0c78ae049092877e5e9c02dd1ff5abb9cf27a5914ea \ + --hash=sha256:3a041b76d13706b7fff23b9fc83117c7b8fe8d5fe9e6be45eee72b9baa75f348 \ + --hash=sha256:3a4f32116f8f72ecf2a29dabfb27b23ab7cdc0ba807e8459e59a93a9be9506f6 \ + --hash=sha256:46c73e09ad374a6d876c599f2328161bcd95e280f84d2060cf57991dec5cfe76 \ + 
--hash=sha256:46dd362c2f045095c920162e9307de5ffd0a1bfbba0a6e990b344366f55a30c1 \ + --hash=sha256:4b186eb7d6ae7c06eb4392411189469e6a820da81447f46c0072a41c748ab73f \ + --hash=sha256:54fd1e83a184e19c598d5e70ba508196fd0bbdd676ce159feb412a4a6664f952 \ + --hash=sha256:585fd452dd7782130d112f7ddf3473ffdd521414674c33876187e101b588738a \ + --hash=sha256:5cf3443199b83ed9e955f511b5b241fd3ae004e3cb81c58ec10f4fe47c7dce37 \ + --hash=sha256:6a4d5ce640e37b0efcc8441caeea8f43a06addace2335bd11151bc02d2ee31f9 \ + --hash=sha256:7df80d07818b385f3129180369079bd6934cf70469f99daaebfac89dca288359 \ + --hash=sha256:806068d4f86cb06af37cd65821554f98240a19ce646d3cd24e1c33587f313eb8 \ + --hash=sha256:830f57206cc96ed0ccf68304141fec9481a096c4d2e2831f311bde1c404401da \ + --hash=sha256:929006d3c2d923788ba153ad0de8ed2e5ed39fdbe8e7be21e2f22ed06c6783d3 \ + --hash=sha256:9436dc58c123f07b230383083855593550c4d301d2532045a17ccf6eca505f6d \ + --hash=sha256:9dd6e9b1a913d096ac95d0399bd737e00f2af1e1594a787e00f7975778c8b2bf \ + --hash=sha256:ace010325c787c378afd7f7c1ac66b26313b3344628652eacd149bdd23c68841 \ + --hash=sha256:b47a43177a5e65b771b80db71e7be76c0ba23cc8aa73eeeb089ed5219cdbe27d \ + --hash=sha256:b797515be8743b771aa868f83563f789bbd4b236659ba52243b735d80b29ed93 \ + --hash=sha256:b7993704f1a4b204e71debe6095150d43b2ee6150fa4f44d6d966ec356a8d61f \ + --hash=sha256:d5c65bdf4484872c4af3150aeebe101ba560dcfb34488d9a8ff8dbcd21079647 \ + --hash=sha256:d81eddcb12d608cc08081fa88d046c78afb1bf8107e6feab5d43503fea74a635 \ + --hash=sha256:dc862056f76443a0db4509116c5cd480fe1b6a2d45512a653f9a855cc0517456 \ + --hash=sha256:ecc771ab628ea281517e24fd2c52e8f31c41e66652d07599ad8818abaad38cda \ + --hash=sha256:f200755768dc19c6f4e2b672421e0ebb3dd54c38d5a4f262b872d8cfcc9e93b5 \ + --hash=sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281 \ + --hash=sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80 + # via + # aiohttp + # yarl +typing-extensions==3.10.0.0 \ + 
--hash=sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497 \ + --hash=sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342 \ + --hash=sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84 + # via + # aiodocker + # aiohttp +yarl==1.6.3 \ + --hash=sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e \ + --hash=sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434 \ + --hash=sha256:15263c3b0b47968c1d90daa89f21fcc889bb4b1aac5555580d74565de6836366 \ + --hash=sha256:2ce4c621d21326a4a5500c25031e102af589edb50c09b321049e388b3934eec3 \ + --hash=sha256:31ede6e8c4329fb81c86706ba8f6bf661a924b53ba191b27aa5fcee5714d18ec \ + --hash=sha256:324ba3d3c6fee56e2e0b0d09bf5c73824b9f08234339d2b788af65e60040c959 \ + --hash=sha256:329412812ecfc94a57cd37c9d547579510a9e83c516bc069470db5f75684629e \ + --hash=sha256:4736eaee5626db8d9cda9eb5282028cc834e2aeb194e0d8b50217d707e98bb5c \ + --hash=sha256:4953fb0b4fdb7e08b2f3b3be80a00d28c5c8a2056bb066169de00e6501b986b6 \ + --hash=sha256:4c5bcfc3ed226bf6419f7a33982fb4b8ec2e45785a0561eb99274ebbf09fdd6a \ + --hash=sha256:547f7665ad50fa8563150ed079f8e805e63dd85def6674c97efd78eed6c224a6 \ + --hash=sha256:5b883e458058f8d6099e4420f0cc2567989032b5f34b271c0827de9f1079a424 \ + --hash=sha256:63f90b20ca654b3ecc7a8d62c03ffa46999595f0167d6450fa8383bab252987e \ + --hash=sha256:68dc568889b1c13f1e4745c96b931cc94fdd0defe92a72c2b8ce01091b22e35f \ + --hash=sha256:69ee97c71fee1f63d04c945f56d5d726483c4762845400a6795a3b75d56b6c50 \ + --hash=sha256:6d6283d8e0631b617edf0fd726353cb76630b83a089a40933043894e7f6721e2 \ + --hash=sha256:72a660bdd24497e3e84f5519e57a9ee9220b6f3ac4d45056961bf22838ce20cc \ + --hash=sha256:73494d5b71099ae8cb8754f1df131c11d433b387efab7b51849e7e1e851f07a4 \ + --hash=sha256:7356644cbed76119d0b6bd32ffba704d30d747e0c217109d7979a7bc36c4d970 \ + --hash=sha256:8a9066529240171b68893d60dca86a763eae2139dd42f42106b03cf4b426bf10 \ + 
--hash=sha256:8aa3decd5e0e852dc68335abf5478a518b41bf2ab2f330fe44916399efedfae0 \ + --hash=sha256:97b5bdc450d63c3ba30a127d018b866ea94e65655efaf889ebeabc20f7d12406 \ + --hash=sha256:9ede61b0854e267fd565e7527e2f2eb3ef8858b301319be0604177690e1a3896 \ + --hash=sha256:b2e9a456c121e26d13c29251f8267541bd75e6a1ccf9e859179701c36a078643 \ + --hash=sha256:b5dfc9a40c198334f4f3f55880ecf910adebdcb2a0b9a9c23c9345faa9185721 \ + --hash=sha256:bafb450deef6861815ed579c7a6113a879a6ef58aed4c3a4be54400ae8871478 \ + --hash=sha256:c49ff66d479d38ab863c50f7bb27dee97c6627c5fe60697de15529da9c3de724 \ + --hash=sha256:ce3beb46a72d9f2190f9e1027886bfc513702d748047b548b05dab7dfb584d2e \ + --hash=sha256:d26608cf178efb8faa5ff0f2d2e77c208f471c5a3709e577a7b3fd0445703ac8 \ + --hash=sha256:d597767fcd2c3dc49d6eea360c458b65643d1e4dbed91361cf5e36e53c1f8c96 \ + --hash=sha256:d5c32c82990e4ac4d8150fd7652b972216b204de4e83a122546dce571c1bdf25 \ + --hash=sha256:d8d07d102f17b68966e2de0e07bfd6e139c7c02ef06d3a0f8d2f0f055e13bb76 \ + --hash=sha256:e46fba844f4895b36f4c398c5af062a9808d1f26b2999c58909517384d5deda2 \ + --hash=sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2 \ + --hash=sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c \ + --hash=sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a \ + --hash=sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71 + # via aiohttp diff --git a/tools/docker/tests/test_utils.py b/tools/docker/tests/test_utils.py new file mode 100644 index 0000000000000..dba3026160b27 --- /dev/null +++ b/tools/docker/tests/test_utils.py @@ -0,0 +1,145 @@ +from unittest.mock import AsyncMock, MagicMock + +import pytest + +from tools.docker import utils + + +class MockAsyncIterator: + def __init__(self, seq): + self.iter = iter(seq) + self.count = 0 + + def __aiter__(self): + return self + + async def __anext__(self): + self.count += 1 + try: + return next(self.iter) + except StopIteration: + raise StopAsyncIteration 
+ + +@pytest.mark.asyncio +@pytest.mark.parametrize("args", [(), ("ARG1", ), ("ARG1", "ARG2")]) +@pytest.mark.parametrize("kwargs", [{}, dict(kkey1="VVAR1", kkey2="VVAR2")]) +async def test_util_build_image(patches, args, kwargs): + patched = patches( + "_build_image", + "tempfile", + prefix="tools.docker.utils") + + with patched as (m_build, m_temp): + assert not await utils.build_image(*args, **kwargs) + + assert ( + list(m_temp.NamedTemporaryFile.call_args) + == [(), {}]) + + assert ( + list(m_build.call_args) + == [(m_temp.NamedTemporaryFile.return_value.__enter__.return_value, ) + args, + kwargs]) + + +@pytest.mark.asyncio +@pytest.mark.parametrize("stream", [True, False]) +@pytest.mark.parametrize("buildargs", [None, dict(key1="VAR1", key2="VAR2")]) +@pytest.mark.parametrize("error", [None, "SOMETHING WENT WRONG"]) +async def test_util__build_image(patches, stream, buildargs, error): + lines = ( + dict(notstream=f"NOTLINE{i}", + stream=f"LINE{i}") + for i in range(1, 4)) + + if error: + lines = list(lines) + lines[1]["errorDetail"] = dict(message=error) + lines = iter(lines) + + docker = AsyncMock() + docker.images.build = MagicMock(return_value=MockAsyncIterator(lines)) + + _stream = MagicMock() + tar = MagicMock() + patched = patches( + "tarfile", + prefix="tools.docker.utils") + + with patched as (m_tar, ): + args = (tar, docker, "CONTEXT", "TAG") + kwargs = {} + if stream: + kwargs["stream"] = _stream + if buildargs: + kwargs["buildargs"] = buildargs + + if error: + with pytest.raises(utils.BuildError) as e: + await utils._build_image(*args, **kwargs) + else: + assert not await utils._build_image(*args, **kwargs) + + assert ( + list(m_tar.open.call_args) + == [(tar.name,), {'fileobj': tar, 'mode': 'w'}]) + assert ( + list(m_tar.open.return_value.__enter__.return_value.add.call_args) + == [('CONTEXT',), {'arcname': '.'}]) + assert ( + list(tar.seek.call_args) + == [(0,), {}]) + assert ( + list(docker.images.build.call_args) + == [(), + {'fileobj': tar, + 
'encoding': 'gzip', + 'tag': 'TAG', + 'stream': True, + 'buildargs': buildargs or {}}]) + if stream and error: + assert ( + list(list(c) for c in _stream.call_args_list) + == [[('LINE1',), {}]]) + return + elif stream: + assert ( + list(list(c) for c in _stream.call_args_list) + == [[(f'LINE{i}',), {}] for i in range(1, 4)]) + return + # the iterator should be called n + 1 for the n of items + # if there was an error it should stop at the error + assert docker.images.build.return_value.count == 2 if error else 4 + assert not _stream.called + + +@pytest.mark.asyncio +@pytest.mark.parametrize("raises", [True, False]) +@pytest.mark.parametrize("url", [None, "URL"]) +async def test_util_docker_client(patches, raises, url): + + class DummyError(Exception): + pass + + patched = patches( + "aiodocker", + prefix="tools.docker.utils") + + with patched as (m_docker, ): + m_docker.Docker.return_value.close = AsyncMock() + if raises: + with pytest.raises(DummyError): + async with utils.docker_client(url) as docker: + raise DummyError() + else: + async with utils.docker_client(url) as docker: + pass + + assert ( + list(m_docker.Docker.call_args) + == [(url,), {}]) + assert docker == m_docker.Docker.return_value + assert ( + list(m_docker.Docker.return_value.close.call_args) + == [(), {}]) diff --git a/tools/docker/utils.py b/tools/docker/utils.py new file mode 100644 index 0000000000000..93ca70592040b --- /dev/null +++ b/tools/docker/utils.py @@ -0,0 +1,102 @@ +import tarfile +import tempfile +from contextlib import asynccontextmanager +from typing import Callable, Iterator, Optional + +import aiodocker + + +class BuildError(Exception): + pass + + +async def _build_image( + tar: tempfile.NamedTemporaryFile, + docker: aiodocker.Docker, + context: str, + tag: str, + buildargs: Optional[dict] = None, + stream: Optional[Callable] = None, + **kwargs) -> None: + """Docker image builder + + if a `stream` callable arg is supplied, logs are output there. 
+ + raises `tools.docker.utils.BuildError` with any error output. + """ + # create a tarfile from the supplied directory + with tarfile.open(tar.name, fileobj=tar, mode="w") as tarball: + tarball.add(context, arcname=".") + tar.seek(0) + + # build the docker image + build = docker.images.build( + fileobj=tar, encoding="gzip", tag=tag, stream=True, buildargs=buildargs or {}, **kwargs) + + async for line in build: + if line.get("errorDetail"): + raise BuildError( + f"Docker image failed to build {tag} {buildargs}\n{line['errorDetail']['message']}") + if stream and "stream" in line: + stream(line["stream"].strip()) + + +async def build_image(*args, **kwargs) -> None: + """Creates a Docker context by tarballing a directory, and then building an image with it + + aiodocker doesn't provide an in-built way to build docker images from a directory, only + a file, so you can't include artefacts. + + this adds the ability to include artefacts. + + as an example, assuming you have a directory containing a `Dockerfile` and some artefacts at + `/tmp/mydockercontext` - and wanted to build the image `envoy:foo` you could: + + ```python + + import asyncio + + from tools.docker import utils + + + async def myimage(): + async with utils.docker_client() as docker: + await utils.build_image( + docker, + "/tmp/mydockerbuildcontext", + "envoy:foo", + buildargs={}) + + asyncio.run(myimage()) + ``` + """ + with tempfile.NamedTemporaryFile() as tar: + await _build_image(tar, *args, **kwargs) + + +@asynccontextmanager +async def docker_client(url: Optional[str] = "") -> Iterator[aiodocker.Docker]: + """Aiodocker client + + For example to dump the docker image data: + + ```python + + import asyncio + + from tools.docker import utils + + + async def docker_images(): + async with utils.docker_client() as docker: + print(await docker.images.list()) + + asyncio.run(docker_images()) + ``` + """ + + docker = aiodocker.Docker(url) + try: + yield docker + finally: + await docker.close() From 
a1c5a5076e97e6cd3d4b725b6eeb645513c72092 Mon Sep 17 00:00:00 2001 From: danzh Date: Tue, 20 Jul 2021 14:52:02 -0400 Subject: [PATCH 13/57] quiche: update QUICHE to commit 5dd7a030209f9a6b5043bebd8ac3ee54f18d1d08 (#17328) Commit Message: Modify QUIC_BUG implementation to log with rate limit like ENVOY_BUG in release mode. Fix a QuicMemSliceSpanImpl life time issue which was exposed by new QUICHE change Risk Level: low Testing: added unit tests for QUIC_BUG and QuicMemSliceSpanImpl Signed-off-by: Dan Zhang --- bazel/external/quiche.BUILD | 78 +++++++++--------- bazel/external/quiche.genrule_cmd | 1 + bazel/repository_locations.bzl | 6 +- source/common/quic/platform/BUILD | 12 +-- .../quic/platform/http2_containers_impl.h | 15 ---- .../quic/platform/http2_string_utils_impl.h | 47 ----------- .../quic/platform/quic_containers_impl.h | 45 +---------- .../quic/platform/quic_file_utils_impl.cc | 53 ------------- .../quic/platform/quic_file_utils_impl.h | 30 ------- .../common/quic/platform/quic_map_util_impl.h | 23 ------ .../quic/platform/quic_mem_slice_impl.cc | 5 ++ .../quic/platform/quic_mem_slice_impl.h | 11 ++- .../quic/platform/quic_mem_slice_span_impl.h | 37 ++++++--- .../platform/quic_mem_slice_storage_impl.h | 4 +- .../quic/platform/quiche_bug_tracker_impl.cc | 42 ++++++++++ .../quic/platform/quiche_bug_tracker_impl.h | 46 +++++++++-- .../quic/platform/spdy_containers_impl.h | 22 ------ .../quic/platform/spdy_string_utils_impl.h | 40 ---------- test/common/quic/active_quic_listener_test.cc | 2 +- .../quic/envoy_quic_client_session_test.cc | 28 +------ .../quic/envoy_quic_client_stream_test.cc | 10 ++- .../common/quic/envoy_quic_dispatcher_test.cc | 2 +- .../quic/envoy_quic_server_session_test.cc | 2 +- .../quic/envoy_quic_server_stream_test.cc | 2 +- test/common/quic/platform/BUILD | 12 --- .../quic/platform/http2_platform_test.cc | 14 ---- .../quic/platform/quic_expect_bug_impl.h | 5 +- .../quic/platform/quic_platform_test.cc | 79 ++++++++----------- 
test/common/quic/platform/quiche_test_impl.h | 7 ++ test/common/quic/test_utils.h | 73 +++++++++++------ .../integration/quic_http_integration_test.cc | 2 +- 31 files changed, 275 insertions(+), 480 deletions(-) delete mode 100644 source/common/quic/platform/http2_containers_impl.h delete mode 100644 source/common/quic/platform/http2_string_utils_impl.h delete mode 100644 source/common/quic/platform/quic_file_utils_impl.cc delete mode 100644 source/common/quic/platform/quic_file_utils_impl.h delete mode 100644 source/common/quic/platform/quic_map_util_impl.h create mode 100644 source/common/quic/platform/quiche_bug_tracker_impl.cc delete mode 100644 source/common/quic/platform/spdy_containers_impl.h delete mode 100644 source/common/quic/platform/spdy_string_utils_impl.h diff --git a/bazel/external/quiche.BUILD b/bazel/external/quiche.BUILD index e8c84f7a145c8..e7a654c919231 100644 --- a/bazel/external/quiche.BUILD +++ b/bazel/external/quiche.BUILD @@ -92,9 +92,9 @@ envoy_cc_library( repository = "@envoy", deps = [ ":http2_core_write_scheduler_lib", + ":quiche_common_platform", ":spdy_core_intrusive_list_lib", ":spdy_core_protocol_lib", - ":spdy_platform", ], ) @@ -105,8 +105,8 @@ envoy_cc_library( repository = "@envoy", deps = [ ":http2_core_write_scheduler_lib", + ":quiche_common_platform", ":spdy_core_protocol_lib", - ":spdy_platform", ], ) @@ -125,13 +125,10 @@ envoy_cc_library( name = "http2_platform", hdrs = [ "quiche/http2/platform/api/http2_bug_tracker.h", - "quiche/http2/platform/api/http2_containers.h", - "quiche/http2/platform/api/http2_estimate_memory_usage.h", "quiche/http2/platform/api/http2_flag_utils.h", "quiche/http2/platform/api/http2_flags.h", "quiche/http2/platform/api/http2_logging.h", "quiche/http2/platform/api/http2_macros.h", - "quiche/http2/platform/api/http2_string_utils.h", # TODO: uncomment the following files as implementations are added. 
# "quiche/http2/platform/api/http2_test_helpers.h", ], @@ -686,6 +683,7 @@ envoy_cc_library( ":http2_hpack_decoder_hpack_whole_entry_listener_lib", ":http2_hpack_hpack_constants_lib", ":http2_platform", + ":quiche_common_text_utils_lib", ], ) @@ -773,28 +771,15 @@ envoy_cc_library( deps = [":http2_platform"], ) -envoy_cc_library( - name = "spdy_platform", - hdrs = [ - "quiche/spdy/platform/api/spdy_containers.h", - "quiche/spdy/platform/api/spdy_estimate_memory_usage.h", - "quiche/spdy/platform/api/spdy_string_utils.h", - ], - repository = "@envoy", - visibility = ["//visibility:public"], - deps = [ - ":quiche_common_lib", - "@envoy//source/common/quic/platform:spdy_platform_impl_lib", - ], -) - envoy_cc_library( name = "spdy_simple_arena_lib", srcs = ["quiche/spdy/core/spdy_simple_arena.cc"], hdrs = ["quiche/spdy/core/spdy_simple_arena.h"], repository = "@envoy", visibility = ["//visibility:public"], - deps = [":spdy_platform"], + deps = [ + ":quiche_common_platform", + ], ) envoy_cc_library( @@ -804,7 +789,7 @@ envoy_cc_library( copts = quiche_copts, repository = "@envoy", visibility = ["//visibility:public"], - deps = [":spdy_platform"], + deps = [":quiche_common_platform"], ) envoy_cc_library( @@ -821,6 +806,7 @@ envoy_cc_library( repository = "@envoy", deps = [ ":http2_platform", + ":quiche_common_platform", ":spdy_core_alt_svc_wire_format_lib", ":spdy_core_frame_reader_lib", ":spdy_core_header_block_lib", @@ -828,7 +814,6 @@ envoy_cc_library( ":spdy_core_hpack_hpack_lib", ":spdy_core_protocol_lib", ":spdy_core_zero_copy_output_buffer_lib", - ":spdy_platform", ], ) @@ -839,8 +824,8 @@ envoy_cc_library( copts = quiche_copts, repository = "@envoy", deps = [ + ":quiche_common_platform", ":spdy_core_protocol_lib", - ":spdy_platform", ], ) @@ -852,8 +837,9 @@ envoy_cc_library( repository = "@envoy", visibility = ["//visibility:public"], deps = [ + ":quiche_common_lib", + ":quiche_common_platform", ":spdy_core_header_storage_lib", - ":spdy_platform", ], ) @@ -865,7 
+851,7 @@ envoy_cc_library( repository = "@envoy", deps = [ "spdy_simple_arena_lib", - ":spdy_platform", + ":quiche_common_platform", ], ) @@ -875,7 +861,7 @@ envoy_cc_library( copts = quiche_copts, repository = "@envoy", visibility = ["//visibility:public"], - deps = [":spdy_platform"], + deps = [":quiche_common_platform"], ) envoy_cc_library( @@ -892,13 +878,13 @@ envoy_cc_library( ":http2_decoder_frame_decoder_listener_lib", ":http2_platform", ":http2_structures_lib", + ":quiche_common_platform", ":spdy_core_alt_svc_wire_format_lib", ":spdy_core_header_block_lib", ":spdy_core_headers_handler_interface_lib", ":spdy_core_hpack_hpack_decoder_adapter_lib", ":spdy_core_hpack_hpack_lib", ":spdy_core_protocol_lib", - ":spdy_platform", ], ) @@ -930,8 +916,8 @@ envoy_cc_library( repository = "@envoy", deps = [ ":http2_hpack_huffman_hpack_huffman_encoder_lib", + ":quiche_common_platform", ":spdy_core_protocol_lib", - ":spdy_platform", ], ) @@ -948,10 +934,10 @@ envoy_cc_library( ":http2_hpack_decoder_hpack_decoder_listener_lib", ":http2_hpack_decoder_hpack_decoder_tables_lib", ":http2_hpack_hpack_constants_lib", + ":quiche_common_platform", ":spdy_core_header_block_lib", ":spdy_core_headers_handler_interface_lib", ":spdy_core_hpack_hpack_lib", - ":spdy_platform", ], ) @@ -966,9 +952,9 @@ envoy_cc_library( repository = "@envoy", visibility = ["//visibility:public"], deps = [ + ":quiche_common_platform", ":spdy_core_alt_svc_wire_format_lib", ":spdy_core_header_block_lib", - ":spdy_platform", ], ) @@ -977,8 +963,8 @@ envoy_cc_library( hdrs = ["quiche/spdy/core/write_scheduler.h"], repository = "@envoy", deps = [ + ":quiche_common_platform", ":spdy_core_protocol_lib", - ":spdy_platform", ], ) @@ -989,11 +975,11 @@ envoy_cc_test_library( copts = quiche_copts, repository = "@envoy", deps = [ + ":quiche_common_platform", ":quiche_common_test_tools_test_utils_lib", ":spdy_core_header_block_lib", ":spdy_core_headers_handler_interface_lib", ":spdy_core_protocol_lib", - 
":spdy_platform", ], ) @@ -1007,12 +993,10 @@ envoy_cc_library( envoy_cc_library( name = "quic_platform", srcs = [ - "quiche/quic/platform/api/quic_file_utils.cc", "quiche/quic/platform/api/quic_hostname_utils.cc", "quiche/quic/platform/api/quic_mutex.cc", ], hdrs = [ - "quiche/quic/platform/api/quic_file_utils.h", "quiche/quic/platform/api/quic_hostname_utils.h", "quiche/quic/platform/api/quic_mutex.h", ], @@ -1033,13 +1017,11 @@ envoy_cc_library( "quiche/quic/platform/api/quic_client_stats.h", "quiche/quic/platform/api/quic_containers.h", "quiche/quic/platform/api/quic_error_code_wrappers.h", - "quiche/quic/platform/api/quic_estimate_memory_usage.h", "quiche/quic/platform/api/quic_exported_stats.h", "quiche/quic/platform/api/quic_flag_utils.h", "quiche/quic/platform/api/quic_flags.h", "quiche/quic/platform/api/quic_iovec.h", "quiche/quic/platform/api/quic_logging.h", - "quiche/quic/platform/api/quic_map_util.h", "quiche/quic/platform/api/quic_mem_slice.h", "quiche/quic/platform/api/quic_reference_counted.h", "quiche/quic/platform/api/quic_server_stats.h", @@ -1605,6 +1587,7 @@ envoy_cc_library( ":quic_core_time_lib", ":quic_core_types_lib", ":quic_platform", + ":quiche_common_print_elements_lib", ], ) @@ -2683,6 +2666,7 @@ envoy_cc_library( ":quic_core_versions_lib", ":quic_platform_base", ":quiche_common_circular_deque_lib", + ":quiche_common_print_elements_lib", ], ) @@ -3124,6 +3108,7 @@ envoy_cc_library( ":quic_core_unacked_packet_map_lib", ":quic_core_utils_lib", ":quic_platform_base", + ":quiche_common_print_elements_lib", ], ) @@ -4017,8 +4002,6 @@ envoy_cc_library( hdrs = [ "quiche/common/platform/default/quiche_platform_impl/quic_mutex_impl.h", "quiche/common/platform/default/quiche_platform_impl/quic_testvalue_impl.h", - "quiche/common/platform/default/quiche_platform_impl/quiche_containers_impl.h", - "quiche/common/platform/default/quiche_platform_impl/quiche_estimate_memory_usage_impl.h", 
"quiche/common/platform/default/quiche_platform_impl/quiche_prefetch_impl.h", "quiche/common/platform/default/quiche_platform_impl/quiche_sleep_impl.h", "quiche/common/platform/default/quiche_platform_impl/quiche_time_utils_impl.h", @@ -4058,6 +4041,19 @@ envoy_cc_test_library( ], ) +envoy_cc_library( + name = "quiche_common_print_elements_lib", + hdrs = ["quiche/common/print_elements.h"], + external_deps = [ + "abseil_inlined_vector", + ], + repository = "@envoy", + tags = ["nofips"], + deps = [ + ":quiche_common_platform_export", + ], +) + envoy_cc_test_library( name = "quiche_common_test_tools_test_utils_lib", srcs = ["quiche/common/test_tools/quiche_test_utils.cc"], @@ -4188,10 +4184,8 @@ envoy_cc_test( envoy_cc_test( name = "quic_platform_api_test", srcs = [ - "quiche/quic/platform/api/quic_containers_test.cc", "quiche/quic/platform/api/quic_mem_slice_span_test.cc", - # Re-enable it when tests pass. - # "quiche/quic/platform/api/quic_mem_slice_storage_test.cc", + "quiche/quic/platform/api/quic_mem_slice_storage_test.cc", "quiche/quic/platform/api/quic_mem_slice_test.cc", "quiche/quic/platform/api/quic_reference_counted_test.cc", ], diff --git a/bazel/external/quiche.genrule_cmd b/bazel/external/quiche.genrule_cmd index 43f17a517e408..6719aa0f7227c 100644 --- a/bazel/external/quiche.genrule_cmd +++ b/bazel/external/quiche.genrule_cmd @@ -51,6 +51,7 @@ cat <sed_commands # Use envoy specific implementations for below platform APIs. /^#include/ s!"quiche_platform_impl/quiche_logging_impl.h!"source/common/quic/platform/quiche_logging_impl.h! +/^#include/ s!"quiche_platform_impl/quiche_bug_tracker_impl.h!"source/common/quic/platform/quiche_bug_tracker_impl.h! # The reset platform APIs use the QUICHE default implementations. /^#include/ s!"quiche_platform_impl/!"quiche/common/platform/default/quiche_platform_impl/! 
diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 3e7f6cfa44dac..9339a4afdc800 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -808,12 +808,12 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "QUICHE", project_desc = "QUICHE (QUIC, HTTP/2, Etc) is Google‘s implementation of QUIC and related protocols", project_url = "https://quiche.googlesource.com/quiche", - version = "aee86fb6ffce85e884a6f613ae5f47ce2c6b1e23", - sha256 = "a1b2c0cdd53a3a932db991bee736c6df20912c3d8070be9fbb4152575837cbf9", + version = "5dd7a030209f9a6b5043bebd8ac3ee54f18d1d08", + sha256 = "306342cb35cb9d8baea079c7b924b0133c53cbf182b251655e589d3b4604dc41", # Static snapshot of https://quiche.googlesource.com/quiche/+archive/{version}.tar.gz urls = ["https://storage.googleapis.com/quiche-envoy-integration/{version}.tar.gz"], use_category = ["dataplane_core"], - release_date = "2021-06-02", + release_date = "2021-07-16", cpe = "N/A", ), com_googlesource_googleurl = dict( diff --git a/source/common/quic/platform/BUILD b/source/common/quic/platform/BUILD index 13e879f96eaed..ef8aa550ab179 100644 --- a/source/common/quic/platform/BUILD +++ b/source/common/quic/platform/BUILD @@ -64,10 +64,8 @@ envoy_cc_library( envoy_cc_library( name = "http2_platform_impl_lib", hdrs = [ - "http2_containers_impl.h", "http2_logging_impl.h", "http2_macros_impl.h", - "http2_string_utils_impl.h", ], external_deps = [ "abseil_base", @@ -90,7 +88,10 @@ envoy_cc_library( envoy_cc_library( name = "quic_platform_logging_impl_lib", - srcs = ["quic_logging_impl.cc"], + srcs = [ + "quic_logging_impl.cc", + "quiche_bug_tracker_impl.cc", + ], hdrs = [ "quic_logging_impl.h", "quiche_bug_tracker_impl.h", @@ -115,7 +116,6 @@ envoy_cc_library( "quic_error_code_wrappers_impl.h", "quic_flags_impl.h", "quic_iovec_impl.h", - "quic_map_util_impl.h", "quic_mem_slice_impl.h", "quic_reference_counted_impl.h", "quic_server_stats_impl.h", @@ -151,11 +151,9 @@ envoy_cc_library( 
envoy_cc_library( name = "quic_platform_impl_lib", srcs = [ - "quic_file_utils_impl.cc", "quic_hostname_utils_impl.cc", ], hdrs = [ - "quic_file_utils_impl.h", "quic_hostname_utils_impl.h", "quic_mutex_impl.h", "quic_pcc_sender_impl.h", @@ -259,9 +257,7 @@ envoy_cc_library( name = "spdy_platform_impl_lib", hdrs = [ "spdy_bug_tracker_impl.h", - "spdy_containers_impl.h", "spdy_logging_impl.h", - "spdy_string_utils_impl.h", "spdy_test_utils_prod_impl.h", ], external_deps = [ diff --git a/source/common/quic/platform/http2_containers_impl.h b/source/common/quic/platform/http2_containers_impl.h deleted file mode 100644 index e43ec40bfc13a..0000000000000 --- a/source/common/quic/platform/http2_containers_impl.h +++ /dev/null @@ -1,15 +0,0 @@ -#pragma once - -#include - -// NOLINT(namespace-envoy) - -// This file is part of the QUICHE platform implementation, and is not to be -// consumed or referenced directly by other Envoy code. It serves purely as a -// porting layer for QUICHE. - -namespace http2 { - -template using Http2DequeImpl = std::deque; - -} // namespace http2 diff --git a/source/common/quic/platform/http2_string_utils_impl.h b/source/common/quic/platform/http2_string_utils_impl.h deleted file mode 100644 index 38c75a57a3948..0000000000000 --- a/source/common/quic/platform/http2_string_utils_impl.h +++ /dev/null @@ -1,47 +0,0 @@ -#pragma once - -// NOLINT(namespace-envoy) -// -// This file is part of the QUICHE platform implementation, and is not to be -// consumed or referenced directly by other Envoy code. It serves purely as a -// porting layer for QUICHE. - -#include "source/common/quic/platform/string_utils.h" - -#include "absl/strings/escaping.h" -#include "absl/strings/str_cat.h" -#include "absl/strings/str_format.h" -#include "fmt/printf.h" - -namespace http2 { - -template inline std::string Http2StrCatImpl(const Args&... 
args) { - return absl::StrCat(std::forward(args)...); -} - -template -inline void Http2StrAppendImpl(std::string* output, const Args&... args) { - absl::StrAppend(output, std::forward(args)...); -} - -template inline std::string Http2StringPrintfImpl(const Args&... args) { - return fmt::sprintf(std::forward(args)...); -} - -inline std::string Http2HexEncodeImpl(const void* bytes, size_t size) { - return absl::BytesToHexString(absl::string_view(static_cast(bytes), size)); -} - -inline std::string Http2HexDecodeImpl(absl::string_view data) { - return absl::HexStringToBytes(data); -} - -inline std::string Http2HexDumpImpl(absl::string_view data) { return quiche::HexDump(data); } - -inline std::string Http2HexEscapeImpl(absl::string_view data) { return absl::CHexEscape(data); } - -template inline std::string Http2HexImpl(Number number) { - return absl::StrCat(absl::Hex(number)); -} - -} // namespace http2 diff --git a/source/common/quic/platform/quic_containers_impl.h b/source/common/quic/platform/quic_containers_impl.h index 061fe114695d3..2e45c63eba05f 100644 --- a/source/common/quic/platform/quic_containers_impl.h +++ b/source/common/quic/platform/quic_containers_impl.h @@ -23,48 +23,7 @@ namespace quic { -template using QuicDefaultHasherImpl = absl::Hash; - -template -using QuicUnorderedMapImpl = absl::node_hash_map; - -template -using QuicHashMapImpl = absl::flat_hash_map; - -template using QuicHashSetImpl = absl::flat_hash_set; - -template using QuicUnorderedSetImpl = absl::node_hash_set; - -template -using QuicLinkedHashMapImpl = quiche::QuicheLinkedHashMap; - -template -using QuicSmallMapImpl = absl::flat_hash_map; - -template using QuicQueueImpl = std::queue; - -template using QuicDequeImpl = std::deque; - -template > -using QuicInlinedVectorImpl = absl::InlinedVector; - -template -inline std::ostream& operator<<(std::ostream& os, - const QuicInlinedVectorImpl inlined_vector) { - std::stringstream debug_string; - debug_string << "{"; - typename 
QuicInlinedVectorImpl::const_iterator it = inlined_vector.cbegin(); - debug_string << *it; - ++it; - while (it != inlined_vector.cend()) { - debug_string << ", " << *it; - ++it; - } - debug_string << "}"; - return os << debug_string.str(); -} - -template -using QuicOrderedSetImpl = absl::btree_set; +template +using QuicSmallOrderedSetImpl = absl::btree_set; } // namespace quic diff --git a/source/common/quic/platform/quic_file_utils_impl.cc b/source/common/quic/platform/quic_file_utils_impl.cc deleted file mode 100644 index 9c909c801643c..0000000000000 --- a/source/common/quic/platform/quic_file_utils_impl.cc +++ /dev/null @@ -1,53 +0,0 @@ -// NOLINT(namespace-envoy) - -// This file is part of the QUICHE platform implementation, and is not to be -// consumed or referenced directly by other Envoy code. It serves purely as a -// porting layer for QUICHE. - -#include "source/common/quic/platform/quic_file_utils_impl.h" - -#include "source/common/filesystem/directory.h" -#include "source/common/filesystem/filesystem_impl.h" - -#include "absl/strings/str_cat.h" - -namespace quic { -namespace { - -void depthFirstTraverseDirectory(const std::string& dirname, std::vector& files) { - Envoy::Filesystem::Directory directory(dirname); - for (const Envoy::Filesystem::DirectoryEntry& entry : directory) { - switch (entry.type_) { - case Envoy::Filesystem::FileType::Regular: - files.push_back(absl::StrCat(dirname, "/", entry.name_)); - break; - case Envoy::Filesystem::FileType::Directory: - if (entry.name_ != "." && entry.name_ != "..") { - depthFirstTraverseDirectory(absl::StrCat(dirname, "/", entry.name_), files); - } - break; - default: - ASSERT(false, - absl::StrCat("Unknow file entry type ", entry.type_, " under directory ", dirname)); - } - } -} - -} // namespace - -// Traverses the directory |dirname| and returns all of the files it contains. 
-// NOLINTNEXTLINE(readability-identifier-naming) -std::vector ReadFileContentsImpl(const std::string& dirname) { - std::vector files; - depthFirstTraverseDirectory(dirname, files); - return files; -} - -// Reads the contents of |filename| as a string into |contents|. -// NOLINTNEXTLINE(readability-identifier-naming) -void ReadFileContentsImpl(absl::string_view filename, std::string* contents) { - Envoy::Filesystem::InstanceImpl fs; - *contents = fs.fileReadToEnd(std::string(filename.data(), filename.size())); -} - -} // namespace quic diff --git a/source/common/quic/platform/quic_file_utils_impl.h b/source/common/quic/platform/quic_file_utils_impl.h deleted file mode 100644 index 25c31e9deca27..0000000000000 --- a/source/common/quic/platform/quic_file_utils_impl.h +++ /dev/null @@ -1,30 +0,0 @@ -#pragma once - -// NOLINT(namespace-envoy) - -// This file is part of the QUICHE platform implementation, and is not to be -// consumed or referenced directly by other Envoy code. It serves purely as a -// porting layer for QUICHE. - -#include - -#include "absl/strings/string_view.h" - -namespace quic { - -/** - * Traverses the directory |dirname| and returns all of the files it contains. - * @param dirname full path without trailing '/'. - */ -// NOLINTNEXTLINE(readability-identifier-naming)` -std::vector ReadFileContentsImpl(const std::string& dirname); - -/** - * Reads the contents of |filename| as a string into |contents|. - * @param filename the full path to the file. - * @param contents output location of the file content. 
- */ -// NOLINTNEXTLINE(readability-identifier-naming) -void ReadFileContentsImpl(absl::string_view filename, std::string* contents); - -} // namespace quic diff --git a/source/common/quic/platform/quic_map_util_impl.h b/source/common/quic/platform/quic_map_util_impl.h deleted file mode 100644 index 2bf549d9c353a..0000000000000 --- a/source/common/quic/platform/quic_map_util_impl.h +++ /dev/null @@ -1,23 +0,0 @@ -#pragma once - -// NOLINT(namespace-envoy) -// -// This file is part of the QUICHE platform implementation, and is not to be -// consumed or referenced directly by other Envoy code. It serves purely as a -// porting layer for QUICHE. - -#include - -namespace quic { - -template -bool QuicContainsKeyImpl(const Collection& collection, const Key& key) { - return collection.find(key) != collection.end(); -} - -template -bool QuicContainsValueImpl(const Collection& collection, const Value& value) { - return std::find(collection.begin(), collection.end(), value) != collection.end(); -} - -} // namespace quic diff --git a/source/common/quic/platform/quic_mem_slice_impl.cc b/source/common/quic/platform/quic_mem_slice_impl.cc index 3744ae0597e44..a9e9876ffd10c 100644 --- a/source/common/quic/platform/quic_mem_slice_impl.cc +++ b/source/common/quic/platform/quic_mem_slice_impl.cc @@ -44,6 +44,11 @@ QuicMemSliceImpl::QuicMemSliceImpl(std::unique_ptr buffer, size_t length ASSERT(this->length() == length); } +QuicMemSliceImpl::~QuicMemSliceImpl() { + ASSERT(fragment_ == nullptr || (firstSliceLength(single_slice_buffer_) == fragment_->size() && + data() == fragment_->data())); +} + const char* QuicMemSliceImpl::data() const { return reinterpret_cast(single_slice_buffer_.frontSlice().mem_); } diff --git a/source/common/quic/platform/quic_mem_slice_impl.h b/source/common/quic/platform/quic_mem_slice_impl.h index c6e90cba507a2..6e97d5faab1d8 100644 --- a/source/common/quic/platform/quic_mem_slice_impl.h +++ b/source/common/quic/platform/quic_mem_slice_impl.h @@ -23,6 +23,8 
@@ class QuicMemSliceImpl { // Constructs an empty QuicMemSliceImpl. QuicMemSliceImpl() = default; + ~QuicMemSliceImpl(); + // Constructs a QuicMemSliceImpl by taking ownership of the memory in |buffer|. QuicMemSliceImpl(QuicUniqueBufferPtr buffer, size_t length); QuicMemSliceImpl(std::unique_ptr buffer, size_t length); @@ -47,7 +49,11 @@ class QuicMemSliceImpl { } // Below methods implements interface needed by QuicMemSlice. - void Reset() { single_slice_buffer_.drain(length()); } + // NOLINTNEXTLINE(readability-identifier-naming) + void Reset() { + single_slice_buffer_.drain(length()); + fragment_ = nullptr; + } // Returns a char pointer to the one and only slice in buffer. const char* data() const; @@ -55,10 +61,9 @@ class QuicMemSliceImpl { size_t length() const { return single_slice_buffer_.length(); } bool empty() const { return length() == 0; } - Envoy::Buffer::OwnedImpl& single_slice_buffer() { return single_slice_buffer_; } + Envoy::Buffer::OwnedImpl& getSingleSliceBuffer() { return single_slice_buffer_; } private: - // Prerequisite: buffer has at least one slice. size_t firstSliceLength(Envoy::Buffer::Instance& buffer); std::unique_ptr fragment_; diff --git a/source/common/quic/platform/quic_mem_slice_span_impl.h b/source/common/quic/platform/quic_mem_slice_span_impl.h index ef40e63870573..d1e9855551282 100644 --- a/source/common/quic/platform/quic_mem_slice_span_impl.h +++ b/source/common/quic/platform/quic_mem_slice_span_impl.h @@ -25,19 +25,24 @@ class QuicMemSliceSpanImpl { * @param buffer has to outlive the life time of this class. 
*/ explicit QuicMemSliceSpanImpl(Envoy::Buffer::Instance& buffer) : buffer_(&buffer) {} - explicit QuicMemSliceSpanImpl(QuicMemSliceImpl* slice) : buffer_(&slice->single_slice_buffer()) {} + explicit QuicMemSliceSpanImpl(QuicMemSliceImpl* slice) + : buffer_(&slice->getSingleSliceBuffer()), mem_slice_(slice) {} QuicMemSliceSpanImpl(const QuicMemSliceSpanImpl& other) = default; QuicMemSliceSpanImpl& operator=(const QuicMemSliceSpanImpl& other) = default; - QuicMemSliceSpanImpl(QuicMemSliceSpanImpl&& other) noexcept : buffer_(other.buffer_) { + QuicMemSliceSpanImpl(QuicMemSliceSpanImpl&& other) noexcept + : buffer_(other.buffer_), mem_slice_(other.mem_slice_) { other.buffer_ = nullptr; + other.mem_slice_ = nullptr; } QuicMemSliceSpanImpl& operator=(QuicMemSliceSpanImpl&& other) noexcept { if (this != &other) { buffer_ = other.buffer_; + mem_slice_ = other.mem_slice_; other.buffer_ = nullptr; + other.mem_slice_ = nullptr; } return *this; } @@ -54,23 +59,33 @@ class QuicMemSliceSpanImpl { bool empty() const { return buffer_->length() == 0; } private: + // If constructed with a QuicMemSlice, mem_slice_ point to that object and this points to + // mem_slice_->getSingleSliceBuffer(). If constructed with an Envoy buffer, this points to the + // buffer itself. Envoy::Buffer::Instance* buffer_{nullptr}; + // If this span is not constructed with a QuicMemSlice, this points to nullptr. + QuicMemSliceImpl* mem_slice_{nullptr}; }; template // NOLINTNEXTLINE(readability-identifier-naming) QuicByteCount QuicMemSliceSpanImpl::ConsumeAll(ConsumeFunction consume) { size_t saved_length = 0; - for (auto& slice : buffer_->getRawSlices()) { - if (slice.len_ == 0) { - continue; + if (mem_slice_ == nullptr) { + for (auto& slice : buffer_->getRawSlices()) { + if (slice.len_ == 0) { + continue; + } + // Move each slice into a stand-alone buffer. + // TODO(danzh): investigate the cost of allocating one buffer per slice. 
+ // If it turns out to be expensive, add a new function to free data in the middle in buffer + // interface and re-design QuicMemSliceImpl. + consume(QuicMemSlice(QuicMemSliceImpl(*buffer_, slice.len_))); + saved_length += slice.len_; } - // Move each slice into a stand-alone buffer. - // TODO(danzh): investigate the cost of allocating one buffer per slice. - // If it turns out to be expensive, add a new function to free data in the middle in buffer - // interface and re-design QuicMemSliceImpl. - consume(QuicMemSlice(QuicMemSliceImpl(*buffer_, slice.len_))); - saved_length += slice.len_; + } else { + saved_length += mem_slice_->length(); + consume(quic::QuicMemSlice(std::move(*mem_slice_))); } ASSERT(buffer_->length() == 0); return saved_length; diff --git a/source/common/quic/platform/quic_mem_slice_storage_impl.h b/source/common/quic/platform/quic_mem_slice_storage_impl.h index 437e9be46736f..797ade760686e 100644 --- a/source/common/quic/platform/quic_mem_slice_storage_impl.h +++ b/source/common/quic/platform/quic_mem_slice_storage_impl.h @@ -34,9 +34,11 @@ class QuicMemSliceStorageImpl { QuicMemSliceStorageImpl(QuicMemSliceStorageImpl&& other) = default; QuicMemSliceStorageImpl& operator=(QuicMemSliceStorageImpl&& other) = default; + // NOLINTNEXTLINE(readability-identifier-naming) QuicMemSliceSpan ToSpan() { return QuicMemSliceSpan(QuicMemSliceSpanImpl(buffer_)); } - void Append(QuicMemSliceImpl mem_slice) { buffer_.move(mem_slice.single_slice_buffer()); } + // NOLINTNEXTLINE(readability-identifier-naming) + void Append(QuicMemSliceImpl mem_slice) { buffer_.move(mem_slice.getSingleSliceBuffer()); } private: Envoy::Buffer::OwnedImpl buffer_; diff --git a/source/common/quic/platform/quiche_bug_tracker_impl.cc b/source/common/quic/platform/quiche_bug_tracker_impl.cc new file mode 100644 index 0000000000000..170ecfb85e15b --- /dev/null +++ b/source/common/quic/platform/quiche_bug_tracker_impl.cc @@ -0,0 +1,42 @@ +#include 
"source/common/quic/platform/quiche_bug_tracker_impl.h" + +#include "source/common/common/assert.h" +#include "source/common/quic/platform/quic_logging_impl.h" + +// NOLINT(namespace-envoy) +// +// This file is part of the QUICHE platform implementation, and is not to be +// consumed or referenced directly by other Envoy code. It serves purely as a +// porting layer for QUICHE. + +namespace quic { + +std::atomic g_quiche_bug_exit_disabled; + +ScopedDisableExitOnQuicheBug::ScopedDisableExitOnQuicheBug() + : previous_value_(g_quiche_bug_exit_disabled) { + + g_quiche_bug_exit_disabled.store(true, std::memory_order_relaxed); +} + +ScopedDisableExitOnQuicheBug::~ScopedDisableExitOnQuicheBug() { + g_quiche_bug_exit_disabled.store(previous_value_, std::memory_order_relaxed); +} + +QuicheBugEmitter::~QuicheBugEmitter() { + // Release mode ENVOY_BUG applies rate limit. + if (Envoy::Assert::shouldLogAndInvokeEnvoyBugForEnvoyBugMacroUseOnly(bug_name_)) { + ENVOY_LOG_TO_LOGGER(Envoy::Logger::Registry::getLog(Envoy::Logger::Id::envoy_bug), error, + "QUICHE_BUG failure: {}.{}{}", condition_str_, + stream_.eof() ? "" : " Details: ", stream_.str()); +#if !defined(NDEBUG) && !defined(ENVOY_CONFIG_COVERAGE) + if (!g_quiche_bug_exit_disabled) { + abort(); + } +#else + Envoy::Assert::invokeEnvoyBugFailureRecordActionForEnvoyBugMacroUseOnly(bug_name_.data()); +#endif + } +} + +} // namespace quic diff --git a/source/common/quic/platform/quiche_bug_tracker_impl.h b/source/common/quic/platform/quiche_bug_tracker_impl.h index 0d4f8aa884acb..00155e774c7f3 100644 --- a/source/common/quic/platform/quiche_bug_tracker_impl.h +++ b/source/common/quic/platform/quiche_bug_tracker_impl.h @@ -6,11 +6,47 @@ // consumed or referenced directly by other Envoy code. It serves purely as a // porting layer for QUICHE. 
-#include "source/common/quic/platform/quic_logging_impl.h" +#include + +#include "absl/strings/string_view.h" + +namespace quic { + +class QuicheBugEmitter { +public: + explicit QuicheBugEmitter(absl::string_view condition_str, absl::string_view bug_name) + : condition_str_(condition_str), bug_name_(bug_name) {} + + ~QuicheBugEmitter(); + + std::ostringstream& stream() { return stream_; } + +private: + std::ostringstream stream_; + const std::string condition_str_; + const std::string bug_name_; +}; + +// Test and fuzz only, not for production, not thread-safe. +class ScopedDisableExitOnQuicheBug { +public: + ScopedDisableExitOnQuicheBug(); + ~ScopedDisableExitOnQuicheBug(); + +private: + const bool previous_value_; +}; + +} // namespace quic + +#define QUICHE_BUG_IF_IMPL(bug_id, condition) \ + switch (0) \ + default: \ + if (!(condition)) { \ + } else \ + quic::QuicheBugEmitter(#condition, #bug_id).stream() + +#define QUICHE_BUG_IMPL(bug_id) QUICHE_BUG_IF_IMPL(bug_id, true) -// TODO(wub): Implement exponential back off to avoid performance problems due -// to excessive QUIC_BUG. -#define QUICHE_BUG_IMPL(bug_id) QUICHE_LOG_IMPL(DFATAL) -#define QUICHE_BUG_IF_IMPL(bug_id, condition) QUICHE_LOG_IF_IMPL(DFATAL, condition) #define QUICHE_PEER_BUG_IMPL(bug_id) QUICHE_LOG_IMPL(ERROR) #define QUICHE_PEER_BUG_IF_IMPL(bug_id, condition) QUICHE_LOG_IF_IMPL(ERROR, condition) diff --git a/source/common/quic/platform/spdy_containers_impl.h b/source/common/quic/platform/spdy_containers_impl.h deleted file mode 100644 index 676127f066c96..0000000000000 --- a/source/common/quic/platform/spdy_containers_impl.h +++ /dev/null @@ -1,22 +0,0 @@ -#pragma once - -// NOLINT(namespace-envoy) - -// This file is part of the QUICHE platform implementation, and is not to be -// consumed or referenced directly by other Envoy code. It serves purely as a -// porting layer for QUICHE. 
- -#include "absl/container/flat_hash_map.h" -#include "absl/container/flat_hash_set.h" -#include "absl/container/inlined_vector.h" -#include "absl/hash/hash.h" -#include "quiche/common/quiche_linked_hash_map.h" - -namespace spdy { - -template -using SpdyLinkedHashMapImpl = quiche::QuicheLinkedHashMap; - -template -using SpdySmallMapImpl = absl::flat_hash_map; -} // namespace spdy diff --git a/source/common/quic/platform/spdy_string_utils_impl.h b/source/common/quic/platform/spdy_string_utils_impl.h deleted file mode 100644 index e62fdb3f73aad..0000000000000 --- a/source/common/quic/platform/spdy_string_utils_impl.h +++ /dev/null @@ -1,40 +0,0 @@ -#pragma once - -// NOLINT(namespace-envoy) -// -// This file is part of the QUICHE platform implementation, and is not to be -// consumed or referenced directly by other Envoy code. It serves purely as a -// porting layer for QUICHE. - -#include "source/common/quic/platform/string_utils.h" - -#include "absl/strings/escaping.h" -#include "absl/strings/match.h" -#include "absl/strings/str_cat.h" -#include "absl/strings/str_format.h" -#include "fmt/printf.h" - -namespace spdy { - -// NOLINTNEXTLINE(readability-identifier-naming) -inline char SpdyHexDigitToIntImpl(char c) { return quiche::HexDigitToInt(c); } - -// NOLINTNEXTLINE(readability-identifier-naming) -inline std::string SpdyHexDecodeImpl(absl::string_view data) { - return absl::HexStringToBytes(data); -} - -// NOLINTNEXTLINE(readability-identifier-naming) -inline bool SpdyHexDecodeToUInt32Impl(absl::string_view data, uint32_t* out) { - return quiche::HexDecodeToUInt32(data, out); -} - -// NOLINTNEXTLINE(readability-identifier-naming) -inline std::string SpdyHexEncodeImpl(const void* bytes, size_t size) { - return absl::BytesToHexString(absl::string_view(static_cast(bytes), size)); -} - -// NOLINTNEXTLINE(readability-identifier-naming) -inline std::string SpdyHexDumpImpl(absl::string_view data) { return quiche::HexDump(data); } - -} // namespace spdy diff --git 
a/test/common/quic/active_quic_listener_test.cc b/test/common/quic/active_quic_listener_test.cc index 5313ba2513f14..100e60657e1c4 100644 --- a/test/common/quic/active_quic_listener_test.cc +++ b/test/common/quic/active_quic_listener_test.cc @@ -85,7 +85,7 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { } bool use_http3 = GetParam().second == QuicVersionType::Iquic; SetQuicReloadableFlag(quic_disable_version_draft_29, !use_http3); - SetQuicReloadableFlag(quic_enable_version_rfcv1, use_http3); + SetQuicReloadableFlag(quic_disable_version_rfcv1, !use_http3); return quic::CurrentSupportedVersions(); }()[0]), quic_stat_names_(listener_config_.listenerScope().symbolTable()) {} diff --git a/test/common/quic/envoy_quic_client_session_test.cc b/test/common/quic/envoy_quic_client_session_test.cc index d377abd663a6c..62b9c66516736 100644 --- a/test/common/quic/envoy_quic_client_session_test.cc +++ b/test/common/quic/envoy_quic_client_session_test.cc @@ -64,32 +64,6 @@ class TestEnvoyQuicClientConnection : public EnvoyQuicClientConnection { using EnvoyQuicClientConnection::connectionStats; }; -class TestQuicCryptoClientStream : public quic::QuicCryptoClientStream { -public: - TestQuicCryptoClientStream(const quic::QuicServerId& server_id, quic::QuicSession* session, - std::unique_ptr verify_context, - quic::QuicCryptoClientConfig* crypto_config, - ProofHandler* proof_handler, bool has_application_state) - : quic::QuicCryptoClientStream(server_id, session, std::move(verify_context), crypto_config, - proof_handler, has_application_state) {} - - bool encryption_established() const override { return true; } -}; - -class TestQuicCryptoClientStreamFactory : public EnvoyQuicCryptoClientStreamFactoryInterface { -public: - std::unique_ptr - createEnvoyQuicCryptoClientStream(const quic::QuicServerId& server_id, quic::QuicSession* session, - std::unique_ptr verify_context, - quic::QuicCryptoClientConfig* crypto_config, - quic::QuicCryptoClientStream::ProofHandler* 
proof_handler, - bool has_application_state) override { - return std::make_unique(server_id, session, - std::move(verify_context), crypto_config, - proof_handler, has_application_state); - } -}; - class EnvoyQuicClientSessionTest : public testing::TestWithParam { public: EnvoyQuicClientSessionTest() @@ -97,7 +71,7 @@ class EnvoyQuicClientSessionTest : public testing::TestWithParam { dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam()); - SetQuicReloadableFlag(quic_enable_version_rfcv1, GetParam()); + SetQuicReloadableFlag(quic_disable_version_rfcv1, !GetParam()); return quic::ParsedVersionOfIndex(quic::CurrentSupportedVersions(), 0); }()), peer_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), diff --git a/test/common/quic/envoy_quic_client_stream_test.cc b/test/common/quic/envoy_quic_client_stream_test.cc index a0e381e8e28a5..5de4695bb4794 100644 --- a/test/common/quic/envoy_quic_client_stream_test.cc +++ b/test/common/quic/envoy_quic_client_stream_test.cc @@ -31,7 +31,7 @@ class EnvoyQuicClientStreamTest : public testing::TestWithParam { connection_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam()); - SetQuicReloadableFlag(quic_enable_version_rfcv1, GetParam()); + SetQuicReloadableFlag(quic_disable_version_rfcv1, !GetParam()); return quic::CurrentSupportedVersions()[0]; }()), peer_addr_(Network::Utility::getAddressWithPort(*Network::Utility::getIpv6LoopbackAddress(), @@ -42,8 +42,10 @@ class EnvoyQuicClientStreamTest : public testing::TestWithParam { quic::test::TestConnectionId(), connection_helper_, alarm_factory_, &writer_, /*owns_writer=*/false, {quic_version_}, *dispatcher_, createConnectionSocket(peer_addr_, self_addr_, 
nullptr))), - quic_session_(quic_config_, {quic_version_}, quic_connection_, *dispatcher_, - quic_config_.GetInitialStreamFlowControlWindowToSend() * 2), + quic_session_(quic_config_, {quic_version_}, + std::unique_ptr(quic_connection_), *dispatcher_, + quic_config_.GetInitialStreamFlowControlWindowToSend() * 2, + crypto_stream_factory_), stream_id_(quic::VersionUsesHttp3(quic_version_.transport_version) ? 4u : 5u), stats_({ALL_HTTP3_CODEC_STATS(POOL_COUNTER_PREFIX(scope_, "http3."), POOL_GAUGE_PREFIX(scope_, "http3."))}), @@ -51,6 +53,7 @@ class EnvoyQuicClientStreamTest : public testing::TestWithParam { stats_, http3_options_)), request_headers_{{":authority", host_}, {":method", "POST"}, {":path", "/"}}, request_trailers_{{"trailer-key", "trailer-value"}} { + SetQuicReloadableFlag(quic_single_ack_in_packet2, false); quic_stream_->setResponseDecoder(stream_decoder_); quic_stream_->addCallbacks(stream_callbacks_); quic_session_.ActivateStream(std::unique_ptr(quic_stream_)); @@ -145,6 +148,7 @@ class EnvoyQuicClientStreamTest : public testing::TestWithParam { Network::Address::InstanceConstSharedPtr self_addr_; MockDelegate delegate_; EnvoyQuicClientConnection* quic_connection_; + TestQuicCryptoClientStreamFactory crypto_stream_factory_; MockEnvoyQuicClientSession quic_session_; quic::QuicStreamId stream_id_; Stats::IsolatedStoreImpl scope_; diff --git a/test/common/quic/envoy_quic_dispatcher_test.cc b/test/common/quic/envoy_quic_dispatcher_test.cc index f15072bd140f9..8260ef7b6e833 100644 --- a/test/common/quic/envoy_quic_dispatcher_test.cc +++ b/test/common/quic/envoy_quic_dispatcher_test.cc @@ -67,7 +67,7 @@ class EnvoyQuicDispatcherTest : public QuicMultiVersionTest, } bool use_http3 = GetParam().second == QuicVersionType::Iquic; SetQuicReloadableFlag(quic_disable_version_draft_29, !use_http3); - SetQuicReloadableFlag(quic_enable_version_rfcv1, use_http3); + SetQuicReloadableFlag(quic_disable_version_rfcv1, !use_http3); return quic::CurrentSupportedVersions(); 
}()), quic_version_(version_manager_.GetSupportedVersions()[0]), diff --git a/test/common/quic/envoy_quic_server_session_test.cc b/test/common/quic/envoy_quic_server_session_test.cc index ff26575bf3e57..349afd7ff9737 100644 --- a/test/common/quic/envoy_quic_server_session_test.cc +++ b/test/common/quic/envoy_quic_server_session_test.cc @@ -153,7 +153,7 @@ class EnvoyQuicServerSessionTest : public testing::TestWithParam { dispatcher_(api_->allocateDispatcher("test_thread")), connection_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam()); - SetQuicReloadableFlag(quic_enable_version_rfcv1, GetParam()); + SetQuicReloadableFlag(quic_disable_version_rfcv1, !GetParam()); return quic::ParsedVersionOfIndex(quic::CurrentSupportedVersions(), 0); }()), quic_stat_names_(listener_config_.listenerScope().symbolTable()), diff --git a/test/common/quic/envoy_quic_server_stream_test.cc b/test/common/quic/envoy_quic_server_stream_test.cc index 557975eb41b7b..53ffe144a0e3a 100644 --- a/test/common/quic/envoy_quic_server_stream_test.cc +++ b/test/common/quic/envoy_quic_server_stream_test.cc @@ -46,7 +46,7 @@ class EnvoyQuicServerStreamTest : public testing::TestWithParam { connection_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *connection_helper_.GetClock()), quic_version_([]() { SetQuicReloadableFlag(quic_disable_version_draft_29, !GetParam()); - SetQuicReloadableFlag(quic_enable_version_rfcv1, GetParam()); + SetQuicReloadableFlag(quic_disable_version_rfcv1, !GetParam()); return quic::CurrentSupportedVersions()[0]; }()), listener_stats_({ALL_LISTENER_STATS(POOL_COUNTER(listener_config_.listenerScope()), diff --git a/test/common/quic/platform/BUILD b/test/common/quic/platform/BUILD index ad15b0e73441e..c7031156309e9 100644 --- a/test/common/quic/platform/BUILD +++ b/test/common/quic/platform/BUILD @@ -61,18 +61,6 @@ envoy_cc_test( ], ) -envoy_cc_test( - name = 
"spdy_platform_test", - srcs = ["spdy_platform_test.cc"], - external_deps = ["quiche_spdy_platform"], - deps = [ - "//source/common/quic/platform:quiche_flags_impl_lib", - "//test/test_common:logging_lib", - "//test/test_common:utility_lib", - "@com_googlesource_quiche//:spdy_platform", - ], -) - envoy_cc_test_library( name = "epoll_server_platform_impl_lib", hdrs = [ diff --git a/test/common/quic/platform/http2_platform_test.cc b/test/common/quic/platform/http2_platform_test.cc index c25c0e1c0a845..148ab17b30e5e 100644 --- a/test/common/quic/platform/http2_platform_test.cc +++ b/test/common/quic/platform/http2_platform_test.cc @@ -13,8 +13,6 @@ #include "gtest/gtest.h" #include "quiche/http2/platform/api/http2_bug_tracker.h" -#include "quiche/http2/platform/api/http2_containers.h" -#include "quiche/http2/platform/api/http2_estimate_memory_usage.h" #include "quiche/http2/platform/api/http2_flags.h" #include "quiche/http2/platform/api/http2_logging.h" #include "quiche/http2/platform/api/http2_macros.h" @@ -35,18 +33,6 @@ TEST(Http2PlatformTest, Http2BugTracker) { EXPECT_LOG_NOT_CONTAINS("error", "", HTTP2_BUG_IF(bug_1, false) << "A feature is not a bug."); } -TEST(Http2PlatformTest, Http2Deque) { - http2::Http2Deque deque; - deque.push_back(10); - EXPECT_EQ(10, deque.back()); -} - -TEST(Http2PlatformTest, Http2EstimateMemoryUsage) { - std::string s = "foo"; - // Stubbed out to always return 0. - EXPECT_EQ(0, http2::Http2EstimateMemoryUsage(s)); -} - TEST(Http2PlatformTest, Http2Log) { // HTTP2_LOG macros are defined to QUIC_LOG macros, which is tested in // QuicPlatformTest. Here we just make sure HTTP2_LOG macros compile. diff --git a/test/common/quic/platform/quic_expect_bug_impl.h b/test/common/quic/platform/quic_expect_bug_impl.h index 7ddba5ef37546..fee2dc2b283fa 100644 --- a/test/common/quic/platform/quic_expect_bug_impl.h +++ b/test/common/quic/platform/quic_expect_bug_impl.h @@ -6,11 +6,12 @@ // consumed or referenced directly by other Envoy code. 
It serves purely as a // porting layer for QUICHE. +#include "test/test_common/utility.h" + #include "quiche/quic/platform/api/quic_logging.h" #include "quiche/quic/platform/api/quic_mock_log.h" -#define EXPECT_QUIC_BUG_IMPL(statement, regex) \ - EXPECT_QUIC_DFATAL_IMPL(statement, testing::ContainsRegex(regex)) +#define EXPECT_QUIC_BUG_IMPL(statement, regex) EXPECT_ENVOY_BUG(statement, regex) #define EXPECT_QUIC_PEER_BUG_IMPL(statement, regex) \ EXPECT_QUIC_LOG_IMPL(statement, ERROR, testing::ContainsRegex(regex)) diff --git a/test/common/quic/platform/quic_platform_test.cc b/test/common/quic/platform/quic_platform_test.cc index 0d53089815957..73d13b22a733d 100644 --- a/test/common/quic/platform/quic_platform_test.cc +++ b/test/common/quic/platform/quic_platform_test.cc @@ -31,14 +31,11 @@ #include "quiche/quic/platform/api/quic_bug_tracker.h" #include "quiche/quic/platform/api/quic_client_stats.h" #include "quiche/quic/platform/api/quic_containers.h" -#include "quiche/quic/platform/api/quic_estimate_memory_usage.h" #include "quiche/quic/platform/api/quic_expect_bug.h" #include "quiche/quic/platform/api/quic_exported_stats.h" -#include "quiche/quic/platform/api/quic_file_utils.h" #include "quiche/quic/platform/api/quic_flags.h" #include "quiche/quic/platform/api/quic_hostname_utils.h" #include "quiche/quic/platform/api/quic_logging.h" -#include "quiche/quic/platform/api/quic_map_util.h" #include "quiche/quic/platform/api/quic_mem_slice.h" #include "quiche/quic/platform/api/quic_mem_slice_span.h" #include "quiche/quic/platform/api/quic_mem_slice_storage.h" @@ -73,6 +70,7 @@ class QuicPlatformTest : public testing::Test { GetLogger().set_level(spdlog::level::err); } + void SetUp() override { Envoy::Assert::resetEnvoyBugCountersForTest(); } ~QuicPlatformTest() override { setVerbosityLogThreshold(verbosity_log_threshold_); GetLogger().set_level(log_level_); @@ -85,9 +83,17 @@ class QuicPlatformTest : public testing::Test { enum class TestEnum { ZERO = 0, ONE, TWO, 
COUNT }; TEST_F(QuicPlatformTest, QuicBugTracker) { - EXPECT_DEBUG_DEATH(QUIC_BUG(bug_id) << "Here is a bug,", " bug"); - EXPECT_DEBUG_DEATH(QUIC_BUG_IF(bug_id, true) << "There is a bug,", " bug"); - EXPECT_LOG_NOT_CONTAINS("error", "", QUIC_BUG_IF(bug_id, false) << "A feature is not a bug."); + EXPECT_ENVOY_BUG(QUIC_BUG(bug_id) << "Here is a bug,", " bug"); + EXPECT_ENVOY_BUG(QUIC_BUG_IF(bug_id, 1 == 1) << "There is a bug,", " bug"); + bool evaluated = false; + EXPECT_LOG_NOT_CONTAINS( + "error", "", QUIC_BUG_IF(bug_id_1, false) << "A feature is not a bug." << (evaluated = true)); + EXPECT_FALSE(evaluated); + + { + ScopedDisableExitOnQuicheBug no_crash_quiche_bug; + QUIC_BUG(bug_id_2) << "No crash bug"; + } EXPECT_LOG_CONTAINS("error", " bug", QUIC_PEER_BUG(bug_id) << "Everywhere's a bug,"); EXPECT_LOG_CONTAINS("error", " here", QUIC_PEER_BUG_IF(bug_id, true) << "Including here."); @@ -112,9 +118,14 @@ TEST_F(QuicPlatformTest, QuicExpectBug) { auto bug = [](const char* error_message) { QUIC_BUG(bug_id) << error_message; }; auto peer_bug = [](const char* error_message) { QUIC_PEER_BUG(bug_id) << error_message; }; - EXPECT_QUIC_BUG(bug("bug one is expected"), "bug one"); EXPECT_QUIC_BUG(bug("bug two is expected"), "bug two"); +#ifdef NDEBUG + // The 3rd triggering in release mode should not be logged. 
+ EXPECT_LOG_NOT_CONTAINS("error", "bug three", bug("bug three is expected")); +#else + EXPECT_QUIC_BUG(bug("bug three is expected"), "bug three"); +#endif EXPECT_QUIC_PEER_BUG(peer_bug("peer_bug_1 is expected"), "peer_bug_1"); EXPECT_QUIC_PEER_BUG(peer_bug("peer_bug_2 is expected"), "peer_bug_2"); @@ -139,28 +150,6 @@ TEST_F(QuicPlatformTest, QuicHostnameUtils) { EXPECT_EQ("quicwg.org", QuicHostnameUtils::NormalizeHostname("QUICWG.ORG")); } -TEST_F(QuicPlatformTest, QuicInlinedVector) { - QuicInlinedVector vec; - vec.push_back(3); - EXPECT_EQ(3, vec[0]); -} - -TEST_F(QuicPlatformTest, QuicEstimateMemoryUsage) { - std::string s = "foo"; - // Stubbed out to always return 0. - EXPECT_EQ(0, QuicEstimateMemoryUsage(s)); -} - -TEST_F(QuicPlatformTest, QuicMapUtil) { - std::map stdmap = {{"one", 1}, {"two", 2}, {"three", 3}}; - EXPECT_TRUE(QuicContainsKey(stdmap, "one")); - EXPECT_FALSE(QuicContainsKey(stdmap, "zero")); - - std::vector stdvec = {1, 2, 3}; - EXPECT_TRUE(QuicContainsValue(stdvec, 1)); - EXPECT_FALSE(QuicContainsValue(stdvec, 0)); -} - TEST_F(QuicPlatformTest, QuicMockLog) { ASSERT_EQ(spdlog::level::err, GetLogger().level()); @@ -577,25 +566,6 @@ class FileUtilsTest : public testing::Test { std::stack files_to_remove_; }; -TEST_F(FileUtilsTest, ReadDirContents) { - addSubDirs({"sub_dir1", "sub_dir2", "sub_dir1/sub_dir1_1"}); - addFiles({"file", "sub_dir1/sub_file1", "sub_dir1/sub_dir1_1/sub_file1_1", "sub_dir2/sub_file2"}); - - EXPECT_THAT(ReadFileContents(dir_path_), - testing::UnorderedElementsAre(dir_path_ + "/file", dir_path_ + "/sub_dir1/sub_file1", - dir_path_ + "/sub_dir1/sub_dir1_1/sub_file1_1", - dir_path_ + "/sub_dir2/sub_file2")); -} - -TEST_F(FileUtilsTest, ReadFileContents) { - const std::string data = "test string\ntest"; - const std::string file_path = - Envoy::TestEnvironment::writeStringToFileForTest("test_envoy", data); - std::string output; - ReadFileContents(file_path, &output); - EXPECT_EQ(data, output); -} - TEST_F(QuicPlatformTest, 
TestEnvoyQuicBufferAllocator) { QuicStreamBufferAllocator allocator; Envoy::Stats::TestUtil::MemoryTest memory_test; @@ -664,6 +634,19 @@ TEST(EnvoyQuicMemSliceTest, ConstructQuicMemSliceSpan) { QuicMemSliceSpan span(&slice); EXPECT_EQ(1024u, span.total_length()); EXPECT_EQ(str, span.GetData(0)); + span.ConsumeAll([](quic::QuicMemSlice&& mem_slice) { mem_slice.Reset(); }); + EXPECT_EQ(0u, span.total_length()); + + QuicMemSlice slice3; + { + quic::QuicMemSlice slice2{quic::QuicMemSliceImpl(std::make_unique(5), 5u)}; + + QuicMemSliceSpan span2(&slice2); + EXPECT_EQ(5u, span2.total_length()); + span2.ConsumeAll([&slice3](quic::QuicMemSlice&& mem_slice) { slice3 = std::move(mem_slice); }); + EXPECT_EQ(0u, span2.total_length()); + } + slice3.Reset(); } TEST(EnvoyQuicMemSliceTest, QuicMemSliceStorage) { diff --git a/test/common/quic/platform/quiche_test_impl.h b/test/common/quic/platform/quiche_test_impl.h index 34e5f3eeefd1e..8362180983c5a 100644 --- a/test/common/quic/platform/quiche_test_impl.h +++ b/test/common/quic/platform/quiche_test_impl.h @@ -6,6 +6,7 @@ // consumed or referenced directly by other Envoy code. It serves purely as a // porting layer for QUICHE. 
+#include "absl/strings/str_cat.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -16,5 +17,11 @@ using QuicheTest = ::testing::Test; template using QuicheTestWithParamImpl = ::testing::TestWithParam; +// NOLINTNEXTLINE(readability-identifier-naming) +inline std::string QuicheGetCommonSourcePathImpl() { + std::string test_srcdir(getenv("TEST_SRCDIR")); + return absl::StrCat(test_srcdir, "/external/com_googlesource_quiche/quiche/common"); +} + } // namespace test } // namespace quiche diff --git a/test/common/quic/test_utils.h b/test/common/quic/test_utils.h index ec7249ffd39e0..c5487c81538e0 100644 --- a/test/common/quic/test_utils.h +++ b/test/common/quic/test_utils.h @@ -11,7 +11,6 @@ #endif #include "quiche/quic/core/http/quic_spdy_session.h" -#include "quiche/quic/core/http/quic_spdy_client_session.h" #include "quiche/quic/test_tools/quic_test_utils.h" #include "quiche/quic/test_tools/first_flight.h" #include "quiche/quic/core/quic_utils.h" @@ -25,6 +24,7 @@ #endif #include "source/common/quic/envoy_quic_utils.h" +#include "source/common/quic/envoy_quic_client_session.h" #include "test/test_common/environment.h" namespace Envoy { @@ -66,6 +66,14 @@ class MockEnvoyQuicServerConnection : public EnvoyQuicServerConnection { MOCK_METHOD(void, dumpState, (std::ostream&, int), (const)); }; +class TestQuicCryptoStream : public quic::test::MockQuicCryptoStream { +public: + explicit TestQuicCryptoStream(quic::QuicSession* session) + : quic::test::MockQuicCryptoStream(session) {} + + bool encryption_established() const override { return true; } +}; + class MockEnvoyQuicSession : public quic::QuicSpdySession, public QuicFilterManagerConnectionImpl { public: MockEnvoyQuicSession(const quic::QuicConfig& config, @@ -74,9 +82,8 @@ class MockEnvoyQuicSession : public quic::QuicSpdySession, public QuicFilterMana uint32_t send_buffer_limit) : quic::QuicSpdySession(connection, /*visitor=*/nullptr, config, supported_versions), QuicFilterManagerConnectionImpl(*connection, 
connection->connection_id(), dispatcher, - send_buffer_limit) { - crypto_stream_ = std::make_unique(this); - } + send_buffer_limit), + crypto_stream_(std::make_unique(this)) {} void Initialize() override { quic::QuicSpdySession::Initialize(); @@ -124,22 +131,47 @@ class MockEnvoyQuicSession : public quic::QuicSpdySession, public QuicFilterMana std::unique_ptr crypto_stream_; }; -class MockEnvoyQuicClientSession : public quic::QuicSpdyClientSession, - public QuicFilterManagerConnectionImpl { +class TestQuicCryptoClientStream : public quic::QuicCryptoClientStream { +public: + TestQuicCryptoClientStream(const quic::QuicServerId& server_id, quic::QuicSession* session, + std::unique_ptr verify_context, + quic::QuicCryptoClientConfig* crypto_config, + ProofHandler* proof_handler, bool has_application_state) + : quic::QuicCryptoClientStream(server_id, session, std::move(verify_context), crypto_config, + proof_handler, has_application_state) {} + + bool encryption_established() const override { return true; } +}; + +class TestQuicCryptoClientStreamFactory : public EnvoyQuicCryptoClientStreamFactoryInterface { +public: + std::unique_ptr + createEnvoyQuicCryptoClientStream(const quic::QuicServerId& server_id, quic::QuicSession* session, + std::unique_ptr verify_context, + quic::QuicCryptoClientConfig* crypto_config, + quic::QuicCryptoClientStream::ProofHandler* proof_handler, + bool has_application_state) override { + return std::make_unique(server_id, session, + std::move(verify_context), crypto_config, + proof_handler, has_application_state); + } +}; + +class MockEnvoyQuicClientSession : public EnvoyQuicClientSession { public: MockEnvoyQuicClientSession(const quic::QuicConfig& config, const quic::ParsedQuicVersionVector& supported_versions, - EnvoyQuicClientConnection* connection, Event::Dispatcher& dispatcher, - uint32_t send_buffer_limit) - : quic::QuicSpdyClientSession(config, supported_versions, connection, - quic::QuicServerId("example.com", 443, false), 
&crypto_config_, - nullptr), - QuicFilterManagerConnectionImpl(*connection, connection->connection_id(), dispatcher, - send_buffer_limit), - crypto_config_(quic::test::crypto_test_utils::ProofVerifierForTesting()) {} + std::unique_ptr connection, + Event::Dispatcher& dispatcher, uint32_t send_buffer_limit, + EnvoyQuicCryptoClientStreamFactoryInterface& crypto_stream_factory) + : EnvoyQuicClientSession(config, supported_versions, std::move(connection), + quic::QuicServerId("example.com", 443, false), + std::make_shared( + quic::test::crypto_test_utils::ProofVerifierForTesting()), + nullptr, dispatcher, send_buffer_limit, crypto_stream_factory) {} void Initialize() override { - quic::QuicSpdyClientSession::Initialize(); + EnvoyQuicClientSession::Initialize(); initialized_ = true; } @@ -171,9 +203,6 @@ class MockEnvoyQuicClientSession : public quic::QuicSpdyClientSession, return initialized_ ? connection() : nullptr; } quic::QuicConnection* quicConnection() override { return initialized_ ? connection() : nullptr; } - -private: - quic::QuicCryptoClientConfig crypto_config_; }; Buffer::OwnedImpl @@ -253,11 +282,9 @@ std::string spdyHeaderToHttp3StreamPayload(const spdy::SpdyHeaderBlock& header) } std::string bodyToHttp3StreamPayload(const std::string& body) { - std::unique_ptr data_buffer; - quic::QuicByteCount data_frame_header_length = - quic::HttpEncoder::SerializeDataFrameHeader(body.length(), &data_buffer); - absl::string_view data_frame_header(data_buffer.get(), data_frame_header_length); - return absl::StrCat(data_frame_header, body); + quic::SimpleBufferAllocator allocator; + quic::QuicBuffer header = quic::HttpEncoder::SerializeDataFrameHeader(body.length(), &allocator); + return absl::StrCat(header.AsStringView(), body); } // A test suite with variation of ip version and a knob to turn on/off IETF QUIC implementation. 
diff --git a/test/integration/quic_http_integration_test.cc b/test/integration/quic_http_integration_test.cc index eb8abe377c482..3a38fea1fadba 100644 --- a/test/integration/quic_http_integration_test.cc +++ b/test/integration/quic_http_integration_test.cc @@ -74,7 +74,7 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, public QuicMultiVers } bool use_http3 = GetParam().second == QuicVersionType::Iquic; SetQuicReloadableFlag(quic_disable_version_draft_29, !use_http3); - SetQuicReloadableFlag(quic_enable_version_rfcv1, use_http3); + SetQuicReloadableFlag(quic_disable_version_rfcv1, !use_http3); return quic::CurrentSupportedVersions(); }()), conn_helper_(*dispatcher_), alarm_factory_(*dispatcher_, *conn_helper_.GetClock()) {} From d483098022e72de39a23dc5f9ed65a044ae86b3a Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Tue, 20 Jul 2021 17:04:38 -0600 Subject: [PATCH 14/57] listener: allow TCP and UDP on the same port (#17414) Fixes https://github.com/envoyproxy/envoy/issues/15562 Signed-off-by: Matt Klein --- source/server/listener_impl.cc | 6 +++++ source/server/listener_impl.h | 3 +++ source/server/listener_manager_impl.cc | 19 ++++++++-------- source/server/listener_manager_impl.h | 4 ++-- test/server/listener_manager_impl_test.cc | 27 +++++++++++++++-------- 5 files changed, 38 insertions(+), 21 deletions(-) diff --git a/source/server/listener_impl.cc b/source/server/listener_impl.cc index a01633616315d..44feb19180a99 100644 --- a/source/server/listener_impl.cc +++ b/source/server/listener_impl.cc @@ -813,6 +813,12 @@ bool ListenerImpl::getReusePortOrDefault(Server::Instance& server, return initial_reuse_port_value; } +bool ListenerImpl::hasCompatibleAddress(const ListenerImpl& other) const { + return *address() == *other.address() && + Network::Utility::protobufAddressSocketType(config_.address()) == + Network::Utility::protobufAddressSocketType(other.config_.address()); +} + bool ListenerMessageUtil::filterChainOnlyChange(const 
envoy::config::listener::v3::Listener& lhs, const envoy::config::listener::v3::Listener& rhs) { Protobuf::util::MessageDifferencer differencer; diff --git a/source/server/listener_impl.h b/source/server/listener_impl.h index d2706136e60f2..dfc04ef5b5a3e 100644 --- a/source/server/listener_impl.h +++ b/source/server/listener_impl.h @@ -282,6 +282,9 @@ class ListenerImpl final : public Network::ListenerConfig, static bool getReusePortOrDefault(Server::Instance& server, const envoy::config::listener::v3::Listener& config); + // Check whether a new listener can share sockets with this listener. + bool hasCompatibleAddress(const ListenerImpl& other) const; + // Network::ListenerConfig Network::FilterChainManager& filterChainManager() override { return filter_chain_manager_; } Network::FilterChainFactory& filterChainFactory() override { return *this; } diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index 6d837c407eb42..98814eea12918 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -448,7 +448,7 @@ bool ListenerManagerImpl::addOrUpdateListenerInternal( // In this case we can just replace inline. ASSERT(workers_started_); new_listener->debugLog("update warming listener"); - if (*(*existing_warming_listener)->address() != *new_listener->address()) { + if (!(*existing_warming_listener)->hasCompatibleAddress(*new_listener)) { setNewOrDrainingSocketFactory(name, config.address(), *new_listener); } else { new_listener->setSocketFactory((*existing_warming_listener)->getSocketFactory().clone()); @@ -457,7 +457,7 @@ bool ListenerManagerImpl::addOrUpdateListenerInternal( } else if (existing_active_listener != active_listeners_.end()) { // In this case we have no warming listener, so what we do depends on whether workers // have been started or not. 
- if (*(*existing_active_listener)->address() != *new_listener->address()) { + if (!(*existing_active_listener)->hasCompatibleAddress(*new_listener)) { setNewOrDrainingSocketFactory(name, config.address(), *new_listener); } else { new_listener->setSocketFactory((*existing_active_listener)->getSocketFactory().clone()); @@ -495,10 +495,10 @@ bool ListenerManagerImpl::addOrUpdateListenerInternal( return true; } -bool ListenerManagerImpl::hasListenerWithAddress(const ListenerList& list, - const Network::Address::Instance& address) { - for (const auto& listener : list) { - if (*listener->address() == address) { +bool ListenerManagerImpl::hasListenerWithCompatibleAddress(const ListenerList& list, + const ListenerImpl& listener) { + for (const auto& existing_listener : list) { + if (existing_listener->hasCompatibleAddress(listener)) { return true; } } @@ -962,8 +962,8 @@ void ListenerManagerImpl::setNewOrDrainingSocketFactory( // is an edge case and nothing will explicitly break, but there is no possibility that two // listeners that do not bind will ever be used. Only the first one will be used when searched for // by address. Thus we block it. 
- if (!listener.bindToPort() && (hasListenerWithAddress(warming_listeners_, *listener.address()) || - hasListenerWithAddress(active_listeners_, *listener.address()))) { + if (!listener.bindToPort() && (hasListenerWithCompatibleAddress(warming_listeners_, listener) || + hasListenerWithCompatibleAddress(active_listeners_, listener))) { const std::string message = fmt::format("error adding listener: '{}' has duplicate address '{}' as existing listener", name, listener.address()->asString()); @@ -980,8 +980,7 @@ void ListenerManagerImpl::setNewOrDrainingSocketFactory( draining_listeners_.cbegin(), draining_listeners_.cend(), [&listener](const DrainingListener& draining_listener) { return draining_listener.listener_->listenSocketFactory().getListenSocket(0)->isOpen() && - *listener.address() == - *draining_listener.listener_->listenSocketFactory().localAddress(); + listener.hasCompatibleAddress(*draining_listener.listener_); }); if (existing_draining_listener != draining_listeners_.cend()) { diff --git a/source/server/listener_manager_impl.h b/source/server/listener_manager_impl.h index 2484a23fe2efd..a98e8fbfe56c4 100644 --- a/source/server/listener_manager_impl.h +++ b/source/server/listener_manager_impl.h @@ -234,8 +234,8 @@ class ListenerManagerImpl : public ListenerManager, Logger::Loggable(); - EXPECT_CALL(socket, duplicate()) - .WillOnce(Return(ByMove(std::unique_ptr(duplicated_socket)))); + ListenerHandle* old_listener_handle, + OptRef socket, + ListenerComponentFactory::BindType bind_type = default_bind_type) { + Network::MockListenSocket* new_socket; + if (socket.has_value()) { + new_socket = new NiceMock(); + EXPECT_CALL(socket.value().get(), duplicate()) + .WillOnce(Return(ByMove(std::unique_ptr(new_socket)))); + } else { + new_socket = listener_factory_.socket_.get(); + EXPECT_CALL(listener_factory_, createListenSocket(_, _, _, bind_type, 0)); + } EXPECT_CALL(*worker_, addListener(_, _, _)); EXPECT_CALL(*worker_, stopListener(_, _)); 
EXPECT_CALL(*old_listener_handle->drain_manager_, startDrainSequence(_)); @@ -140,7 +148,7 @@ class ListenerManagerImplForInPlaceFilterChainUpdateTest : public Event::Simulat EXPECT_CALL(*old_listener_handle, onDestroy()); worker_->callRemovalCompletion(); - return duplicated_socket; + return new_socket; } void expectRemove(const envoy::config::listener::v3::Listener& listener_proto, @@ -5105,6 +5113,8 @@ TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfWo EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); } +// This case also verifies that listeners that share port but do not share socket type (TCP vs. UDP) +// do not share a listener. TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfAnyListenerIsNotTcp) { EXPECT_CALL(*worker_, start(_, _)); manager_->startWorkers(guard_dog_, callback_.AsStdFunction()); @@ -5120,10 +5130,9 @@ TEST_F(ListenerManagerImplForInPlaceFilterChainUpdateTest, TraditionalUpdateIfAn envoy::config::core::v3::SocketAddress_Protocol::SocketAddress_Protocol_UDP); ListenerHandle* listener_foo_update1 = expectListenerCreate(false, true); - auto duplicated_socket = - expectUpdateToThenDrain(new_listener_proto, listener_foo, *listener_factory_.socket_); - - expectRemove(new_listener_proto, listener_foo_update1, *duplicated_socket); + expectUpdateToThenDrain(new_listener_proto, listener_foo, OptRef(), + ListenerComponentFactory::BindType::ReusePort); + expectRemove(new_listener_proto, listener_foo_update1, *listener_factory_.socket_); EXPECT_EQ(0UL, manager_->listeners().size()); EXPECT_EQ(0, server_.stats_store_.counter("listener_manager.listener_in_place_updated").value()); From 47160ca33f693876025b6e4ac2fde76182988d60 Mon Sep 17 00:00:00 2001 From: ankatare Date: Wed, 21 Jul 2021 08:33:29 +0530 Subject: [PATCH 15/57] remove support of hidden_deprecated_use_alpha (#17169) Removing support of hidden_deprecated_use_alpha Risk Level: LOW Testing: 
unit (i.e. bazel test under bazel test test/extensions/filters/http/ext_authz/... ) Signed-off-by: Abhay Narayan Katare --- .../filters/http/ext_authz/config.cc | 11 ---- .../filters/http/ext_authz/config_test.cc | 50 ------------------- test/integration/hds_integration_test.cc | 4 +- test/test_common/utility.h | 15 ++---- 4 files changed, 7 insertions(+), 73 deletions(-) diff --git a/source/extensions/filters/http/ext_authz/config.cc b/source/extensions/filters/http/ext_authz/config.cc index dc3f385a525be..bcff25d3704e5 100644 --- a/source/extensions/filters/http/ext_authz/config.cc +++ b/source/extensions/filters/http/ext_authz/config.cc @@ -43,12 +43,6 @@ Http::FilterFactoryCb ExtAuthzFilterConfig::createFilterFactoryFromProtoTyped( } else if (proto_config.grpc_service().has_google_grpc()) { // Google gRPC client. - // The use_alpha field was there select the v2alpha api version, which is - // long deprecated and should not be used anymore. - if (proto_config.hidden_envoy_deprecated_use_alpha()) { - throw EnvoyException("The use_alpha field is deprecated and is no longer supported."); - } - const uint32_t timeout_ms = PROTOBUF_GET_MS_OR_DEFAULT(proto_config.grpc_service(), timeout, DefaultTimeout); @@ -64,11 +58,6 @@ Http::FilterFactoryCb ExtAuthzFilterConfig::createFilterFactoryFromProtoTyped( } else { // Envoy gRPC client. - // The use_alpha field was there select the v2alpha api version, which is - // long deprecated and should not be used anymore. 
- if (proto_config.hidden_envoy_deprecated_use_alpha()) { - throw EnvoyException("The use_alpha field is deprecated and is no longer supported."); - } Grpc::RawAsyncClientSharedPtr raw_client = context.clusterManager().grpcAsyncClientManager().getOrCreateRawAsyncClient( proto_config.grpc_service(), context.scope(), true, Grpc::CacheOption::AlwaysCache); diff --git a/test/extensions/filters/http/ext_authz/config_test.cc b/test/extensions/filters/http/ext_authz/config_test.cc index e8c967d75f1de..8e74e5e37e19f 100644 --- a/test/extensions/filters/http/ext_authz/config_test.cc +++ b/test/extensions/filters/http/ext_authz/config_test.cc @@ -131,56 +131,6 @@ TEST(HttpExtAuthzConfigTest, CorrectProtoHttp) { cb(filter_callback); } -// Test that setting the use_alpha proto field throws. -TEST(HttpExtAuthzConfigTest, DEPRECATED_FEATURE_TEST(UseAlphaFieldIsNoLongerSupported)) { - TestScopedRuntime scoped_runtime; - Runtime::LoaderSingleton::getExisting()->mergeValues( - {{"envoy.deprecated_features:envoy.extensions.filters.http.ext_authz.v3.ExtAuthz.hidden_" - "envoy_deprecated_use_alpha", - "true"}}); - - envoy::extensions::filters::http::ext_authz::v3::ExtAuthz proto_config; - proto_config.set_hidden_envoy_deprecated_use_alpha(true); - - // Trigger the throw in the Envoy gRPC branch. - { - testing::StrictMock context; - testing::StrictMock server_context; - EXPECT_CALL(context, getServerFactoryContext()) - .WillRepeatedly(testing::ReturnRef(server_context)); - EXPECT_CALL(context, messageValidationVisitor()); - EXPECT_CALL(context, runtime()); - EXPECT_CALL(context, scope()); - - ExtAuthzFilterConfig factory; - EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, "stats", context), - EnvoyException, - "The use_alpha field is deprecated and is no longer supported.") - } - - // Trigger the throw in the Google gRPC branch. 
- { - auto google_grpc = new envoy::config::core::v3::GrpcService_GoogleGrpc(); - google_grpc->set_stat_prefix("grpc"); - google_grpc->set_target_uri("http://example.com"); - proto_config.set_transport_api_version(envoy::config::core::v3::ApiVersion::V3); - proto_config.mutable_grpc_service()->set_allocated_google_grpc(google_grpc); - - testing::StrictMock context; - testing::StrictMock server_context; - EXPECT_CALL(context, getServerFactoryContext()) - .WillRepeatedly(testing::ReturnRef(server_context)); - EXPECT_CALL(context, messageValidationVisitor()); - EXPECT_CALL(context, runtime()); - EXPECT_CALL(context, scope()); - - ExtAuthzFilterConfig factory; - EXPECT_THROW_WITH_MESSAGE(factory.createFilterFactoryFromProto(proto_config, "stats", context), - EnvoyException, - "The use_alpha field is deprecated and is no longer supported.") - } -} - // Test that the deprecated extension name still functions. TEST(HttpExtAuthzConfigTest, DEPRECATED_FEATURE_TEST(DeprecatedExtensionFilterName)) { const std::string deprecated_name = "envoy.ext_authz"; diff --git a/test/integration/hds_integration_test.cc b/test/integration/hds_integration_test.cc index 0f137953f886a..15a3015807cab 100644 --- a/test/integration/hds_integration_test.cc +++ b/test/integration/hds_integration_test.cc @@ -280,7 +280,7 @@ class HdsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, EXPECT_EQ("POST", hds_stream_->headers().getMethodValue()); EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.{1}.{0}.HealthDiscoveryService", "StreamHealthCheck", apiVersion(), - /*use_alpha=*/false, serviceNamespace()), + serviceNamespace()), hds_stream_->headers().getPathValue()); EXPECT_EQ("application/grpc", hds_stream_->headers().getContentTypeValue()); } @@ -335,7 +335,7 @@ class HdsIntegrationTest : public Grpc::VersionedGrpcClientIntegrationParamTest, EXPECT_EQ("POST", hds_stream_->headers().getMethodValue()); 
EXPECT_EQ(TestUtility::getVersionedMethodPath("envoy.service.{1}.{0}.HealthDiscoveryService", "StreamHealthCheck", apiVersion(), - /*use_alpha=*/false, serviceNamespace()), + serviceNamespace()), hds_stream_->headers().getPathValue()); EXPECT_EQ("application/grpc", hds_stream_->headers().getContentTypeValue()); } diff --git a/test/test_common/utility.h b/test/test_common/utility.h index 2da5dee226feb..57c2162ce82ec 100644 --- a/test/test_common/utility.h +++ b/test/test_common/utility.h @@ -782,21 +782,18 @@ class TestUtility { * * @param service_full_name_template the service fully-qualified name template. * @param api_version version of a service. - * @param use_alpha if the alpha version is preferred. * @param service_namespace to override the service namespace. * @return std::string full path of a service method. */ static std::string getVersionedServiceFullName(const std::string& service_full_name_template, envoy::config::core::v3::ApiVersion api_version, - bool use_alpha = false, const std::string& service_namespace = EMPTY_STRING) { switch (api_version) { case envoy::config::core::v3::ApiVersion::AUTO: FALLTHRU; case envoy::config::core::v3::ApiVersion::V2: - return fmt::format(service_full_name_template, use_alpha ? "v2alpha" : "v2", - service_namespace); + return fmt::format(service_full_name_template, "v2", service_namespace); case envoy::config::core::v3::ApiVersion::V3: return fmt::format(service_full_name_template, "v3", service_namespace); @@ -811,19 +808,17 @@ class TestUtility { * @param service_full_name_template the service fully-qualified name template. * @param method_name the method name. * @param api_version version of a service method. - * @param use_alpha if the alpha version is preferred. * @param service_namespace to override the service namespace. * @return std::string full path of a service method. 
*/ static std::string getVersionedMethodPath(const std::string& service_full_name_template, absl::string_view method_name, envoy::config::core::v3::ApiVersion api_version, - bool use_alpha = false, const std::string& service_namespace = EMPTY_STRING) { - return absl::StrCat("/", - getVersionedServiceFullName(service_full_name_template, api_version, - use_alpha, service_namespace), - "/", method_name); + return absl::StrCat( + "/", + getVersionedServiceFullName(service_full_name_template, api_version, service_namespace), + "/", method_name); } }; From aaac62b81dc4ef263763e7f86c363d4ddf75f8c3 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 20 Jul 2021 23:26:41 -0400 Subject: [PATCH 16/57] v2: removing v2 http filter protos (#17368) Risk Level: Low Testing: n/a Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- api/BUILD | 5 --- .../filters/http/compressor/v3/BUILD | 1 - .../filters/http/ext_authz/v3/BUILD | 1 - .../extensions/filters/http/gzip/v3/BUILD | 1 - .../filters/http/health_check/v3/BUILD | 1 - .../filters/http/ip_tagging/v3/BUILD | 1 - api/test/validate/BUILD | 31 ++++++++++--------- api/test/validate/pgv_test.cc | 29 ++++++++--------- generated_api_shadow/BUILD | 5 --- .../filters/http/compressor/v3/BUILD | 1 - .../filters/http/ext_authz/v3/BUILD | 2 -- .../filters/http/ext_authz/v3/ext_authz.proto | 11 +++---- .../extensions/filters/http/gzip/v3/BUILD | 2 -- .../filters/http/gzip/v3/gzip.proto | 18 +++-------- .../filters/http/health_check/v3/BUILD | 1 - .../filters/http/ip_tagging/v3/BUILD | 1 - test/common/config/BUILD | 1 - test/common/config/api_type_oracle_test.cc | 10 +----- .../http/filter_config_discovery_impl_test.cc | 5 +-- test/integration/version_integration_test.cc | 26 ---------------- tools/proto_format/proto_sync.py | 5 +++ 21 files changed, 49 insertions(+), 109 deletions(-) diff --git a/api/BUILD b/api/BUILD index 4b11cc147633a..a70eae799d797 100644 --- a/api/BUILD +++ b/api/BUILD @@ -22,11 +22,6 @@ proto_library( 
"//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg", "//envoy/config/filter/accesslog/v2:pkg", "//envoy/config/filter/fault/v2:pkg", - "//envoy/config/filter/http/compressor/v2:pkg", - "//envoy/config/filter/http/ext_authz/v2:pkg", - "//envoy/config/filter/http/gzip/v2:pkg", - "//envoy/config/filter/http/health_check/v2:pkg", - "//envoy/config/filter/http/ip_tagging/v2:pkg", "//envoy/config/filter/network/http_connection_manager/v2:pkg", "//envoy/config/filter/network/redis_proxy/v2:pkg", "//envoy/config/filter/network/tcp_proxy/v2:pkg", diff --git a/api/envoy/extensions/filters/http/compressor/v3/BUILD b/api/envoy/extensions/filters/http/compressor/v3/BUILD index cf5fc2a635de4..a1775bbe6f513 100644 --- a/api/envoy/extensions/filters/http/compressor/v3/BUILD +++ b/api/envoy/extensions/filters/http/compressor/v3/BUILD @@ -8,7 +8,6 @@ api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/config/core/v3:pkg", - "//envoy/config/filter/http/compressor/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/envoy/extensions/filters/http/ext_authz/v3/BUILD b/api/envoy/extensions/filters/http/ext_authz/v3/BUILD index 9d4abbe84157f..bc2a58d2a7f1c 100644 --- a/api/envoy/extensions/filters/http/ext_authz/v3/BUILD +++ b/api/envoy/extensions/filters/http/ext_authz/v3/BUILD @@ -7,7 +7,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", - "//envoy/config/filter/http/ext_authz/v2:pkg", "//envoy/type/matcher/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", diff --git a/api/envoy/extensions/filters/http/gzip/v3/BUILD b/api/envoy/extensions/filters/http/gzip/v3/BUILD index 419af66fc5b45..bfe5d198e6129 100644 --- a/api/envoy/extensions/filters/http/gzip/v3/BUILD +++ b/api/envoy/extensions/filters/http/gzip/v3/BUILD @@ -6,7 +6,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/config/filter/http/gzip/v2:pkg", 
"//envoy/extensions/filters/http/compressor/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/api/envoy/extensions/filters/http/health_check/v3/BUILD b/api/envoy/extensions/filters/http/health_check/v3/BUILD index 977b8f1e5f8da..c6ef74063aabe 100644 --- a/api/envoy/extensions/filters/http/health_check/v3/BUILD +++ b/api/envoy/extensions/filters/http/health_check/v3/BUILD @@ -6,7 +6,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/config/filter/http/health_check/v2:pkg", "//envoy/config/route/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", diff --git a/api/envoy/extensions/filters/http/ip_tagging/v3/BUILD b/api/envoy/extensions/filters/http/ip_tagging/v3/BUILD index 843190b939854..1c1a6f6b44235 100644 --- a/api/envoy/extensions/filters/http/ip_tagging/v3/BUILD +++ b/api/envoy/extensions/filters/http/ip_tagging/v3/BUILD @@ -7,7 +7,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", - "//envoy/config/filter/http/ip_tagging/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/api/test/validate/BUILD b/api/test/validate/BUILD index c9a7ba701f979..be7374cfc1fb0 100644 --- a/api/test/validate/BUILD +++ b/api/test/validate/BUILD @@ -12,20 +12,21 @@ api_cc_test( "@envoy_api//envoy/api/v2/route:pkg_cc_proto", "@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto", "@envoy_api//envoy/config/filter/accesslog/v2:pkg_cc_proto", - "@envoy_api//envoy/config/filter/http/buffer/v2:pkg_cc_proto", - "@envoy_api//envoy/config/filter/http/fault/v2:pkg_cc_proto", - "@envoy_api//envoy/config/filter/http/gzip/v2:pkg_cc_proto", - "@envoy_api//envoy/config/filter/http/header_to_metadata/v2:pkg_cc_proto", - "@envoy_api//envoy/config/filter/http/health_check/v2:pkg_cc_proto", - "@envoy_api//envoy/config/filter/http/ip_tagging/v2:pkg_cc_proto", - "@envoy_api//envoy/config/filter/http/lua/v2:pkg_cc_proto", - 
"@envoy_api//envoy/config/filter/http/router/v2:pkg_cc_proto", - "@envoy_api//envoy/config/filter/http/squash/v2:pkg_cc_proto", - "@envoy_api//envoy/config/filter/http/transcoder/v2:pkg_cc_proto", - "@envoy_api//envoy/config/filter/network/http_connection_manager/v2:pkg_cc_proto", - "@envoy_api//envoy/config/filter/network/mongo_proxy/v2:pkg_cc_proto", - "@envoy_api//envoy/config/filter/network/redis_proxy/v2:pkg_cc_proto", - "@envoy_api//envoy/config/filter/network/tcp_proxy/v2:pkg_cc_proto", - "@envoy_api//envoy/config/health_checker/redis/v2:pkg_cc_proto", + "@envoy_api//envoy/extensions/compression/gzip/decompressor/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/buffer/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/fault/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/gzip/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/header_to_metadata/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/health_check/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/ip_tagging/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/lua/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/router/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/http/squash/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/network/mongo_proxy/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/filters/network/tcp_proxy/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/health_checkers/redis/v3:pkg_cc_proto", ], ) diff --git a/api/test/validate/pgv_test.cc b/api/test/validate/pgv_test.cc index b9b1bf058df9e..d89f99cbc31c2 100644 --- a/api/test/validate/pgv_test.cc +++ b/api/test/validate/pgv_test.cc @@ -9,22 +9,23 @@ #include 
"envoy/api/v2/listener.pb.validate.h" #include "envoy/api/v2/route.pb.validate.h" #include "envoy/api/v2/core/protocol.pb.validate.h" -#include "envoy/config/health_checker/redis/v2/redis.pb.validate.h" #include "envoy/config/filter/accesslog/v2/accesslog.pb.validate.h" -#include "envoy/config/filter/http/buffer/v2/buffer.pb.validate.h" -#include "envoy/config/filter/http/fault/v2/fault.pb.validate.h" -#include "envoy/config/filter/http/gzip/v2/gzip.pb.validate.h" -#include "envoy/config/filter/http/health_check/v2/health_check.pb.validate.h" -#include "envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.pb.validate.h" -#include "envoy/config/filter/http/ip_tagging/v2/ip_tagging.pb.validate.h" -#include "envoy/config/filter/http/lua/v2/lua.pb.validate.h" -#include "envoy/config/filter/http/router/v2/router.pb.validate.h" -#include "envoy/config/filter/http/squash/v2/squash.pb.validate.h" -#include "envoy/config/filter/http/transcoder/v2/transcoder.pb.validate.h" #include "envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.pb.validate.h" -#include "envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.pb.validate.h" -#include "envoy/config/filter/network/redis_proxy/v2/redis_proxy.pb.validate.h" -#include "envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.pb.validate.h" +#include "envoy/extensions/compression/gzip/decompressor/v3/gzip.pb.validate.h" +#include "envoy/extensions/filters/http/buffer/v3/buffer.pb.validate.h" +#include "envoy/extensions/filters/http/fault/v3/fault.pb.validate.h" +#include "envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.pb.validate.h" +#include "envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.pb.validate.h" +#include "envoy/extensions/filters/http/health_check/v3/health_check.pb.validate.h" +#include "envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.pb.validate.h" +#include "envoy/extensions/filters/http/lua/v3/lua.pb.validate.h" +#include 
"envoy/extensions/filters/http/router/v3/router.pb.validate.h" +#include "envoy/extensions/filters/http/squash/v3/squash.pb.validate.h" +#include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.validate.h" +#include "envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.pb.validate.h" +#include "envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.pb.validate.h" +#include "envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.pb.validate.h" +#include "envoy/extensions/health_checkers/redis/v3/redis.pb.validate.h" #include "envoy/api/v2/listener/listener.pb.validate.h" #include "envoy/api/v2/route/route.pb.validate.h" #include "envoy/config/bootstrap/v2/bootstrap.pb.validate.h" diff --git a/generated_api_shadow/BUILD b/generated_api_shadow/BUILD index 4b11cc147633a..a70eae799d797 100644 --- a/generated_api_shadow/BUILD +++ b/generated_api_shadow/BUILD @@ -22,11 +22,6 @@ proto_library( "//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg", "//envoy/config/filter/accesslog/v2:pkg", "//envoy/config/filter/fault/v2:pkg", - "//envoy/config/filter/http/compressor/v2:pkg", - "//envoy/config/filter/http/ext_authz/v2:pkg", - "//envoy/config/filter/http/gzip/v2:pkg", - "//envoy/config/filter/http/health_check/v2:pkg", - "//envoy/config/filter/http/ip_tagging/v2:pkg", "//envoy/config/filter/network/http_connection_manager/v2:pkg", "//envoy/config/filter/network/redis_proxy/v2:pkg", "//envoy/config/filter/network/tcp_proxy/v2:pkg", diff --git a/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/BUILD index cf5fc2a635de4..a1775bbe6f513 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/filters/http/compressor/v3/BUILD @@ -8,7 +8,6 @@ api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/config/core/v3:pkg", - 
"//envoy/config/filter/http/compressor/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/BUILD index 7f71770a47c6c..bc2a58d2a7f1c 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/BUILD @@ -6,9 +6,7 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/annotations:pkg", "//envoy/config/core/v3:pkg", - "//envoy/config/filter/http/ext_authz/v2:pkg", "//envoy/type/matcher/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto index a3e503f2f76fc..c04d53f1cf8be 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto @@ -10,7 +10,6 @@ import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/v3/http_status.proto"; -import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -29,6 +28,10 @@ message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.ExtAuthz"; + reserved 4; + + reserved "use_alpha"; + // External authorization service configuration. oneof services { // gRPC service configuration (default timeout: 200ms). @@ -140,12 +143,6 @@ message ExtAuthz { // :ref:`destination`. // The labels will be read from :ref:`metadata` with the specified key. 
string bootstrap_metadata_labels_key = 15; - - bool hidden_envoy_deprecated_use_alpha = 4 [ - deprecated = true, - (envoy.annotations.deprecated_at_minor_version) = "3.0", - (envoy.annotations.disallowed_by_default) = true - ]; } // Configuration for buffering the request data. diff --git a/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/BUILD index f2b3cf9034a0a..bfe5d198e6129 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/BUILD @@ -6,8 +6,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/annotations:pkg", - "//envoy/config/filter/http/gzip/v2:pkg", "//envoy/extensions/filters/http/compressor/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], diff --git a/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto b/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto index 8c3786f5f12af..a931ab78689ff 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/gzip/v3/gzip.proto @@ -6,7 +6,6 @@ import "envoy/extensions/filters/http/compressor/v3/compressor.proto"; import "google/protobuf/wrappers.proto"; -import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; @@ -41,6 +40,11 @@ message Gzip { } } + reserved 2, 6, 7, 8; + + reserved "content_length", "content_type", "disable_on_etag_header", + "remove_accept_encoding_header"; + // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values // use more memory, but are faster and produce better compression results. The default value is 5. 
google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}]; @@ -74,16 +78,4 @@ message Gzip { // See https://www.zlib.net/manual.html for more details. Also see // https://github.com/envoyproxy/envoy/issues/8448 for context on this filter's performance. google.protobuf.UInt32Value chunk_size = 11 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; - - google.protobuf.UInt32Value hidden_envoy_deprecated_content_length = 2 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - repeated string hidden_envoy_deprecated_content_type = 6 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - bool hidden_envoy_deprecated_disable_on_etag_header = 7 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; - - bool hidden_envoy_deprecated_remove_accept_encoding_header = 8 - [deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0"]; } diff --git a/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/BUILD index 977b8f1e5f8da..c6ef74063aabe 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/filters/http/health_check/v3/BUILD @@ -6,7 +6,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ - "//envoy/config/filter/http/health_check/v2:pkg", "//envoy/config/route/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", diff --git a/generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/BUILD b/generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/BUILD index 843190b939854..1c1a6f6b44235 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/BUILD +++ b/generated_api_shadow/envoy/extensions/filters/http/ip_tagging/v3/BUILD @@ -7,7 +7,6 @@ licenses(["notice"]) # Apache 2 api_proto_package( deps = [ 
"//envoy/config/core/v3:pkg", - "//envoy/config/filter/http/ip_tagging/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) diff --git a/test/common/config/BUILD b/test/common/config/BUILD index f8cd436f49465..e710497bad9be 100644 --- a/test/common/config/BUILD +++ b/test/common/config/BUILD @@ -21,7 +21,6 @@ envoy_cc_test( srcs = ["api_type_oracle_test.cc"], deps = [ "//source/common/config:api_type_oracle_lib", - "@envoy_api//envoy/config/filter/http/ip_tagging/v2:pkg_cc_proto", "@envoy_api//envoy/extensions/filters/http/ip_tagging/v3:pkg_cc_proto", ], ) diff --git a/test/common/config/api_type_oracle_test.cc b/test/common/config/api_type_oracle_test.cc index 97aa001ff5a1c..d4cda061cde5b 100644 --- a/test/common/config/api_type_oracle_test.cc +++ b/test/common/config/api_type_oracle_test.cc @@ -1,4 +1,3 @@ -#include "envoy/config/filter/http/ip_tagging/v2/ip_tagging.pb.h" #include "envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.pb.h" #include "source/common/config/api_type_oracle.h" @@ -12,19 +11,12 @@ namespace Config { namespace { TEST(ApiTypeOracleTest, All) { - envoy::config::filter::http::ip_tagging::v2::IPTagging v2_config; envoy::extensions::filters::http::ip_tagging::v3::IPTagging v3_config; ProtobufWkt::Any non_api_type; EXPECT_EQ(nullptr, ApiTypeOracle::getEarlierVersionDescriptor(non_api_type.GetDescriptor()->full_name())); - EXPECT_EQ(nullptr, - ApiTypeOracle::getEarlierVersionDescriptor(v2_config.GetDescriptor()->full_name())); - const auto* desc = - ApiTypeOracle::getEarlierVersionDescriptor(v3_config.GetDescriptor()->full_name()); - EXPECT_EQ(envoy::config::filter::http::ip_tagging::v2::IPTagging::descriptor()->full_name(), - desc->full_name()); - EXPECT_EQ(envoy::config::filter::http::ip_tagging::v2::IPTagging::descriptor()->full_name(), + EXPECT_NE(envoy::extensions::filters::http::ip_tagging::v3::IPTagging::descriptor()->full_name(), ApiTypeOracle::getEarlierVersionMessageTypeName(v3_config.GetDescriptor()->full_name()) 
.value()); } diff --git a/test/common/filter/http/filter_config_discovery_impl_test.cc b/test/common/filter/http/filter_config_discovery_impl_test.cc index 3446c4bc338fb..3fe8e87f0da84 100644 --- a/test/common/filter/http/filter_config_discovery_impl_test.cc +++ b/test/common/filter/http/filter_config_discovery_impl_test.cc @@ -362,7 +362,7 @@ TEST_F(FilterConfigDiscoveryImplTest, DualProvidersInvalid) { - "@type": type.googleapis.com/envoy.config.core.v3.TypedExtensionConfig name: foo typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck + "@type": type.googleapis.com/envoy.extensions.filters.http.health_check.v3.HealthCheck pass_through_mode: false )EOF"; const auto response = @@ -373,7 +373,8 @@ TEST_F(FilterConfigDiscoveryImplTest, DualProvidersInvalid) { EXPECT_THROW_WITH_MESSAGE( callbacks_->onConfigUpdate(decoded_resources.refvec_, response.version_info()), EnvoyException, - "Error: filter config has type URL envoy.config.filter.http.health_check.v2.HealthCheck but " + "Error: filter config has type URL envoy.extensions.filters.http.health_check.v3.HealthCheck " + "but " "expect envoy.extensions.filters.http.router.v3.Router."); EXPECT_EQ(0UL, scope_.counter("xds.extension_config_discovery.foo.config_reload").value()); } diff --git a/test/integration/version_integration_test.cc b/test/integration/version_integration_test.cc index 67b376a83e527..2f7a5aa2e5e00 100644 --- a/test/integration/version_integration_test.cc +++ b/test/integration/version_integration_test.cc @@ -39,20 +39,6 @@ TEST_P(VersionIntegrationTest, DEPRECATED_FEATURE_TEST(IpTaggingV2StaticStructCo initialize(); } -// envoy.filters.http.ip_tagging from v2 TypedStruct config. 
-TEST_P(VersionIntegrationTest, IpTaggingV2StaticTypedStructConfig) { - config_helper_.enableDeprecatedV2Api(); - config_helper_.addFilter(absl::StrCat(R"EOF( -name: ip_tagging -typed_config: - "@type": type.googleapis.com/udpa.type.v1.TypedStruct - type_url: type.googleapis.com/envoy.config.filter.http.ip_tagging.v2.IPTagging - value: - )EOF", - ExampleIpTaggingConfig)); - initialize(); -} - // envoy.filters.http.ip_tagging from v3 TypedStruct config. TEST_P(VersionIntegrationTest, IpTaggingV3StaticTypedStructConfig) { config_helper_.addFilter(absl::StrCat(R"EOF( @@ -66,18 +52,6 @@ name: ip_tagging initialize(); } -// envoy.filters.http.ip_tagging from v2 typed Any config. -TEST_P(VersionIntegrationTest, IpTaggingV2StaticTypedConfig) { - config_helper_.enableDeprecatedV2Api(); - config_helper_.addFilter(absl::StrCat(R"EOF( - name: ip_tagging - typed_config: - "@type": type.googleapis.com/envoy.config.filter.http.ip_tagging.v2.IPTagging - )EOF", - ExampleIpTaggingConfig)); - initialize(); -} - // envoy.filters.http.ip_tagging from v3 typed Any config. 
TEST_P(VersionIntegrationTest, IpTaggingV3StaticTypedConfig) { config_helper_.addFilter(absl::StrCat(R"EOF( diff --git a/tools/proto_format/proto_sync.py b/tools/proto_format/proto_sync.py index 9dd232a4fae33..8a2c589772ae6 100755 --- a/tools/proto_format/proto_sync.py +++ b/tools/proto_format/proto_sync.py @@ -62,16 +62,21 @@ "envoy/config/filter/http/aws_request_signing/v2alpha", "envoy/config/filter/http/buffer/v2", "envoy/config/filter/http/cache/v2alpha", + "envoy/config/filter/http/compressor/v2", "envoy/config/filter/http/cors/v2", "envoy/config/filter/http/csrf/v2", "envoy/config/filter/http/dynamic_forward_proxy/v2alpha", "envoy/config/filter/http/dynamo/v2", + "envoy/config/filter/http/ext_authz/v2", "envoy/config/filter/http/fault/v2", "envoy/config/filter/http/grpc_http1_bridge/v2", "envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1", "envoy/config/filter/http/grpc_stats/v2alpha", "envoy/config/filter/http/grpc_web/v2", + "envoy/config/filter/http/gzip/v2", "envoy/config/filter/http/header_to_metadata/v2", + "envoy/config/filter/http/health_check/v2", + "envoy/config/filter/http/ip_tagging/v2", "envoy/config/filter/http/jwt_authn/v2alpha", "envoy/config/filter/http/lua/v2", "envoy/config/filter/http/on_demand/v2", From e3b14ba5358263ed50d581d12df854d9df183637 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 21 Jul 2021 08:22:17 -0400 Subject: [PATCH 17/57] build(deps): bump sphinx from 4.1.0 to 4.1.1 in /tools/docs (#17365) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- tools/docs/requirements.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/docs/requirements.txt b/tools/docs/requirements.txt index 4510f57eac092..d322bbb1e62a4 100644 --- a/tools/docs/requirements.txt +++ b/tools/docs/requirements.txt @@ -198,9 +198,9 @@ sphinx-tabs==3.1.0 \ 
--hash=sha256:63df94e84bc05eb8598419a313ffc24455a14d1a580d174bb748404063958a67 \ --hash=sha256:5eee2a869b1226e1f618f0c7ed267e5e3c24425565e6313cad80d00a7119694f # via -r tools/docs/requirements.txt -sphinx==4.1.0 \ - --hash=sha256:51028bb0d3340eb80bcc1a2d614e8308ac78d226e6b796943daf57920abc1aea \ - --hash=sha256:4219f14258ca5612a0c85ed9b7222d54da69724d7e9dd92d1819ad1bf65e1ad2 +sphinx==4.1.1 \ + --hash=sha256:3d513088236eef51e5b0adb78b0492eb22cc3b8ccdb0b36dd021173b365d4454 \ + --hash=sha256:23c846a1841af998cb736218539bb86d16f5eb95f5760b1966abcd2d584e62b8 # via # -r tools/docs/requirements.txt # sphinx-copybutton From 9c8b5dc659ef7130a67b4f16d38c4345705403a6 Mon Sep 17 00:00:00 2001 From: phlax Date: Wed, 21 Jul 2021 13:23:28 +0100 Subject: [PATCH 18/57] tooling: Misc log/proc improvements (#17420) Signed-off-by: Ryan Northey --- tools/base/checker.py | 24 +++++++++------ tools/base/runner.py | 6 ++-- tools/base/tests/test_checker.py | 53 +++++++++++++++++++++++++------- 3 files changed, 60 insertions(+), 23 deletions(-) diff --git a/tools/base/checker.py b/tools/base/checker.py index bcde297ba6f42..c7b03b7742611 100644 --- a/tools/base/checker.py +++ b/tools/base/checker.py @@ -31,6 +31,10 @@ def error_count(self) -> int: """Count of all errors found""" return sum(len(e) for e in self.errors.values()) + @property + def exiting(self): + return "exiting" in self.errors + @property def failed(self) -> dict: """Dictionary of errors per check""" @@ -70,8 +74,7 @@ def paths(self) -> list: def show_summary(self) -> bool: """Show a summary at the end or not""" return bool( - not "exiting" in self.errors - and (self.args.summary or self.error_count or self.warning_count)) + not self.exiting and (self.args.summary or self.error_count or self.warning_count)) @property def status(self) -> dict: @@ -175,7 +178,7 @@ def error(self, name: str, errors: list, log: bool = True, log_type: str = "erro getattr(self.log, log_type)(f"[{name}] {message}") return 1 - def exiting(self): + def 
exit(self): return self.error("exiting", ["Keyboard exit"], log_type="fatal") def get_checks(self) -> Sequence[str]: @@ -189,10 +192,10 @@ def on_check_begin(self, check: str) -> None: def on_check_run(self, check: str) -> None: """Callback hook called after each check run""" - if check in self.errors: + if self.exiting: + return + elif check in self.errors: self.log.error(f"[{check}] Check failed") - elif "exiting" in self.errors: - pass elif check in self.warnings: self.log.warning(f"[{check}] Check has warnings") else: @@ -218,7 +221,7 @@ def run(self) -> int: getattr(self, f"check_{check}")() self.on_check_run(check) except KeyboardInterrupt as e: - self.exiting() + self.exit() finally: result = self.on_checks_complete() return result @@ -316,7 +319,10 @@ async def _run(self) -> int: await getattr(self, f"check_{check}")() await self.on_check_run(check) finally: - result = await self.on_checks_complete() + if self.exiting: + result = 1 + else: + result = await self.on_checks_complete() return result def run(self) -> int: @@ -325,7 +331,7 @@ def run(self) -> int: except KeyboardInterrupt as e: # This needs to be outside the loop to catch the a keyboard interrupt # This means that a new loop has to be created to cleanup - result = self.exiting() + result = self.exit() result = asyncio.get_event_loop().run_until_complete(self.on_checks_complete()) return result diff --git a/tools/base/runner.py b/tools/base/runner.py index 300b9ca647a8b..5f4d18af6cf6a 100644 --- a/tools/base/runner.py +++ b/tools/base/runner.py @@ -23,13 +23,13 @@ LOG_LEVEL_STYLES = frozendict( critical=frozendict(bold=True, color="red"), debug=frozendict(color="green"), - error=frozendict(color="red"), - info=frozendict(), + error=frozendict(color="red", bold=True), + info=frozendict(color="white", bold=True), notice=frozendict(color="magenta", bold=True), spam=frozendict(color="green", faint=True), success=frozendict(bold=True, color="green"), verbose=frozendict(color="blue"), - 
warning=frozendict(color="yellow")) + warning=frozendict(color="yellow", bold=True)) def catches(errors: Union[Tuple[Exception], Exception]) -> Callable: diff --git a/tools/base/tests/test_checker.py b/tools/base/tests/test_checker.py index 9afee0a858f01..46fa8a152ca34 100644 --- a/tools/base/tests/test_checker.py +++ b/tools/base/tests/test_checker.py @@ -61,6 +61,16 @@ def test_checker_diff(): assert "diff" not in checker.__dict__ +@pytest.mark.parametrize( + "errors", + [{}, dict(exiting="EEK"), dict(notexiting="OK")]) +def test_checker_exiting(errors): + checker = Checker("path1", "path2", "path3") + checker.errors = errors + assert checker.exiting == bool("exiting" in errors) + assert "exiting" not in checker.__dict__ + + def test_checker_error_count(): checker = Checker("path1", "path2", "path3") checker.errors = dict(foo=["err"] * 3, bar=["err"] * 5, baz=["err"] * 7) @@ -168,21 +178,26 @@ def test_checker_paths(patches, paths): @pytest.mark.parametrize("summary", [True, False]) @pytest.mark.parametrize("error_count", [0, 1]) @pytest.mark.parametrize("warning_count", [0, 1]) -def test_checker_show_summary(patches, summary, error_count, warning_count): +@pytest.mark.parametrize("exiting", [True, False]) +def test_checker_show_summary(patches, summary, error_count, warning_count, exiting): checker = Checker("path1", "path2", "path3") patched = patches( ("Checker.args", dict(new_callable=PropertyMock)), + ("Checker.exiting", dict(new_callable=PropertyMock)), ("Checker.error_count", dict(new_callable=PropertyMock)), ("Checker.warning_count", dict(new_callable=PropertyMock)), prefix="tools.base.checker") - with patched as (m_args, m_errors, m_warnings): + with patched as (m_args, m_exit, m_errors, m_warnings): m_args.return_value.summary = summary m_errors.return_value = error_count m_warnings.return_value = warning_count + m_exit.return_value = exiting result = checker.show_summary - if summary or error_count or warning_count: + if exiting: + assert result is 
False + elif summary or error_count or warning_count: assert result is True else: assert result is False @@ -362,14 +377,14 @@ def test_checker_error(log, log_type, errors): assert not getattr(m_log.return_value, log_type or "error").called -def test_checker_exiting(patches): +def test_checker_exit(patches): checker = Checker("path1", "path2", "path3") patched = patches( "Checker.error", prefix="tools.base.checker") with patched as (m_error, ): - assert checker.exiting() == m_error.return_value + assert checker.exit() == m_error.return_value assert ( list(m_error.call_args) @@ -419,9 +434,11 @@ def test_checker_on_check_begin(patches): @pytest.mark.parametrize("errors", [[], ["CHECK1", "CHECK2", "CHECK3"], ["CHECK2", "CHECK3"]]) @pytest.mark.parametrize("warnings", [[], ["CHECK1", "CHECK2", "CHECK3"], ["CHECK2", "CHECK3"]]) -def test_checker_on_check_run(patches, errors, warnings): +@pytest.mark.parametrize("exiting", [True, False]) +def test_checker_on_check_run(patches, errors, warnings, exiting): checker = Checker("path1", "path2", "path3") patched = patches( + ("Checker.exiting", dict(new_callable=PropertyMock)), ("Checker.log", dict(new_callable=PropertyMock)), prefix="tools.base.checker") @@ -429,9 +446,14 @@ def test_checker_on_check_run(patches, errors, warnings): checker.errors = errors checker.warnings = warnings - with patched as (m_log, ): + with patched as (m_exit, m_log): + m_exit.return_value = exiting assert not checker.on_check_run(check) + if exiting: + assert not m_log.called + return + if check in errors: assert ( list(m_log.return_value.error.call_args) @@ -487,7 +509,7 @@ def test_checker_on_checks_complete(patches, failed, show_summary): def test_checker_run(patches, raises): checker = DummyCheckerWithChecks("path1", "path2", "path3") patched = patches( - "Checker.exiting", + "Checker.exit", "Checker.get_checks", "Checker.on_check_begin", "Checker.on_check_run", @@ -789,7 +811,7 @@ def test_asynchecker_run(patches, raises): patched = patches( 
"asyncio", - "Checker.exiting", + "Checker.exit", ("AsyncChecker._run", dict(new_callable=MagicMock)), ("AsyncChecker.on_checks_complete", dict(new_callable=MagicMock)), prefix="tools.base.checker") @@ -902,7 +924,8 @@ async def test_asynchecker_on_checks_complete(patches): @pytest.mark.asyncio @pytest.mark.parametrize("raises", [True, False]) -async def test_asynchecker__run(patches, raises): +@pytest.mark.parametrize("exiting", [True, False]) +async def test_asynchecker__run(patches, raises, exiting): _check1 = MagicMock() _check2 = MagicMock() _check3 = MagicMock() @@ -930,21 +953,29 @@ class SomeError(Exception): "AsyncChecker.on_check_begin", "AsyncChecker.on_check_run", "AsyncChecker.on_checks_complete", + ("AsyncChecker.exiting", dict(new_callable=PropertyMock)), prefix="tools.base.checker") - with patched as (m_log, m_checks, m_begin, m_check, m_run, m_complete): + with patched as (m_log, m_checks, m_begin, m_check, m_run, m_complete, m_exit): m_checks.return_value = ["check1", "check2", "check3"] + m_exit.return_value = exiting if raises: m_begin.side_effect = SomeError("AN ERROR OCCURRED") with pytest.raises(SomeError): await checker._run() + elif exiting: + assert await checker._run() == 1 else: assert await checker._run() == m_complete.return_value assert ( list(m_begin.call_args) == [(), {}]) + + if exiting: + return + assert ( list(m_complete.call_args) == [(), {}]) From 18e9fad76c8732e63d5b6613a2c19dbf1d45a9f3 Mon Sep 17 00:00:00 2001 From: Jose Ulises Nino Rivera Date: Wed, 21 Jul 2021 05:26:57 -0700 Subject: [PATCH 19/57] apple dns: add more details on DNS failure (#17423) Risk Level: low, new telemetry Testing: added tests Signed-off-by: Jose Nino --- .../arch_overview/upstream/dns_resolution.rst | 5 +- source/common/network/apple_dns_impl.cc | 22 ++- source/common/network/apple_dns_impl.h | 7 +- test/common/network/apple_dns_impl_test.cc | 128 ++++++++++++------ 4 files changed, 119 insertions(+), 43 deletions(-) diff --git 
a/docs/root/intro/arch_overview/upstream/dns_resolution.rst b/docs/root/intro/arch_overview/upstream/dns_resolution.rst index b48d24071b07b..878f44f1a9f6e 100644 --- a/docs/root/intro/arch_overview/upstream/dns_resolution.rst +++ b/docs/root/intro/arch_overview/upstream/dns_resolution.rst @@ -20,5 +20,8 @@ The Apple-based DNS Resolver emits the following stats rooted in the ``dns.apple :widths: 1, 1, 2 connection_failure, Counter, Number of failed attempts to connect to the DNS server - socket_failure, Counter, Number of failed attempts to obtain a file descriptor to the socket to the DNS server + get_addr_failure, Counter, Number of general failures when calling GetAddrInfo API + network_failure, Counter, Number of failures due to network connectivity processing_failure, Counter, Number of failures when processing data from the DNS server + socket_failure, Counter, Number of failed attempts to obtain a file descriptor to the socket to the DNS server + timeout, Counter, Number of queries that resulted in a timeout diff --git a/source/common/network/apple_dns_impl.cc b/source/common/network/apple_dns_impl.cc index 4d363ddc0bdf6..bd0a790c609d1 100644 --- a/source/common/network/apple_dns_impl.cc +++ b/source/common/network/apple_dns_impl.cc @@ -78,6 +78,7 @@ ActiveDnsQuery* AppleDnsResolverImpl::resolve(const std::string& dns_name, DNSServiceErrorType error = pending_resolution->dnsServiceGetAddrInfo(dns_lookup_family); if (error != kDNSServiceErr_NoError) { ENVOY_LOG(warn, "DNS resolver error ({}) in dnsServiceGetAddrInfo for {}", error, dns_name); + chargeGetAddrInfoErrorStats(error); return nullptr; } @@ -105,6 +106,23 @@ ActiveDnsQuery* AppleDnsResolverImpl::resolve(const std::string& dns_name, return nullptr; } +void AppleDnsResolverImpl::chargeGetAddrInfoErrorStats(DNSServiceErrorType error_code) { + switch (error_code) { + case kDNSServiceErr_DefunctConnection: + stats_.connection_failure_.inc(); + break; + case kDNSServiceErr_NoRouter: + 
stats_.network_failure_.inc(); + break; + case kDNSServiceErr_Timeout: + stats_.timeout_.inc(); + break; + default: + stats_.get_addr_failure_.inc(); + break; + } +} + AppleDnsResolverImpl::PendingResolution::PendingResolution(AppleDnsResolverImpl& parent, ResolveCb callback, Event::Dispatcher& dispatcher, @@ -222,9 +240,7 @@ void AppleDnsResolverImpl::PendingResolution::onDNSServiceGetAddrInfoReply( // Generic error handling. if (error_code != kDNSServiceErr_NoError) { - // TODO(junr03): consider creating stats for known error types (timeout, refused connection, - // etc.). Currently a bit challenging because there is no scope access wired through. Current - // query gets a failure status + parent_.chargeGetAddrInfoErrorStats(error_code); pending_cb_.status_ = ResolutionStatus::Failure; pending_cb_.responses_.clear(); diff --git a/source/common/network/apple_dns_impl.h b/source/common/network/apple_dns_impl.h index 81647737f3741..d788f076586d6 100644 --- a/source/common/network/apple_dns_impl.h +++ b/source/common/network/apple_dns_impl.h @@ -44,8 +44,11 @@ using DnsServiceSingleton = ThreadSafeSingleton; */ #define ALL_APPLE_DNS_RESOLVER_STATS(COUNTER) \ COUNTER(connection_failure) \ + COUNTER(get_addr_failure) \ + COUNTER(network_failure) \ + COUNTER(processing_failure) \ COUNTER(socket_failure) \ - COUNTER(processing_failure) + COUNTER(timeout) /** * Struct definition for all DNS resolver stats. 
@see stats_macros.h @@ -69,6 +72,8 @@ class AppleDnsResolverImpl : public DnsResolver, protected Logger::Loggable(dispatcher_, stats_store_); } + void checkErrorStat(DNSServiceErrorType error_code) { + switch (error_code) { + case kDNSServiceErr_DefunctConnection: + EXPECT_EQ(1, TestUtility::findCounter(stats_store_, "dns.apple.connection_failure")->value()); + break; + case kDNSServiceErr_NoRouter: + EXPECT_EQ(1, TestUtility::findCounter(stats_store_, "dns.apple.network_failure")->value()); + break; + case kDNSServiceErr_Timeout: + EXPECT_EQ(1, TestUtility::findCounter(stats_store_, "dns.apple.timeout")->value()); + break; + default: + EXPECT_EQ(1, TestUtility::findCounter(stats_store_, "dns.apple.get_addr_failure")->value()); + break; + } + } + + void synchronousWithError(DNSServiceErrorType error_code) { + EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, _, _, _, _, _, _)) + .WillOnce(Return(error_code)); + + EXPECT_EQ(nullptr, resolver_->resolve( + "foo.com", Network::DnsLookupFamily::Auto, + [](DnsResolver::ResolutionStatus, std::list&&) -> void { + // This callback should never be executed. + FAIL(); + })); + + checkErrorStat(error_code); + } + + void completeWithError(DNSServiceErrorType error_code) { + const std::string hostname = "foo.com"; + sockaddr_in addr4; + addr4.sin_family = AF_INET; + EXPECT_EQ(1, inet_pton(AF_INET, "1.2.3.4", &addr4.sin_addr)); + addr4.sin_port = htons(6502); + + Network::Address::Ipv4Instance address(&addr4); + absl::Notification dns_callback_executed; + + EXPECT_CALL(dns_service_, + dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, + kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, + StrEq(hostname.c_str()), _, _)) + .WillOnce(DoAll( + // Have the API call synchronously call the provided callback. 
+ WithArgs<5, 6>(Invoke([&](DNSServiceGetAddrInfoReply callback, void* context) -> void { + callback(nullptr, 0, 0, error_code, hostname.c_str(), nullptr, 30, context); + })), + Return(kDNSServiceErr_NoError))); + + // The returned value is nullptr because the query has already been fulfilled. Verify that the + // callback ran via notification. + EXPECT_EQ(nullptr, resolver_->resolve( + hostname, Network::DnsLookupFamily::Auto, + [&dns_callback_executed](DnsResolver::ResolutionStatus status, + std::list&& responses) -> void { + EXPECT_EQ(DnsResolver::ResolutionStatus::Failure, status); + EXPECT_TRUE(responses.empty()); + dns_callback_executed.Notify(); + })); + dns_callback_executed.WaitForNotification(); + checkErrorStat(error_code); + } + protected: MockDnsService dns_service_; TestThreadsafeSingletonInjector dns_service_injector_{&dns_service_}; @@ -374,16 +440,20 @@ TEST_F(AppleDnsImplFakeApiTest, ErrorInProcessResult) { EXPECT_EQ(1, TestUtility::findCounter(stats_store_, "dns.apple.processing_failure")->value()); } -TEST_F(AppleDnsImplFakeApiTest, SynchronousErrorInGetAddrInfo) { - EXPECT_CALL(dns_service_, dnsServiceGetAddrInfo(_, _, _, _, _, _, _)) - .WillOnce(Return(kDNSServiceErr_Unknown)); +TEST_F(AppleDnsImplFakeApiTest, SynchronousGeneralErrorInGetAddrInfo) { + synchronousWithError(kDNSServiceErr_Unknown); +} - EXPECT_EQ(nullptr, - resolver_->resolve("foo.com", Network::DnsLookupFamily::Auto, - [](DnsResolver::ResolutionStatus, std::list&&) -> void { - // This callback should never be executed. 
- FAIL(); - })); +TEST_F(AppleDnsImplFakeApiTest, SynchronousNetworkErrorInGetAddrInfo) { + synchronousWithError(kDNSServiceErr_NoRouter); +} + +TEST_F(AppleDnsImplFakeApiTest, SynchronousConnectionErrorInGetAddrInfo) { + synchronousWithError(kDNSServiceErr_DefunctConnection); +} + +TEST_F(AppleDnsImplFakeApiTest, SynchronousTimeoutInGetAddrInfo) { + synchronousWithError(kDNSServiceErr_Timeout); } TEST_F(AppleDnsImplFakeApiTest, QuerySynchronousCompletion) { @@ -423,38 +493,20 @@ TEST_F(AppleDnsImplFakeApiTest, QuerySynchronousCompletion) { dns_callback_executed.WaitForNotification(); } -TEST_F(AppleDnsImplFakeApiTest, QueryCompletedWithError) { - const std::string hostname = "foo.com"; - sockaddr_in addr4; - addr4.sin_family = AF_INET; - EXPECT_EQ(1, inet_pton(AF_INET, "1.2.3.4", &addr4.sin_addr)); - addr4.sin_port = htons(6502); +TEST_F(AppleDnsImplFakeApiTest, QueryCompletedWithGeneralError) { + completeWithError(kDNSServiceErr_Unknown); +} - Network::Address::Ipv4Instance address(&addr4); - absl::Notification dns_callback_executed; +TEST_F(AppleDnsImplFakeApiTest, QueryCompletedWithNetworkError) { + completeWithError(kDNSServiceErr_NoRouter); +} - EXPECT_CALL(dns_service_, - dnsServiceGetAddrInfo(_, kDNSServiceFlagsTimeout, 0, - kDNSServiceProtocol_IPv4 | kDNSServiceProtocol_IPv6, - StrEq(hostname.c_str()), _, _)) - .WillOnce(DoAll( - // Have the API call synchronously call the provided callback. - WithArgs<5, 6>(Invoke([&](DNSServiceGetAddrInfoReply callback, void* context) -> void { - callback(nullptr, 0, 0, kDNSServiceErr_Unknown, hostname.c_str(), nullptr, 30, context); - })), - Return(kDNSServiceErr_NoError))); +TEST_F(AppleDnsImplFakeApiTest, QueryCompletedWithConnectionError) { + completeWithError(kDNSServiceErr_DefunctConnection); +} - // The returned value is nullptr because the query has already been fulfilled. Verify that the - // callback ran via notification. 
- EXPECT_EQ(nullptr, resolver_->resolve( - hostname, Network::DnsLookupFamily::Auto, - [&dns_callback_executed](DnsResolver::ResolutionStatus status, - std::list&& responses) -> void { - EXPECT_EQ(DnsResolver::ResolutionStatus::Failure, status); - EXPECT_TRUE(responses.empty()); - dns_callback_executed.Notify(); - })); - dns_callback_executed.WaitForNotification(); +TEST_F(AppleDnsImplFakeApiTest, QueryCompletedWithTimeout) { + completeWithError(kDNSServiceErr_Timeout); } TEST_F(AppleDnsImplFakeApiTest, MultipleAddresses) { From eb157b7eb88658aa67010eea13b7eb57538257aa Mon Sep 17 00:00:00 2001 From: Keith Smiley Date: Wed, 21 Jul 2021 06:43:16 -0700 Subject: [PATCH 20/57] bazel: disable -Wrange-loop-analysis on macOS (#17428) The version of clang that ships with Xcode 12 has false positives with this warning that might be fixed by https://reviews.llvm.org/D73007 In the meantime we can disable it entirely as discussed on https://github.com/envoyproxy/envoy/pull/17393 Signed-off-by: Keith Smiley --- bazel/envoy_internal.bzl | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bazel/envoy_internal.bzl b/bazel/envoy_internal.bzl index f8dd4eba57651..6f9c9d83e30b3 100644 --- a/bazel/envoy_internal.bzl +++ b/bazel/envoy_internal.bzl @@ -76,6 +76,10 @@ def envoy_copts(repository, test = False): repository + "//bazel:windows_x86_64": ["-wd4834", "-Zc:preprocessor", "-Wv:19.4"] if test else ["-Zc:preprocessor", "-Wv:19.4"], repository + "//bazel:clang_cl_build": ["-Wno-unused-result"] if test else [], "//conditions:default": [], + }) + select({ + # TODO: Remove once https://reviews.llvm.org/D73007 is in the lowest supported Xcode version + repository + "//bazel:apple": ["-Wno-range-loop-analysis"], + "//conditions:default": [], }) + select({ repository + "//bazel:no_debug_info": ["-g0"], "//conditions:default": [], From ecc17c72929edf71e986292860c2d6d3562e405b Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 21 Jul 2021 09:45:07 -0400 Subject: [PATCH 21/57] test: 
ratcheting coverage (#17425) Signed-off-by: Alyssa Wilk --- test/per_file_coverage.sh | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index 98cf57972dba4..967c1f81a94ef 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -3,7 +3,7 @@ # directory:coverage_percent # for existing directories with low coverage. declare -a KNOWN_LOW_COVERAGE=( -"source/common:96.3" # Raise when QUIC coverage goes up +"source/common:96.5" # Raise when QUIC coverage goes up "source/common/api:75.3" "source/common/api/posix:73.9" "source/common/common:96.3" @@ -17,9 +17,9 @@ declare -a KNOWN_LOW_COVERAGE=( "source/common/signal:84.5" # Death tests don't report LCOV "source/common/singleton:95.8" "source/common/thread:0.0" # Death tests don't report LCOV -"source/common/matcher:93.3" +"source/common/matcher:95.0" "source/common/quic:90.6" -"source/common/tracing:95.7" +"source/common/tracing:96.1" "source/common/watchdog:42.9" # Death tests don't report LCOV "source/exe:94.3" "source/extensions/common/crypto:91.5" @@ -27,7 +27,6 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/common/wasm:95.3" # flaky: be careful adjusting "source/extensions/common/wasm/null:77.8" "source/extensions/common/wasm/v8:85.4" -"source/extensions/common:96.5" "source/extensions/filters/common/expr:96.4" "source/extensions/filters/common/fault:94.6" "source/extensions/filters/common/rbac:87.5" @@ -40,25 +39,22 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/filters/network/common:96.2" "source/extensions/filters/network/common/redis:96.3" "source/extensions/filters/network/dubbo_proxy:96.2" -"source/extensions/filters/network/dubbo_proxy/router:95.3" "source/extensions/filters/network/mongo_proxy:94.0" "source/extensions/filters/network/sni_cluster:90.3" "source/extensions/filters/network/sni_dynamic_forward_proxy:90.9" "source/extensions/health_checkers:95.9" 
"source/extensions/health_checkers/redis:95.9" "source/extensions/quic_listeners:85.1" -"source/extensions/stat_sinks/common:96.5" "source/extensions/stat_sinks/common/statsd:96.5" -"source/extensions/stat_sinks/graphite_statsd:85.2" +"source/extensions/stat_sinks/graphite_statsd:85.7" "source/extensions/stat_sinks/statsd:85.2" -"source/extensions/tracers:96.4" -"source/extensions/tracers/opencensus:91.6" +"source/extensions/tracers:96.5" +"source/extensions/tracers/opencensus:92.5" "source/extensions/tracers/xray:94.0" "source/extensions/transport_sockets:95.7" "source/extensions/transport_sockets/tls/cert_validator:96.5" "source/extensions/transport_sockets/tls/private_key:76.9" "source/extensions/transport_sockets/tls:95.1" -"source/extensions/wasm_runtime:40.0" "source/extensions/wasm_runtime/wamr:0.0" # Not enabled in coverage build "source/extensions/wasm_runtime/wasmtime:0.0" # Not enabled in coverage build "source/extensions/wasm_runtime/wavm:0.0" # Not enabled in coverage build @@ -66,7 +62,7 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/watchdog/profile_action:85.7" "source/server:94.4" # flaky: be careful adjusting. 
See https://github.com/envoyproxy/envoy/issues/15239 "source/server/admin:95.8" -"source/server/config_validation:78.3" +"source/server/config_validation:78.7" ) [[ -z "${SRCDIR}" ]] && SRCDIR="${PWD}" From 5c08797a2f274ac696c29ef5731848a7b8887494 Mon Sep 17 00:00:00 2001 From: Sunil Narasimhamurthy <13044744+suniltheta@users.noreply.github.com> Date: Wed, 21 Jul 2021 07:56:39 -0700 Subject: [PATCH 22/57] xray: aws xray tracer span data bug fix (#17171) Update xray segment data to contain error, fault & throttle tag in the "Errors and exceptions" section as per the xray documentation at https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html Signed-off-by: Sunil Narasimhamurthy <13044744+suniltheta@users.noreply.github.com> --- docs/root/version_history/current.rst | 2 + source/extensions/tracers/xray/BUILD | 1 + source/extensions/tracers/xray/daemon.proto | 20 ++ source/extensions/tracers/xray/tracer.cc | 38 +-- source/extensions/tracers/xray/tracer.h | 42 ++- test/extensions/tracers/xray/tracer_test.cc | 359 ++++++++++++++++---- 6 files changed, 371 insertions(+), 91 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index ef1983f2996d7..2a318236bbdf1 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -29,6 +29,8 @@ Bug Fixes --------- *Changes expected to improve the state of the world and are unlikely to have negative effects* +* xray: fix the AWS X-Ray tracer bug where span's error, fault and throttle information was not reported properly as per the `AWS X-Ray documentation `_. Before this fix, server error was reported under 'annotations' section of the segment data. 
+ Removed Config or Runtime ------------------------- *Normally occurs at the end of the* :ref:`deprecation period ` diff --git a/source/extensions/tracers/xray/BUILD b/source/extensions/tracers/xray/BUILD index 4944d38060b91..b3044a98df675 100644 --- a/source/extensions/tracers/xray/BUILD +++ b/source/extensions/tracers/xray/BUILD @@ -45,6 +45,7 @@ envoy_cc_library( "//source/common/common:hex_lib", "//source/common/common:macros", "//source/common/common:random_generator_lib", + "//source/common/http:codes_lib", "//source/common/http:header_map_lib", "//source/common/http:headers_lib", "//source/common/json:json_loader_lib", diff --git a/source/extensions/tracers/xray/daemon.proto b/source/extensions/tracers/xray/daemon.proto index c1ae2ef1782b4..2054bced7630d 100644 --- a/source/extensions/tracers/xray/daemon.proto +++ b/source/extensions/tracers/xray/daemon.proto @@ -9,19 +9,39 @@ import "google/protobuf/struct.proto"; // see https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html message Segment { + // The logical name of the service that handled the request. Configured via + // `segment_name` in XRayConfig. string name = 1 [(validate.rules).string = {min_len: 1}]; + // A 64-bit identifier for the segment, unique among segments in the same trace. string id = 2 [(validate.rules).string = {min_len: 1}]; + // A unique identifier that connects all segments and sub-segments originating + // from a single client request. string trace_id = 3 [(validate.rules).string = {len: 35}]; + // The time the segment was created. double start_time = 4 [(validate.rules).double = {gt: 0}]; + // The time the segment was closed. double end_time = 5 [(validate.rules).double = {gt: 0}]; + // The type of the resource originating the segment. Configured via `origin` field in + // `XRayConfig.SegmentFields`. e.g. “AWS::AppMesh::Proxy”. string origin = 9; + // Segment ID of the sub-segment's parent segment. 
string parent_id = 6; + // AWS resource metadata dictionary. Configured via `aws` field in + // `XRayConfig.SegmentFields`. google.protobuf.Struct aws = 10; + // To indicate that a client error occurred. + bool error = 11; + // To indicate that a server error occurred. + bool fault = 12; + // To indicate that a request was throttled. + bool throttle = 13; + // Data block to record details about the HTTP request. http_annotations http = 7; message http_annotations { google.protobuf.Struct request = 1; google.protobuf.Struct response = 2; } + // Object containing one or more fields that X-Ray indexes for use with filter expressions. map annotations = 8; } diff --git a/source/extensions/tracers/xray/tracer.cc b/source/extensions/tracers/xray/tracer.cc index bec05c4fa1018..6c033ef31b3bc 100644 --- a/source/extensions/tracers/xray/tracer.cc +++ b/source/extensions/tracers/xray/tracer.cc @@ -71,6 +71,9 @@ void Span::finishSpan() { time_point_cast(time_source_.systemTime()).time_since_epoch().count()); s.set_origin(origin()); s.set_parent_id(parentId()); + s.set_error(clientError()); + s.set_fault(serverError()); + s.set_throttle(isThrottled()); auto* aws = s.mutable_aws()->mutable_fields(); for (const auto& field : aws_metadata_) { @@ -161,48 +164,45 @@ void Span::setTag(absl::string_view name, absl::string_view value) { // https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html#api-segmentdocuments-http constexpr auto SpanContentLength = "content_length"; constexpr auto SpanMethod = "method"; - constexpr auto SpanStatus = "status"; - constexpr auto SpanUserAgent = "user_agent"; constexpr auto SpanUrl = "url"; constexpr auto SpanClientIp = "client_ip"; constexpr auto SpanXForwardedFor = "x_forwarded_for"; - constexpr auto HttpUrl = "http.url"; - constexpr auto HttpMethod = "http.method"; - constexpr auto HttpStatusCode = "http.status_code"; - constexpr auto HttpUserAgent = "user_agent"; - constexpr auto HttpResponseSize = "response_size"; - 
constexpr auto PeerAddress = "peer.address"; - if (name.empty() || value.empty()) { return; } - if (name == HttpUrl) { + if (name == Tracing::Tags::get().HttpUrl) { http_request_annotations_.emplace(SpanUrl, ValueUtil::stringValue(std::string(value))); - } else if (name == HttpMethod) { + } else if (name == Tracing::Tags::get().HttpMethod) { http_request_annotations_.emplace(SpanMethod, ValueUtil::stringValue(std::string(value))); - } else if (name == HttpUserAgent) { - http_request_annotations_.emplace(SpanUserAgent, ValueUtil::stringValue(std::string(value))); - } else if (name == HttpStatusCode) { + } else if (name == Tracing::Tags::get().UserAgent) { + http_request_annotations_.emplace(Tracing::Tags::get().UserAgent, + ValueUtil::stringValue(std::string(value))); + } else if (name == Tracing::Tags::get().HttpStatusCode) { uint64_t status_code; if (!absl::SimpleAtoi(value, &status_code)) { - ENVOY_LOG(debug, "{} must be a number, given: {}", HttpStatusCode, value); + ENVOY_LOG(debug, "{} must be a number, given: {}", Tracing::Tags::get().HttpStatusCode, + value); return; } - http_response_annotations_.emplace(SpanStatus, ValueUtil::numberValue(status_code)); - } else if (name == HttpResponseSize) { + setResponseStatusCode(status_code); + http_response_annotations_.emplace(Tracing::Tags::get().Status, + ValueUtil::numberValue(status_code)); + } else if (name == Tracing::Tags::get().ResponseSize) { uint64_t response_size; if (!absl::SimpleAtoi(value, &response_size)) { - ENVOY_LOG(debug, "{} must be a number, given: {}", HttpResponseSize, value); + ENVOY_LOG(debug, "{} must be a number, given: {}", Tracing::Tags::get().ResponseSize, value); return; } http_response_annotations_.emplace(SpanContentLength, ValueUtil::numberValue(response_size)); - } else if (name == PeerAddress) { + } else if (name == Tracing::Tags::get().PeerAddress) { http_request_annotations_.emplace(SpanClientIp, ValueUtil::stringValue(std::string(value))); // In this case, PeerAddress refers to 
the client's actual IP address, not // the address specified in the HTTP X-Forwarded-For header. http_request_annotations_.emplace(SpanXForwardedFor, ValueUtil::boolValue(false)); + } else if (name == Tracing::Tags::get().Error && value == Tracing::Tags::get().True) { + setServerError(); } else { custom_annotations_.emplace(name, value); } diff --git a/source/extensions/tracers/xray/tracer.h b/source/extensions/tracers/xray/tracer.h index bd3c772be3160..623eddd88638a 100644 --- a/source/extensions/tracers/xray/tracer.h +++ b/source/extensions/tracers/xray/tracer.h @@ -10,7 +10,9 @@ #include "source/common/common/empty_string.h" #include "source/common/common/hex.h" #include "source/common/common/random_generator.h" +#include "source/common/http/codes.h" #include "source/common/protobuf/utility.h" +#include "source/common/tracing/common_values.h" #include "source/extensions/tracers/xray/daemon_broker.h" #include "source/extensions/tracers/xray/sampling_strategy.h" #include "source/extensions/tracers/xray/xray_configuration.h" @@ -36,7 +38,8 @@ class Span : public Tracing::Span, Logger::Loggable { */ Span(TimeSource& time_source, Random::RandomGenerator& random, DaemonBroker& broker) : time_source_(time_source), random_(random), broker_(broker), - id_(Hex::uint64ToHex(random_.random())), sampled_(true) {} + id_(Hex::uint64ToHex(random_.random())), server_error_(false), response_status_code_(0), + sampled_(true) {} /** * Sets the Span's trace ID. @@ -99,14 +102,6 @@ class Span : public Tracing::Span, Logger::Loggable { aws_metadata_ = aws_metadata; } - /** - * Gets the AWS metadata - * field of the Span. - */ - const absl::flat_hash_map& awsMetadata() { - return aws_metadata_; - } - /** * Sets the recording start time of the traced operation/request. */ @@ -120,6 +115,16 @@ class Span : public Tracing::Span, Logger::Loggable { */ void setSampled(bool sampled) override { sampled_ = sampled; }; + /** + * Sets the server error as true for the traced operation/request. 
+ */ + void setServerError() { server_error_ = true; }; + + /** + * Sets the http response status code for the traced operation/request. + */ + void setResponseStatusCode(uint64_t status_code) { response_status_code_ = status_code; }; + /** * Adds X-Ray trace header to the set of outgoing headers. */ @@ -147,6 +152,23 @@ class Span : public Tracing::Span, Logger::Loggable { */ bool sampled() const { return sampled_; } + /** + * Determines if a server error occurred (response status code was 5XX Server Error). + */ + bool serverError() const { return server_error_; } + + /** + * Determines if a client error occurred (response status code was 4XX Client Error). + */ + bool clientError() const { return Http::CodeUtility::is4xx(response_status_code_); } + + /** + * Determines if a request was throttled (response status code was 429 Too Many Requests). + */ + bool isThrottled() const { + return Http::Code::TooManyRequests == static_cast(response_status_code_); + } + /** * Not used by X-Ray because the Spans are "logged" (serialized) to the X-Ray daemon. 
*/ @@ -183,6 +205,8 @@ class Span : public Tracing::Span, Logger::Loggable { absl::flat_hash_map http_request_annotations_; absl::flat_hash_map http_response_annotations_; absl::flat_hash_map custom_annotations_; + bool server_error_; + uint64_t response_status_code_; bool sampled_; }; diff --git a/test/extensions/tracers/xray/tracer_test.cc b/test/extensions/tracers/xray/tracer_test.cc index 51c8a5ef66cd1..cbac645b0d496 100644 --- a/test/extensions/tracers/xray/tracer_test.cc +++ b/test/extensions/tracers/xray/tracer_test.cc @@ -5,6 +5,7 @@ #include "source/common/protobuf/utility.h" #include "source/extensions/tracers/xray/daemon.pb.h" +#include "source/extensions/tracers/xray/daemon.pb.validate.h" #include "source/extensions/tracers/xray/tracer.h" #include "source/extensions/tracers/xray/xray_configuration.h" @@ -34,23 +35,49 @@ struct MockDaemonBroker : DaemonBroker { MOCK_METHOD(void, send, (std::string const&), (const, override)); }; +struct TraceProperties { + TraceProperties(const std::string span_name, const std::string origin_name, + const std::string aws_key_value, const std::string operation_name, + const std::string http_method, const std::string http_url, + const std::string user_agent) + : span_name(span_name), origin_name(origin_name), aws_key_value(aws_key_value), + operation_name(operation_name), http_method(http_method), http_url(http_url), + user_agent(user_agent) {} + const std::string span_name; + const std::string origin_name; + const std::string aws_key_value; + const std::string operation_name; + const std::string http_method; + const std::string http_url; + const std::string user_agent; +}; + class XRayTracerTest : public ::testing::Test { public: - XRayTracerTest() : broker_(std::make_unique("127.0.0.1:2000")) {} - + XRayTracerTest() + : broker_(std::make_unique("127.0.0.1:2000")), + expected_(std::make_unique("Service 1", "AWS::Service::Proxy", + "test_value", "Create", "POST", "/first/second", + "Mozilla/5.0 (Macintosh; Intel Mac OS 
X)")) {} absl::flat_hash_map aws_metadata_; NiceMock server_; std::unique_ptr broker_; + std::unique_ptr expected_; + void commonAsserts(daemon::Segment& s); }; +void XRayTracerTest::commonAsserts(daemon::Segment& s) { + EXPECT_EQ(expected_->span_name, s.name().c_str()); + EXPECT_EQ(expected_->origin_name, s.origin().c_str()); + EXPECT_EQ(expected_->aws_key_value, s.aws().fields().at("key").string_value().c_str()); + EXPECT_EQ(expected_->http_method, + s.http().request().fields().at("method").string_value().c_str()); + EXPECT_EQ(expected_->http_url, s.http().request().fields().at("url").string_value().c_str()); + EXPECT_EQ(expected_->user_agent, + s.http().request().fields().at(Tracing::Tags::get().UserAgent).string_value().c_str()); +} + TEST_F(XRayTracerTest, SerializeSpanTest) { - constexpr auto expected_span_name = "Service 1"; - constexpr auto expected_origin_name = "AWS::Service::Proxy"; - constexpr auto expected_aws_key_value = "test_value"; - constexpr auto expected_operation_name = "Create"; - constexpr auto expected_http_method = "POST"; - constexpr auto expected_http_url = "/first/second"; - constexpr auto expected_user_agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X)"; constexpr uint32_t expected_status_code = 202; constexpr uint32_t expected_content_length = 1337; constexpr auto expected_client_ip = "10.0.0.100"; @@ -61,42 +88,194 @@ TEST_F(XRayTracerTest, SerializeSpanTest) { ASSERT_FALSE(json.empty()); daemon::Segment s; MessageUtil::loadFromJson(json, s, ProtobufMessage::getNullValidationVisitor()); - ASSERT_FALSE(s.trace_id().empty()); - ASSERT_FALSE(s.id().empty()); - ASSERT_EQ(1, s.annotations().size()); - ASSERT_TRUE(s.parent_id().empty()); - ASSERT_STREQ(expected_span_name, s.name().c_str()); - ASSERT_STREQ(expected_origin_name, s.origin().c_str()); - ASSERT_STREQ(expected_aws_key_value, s.aws().fields().at("key").string_value().c_str()); - ASSERT_STREQ(expected_http_method, - s.http().request().fields().at("method").string_value().c_str()); - 
ASSERT_STREQ(expected_http_url, s.http().request().fields().at("url").string_value().c_str()); - ASSERT_STREQ(expected_user_agent, - s.http().request().fields().at("user_agent").string_value().c_str()); - ASSERT_DOUBLE_EQ(expected_status_code, - s.http().response().fields().at("status").number_value()); - ASSERT_DOUBLE_EQ(expected_content_length, - s.http().response().fields().at("content_length").number_value()); - ASSERT_STREQ(expected_client_ip, + TestUtility::validate(s); + commonAsserts(s); + EXPECT_FALSE(s.trace_id().empty()); + EXPECT_FALSE(s.id().empty()); + EXPECT_EQ(1, s.annotations().size()); + EXPECT_TRUE(s.parent_id().empty()); + EXPECT_FALSE(s.fault()); /*server error*/ + EXPECT_FALSE(s.error()); /*client error*/ + EXPECT_FALSE(s.throttle()); /*request throttled*/ + EXPECT_EQ(expected_status_code, + s.http().response().fields().at(Tracing::Tags::get().Status).number_value()); + EXPECT_EQ(expected_content_length, + s.http().response().fields().at("content_length").number_value()); + EXPECT_STREQ(expected_client_ip, s.http().request().fields().at("client_ip").string_value().c_str()); - ASSERT_EQ(expected_x_forwarded_for, + EXPECT_EQ(expected_x_forwarded_for, s.http().request().fields().at("x_forwarded_for").bool_value()); - ASSERT_STREQ(expected_upstream_address, s.annotations().at("upstream_address").c_str()); + EXPECT_STREQ(expected_upstream_address, + s.annotations().at(Tracing::Tags::get().UpstreamAddress).c_str()); }; EXPECT_CALL(*broker_, send(_)).WillOnce(Invoke(on_send)); - aws_metadata_.insert({"key", ValueUtil::stringValue(expected_aws_key_value)}); - Tracer tracer{expected_span_name, expected_origin_name, aws_metadata_, - std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; - auto span = tracer.startSpan(expected_operation_name, server_.timeSource().systemTime(), + aws_metadata_.insert({"key", ValueUtil::stringValue(expected_->aws_key_value)}); + Tracer tracer{expected_->span_name, expected_->origin_name, aws_metadata_, 
+ std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; + auto span = tracer.startSpan(expected_->operation_name, server_.timeSource().systemTime(), + absl::nullopt /*headers*/); + span->setTag(Tracing::Tags::get().HttpMethod, expected_->http_method); + span->setTag(Tracing::Tags::get().HttpUrl, expected_->http_url); + span->setTag(Tracing::Tags::get().UserAgent, expected_->user_agent); + span->setTag(Tracing::Tags::get().HttpStatusCode, absl::StrFormat("%d", expected_status_code)); + span->setTag(Tracing::Tags::get().ResponseSize, absl::StrFormat("%d", expected_content_length)); + span->setTag(Tracing::Tags::get().PeerAddress, expected_client_ip); + span->setTag(Tracing::Tags::get().UpstreamAddress, expected_upstream_address); + span->finishSpan(); +} + +TEST_F(XRayTracerTest, SerializeSpanTestServerError) { + constexpr auto expected_error = "true"; + constexpr uint32_t expected_status_code = 503; + + auto on_send = [&](const std::string& json) { + ASSERT_FALSE(json.empty()); + daemon::Segment s; + MessageUtil::loadFromJson(json, s, ProtobufMessage::getNullValidationVisitor()); + TestUtility::validate(s); + commonAsserts(s); + EXPECT_FALSE(s.trace_id().empty()); + EXPECT_FALSE(s.id().empty()); + EXPECT_TRUE(s.parent_id().empty()); + EXPECT_TRUE(s.fault()); /*server error*/ + EXPECT_FALSE(s.error()); /*client error*/ + EXPECT_EQ(expected_status_code, + s.http().response().fields().at(Tracing::Tags::get().Status).number_value()); + }; + + EXPECT_CALL(*broker_, send(_)).WillOnce(Invoke(on_send)); + aws_metadata_.insert({"key", ValueUtil::stringValue(expected_->aws_key_value)}); + Tracer tracer{expected_->span_name, expected_->origin_name, aws_metadata_, + std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; + auto span = tracer.startSpan(expected_->operation_name, server_.timeSource().systemTime(), + absl::nullopt /*headers*/); + span->setTag(Tracing::Tags::get().HttpMethod, expected_->http_method); + 
span->setTag(Tracing::Tags::get().HttpUrl, expected_->http_url); + span->setTag(Tracing::Tags::get().UserAgent, expected_->user_agent); + span->setTag(Tracing::Tags::get().Error, expected_error); + span->setTag(Tracing::Tags::get().HttpStatusCode, absl::StrFormat("%d", expected_status_code)); + span->finishSpan(); +} + +TEST_F(XRayTracerTest, SerializeSpanTestClientError) { + constexpr uint32_t expected_status_code = 404; + + auto on_send = [&](const std::string& json) { + ASSERT_FALSE(json.empty()); + daemon::Segment s; + MessageUtil::loadFromJson(json, s, ProtobufMessage::getNullValidationVisitor()); + TestUtility::validate(s); + commonAsserts(s); + EXPECT_FALSE(s.trace_id().empty()); + EXPECT_FALSE(s.id().empty()); + EXPECT_TRUE(s.parent_id().empty()); + EXPECT_FALSE(s.fault()); /*server error*/ + EXPECT_TRUE(s.error()); /*client error*/ + EXPECT_FALSE(s.throttle()); /*request throttled*/ + EXPECT_EQ(expected_status_code, + s.http().response().fields().at(Tracing::Tags::get().Status).number_value()); + }; + + EXPECT_CALL(*broker_, send(_)).WillOnce(Invoke(on_send)); + aws_metadata_.insert({"key", ValueUtil::stringValue(expected_->aws_key_value)}); + Tracer tracer{expected_->span_name, expected_->origin_name, aws_metadata_, + std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; + auto span = tracer.startSpan(expected_->operation_name, server_.timeSource().systemTime(), + absl::nullopt /*headers*/); + span->setTag(Tracing::Tags::get().HttpMethod, expected_->http_method); + span->setTag(Tracing::Tags::get().HttpUrl, expected_->http_url); + span->setTag(Tracing::Tags::get().UserAgent, expected_->user_agent); + span->setTag(Tracing::Tags::get().HttpStatusCode, absl::StrFormat("%d", expected_status_code)); + span->finishSpan(); +} + +TEST_F(XRayTracerTest, SerializeSpanTestClientErrorWithThrottle) { + constexpr uint32_t expected_status_code = 429; + + auto on_send = [&](const std::string& json) { + ASSERT_FALSE(json.empty()); + daemon::Segment 
s; + MessageUtil::loadFromJson(json, s, ProtobufMessage::getNullValidationVisitor()); + TestUtility::validate(s); + commonAsserts(s); + EXPECT_FALSE(s.trace_id().empty()); + EXPECT_FALSE(s.id().empty()); + EXPECT_TRUE(s.parent_id().empty()); + EXPECT_FALSE(s.fault()); /*server error*/ + EXPECT_TRUE(s.error()); /*client error*/ + EXPECT_TRUE(s.throttle()); /*request throttled*/ + EXPECT_EQ(expected_status_code, + s.http().response().fields().at(Tracing::Tags::get().Status).number_value()); + }; + + EXPECT_CALL(*broker_, send(_)).WillOnce(Invoke(on_send)); + aws_metadata_.insert({"key", ValueUtil::stringValue(expected_->aws_key_value)}); + Tracer tracer{expected_->span_name, expected_->origin_name, aws_metadata_, + std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; + auto span = tracer.startSpan(expected_->operation_name, server_.timeSource().systemTime(), + absl::nullopt /*headers*/); + span->setTag(Tracing::Tags::get().HttpMethod, expected_->http_method); + span->setTag(Tracing::Tags::get().HttpUrl, expected_->http_url); + span->setTag(Tracing::Tags::get().UserAgent, expected_->user_agent); + span->setTag(Tracing::Tags::get().HttpStatusCode, absl::StrFormat("%d", expected_status_code)); + span->finishSpan(); +} + +TEST_F(XRayTracerTest, SerializeSpanTestWithEmptyValue) { + auto on_send = [&](const std::string& json) { + ASSERT_FALSE(json.empty()); + daemon::Segment s; + MessageUtil::loadFromJson(json, s, ProtobufMessage::getNullValidationVisitor()); + TestUtility::validate(s); + commonAsserts(s); + EXPECT_FALSE(s.trace_id().empty()); + EXPECT_FALSE(s.id().empty()); + EXPECT_TRUE(s.parent_id().empty()); + EXPECT_FALSE(s.http().request().fields().contains(Tracing::Tags::get().Status)); + }; + + EXPECT_CALL(*broker_, send(_)).WillOnce(Invoke(on_send)); + aws_metadata_.insert({"key", ValueUtil::stringValue(expected_->aws_key_value)}); + Tracer tracer{expected_->span_name, expected_->origin_name, aws_metadata_, + std::move(broker_), 
server_.timeSource(), server_.api().randomGenerator()}; + auto span = tracer.startSpan(expected_->operation_name, server_.timeSource().systemTime(), + absl::nullopt /*headers*/); + span->setTag(Tracing::Tags::get().HttpMethod, expected_->http_method); + span->setTag(Tracing::Tags::get().HttpUrl, expected_->http_url); + span->setTag(Tracing::Tags::get().UserAgent, expected_->user_agent); + span->setTag(Tracing::Tags::get().HttpStatusCode, ""); // Send empty string for value + span->finishSpan(); +} + +TEST_F(XRayTracerTest, SerializeSpanTestWithStatusCodeNotANumber) { + constexpr auto expected_status_code = "ok"; // status code which is not a number + constexpr auto expected_content_length = "huge"; // response length which is not a number + + auto on_send = [&](const std::string& json) { + ASSERT_FALSE(json.empty()); + daemon::Segment s; + MessageUtil::loadFromJson(json, s, ProtobufMessage::getNullValidationVisitor()); + TestUtility::validate(s); + commonAsserts(s); + EXPECT_FALSE(s.trace_id().empty()); + EXPECT_FALSE(s.id().empty()); + EXPECT_TRUE(s.parent_id().empty()); + EXPECT_FALSE(s.http().request().fields().contains(Tracing::Tags::get().Status)); + EXPECT_FALSE(s.http().request().fields().contains("content_length")); + }; + + EXPECT_CALL(*broker_, send(_)).WillOnce(Invoke(on_send)); + aws_metadata_.insert({"key", ValueUtil::stringValue(expected_->aws_key_value)}); + Tracer tracer{expected_->span_name, expected_->origin_name, aws_metadata_, + std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; + auto span = tracer.startSpan(expected_->operation_name, server_.timeSource().systemTime(), absl::nullopt /*headers*/); - span->setTag("http.method", expected_http_method); - span->setTag("http.url", expected_http_url); - span->setTag("user_agent", expected_user_agent); - span->setTag("http.status_code", absl::StrFormat("%d", expected_status_code)); - span->setTag("response_size", absl::StrFormat("%d", expected_content_length)); - 
span->setTag("peer.address", expected_client_ip); - span->setTag("upstream_address", expected_upstream_address); + span->setTag(Tracing::Tags::get().HttpMethod, expected_->http_method); + span->setTag(Tracing::Tags::get().HttpUrl, expected_->http_url); + span->setTag(Tracing::Tags::get().UserAgent, expected_->user_agent); + span->setTag(Tracing::Tags::get().HttpStatusCode, expected_status_code); + span->setTag(Tracing::Tags::get().ResponseSize, expected_content_length); span->finishSpan(); } @@ -115,7 +294,16 @@ TEST_F(XRayTracerTest, BaggageNotImplemented) { span->finishSpan(); // Baggage isn't supported so getBaggage should always return empty - ASSERT_EQ("", span->getBaggage("baggage_key")); + EXPECT_EQ("", span->getBaggage("baggage_key")); +} + +TEST_F(XRayTracerTest, LogNotImplemented) { + Tracer tracer{"" /*span name*/, "" /*origin*/, aws_metadata_, + std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; + auto span = tracer.createNonSampledSpan(); + span->log(SystemTime{std::chrono::duration(100)}, "dummy log value"); + span->finishSpan(); + // Nothing to assert here as log is a dummy function } TEST_F(XRayTracerTest, GetTraceId) { @@ -125,20 +313,18 @@ TEST_F(XRayTracerTest, GetTraceId) { span->finishSpan(); // This method is unimplemented and a noop. 
- ASSERT_EQ(span->getTraceIdAsHex(), ""); + EXPECT_EQ(span->getTraceIdAsHex(), ""); } TEST_F(XRayTracerTest, ChildSpanHasParentInfo) { NiceMock config; - constexpr auto expected_span_name = "Service 1"; - constexpr auto expected_operation_name = "Create"; const auto& broker = *broker_; - Tracer tracer{expected_span_name, "", + Tracer tracer{expected_->span_name, "", aws_metadata_, std::move(broker_), server_.timeSource(), server_.api().randomGenerator()}; // Span id taken from random generator EXPECT_CALL(server_.api_.random_, random()).WillOnce(Return(999)); - auto parent_span = tracer.startSpan(expected_operation_name, server_.timeSource().systemTime(), + auto parent_span = tracer.startSpan(expected_->operation_name, server_.timeSource().systemTime(), absl::nullopt /*headers*/); const XRay::Span* xray_parent_span = static_cast(parent_span.get()); @@ -146,11 +332,12 @@ TEST_F(XRayTracerTest, ChildSpanHasParentInfo) { ASSERT_FALSE(json.empty()); daemon::Segment s; MessageUtil::loadFromJson(json, s, ProtobufMessage::getNullValidationVisitor()); + TestUtility::validate(s); // Hex encoded 64 bit identifier - ASSERT_STREQ("00000000000003e7", s.parent_id().c_str()); - ASSERT_STREQ(expected_span_name, s.name().c_str()); - ASSERT_STREQ(xray_parent_span->traceId().c_str(), s.trace_id().c_str()); - ASSERT_STREQ("0000003d25bebe62", s.id().c_str()); + EXPECT_STREQ("00000000000003e7", s.parent_id().c_str()); + EXPECT_EQ(expected_->span_name, s.name().c_str()); + EXPECT_STREQ(xray_parent_span->traceId().c_str(), s.trace_id().c_str()); + EXPECT_STREQ("0000003d25bebe62", s.id().c_str()); }; EXPECT_CALL(broker, send(_)).WillOnce(Invoke(on_send)); @@ -158,7 +345,7 @@ TEST_F(XRayTracerTest, ChildSpanHasParentInfo) { // Span id taken from random generator EXPECT_CALL(server_.api_.random_, random()).WillOnce(Return(262626262626)); auto child = - parent_span->spawnChild(config, expected_operation_name, server_.timeSource().systemTime()); + parent_span->spawnChild(config, 
expected_->operation_name, server_.timeSource().systemTime()); child->finishSpan(); } @@ -178,8 +365,53 @@ TEST_F(XRayTracerTest, UseExistingHeaderInformation) { auto span = tracer.startSpan(operation_name, server_.timeSource().systemTime(), xray_header); const XRay::Span* xray_span = static_cast(span.get()); - ASSERT_STREQ(xray_header.trace_id_.c_str(), xray_span->traceId().c_str()); - ASSERT_STREQ(xray_header.parent_id_.c_str(), xray_span->parentId().c_str()); + EXPECT_STREQ(xray_header.trace_id_.c_str(), xray_span->traceId().c_str()); + EXPECT_STREQ(xray_header.parent_id_.c_str(), xray_span->parentId().c_str()); +} + +TEST_F(XRayTracerTest, DontStartSpanOnNonSampledSpans) { + XRayHeader xray_header; + xray_header.trace_id_ = "a"; + xray_header.parent_id_ = "b"; + xray_header.sample_decision_ = + SamplingDecision::NotSampled; // not sampled means we should panic on calling startSpan + constexpr auto span_name = "my span"; + constexpr auto operation_name = "my operation"; + + Tracer tracer{span_name, + "", + aws_metadata_, + std::move(broker_), + server_.timeSource(), + server_.api().randomGenerator()}; + Tracing::SpanPtr span; + ASSERT_DEATH(span = + tracer.startSpan(operation_name, server_.timeSource().systemTime(), xray_header), + "panic: not reached"); +} + +TEST_F(XRayTracerTest, UnknownSpanStillSampled) { + XRayHeader xray_header; + xray_header.trace_id_ = "a"; + xray_header.parent_id_ = "b"; + xray_header.sample_decision_ = SamplingDecision::Unknown; + constexpr auto span_name = "my span"; + constexpr auto operation_name = "my operation"; + + Tracer tracer{span_name, + "", + aws_metadata_, + std::move(broker_), + server_.timeSource(), + server_.api().randomGenerator()}; + auto span = tracer.startSpan(operation_name, server_.timeSource().systemTime(), xray_header); + + const XRay::Span* xray_span = static_cast(span.get()); + EXPECT_STREQ(xray_header.trace_id_.c_str(), xray_span->traceId().c_str()); + EXPECT_STREQ(xray_header.parent_id_.c_str(), 
 xray_span->parentId().c_str()); + // Doesn't matter if the x-ray header says that the sampling decision is unknown, + // as soon as we start a span it is by default sampled. + EXPECT_TRUE(xray_span->sampled()); } TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeader) { @@ -198,9 +430,9 @@ TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeader) { span->injectContext(request_headers); auto header = request_headers.get(Http::LowerCaseString{XRayTraceHeader}); ASSERT_FALSE(header.empty()); - ASSERT_NE(header[0]->value().getStringView().find("Root="), absl::string_view::npos); - ASSERT_NE(header[0]->value().getStringView().find("Parent="), absl::string_view::npos); - ASSERT_NE(header[0]->value().getStringView().find("Sampled=1"), absl::string_view::npos); + EXPECT_NE(header[0]->value().getStringView().find("Root="), absl::string_view::npos); + EXPECT_NE(header[0]->value().getStringView().find("Parent="), absl::string_view::npos); + EXPECT_NE(header[0]->value().getStringView().find("Sampled=1"), absl::string_view::npos); } TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeaderNonSampled) { @@ -216,9 +448,9 @@ TEST_F(XRayTracerTest, SpanInjectContextHasXRayHeaderNonSampled) { span->injectContext(request_headers); auto header = request_headers.get(Http::LowerCaseString{XRayTraceHeader}); ASSERT_FALSE(header.empty()); - ASSERT_NE(header[0]->value().getStringView().find("Root="), absl::string_view::npos); - ASSERT_NE(header[0]->value().getStringView().find("Parent="), absl::string_view::npos); - ASSERT_NE(header[0]->value().getStringView().find("Sampled=0"), absl::string_view::npos); + EXPECT_NE(header[0]->value().getStringView().find("Root="), absl::string_view::npos); + EXPECT_NE(header[0]->value().getStringView().find("Parent="), absl::string_view::npos); + EXPECT_NE(header[0]->value().getStringView().find("Sampled=0"), absl::string_view::npos); } TEST_F(XRayTracerTest, TraceIDFormatTest) { @@ -233,10 +465,10 @@ TEST_F(XRayTracerTest, TraceIDFormatTest) { // logic to create 
a trace ID XRay::Span* xray_span = span.get(); std::vector parts = absl::StrSplit(xray_span->traceId(), absl::ByChar('-')); - ASSERT_EQ(3, parts.size()); - ASSERT_EQ(1, parts[0].length()); - ASSERT_EQ(8, parts[1].length()); - ASSERT_EQ(24, parts[2].length()); + EXPECT_EQ(3, parts.size()); + EXPECT_EQ(1, parts[0].length()); + EXPECT_EQ(8, parts[1].length()); + EXPECT_EQ(24, parts[2].length()); } class XRayDaemonTest : public testing::TestWithParam {}; @@ -256,7 +488,7 @@ TEST_P(XRayDaemonTest, VerifyUdpPacketContents) { auto span = tracer.startSpan("ingress" /*operation name*/, server.timeSource().systemTime(), absl::nullopt /*headers*/); - span->setTag("http.status_code", "202"); + span->setTag(Tracing::Tags::get().HttpStatusCode, "202"); span->finishSpan(); Network::UdpRecvData datagram; @@ -275,6 +507,7 @@ TEST_P(XRayDaemonTest, VerifyUdpPacketContents) { // Deserialize the body to verify it. source::extensions::tracers::xray::daemon::Segment seg; MessageUtil::loadFromJson(body, seg, ProtobufMessage::getNullValidationVisitor()); + TestUtility::validate(seg); EXPECT_STREQ("my_segment", seg.name().c_str()); for (auto&& f : seg.http().request().fields()) { // there should only be a single field From 83e96c52e75551871f55e682d0fa2f700be6776a Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 21 Jul 2021 11:25:43 -0400 Subject: [PATCH 23/57] http: switching from XFP to scheme (#17372) Corrects all Envoy uses of ForwardedProto which actually want request URI over to :scheme As a reminder, XFP indicates the encryption of the (original) downstream connection where :scheme is part of the URI and the resource requested. It's legal (though unusual) to request http:// urls over a TLS connection for HTTP/2. It's possible (if ill advised) to have an internal mesh forwarding https schemed requests in the clear. 
Current uses of X-Forwarded-Proto are in the HCM, clearing XFP from untrusted users (unchanged) in the HCM, setting absent XFP based on downstream transport security (unchanged) in the HCM setting absent :scheme to XFP (unchanged) in buildOriginalUri, changing from using XFP to scheme (changed. new URIs should be based on original URIs not on transport security. in the router, clearing default port based on XFP (unchanged) in the router serving redirect URLs based on scheme (changed - used to be XFP but is now based on the scheme of the original URI) in the router, applying SSL route redirect based on XFP (unchanged) in the router, using :scheme for internal redirect url checks (changed - used to use XFP. new URIs should be based on original URI) in the cache filter, using :scheme to serve content (changed we used to serve based on XFP but if http://foo.com/ differs from https://foo.com and the http version is requested over a TLS connection the http response should be served) in oauth2 serving redirect URLs based on scheme (changed this used to be based on XFP but URLs should be based on original URL scheme) Risk Level: High Testing: updated tests Docs Changes: inline Release Notes: inline Runtime guard: envoy.reloadable_features.correct_scheme_and_xfp Fixes #14587 Signed-off-by: Alyssa Wilk --- .../http/http_conn_man/headers.rst | 4 ++ docs/root/faq/debugging/xfp_vs_scheme.rst | 14 +++++++ docs/root/faq/overview.rst | 1 + docs/root/version_history/current.rst | 3 ++ source/common/http/conn_manager_utility.cc | 2 + source/common/http/utility.cc | 9 ++++- source/common/http/utility.h | 9 +++++ source/common/router/config_impl.cc | 25 ++++++++----- source/common/router/router.cc | 12 ++++-- source/common/runtime/runtime_features.cc | 2 + source/common/tracing/http_tracer_impl.cc | 1 + .../filters/http/cache/cacheability_utils.cc | 6 +-- .../filters/http/cache/http_cache.cc | 12 +++--- .../extensions/filters/http/oauth2/filter.cc | 3 +-
test/common/http/async_client_impl_test.cc | 3 +- test/common/http/common.cc | 19 +++++++--- test/common/http/common.h | 3 +- test/common/http/utility_test.cc | 8 ++-- .../config_impl_headermap_benchmark_test.cc | 6 +-- test/common/router/config_impl_test.cc | 37 ++++++++++--------- test/common/router/router_ratelimit_test.cc | 7 +++- test/common/router/router_test.cc | 3 +- test/common/router/router_test_base.cc | 2 +- test/common/tracing/http_tracer_impl_test.cc | 36 +++++++----------- .../http/cache/cache_custom_headers_test.cc | 2 +- .../filters/http/cache/cache_filter_test.cc | 2 +- .../http/cache/cacheability_utils_test.cc | 18 ++++----- .../filters/http/cache/http_cache_test.cc | 6 +-- .../simple_http_cache_test.cc | 2 +- .../filters/http/ext_proc/filter_test.cc | 30 ++------------- .../ext_proc/streaming_integration_test.cc | 4 +- .../filters/http/oauth2/filter_test.cc | 26 ++++++------- test/integration/integration_test.cc | 1 + test/tools/router_check/router.cc | 1 + 34 files changed, 170 insertions(+), 149 deletions(-) create mode 100644 docs/root/faq/debugging/xfp_vs_scheme.rst diff --git a/docs/root/configuration/http/http_conn_man/headers.rst b/docs/root/configuration/http/http_conn_man/headers.rst index 9fd0e2fa0c0b9..5ea4e7946dc84 100644 --- a/docs/root/configuration/http/http_conn_man/headers.rst +++ b/docs/root/configuration/http/http_conn_man/headers.rst @@ -25,6 +25,8 @@ This default behavior can be overridden via the :ref:`scheme_header_transformati ` configuration option. +The *:scheme* header will be used by Envoy over *x-forwarded-proto* where the URI scheme is wanted, for example serving content from cache based on the *:scheme* header rather than X-Forwarded-Proto, or setting the scheme of redirects based on the scheme of the original URI. See :ref:`why_is_envoy_using_xfp_or_scheme` for more details. + .. [1] Edge Envoys often have plaintext HTTP/1.1 listeners. 
If Envoy trusts absolute URL scheme from fully qualfied URLs, a MiTM can adjust relative URLs to https absolute URLs, and inadvertantly cause the Envoy's upstream to send PII or other sensitive data over what it then believes is a secure connection. .. [2] Unlike HTTP/1.1, HTTP/2 is in practice always served over TLS via ALPN for edge Envoys. In mesh networks using insecure HTTP/2, if the downstream is not trusted to set scheme, the :ref:`scheme_header_transformation ` should be used. @@ -368,6 +370,8 @@ If the scheme is changed via the :ref:`scheme_header_transformation ` configuration option, *x-forwarded-proto* will be updated as well. +The *x-forwarded-proto* header will be used by Envoy over *:scheme* where the underlying encryption is wanted, for example clearing default ports based on *x-forwarded-proto*. See :ref:`why_is_envoy_using_xfp_or_scheme` for more details. + .. _config_http_conn_man_headers_x-request-id: x-request-id diff --git a/docs/root/faq/debugging/xfp_vs_scheme.rst b/docs/root/faq/debugging/xfp_vs_scheme.rst new file mode 100644 index 0000000000000..6bdb4550dc996 --- /dev/null +++ b/docs/root/faq/debugging/xfp_vs_scheme.rst @@ -0,0 +1,14 @@ +.. _why_is_envoy_using_xfp_or_scheme: + +Why is Envoy operating on X-Forwarded-Proto instead of :scheme or vice-versa? +============================================================================= + +With almost all requests, the value of the X-Forwarded-Proto header and the :scheme +header (if present) will be the same. Generally users request https:// resources over +TLS connections and http:// resources in the clear. However, it is entirely possible +for a user to request http:// content over a TLS connection or in internal meshes to forward +https:// requests in cleartext. 
In these cases Envoy will attempt to use the :scheme +header when referring to content (say serving a given entity out of cache based on the URL +scheme) and the X-Forwarded-Proto header when doing operations related to underlying +encryption (stripping the default port based on if the request was TLS on port 443, or +cleartext on port 80) diff --git a/docs/root/faq/overview.rst b/docs/root/faq/overview.rst index 33d8f906f0eaf..4da3fd0dde41d 100644 --- a/docs/root/faq/overview.rst +++ b/docs/root/faq/overview.rst @@ -36,6 +36,7 @@ Debugging debugging/why_is_envoy_404ing_connect_requests debugging/why_is_envoy_sending_413s debugging/why_is_my_route_not_found + debugging/xfp_vs_scheme Performance ----------- diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 2a318236bbdf1..5261f906b4ec3 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -10,6 +10,9 @@ Minor Behavior Changes *Changes that may cause incompatibilities for some users, but should not for most* * grpc: gRPC async client can be cached and shared accross filter instances in the same thread, this feature is turned off by default, can be turned on by setting runtime guard ``envoy.reloadable_features.enable_grpc_async_client_cache`` to true. +* http: correct the use of the ``x-forwarded-proto`` header and the ``:scheme`` header. Where they differ + (which is rare) ``:scheme`` will now be used for serving redirect URIs and cached content. This behavior + can be reverted by setting runtime guard ``correct_scheme_and_xfp`` to false. * http: set the default :ref:`lazy headermap threshold ` to 3, which defines the minimal number of headers in a request/response/trailers required for using a dictionary in addition to the list.
Setting the `envoy.http.headermap.lazy_map_min_size` runtime diff --git a/source/common/http/conn_manager_utility.cc b/source/common/http/conn_manager_utility.cc index a4b0f3131f94b..2171e9d3f6b12 100644 --- a/source/common/http/conn_manager_utility.cc +++ b/source/common/http/conn_manager_utility.cc @@ -165,10 +165,12 @@ ConnectionManagerUtility::MutateRequestHeadersResult ConnectionManagerUtility::m request_headers.setReferenceForwardedProto(connection.ssl() ? Headers::get().SchemeValues.Https : Headers::get().SchemeValues.Http); } + if (config.schemeToSet().has_value()) { request_headers.setScheme(config.schemeToSet().value()); request_headers.setForwardedProto(config.schemeToSet().value()); } + // If :scheme is not set, sets :scheme based on X-Forwarded-Proto if a valid scheme, // else encryption level. // X-Forwarded-Proto and :scheme may still differ if different values are sent from downstream. diff --git a/source/common/http/utility.cc b/source/common/http/utility.cc index 3320090c1c3a6..cc472f8e1cecd 100644 --- a/source/common/http/utility.cc +++ b/source/common/http/utility.cc @@ -768,6 +768,13 @@ const std::string& Utility::getProtocolString(const Protocol protocol) { NOT_REACHED_GCOVR_EXCL_LINE; } +absl::string_view Utility::getScheme(const RequestHeaderMap& headers) { + if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.correct_scheme_and_xfp")) { + return headers.getSchemeValue(); + } + return headers.getForwardedProtoValue(); +} + std::string Utility::buildOriginalUri(const Http::RequestHeaderMap& request_headers, const absl::optional max_path_length) { if (!request_headers.Path()) { @@ -781,7 +788,7 @@ std::string Utility::buildOriginalUri(const Http::RequestHeaderMap& request_head path = path.substr(0, max_path_length.value()); } - return absl::StrCat(request_headers.getForwardedProtoValue(), "://", + return absl::StrCat(Http::Utility::getScheme(request_headers), "://", request_headers.getHostValue(), path); } diff --git 
a/source/common/http/utility.h b/source/common/http/utility.h index c5dd2e270d0cb..20d5ae98b223e 100644 --- a/source/common/http/utility.h +++ b/source/common/http/utility.h @@ -395,6 +395,15 @@ bool sanitizeConnectionHeader(Http::RequestHeaderMap& headers); */ const std::string& getProtocolString(const Protocol p); +/** + * Return the scheme of the request. + * For legacy code (envoy.reloadable_features.correct_scheme_and_xfp == false) this + * will be the value of the X-Forwarded-Proto header value. By default it will + * return the scheme if present, otherwise the value of X-Forwarded-Proto if + * present. + */ +absl::string_view getScheme(const RequestHeaderMap& headers); + /** * Constructs the original URI sent from the client from * the request headers. diff --git a/source/common/router/config_impl.cc b/source/common/router/config_impl.cc index b1432fa02bbf7..79ffc9aa09bb6 100644 --- a/source/common/router/config_impl.cc +++ b/source/common/router/config_impl.cc @@ -750,6 +750,9 @@ absl::string_view RouteEntryImplBase::processRequestHost(const Http::RequestHead if (host_end != absl::string_view::npos) { absl::string_view request_port = request_host.substr(host_end); + // In the rare case that X-Forwarded-Proto and scheme disagree (say http URL over an HTTPS + // connection), do port stripping based on X-Forwarded-Proto so http://foo.com:80 won't + // have the port stripped when served over TLS. absl::string_view request_protocol = headers.getForwardedProtoValue(); bool remove_port = !new_port.empty(); @@ -781,8 +784,9 @@ std::string RouteEntryImplBase::newPath(const Http::RequestHeaderMap& headers) c } else if (https_redirect_) { final_scheme = Http::Headers::get().SchemeValues.Https; } else { - ASSERT(headers.ForwardedProto()); - final_scheme = headers.getForwardedProtoValue(); + // Serve the redirect URL based on the scheme of the original URL, not the + // security of the underlying connection. 
+ final_scheme = Http::Utility::getScheme(headers); } if (!port_redirect_.empty()) { @@ -1367,18 +1371,21 @@ RouteConstSharedPtr VirtualHostImpl::getRouteFromEntries(const RouteCallback& cb const Http::RequestHeaderMap& headers, const StreamInfo::StreamInfo& stream_info, uint64_t random_value) const { - // No x-forwarded-proto header. This normally only happens when ActiveStream::decodeHeaders - // bails early (as it rejects a request), so there is no routing is going to happen anyway. - const auto* forwarded_proto_header = headers.ForwardedProto(); - if (forwarded_proto_header == nullptr) { + // In the rare case that X-Forwarded-Proto and scheme disagree (say http URL over an HTTPS + // connection), force a redirect based on underlying protocol, rather than URL + // scheme, so don't force a redirect for a http:// url served over a TLS + // connection. + const absl::string_view scheme = headers.getForwardedProtoValue(); + if (scheme.empty()) { + // No scheme header. This normally only happens when ActiveStream::decodeHeaders + // bails early (as it rejects a request), or a buggy filter removes the :scheme header. return nullptr; } // First check for ssl redirect. - if (ssl_requirements_ == SslRequirements::All && forwarded_proto_header->value() != "https") { + if (ssl_requirements_ == SslRequirements::All && scheme != "https") { return SSL_REDIRECT_ROUTE; - } else if (ssl_requirements_ == SslRequirements::ExternalOnly && - forwarded_proto_header->value() != "https" && + } else if (ssl_requirements_ == SslRequirements::ExternalOnly && scheme != "https" && !Http::HeaderUtility::isEnvoyInternalRequest(headers)) { return SSL_REDIRECT_ROUTE; } diff --git a/source/common/router/router.cc b/source/common/router/router.cc index 79ea7b165065e..0a5bb031136bb 100644 --- a/source/common/router/router.cc +++ b/source/common/router/router.cc @@ -51,7 +51,7 @@ uint32_t getLength(const Buffer::Instance* instance) { return instance ? 
instanc bool schemeIsHttp(const Http::RequestHeaderMap& downstream_headers, const Network::Connection& connection) { - if (downstream_headers.getForwardedProtoValue() == Http::Headers::get().SchemeValues.Http) { + if (Http::Utility::getScheme(downstream_headers) == Http::Headers::get().SchemeValues.Http) { return true; } if (!connection.ssl()) { @@ -83,8 +83,12 @@ void FilterUtility::setUpstreamScheme(Http::RequestHeaderMap& headers, bool down if (Http::HeaderUtility::schemeIsValid(headers.getSchemeValue())) { return; } - if (Http::HeaderUtility::schemeIsValid(headers.getForwardedProtoValue())) { - headers.setScheme(headers.getForwardedProtoValue()); + // After all the changes in https://github.com/envoyproxy/envoy/issues/14587 + // this path should only occur if a buggy filter has removed the :scheme + // header. In that case best-effort set from X-Forwarded-Proto. + absl::string_view xfp = headers.getForwardedProtoValue(); + if (Http::HeaderUtility::schemeIsValid(xfp)) { + headers.setScheme(xfp); return; } } @@ -1516,7 +1520,7 @@ bool Filter::convertRequestHeadersForInternalRedirect(Http::RequestHeaderMap& do } const auto& policy = route_entry_->internalRedirectPolicy(); - // Don't allow serving TLS responses over plaintext unless allowed by policy. 
+ // Don't change the scheme from the original request const bool scheme_is_http = schemeIsHttp(downstream_headers, *callbacks_->connection()); const bool target_is_http = absolute_url.scheme() == Http::Headers::get().SchemeValues.Http; if (!policy.isCrossSchemeRedirectAllowed() && scheme_is_http != target_is_http) { diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index f497453df0cb9..fec0f2bc740f6 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -60,6 +60,8 @@ constexpr const char* runtime_features[] = { "envoy.reloadable_features.allow_response_for_timeout", "envoy.reloadable_features.check_unsupported_typed_per_filter_config", "envoy.reloadable_features.check_ocsp_policy", + "envoy.reloadable_features.correct_scheme_and_xfp", + "envoy.reloadable_features.disable_tls_inspector_injection", "envoy.reloadable_features.dont_add_content_length_for_bodiless_requests", "envoy.reloadable_features.enable_compression_without_content_length_header", "envoy.reloadable_features.grpc_bridge_stats_disabled", diff --git a/source/common/tracing/http_tracer_impl.cc b/source/common/tracing/http_tracer_impl.cc index 35263ac7ce052..7df66b8189d18 100644 --- a/source/common/tracing/http_tracer_impl.cc +++ b/source/common/tracing/http_tracer_impl.cc @@ -16,6 +16,7 @@ #include "source/common/grpc/common.h" #include "source/common/http/codes.h" #include "source/common/http/header_map_impl.h" +#include "source/common/http/header_utility.h" #include "source/common/http/headers.h" #include "source/common/http/utility.h" #include "source/common/protobuf/utility.h" diff --git a/source/extensions/filters/http/cache/cacheability_utils.cc b/source/extensions/filters/http/cache/cacheability_utils.cc index 20fe9107ebfe5..5a34dcb87c949 100644 --- a/source/extensions/filters/http/cache/cacheability_utils.cc +++ b/source/extensions/filters/http/cache/cacheability_utils.cc @@ -4,6 +4,7 @@ 
#include "source/common/common/macros.h" #include "source/common/common/utility.h" +#include "source/common/http/utility.h" #include "source/extensions/filters/http/cache/cache_custom_headers.h" namespace Envoy { @@ -33,7 +34,7 @@ const std::vector& conditionalHeaders() { bool CacheabilityUtils::canServeRequestFromCache(const Http::RequestHeaderMap& headers) { const absl::string_view method = headers.getMethodValue(); - const absl::string_view forwarded_proto = headers.getForwardedProtoValue(); + const absl::string_view scheme = Http::Utility::getScheme(headers); const Http::HeaderValues& header_values = Http::Headers::get(); // Check if the request contains any conditional headers. @@ -52,8 +53,7 @@ bool CacheabilityUtils::canServeRequestFromCache(const Http::RequestHeaderMap& h return headers.Path() && headers.Host() && !headers.getInline(CacheCustomHeaders::authorization()) && (method == header_values.MethodValues.Get || method == header_values.MethodValues.Head) && - (forwarded_proto == header_values.SchemeValues.Http || - forwarded_proto == header_values.SchemeValues.Https); + (scheme == header_values.SchemeValues.Http || scheme == header_values.SchemeValues.Https); } bool CacheabilityUtils::isCacheableResponse(const Http::ResponseHeaderMap& headers, diff --git a/source/extensions/filters/http/cache/http_cache.cc b/source/extensions/filters/http/cache/http_cache.cc index 2fef083676bfb..3a24900fb9822 100644 --- a/source/extensions/filters/http/cache/http_cache.cc +++ b/source/extensions/filters/http/cache/http_cache.cc @@ -9,6 +9,7 @@ #include "source/common/http/header_utility.h" #include "source/common/http/headers.h" +#include "source/common/http/utility.h" #include "source/common/protobuf/utility.h" #include "source/extensions/filters/http/cache/cache_custom_headers.h" #include "source/extensions/filters/http/cache/cache_headers_utils.h" @@ -29,17 +30,14 @@ LookupRequest::LookupRequest(const Http::RequestHeaderMap& request_headers, Syst // CacheFilter 
doesn't create LookupRequests for such requests. ASSERT(request_headers.Path(), "Can't form cache lookup key for malformed Http::RequestHeaderMap " "with null Path."); - ASSERT( - request_headers.ForwardedProto(), - "Can't form cache lookup key for malformed Http::RequestHeaderMap with null ForwardedProto."); ASSERT(request_headers.Host(), "Can't form cache lookup key for malformed Http::RequestHeaderMap " "with null Host."); - const Http::HeaderString& forwarded_proto = request_headers.ForwardedProto()->value(); + absl::string_view scheme = Http::Utility::getScheme(request_headers); const auto& scheme_values = Http::Headers::get().SchemeValues; - ASSERT(forwarded_proto == scheme_values.Http || forwarded_proto == scheme_values.Https); + ASSERT(scheme == scheme_values.Http || scheme == scheme_values.Https); initializeRequestCacheControl(request_headers); - // TODO(toddmgreer): Let config determine whether to include forwarded_proto, host, and + // TODO(toddmgreer): Let config determine whether to include scheme, host, and // query params. // TODO(toddmgreer): get cluster name. 
if (request_headers.getMethodValue() == Http::Headers::get().MethodValues.Get) { @@ -51,7 +49,7 @@ LookupRequest::LookupRequest(const Http::RequestHeaderMap& request_headers, Syst key_.set_cluster_name("cluster_name_goes_here"); key_.set_host(std::string(request_headers.getHostValue())); key_.set_path(std::string(request_headers.getPathValue())); - key_.set_clear_http(forwarded_proto == scheme_values.Http); + key_.set_clear_http(scheme == scheme_values.Http); vary_headers_ = vary_allow_list.possibleVariedHeaders(request_headers); } diff --git a/source/extensions/filters/http/oauth2/filter.cc b/source/extensions/filters/http/oauth2/filter.cc index f7731ee44d7a2..8a42db24cd90e 100644 --- a/source/extensions/filters/http/oauth2/filter.cc +++ b/source/extensions/filters/http/oauth2/filter.cc @@ -386,8 +386,7 @@ Http::FilterHeadersStatus OAuth2Filter::signOutUser(const Http::RequestHeaderMap Http::ResponseHeaderMapPtr response_headers{Http::createHeaderMap( {{Http::Headers::get().Status, std::to_string(enumToInt(Http::Code::Found))}})}; - const std::string new_path = - absl::StrCat(headers.ForwardedProto()->value().getStringView(), "://", host_, "/"); + const std::string new_path = absl::StrCat(Http::Utility::getScheme(headers), "://", host_, "/"); response_headers->addReference(Http::Headers::get().SetCookie, SignoutCookieValue); response_headers->addReference(Http::Headers::get().SetCookie, SignoutBearerTokenValue); response_headers->setLocation(new_path); diff --git a/test/common/http/async_client_impl_test.cc b/test/common/http/async_client_impl_test.cc index 4e5bfe53b0db1..961aff304c181 100644 --- a/test/common/http/async_client_impl_test.cc +++ b/test/common/http/async_client_impl_test.cc @@ -1304,7 +1304,8 @@ TEST_F(AsyncClientImplTest, StreamTimeoutHeadReply) { })); RequestMessagePtr message{new RequestMessageImpl()}; - HttpTestUtility::addDefaultHeaders(message->headers(), "HEAD"); + message->headers().setMethod("HEAD"); + 
HttpTestUtility::addDefaultHeaders(message->headers(), false); EXPECT_CALL(stream_encoder_, encodeHeaders(HeaderMapEqualRef(&message->headers()), true)); timer_ = new NiceMock(&dispatcher_); EXPECT_CALL(*timer_, enableTimer(std::chrono::milliseconds(40), _)); diff --git a/test/common/http/common.cc b/test/common/http/common.cc index cd37ab9b48453..d289b021b94ba 100644 --- a/test/common/http/common.cc +++ b/test/common/http/common.cc @@ -5,11 +5,18 @@ #include "envoy/http/header_map.h" namespace Envoy { -void HttpTestUtility::addDefaultHeaders(Http::RequestHeaderMap& headers, - const std::string default_method) { - headers.setScheme("http"); - headers.setMethod(default_method); - headers.setHost("host"); - headers.setPath("/"); +void HttpTestUtility::addDefaultHeaders(Http::RequestHeaderMap& headers, bool overwrite) { + if (overwrite || headers.getSchemeValue().empty()) { + headers.setScheme("http"); + } + if (overwrite || headers.getMethodValue().empty()) { + headers.setMethod("GET"); + } + if (overwrite || headers.getHostValue().empty()) { + headers.setHost("host"); + } + if (overwrite || headers.getPathValue().empty()) { + headers.setPath("/"); + } } } // namespace Envoy diff --git a/test/common/http/common.h b/test/common/http/common.h index b23c14535845c..2e78fb57e84c8 100644 --- a/test/common/http/common.h +++ b/test/common/http/common.h @@ -66,7 +66,6 @@ struct ConnPoolCallbacks : public Http::ConnectionPool::Callbacks { */ class HttpTestUtility { public: - static void addDefaultHeaders(Http::RequestHeaderMap& headers, - const std::string default_method = "GET"); + static void addDefaultHeaders(Http::RequestHeaderMap& headers, bool overwrite = true); }; } // namespace Envoy diff --git a/test/common/http/utility_test.cc b/test/common/http/utility_test.cc index f86935dece080..62fcd5f17ff0b 100644 --- a/test/common/http/utility_test.cc +++ b/test/common/http/utility_test.cc @@ -1233,7 +1233,7 @@ TEST(HttpUtility, TestRejectNominatedXForwardedHost) { 
EXPECT_EQ(sanitized_headers, request_headers); } -TEST(HttpUtility, TestRejectNominatedXForwardedProto) { +TEST(HttpUtility, TestRejectNominatedForwardedProto) { Http::TestRequestHeaderMapImpl request_headers = { {":method", "GET"}, {":path", "/"}, @@ -1317,7 +1317,7 @@ TEST(HttpUtility, TestRejectTeHeaderTooLong) { TEST(HttpUtility, TestRejectUriWithNoPath) { Http::TestRequestHeaderMapImpl request_headers_no_path = { - {":method", "GET"}, {":authority", "example.com"}, {"x-forwarded-proto", "http"}}; + {":method", "GET"}, {":authority", "example.com"}, {":scheme", "http"}}; EXPECT_EQ(Utility::buildOriginalUri(request_headers_no_path, {}), ""); } @@ -1325,7 +1325,7 @@ TEST(HttpUtility, TestTruncateUri) { Http::TestRequestHeaderMapImpl request_headers_truncated_path = {{":method", "GET"}, {":path", "/hello_world"}, {":authority", "example.com"}, - {"x-forwarded-proto", "http"}}; + {":scheme", "http"}}; EXPECT_EQ(Utility::buildOriginalUri(request_headers_truncated_path, 2), "http://example.com/h"); } @@ -1334,7 +1334,7 @@ TEST(HttpUtility, TestUriUsesOriginalPath) { {":method", "GET"}, {":path", "/hello_world"}, {":authority", "example.com"}, - {"x-forwarded-proto", "http"}, + {":scheme", "http"}, {"x-envoy-original-path", "/goodbye_world"}}; EXPECT_EQ(Utility::buildOriginalUri(request_headers_truncated_path, {}), "http://example.com/goodbye_world"); diff --git a/test/common/router/config_impl_headermap_benchmark_test.cc b/test/common/router/config_impl_headermap_benchmark_test.cc index b30332475c855..b8281af1fecf4 100644 --- a/test/common/router/config_impl_headermap_benchmark_test.cc +++ b/test/common/router/config_impl_headermap_benchmark_test.cc @@ -60,10 +60,8 @@ static void manyCountryRoutesLongHeaders(benchmark::State& state) { ProtobufMessage::getNullValidationVisitor(), true); const auto stream_info = NiceMock(); - auto req_headers = Http::TestRequestHeaderMapImpl{{":authority", "www.lyft.com"}, - {":path", "/"}, - {":method", "GET"}, - {"x-forwarded-proto", 
"http"}}; + auto req_headers = Http::TestRequestHeaderMapImpl{ + {":authority", "www.lyft.com"}, {":path", "/"}, {":method", "GET"}, {":scheme", "http"}}; // Add dummy headers to reach ~100 headers (limit per request). for (int i = 0; i < 90; i++) { req_headers.addCopy(Http::LowerCaseString(absl::StrCat("dummyheader", i)), "some_value"); diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index edec064382238..fba1a9ab1f98d 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -114,23 +114,23 @@ class TestConfigImpl : public ConfigImpl { Http::TestRequestHeaderMapImpl genPathlessHeaders(const std::string& host, const std::string& method) { - return Http::TestRequestHeaderMapImpl{{":authority", host}, {":method", method}, - {"x-safe", "safe"}, {"x-global-nope", "global"}, - {"x-vhost-nope", "vhost"}, {"x-route-nope", "route"}, - {"x-forwarded-proto", "http"}}; + return Http::TestRequestHeaderMapImpl{ + {":authority", host}, {":method", method}, {"x-safe", "safe"}, + {"x-global-nope", "global"}, {"x-vhost-nope", "vhost"}, {"x-route-nope", "route"}, + {"x-forwarded-proto", "http"}, {":scheme", "http"}}; } Http::TestRequestHeaderMapImpl genHeaders(const std::string& host, const std::string& path, - const std::string& method, - const std::string& forwarded_proto) { - auto hdrs = Http::TestRequestHeaderMapImpl{ - {":authority", host}, {":path", path}, - {":method", method}, {"x-safe", "safe"}, - {"x-global-nope", "global"}, {"x-vhost-nope", "vhost"}, - {"x-route-nope", "route"}, {"x-forwarded-proto", forwarded_proto}}; + const std::string& method, const std::string& scheme) { + auto hdrs = + Http::TestRequestHeaderMapImpl{{":authority", host}, {":path", path}, + {":method", method}, {"x-safe", "safe"}, + {"x-global-nope", "global"}, {"x-vhost-nope", "vhost"}, + {"x-route-nope", "route"}, {":scheme", scheme}, + {"x-forwarded-proto", scheme}}; - if (forwarded_proto.empty()) { - 
hdrs.remove("x-forwarded-proto"); + if (scheme.empty()) { + hdrs.remove(":scheme"); } return hdrs; @@ -746,7 +746,7 @@ TEST_F(RouteMatcherTest, TestRoutes) { {}); TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); - // No host header, no x-forwarded-proto and no path header testing. + // No host header, no scheme and no path header testing. EXPECT_EQ(nullptr, config.route(Http::TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, 0)); EXPECT_EQ(nullptr, config.route(Http::TestRequestHeaderMapImpl{{":authority", "foo"}, @@ -755,7 +755,7 @@ TEST_F(RouteMatcherTest, TestRoutes) { 0)); EXPECT_EQ(nullptr, config.route(Http::TestRequestHeaderMapImpl{{":authority", "foo"}, {":method", "CONNECT"}, - {"x-forwarded-proto", "http"}}, + {":scheme", "http"}}, 0)); // Base routing testing. @@ -4203,22 +4203,23 @@ TEST_F(RouteMatcherTest, NoProtocolInHeadersWhenTlsIsRequired) { factory_context_.cluster_manager_.initializeClusters({"www"}, {}); TestConfigImpl config(parseRouteConfigurationFromYaml(yaml), factory_context_, true); - // route may be called early in some edge cases and "x-forwarded-proto" will not be set. + // route may be called early in some edge cases and ":scheme" will not be set. Http::TestRequestHeaderMapImpl headers{{":authority", "www.lyft.com"}, {":path", "/"}}; EXPECT_EQ(nullptr, config.route(headers, 0)); } /** * @brief Generate headers for testing - * @param ssl set true to insert "x-forwarded-proto: https", else "x-forwarded-proto: http" + * @param ssl set true to insert ":scheme: https", else ":scheme: http" * @param internal nullopt for no such "x-envoy-internal" header, or explicit "true/false" * @return Http::TestRequestHeaderMapImpl */ static Http::TestRequestHeaderMapImpl genRedirectHeaders(const std::string& host, const std::string& path, bool ssl, absl::optional internal) { + std::string scheme = ssl ?
"https" : "http"; Http::TestRequestHeaderMapImpl headers{ - {":authority", host}, {":path", path}, {"x-forwarded-proto", ssl ? "https" : "http"}}; + {":authority", host}, {":path", path}, {":scheme", scheme}, {"x-forwarded-proto", scheme}}; if (internal.has_value()) { headers.addCopy("x-envoy-internal", internal.value() ? "true" : "false"); } diff --git a/test/common/router/router_ratelimit_test.cc b/test/common/router/router_ratelimit_test.cc index 358c9513f5cd2..5c6b57ce3f59c 100644 --- a/test/common/router/router_ratelimit_test.cc +++ b/test/common/router/router_ratelimit_test.cc @@ -71,8 +71,11 @@ TEST(BadRateLimitConfiguration, ActionsMissingRequiredFields) { static Http::TestRequestHeaderMapImpl genHeaders(const std::string& host, const std::string& path, const std::string& method) { - return Http::TestRequestHeaderMapImpl{ - {":authority", host}, {":path", path}, {":method", method}, {"x-forwarded-proto", "http"}}; + return Http::TestRequestHeaderMapImpl{{":authority", host}, + {":path", path}, + {":method", method}, + {"x-forwarded-proto", "http"}, + {":scheme", "http"}}; } class RateLimitConfiguration : public testing::Test { diff --git a/test/common/router/router_test.cc b/test/common/router/router_test.cc index 1f4bc9d6b64b8..badfe10da7024 100644 --- a/test/common/router/router_test.cc +++ b/test/common/router/router_test.cc @@ -4467,7 +4467,6 @@ TEST_F(RouterTest, InternalRedirectRejectedByPredicate) { TEST_F(RouterTest, HttpInternalRedirectSucceeded) { enableRedirects(3); setNumPreviousRedirect(2); - default_request_headers_.setForwardedProto("http"); sendRequest(); EXPECT_CALL(callbacks_, clearRouteCache()); @@ -4489,6 +4488,7 @@ TEST_F(RouterTest, HttpsInternalRedirectSucceeded) { auto ssl_connection = std::make_shared(); enableRedirects(3); setNumPreviousRedirect(1); + default_request_headers_.setScheme("https"); sendRequest(); @@ -4508,6 +4508,7 @@ TEST_F(RouterTest, HttpsInternalRedirectSucceeded) { TEST_F(RouterTest, 
CrossSchemeRedirectAllowedByPolicy) { auto ssl_connection = std::make_shared(); enableRedirects(); + default_request_headers_.setScheme("https"); sendRequest(); diff --git a/test/common/router/router_test_base.cc b/test/common/router/router_test_base.cc index 6a8a3d0c2446a..11a23de807f1b 100644 --- a/test/common/router/router_test_base.cc +++ b/test/common/router/router_test_base.cc @@ -219,7 +219,7 @@ void RouterTestBase::sendRequest(bool end_stream) { upstream_stream_info_, Http::Protocol::Http10); return nullptr; })); - HttpTestUtility::addDefaultHeaders(default_request_headers_); + HttpTestUtility::addDefaultHeaders(default_request_headers_, false); router_.decodeHeaders(default_request_headers_, end_stream); } diff --git a/test/common/tracing/http_tracer_impl_test.cc b/test/common/tracing/http_tracer_impl_test.cc index e3289d042f3f7..158e39f9073e6 100644 --- a/test/common/tracing/http_tracer_impl_test.cc +++ b/test/common/tracing/http_tracer_impl_test.cc @@ -131,7 +131,7 @@ TEST_F(HttpConnManFinalizerImplTest, OriginalAndLongPath) { {"x-envoy-original-path", path}, {":method", "GET"}, {":path", ""}, - {"x-forwarded-proto", "http"}}; + {":scheme", "http"}}; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; @@ -161,10 +161,8 @@ TEST_F(HttpConnManFinalizerImplTest, NoGeneratedId) { const auto remote_address = Network::Address::InstanceConstSharedPtr{ new Network::Address::Ipv4Instance(expected_ip, 0, nullptr)}; - Http::TestRequestHeaderMapImpl request_headers{{":path", ""}, - {"x-envoy-original-path", path}, - {":method", "GET"}, - {"x-forwarded-proto", "http"}}; + Http::TestRequestHeaderMapImpl request_headers{ + {":path", ""}, {"x-envoy-original-path", path}, {":method", "GET"}, {":scheme", "http"}}; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; @@ -194,8 +192,7 @@ TEST_F(HttpConnManFinalizerImplTest, Connect) { const auto remote_address = 
Network::Address::InstanceConstSharedPtr{ new Network::Address::Ipv4Instance(expected_ip, 0, nullptr)}; - Http::TestRequestHeaderMapImpl request_headers{{":method", "CONNECT"}, - {"x-forwarded-proto", "http"}}; + Http::TestRequestHeaderMapImpl request_headers{{":method", "CONNECT"}, {":scheme", "http"}}; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; @@ -318,10 +315,8 @@ TEST_F(HttpConnManFinalizerImplTest, UpstreamClusterTagSet) { } TEST_F(HttpConnManFinalizerImplTest, SpanOptionalHeaders) { - Http::TestRequestHeaderMapImpl request_headers{{"x-request-id", "id"}, - {":path", "/test"}, - {":method", "GET"}, - {"x-forwarded-proto", "https"}}; + Http::TestRequestHeaderMapImpl request_headers{ + {"x-request-id", "id"}, {":path", "/test"}, {":method", "GET"}, {":scheme", "https"}}; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; const std::string expected_ip = "10.0.0.100"; @@ -362,10 +357,8 @@ TEST_F(HttpConnManFinalizerImplTest, SpanOptionalHeaders) { } TEST_F(HttpConnManFinalizerImplTest, UnixDomainSocketPeerAddressTag) { - Http::TestRequestHeaderMapImpl request_headers{{"x-request-id", "id"}, - {":path", "/test"}, - {":method", "GET"}, - {"x-forwarded-proto", "https"}}; + Http::TestRequestHeaderMapImpl request_headers{ + {"x-request-id", "id"}, {":path", "/test"}, {":method", "GET"}, {":scheme", "https"}}; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; const std::string path_{TestEnvironment::unixDomainSocketPath("foo")}; @@ -388,7 +381,7 @@ TEST_F(HttpConnManFinalizerImplTest, SpanCustomTags) { Http::TestRequestHeaderMapImpl request_headers{{"x-request-id", "id"}, {":path", "/test"}, {":method", "GET"}, - {"x-forwarded-proto", "https"}, + {":scheme", "https"}, {"x-bb", "b"}}; ProtobufWkt::Struct fake_struct; @@ -502,10 +495,8 @@ tag: dd-10, } TEST_F(HttpConnManFinalizerImplTest, 
SpanPopulatedFailureResponse) { - Http::TestRequestHeaderMapImpl request_headers{{"x-request-id", "id"}, - {":path", "/test"}, - {":method", "GET"}, - {"x-forwarded-proto", "http"}}; + Http::TestRequestHeaderMapImpl request_headers{ + {"x-request-id", "id"}, {":path", "/test"}, {":method", "GET"}, {":scheme", "http"}}; Http::TestResponseHeaderMapImpl response_headers; Http::TestResponseTrailerMapImpl response_trailers; const std::string expected_ip = "10.0.0.100"; @@ -567,7 +558,6 @@ TEST_F(HttpConnManFinalizerImplTest, GrpcOkStatus) { {":path", "/pb.Foo/Bar"}, {":authority", "example.com:80"}, {"content-type", "application/grpc"}, - {"x-forwarded-proto", "http"}, {"te", "trailers"}}; Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, @@ -618,7 +608,7 @@ TEST_F(HttpConnManFinalizerImplTest, GrpcErrorTag) { {":authority", "example.com:80"}, {"content-type", "application/grpc"}, {"grpc-timeout", "10s"}, - {"x-forwarded-proto", "http"}, + {":scheme", "http"}, {"te", "trailers"}}; Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, @@ -662,7 +652,7 @@ TEST_F(HttpConnManFinalizerImplTest, GrpcTrailersOnly) { {":path", "/pb.Foo/Bar"}, {":authority", "example.com:80"}, {"content-type", "application/grpc"}, - {"x-forwarded-proto", "http"}, + {":scheme", "http"}, {"te", "trailers"}}; Http::TestResponseHeaderMapImpl response_headers{{":status", "200"}, diff --git a/test/extensions/filters/http/cache/cache_custom_headers_test.cc b/test/extensions/filters/http/cache/cache_custom_headers_test.cc index f7d54b16cb43a..593da5921f765 100644 --- a/test/extensions/filters/http/cache/cache_custom_headers_test.cc +++ b/test/extensions/filters/http/cache/cache_custom_headers_test.cc @@ -19,7 +19,7 @@ TEST(CacheCustomHeadersTest, EnsureCacheCustomHeadersGettersDoNotFail) { Http::TestRequestHeaderMapImpl request_headers_{ {":path", "/"}, {":method", "GET"}, - {"x-forwarded-proto", "https"}, + {":scheme", "https"}, {":authority", "example.com"}, 
{"authorization", "Basic abc123def456"}, {"pragma", "no-cache"}, diff --git a/test/extensions/filters/http/cache/cache_filter_test.cc b/test/extensions/filters/http/cache/cache_filter_test.cc index ff8357b0d4f00..3fecae678b985 100644 --- a/test/extensions/filters/http/cache/cache_filter_test.cc +++ b/test/extensions/filters/http/cache/cache_filter_test.cc @@ -123,7 +123,7 @@ class CacheFilterTest : public ::testing::Test { Event::SimulatedTimeSystem time_source_; DateFormatter formatter_{"%a, %d %b %Y %H:%M:%S GMT"}; Http::TestRequestHeaderMapImpl request_headers_{ - {":path", "/"}, {":method", "GET"}, {"x-forwarded-proto", "https"}}; + {":path", "/"}, {":method", "GET"}, {":scheme", "https"}}; Http::TestResponseHeaderMapImpl response_headers_{{":status", "200"}, {"cache-control", "public,max-age=3600"}}; NiceMock decoder_callbacks_; diff --git a/test/extensions/filters/http/cache/cacheability_utils_test.cc b/test/extensions/filters/http/cache/cacheability_utils_test.cc index 79b4b3f836a95..7c208861712dd 100644 --- a/test/extensions/filters/http/cache/cacheability_utils_test.cc +++ b/test/extensions/filters/http/cache/cacheability_utils_test.cc @@ -14,18 +14,14 @@ namespace { class CanServeRequestFromCacheTest : public testing::Test { protected: - Http::TestRequestHeaderMapImpl request_headers_ = {{":path", "/"}, - {":method", "GET"}, - {"x-forwarded-proto", "http"}, - {":authority", "test.com"}}; + Http::TestRequestHeaderMapImpl request_headers_ = { + {":path", "/"}, {":method", "GET"}, {":scheme", "http"}, {":authority", "test.com"}}; }; class RequestConditionalHeadersTest : public testing::TestWithParam { protected: - Http::TestRequestHeaderMapImpl request_headers_ = {{":path", "/"}, - {":method", "GET"}, - {"x-forwarded-proto", "http"}, - {":authority", "test.com"}}; + Http::TestRequestHeaderMapImpl request_headers_ = { + {":path", "/"}, {":method", "GET"}, {":scheme", "http"}, {":authority", "test.com"}}; std::string conditionalHeader() const { return 
GetParam(); } }; @@ -78,11 +74,11 @@ TEST_F(CanServeRequestFromCacheTest, MethodHeader) { EXPECT_FALSE(CacheabilityUtils::canServeRequestFromCache(request_headers_)); } -TEST_F(CanServeRequestFromCacheTest, ForwardedProtoHeader) { +TEST_F(CanServeRequestFromCacheTest, SchemeHeader) { EXPECT_TRUE(CacheabilityUtils::canServeRequestFromCache(request_headers_)); - request_headers_.setForwardedProto("ftp"); + request_headers_.setScheme("ftp"); EXPECT_FALSE(CacheabilityUtils::canServeRequestFromCache(request_headers_)); - request_headers_.removeForwardedProto(); + request_headers_.removeScheme(); EXPECT_FALSE(CacheabilityUtils::canServeRequestFromCache(request_headers_)); } diff --git a/test/extensions/filters/http/cache/http_cache_test.cc b/test/extensions/filters/http/cache/http_cache_test.cc index 9ccc413033f9e..3ffc8089b47ec 100644 --- a/test/extensions/filters/http/cache/http_cache_test.cc +++ b/test/extensions/filters/http/cache/http_cache_test.cc @@ -43,10 +43,8 @@ class LookupRequestTest : public testing::TestWithParam { LookupRequestTest() : vary_allow_list_(getConfig().allowed_vary_headers()) {} DateFormatter formatter_{"%a, %d %b %Y %H:%M:%S GMT"}; - Http::TestRequestHeaderMapImpl request_headers_{{":path", "/"}, - {":method", "GET"}, - {"x-forwarded-proto", "https"}, - {":authority", "example.com"}}; + Http::TestRequestHeaderMapImpl request_headers_{ + {":path", "/"}, {":method", "GET"}, {":scheme", "https"}, {":authority", "example.com"}}; VaryHeader vary_allow_list_; diff --git a/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc b/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc index 0e895d0c6efae..eaad85d95d162 100644 --- a/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc +++ b/test/extensions/filters/http/cache/simple_http_cache/simple_http_cache_test.cc @@ -32,7 +32,7 @@ class SimpleHttpCacheTest : public testing::Test { SimpleHttpCacheTest() : 
vary_allow_list_(getConfig().allowed_vary_headers()) { request_headers_.setMethod("GET"); request_headers_.setHost("example.com"); - request_headers_.setForwardedProto("https"); + request_headers_.setScheme("https"); request_headers_.setCopy(Http::CustomHeaders::get().CacheControl, "max-age=3600"); } diff --git a/test/extensions/filters/http/ext_proc/filter_test.cc b/test/extensions/filters/http/ext_proc/filter_test.cc index 73640c79af78b..cde78e9e16be0 100644 --- a/test/extensions/filters/http/ext_proc/filter_test.cc +++ b/test/extensions/filters/http/ext_proc/filter_test.cc @@ -64,6 +64,8 @@ class HttpFilterTest : public testing::Test { filter_ = std::make_unique(config_, std::move(client_)); filter_->setEncoderFilterCallbacks(encoder_callbacks_); filter_->setDecoderFilterCallbacks(decoder_callbacks_); + HttpTestUtility::addDefaultHeaders(request_headers_); + request_headers_.setMethod("POST"); } ExternalProcessorStreamPtr doStart(ExternalProcessorCallbacks& callbacks) { @@ -211,7 +213,6 @@ TEST_F(HttpFilterTest, SimplestPost) { EXPECT_TRUE(config_->failureModeAllow()); // Create synthetic HTTP request - HttpTestUtility::addDefaultHeaders(request_headers_, "POST"); request_headers_.addCopy(LowerCaseString("content-type"), "text/plain"); request_headers_.addCopy(LowerCaseString("content-length"), 10); request_headers_.addCopy(LowerCaseString("x-some-other-header"), "yes"); @@ -272,7 +273,6 @@ TEST_F(HttpFilterTest, PostAndChangeHeaders) { cluster_name: "ext_proc_server" )EOF"); - HttpTestUtility::addDefaultHeaders(request_headers_, "POST"); request_headers_.addCopy(LowerCaseString("x-some-other-header"), "yes"); request_headers_.addCopy(LowerCaseString("x-do-we-want-this"), "no"); @@ -356,8 +356,6 @@ TEST_F(HttpFilterTest, PostAndRespondImmediately) { cluster_name: "ext_proc_server" )EOF"); - HttpTestUtility::addDefaultHeaders(request_headers_, "POST"); - EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); 
Http::TestResponseHeaderMapImpl immediate_response_headers; @@ -419,8 +417,6 @@ TEST_F(HttpFilterTest, PostAndRespondImmediatelyOnResponse) { cluster_name: "ext_proc_server" )EOF"); - HttpTestUtility::addDefaultHeaders(request_headers_, "POST"); - EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); processRequestHeaders(false, absl::nullopt); @@ -481,7 +477,6 @@ TEST_F(HttpFilterTest, PostAndChangeRequestBodyBuffered) { )EOF"); // Create synthetic HTTP request - HttpTestUtility::addDefaultHeaders(request_headers_, "POST"); request_headers_.addCopy(LowerCaseString("content-type"), "text/plain"); request_headers_.addCopy(LowerCaseString("content-length"), 100); @@ -546,7 +541,6 @@ TEST_F(HttpFilterTest, PostAndChangeRequestBodyBufferedComesFast) { )EOF"); // Create synthetic HTTP request - HttpTestUtility::addDefaultHeaders(request_headers_, "POST"); request_headers_.addCopy(LowerCaseString("content-type"), "text/plain"); request_headers_.addCopy(LowerCaseString("content-length"), 100); @@ -614,7 +608,6 @@ TEST_F(HttpFilterTest, PostAndChangeRequestBodyBufferedComesALittleFast) { )EOF"); // Create synthetic HTTP request - HttpTestUtility::addDefaultHeaders(request_headers_, "POST"); request_headers_.addCopy(LowerCaseString("content-type"), "text/plain"); request_headers_.addCopy(LowerCaseString("content-length"), 100); @@ -682,7 +675,6 @@ TEST_F(HttpFilterTest, PostAndChangeBothBodiesBufferedOneChunk) { )EOF"); // Create synthetic HTTP request - HttpTestUtility::addDefaultHeaders(request_headers_, "POST"); request_headers_.addCopy(LowerCaseString("content-type"), "text/plain"); request_headers_.addCopy(LowerCaseString("content-length"), 100); @@ -756,7 +748,6 @@ TEST_F(HttpFilterTest, PostAndChangeBothBodiesBufferedMultiChunk) { )EOF"); // Create synthetic HTTP request - HttpTestUtility::addDefaultHeaders(request_headers_, "POST"); request_headers_.addCopy(LowerCaseString("content-type"), "text/plain"); 
request_headers_.addCopy(LowerCaseString("content-length"), 100); @@ -843,7 +834,6 @@ TEST_F(HttpFilterTest, PostAndIgnoreStreamedBodiesUntilImplemented) { )EOF"); // Create synthetic HTTP request - HttpTestUtility::addDefaultHeaders(request_headers_, "POST"); request_headers_.addCopy(LowerCaseString("content-type"), "text/plain"); request_headers_.addCopy(LowerCaseString("content-length"), 100); @@ -885,8 +875,6 @@ TEST_F(HttpFilterTest, RespondImmediatelyDefault) { cluster_name: "ext_proc_server" )EOF"); - HttpTestUtility::addDefaultHeaders(request_headers_, "POST"); - EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); Http::TestResponseHeaderMapImpl immediate_response_headers; @@ -923,8 +911,6 @@ TEST_F(HttpFilterTest, RespondImmediatelyGrpcError) { cluster_name: "ext_proc_server" )EOF"); - HttpTestUtility::addDefaultHeaders(request_headers_, "POST"); - EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); Http::TestResponseHeaderMapImpl immediate_response_headers; @@ -965,7 +951,6 @@ TEST_F(HttpFilterTest, PostAndFail) { EXPECT_FALSE(config_->failureModeAllow()); // Create synthetic HTTP request - HttpTestUtility::addDefaultHeaders(request_headers_, "POST"); EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); // Oh no! The remote server had a failure! 
@@ -1010,7 +995,6 @@ TEST_F(HttpFilterTest, PostAndFailOnResponse) { EXPECT_FALSE(config_->failureModeAllow()); // Create synthetic HTTP request - HttpTestUtility::addDefaultHeaders(request_headers_, "POST"); EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); EXPECT_FALSE(last_request_.async_mode()); @@ -1069,7 +1053,6 @@ TEST_F(HttpFilterTest, PostAndIgnoreFailure) { EXPECT_TRUE(config_->failureModeAllow()); // Create synthetic HTTP request - HttpTestUtility::addDefaultHeaders(request_headers_, "POST"); EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); // Oh no! The remote server had a failure which we will ignore @@ -1107,7 +1090,6 @@ TEST_F(HttpFilterTest, PostAndClose) { EXPECT_FALSE(config_->failureModeAllow()); // Create synthetic HTTP request - HttpTestUtility::addDefaultHeaders(request_headers_, "POST"); EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); EXPECT_FALSE(last_request_.async_mode()); @@ -1151,7 +1133,6 @@ TEST_F(HttpFilterTest, ProcessingModeRequestHeadersOnly) { response_trailer_mode: "SKIP" )EOF"); - HttpTestUtility::addDefaultHeaders(request_headers_, "POST"); EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); EXPECT_FALSE(last_request_.async_mode()); @@ -1198,7 +1179,6 @@ TEST_F(HttpFilterTest, ProcessingModeOverrideResponseHeaders) { cluster_name: "ext_proc_server" )EOF"); - HttpTestUtility::addDefaultHeaders(request_headers_, "POST"); EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); processRequestHeaders( @@ -1248,7 +1228,6 @@ TEST_F(HttpFilterTest, ProcessingModeResponseHeadersOnly) { response_trailer_mode: "SKIP" )EOF"); - HttpTestUtility::addDefaultHeaders(request_headers_, "POST"); EXPECT_EQ(FilterHeadersStatus::Continue, filter_->decodeHeaders(request_headers_, false)); Buffer::OwnedImpl first_chunk("foo"); @@ 
-1290,9 +1269,6 @@ TEST_F(HttpFilterTest, ClearRouteCache) { response_body_mode: "BUFFERED" )EOF"); - // Create synthetic HTTP request - HttpTestUtility::addDefaultHeaders(request_headers_, "GET"); - EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, true)); EXPECT_CALL(decoder_callbacks_, clearRouteCache()); @@ -1330,7 +1306,7 @@ TEST_F(HttpFilterTest, ReplaceRequest) { cluster_name: "ext_proc_server" )EOF"); - HttpTestUtility::addDefaultHeaders(request_headers_); + request_headers_.setMethod("GET"); EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, true)); Buffer::OwnedImpl req_buffer; diff --git a/test/extensions/filters/http/ext_proc/streaming_integration_test.cc b/test/extensions/filters/http/ext_proc/streaming_integration_test.cc index 4fd5d7a581246..59e03fe215773 100644 --- a/test/extensions/filters/http/ext_proc/streaming_integration_test.cc +++ b/test/extensions/filters/http/ext_proc/streaming_integration_test.cc @@ -85,8 +85,8 @@ class StreamingIntegrationTest : public HttpIntegrationTest, sendClientRequestHeaders(absl::optional> cb) { auto conn = makeClientConnection(lookupPort("http")); codec_client_ = makeHttpConnection(std::move(conn)); - Http::TestRequestHeaderMapImpl headers; - HttpTestUtility::addDefaultHeaders(headers, std::string("POST")); + Http::TestRequestHeaderMapImpl headers{{":method", "POST"}}; + HttpTestUtility::addDefaultHeaders(headers, false); if (cb) { (*cb)(headers); } diff --git a/test/extensions/filters/http/oauth2/filter_test.cc b/test/extensions/filters/http/oauth2/filter_test.cc index db54e404a7ed4..d232adb224d24 100644 --- a/test/extensions/filters/http/oauth2/filter_test.cc +++ b/test/extensions/filters/http/oauth2/filter_test.cc @@ -101,7 +101,7 @@ class OAuth2Test : public testing::Test { endpoint->set_cluster("auth.example.com"); endpoint->set_uri("auth.example.com/_oauth"); endpoint->mutable_timeout()->set_seconds(1); - 
p.set_redirect_uri("%REQ(x-forwarded-proto)%://%REQ(:authority)%" + TEST_CALLBACK); + p.set_redirect_uri("%REQ(:scheme)%://%REQ(:authority)%" + TEST_CALLBACK); p.mutable_redirect_path_matcher()->mutable_path()->set_exact(TEST_CALLBACK); p.set_authorization_endpoint("https://auth.example.com/oauth/authorize/"); p.mutable_signout_path()->mutable_path()->set_exact("/_signout"); @@ -250,7 +250,7 @@ TEST_F(OAuth2Test, DefaultAuthScope) { endpoint->set_cluster("auth.example.com"); endpoint->set_uri("auth.example.com/_oauth"); endpoint->mutable_timeout()->set_seconds(1); - p.set_redirect_uri("%REQ(x-forwarded-proto)%://%REQ(:authority)%" + TEST_CALLBACK); + p.set_redirect_uri("%REQ(:scheme)%://%REQ(:authority)%" + TEST_CALLBACK); p.mutable_redirect_path_matcher()->mutable_path()->set_exact(TEST_CALLBACK); p.set_authorization_endpoint("https://auth.example.com/oauth/authorize/"); p.mutable_signout_path()->mutable_path()->set_exact("/_signout"); @@ -286,7 +286,6 @@ TEST_F(OAuth2Test, DefaultAuthScope) { {Http::Headers::get().Host.get(), "traffic.example.com"}, {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get}, {Http::Headers::get().Scheme.get(), "http"}, - {Http::Headers::get().ForwardedProto.get(), "http"}, }; Http::TestResponseHeaderMapImpl response_headers{ @@ -320,7 +319,7 @@ TEST_F(OAuth2Test, RequestSignout) { {Http::Headers::get().Path.get(), "/_signout"}, {Http::Headers::get().Host.get(), "traffic.example.com"}, {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get}, - {Http::Headers::get().ForwardedProto.get(), "https"}, + {Http::Headers::get().Scheme.get(), "https"}, }; Http::TestResponseHeaderMapImpl response_headers{ @@ -350,7 +349,7 @@ TEST_F(OAuth2Test, OAuthOkPass) { {Http::Headers::get().Path.get(), "/anypath"}, {Http::Headers::get().Host.get(), "traffic.example.com"}, {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get}, - {Http::Headers::get().ForwardedProto.get(), "https"}, + 
{Http::Headers::get().Scheme.get(), "https"}, {Http::CustomHeaders::get().Authorization.get(), "Bearer injected_malice!"}, }; @@ -358,7 +357,7 @@ TEST_F(OAuth2Test, OAuthOkPass) { {Http::Headers::get().Path.get(), "/anypath"}, {Http::Headers::get().Host.get(), "traffic.example.com"}, {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get}, - {Http::Headers::get().ForwardedProto.get(), "https"}, + {Http::Headers::get().Scheme.get(), "https"}, {Http::CustomHeaders::get().Authorization.get(), "Bearer legit_token"}, }; @@ -394,7 +393,6 @@ TEST_F(OAuth2Test, OAuthErrorNonOAuthHttpCallback) { {Http::Headers::get().Host.get(), "traffic.example.com"}, {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get}, {Http::Headers::get().Scheme.get(), "http"}, - {Http::Headers::get().ForwardedProto.get(), "http"}, }; Http::TestResponseHeaderMapImpl response_headers{ @@ -459,7 +457,7 @@ TEST_F(OAuth2Test, OAuthCallbackStartsAuthentication) { Http::TestRequestHeaderMapImpl request_headers{ {Http::Headers::get().Path.get(), "/_oauth?code=123&state=https://asdf&method=GET"}, {Http::Headers::get().Host.get(), "traffic.example.com"}, - {Http::Headers::get().ForwardedProto.get(), "https"}, + {Http::Headers::get().Scheme.get(), "https"}, {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get}, }; @@ -704,7 +702,7 @@ TEST_F(OAuth2Test, OAuthTestFullFlowPostWithParameters) { {Http::Headers::get().Path.get(), "/test?name=admin&level=trace"}, {Http::Headers::get().Host.get(), "traffic.example.com"}, {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Post}, - {Http::Headers::get().ForwardedProto.get(), "https"}, + {Http::Headers::get().Scheme.get(), "https"}, }; // This is the immediate response - a redirect to the auth cluster. 
@@ -739,7 +737,7 @@ TEST_F(OAuth2Test, OAuthTestFullFlowPostWithParameters) { "2Ftest%3Fname%3Dadmin%26level%3Dtrace"}, {Http::Headers::get().Host.get(), "traffic.example.com"}, {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get}, - {Http::Headers::get().ForwardedProto.get(), "https"}, + {Http::Headers::get().Scheme.get(), "https"}, }; // Deliberately fail the HMAC validation check. @@ -783,7 +781,7 @@ TEST_F(OAuth2Test, OAuthBearerTokenFlowFromHeader) { {Http::Headers::get().Path.get(), "/test?role=bearer"}, {Http::Headers::get().Host.get(), "traffic.example.com"}, {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get}, - {Http::Headers::get().ForwardedProto.get(), "https"}, + {Http::Headers::get().Scheme.get(), "https"}, {Http::CustomHeaders::get().Authorization.get(), "Bearer xyz-header-token"}, }; // Expected decoded headers after the callback & validation of the bearer token is complete. @@ -791,7 +789,7 @@ TEST_F(OAuth2Test, OAuthBearerTokenFlowFromHeader) { {Http::Headers::get().Path.get(), "/test?role=bearer"}, {Http::Headers::get().Host.get(), "traffic.example.com"}, {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get}, - {Http::Headers::get().ForwardedProto.get(), "https"}, + {Http::Headers::get().Scheme.get(), "https"}, {Http::CustomHeaders::get().Authorization.get(), "Bearer xyz-header-token"}, }; @@ -811,13 +809,13 @@ TEST_F(OAuth2Test, OAuthBearerTokenFlowFromQueryParameters) { {Http::Headers::get().Path.get(), "/test?role=bearer&token=xyz-queryparam-token"}, {Http::Headers::get().Host.get(), "traffic.example.com"}, {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get}, - {Http::Headers::get().ForwardedProto.get(), "https"}, + {Http::Headers::get().Scheme.get(), "https"}, }; Http::TestRequestHeaderMapImpl request_headers_after{ {Http::Headers::get().Path.get(), "/test?role=bearer&token=xyz-queryparam-token"}, {Http::Headers::get().Host.get(), "traffic.example.com"}, 
{Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get}, - {Http::Headers::get().ForwardedProto.get(), "https"}, + {Http::Headers::get().Scheme.get(), "https"}, {Http::CustomHeaders::get().Authorization.get(), "Bearer xyz-queryparam-token"}, }; diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index 8dd52277396b2..48f68e41e49ac 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -1096,6 +1096,7 @@ TEST_P(IntegrationTest, AbsolutePathUsingHttpsDisallowedAtFrontline) { } TEST_P(IntegrationTest, AbsolutePathUsingHttpsAllowedInternally) { + autonomous_upstream_ = true; // Sent an HTTPS request over non-TLS. It will be allowed for non-front-line Envoys // and match the configured redirect. auto host = config_helper_.createVirtualHost("www.redirect.com", "/"); diff --git a/test/tools/router_check/router.cc b/test/tools/router_check/router.cc index 65ee00c428842..4d21e67c5a752 100644 --- a/test/tools/router_check/router.cc +++ b/test/tools/router_check/router.cc @@ -95,6 +95,7 @@ ToolConfig ToolConfig::create(const envoy::RouterCheckToolSchema::ValidationItem request_headers->addCopy(":path", check_config.input().path()); request_headers->addCopy(":method", check_config.input().method()); request_headers->addCopy("x-forwarded-proto", check_config.input().ssl() ? "https" : "http"); + request_headers->addCopy(":scheme", check_config.input().ssl() ? 
"https" : "http"); if (check_config.input().internal()) { request_headers->addCopy("x-envoy-internal", "true"); From 323f48373d1971c78737aab1849fc430dc18c1c5 Mon Sep 17 00:00:00 2001 From: phlax Date: Wed, 21 Jul 2021 17:25:01 +0100 Subject: [PATCH 24/57] dist: Add package signing utility (#17289) Signed-off-by: Ryan Northey --- tools/distribution/BUILD | 15 + tools/distribution/sign.py | 419 ++++++++++ tools/distribution/tests/test_sign.py | 1014 +++++++++++++++++++++++++ tools/gpg/identity.py | 5 + tools/gpg/tests/test_identity.py | 28 + 5 files changed, 1481 insertions(+) create mode 100644 tools/distribution/BUILD create mode 100644 tools/distribution/sign.py create mode 100644 tools/distribution/tests/test_sign.py diff --git a/tools/distribution/BUILD b/tools/distribution/BUILD new file mode 100644 index 0000000000000..6778780e56817 --- /dev/null +++ b/tools/distribution/BUILD @@ -0,0 +1,15 @@ +load("//bazel:envoy_build_system.bzl", "envoy_package") +load("//tools/base:envoy_python.bzl", "envoy_py_binary") + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_py_binary( + name = "tools.distribution.sign", + deps = [ + "//tools/base:runner", + "//tools/base:utils", + "//tools/gpg:identity", + ], +) diff --git a/tools/distribution/sign.py b/tools/distribution/sign.py new file mode 100644 index 0000000000000..e9830f0871255 --- /dev/null +++ b/tools/distribution/sign.py @@ -0,0 +1,419 @@ +#!/usr/bin/env python3 + +# You will need to have the respective system tools required for +# package signing to use this tool. +# +# For example you will need debsign to sign debs, and rpmsign to +# sign rpms. +# +# usage +# +# with bazel: +# +# bazel run //tools/distribution:sign -- -h +# +# alternatively, if you have the necessary python deps available +# +# PYTHONPATH=. 
./tools/distribution/sign.py -h +# +# python requires: coloredlogs, frozendict, python-gnupg, verboselogs +# + +import argparse +import os +import shutil +import subprocess +import sys +import tarfile +from functools import cached_property +from itertools import chain +from typing import Iterator, Optional, Type + +import verboselogs + +from tools.base import runner, utils +from tools.gpg import identity + +# Replacable `__` maintainer/gpg config - python interpolation doesnt work easily +# with this string +RPMMACRO_TEMPLATE = """ +%_signature gpg +%_gpg_path __GPG_CONFIG__ +%_gpg_name __MAINTAINER__ +%_gpgbin __GPG_BIN__ +%__gpg_sign_cmd %{__gpg} gpg --force-v3-sigs --batch --verbose --no-armor --no-secmem-warning -u "%{_gpg_name}" -sbo %{__signature_filename} --digest-algo sha256 %{__plaintext_filename}' +""" + + +class SigningError(Exception): + pass + + +# Base directory signing util + + +class DirectorySigningUtil(object): + """Base class for signing utils - eg for deb or rpm packages""" + + command_name = None + _package_type = None + ext = None + + def __init__( + self, + path: str, + maintainer: identity.GPGIdentity, + log: verboselogs.VerboseLogger, + command: Optional[str] = ""): + self.path = path + self.maintainer = maintainer + self.log = log + self._command = command + + @cached_property + def command(self) -> str: + """Provided command name/path or path to available system version""" + command = self._command or shutil.which(self.command_name) + if command: + return command + raise SigningError(f"Signing software missing ({self.package_type}): {self.command_name}") + + @property + def command_args(self) -> tuple: + return () + + @property + def package_type(self) -> str: + return self._package_type or self.ext + + @property + def pkg_files(self) -> tuple: + """Tuple of paths to package files to sign""" + # TODO?(phlax): check maintainer/packager field matches key id + return tuple( + os.path.join(self.path, filename) + for filename in 
os.listdir(self.path) + if filename.endswith(f".{self.ext}")) + + def sign(self) -> None: + """Sign the packages""" + for pkg in self.pkg_files: + self.sign_pkg(pkg) + + def sign_command(self, pkg_file: str) -> tuple: + """Tuple of command parts to sign a specific package""" + return (self.command,) + self.command_args + (pkg_file,) + + def sign_pkg(self, pkg_file: str) -> None: + """Sign a specific package file""" + pkg_name = os.path.basename(pkg_file) + self.log.notice(f"Sign package ({self.package_type}): {pkg_name}") + response = subprocess.run( + self.sign_command(pkg_file), capture_output=True, encoding="utf-8") + + if response.returncode: + raise SigningError(response.stdout + response.stderr) + + self.log.success(f"Signed package ({self.package_type}): {pkg_name}") + + +# Runner + + +class PackageSigningRunner(runner.Runner): + """For a given `package_type` and `path` this will run the relevant signing + util for the packages they contain. + """ + + _signing_utils = () + + @classmethod + def register_util(cls, name: str, util: Type[DirectorySigningUtil]) -> None: + """Register util for signing a package type""" + cls._signing_utils = getattr(cls, "_signing_utils") + ((name, util),) + + @property + def extract(self) -> bool: + return self.args.extract + + @cached_property + def maintainer(self) -> identity.GPGIdentity: + """A representation of the maintainer with GPG capabilities""" + return self.maintainer_class(self.maintainer_name, self.maintainer_email, self.log) + + @property + def maintainer_class(self) -> Type[identity.GPGIdentity]: + return identity.GPGIdentity + + @property + def maintainer_email(self) -> str: + """Email of the maintainer if set""" + return self.args.maintainer_email + + @property + def maintainer_name(self) -> str: + """Name of the maintainer if set""" + return self.args.maintainer_name + + @property + def package_type(self) -> str: + """Package type - eg deb/rpm""" + return self.args.package_type + + @property + def path(self) -> 
str: + """Path to the packages directory""" + return self.args.path + + @property + def tar(self) -> bool: + return self.args.tar + + @cached_property + def signing_utils(self) -> dict: + """Configured signing utils - eg `DebSigningUtil`, `RPMSigningUtil`""" + return dict(getattr(self, "_signing_utils")) + + def add_arguments(self, parser: argparse.ArgumentParser) -> None: + super().add_arguments(parser) + parser.add_argument( + "path", default="", help="Path to the directory containing packages to sign") + parser.add_argument( + "--extract", + action="store_true", + help= + "If set, treat the path as a tarball containing directories according to package_type") + parser.add_argument("--tar", help="Path to save the signed packages as tar file") + parser.add_argument( + "--type", + default="", + choices=[c for c in self.signing_utils] + [""], + help="Package type to sign") + parser.add_argument( + "--maintainer-name", + default="", + help="Maintainer name to match when searching for a GPG key to match with") + parser.add_argument( + "--maintainer-email", + default="", + help="Maintainer email to match when searching for a GPG key to match with") + + def archive(self, path: str) -> None: + with tarfile.open(self.tar, "w") as tar: + tar.add(path, arcname=".") + + def get_signing_util(self, package_type: str, path: str) -> DirectorySigningUtil: + return self.signing_utils[package_type](path, self.maintainer, self.log) + + @runner.catches((identity.GPGError, SigningError)) + def run(self) -> Optional[int]: + if self.extract: + self.sign_tarball() + else: + self.sign_directory() + self.log.success("Successfully signed packages") + + def sign(self, package_type: str, path: str) -> None: + self.log.notice(f"Signing {package_type}s ({self.maintainer}) {path}") + self.get_signing_util(package_type, path).sign() + + def sign_all(self, path: str) -> None: + for package_type in os.listdir(path): + if package_type in self.signing_utils: + target = os.path.join(path, package_type) 
+ self.sign(package_type, target) + + def sign_directory(self) -> None: + self.sign(self.package_type, self.path) + if self.tar: + self.archive(self.path) + + def sign_tarball(self) -> None: + if not self.tar: + raise SigningError("You must set a `--tar` file to save to when `--extract` is set") + with utils.untar(self.path) as tardir: + self.sign_all(tardir) + self.archive(tardir) + + +# RPM + + +class RPMMacro(object): + """`.rpmmacros` configuration for rpmsign""" + + _macro_filename = ".rpmmacros" + + def __init__(self, home: str, overwrite: bool = False, **kwargs): + self.home = home + self.overwrite = bool(overwrite) + self.kwargs = kwargs + + @property + def path(self) -> str: + return os.path.join(self.home, self._macro_filename) + + @property + def macro(self) -> str: + macro = self.template + for k, v in self.kwargs.items(): + macro = macro.replace(f"__{k.upper()}__", v) + return macro + + @property + def template(self) -> str: + return RPMMACRO_TEMPLATE + + def write(self) -> None: + if not self.overwrite and os.path.exists(self.path): + return + with open(self.path, "w") as f: + f.write(self.macro) + + +class RPMSigningUtil(DirectorySigningUtil): + """Sign all RPM packages in a given directory""" + + command_name = "rpmsign" + ext = "rpm" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.setup() + + @cached_property + def command(self) -> str: + if not os.path.basename(self.maintainer.gpg_bin) == "gpg2": + raise SigningError("GPG2 is required to sign RPM packages") + return super().command + + @cached_property + def command_args(self) -> tuple: + return ("--key-id", self.maintainer.fingerprint, "--addsign") + + @property + def rpmmacro(self) -> Type[RPMMacro]: + return RPMMacro + + def setup(self) -> None: + """Create the .rpmmacros file if it doesn't exist""" + self.rpmmacro( + self.maintainer.home, + maintainer=self.maintainer.name, + gpg_bin=self.maintainer.gpg_bin, + gpg_config=self.maintainer.gnupg_home).write() + 
+ def sign_pkg(self, pkg_file: str) -> None: + os.chmod(pkg_file, 0o755) + super().sign_pkg(pkg_file) + + +# Deb + + +class DebChangesFiles(object): + """Creates a set of `changes` files for specific distros from a src + `changes` file. + + eg, if src changes file is `envoy_1.100.changes` and `Distribution:` + field is `buster bullseye`, it creates: + + `envoy_1.100.changes` -> `envoy_1.100.buster.changes` + `envoy_1.100.changes` -> `envoy_1.100.bullseye.changes` + + while replacing any instances of the original distribution name in + the respective changes files, eg: + + `buster bullseye` -> `buster` + `buster bullseye` -> `bullseye` + + finally, it removes the src changes file. + """ + + def __init__(self, src): + self.src = src + + def __iter__(self) -> Iterator[str]: + """Iterate the required changes files, creating them, yielding the paths + of the newly created files, and deleting the original + """ + for path in self.files: + yield path + os.unlink(self.src) + + @cached_property + def distributions(self) -> str: + """Find and parse the `Distributions` header in the `changes` file""" + with open(self.src) as f: + line = f.readline() + while line: + if not line.startswith("Distribution:"): + line = f.readline() + continue + return line.split(":")[1].strip() + raise SigningError(f"Did not find Distribution field in changes file {self.src}") + + @property + def files(self) -> Iterator[str]: + """Create changes files for each distro, yielding the paths""" + for distro in self.distributions.split(): + yield self.changes_file(distro) + + def changes_file(self, distro: str) -> str: + """Create a `changes` file for a specific distro""" + target = self.changes_file_path(distro) + with open(target, "w") as df: + with open(self.src) as f: + df.write(f.read().replace(self.distributions, distro)) + return target + + def changes_file_path(self, distro: str) -> str: + """Path to write the new changes file to""" + return ".".join([os.path.splitext(self.src)[0], distro, 
"changes"]) + + +class DebSigningUtil(DirectorySigningUtil): + """Sign all `changes` packages in a given directory + + the `.changes` spec allows a single `.changes` file to have multiple `Distributions` listed. + + but, most package repos require a single signed `.change` file per distribution, with only one + distribution listed. + + this extracts the `.changes` files to -> per-distro `filename.distro.changes`, and removes + the original, before signing the files. + """ + + command_name = "debsign" + ext = "changes" + _package_type = "deb" + + @cached_property + def command_args(self) -> tuple: + return ("-k", self.maintainer.fingerprint) + + @property + def changes_files(self) -> Type[DebChangesFiles]: + return DebChangesFiles + + @cached_property + def pkg_files(self) -> tuple: + """Mangled .changes paths""" + return tuple(chain.from_iterable(self.changes_files(src) for src in super().pkg_files)) + + +# Setup + + +def _register_utils() -> None: + PackageSigningRunner.register_util("deb", DebSigningUtil) + PackageSigningRunner.register_util("rpm", RPMSigningUtil) + + +def main(*args) -> int: + _register_utils() + return PackageSigningRunner(*args).run() + + +if __name__ == "__main__": + sys.exit(main(*sys.argv[1:])) diff --git a/tools/distribution/tests/test_sign.py b/tools/distribution/tests/test_sign.py new file mode 100644 index 0000000000000..e2c80d8af5b36 --- /dev/null +++ b/tools/distribution/tests/test_sign.py @@ -0,0 +1,1014 @@ +import types +from unittest.mock import MagicMock, PropertyMock + +import pytest + +from tools.base import runner +from tools.distribution import sign +from tools.gpg import identity + + +# DirectorySigningUtil + +@pytest.mark.parametrize("command", ["", None, "COMMAND", "OTHERCOMMAND"]) +def test_util_constructor(command): + packager = sign.PackageSigningRunner("x", "y", "z") + maintainer = identity.GPGIdentity(packager) + args = ("PATH", maintainer, "LOG") + if command is not None: + args += (command, ) + util = 
sign.DirectorySigningUtil(*args) + assert util.path == "PATH" + assert util.maintainer == maintainer + assert util.log == "LOG" + assert util._command == (command or "") + assert util.command_args == () + + +@pytest.mark.parametrize("command_name", ["", None, "CMD", "OTHERCMD"]) +@pytest.mark.parametrize("command", ["", None, "COMMAND", "OTHERCOMMAND"]) +@pytest.mark.parametrize("which", ["", None, "PATH", "OTHERPATH"]) +def test_util_command(patches, command_name, command, which): + packager = sign.PackageSigningRunner("x", "y", "z") + maintainer = identity.GPGIdentity(packager) + util = sign.DirectorySigningUtil("PATH", maintainer, "LOG", command=command) + patched = patches( + "shutil", + ("DirectorySigningUtil.package_type", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + if command_name is not None: + util.command_name = command_name + + with patched as (m_shutil, m_type): + m_shutil.which.return_value = which + + if not which and not command: + with pytest.raises(sign.SigningError) as e: + util.command + + assert ( + list(m_shutil.which.call_args) + == [(command_name,), {}]) + assert ( + e.value.args[0] + == f"Signing software missing ({m_type.return_value}): {command_name}") + return + + result = util.command + + assert "command" in util.__dict__ + assert not m_type.called + + if command: + assert not m_shutil.which.called + assert result == command + return + + assert ( + list(m_shutil.which.call_args) + == [(command_name,), {}]) + assert result == m_shutil.which.return_value + + +def test_util_sign(patches): + packager = sign.PackageSigningRunner("x", "y", "z") + maintainer = identity.GPGIdentity(packager) + util = sign.DirectorySigningUtil("PATH", maintainer, "LOG") + patched = patches( + "DirectorySigningUtil.sign_pkg", + ("DirectorySigningUtil.pkg_files", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as (m_sign, m_pkgs): + m_pkgs.return_value = ("PKG1", "PKG2", "PKG3") + assert not 
util.sign() + + assert ( + list(list(c) for c in m_sign.call_args_list) + == [[('PKG1',), {}], + [('PKG2',), {}], + [('PKG3',), {}]]) + + +def test_util_sign_command(patches): + packager = sign.PackageSigningRunner("x", "y", "z") + maintainer = identity.GPGIdentity(packager) + util = sign.DirectorySigningUtil("PATH", maintainer, "LOG") + patched = patches( + ("DirectorySigningUtil.command", dict(new_callable=PropertyMock)), + ("DirectorySigningUtil.command_args", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as (m_command, m_args): + m_args.return_value = ("ARG1", "ARG2", "ARG3") + assert ( + util.sign_command("PACKAGE") + == (m_command.return_value, ) + m_args.return_value + ("PACKAGE", )) + + +@pytest.mark.parametrize("returncode", [0, 1]) +def test_util_sign_pkg(patches, returncode): + packager = sign.PackageSigningRunner("x", "y", "z") + maintainer = identity.GPGIdentity(packager) + util = sign.DirectorySigningUtil("PATH", maintainer, "LOG") + patched = patches( + "os", + "subprocess", + "DirectorySigningUtil.sign_command", + ("PackageSigningRunner.log", dict(new_callable=PropertyMock)), + ("DirectorySigningUtil.package_type", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + util.log = MagicMock() + + with patched as (m_os, m_subproc, m_command, m_log, m_type): + m_subproc.run.return_value.returncode = returncode + if returncode: + with pytest.raises(sign.SigningError) as e: + util.sign_pkg("PACKAGE") + else: + assert not util.sign_pkg("PACKAGE") + + assert ( + list(m_os.path.basename.call_args) + == [('PACKAGE',), {}]) + assert ( + list(util.log.notice.call_args) + == [(f"Sign package ({m_type.return_value}): {m_os.path.basename.return_value}",), {}]) + assert ( + list(m_command.call_args) + == [('PACKAGE',), {}]) + assert ( + list(m_subproc.run.call_args) + == [(m_command.return_value,), + {'capture_output': True, + 'encoding': 'utf-8'}]) + + if not returncode: + assert ( + 
list(util.log.success.call_args) + == [(f"Signed package ({m_type.return_value}): {m_os.path.basename.return_value}",), {}]) + return + assert e.value.args[0] == m_subproc.run.return_value.stdout + m_subproc.run.return_value.stderr + + +@pytest.mark.parametrize("ext", ["EXT1", "EXT2"]) +@pytest.mark.parametrize("package_type", [None, "", "TYPE1", "TYPE2"]) +def test_util_package_type(ext, package_type): + packager = sign.PackageSigningRunner("x", "y", "z") + maintainer = identity.GPGIdentity(packager) + util = sign.DirectorySigningUtil("PATH", maintainer, "LOG") + util.ext = ext + util._package_type = package_type + assert util.package_type == package_type or ext + + +@pytest.mark.parametrize( + "files", + [[], + ["abc", "xyz"], + ["abc.EXT", "xyz.EXT", "abc.FOO", "abc.BAR"], + ["abc.NOTEXT", "xyz.NOTEXT"]]) +def test_util_pkg_files(patches, files): + packager = sign.PackageSigningRunner("x", "y", "z") + maintainer = identity.GPGIdentity(packager) + util = sign.DirectorySigningUtil("PATH", maintainer, "LOG") + patched = patches( + "os", + ("DirectorySigningUtil.ext", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + with patched as (m_os, m_ext): + m_ext.return_value = "EXT" + m_os.listdir.return_value = files + result = util.pkg_files + + expected = [fname for fname in files if fname.endswith(".EXT")] + + assert ( + list(m_os.listdir.call_args) + == [("PATH",), {}]) + if not expected: + assert not m_os.path.join.called + assert not result + else: + assert ( + result + == tuple( + m_os.path.join.return_value + for fname in expected)) + assert ( + list(list(c) for c in m_os.path.join.call_args_list) + == [[("PATH", fname), {}] + for fname in expected]) + + assert "pkg_files" not in util.__dict__ + + +# PackageSigningRunner + +def test_packager_constructor(): + packager = sign.PackageSigningRunner("x", "y", "z") + assert isinstance(packager, runner.Runner) + assert packager.maintainer_class == identity.GPGIdentity + assert 
packager._signing_utils == () + + +def test_packager_cls_register_util(): + assert sign.PackageSigningRunner._signing_utils == () + + class Util1(object): + pass + + class Util2(object): + pass + + sign.PackageSigningRunner.register_util("util1", Util1) + assert ( + sign.PackageSigningRunner._signing_utils + == (('util1', Util1),)) + + sign.PackageSigningRunner.register_util("util2", Util2) + assert ( + sign.PackageSigningRunner._signing_utils + == (('util1', Util1), + ('util2', Util2),)) + + +def test_packager_extract(patches): + packager = sign.PackageSigningRunner("x", "y", "z") + patched = patches( + ("PackageSigningRunner.args", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as (m_args, ): + assert packager.extract == m_args.return_value.extract + + assert "extract" not in packager.__dict__ + + +def test_packager_maintainer(patches): + packager = sign.PackageSigningRunner("x", "y", "z") + patched = patches( + ("PackageSigningRunner.log", dict(new_callable=PropertyMock)), + ("PackageSigningRunner.maintainer_class", dict(new_callable=PropertyMock)), + ("PackageSigningRunner.maintainer_email", dict(new_callable=PropertyMock)), + ("PackageSigningRunner.maintainer_name", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as (m_log, m_class, m_email, m_name): + assert packager.maintainer == m_class.return_value.return_value + + assert ( + list(m_class.return_value.call_args) + == [(m_name.return_value, m_email.return_value, m_log.return_value), {}]) + + assert "maintainer" in packager.__dict__ + + +def test_packager_maintainer_email(patches): + packager = sign.PackageSigningRunner("x", "y", "z") + patched = patches( + ("PackageSigningRunner.args", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as (m_args, ): + assert packager.maintainer_email == m_args.return_value.maintainer_email + + assert "maintainer_email" not in packager.__dict__ + + +def 
test_packager_maintainer_name(patches): + packager = sign.PackageSigningRunner("x", "y", "z") + + patched = patches( + ("PackageSigningRunner.args", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as (m_args, ): + assert packager.maintainer_name == m_args.return_value.maintainer_name + + assert "maintainer_name" not in packager.__dict__ + + +def test_packager_package_type(patches): + packager = sign.PackageSigningRunner("x", "y", "z") + + patched = patches( + ("PackageSigningRunner.args", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as (m_args, ): + assert packager.package_type == m_args.return_value.package_type + + assert "package_type" not in packager.__dict__ + + +def test_packager_path(patches): + packager = sign.PackageSigningRunner("x", "y", "z") + + patched = patches( + ("PackageSigningRunner.args", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as (m_args, ): + assert packager.path == m_args.return_value.path + + assert "path" not in packager.__dict__ + + +def test_packager_tar(patches): + packager = sign.PackageSigningRunner("x", "y", "z") + patched = patches( + ("PackageSigningRunner.args", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as (m_args, ): + assert packager.tar == m_args.return_value.tar + + assert "tar" not in packager.__dict__ + + +def test_packager_signing_utils(): + packager = sign.PackageSigningRunner("x", "y", "z") + _utils = (("NAME1", "UTIL1"), ("NAME2", "UTIL2")) + packager._signing_utils = _utils + assert packager.signing_utils == dict(_utils) + + +def test_packager_add_arguments(): + packager = sign.PackageSigningRunner("x", "y", "z") + parser = MagicMock() + packager.add_arguments(parser) + assert ( + list(list(c) for c in parser.add_argument.call_args_list) + == [[('--log-level', '-l'), + {'choices': ['debug', 'info', 'warn', 'error'], + 'default': 'info', + 'help': 'Log 
level to display'}], + [('path',), + {'default': '', + 'help': 'Path to the directory containing packages to sign'}], + [('--extract',), + {'action': 'store_true', + 'help': 'If set, treat the path as a tarball containing directories ' + 'according to package_type'}], + [('--tar',), + {'help': 'Path to save the signed packages as tar file'}], + [('--type',), + {'choices': ['util1', 'util2', ''], + 'default': '', + 'help': 'Package type to sign'}], + [('--maintainer-name',), + {'default': '', 'help': 'Maintainer name to match when searching for a GPG key to match with'}], + [('--maintainer-email',), + {'default': '', + 'help': 'Maintainer email to match when searching for a GPG key to match with'}]]) + + +def test_packager_archive(patches): + packager = sign.PackageSigningRunner("x", "y", "z") + patched = patches( + "tarfile", + ("PackageSigningRunner.tar", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as (m_tarfile, m_tar): + assert not packager.archive("PATH") + + assert ( + list(m_tarfile.open.call_args) + == [(m_tar.return_value, 'w'), {}]) + assert ( + list(m_tarfile.open.return_value.__enter__.return_value.add.call_args) + == [('PATH',), {'arcname': '.'}]) + + +def test_packager_get_signing_util(patches): + packager = sign.PackageSigningRunner("x", "y", "z") + patched = patches( + ("PackageSigningRunner.log", dict(new_callable=PropertyMock)), + ("PackageSigningRunner.maintainer", dict(new_callable=PropertyMock)), + ("PackageSigningRunner.signing_utils", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as (m_log, m_maintainer, m_utils): + assert packager.get_signing_util("UTIL", "PATH") == m_utils.return_value.__getitem__.return_value.return_value + + assert ( + list(m_utils.return_value.__getitem__.call_args) + == [("UTIL",), {}]) + assert ( + list(m_utils.return_value.__getitem__.return_value.call_args) + == [("PATH", m_maintainer.return_value, m_log.return_value), {}]) + + 
+@pytest.mark.parametrize("extract", [True, False]) +@pytest.mark.parametrize("raises", [None, Exception, identity.GPGError, sign.SigningError]) +def test_packager_run(patches, extract, raises): + packager = sign.PackageSigningRunner("x", "y", "z") + patched = patches( + "PackageSigningRunner.sign_tarball", + "PackageSigningRunner.sign_directory", + ("PackageSigningRunner.extract", dict(new_callable=PropertyMock)), + ("PackageSigningRunner.log", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as (m_tarb, m_dir, m_extract, m_log): + m_extract.return_value = extract + if raises: + _error = raises("AN ERROR OCCURRED") + m_extract.side_effect = _error + + if raises == Exception: + with pytest.raises(raises): + packager.run() + else: + assert packager.run() == (1 if raises else None) + + if raises: + assert not m_tarb.called + assert not m_dir.called + assert not m_log.return_value.success.called + + if raises == Exception: + return + assert ( + list(m_log.return_value.error.call_args) + == [(str(_error),), {}]) + return + + assert ( + list(m_log.return_value.success.call_args) + == [('Successfully signed packages',), {}]) + + if extract: + assert ( + list(m_tarb.call_args) + == [(), {}]) + assert not m_dir.called + return + assert not m_tarb.called + assert ( + list(m_dir.call_args) + == [(), {}]) + + +def test_packager_sign(patches): + packager = sign.PackageSigningRunner("x", "y", "z") + patched = patches( + "PackageSigningRunner.get_signing_util", + ("PackageSigningRunner.log", dict(new_callable=PropertyMock)), + ("PackageSigningRunner.maintainer", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as (m_util, m_log, m_maintainer): + assert not packager.sign("PACKAGE_TYPE", "PATH") + + assert ( + list(m_log.return_value.notice.call_args) + == [(f"Signing PACKAGE_TYPEs ({m_maintainer.return_value}) PATH",), {}]) + assert ( + list(m_util.call_args) + == [('PACKAGE_TYPE', 'PATH'), {}]) + assert 
( + list(m_util.return_value.sign.call_args) + == [(), {}]) + + +@pytest.mark.parametrize("utils", [[], ["a", "b", "c"]]) +@pytest.mark.parametrize("listdir", [[], ["a", "b"], ["b", "c"], ["c", "d"]]) +def test_packager_sign_all(patches, listdir, utils): + packager = sign.PackageSigningRunner("x", "y", "z") + patched = patches( + "os", + "PackageSigningRunner.sign", + ("PackageSigningRunner.signing_utils", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as (m_os, m_sign, m_utils): + m_os.listdir.return_value = listdir + m_utils.return_value = utils + assert not packager.sign_all("PATH") + assert ( + list(m_os.listdir.call_args) + == [('PATH',), {}]) + expected = [x for x in listdir if x in utils] + assert ( + list(list(c) for c in m_os.path.join.call_args_list) + == [[('PATH', k), {}] for k in expected]) + assert ( + list(list(c) for c in m_sign.call_args_list) + == [[(k, m_os.path.join.return_value), {}] for k in expected]) + + +@pytest.mark.parametrize("tar", [True, False]) +def test_packager_sign_directory(patches, tar): + packager = sign.PackageSigningRunner("x", "y", "z") + patched = patches( + "PackageSigningRunner.archive", + "PackageSigningRunner.sign", + ("PackageSigningRunner.package_type", dict(new_callable=PropertyMock)), + ("PackageSigningRunner.path", dict(new_callable=PropertyMock)), + ("PackageSigningRunner.tar", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as (m_archive, m_sign, m_type, m_path, m_tar): + m_tar.return_value = tar + assert not packager.sign_directory() + + assert ( + list(m_sign.call_args) + == [(m_type.return_value, m_path.return_value), {}]) + if not tar: + assert not m_archive.called + return + + assert ( + list(m_archive.call_args) + == [(m_path.return_value, ), {}]) + + +@pytest.mark.parametrize("tar", [True, False]) +def test_packager_sign_tarball(patches, tar): + packager = sign.PackageSigningRunner("x", "y", "z") + patched = patches( + "utils", + 
"PackageSigningRunner.archive", + "PackageSigningRunner.sign_all", + ("PackageSigningRunner.path", dict(new_callable=PropertyMock)), + ("PackageSigningRunner.tar", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as (m_utils, m_archive, m_sign, m_path, m_tar): + m_tar.return_value = tar + if not tar: + with pytest.raises(sign.SigningError) as e: + packager.sign_tarball() + else: + assert not packager.sign_tarball() + + if not tar: + assert ( + e.value.args[0] + == 'You must set a `--tar` file to save to when `--extract` is set') + assert not m_utils.untar.called + assert not m_sign.called + assert not m_archive.called + return + + assert ( + list(m_utils.untar.call_args) + == [(m_path.return_value,), {}]) + assert ( + list(m_sign.call_args) + == [(m_utils.untar.return_value.__enter__.return_value,), {}]) + assert ( + list(m_archive.call_args) + == [(m_utils.untar.return_value.__enter__.return_value,), {}]) + + +# RPMMacro + +@pytest.mark.parametrize("overwrite", [[], None, True, False]) +@pytest.mark.parametrize("kwargs", [{}, dict(K1="V1", K2="V2")]) +def test_rpmmacro_constructor(patches, overwrite, kwargs): + rpmmacro = ( + sign.RPMMacro("HOME", overwrite=overwrite, **kwargs) + if overwrite != [] + else sign.RPMMacro("HOME", **kwargs)) + assert rpmmacro._macro_filename == ".rpmmacros" + assert rpmmacro.home == "HOME" + assert rpmmacro.overwrite == bool(overwrite or False) + assert rpmmacro.kwargs == kwargs + assert rpmmacro.template == sign.RPMMACRO_TEMPLATE + + +def test_rpmmacro_path(patches): + rpmmacro = sign.RPMMacro("HOME") + patched = patches( + "os", + prefix="tools.distribution.sign") + with patched as (m_os, ): + assert rpmmacro.path == m_os.path.join.return_value + + assert ( + list(m_os.path.join.call_args) + == [('HOME', rpmmacro._macro_filename), {}]) + + +@pytest.mark.parametrize("kwargs", [{}, dict(K1="V1", K2="V2")]) +def test_rpmmacro_macro(patches, kwargs): + rpmmacro = sign.RPMMacro("HOME", **kwargs) + 
patched = patches( + ("RPMMacro.template", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + with patched as (m_template, ): + result = rpmmacro.macro + + expected = m_template.return_value + for k, v in kwargs.items(): + assert ( + list(expected.replace.call_args) + == [(f"__{k.upper()}__", v), {}]) + expected = expected.replace.return_value + + assert result == expected + assert "macro" not in rpmmacro.__dict__ + + +@pytest.mark.parametrize("overwrite", [True, False]) +@pytest.mark.parametrize("exists", [True, False]) +def test_rpmmacro_write(patches, overwrite, exists): + rpmmacro = sign.RPMMacro("HOME") + patched = patches( + "open", + "os", + ("RPMMacro.macro", dict(new_callable=PropertyMock)), + ("RPMMacro.path", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + rpmmacro.overwrite = overwrite + + with patched as (m_open, m_os, m_macro, m_path): + m_os.path.exists.return_value = exists + assert not rpmmacro.write() + + if not overwrite: + assert ( + list(m_os.path.exists.call_args) + == [(m_path.return_value,), {}]) + else: + assert not m_os.path.join.called + assert not m_os.exists.join.called + + if not overwrite and exists: + assert not m_open.called + return + + assert ( + list(m_open.call_args) + == [(m_path.return_value, 'w'), {}]) + assert ( + list(m_open.return_value.__enter__.return_value.write.call_args) + == [(m_macro.return_value,), {}]) + + +# RPMSigningUtil + +@pytest.mark.parametrize("args", [(), ("ARG1", "ARG2")]) +@pytest.mark.parametrize("kwargs", [{}, dict(K1="V1", K2="V2")]) +def test_rpmsign_constructor(patches, args, kwargs): + packager = sign.PackageSigningRunner("x", "y", "z") + maintainer = identity.GPGIdentity(packager) + patched = patches( + "RPMSigningUtil.setup", + "DirectorySigningUtil.__init__", + prefix="tools.distribution.sign") + + with patched as (m_setup, m_super): + rpmsign = sign.RPMSigningUtil("PATH", maintainer, *args, **kwargs) + + assert isinstance(rpmsign, 
sign.DirectorySigningUtil) + assert rpmsign.ext == "rpm" + assert rpmsign.command_name == "rpmsign" + assert ( + list(m_setup.call_args) + == [(), {}]) + assert ( + list(m_super.call_args) + == [('PATH', maintainer) + args, kwargs]) + assert rpmsign.rpmmacro == sign.RPMMacro + + +@pytest.mark.parametrize("gpg2", [True, False]) +def test_rpmsign_command(patches, gpg2): + maintainer = identity.GPGIdentity() + patched = patches( + "os", + "RPMSigningUtil.__init__", + ("DirectorySigningUtil.command", dict(new_callable=PropertyMock)), + ("identity.GPGIdentity.gpg_bin", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as (m_os, m_init, m_super, m_gpg): + m_os.path.basename.return_value = "gpg2" if gpg2 else "notgpg2" + m_init.return_value = None + rpmsign = sign.RPMSigningUtil("PATH", maintainer, "LOG") + rpmsign.maintainer = maintainer + + if gpg2: + assert rpmsign.command == m_super.return_value + else: + with pytest.raises(sign.SigningError) as e: + rpmsign.command + + assert ( + e.value.args[0] + == 'GPG2 is required to sign RPM packages') + + assert ( + list(m_os.path.basename.call_args) + == [(m_gpg.return_value,), {}]) + if gpg2: + assert "command" in rpmsign.__dict__ + else: + assert "command" not in rpmsign.__dict__ + + +def test_rpmsign_command_args(patches): + packager = sign.PackageSigningRunner("x", "y", "z") + maintainer = identity.GPGIdentity(packager) + patched = patches( + "RPMSigningUtil.setup", + ("identity.GPGIdentity.fingerprint", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as (m_setup, m_fingerprint): + rpmsign = sign.RPMSigningUtil("PATH", maintainer, "LOG") + assert ( + rpmsign.command_args + == ("--key-id", m_fingerprint.return_value, + "--addsign")) + + assert "command_args" in rpmsign.__dict__ + + +class DummyRPMSigningUtil(sign.RPMSigningUtil): + + def __init__(self, path, maintainer): + self.path = path + self.maintainer = maintainer + + +def 
test_rpmsign_setup(patches): + packager = sign.PackageSigningRunner("x", "y", "z") + maintainer = MagicMock() + + rpmsign = DummyRPMSigningUtil("PATH", maintainer) + + patched = patches( + ("RPMSigningUtil.rpmmacro", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as (m_macro, ): + assert not rpmsign.setup() + + assert ( + list(m_macro.return_value.call_args) + == [(maintainer.home,), + {'maintainer': maintainer.name, + 'gpg_bin': maintainer.gpg_bin, + 'gpg_config': maintainer.gnupg_home}]) + + +def test_rpmsign_sign_pkg(patches): + packager = sign.PackageSigningRunner("x", "y", "z") + maintainer = identity.GPGIdentity(packager) + rpmsign = DummyRPMSigningUtil("PATH", maintainer) + patched = patches( + "os", + "DirectorySigningUtil.sign_pkg", + prefix="tools.distribution.sign") + + with patched as (m_os, m_sign): + assert not rpmsign.sign_pkg("FILE") + + assert ( + list(m_os.chmod.call_args) + == [('FILE', 0o755), {}]) + assert ( + list(m_sign.call_args) + == [('FILE',), {}]) + + +# DebChangesFiles + +def test_changes_constructor(): + changes = sign.DebChangesFiles("SRC") + assert changes.src == "SRC" + + +def test_changes_dunder_iter(patches): + changes = sign.DebChangesFiles("SRC") + + patched = patches( + "os", + ("DebChangesFiles.files", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + _files = ["FILE1", "FILE2", "FILE3"] + + with patched as (m_os, m_files): + m_files.return_value = _files + result = changes.__iter__() + assert list(result) == _files + + assert isinstance(result, types.GeneratorType) + assert ( + list(m_os.unlink.call_args) + == [('SRC',), {}]) + + +@pytest.mark.parametrize( + "lines", + [([], None), + (["FOO", "BAR"], None), + (["FOO", "BAR", "Distribution: distro1"], "distro1"), + (["FOO", "BAR", "Distribution: distro1 distro2"], "distro1 distro2"), + (["FOO", "BAR", "Distribution: distro1 distro2", "BAZ"], "distro1 distro2"), + (["FOO", "BAR", "", "Distribution: distro1 distro2"], 
None)]) +def test_changes_distributions(patches, lines): + lines, expected = lines + changes = sign.DebChangesFiles("SRC") + patched = patches( + "open", + prefix="tools.distribution.sign") + + class DummyFile(object): + line = 0 + + def __init__(self, lines): + self.lines = lines + + def readline(self): + if len(self.lines) > self.line: + line = self.lines[self.line] + self.line += 1 + return line + + _file = DummyFile(lines) + + with patched as (m_open, ): + m_open.return_value.__enter__.return_value.readline.side_effect = _file.readline + if expected: + assert changes.distributions == expected + else: + with pytest.raises(sign.SigningError) as e: + changes.distributions + assert ( + e.value.args[0] + == "Did not find Distribution field in changes file SRC") + + if "" in lines: + lines = lines[:lines.index("")] + + if expected: + breakon = 0 + for line in lines: + if line.startswith("Distribution:"): + break + breakon += 1 + lines = lines[:breakon] + count = len(lines) + 1 + assert ( + list(list(c) for c in m_open.return_value.__enter__.return_value.readline.call_args_list) + == [[(), {}]] * count) + + +def test_changes_files(patches): + changes = sign.DebChangesFiles("SRC") + + patched = patches( + "DebChangesFiles.changes_file", + ("DebChangesFiles.distributions", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as (m_changes, m_distros): + m_distros.return_value = "DISTRO1 DISTRO2 DISTRO3" + result = changes.files + assert list(result) == [m_changes.return_value] * 3 + + assert isinstance(result, types.GeneratorType) + assert ( + list(list(c) for c in m_changes.call_args_list) + == [[('DISTRO1',), {}], + [('DISTRO2',), {}], + [('DISTRO3',), {}]]) + + +def test_changes_changes_file(patches): + changes = sign.DebChangesFiles("SRC") + patched = patches( + "open", + "DebChangesFiles.changes_file_path", + ("DebChangesFiles.distributions", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as 
(m_open, m_path, m_distros): + assert ( + changes.changes_file("DISTRO") + == m_path.return_value) + + assert ( + list(m_path.call_args) + == [('DISTRO',), {}]) + assert ( + list(list(c) for c in m_open.call_args_list) + == [[(m_path.return_value, 'w'), {}], + [('SRC',), {}]]) + assert ( + list(m_open.return_value.__enter__.return_value.write.call_args) + == [(m_open.return_value.__enter__.return_value.read.return_value.replace.return_value,), {}]) + assert ( + list(m_open.return_value.__enter__.return_value.read.call_args) + == [(), {}]) + assert ( + list(m_open.return_value.__enter__.return_value.read.return_value.replace.call_args) + == [(m_distros.return_value, 'DISTRO'), {}]) + + +@pytest.mark.parametrize( + "path", + [("SRC", "SRC.DISTRO.changes"), + ("SRC.changes", "SRC.DISTRO.changes"), + ("SRC.FOO.BAR.changes", "SRC.FOO.BAR.DISTRO.changes")]) +def test_changes_file_path(path): + path, expected = path + changes = sign.DebChangesFiles(path) + assert changes.changes_file_path("DISTRO") == expected + + +# DebSigningUtil + +@pytest.mark.parametrize("args", [(), ("ARG1", ), ("ARG2", )]) +def test_debsign_constructor(patches, args): + packager = sign.PackageSigningRunner("x", "y", "z") + maintainer = identity.GPGIdentity(packager) + debsign = sign.DebSigningUtil("PATH", maintainer, "LOG", *args) + + assert isinstance(debsign, sign.DirectorySigningUtil) + assert debsign.ext == "changes" + assert debsign.command_name == "debsign" + assert debsign._package_type == "deb" + assert debsign.changes_files == sign.DebChangesFiles + assert debsign.path == "PATH" + assert debsign.maintainer == maintainer + assert debsign.log == "LOG" + + +def test_debsign_command_args(patches): + packager = sign.PackageSigningRunner("x", "y", "z") + maintainer = identity.GPGIdentity(packager) + patched = patches( + ("identity.GPGIdentity.fingerprint", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as (m_fingerprint, ): + debsign = 
sign.DebSigningUtil("PATH", maintainer, "LOG") + assert ( + debsign.command_args + == ("-k", m_fingerprint.return_value)) + + assert "command_args" in debsign.__dict__ + + +def test_debsign_pkg_files(patches): + packager = sign.PackageSigningRunner("x", "y", "z") + maintainer = identity.GPGIdentity(packager) + debsign = sign.DebSigningUtil("PATH", maintainer, "LOG") + patched = patches( + "chain", + ("DirectorySigningUtil.pkg_files", dict(new_callable=PropertyMock)), + ("DebSigningUtil.changes_files", dict(new_callable=PropertyMock)), + prefix="tools.distribution.sign") + + with patched as (m_chain, m_pkg, m_changes): + m_pkg.return_value = ("FILE1", "FILE2", "FILE3") + m_chain.from_iterable.side_effect = lambda _iter: list(_iter) + assert ( + debsign.pkg_files + == (m_changes.return_value.return_value, ) * 3) + + assert m_chain.from_iterable.called + assert ( + list(list(c) for c in m_changes.return_value.call_args_list) + == [[('FILE1',), {}], [('FILE2',), {}], [('FILE3',), {}]]) + + +# Module + +def test_sign_main(patches, command_main): + patched = patches( + "_register_utils", + prefix="tools.distribution.sign") + + with patched as (m_reg, ): + command_main( + sign.main, + "tools.distribution.sign.PackageSigningRunner") + + assert ( + list(m_reg.call_args) + == [(), {}]) + + +def test_sign_register_utils(patches, command_main): + patched = patches( + "PackageSigningRunner.register_util", + prefix="tools.distribution.sign") + + with patched as (m_reg, ): + sign._register_utils() + + assert ( + list(list(c) for c in m_reg.call_args_list) + == [[('deb', sign.DebSigningUtil), {}], + [('rpm', sign.RPMSigningUtil), {}]]) diff --git a/tools/gpg/identity.py b/tools/gpg/identity.py index 179adece3874e..9d8a51aa278ce 100644 --- a/tools/gpg/identity.py +++ b/tools/gpg/identity.py @@ -1,6 +1,7 @@ import logging import os import pwd +import shutil from functools import cached_property from email.utils import formataddr, parseaddr from typing import Optional @@ -45,6 +46,10 
@@ def fingerprint(self) -> str: def gpg(self) -> gnupg.GPG: return gnupg.GPG() + @cached_property + def gpg_bin(self) -> str: + return shutil.which("gpg2") or shutil.which("gpg") + @property def gnupg_home(self) -> str: return os.path.join(self.home, ".gnupg") diff --git a/tools/gpg/tests/test_identity.py b/tools/gpg/tests/test_identity.py index d8ccc8280207a..7e191d929cf0a 100644 --- a/tools/gpg/tests/test_identity.py +++ b/tools/gpg/tests/test_identity.py @@ -94,6 +94,34 @@ def test_identity_gnupg_home(patches): assert "gnupg_home" not in gpg.__dict__ +@pytest.mark.parametrize("gpg", [None, "GPG"]) +@pytest.mark.parametrize("gpg2", [None, "GPG2"]) +def test_identity_gpg_bin(patches, gpg, gpg2): + gpg = identity.GPGIdentity() + patched = patches( + "shutil", + prefix="tools.gpg.identity") + + def _get_bin(_cmd): + if _cmd == "gpg2" and gpg2: + return gpg2 + if _cmd == "gpg" and gpg: + return gpg + + with patched as (m_shutil, ): + m_shutil.which.side_effect = _get_bin + assert gpg.gpg_bin == gpg2 or gpg + + if gpg2: + assert ( + list(list(c) for c in m_shutil.which.call_args_list) + == [[('gpg2',), {}]]) + return + assert ( + list(list(c) for c in m_shutil.which.call_args_list) + == [[('gpg2',), {}], [('gpg',), {}]]) + + def test_identity_home(patches): gpg = identity.GPGIdentity() patched = patches( From 463eda2f3895fbb24c1fcd085a15d00d12a94079 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Wed, 21 Jul 2021 13:16:05 -0400 Subject: [PATCH 25/57] config: extending usage of EnvoyMobileHttpConnectionManager (#17407) #17110 the EnvoyMobileHttpConnectionManager but it turns out it's instantiated either as a standard network filter (needs a name) or as an API listener (which only accepted HttpConnectionManager) Fixing both issues. 
Risk Level: low Testing: unit tests Docs Changes: n/a Release Notes: n/a Part of envoyproxy/envoy-mobile#1540 Signed-off-by: Alyssa Wilk --- .../config/listener/v3/api_listener.proto | 1 + .../listener/v4alpha/api_listener.proto | 1 + .../v3/http_connection_manager.proto | 1 + .../v4alpha/http_connection_manager.proto | 1 + .../config/listener/v3/api_listener.proto | 1 + .../listener/v4alpha/api_listener.proto | 1 + .../v3/http_connection_manager.proto | 1 + .../v4alpha/http_connection_manager.proto | 1 + source/extensions/extensions_build_config.bzl | 44 +++++++++---------- source/extensions/extensions_metadata.yaml | 5 +++ .../network/http_connection_manager/config.cc | 9 +++- .../network/http_connection_manager/config.h | 3 +- .../filters/network/well_known_names.h | 3 ++ source/server/api_listener_impl.cc | 18 ++++++-- .../envoy_mobile_http_connection_manager_1 | 21 +++++++++ .../common/fuzz/uber_per_readfilter.cc | 18 +++++++- test/server/api_listener_test.cc | 35 +++++++++++++++ tools/extensions/extensions_check.py | 5 ++- tools/protodoc/protodoc.py | 4 +- 19 files changed, 141 insertions(+), 32 deletions(-) create mode 100644 test/extensions/filters/network/common/fuzz/network_readfilter_corpus/envoy_mobile_http_connection_manager_1 diff --git a/api/envoy/config/listener/v3/api_listener.proto b/api/envoy/config/listener/v3/api_listener.proto index 1dc94edc74b9c..77db7caaff5c0 100644 --- a/api/envoy/config/listener/v3/api_listener.proto +++ b/api/envoy/config/listener/v3/api_listener.proto @@ -23,6 +23,7 @@ message ApiListener { // The type in this field determines the type of API listener. 
At present, the following // types are supported: // envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager (HTTP) + // envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager (HTTP) // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the // specific config message for each type of API listener. We could not do this in v2 because // it would have caused circular dependencies for go protos: lds.proto depends on this file, diff --git a/api/envoy/config/listener/v4alpha/api_listener.proto b/api/envoy/config/listener/v4alpha/api_listener.proto index e78cf7e7c81bc..518caf879ad5e 100644 --- a/api/envoy/config/listener/v4alpha/api_listener.proto +++ b/api/envoy/config/listener/v4alpha/api_listener.proto @@ -23,6 +23,7 @@ message ApiListener { // The type in this field determines the type of API listener. At present, the following // types are supported: // envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager (HTTP) + // envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager (HTTP) // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the // specific config message for each type of API listener. 
We could not do this in v2 because // it would have caused circular dependencies for go protos: lds.proto depends on this file, diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 48d4ce4ee0641..959906d880cd1 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -1008,6 +1008,7 @@ message RequestIDExtension { // [#protodoc-title: Envoy Mobile HTTP connection manager] // HTTP connection manager for use in Envoy mobile. +// [#extension: envoy.filters.network.envoy_mobile_http_connection_manager] message EnvoyMobileHttpConnectionManager { // The configuration for the underlying HttpConnectionManager which will be // instantiated for Envoy mobile. diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 307d4f2055e20..bf3cc9ef34a49 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -993,6 +993,7 @@ message RequestIDExtension { // [#protodoc-title: Envoy Mobile HTTP connection manager] // HTTP connection manager for use in Envoy mobile. +// [#extension: envoy.filters.network.envoy_mobile_http_connection_manager] message EnvoyMobileHttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3." 
diff --git a/generated_api_shadow/envoy/config/listener/v3/api_listener.proto b/generated_api_shadow/envoy/config/listener/v3/api_listener.proto index 1dc94edc74b9c..77db7caaff5c0 100644 --- a/generated_api_shadow/envoy/config/listener/v3/api_listener.proto +++ b/generated_api_shadow/envoy/config/listener/v3/api_listener.proto @@ -23,6 +23,7 @@ message ApiListener { // The type in this field determines the type of API listener. At present, the following // types are supported: // envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager (HTTP) + // envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager (HTTP) // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the // specific config message for each type of API listener. We could not do this in v2 because // it would have caused circular dependencies for go protos: lds.proto depends on this file, diff --git a/generated_api_shadow/envoy/config/listener/v4alpha/api_listener.proto b/generated_api_shadow/envoy/config/listener/v4alpha/api_listener.proto index e78cf7e7c81bc..518caf879ad5e 100644 --- a/generated_api_shadow/envoy/config/listener/v4alpha/api_listener.proto +++ b/generated_api_shadow/envoy/config/listener/v4alpha/api_listener.proto @@ -23,6 +23,7 @@ message ApiListener { // The type in this field determines the type of API listener. At present, the following // types are supported: // envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager (HTTP) + // envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager (HTTP) // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the // specific config message for each type of API listener. 
We could not do this in v2 because // it would have caused circular dependencies for go protos: lds.proto depends on this file, diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index ae058212a7c3e..533340aaf194b 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -1020,6 +1020,7 @@ message RequestIDExtension { // [#protodoc-title: Envoy Mobile HTTP connection manager] // HTTP connection manager for use in Envoy mobile. +// [#extension: envoy.filters.network.envoy_mobile_http_connection_manager] message EnvoyMobileHttpConnectionManager { // The configuration for the underlying HttpConnectionManager which will be // instantiated for Envoy mobile. diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 6dc2d7dc88b01..8d2799e750d61 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -1017,6 +1017,7 @@ message RequestIDExtension { // [#protodoc-title: Envoy Mobile HTTP connection manager] // HTTP connection manager for use in Envoy mobile. +// [#extension: envoy.filters.network.envoy_mobile_http_connection_manager] message EnvoyMobileHttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3." 
diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index 8ee2ef00e95e7..7ea4c9c24e1f0 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -125,28 +125,28 @@ EXTENSIONS = { # Network filters # - "envoy.filters.network.client_ssl_auth": "//source/extensions/filters/network/client_ssl_auth:config", - "envoy.filters.network.connection_limit": "//source/extensions/filters/network/connection_limit:config", - "envoy.filters.network.direct_response": "//source/extensions/filters/network/direct_response:config", - "envoy.filters.network.dubbo_proxy": "//source/extensions/filters/network/dubbo_proxy:config", - "envoy.filters.network.echo": "//source/extensions/filters/network/echo:config", - "envoy.filters.network.ext_authz": "//source/extensions/filters/network/ext_authz:config", - "envoy.filters.network.http_connection_manager": "//source/extensions/filters/network/http_connection_manager:config", - "envoy.filters.network.kafka_broker": "//source/extensions/filters/network/kafka:kafka_broker_config_lib", - "envoy.filters.network.local_ratelimit": "//source/extensions/filters/network/local_ratelimit:config", - "envoy.filters.network.mongo_proxy": "//source/extensions/filters/network/mongo_proxy:config", - "envoy.filters.network.mysql_proxy": "//source/extensions/filters/network/mysql_proxy:config", - "envoy.filters.network.postgres_proxy": "//source/extensions/filters/network/postgres_proxy:config", - "envoy.filters.network.ratelimit": "//source/extensions/filters/network/ratelimit:config", - "envoy.filters.network.rbac": "//source/extensions/filters/network/rbac:config", - "envoy.filters.network.redis_proxy": "//source/extensions/filters/network/redis_proxy:config", - "envoy.filters.network.rocketmq_proxy": "//source/extensions/filters/network/rocketmq_proxy:config", - "envoy.filters.network.tcp_proxy": 
"//source/extensions/filters/network/tcp_proxy:config", - "envoy.filters.network.thrift_proxy": "//source/extensions/filters/network/thrift_proxy:config", - "envoy.filters.network.sni_cluster": "//source/extensions/filters/network/sni_cluster:config", - "envoy.filters.network.sni_dynamic_forward_proxy": "//source/extensions/filters/network/sni_dynamic_forward_proxy:config", - "envoy.filters.network.wasm": "//source/extensions/filters/network/wasm:config", - "envoy.filters.network.zookeeper_proxy": "//source/extensions/filters/network/zookeeper_proxy:config", + "envoy.filters.network.client_ssl_auth": "//source/extensions/filters/network/client_ssl_auth:config", + "envoy.filters.network.connection_limit": "//source/extensions/filters/network/connection_limit:config", + "envoy.filters.network.direct_response": "//source/extensions/filters/network/direct_response:config", + "envoy.filters.network.dubbo_proxy": "//source/extensions/filters/network/dubbo_proxy:config", + "envoy.filters.network.echo": "//source/extensions/filters/network/echo:config", + "envoy.filters.network.ext_authz": "//source/extensions/filters/network/ext_authz:config", + "envoy.filters.network.http_connection_manager": "//source/extensions/filters/network/http_connection_manager:config", + "envoy.filters.network.kafka_broker": "//source/extensions/filters/network/kafka:kafka_broker_config_lib", + "envoy.filters.network.local_ratelimit": "//source/extensions/filters/network/local_ratelimit:config", + "envoy.filters.network.mongo_proxy": "//source/extensions/filters/network/mongo_proxy:config", + "envoy.filters.network.mysql_proxy": "//source/extensions/filters/network/mysql_proxy:config", + "envoy.filters.network.postgres_proxy": "//source/extensions/filters/network/postgres_proxy:config", + "envoy.filters.network.ratelimit": "//source/extensions/filters/network/ratelimit:config", + "envoy.filters.network.rbac": "//source/extensions/filters/network/rbac:config", + 
"envoy.filters.network.redis_proxy": "//source/extensions/filters/network/redis_proxy:config", + "envoy.filters.network.rocketmq_proxy": "//source/extensions/filters/network/rocketmq_proxy:config", + "envoy.filters.network.tcp_proxy": "//source/extensions/filters/network/tcp_proxy:config", + "envoy.filters.network.thrift_proxy": "//source/extensions/filters/network/thrift_proxy:config", + "envoy.filters.network.sni_cluster": "//source/extensions/filters/network/sni_cluster:config", + "envoy.filters.network.sni_dynamic_forward_proxy": "//source/extensions/filters/network/sni_dynamic_forward_proxy:config", + "envoy.filters.network.wasm": "//source/extensions/filters/network/wasm:config", + "envoy.filters.network.zookeeper_proxy": "//source/extensions/filters/network/zookeeper_proxy:config", # # UDP filters diff --git a/source/extensions/extensions_metadata.yaml b/source/extensions/extensions_metadata.yaml index 31cdfad79124c..5dda2861d1292 100644 --- a/source/extensions/extensions_metadata.yaml +++ b/source/extensions/extensions_metadata.yaml @@ -338,6 +338,11 @@ envoy.filters.network.http_connection_manager: - envoy.filters.network security_posture: robust_to_untrusted_downstream status: stable +envoy.filters.network.envoy_mobile_http_connection_manager: + categories: + - envoy.filters.network + security_posture: robust_to_untrusted_downstream + status: stable envoy.filters.network.kafka_broker: categories: - envoy.filters.network diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 4d9c4128fc8ff..4e5b9c4757d78 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -818,7 +818,8 @@ std::function HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto( const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& 
proto_config, - Server::Configuration::FactoryContext& context, Network::ReadFilterCallbacks& read_callbacks) { + Server::Configuration::FactoryContext& context, Network::ReadFilterCallbacks& read_callbacks, + bool clear_hop_by_hop_headers) { Utility::Singletons singletons = Utility::createSingletons(context); @@ -831,11 +832,15 @@ HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto( // reference count. // Keep in mind the lambda capture list **doesn't** determine the destruction order, but it's fine // as these captured objects are also global singletons. - return [singletons, filter_config, &context, &read_callbacks]() -> Http::ApiListenerPtr { + return [singletons, filter_config, &context, &read_callbacks, + clear_hop_by_hop_headers]() -> Http::ApiListenerPtr { auto conn_manager = std::make_unique( *filter_config, context.drainDecision(), context.api().randomGenerator(), context.httpContext(), context.runtime(), context.localInfo(), context.clusterManager(), context.overloadManager(), context.dispatcher().timeSource()); + if (!clear_hop_by_hop_headers) { + conn_manager->setClearHopByHopResponseHeaders(false); + } // This factory creates a new ConnectionManagerImpl in the absence of its usual environment as // an L4 filter, so this factory needs to take a few actions. 
diff --git a/source/extensions/filters/network/http_connection_manager/config.h b/source/extensions/filters/network/http_connection_manager/config.h index b23fd8194c49f..4cf54d0618a8d 100644 --- a/source/extensions/filters/network/http_connection_manager/config.h +++ b/source/extensions/filters/network/http_connection_manager/config.h @@ -323,7 +323,8 @@ class HttpConnectionManagerFactory { static std::function createHttpConnectionManagerFactoryFromProto( const envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& proto_config, - Server::Configuration::FactoryContext& context, Network::ReadFilterCallbacks& read_callbacks); + Server::Configuration::FactoryContext& context, Network::ReadFilterCallbacks& read_callbacks, + bool clear_hop_by_hop_headers); }; /** diff --git a/source/extensions/filters/network/well_known_names.h b/source/extensions/filters/network/well_known_names.h index 2bfddcf7d0922..dc2027c542c74 100644 --- a/source/extensions/filters/network/well_known_names.h +++ b/source/extensions/filters/network/well_known_names.h @@ -24,6 +24,9 @@ class NetworkFilterNameValues { const std::string RocketmqProxy = "envoy.filters.network.rocketmq_proxy"; // Dubbo proxy filter const std::string DubboProxy = "envoy.filters.network.dubbo_proxy"; + // Envoy mobile http connection manager. 
+ const std::string EnvoyMobileHttpConnectionManager = + "envoy.filters.network.http_connection_manager"; // HTTP connection manager filter const std::string HttpConnectionManager = "envoy.filters.network.http_connection_manager"; // Local rate limit filter diff --git a/source/server/api_listener_impl.cc b/source/server/api_listener_impl.cc index 7005ec11d0f25..e9963392cca5c 100644 --- a/source/server/api_listener_impl.cc +++ b/source/server/api_listener_impl.cc @@ -32,13 +32,25 @@ void ApiListenerImplBase::SyntheticReadCallbacks::SyntheticConnection::raiseConn HttpApiListener::HttpApiListener(const envoy::config::listener::v3::Listener& config, ListenerManagerImpl& parent, const std::string& name) : ApiListenerImplBase(config, parent, name) { + TRY_ASSERT_MAIN_THREAD auto typed_config = MessageUtil::anyConvertAndValidate< - envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager>( - config.api_listener().api_listener(), factory_context_.messageValidationVisitor()); + envoy::extensions::filters::network::http_connection_manager::v3:: + EnvoyMobileHttpConnectionManager>(config.api_listener().api_listener(), + factory_context_.messageValidationVisitor()); http_connection_manager_factory_ = Envoy::Extensions::NetworkFilters::HttpConnectionManager:: HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto( - typed_config, factory_context_, read_callbacks_); + typed_config.config(), factory_context_, read_callbacks_, false); + END_TRY + catch (const EnvoyException& e) { + auto typed_config = MessageUtil::anyConvertAndValidate< + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager>( + config.api_listener().api_listener(), factory_context_.messageValidationVisitor()); + + http_connection_manager_factory_ = Envoy::Extensions::NetworkFilters::HttpConnectionManager:: + HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto( + typed_config, factory_context_, 
read_callbacks_, true); + } } Http::ApiListenerOptRef HttpApiListener::http() { diff --git a/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/envoy_mobile_http_connection_manager_1 b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/envoy_mobile_http_connection_manager_1 new file mode 100644 index 0000000000000..45e88160b44c4 --- /dev/null +++ b/test/extensions/filters/network/common/fuzz/network_readfilter_corpus/envoy_mobile_http_connection_manager_1 @@ -0,0 +1,21 @@ +config { + name: "envoy.filters.network.http_connection_manager" + typed_config { + type_url: "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager" + value: "\022\002B\001\"\000J\004(\001J\000z\002\010\001\220\001\001" + } +} +actions { + on_data { + data: "y" + } +} +actions { + on_new_connection { + } +} +actions { + advance_time { + milliseconds: 655360 + } +} diff --git a/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc b/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc index d2cea6371550a..7c70fa1bd44e4 100644 --- a/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc +++ b/test/extensions/filters/network/common/fuzz/uber_per_readfilter.cc @@ -26,6 +26,7 @@ std::vector UberFilterFuzzer::filterNames() { Server::Configuration::NamedNetworkFilterConfigFactory>::factories(); const std::vector supported_filter_names = { NetworkFilterNames::get().ClientSslAuth, NetworkFilterNames::get().ExtAuthorization, + NetworkFilterNames::get().EnvoyMobileHttpConnectionManager, // A dedicated http_connection_manager fuzzer can be found in // test/common/http/conn_manager_impl_fuzz_test.cc NetworkFilterNames::get().HttpConnectionManager, NetworkFilterNames::get().LocalRateLimit, @@ -80,7 +81,8 @@ void UberFilterFuzzer::perFilterSetup(const std::string& filter_name) { pipe_addr_); 
read_filter_callbacks_->connection_.stream_info_.downstream_address_provider_->setRemoteAddress( pipe_addr_); - } else if (filter_name == NetworkFilterNames::get().HttpConnectionManager) { + } else if (filter_name == NetworkFilterNames::get().HttpConnectionManager || + filter_name == NetworkFilterNames::get().EnvoyMobileHttpConnectionManager) { read_filter_callbacks_->connection_.stream_info_.downstream_address_provider_->setLocalAddress( pipe_addr_); read_filter_callbacks_->connection_.stream_info_.downstream_address_provider_->setRemoteAddress( @@ -152,6 +154,20 @@ void UberFilterFuzzer::checkInvalidInputForFuzzer(const std::string& filter_name "http_conn_manager trying to use Quiche which we won't fuzz here. Config:\n{}", config.DebugString())); } + } else if (filter_name == NetworkFilterNames::get().EnvoyMobileHttpConnectionManager) { + envoy::extensions::filters::network::http_connection_manager::v3:: + EnvoyMobileHttpConnectionManager& config = + dynamic_cast(*config_message); + if (config.config().codec_type() == + envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager:: + HTTP3) { + // Quiche is still in progress and http_conn_manager has a dedicated fuzzer. + // So we won't fuzz it here with complex mocks. + throw EnvoyException(absl::StrCat("envoy_mobile_http_conn_manager trying to use Quiche which " + "we won't fuzz here. 
Config:\n{}", + config.DebugString())); + } } } diff --git a/test/server/api_listener_test.cc b/test/server/api_listener_test.cc index 4572e40a1ce6f..f1c269a8f8c17 100644 --- a/test/server/api_listener_test.cc +++ b/test/server/api_listener_test.cc @@ -65,6 +65,41 @@ name: test_api_listener ASSERT_TRUE(http_api_listener.http().has_value()); } +TEST_F(ApiListenerTest, MobileApiListener) { + const std::string yaml = R"EOF( +name: test_api_listener +address: + socket_address: + address: 127.0.0.1 + port_value: 1234 +api_listener: + api_listener: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.EnvoyMobileHttpConnectionManager + config: + stat_prefix: hcm + route_config: + name: api_router + virtual_hosts: + - name: api + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: dynamic_forward_proxy_cluster + )EOF"; + + const envoy::config::listener::v3::Listener config = parseListenerFromV3Yaml(yaml); + server_.server_factory_context_->cluster_manager_.initializeClusters( + {"dynamic_forward_proxy_cluster"}, {}); + auto http_api_listener = HttpApiListener(config, *listener_manager_, config.name()); + + ASSERT_EQ("test_api_listener", http_api_listener.name()); + ASSERT_EQ(ApiListener::Type::HttpApiListener, http_api_listener.type()); + ASSERT_TRUE(http_api_listener.http().has_value()); +} + TEST_F(ApiListenerTest, HttpApiListenerThrowsWithBadConfig) { const std::string yaml = R"EOF( name: test_api_listener diff --git a/tools/extensions/extensions_check.py b/tools/extensions/extensions_check.py index b3e0b6865c608..7f12c3e3de46f 100644 --- a/tools/extensions/extensions_check.py +++ b/tools/extensions/extensions_check.py @@ -134,7 +134,10 @@ def check_registered(self) -> None: missing_metadata = self.all_extensions - set(self.metadata.keys()) for extension in only_metadata: - self.error("registered", [f"Metadata for unused extension found: {extension}"]) + # Skip envoy_mobile_http_connection_manager as it is built 
with + # http_connection_manager + if extension != "envoy.filters.network.envoy_mobile_http_connection_manager": + self.error("registered", [f"Metadata for unused extension found: {extension}"]) for extension in missing_metadata: self.error("registered", [f"Metadata missing for extension: {extension}"]) diff --git a/tools/protodoc/protodoc.py b/tools/protodoc/protodoc.py index 2edcd91031491..2e6c5296bb1be 100755 --- a/tools/protodoc/protodoc.py +++ b/tools/protodoc/protodoc.py @@ -247,8 +247,8 @@ def format_extension(extension): categories = extension_metadata["categories"] except KeyError as e: sys.stderr.write( - f"\n\nDid you forget to add '{extension}' to source/extensions/extensions_build_config.bzl?\n\n" - ) + f"\n\nDid you forget to add '{extension}' to source/extensions/extensions_build_config.bzl " + "or source/extensions/extensions_metadata.yaml?\n\n") exit(1) # Raising the error buries the above message in tracebacks. return EXTENSION_TEMPLATE.render( From 271e67488b57bb600a1352c35aa9a7b8f1158441 Mon Sep 17 00:00:00 2001 From: Greg Brail Date: Wed, 21 Jul 2021 19:20:26 -0700 Subject: [PATCH 26/57] ext_proc: Implement STREAMED body processing mode (#17069) Implement this mode, which allows an external processor to receive the body one chunk at a time and examine or modify the body while it is being processed. Risk Level: Medium. The mode is only engaged if used. However an external processor must be coded carefully, particularly if it expects to stream both request and response bodies simultaneously, since each will proceed in its own ordering. Testing: New unit and integration tests. Docs Changes: Updated proto docs to reflect that the mode is now supported. Release Notes: External processing servers may now use the STREAMED processing mode. In this mode, chunks of the body are forwarded to the external processing server when they arrive. 
Depending on how the upstream system is implemented, request body chunks may be delivered before or after the response headers, and request and response body chunks may be interleaved if the upstream system delivers them that way. An external processor should be carefully coded so that it does not assume that a particular ordering will be implemented. Signed-off-by: Gregory Brail --- .../http/ext_proc/v3alpha/ext_proc.proto | 9 +- .../http/ext_proc/v3alpha/ext_proc.proto | 9 +- .../filters/http/ext_proc/ext_proc.cc | 108 +++- .../filters/http/ext_proc/ext_proc.h | 11 +- .../filters/http/ext_proc/processor_state.cc | 156 +++++- .../filters/http/ext_proc/processor_state.h | 68 ++- test/extensions/filters/http/ext_proc/BUILD | 2 + .../filters/http/ext_proc/filter_test.cc | 429 +++++++++++++++- .../filters/http/ext_proc/ordering_test.cc | 36 ++ .../ext_proc/streaming_integration_test.cc | 477 +++++++++++++++--- .../filters/http/ext_proc/test_processor.cc | 14 + .../filters/http/ext_proc/test_processor.h | 22 +- test/extensions/filters/http/ext_proc/utils.h | 10 + 13 files changed, 1206 insertions(+), 145 deletions(-) diff --git a/api/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto b/api/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto index 76fa69198dfe4..352403ad5b20d 100644 --- a/api/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto +++ b/api/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto @@ -26,18 +26,19 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // The filter will send the "request_headers" and "response_headers" messages by default. // In addition, if the "processing mode" is set , the "request_body" and "response_body" // messages will be sent if the corresponding fields of the "processing_mode" are -// set to BUFFERED, and trailers will be sent if the corresponding fields are set -// to SEND. 
The other body processing modes are not +// set to BUFFERED or STREAMED, and trailers will be sent if the corresponding fields are set +// to SEND. The BUFFERED_PARTIAL body processing mode is not // implemented yet. The filter will also respond to "immediate_response" messages // at any point in the stream. // As designed, the filter supports up to six different processing steps, which are in the // process of being implemented: +// // * Request headers: IMPLEMENTED -// * Request body: Only BUFFERED mode is implemented +// * Request body: BUFFERED_PARTIAL processing mode is not yet implemented // * Request trailers: IMPLEMENTED // * Response headers: IMPLEMENTED -// * Response body: Only BUFFERED mode is implemented +// * Response body: BUFFERED_PARTIAL processing mode is not yet implemented // * Response trailers: IMPLEMENTED // The filter communicates with an external gRPC service that can use it to do a variety of things diff --git a/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto b/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto index 76fa69198dfe4..352403ad5b20d 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.proto @@ -26,18 +26,19 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // The filter will send the "request_headers" and "response_headers" messages by default. // In addition, if the "processing mode" is set , the "request_body" and "response_body" // messages will be sent if the corresponding fields of the "processing_mode" are -// set to BUFFERED, and trailers will be sent if the corresponding fields are set -// to SEND. The other body processing modes are not +// set to BUFFERED or STREAMED, and trailers will be sent if the corresponding fields are set +// to SEND. 
The BUFFERED_PARTIAL body processing mode is not // implemented yet. The filter will also respond to "immediate_response" messages // at any point in the stream. // As designed, the filter supports up to six different processing steps, which are in the // process of being implemented: +// // * Request headers: IMPLEMENTED -// * Request body: Only BUFFERED mode is implemented +// * Request body: BUFFERED_PARTIAL processing mode is not yet implemented // * Request trailers: IMPLEMENTED // * Response headers: IMPLEMENTED -// * Response body: Only BUFFERED mode is implemented +// * Response body: BUFFERED_PARTIAL processing mode is not yet implemented // * Response trailers: IMPLEMENTED // The filter communicates with an external gRPC service that can use it to do a variety of things diff --git a/source/extensions/filters/http/ext_proc/ext_proc.cc b/source/extensions/filters/http/ext_proc/ext_proc.cc index 2e55d836ac4fc..05e7efd5eac8e 100644 --- a/source/extensions/filters/http/ext_proc/ext_proc.cc +++ b/source/extensions/filters/http/ext_proc/ext_proc.cc @@ -82,6 +82,7 @@ FilterHeadersStatus Filter::onHeaders(ProcessorState& state, ENVOY_LOG(debug, "Sending headers message"); stream_->send(std::move(req), false); stats_.stream_msgs_sent_.inc(); + state.setPaused(true); return FilterHeadersStatus::StopIteration; } @@ -105,18 +106,15 @@ FilterDataStatus Filter::onData(ProcessorState& state, Buffer::Instance& data, b if (end_stream) { state.setCompleteBodyAvailable(true); } - if (state.bodyReplaced()) { ENVOY_LOG(trace, "Clearing body chunk because CONTINUE_AND_REPLACE was returned"); data.drain(data.length()); return FilterDataStatus::Continue; } - if (processing_complete_) { ENVOY_LOG(trace, "Continuing (processing complete)"); return FilterDataStatus::Continue; } - bool just_added_trailers = false; Http::HeaderMap* new_trailers = nullptr; if (end_stream && state.sendTrailers()) { @@ -129,19 +127,36 @@ FilterDataStatus Filter::onData(ProcessorState& state, 
Buffer::Instance& data, b state.setTrailersAvailable(true); just_added_trailers = true; } - if (state.callbackState() == ProcessorState::CallbackState::HeadersCallback) { ENVOY_LOG(trace, "Header processing still in progress -- holding body data"); // We don't know what to do with the body until the response comes back. // We must buffer it in case we need it when that happens. if (end_stream) { + state.setPaused(true); return FilterDataStatus::StopIterationAndBuffer; } else { // Raise a watermark to prevent a buffer overflow until the response comes back. + state.setPaused(true); state.requestWatermark(); return FilterDataStatus::StopIterationAndWatermark; } } + if (state.callbackState() == ProcessorState::CallbackState::StreamedBodyCallbackFinishing) { + // We were previously streaming the body, but there are more chunks waiting + // to be processed, so we can't send the body yet. + // Move the data for our chunk into a queue so that we can re-inject it later + // when the processor returns. See the comments below for more details on how + // this works in general. 
+ ENVOY_LOG(trace, "Enqueuing data while we wait for processing to finish"); + state.enqueueStreamingChunk(data, end_stream, false); + if (end_stream) { + // But we need to buffer the last chunk because it's our last chance to do stuff + state.setPaused(true); + return FilterDataStatus::StopIterationNoBuffer; + } else { + return FilterDataStatus::Continue; + } + } FilterDataStatus result; switch (state.bodyMode()) { @@ -160,19 +175,60 @@ FilterDataStatus Filter::onData(ProcessorState& state, Buffer::Instance& data, b // The body has been buffered and we need to send the buffer ENVOY_LOG(debug, "Sending request body message"); state.addBufferedData(data); - sendBodyChunk(state, *state.bufferedData(), true); + sendBodyChunk(state, *state.bufferedData(), + ProcessorState::CallbackState::BufferedBodyCallback, true); // Since we just just moved the data into the buffer, return NoBuffer // so that we do not buffer this chunk twice. + state.setPaused(true); result = FilterDataStatus::StopIterationNoBuffer; break; } - ENVOY_LOG(trace, "onData: Buffering"); + state.setPaused(true); result = FilterDataStatus::StopIterationAndBuffer; break; + case ProcessingMode::STREAMED: { + // STREAMED body mode works as follows: + // + // 1) As data callbacks come in to the filter, it "moves" the data into a new buffer, which it + // dispatches via gRPC message to the external processor, and then keeps in a queue. It + // may request a watermark if the queue is higher than the buffer limit to prevent running + // out of memory. + // 2) As a result, filters farther down the chain see empty buffers in some data callbacks. + // 3) When a response comes back from the external processor, it injects the processor's result + // into the filter chain using "inject**codedData". (The processor may respond indicating that + // there is no change, which means that the original buffer stored in the queue is what gets + // injected.) 
+ // + // This way, we pipeline data from the proxy to the external processor, and give the processor + // the ability to modify each chunk, in order. Doing this any other way would have required + // substantial changes to the filter manager. See + // https://github.com/envoyproxy/envoy/issues/16760 for a discussion. + switch (openStream()) { + case StreamOpenState::Error: + return FilterDataStatus::StopIterationNoBuffer; + case StreamOpenState::IgnoreError: + return FilterDataStatus::Continue; + case StreamOpenState::Ok: + // Fall through + break; + } + // Send the chunk on the gRPC stream + sendBodyChunk(state, data, ProcessorState::CallbackState::StreamedBodyCallback, end_stream); + // Move the data to the queue and optionally raise the watermark. + state.enqueueStreamingChunk(data, end_stream, true); + // At this point we will continue, but with no data, because that will come later + if (end_stream) { + // But we need to buffer the last chunk because it's our last chance to do stuff + state.setPaused(true); + result = FilterDataStatus::StopIterationNoBuffer; + } else { + result = FilterDataStatus::Continue; + } + break; + } case ProcessingMode::BUFFERED_PARTIAL: - case ProcessingMode::STREAMED: ENVOY_LOG(debug, "Ignoring unimplemented request body processing mode"); result = FilterDataStatus::Continue; break; @@ -181,7 +237,6 @@ FilterDataStatus Filter::onData(ProcessorState& state, Buffer::Instance& data, b result = FilterDataStatus::Continue; break; } - if (just_added_trailers) { // If we get here, then we need to send the trailers message now switch (openStream()) { @@ -193,8 +248,8 @@ FilterDataStatus Filter::onData(ProcessorState& state, Buffer::Instance& data, b // Fall through break; } - sendTrailers(state, *new_trailers); + state.setPaused(true); return FilterDataStatus::StopIterationAndBuffer; } return result; @@ -221,6 +276,7 @@ FilterTrailersStatus Filter::onTrailers(ProcessorState& state, Http::HeaderMap& if (state.callbackState() == 
ProcessorState::CallbackState::HeadersCallback || state.callbackState() == ProcessorState::CallbackState::BufferedBodyCallback) { ENVOY_LOG(trace, "Previous callback still executing -- holding header iteration"); + state.setPaused(true); return FilterTrailersStatus::StopIteration; } @@ -228,7 +284,8 @@ FilterTrailersStatus Filter::onTrailers(ProcessorState& state, Http::HeaderMap& // We would like to process the body in a buffered way, but until now the complete // body has not arrived. With the arrival of trailers, we now know that the body // has arrived. - sendBufferedData(state, true); + sendBufferedData(state, ProcessorState::CallbackState::BufferedBodyCallback, true); + state.setPaused(true); return FilterTrailersStatus::StopIteration; } @@ -248,6 +305,7 @@ FilterTrailersStatus Filter::onTrailers(ProcessorState& state, Http::HeaderMap& } sendTrailers(state, trailers); + state.setPaused(true); return FilterTrailersStatus::StopIteration; } @@ -288,9 +346,10 @@ FilterTrailersStatus Filter::encodeTrailers(ResponseTrailerMap& trailers) { return status; } -void Filter::sendBodyChunk(ProcessorState& state, const Buffer::Instance& data, bool end_stream) { +void Filter::sendBodyChunk(ProcessorState& state, const Buffer::Instance& data, + ProcessorState::CallbackState new_state, bool end_stream) { ENVOY_LOG(debug, "Sending a body chunk of {} bytes", data.length()); - state.setCallbackState(ProcessorState::CallbackState::BufferedBodyCallback); + state.setCallbackState(new_state); state.startMessageTimer(std::bind(&Filter::onMessageTimeout, this), config_->messageTimeout()); ProcessingRequest req; auto* body_req = state.mutableBody(req); @@ -313,6 +372,7 @@ void Filter::sendTrailers(ProcessorState& state, const Http::HeaderMap& trailers void Filter::onReceiveMessage(std::unique_ptr&& r) { if (processing_complete_) { + ENVOY_LOG(debug, "Ignoring stream message received after processing complete"); // Ignore additional messages after we decided we were done with the stream 
return; } @@ -329,6 +389,7 @@ void Filter::onReceiveMessage(std::unique_ptr&& r) { encoding_state_.setProcessingMode(response->mode_override()); } + ENVOY_LOG(debug, "Received {} response", responseCaseToString(response->response_case())); switch (response->response_case()) { case ProcessingResponse::ResponseCase::kRequestHeaders: message_handled = decoding_state_.handleHeadersResponse(response->request_headers()); @@ -357,6 +418,8 @@ void Filter::onReceiveMessage(std::unique_ptr&& r) { break; default: // Any other message is considered spurious + ENVOY_LOG(debug, "Received unknown stream message {} -- ignoring and marking spurious", + response->response_case()); break; } @@ -463,6 +526,27 @@ void Filter::sendImmediateResponse(const ImmediateResponse& response) { mutate_headers, grpc_status, response.details()); } +std::string responseCaseToString(const ProcessingResponse::ResponseCase response_case) { + switch (response_case) { + case ProcessingResponse::ResponseCase::kRequestHeaders: + return "request headers"; + case ProcessingResponse::ResponseCase::kResponseHeaders: + return "response headers"; + case ProcessingResponse::ResponseCase::kRequestBody: + return "request body"; + case ProcessingResponse::ResponseCase::kResponseBody: + return "response body"; + case ProcessingResponse::ResponseCase::kRequestTrailers: + return "request trailers"; + case ProcessingResponse::ResponseCase::kResponseTrailers: + return "response trailers"; + case ProcessingResponse::ResponseCase::kImmediateResponse: + return "immediate response"; + default: + return "unknown"; + } +} + } // namespace ExternalProcessing } // namespace HttpFilters } // namespace Extensions diff --git a/source/extensions/filters/http/ext_proc/ext_proc.h b/source/extensions/filters/http/ext_proc/ext_proc.h index a3754db99aba0..a01732874c74a 100644 --- a/source/extensions/filters/http/ext_proc/ext_proc.h +++ b/source/extensions/filters/http/ext_proc/ext_proc.h @@ -118,9 +118,12 @@ class Filter : public 
Logger::Loggable, void onMessageTimeout(); - void sendBufferedData(ProcessorState& state, bool end_stream) { - sendBodyChunk(state, *state.bufferedData(), end_stream); + void sendBufferedData(ProcessorState& state, ProcessorState::CallbackState new_state, + bool end_stream) { + sendBodyChunk(state, *state.bufferedData(), new_state, end_stream); } + void sendBodyChunk(ProcessorState& state, const Buffer::Instance& data, + ProcessorState::CallbackState new_state, bool end_stream); void sendTrailers(ProcessorState& state, const Http::HeaderMap& trailers); @@ -130,7 +133,6 @@ class Filter : public Logger::Loggable, void cleanUpTimers(); void clearAsyncState(); void sendImmediateResponse(const envoy::service::ext_proc::v3alpha::ImmediateResponse& response); - void sendBodyChunk(ProcessorState& state, const Buffer::Instance& data, bool end_stream); Http::FilterHeadersStatus onHeaders(ProcessorState& state, Http::RequestOrResponseHeaderMap& headers, bool end_stream); @@ -159,6 +161,9 @@ class Filter : public Logger::Loggable, bool sent_immediate_response_ = false; }; +extern std::string responseCaseToString( + const envoy::service::ext_proc::v3alpha::ProcessingResponse::ResponseCase response_case); + } // namespace ExternalProcessing } // namespace HttpFilters } // namespace Extensions diff --git a/source/extensions/filters/http/ext_proc/processor_state.cc b/source/extensions/filters/http/ext_proc/processor_state.cc index 5fd79fa82d2f1..9a20f72d817fa 100644 --- a/source/extensions/filters/http/ext_proc/processor_state.cc +++ b/source/extensions/filters/http/ext_proc/processor_state.cc @@ -11,6 +11,7 @@ namespace HttpFilters { namespace ExternalProcessing { using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode; +using envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode_BodySendMode; using envoy::service::ext_proc::v3alpha::BodyResponse; using envoy::service::ext_proc::v3alpha::CommonResponse; @@ -33,7 +34,6 @@ bool 
ProcessorState::handleHeadersResponse(const HeadersResponse& response) { filter_callbacks_->clearRouteCache(); } callback_state_ = CallbackState::Idle; - clearWatermark(); message_timer_->disableTimer(); if (common_response.status() == CommonResponse::CONTINUE_AND_REPLACE) { @@ -59,6 +59,7 @@ bool ProcessorState::handleHeadersResponse(const HeadersResponse& response) { // or response to the processor. Clear flags to make sure. body_mode_ = ProcessingMode::NONE; send_trailers_ = false; + clearWatermark(); } else { if (body_mode_ == ProcessingMode::BUFFERED) { @@ -67,19 +68,47 @@ bool ProcessorState::handleHeadersResponse(const HeadersResponse& response) { // was complete, and the server wants the body. So, don't continue filter // processing, but send the buffered request body now. ENVOY_LOG(debug, "Sending buffered request body message"); - filter_.sendBufferedData(*this, true); + filter_.sendBufferedData(*this, ProcessorState::CallbackState::BufferedBodyCallback, + true); } - // Otherwise, we're not ready to continue processing because then // we won't be able to modify the headers any more, so do nothing and // let the doData callback handle body chunks until the end is reached. + clearWatermark(); + return true; + } else if (body_mode_ == ProcessingMode::STREAMED) { + if (complete_body_available_) { + // All data came in before headers callback, so act just as if we were buffering + // since effectively this is the same thing. + ENVOY_LOG(debug, "Sending buffered body data for whole message"); + filter_.sendBufferedData(*this, ProcessorState::CallbackState::BufferedBodyCallback, + true); + clearWatermark(); + return true; + } + if (hasBufferedData()) { + // We now know that we need to process what we have buffered in streaming mode. + // Move the current buffer into the queue for remote processing and clear the + // buffered data. 
+ Buffer::OwnedImpl buffered_chunk; + modifyBufferedData( + [&buffered_chunk](Buffer::Instance& data) { buffered_chunk.move(data); }); + ENVOY_LOG(debug, "Sending first chunk using buffered data"); + filter_.sendBodyChunk(*this, buffered_chunk, + ProcessorState::CallbackState::StreamedBodyCallback, false); + enqueueStreamingChunk(buffered_chunk, false, true); + } + if (queueBelowLowLimit()) { + clearWatermark(); + } + continueIfNecessary(); return true; } - if (send_trailers_ && trailers_available_) { // Trailers came in while we were waiting for this response, and the server // is not interested in the body, so send them now. filter_.sendTrailers(*this, *trailers_); + clearWatermark(); return true; } } @@ -87,24 +116,62 @@ bool ProcessorState::handleHeadersResponse(const HeadersResponse& response) { // If we got here, then the processor doesn't care about the body or is not ready for // trailers, so we can just continue. headers_ = nullptr; - continueProcessing(); + continueIfNecessary(); + clearWatermark(); return true; } return false; } bool ProcessorState::handleBodyResponse(const BodyResponse& response) { - if (callback_state_ == CallbackState::BufferedBodyCallback) { - ENVOY_LOG(debug, "Applying body response to buffered data"); - modifyBufferedData([this, &response](Buffer::Instance& data) { - MutationUtils::applyCommonBodyResponse(response, headers_, data); - }); + bool should_continue = false; + if (callback_state_ == CallbackState::BufferedBodyCallback || + callback_state_ == CallbackState::StreamedBodyCallback || + callback_state_ == CallbackState::StreamedBodyCallbackFinishing) { + ENVOY_LOG(debug, "Processing body response"); + if (callback_state_ == CallbackState::BufferedBodyCallback) { + ENVOY_LOG(debug, "Applying body response to buffered data. 
State = {}", callback_state_); + modifyBufferedData([this, &response](Buffer::Instance& data) { + MutationUtils::applyCommonBodyResponse(response, headers_, data); + }); + clearWatermark(); + callback_state_ = CallbackState::Idle; + should_continue = true; + + } else if (callback_state_ == CallbackState::StreamedBodyCallback || + callback_state_ == CallbackState::StreamedBodyCallbackFinishing) { + bool delivered_one = false; + while (auto queued_chunk = dequeueStreamingChunk(delivered_one)) { + // Loop through queue in case some of it is chunks that were never + // delivered because the processing mode changed. + auto chunk = std::move(*queued_chunk); + if (chunk->delivered) { + ENVOY_LOG(debug, "Applying body response to chunk of data. Size = {}", + chunk->data.length()); + MutationUtils::applyCommonBodyResponse(response, nullptr, chunk->data); + delivered_one = true; + // After we have delivered one chunk, don't process anything + // more from the queue unless it was never sent to the server. 
+ } + should_continue = chunk->end_stream; + if (chunk->data.length() > 0) { + ENVOY_LOG(trace, "Injecting {} bytes of data to filter stream", chunk->data.length()); + injectDataToFilterChain(chunk->data, false); + } + } + if (queueBelowLowLimit()) { + clearWatermark(); + } + if (chunks_for_processing_.empty()) { + callback_state_ = CallbackState::Idle; + } + } + if (response.response().clear_route_cache()) { filter_callbacks_->clearRouteCache(); } - headers_ = nullptr; - callback_state_ = CallbackState::Idle; message_timer_->disableTimer(); + headers_ = nullptr; if (send_trailers_ && trailers_available_) { // Trailers came in while we were waiting for this response, and the server @@ -113,9 +180,12 @@ bool ProcessorState::handleBodyResponse(const BodyResponse& response) { return true; } - continueProcessing(); + if (should_continue) { + continueIfNecessary(); + } return true; } + return false; } @@ -128,18 +198,48 @@ bool ProcessorState::handleTrailersResponse(const TrailersResponse& response) { trailers_ = nullptr; callback_state_ = CallbackState::Idle; message_timer_->disableTimer(); - continueProcessing(); + continueIfNecessary(); return true; } return false; } +void ProcessorState::enqueueStreamingChunk(Buffer::Instance& data, bool end_stream, + bool delivered) { + bytes_enqueued_ += data.length(); + auto next_chunk = std::make_unique(); + next_chunk->data.move(data); + next_chunk->end_stream = end_stream; + next_chunk->delivered = delivered; + chunks_for_processing_.push_back(std::move(next_chunk)); + if (queueOverHighLimit()) { + requestWatermark(); + } +} + +absl::optional ProcessorState::dequeueStreamingChunk(bool undelivered_only) { + if (chunks_for_processing_.empty()) { + return absl::nullopt; + } + if (undelivered_only && chunks_for_processing_.front()->delivered) { + return absl::nullopt; + } + QueuedChunkPtr chunk = std::move(chunks_for_processing_.front()); + chunks_for_processing_.pop_front(); + bytes_enqueued_ -= chunk->data.length(); + return 
chunk; +} + void ProcessorState::clearAsyncState() { cleanUpTimer(); - if (callback_state_ != CallbackState::Idle) { - continueProcessing(); - callback_state_ = CallbackState::Idle; + while (auto queued_chunk = dequeueStreamingChunk(false)) { + auto chunk = std::move(*queued_chunk); + ENVOY_LOG(trace, "Injecting leftover buffer of {} bytes", chunk->data.length()); + injectDataToFilterChain(chunk->data, false); } + clearWatermark(); + continueIfNecessary(); + callback_state_ = CallbackState::Idle; } void ProcessorState::cleanUpTimer() const { @@ -148,12 +248,30 @@ void ProcessorState::cleanUpTimer() const { } } +void ProcessorState::setBodyMode(ProcessingMode_BodySendMode body_mode) { + body_mode_ = body_mode; + if (callback_state_ == CallbackState::StreamedBodyCallback && + body_mode != ProcessingMode::STREAMED) { + // Special handling for when the processing mode is changed while + // streaming. + callback_state_ = CallbackState::StreamedBodyCallbackFinishing; + } +} + +void ProcessorState::continueIfNecessary() { + if (paused_) { + ENVOY_LOG(debug, "Continuing processing"); + paused_ = false; + continueProcessing(); + } +} + void DecodingProcessorState::setProcessingModeInternal(const ProcessingMode& mode) { // Account for the different default behaviors of headers and trailers -- // headers are sent by default and trailers are not. send_headers_ = mode.request_header_mode() != ProcessingMode::SKIP; send_trailers_ = mode.request_trailer_mode() == ProcessingMode::SEND; - body_mode_ = mode.request_body_mode(); + setBodyMode(mode.request_body_mode()); } void DecodingProcessorState::requestWatermark() { @@ -177,7 +295,7 @@ void EncodingProcessorState::setProcessingModeInternal(const ProcessingMode& mod // headers are sent by default and trailers are not. 
send_headers_ = mode.response_header_mode() != ProcessingMode::SKIP; send_trailers_ = mode.response_trailer_mode() == ProcessingMode::SEND; - body_mode_ = mode.response_body_mode(); + setBodyMode(mode.response_body_mode()); } void EncodingProcessorState::requestWatermark() { diff --git a/source/extensions/filters/http/ext_proc/processor_state.h b/source/extensions/filters/http/ext_proc/processor_state.h index 5a22c0ba26f96..d20e2bba5b59d 100644 --- a/source/extensions/filters/http/ext_proc/processor_state.h +++ b/source/extensions/filters/http/ext_proc/processor_state.h @@ -1,5 +1,8 @@ #pragma once +#include +#include + #include "envoy/buffer/buffer.h" #include "envoy/event/timer.h" #include "envoy/extensions/filters/http/ext_proc/v3alpha/processing_mode.pb.h" @@ -7,6 +10,7 @@ #include "envoy/http/header_map.h" #include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "source/common/buffer/buffer_impl.h" #include "source/common/common/logger.h" namespace Envoy { @@ -16,29 +20,50 @@ namespace ExternalProcessing { class Filter; +class QueuedChunk { +public: + // True if this represents the last chunk in the stream + bool end_stream = false; + // True if the chunk was actually sent to the gRPC stream + bool delivered = false; + Buffer::OwnedImpl data; +}; +using QueuedChunkPtr = std::unique_ptr; + class ProcessorState : public Logger::Loggable { public: - // This describes whether the filter is waiting for a response to a gRPC message + // This describes whether the filter is waiting for a response to a gRPC message. + // We use it to determine how to respond to stream messages send back from + // the external processor. enum class CallbackState { // Not waiting for anything Idle, - // Waiting for a "headers" response + // Waiting for a "headers" response. This may be true even if we started + // to receive data frames for a message. 
HeadersCallback, - // Waiting for a "body" response in buffered mode + // Waiting for a "body" response in buffered mode. BufferedBodyCallback, - // and waiting for a "trailers" response + // Waiting for a "body" response in streaming mode. + StreamedBodyCallback, + // Waiting for a "body" response in streaming mode in the special case + // in which the processing mode was changed while there were outstanding + // messages sent to the processor. + StreamedBodyCallbackFinishing, + // Waiting for a "trailers" response. TrailersCallback, }; explicit ProcessorState(Filter& filter) - : filter_(filter), watermark_requested_(false), complete_body_available_(false), - trailers_available_(false), body_replaced_(false) {} + : filter_(filter), watermark_requested_(false), paused_(false), + complete_body_available_(false), trailers_available_(false), body_replaced_(false), + bytes_enqueued_(0) {} ProcessorState(const ProcessorState&) = delete; virtual ~ProcessorState() = default; ProcessorState& operator=(const ProcessorState&) = delete; CallbackState callbackState() const { return callback_state_; } void setCallbackState(CallbackState state) { callback_state_ = state; } + void setPaused(bool paused) { paused_ = paused; } bool completeBodyAvailable() const { return complete_body_available_; } void setCompleteBodyAvailable(bool d) { complete_body_available_ = d; } @@ -69,12 +94,21 @@ class ProcessorState : public Logger::Loggable { bool handleTrailersResponse(const envoy::service::ext_proc::v3alpha::TrailersResponse& response); virtual const Buffer::Instance* bufferedData() const PURE; + bool hasBufferedData() const { return bufferedData() != nullptr && bufferedData()->length() > 0; } virtual void addBufferedData(Buffer::Instance& data) const PURE; virtual void modifyBufferedData(std::function cb) const PURE; + virtual void injectDataToFilterChain(Buffer::Instance& data, bool end_stream) PURE; + virtual uint32_t bufferLimit() const PURE; + + void 
enqueueStreamingChunk(Buffer::Instance& data, bool end_stream, bool delivered); + absl::optional dequeueStreamingChunk(bool undelivered_only); + bool queueOverHighLimit() const { return bytes_enqueued_ > bufferLimit(); } + bool queueBelowLowLimit() const { return bytes_enqueued_ < bufferLimit() / 2; } virtual Http::HeaderMap* addTrailers() PURE; virtual void continueProcessing() const PURE; + void continueIfNecessary(); void clearAsyncState(); virtual envoy::service::ext_proc::v3alpha::HttpHeaders* @@ -85,12 +119,18 @@ class ProcessorState : public Logger::Loggable { mutableTrailers(envoy::service::ext_proc::v3alpha::ProcessingRequest& request) const PURE; protected: + void setBodyMode( + envoy::extensions::filters::http::ext_proc::v3alpha::ProcessingMode_BodySendMode body_mode); + Filter& filter_; Http::StreamFilterCallbacks* filter_callbacks_; CallbackState callback_state_ = CallbackState::Idle; // Keep track of whether we requested a watermark. bool watermark_requested_ : 1; + // Keep track of whether we paused processing and may require + // a "continue." 
+ bool paused_ : 1; // If true, then the filter received the complete body bool complete_body_available_ : 1; @@ -110,6 +150,10 @@ class ProcessorState : public Logger::Loggable { Http::RequestOrResponseHeaderMap* headers_ = nullptr; Http::HeaderMap* trailers_ = nullptr; Event::TimerPtr message_timer_; + // A queue of chunks that were sent in streaming mode + std::deque chunks_for_processing_; + // The total size of chunks in the queue + uint32_t bytes_enqueued_; }; class DecodingProcessorState : public ProcessorState { @@ -140,6 +184,12 @@ class DecodingProcessorState : public ProcessorState { decoder_callbacks_->modifyDecodingBuffer(cb); } + void injectDataToFilterChain(Buffer::Instance& data, bool end_stream) override { + decoder_callbacks_->injectDecodedDataToFilterChain(data, end_stream); + } + + uint32_t bufferLimit() const override { return decoder_callbacks_->decoderBufferLimit(); } + Http::HeaderMap* addTrailers() override { trailers_ = &decoder_callbacks_->addDecodedTrailers(); return trailers_; @@ -205,6 +255,12 @@ class EncodingProcessorState : public ProcessorState { encoder_callbacks_->modifyEncodingBuffer(cb); } + void injectDataToFilterChain(Buffer::Instance& data, bool end_stream) override { + encoder_callbacks_->injectEncodedDataToFilterChain(data, end_stream); + } + + uint32_t bufferLimit() const override { return encoder_callbacks_->encoderBufferLimit(); } + Http::HeaderMap* addTrailers() override { trailers_ = &encoder_callbacks_->addEncodedTrailers(); return trailers_; diff --git a/test/extensions/filters/http/ext_proc/BUILD b/test/extensions/filters/http/ext_proc/BUILD index 09af7b09d7977..779252a19a27c 100644 --- a/test/extensions/filters/http/ext_proc/BUILD +++ b/test/extensions/filters/http/ext_proc/BUILD @@ -106,11 +106,13 @@ envoy_extension_cc_test( extension_names = ["envoy.filters.http.ext_proc"], deps = [ ":test_processor_lib", + ":utils_lib", "//source/common/network:address_lib", "//source/extensions/filters/http/ext_proc:config", 
"//test/common/http:common_lib", "//test/integration:http_integration_lib", "//test/test_common:utility_lib", + "@com_google_absl//absl/strings:str_format", "@envoy_api//envoy/extensions/filters/http/ext_proc/v3alpha:pkg_cc_proto", "@envoy_api//envoy/service/ext_proc/v3alpha:pkg_cc_proto", ], diff --git a/test/extensions/filters/http/ext_proc/filter_test.cc b/test/extensions/filters/http/ext_proc/filter_test.cc index cde78e9e16be0..54a3c1820288c 100644 --- a/test/extensions/filters/http/ext_proc/filter_test.cc +++ b/test/extensions/filters/http/ext_proc/filter_test.cc @@ -45,6 +45,8 @@ using testing::Unused; using namespace std::chrono_literals; +static const uint32_t BufferSize = 100000; + // These tests are all unit tests that directly drive an instance of the // ext_proc filter and verify the behavior using mocks. @@ -63,7 +65,9 @@ class HttpFilterTest : public testing::Test { config_.reset(new FilterConfig(proto_config, 200ms, stats_store_, "")); filter_ = std::make_unique(config_, std::move(client_)); filter_->setEncoderFilterCallbacks(encoder_callbacks_); + EXPECT_CALL(encoder_callbacks_, encoderBufferLimit()).WillRepeatedly(Return(BufferSize)); filter_->setDecoderFilterCallbacks(decoder_callbacks_); + EXPECT_CALL(decoder_callbacks_, decoderBufferLimit()).WillRepeatedly(Return(BufferSize)); HttpTestUtility::addDefaultHeaders(request_headers_); request_headers_.setMethod("POST"); } @@ -79,11 +83,7 @@ class HttpFilterTest : public testing::Test { return stream; } - void doSend(ProcessingRequest&& request, Unused) { - ASSERT_TRUE(last_request_processed_); - last_request_ = std::move(request); - last_request_processed_ = false; - } + void doSend(ProcessingRequest&& request, Unused) { last_request_ = std::move(request); } bool doSendClose() { return !server_closed_stream_; } @@ -107,12 +107,37 @@ class HttpFilterTest : public testing::Test { Invoke([&buf](std::function callback) { callback(buf); })); } + void setUpDecodingWatermarking(bool& watermarked) { + 
EXPECT_CALL(decoder_callbacks_, onDecoderFilterAboveWriteBufferHighWatermark()) + .WillRepeatedly(Invoke([&watermarked]() { + EXPECT_FALSE(watermarked); + watermarked = true; + })); + EXPECT_CALL(decoder_callbacks_, onDecoderFilterBelowWriteBufferLowWatermark()) + .WillRepeatedly(Invoke([&watermarked]() { + EXPECT_TRUE(watermarked); + watermarked = false; + })); + } + + void setUpEncodingWatermarking(bool& watermarked) { + EXPECT_CALL(encoder_callbacks_, onEncoderFilterAboveWriteBufferHighWatermark()) + .WillRepeatedly(Invoke([&watermarked]() { + EXPECT_FALSE(watermarked); + watermarked = true; + })); + EXPECT_CALL(encoder_callbacks_, onEncoderFilterBelowWriteBufferLowWatermark()) + .WillRepeatedly(Invoke([&watermarked]() { + EXPECT_TRUE(watermarked); + watermarked = false; + })); + } + // Expect a request_headers request, and send back a valid response. void processRequestHeaders( bool buffering_data, absl::optional> cb) { - ASSERT_FALSE(last_request_processed_); EXPECT_FALSE(last_request_.async_mode()); ASSERT_TRUE(last_request_.has_request_headers()); const auto& headers = last_request_.request_headers(); @@ -121,7 +146,6 @@ class HttpFilterTest : public testing::Test { if (cb) { (*cb)(headers, *response, *headers_response); } - last_request_processed_ = true; if (!buffering_data) { EXPECT_CALL(decoder_callbacks_, continueDecoding()); } @@ -133,7 +157,6 @@ class HttpFilterTest : public testing::Test { bool buffering_data, absl::optional> cb) { - ASSERT_FALSE(last_request_processed_); EXPECT_FALSE(last_request_.async_mode()); ASSERT_TRUE(last_request_.has_response_headers()); const auto& headers = last_request_.response_headers(); @@ -142,7 +165,6 @@ class HttpFilterTest : public testing::Test { if (cb) { (*cb)(headers, *response, *headers_response); } - last_request_processed_ = true; if (!buffering_data) { EXPECT_CALL(encoder_callbacks_, continueEncoding()); } @@ -151,8 +173,8 @@ class HttpFilterTest : public testing::Test { // Expect a request_body request, 
and send back a valid response void processRequestBody( - absl::optional> cb) { - ASSERT_FALSE(last_request_processed_); + absl::optional> cb, + bool should_continue = true) { EXPECT_FALSE(last_request_.async_mode()); ASSERT_TRUE(last_request_.has_request_body()); const auto& body = last_request_.request_body(); @@ -161,15 +183,16 @@ class HttpFilterTest : public testing::Test { if (cb) { (*cb)(body, *response, *body_response); } - last_request_processed_ = true; - EXPECT_CALL(decoder_callbacks_, continueDecoding()); + if (should_continue) { + EXPECT_CALL(decoder_callbacks_, continueDecoding()); + } stream_callbacks_->onReceiveMessage(std::move(response)); } // Expect a request_body request, and send back a valid response void processResponseBody( - absl::optional> cb) { - ASSERT_FALSE(last_request_processed_); + absl::optional> cb, + bool should_continue = true) { EXPECT_FALSE(last_request_.async_mode()); ASSERT_TRUE(last_request_.has_response_body()); const auto& body = last_request_.response_body(); @@ -178,15 +201,15 @@ class HttpFilterTest : public testing::Test { if (cb) { (*cb)(body, *response, *body_response); } - last_request_processed_ = true; - EXPECT_CALL(encoder_callbacks_, continueEncoding()); + if (should_continue) { + EXPECT_CALL(encoder_callbacks_, continueEncoding()); + } stream_callbacks_->onReceiveMessage(std::move(response)); } std::unique_ptr client_; ExternalProcessorCallbacks* stream_callbacks_ = nullptr; ProcessingRequest last_request_; - bool last_request_processed_ = true; bool server_closed_stream_ = false; NiceMock stats_store_; FilterConfigSharedPtr config_; @@ -429,7 +452,6 @@ TEST_F(HttpFilterTest, PostAndRespondImmediatelyOnResponse) { EXPECT_FALSE(last_request_.async_mode()); ASSERT_TRUE(last_request_.has_response_headers()); - last_request_processed_ = true; Http::TestResponseHeaderMapImpl immediate_response_headers; EXPECT_CALL(encoder_callbacks_, sendLocalReply(Http::Code::BadRequest, "Bad request", _, @@ -817,7 +839,7 @@ 
TEST_F(HttpFilterTest, PostAndChangeBothBodiesBufferedMultiChunk) { } // Using a configuration with streaming set for the request and -// response bodies, we should ignore a "buffered" body mode for now +// response bodies, we should ignore a "buffered partial" body mode for now // because it is not implemented. TEST_F(HttpFilterTest, PostAndIgnoreStreamedBodiesUntilImplemented) { initialize(R"EOF( @@ -827,8 +849,8 @@ TEST_F(HttpFilterTest, PostAndIgnoreStreamedBodiesUntilImplemented) { processing_mode: request_header_mode: "SEND" response_header_mode: "SEND" - request_body_mode: "STREAMED" - response_body_mode: "STREAMED" + request_body_mode: "BUFFERED_PARTIAL" + response_body_mode: "BUFFERED_PARTIAL" request_trailer_mode: "SKIP" response_trailer_mode: "SKIP" )EOF"); @@ -866,6 +888,367 @@ TEST_F(HttpFilterTest, PostAndIgnoreStreamedBodiesUntilImplemented) { EXPECT_EQ(1, config_->stats().streams_closed_.value()); } +// Using a configuration with streaming set for the request and +// response bodies, ensure that the chunks are delivered to the processor and +// that the processor gets them correctly. 
+TEST_F(HttpFilterTest, PostStreamingBodies) { + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_proc_server" + processing_mode: + request_header_mode: "SEND" + response_header_mode: "SEND" + request_body_mode: "STREAMED" + response_body_mode: "STREAMED" + request_trailer_mode: "SKIP" + response_trailer_mode: "SKIP" + )EOF"); + + // Create synthetic HTTP request + HttpTestUtility::addDefaultHeaders(request_headers_, "POST"); + request_headers_.addCopy(LowerCaseString("content-type"), "text/plain"); + request_headers_.addCopy(LowerCaseString("content-length"), 100); + + EXPECT_CALL(decoder_callbacks_, decodingBuffer()).WillRepeatedly(Return(nullptr)); + EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); + processRequestHeaders(false, absl::nullopt); + + bool decoding_watermarked = false; + setUpDecodingWatermarking(decoding_watermarked); + + Buffer::OwnedImpl want_request_body; + Buffer::OwnedImpl got_request_body; + EXPECT_CALL(decoder_callbacks_, injectDecodedDataToFilterChain(_, false)) + .WillRepeatedly(Invoke( + [&got_request_body](Buffer::Instance& data, Unused) { got_request_body.move(data); })); + + Buffer::OwnedImpl req_chunk_1; + TestUtility::feedBufferWithRandomCharacters(req_chunk_1, 100); + want_request_body.add(req_chunk_1.toString()); + EXPECT_EQ(FilterDataStatus::StopIterationNoBuffer, filter_->decodeData(req_chunk_1, true)); + got_request_body.move(req_chunk_1); + processRequestBody(absl::nullopt); + EXPECT_EQ(want_request_body.toString(), got_request_body.toString()); + EXPECT_FALSE(decoding_watermarked); + + response_headers_.addCopy(LowerCaseString(":status"), "200"); + response_headers_.addCopy(LowerCaseString("content-type"), "text/plain"); + response_headers_.addCopy(LowerCaseString("content-length"), "100"); + + bool encoding_watermarked = false; + setUpEncodingWatermarking(encoding_watermarked); + EXPECT_CALL(encoder_callbacks_, 
encodingBuffer()).WillRepeatedly(Return(nullptr)); + EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->encodeHeaders(response_headers_, false)); + processResponseHeaders(false, absl::nullopt); + + Buffer::OwnedImpl want_response_body; + Buffer::OwnedImpl got_response_body; + EXPECT_CALL(encoder_callbacks_, injectEncodedDataToFilterChain(_, false)) + .WillRepeatedly(Invoke( + [&got_response_body](Buffer::Instance& data, Unused) { got_response_body.move(data); })); + + for (int i = 0; i < 5; i++) { + Buffer::OwnedImpl resp_chunk; + TestUtility::feedBufferWithRandomCharacters(resp_chunk, 100); + want_response_body.add(resp_chunk.toString()); + EXPECT_EQ(FilterDataStatus::Continue, filter_->encodeData(resp_chunk, false)); + got_response_body.move(resp_chunk); + processResponseBody(absl::nullopt, false); + } + + Buffer::OwnedImpl last_resp_chunk; + EXPECT_EQ(FilterDataStatus::StopIterationNoBuffer, filter_->encodeData(last_resp_chunk, true)); + processResponseBody(absl::nullopt, true); + + // At this point, since we injected the data from each chunk after the "encodeData" + // callback, and since we also injected any chunks inserted using "injectEncodedData," + // the two buffers should match! + EXPECT_EQ(want_response_body.toString(), got_response_body.toString()); + EXPECT_FALSE(encoding_watermarked); + + filter_->onDestroy(); + + EXPECT_EQ(1, config_->stats().streams_started_.value()); + EXPECT_EQ(9, config_->stats().stream_msgs_sent_.value()); + EXPECT_EQ(9, config_->stats().stream_msgs_received_.value()); + EXPECT_EQ(1, config_->stats().streams_closed_.value()); +} + +// Using a configuration with streaming set for the request and +// response bodies, ensure that the chunks are delivered to the processor and +// that the processor gets them correctly when some data comes in before the +// headers are done processing. 
+TEST_F(HttpFilterTest, PostStreamingBodiesDifferentOrder) { + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_proc_server" + processing_mode: + request_header_mode: "SEND" + response_header_mode: "SEND" + request_body_mode: "STREAMED" + response_body_mode: "STREAMED" + request_trailer_mode: "SKIP" + response_trailer_mode: "SKIP" + )EOF"); + + // Create synthetic HTTP request + HttpTestUtility::addDefaultHeaders(request_headers_, "POST"); + request_headers_.addCopy(LowerCaseString("content-type"), "text/plain"); + request_headers_.addCopy(LowerCaseString("content-length"), 100); + + EXPECT_CALL(decoder_callbacks_, decodingBuffer()).WillRepeatedly(Return(nullptr)); + EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, false)); + processRequestHeaders(false, absl::nullopt); + + bool decoding_watermarked = false; + setUpDecodingWatermarking(decoding_watermarked); + + Buffer::OwnedImpl want_request_body; + Buffer::OwnedImpl got_request_body; + EXPECT_CALL(decoder_callbacks_, injectDecodedDataToFilterChain(_, false)) + .WillRepeatedly(Invoke( + [&got_request_body](Buffer::Instance& data, Unused) { got_request_body.move(data); })); + + Buffer::OwnedImpl req_chunk_1; + TestUtility::feedBufferWithRandomCharacters(req_chunk_1, 100); + want_request_body.add(req_chunk_1.toString()); + EXPECT_EQ(FilterDataStatus::StopIterationNoBuffer, filter_->decodeData(req_chunk_1, true)); + got_request_body.move(req_chunk_1); + processRequestBody(absl::nullopt); + EXPECT_EQ(want_request_body.toString(), got_request_body.toString()); + EXPECT_FALSE(decoding_watermarked); + + response_headers_.addCopy(LowerCaseString(":status"), "200"); + response_headers_.addCopy(LowerCaseString("content-type"), "text/plain"); + response_headers_.addCopy(LowerCaseString("content-length"), "100"); + + bool encoding_watermarked = false; + setUpEncodingWatermarking(encoding_watermarked); + EXPECT_EQ(FilterHeadersStatus::StopIteration, 
filter_->encodeHeaders(response_headers_, false)); + + Buffer::OwnedImpl want_response_body; + Buffer::OwnedImpl got_response_body; + EXPECT_CALL(encoder_callbacks_, injectEncodedDataToFilterChain(_, false)) + .WillRepeatedly(Invoke( + [&got_response_body](Buffer::Instance& data, Unused) { got_response_body.move(data); })); + Buffer::OwnedImpl response_buffer; + setUpEncodingBuffering(response_buffer); + + for (int i = 0; i < 3; i++) { + Buffer::OwnedImpl resp_chunk; + TestUtility::feedBufferWithRandomCharacters(resp_chunk, 100); + want_response_body.add(resp_chunk.toString()); + EXPECT_EQ(FilterDataStatus::StopIterationAndWatermark, filter_->encodeData(resp_chunk, false)); + response_buffer.move(resp_chunk); + } + + processResponseHeaders(false, absl::nullopt); + EXPECT_EQ(0, response_buffer.length()); + EXPECT_FALSE(encoding_watermarked); + got_response_body.move(response_buffer); + + for (int i = 0; i < 5; i++) { + Buffer::OwnedImpl resp_chunk; + TestUtility::feedBufferWithRandomCharacters(resp_chunk, 100); + want_response_body.add(resp_chunk.toString()); + EXPECT_EQ(FilterDataStatus::Continue, filter_->encodeData(resp_chunk, false)); + got_response_body.move(resp_chunk); + } + + Buffer::OwnedImpl last_resp_chunk; + EXPECT_EQ(FilterDataStatus::StopIterationNoBuffer, filter_->encodeData(last_resp_chunk, true)); + + // Act as if the callbacks were delayed and send back all the responses now. 
+ for (int i = 0; i < 7; i++) { + auto response = std::make_unique(); + response->mutable_response_body(); + if (i == 6) { + EXPECT_CALL(encoder_callbacks_, continueEncoding()); + } + stream_callbacks_->onReceiveMessage(std::move(response)); + } + + EXPECT_EQ(want_response_body.toString(), got_response_body.toString()); + EXPECT_FALSE(encoding_watermarked); + + filter_->onDestroy(); + + EXPECT_EQ(1, config_->stats().streams_started_.value()); + EXPECT_EQ(10, config_->stats().stream_msgs_sent_.value()); + EXPECT_EQ(10, config_->stats().stream_msgs_received_.value()); + EXPECT_EQ(1, config_->stats().streams_closed_.value()); +} + +// Using a configuration with streaming set for the response body, +// change the processing mode after receiving some chunks and verify the +// correct behavior. +TEST_F(HttpFilterTest, GetStreamingBodyAndChangeMode) { + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_proc_server" + processing_mode: + request_header_mode: "SEND" + response_header_mode: "SEND" + request_body_mode: "NONE" + response_body_mode: "STREAMED" + request_trailer_mode: "SKIP" + response_trailer_mode: "SKIP" + )EOF"); + + // Create synthetic HTTP request + HttpTestUtility::addDefaultHeaders(request_headers_); + + EXPECT_CALL(decoder_callbacks_, decodingBuffer()).WillRepeatedly(Return(nullptr)); + EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, true)); + processRequestHeaders(false, absl::nullopt); + + response_headers_.addCopy(LowerCaseString(":status"), "200"); + response_headers_.addCopy(LowerCaseString("content-type"), "text/plain"); + + bool encoding_watermarked = false; + setUpEncodingWatermarking(encoding_watermarked); + EXPECT_CALL(encoder_callbacks_, encodingBuffer()).WillRepeatedly(Return(nullptr)); + EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->encodeHeaders(response_headers_, false)); + processResponseHeaders(false, absl::nullopt); + + Buffer::OwnedImpl want_response_body; + 
Buffer::OwnedImpl got_response_body; + EXPECT_CALL(encoder_callbacks_, injectEncodedDataToFilterChain(_, false)) + .WillRepeatedly(Invoke( + [&got_response_body](Buffer::Instance& data, Unused) { got_response_body.move(data); })); + + // Send three bodies + for (int i = 0; i < 3; i++) { + Buffer::OwnedImpl resp_chunk; + TestUtility::feedBufferWithRandomCharacters(resp_chunk, 100); + want_response_body.add(resp_chunk.toString()); + EXPECT_EQ(FilterDataStatus::Continue, filter_->encodeData(resp_chunk, false)); + got_response_body.move(resp_chunk); + } + + // Respond to the first one by asking to change the processing mode + processResponseBody( + [](const HttpBody&, ProcessingResponse& response, BodyResponse&) { + response.mutable_mode_override()->set_response_body_mode(ProcessingMode::NONE); + }, + false); + + // A new body chunk should not be sent to the server, but should be queued + // because we didn't get all the responses yet + Buffer::OwnedImpl resp_chunk; + TestUtility::feedBufferWithRandomCharacters(resp_chunk, 100); + want_response_body.add(resp_chunk.toString()); + EXPECT_EQ(FilterDataStatus::Continue, filter_->encodeData(resp_chunk, false)); + got_response_body.move(resp_chunk); + + // There should be two more messages outstanding, but not three, so respond + // just to them. + for (int i = 0; i < 2; i++) { + processResponseBody(absl::nullopt, false); + } + + // Close the stream + Buffer::OwnedImpl last_resp_chunk; + EXPECT_EQ(FilterDataStatus::Continue, filter_->encodeData(last_resp_chunk, true)); + processResponseBody(absl::nullopt, false); + + // At this point, the whole body should have been processed including things + // that were rejected. 
+ EXPECT_EQ(want_response_body.toString(), got_response_body.toString()); + EXPECT_FALSE(encoding_watermarked); + + filter_->onDestroy(); + + EXPECT_EQ(1, config_->stats().streams_started_.value()); + EXPECT_EQ(5, config_->stats().stream_msgs_sent_.value()); + EXPECT_EQ(5, config_->stats().stream_msgs_received_.value()); + EXPECT_EQ(1, config_->stats().streams_closed_.value()); +} + +// Using a configuration with streaming set for the response body, +// change the processing mode after receiving some chunks and verify the +// correct behavior. +TEST_F(HttpFilterTest, GetStreamingBodyAndChangeModeDifferentOrder) { + initialize(R"EOF( + grpc_service: + envoy_grpc: + cluster_name: "ext_proc_server" + processing_mode: + request_header_mode: "SEND" + response_header_mode: "SEND" + request_body_mode: "NONE" + response_body_mode: "STREAMED" + request_trailer_mode: "SKIP" + response_trailer_mode: "SKIP" + )EOF"); + + // Create synthetic HTTP request + HttpTestUtility::addDefaultHeaders(request_headers_); + + EXPECT_CALL(decoder_callbacks_, decodingBuffer()).WillRepeatedly(Return(nullptr)); + EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->decodeHeaders(request_headers_, true)); + processRequestHeaders(false, absl::nullopt); + + response_headers_.addCopy(LowerCaseString(":status"), "200"); + response_headers_.addCopy(LowerCaseString("content-type"), "text/plain"); + + bool encoding_watermarked = false; + setUpEncodingWatermarking(encoding_watermarked); + EXPECT_CALL(encoder_callbacks_, encodingBuffer()).WillRepeatedly(Return(nullptr)); + EXPECT_EQ(FilterHeadersStatus::StopIteration, filter_->encodeHeaders(response_headers_, false)); + processResponseHeaders(false, absl::nullopt); + + Buffer::OwnedImpl want_response_body; + Buffer::OwnedImpl got_response_body; + EXPECT_CALL(encoder_callbacks_, injectEncodedDataToFilterChain(_, false)) + .WillRepeatedly(Invoke( + [&got_response_body](Buffer::Instance& data, Unused) { got_response_body.move(data); })); + + // Send three 
bodies + for (int i = 0; i < 3; i++) { + Buffer::OwnedImpl resp_chunk; + TestUtility::feedBufferWithRandomCharacters(resp_chunk, 100); + want_response_body.add(resp_chunk.toString()); + EXPECT_EQ(FilterDataStatus::Continue, filter_->encodeData(resp_chunk, false)); + got_response_body.move(resp_chunk); + } + + // Respond to the first one by asking to change the processing mode + processResponseBody( + [](const HttpBody&, ProcessingResponse& response, BodyResponse&) { + response.mutable_mode_override()->set_response_body_mode(ProcessingMode::NONE); + }, + false); + + // A new body chunk should not be sent to the server, but should be queued + // because we didn't get all the responses yet + Buffer::OwnedImpl resp_chunk; + TestUtility::feedBufferWithRandomCharacters(resp_chunk, 100); + want_response_body.add(resp_chunk.toString()); + EXPECT_EQ(FilterDataStatus::StopIterationNoBuffer, filter_->encodeData(resp_chunk, true)); + got_response_body.move(resp_chunk); + + // There should be two more messages outstanding, but not three, so respond + // just to them. + processResponseBody(absl::nullopt, false); + processResponseBody(absl::nullopt, true); + + // At this point, the whole body should have been processed including things + // that were rejected. 
+ EXPECT_EQ(want_response_body.toString(), got_response_body.toString()); + EXPECT_FALSE(encoding_watermarked); + + filter_->onDestroy(); + + EXPECT_EQ(1, config_->stats().streams_started_.value()); + EXPECT_EQ(5, config_->stats().stream_msgs_sent_.value()); + EXPECT_EQ(5, config_->stats().stream_msgs_received_.value()); + EXPECT_EQ(1, config_->stats().streams_closed_.value()); +} + // Using the default configuration, test the filter with a processor that // replies to the request_headers message with an empty immediate_response message TEST_F(HttpFilterTest, RespondImmediatelyDefault) { @@ -999,7 +1382,6 @@ TEST_F(HttpFilterTest, PostAndFailOnResponse) { EXPECT_FALSE(last_request_.async_mode()); ASSERT_TRUE(last_request_.has_request_headers()); - last_request_processed_ = true; EXPECT_CALL(decoder_callbacks_, continueDecoding()); std::unique_ptr resp1 = std::make_unique(); @@ -1137,7 +1519,6 @@ TEST_F(HttpFilterTest, ProcessingModeRequestHeadersOnly) { EXPECT_FALSE(last_request_.async_mode()); ASSERT_TRUE(last_request_.has_request_headers()); - last_request_processed_ = true; EXPECT_CALL(decoder_callbacks_, continueDecoding()); std::unique_ptr resp1 = std::make_unique(); diff --git a/test/extensions/filters/http/ext_proc/ordering_test.cc b/test/extensions/filters/http/ext_proc/ordering_test.cc index 7d5437049051b..d76da3dd8eec1 100644 --- a/test/extensions/filters/http/ext_proc/ordering_test.cc +++ b/test/extensions/filters/http/ext_proc/ordering_test.cc @@ -887,6 +887,21 @@ TEST_F(FastFailOrderingTest, GrpcErrorOnStartRequestBody) { EXPECT_EQ(FilterDataStatus::StopIterationNoBuffer, filter_->decodeData(req_body, true)); } +// gRPC failure while opening stream with only request body enabled in streaming mode +TEST_F(FastFailOrderingTest, GrpcErrorOnStartRequestBodyStreaming) { + initialize([](ExternalProcessor& cfg) { + auto* pm = cfg.mutable_processing_mode(); + pm->set_request_header_mode(ProcessingMode::SKIP); + 
pm->set_request_body_mode(ProcessingMode::STREAMED); + }); + sendRequestHeadersPost(false); + Buffer::OwnedImpl req_body("Hello!"); + Buffer::OwnedImpl buffered_body; + expectBufferedRequest(buffered_body, false); + EXPECT_CALL(encoder_callbacks_, sendLocalReply(Http::Code::InternalServerError, _, _, _, _)); + EXPECT_EQ(FilterDataStatus::StopIterationNoBuffer, filter_->decodeData(req_body, true)); +} + // gRPC failure while opening stream with only request body enabled and errors ignored TEST_F(FastFailOrderingTest, GrpcErrorIgnoredOnStartRequestBody) { initialize([](ExternalProcessor& cfg) { @@ -907,6 +922,27 @@ TEST_F(FastFailOrderingTest, GrpcErrorIgnoredOnStartRequestBody) { EXPECT_EQ(FilterDataStatus::Continue, filter_->encodeData(resp_body, true)); } +// gRPC failure while opening stream with only request body enabled in streamed mode and errors +// ignored +TEST_F(FastFailOrderingTest, GrpcErrorIgnoredOnStartRequestBodyStreamed) { + initialize([](ExternalProcessor& cfg) { + cfg.set_failure_mode_allow(true); + auto* pm = cfg.mutable_processing_mode(); + pm->set_request_header_mode(ProcessingMode::SKIP); + pm->set_request_body_mode(ProcessingMode::STREAMED); + }); + sendRequestHeadersPost(false); + Buffer::OwnedImpl req_body("Hello!"); + Buffer::OwnedImpl buffered_body; + expectBufferedRequest(buffered_body, false); + EXPECT_EQ(FilterDataStatus::Continue, filter_->decodeData(req_body, true)); + sendResponseHeaders(false); + Buffer::OwnedImpl resp_body("Hello!"); + Buffer::OwnedImpl resp_buf; + expectBufferedRequest(resp_buf, false); + EXPECT_EQ(FilterDataStatus::Continue, filter_->encodeData(resp_body, true)); +} + // gRPC failure while opening stream with only response headers enabled TEST_F(FastFailOrderingTest, GrpcErrorOnStartResponseHeaders) { initialize([](ExternalProcessor& cfg) { diff --git a/test/extensions/filters/http/ext_proc/streaming_integration_test.cc b/test/extensions/filters/http/ext_proc/streaming_integration_test.cc index 
59e03fe215773..9451afb90565a 100644 --- a/test/extensions/filters/http/ext_proc/streaming_integration_test.cc +++ b/test/extensions/filters/http/ext_proc/streaming_integration_test.cc @@ -1,13 +1,16 @@ #include "envoy/extensions/filters/http/ext_proc/v3alpha/ext_proc.pb.h" #include "envoy/service/ext_proc/v3alpha/external_processor.pb.h" +#include "source/common/common/hash.h" #include "source/common/network/address_impl.h" #include "test/common/http/common.h" #include "test/extensions/filters/http/ext_proc/test_processor.h" +#include "test/extensions/filters/http/ext_proc/utils.h" #include "test/integration/http_integration.h" #include "test/test_common/utility.h" +#include "absl/strings/str_format.h" #include "gtest/gtest.h" namespace Envoy { @@ -101,9 +104,29 @@ class StreamingIntegrationTest : public HttpIntegrationTest, client_response_ = codec_client_->makeHeaderOnlyRequest(headers); } + // Send an HTTP POST containing a randomly-generated body consisting of + // "num_chunks" of "chunk_size" bytes each. Return a copy of the complete + // body that may be used for comparison later. + Buffer::OwnedImpl sendPostRequest(uint32_t num_chunks, uint32_t chunk_size, + absl::optional> cb) { + auto& encoder = sendClientRequestHeaders(cb); + Buffer::OwnedImpl post_body; + for (uint32_t i = 0; i < num_chunks; i++) { + Buffer::OwnedImpl chunk; + TestUtility::feedBufferWithRandomCharacters(chunk, chunk_size); + post_body.add(chunk.toString()); + codec_client_->sendData(encoder, chunk, false); + } + Buffer::OwnedImpl empty_chunk; + codec_client_->sendData(encoder, empty_chunk, true); + return post_body; + } + TestProcessor test_processor_; envoy::extensions::filters::http::ext_proc::v3alpha::ExternalProcessor proto_config_{}; IntegrationStreamDecoderPtr client_response_; + std::atomic processor_request_hash_; + std::atomic processor_response_hash_; }; // Ensure that the test suite is run with all combinations the Envoy and Google gRPC clients. 
@@ -119,18 +142,18 @@ TEST_P(StreamingIntegrationTest, PostAndProcessHeadersOnly) { // This starts the gRPC server in the background. It'll be shut down when we stop the tests. test_processor_.start( [](grpc::ServerReaderWriter* stream) { + // This is the same gRPC stream processing code that a "user" of ext_proc + // would write. In this case, we expect to receive a request_headers + // message, and then close the stream. ProcessingRequest header_req; - if (!stream->Read(&header_req)) { - return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "expected message"); - } - if (!header_req.has_request_headers()) { - return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "expected request headers"); - } + ASSERT_TRUE(stream->Read(&header_req)); + ASSERT_TRUE(header_req.has_request_headers()); ProcessingResponse header_resp; header_resp.mutable_request_headers(); stream->Write(header_resp); - return grpc::Status::OK; + // Returning here closes the stream, unless we had an ASSERT failure + // previously. 
}); initializeConfig(); @@ -162,12 +185,8 @@ TEST_P(StreamingIntegrationTest, PostAndProcessBufferedRequestBody) { test_processor_.start( [total_size](grpc::ServerReaderWriter* stream) { ProcessingRequest header_req; - if (!stream->Read(&header_req)) { - return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "expected message"); - } - if (!header_req.has_request_headers()) { - return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "expected request headers"); - } + ASSERT_TRUE(stream->Read(&header_req)); + ASSERT_TRUE(header_req.has_request_headers()); ProcessingResponse header_resp; header_resp.mutable_request_headers(); @@ -176,36 +195,171 @@ TEST_P(StreamingIntegrationTest, PostAndProcessBufferedRequestBody) { stream->Write(header_resp); ProcessingRequest body_req; - if (!stream->Read(&body_req)) { - return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "expected message"); - } - if (!body_req.has_request_body()) { - return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "expected request body"); - } - if (body_req.request_body().body().size() != total_size) { - return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "incorrect body size"); - } + ASSERT_TRUE(stream->Read(&body_req)); + ASSERT_TRUE(body_req.has_request_body()); + EXPECT_EQ(body_req.request_body().body().size(), total_size); ProcessingResponse body_resp; header_resp.mutable_request_body(); stream->Write(body_resp); + }); - return grpc::Status::OK; + initializeConfig(); + HttpIntegrationTest::initialize(); + sendPostRequest(num_chunks, chunk_size, [total_size](Http::HeaderMap& headers) { + headers.addCopy(LowerCaseString("expect_request_size_bytes"), total_size); + }); + + ASSERT_TRUE(client_response_->waitForEndStream()); + EXPECT_TRUE(client_response_->complete()); + EXPECT_THAT(client_response_->headers(), Http::HttpStatusIs("200")); +} + +// Send a body that's larger than the buffer limit in streamed mode, and ensure +// that the processor gets the right number of bytes. 
+TEST_P(StreamingIntegrationTest, PostAndProcessStreamedRequestBody) { + const uint32_t num_chunks = 152; + const uint32_t chunk_size = 1000; + uint32_t total_size = num_chunks * chunk_size; + + test_processor_.start( + [total_size](grpc::ServerReaderWriter* stream) { + // Expect a request_headers message as the first message on the stream, + // and send back an empty response. + ProcessingRequest header_req; + ASSERT_TRUE(stream->Read(&header_req)); + ASSERT_TRUE(header_req.has_request_headers()); + ProcessingResponse header_resp; + header_resp.mutable_request_headers(); + stream->Write(header_resp); + + // Now, expect a bunch of request_body messages and respond to each. + // Count up the number of bytes we receive and make sure that we get + // them all. + uint32_t received_size = 0; + ProcessingRequest body_req; + do { + ASSERT_TRUE(stream->Read(&body_req)); + ASSERT_TRUE(body_req.has_request_body()); + received_size += body_req.request_body().body().size(); + ProcessingResponse body_resp; + body_resp.mutable_request_body(); + stream->Write(body_resp); + } while (!body_req.request_body().end_of_stream()); + + EXPECT_EQ(received_size, total_size); }); + proto_config_.mutable_processing_mode()->set_request_body_mode(ProcessingMode::STREAMED); initializeConfig(); HttpIntegrationTest::initialize(); - auto& encoder = sendClientRequestHeaders([total_size](Http::HeaderMap& headers) { + sendPostRequest(num_chunks, chunk_size, [total_size](Http::HeaderMap& headers) { + // This header tells the "autonomous upstream" that will respond to our + // request to throw an error if it doesn't get the right number of bytes. 
headers.addCopy(LowerCaseString("expect_request_size_bytes"), total_size); }); - for (uint32_t i = 0; i < num_chunks; i++) { - Buffer::OwnedImpl chunk; - TestUtility::feedBufferWithRandomCharacters(chunk, chunk_size); - codec_client_->sendData(encoder, chunk, false); - } - Buffer::OwnedImpl empty_chunk; - codec_client_->sendData(encoder, empty_chunk, true); + ASSERT_TRUE(client_response_->waitForEndStream()); + EXPECT_TRUE(client_response_->complete()); + EXPECT_THAT(client_response_->headers(), Http::HttpStatusIs("200")); +} + +// Send a body that's larger than the buffer limit in streamed mode, and change +// the processing mode after receiving some of the body. We might continue to +// receive streamed messages after this point, but the whole message should +// make it upstream regardless. +TEST_P(StreamingIntegrationTest, PostAndProcessStreamedRequestBodyPartially) { + const uint32_t num_chunks = 19; + const uint32_t chunk_size = 10000; + uint32_t total_size = num_chunks * chunk_size; + + test_processor_.start( + [](grpc::ServerReaderWriter* stream) { + ProcessingRequest header_req; + ASSERT_TRUE(stream->Read(&header_req)); + ASSERT_TRUE(header_req.has_request_headers()); + ProcessingResponse header_resp; + header_resp.mutable_request_headers(); + stream->Write(header_resp); + + uint32_t received_count = 0; + ProcessingRequest req; + + // Expect body chunks, and also change the processing mode partway so + // that we can see what happens when we do that. + while (stream->Read(&req)) { + ProcessingResponse resp; + if (req.has_request_body()) { + received_count++; + if (received_count == 2) { + // After two body chunks, change the processing mode. Since the body + // is pipelined, we might still get body chunks, however. This test can't + // validate this, but at least we can ensure that this doesn't blow up the + // protocol. 
+ auto* mode_override = resp.mutable_mode_override(); + mode_override->set_request_body_mode(ProcessingMode::NONE); + } + resp.mutable_request_body(); + } else if (req.has_response_headers()) { + // Should not see response headers until we changed the processing mode. + EXPECT_GE(received_count, 2); + resp.mutable_response_headers(); + } else { + FAIL() << "unexpected stream message"; + } + stream->Write(resp); + } + }); + + proto_config_.mutable_processing_mode()->set_request_body_mode(ProcessingMode::STREAMED); + initializeConfig(); + HttpIntegrationTest::initialize(); + sendPostRequest(num_chunks, chunk_size, [total_size](Http::HeaderMap& headers) { + headers.addCopy(LowerCaseString("expect_request_size_bytes"), total_size); + }); + + ASSERT_TRUE(client_response_->waitForEndStream()); + EXPECT_TRUE(client_response_->complete()); + EXPECT_THAT(client_response_->headers(), Http::HttpStatusIs("200")); +} + +// Send a body that's larger than the buffer limit in streamed mode, and close +// the stream before we've received all the chunks. The whole message should +// be received by the upstream regardless. 
+TEST_P(StreamingIntegrationTest, PostAndProcessStreamedRequestBodyAndClose) { + const uint32_t num_chunks = 25; + const uint32_t chunk_size = 10000; + uint32_t total_size = num_chunks * chunk_size; + + test_processor_.start( + [total_size](grpc::ServerReaderWriter* stream) { + ProcessingRequest header_req; + ASSERT_TRUE(stream->Read(&header_req)); + ASSERT_TRUE(header_req.has_request_headers()); + ProcessingResponse header_resp; + header_resp.mutable_request_headers(); + stream->Write(header_resp); + + ProcessingRequest req; + uint32_t received_size = 0; + while (stream->Read(&req)) { + received_size += req.request_body().body().size(); + if (received_size > (total_size / 2)) { + // Return before we get the end of stream + return; + } + ProcessingResponse resp; + resp.mutable_request_body(); + stream->Write(resp); + } + }); + + proto_config_.mutable_processing_mode()->set_request_body_mode(ProcessingMode::STREAMED); + initializeConfig(); + HttpIntegrationTest::initialize(); + sendPostRequest(num_chunks, chunk_size, [total_size](Http::HeaderMap& headers) { + headers.addCopy(LowerCaseString("expect_request_size_bytes"), total_size); + }); ASSERT_TRUE(client_response_->waitForEndStream()); EXPECT_TRUE(client_response_->complete()); @@ -220,12 +374,8 @@ TEST_P(StreamingIntegrationTest, GetAndProcessBufferedResponseBody) { test_processor_.start( [response_size](grpc::ServerReaderWriter* stream) { ProcessingRequest header_req; - if (!stream->Read(&header_req)) { - return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "expected message"); - } - if (!header_req.has_request_headers()) { - return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "expected request headers"); - } + ASSERT_TRUE(stream->Read(&header_req)); + ASSERT_TRUE(header_req.has_request_headers()); ProcessingResponse header_resp; header_resp.mutable_request_headers(); @@ -235,23 +385,67 @@ TEST_P(StreamingIntegrationTest, GetAndProcessBufferedResponseBody) { stream->Write(header_resp); ProcessingRequest 
body_req; - if (!stream->Read(&body_req)) { - return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "expected message"); - } - if (!body_req.has_response_body()) { - return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "expected response body"); - } - if (body_req.response_body().body().size() != response_size) { - return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "incorrect body size"); - } + ASSERT_TRUE(stream->Read(&body_req)); + ASSERT_TRUE(body_req.has_response_body()); + EXPECT_EQ(body_req.response_body().body().size(), response_size); + }); + + initializeConfig(); + HttpIntegrationTest::initialize(); + Http::TestRequestHeaderMapImpl headers; + HttpTestUtility::addDefaultHeaders(headers); + headers.addCopy(LowerCaseString("response_size_bytes"), response_size); + sendGetRequest(headers); + + ASSERT_TRUE(client_response_->waitForEndStream()); + EXPECT_TRUE(client_response_->complete()); + EXPECT_THAT(client_response_->headers(), Http::HttpStatusIs("200")); + EXPECT_EQ(client_response_->body().size(), response_size); +} + +// Do an HTTP GET that will return a body larger than the buffer limit, which we process +// in the processor using streaming. 
+TEST_P(StreamingIntegrationTest, GetAndProcessStreamedResponseBody) { + uint32_t response_size = 170000; - return grpc::Status::OK; + test_processor_.start( + [this, + response_size](grpc::ServerReaderWriter* stream) { + ProcessingRequest header_req; + ASSERT_TRUE(stream->Read(&header_req)); + ASSERT_TRUE(header_req.has_request_headers()); + + ProcessingResponse header_resp; + header_resp.mutable_request_headers(); + auto* override = header_resp.mutable_mode_override(); + override->set_response_header_mode(ProcessingMode::SKIP); + override->set_response_body_mode(ProcessingMode::STREAMED); + stream->Write(header_resp); + + ProcessingRequest body_req; + uint32_t total_response_size = 0; + Buffer::OwnedImpl allData; + + do { + ASSERT_TRUE(stream->Read(&body_req)); + ASSERT_TRUE(body_req.has_response_body()); + total_response_size += body_req.response_body().body().size(); + // Save all the chunks in a buffer so that we can calculate a hash. + allData.add(body_req.response_body().body()); + ProcessingResponse body_resp; + body_resp.mutable_response_body(); + stream->Write(body_resp); + } while (!body_req.response_body().end_of_stream()); + processor_response_hash_ = HashUtil::xxHash64(allData.toString()); + ASSERT_EQ(total_response_size, response_size); }); initializeConfig(); HttpIntegrationTest::initialize(); Http::TestRequestHeaderMapImpl headers; HttpTestUtility::addDefaultHeaders(headers); + // This magic header tells the "autonomous upstream" to send back a response + // of the specified size. 
headers.addCopy(LowerCaseString("response_size_bytes"), response_size); sendGetRequest(headers); @@ -259,6 +453,172 @@ TEST_P(StreamingIntegrationTest, GetAndProcessBufferedResponseBody) { EXPECT_TRUE(client_response_->complete()); EXPECT_THAT(client_response_->headers(), Http::HttpStatusIs("200")); EXPECT_EQ(client_response_->body().size(), response_size); + EXPECT_EQ(processor_response_hash_, HashUtil::xxHash64(client_response_->body())); +} + +// Send a large HTTP POST, and expect back an equally large reply. Stream both and verify +// that we got back what we expected. The processor itself must be written carefully +// because once the request headers are delivered, the request and response body +// chunks and the response headers can come in any order. +TEST_P(StreamingIntegrationTest, PostAndProcessStreamBothBodies) { + const uint32_t send_chunks = 10; + const uint32_t chunk_size = 11000; + uint32_t request_size = send_chunks * chunk_size; + uint32_t response_size = 1700000; + + test_processor_.start( + [this, request_size, + response_size](grpc::ServerReaderWriter* stream) { + ProcessingRequest header_req; + ASSERT_TRUE(stream->Read(&header_req)); + ASSERT_TRUE(header_req.has_request_headers()); + ProcessingResponse header_resp; + header_resp.mutable_request_headers(); + stream->Write(header_resp); + + bool saw_response_headers = false; + bool saw_request_eof = false; + bool saw_response_eof = false; + ProcessingRequest message; + uint32_t total_request_size = 0; + uint32_t total_response_size = 0; + Buffer::OwnedImpl allResponseData; + Buffer::OwnedImpl allRequestData; + + do { + ProcessingResponse response; + ASSERT_TRUE(stream->Read(&message)); + if (message.has_response_headers()) { + // Expect only one response headers message, with a good status. 
+ EXPECT_FALSE(saw_response_headers); + EXPECT_THAT(message.response_headers().headers(), + SingleProtoHeaderValueIs(":status", "200")); + saw_response_headers = true; + response.mutable_response_headers(); + } else if (message.has_request_body()) { + // Expect a number of request body messages. Make sure that we + // don't get a duplicate EOF, count the size, and store the chunks + // so that we can calculate a hash. + total_request_size += message.request_body().body().size(); + allRequestData.add(message.request_body().body()); + if (message.request_body().end_of_stream()) { + EXPECT_FALSE(saw_request_eof); + saw_request_eof = true; + EXPECT_EQ(total_request_size, request_size); + processor_request_hash_ = HashUtil::xxHash64(allRequestData.toString()); + } + response.mutable_request_body(); + } else if (message.has_response_body()) { + // Count and hash the response body like we did for the request body. + total_response_size += message.response_body().body().size(); + allResponseData.add(message.response_body().body()); + if (message.response_body().end_of_stream()) { + EXPECT_FALSE(saw_response_eof); + saw_response_eof = true; + EXPECT_EQ(total_response_size, response_size); + processor_response_hash_ = HashUtil::xxHash64(allResponseData.toString()); + } + response.mutable_response_body(); + } else { + FAIL() << "unexpected stream message"; + } + stream->Write(response); + } while (!(saw_response_headers && saw_request_eof && saw_response_eof)); + }); + + proto_config_.mutable_processing_mode()->set_request_body_mode(ProcessingMode::STREAMED); + proto_config_.mutable_processing_mode()->set_response_body_mode(ProcessingMode::STREAMED); + initializeConfig(); + HttpIntegrationTest::initialize(); + auto request_body = sendPostRequest( + send_chunks, chunk_size, [request_size, response_size](Http::HeaderMap& headers) { + // Tell the upstream to fail if it doesn't get the right amount of data. 
+ headers.addCopy(LowerCaseString("expect_request_size_bytes"), request_size); + // Also tell the upstream how much data to send back. + headers.addCopy(LowerCaseString("response_size_bytes"), response_size); + }); + + ASSERT_TRUE(client_response_->waitForEndStream()); + EXPECT_TRUE(client_response_->complete()); + EXPECT_THAT(client_response_->headers(), Http::HttpStatusIs("200")); + EXPECT_EQ(client_response_->body().size(), response_size); + EXPECT_EQ(processor_request_hash_, HashUtil::xxHash64(request_body.toString())); + EXPECT_EQ(processor_response_hash_, HashUtil::xxHash64(client_response_->body())); +} + +// Send a large HTTP POST, and expect back an equally large reply. Stream both and replace both +// the request and response bodies with different bodies. +TEST_P(StreamingIntegrationTest, PostAndStreamAndTransformBothBodies) { + const uint32_t send_chunks = 12; + const uint32_t chunk_size = 10000; + uint32_t response_size = 180000; + + test_processor_.start( + [](grpc::ServerReaderWriter* stream) { + ProcessingRequest header_req; + ASSERT_TRUE(stream->Read(&header_req)); + ASSERT_TRUE(header_req.has_request_headers()); + + ProcessingResponse header_resp; + header_resp.mutable_request_headers(); + stream->Write(header_resp); + + bool saw_response_headers = false; + bool saw_request_eof = false; + bool saw_response_eof = false; + bool first_request_chunk = true; + ProcessingRequest message; + + do { + ProcessingResponse response; + ASSERT_TRUE(stream->Read(&message)); + if (message.has_response_headers()) { + EXPECT_FALSE(saw_response_headers); + EXPECT_THAT(message.response_headers().headers(), + SingleProtoHeaderValueIs(":status", "200")); + saw_response_headers = true; + response.mutable_response_headers(); + } else if (message.has_request_body()) { + // Replace the first chunk with a new message, and zero out the rest + auto* new_body = response.mutable_request_body()->mutable_response(); + if (first_request_chunk) { + 
new_body->mutable_body_mutation()->set_body("Hello"); + first_request_chunk = false; + } else { + new_body->mutable_body_mutation()->set_clear_body(true); + } + if (message.request_body().end_of_stream()) { + saw_request_eof = true; + } + } else if (message.has_response_body()) { + // Replace the last chunk with a new message and zero out the rest + auto* new_body = response.mutable_response_body()->mutable_response(); + if (message.response_body().end_of_stream()) { + new_body->mutable_body_mutation()->set_body("World"); + first_request_chunk = false; + saw_response_eof = true; + } else { + new_body->mutable_body_mutation()->set_clear_body(true); + } + } else { + FAIL() << "unexpected stream message"; + } + stream->Write(response); + } while (!saw_response_headers || !saw_request_eof || !saw_response_eof); + }); + + proto_config_.mutable_processing_mode()->set_request_body_mode(ProcessingMode::STREAMED); + proto_config_.mutable_processing_mode()->set_response_body_mode(ProcessingMode::STREAMED); + initializeConfig(); + HttpIntegrationTest::initialize(); + sendPostRequest(send_chunks, chunk_size, [response_size](Http::HeaderMap& headers) { + headers.addCopy(LowerCaseString("expect_request_size_bytes"), 5); + headers.addCopy(LowerCaseString("response_size_bytes"), response_size); + }); + + ASSERT_TRUE(client_response_->waitForEndStream()); + EXPECT_TRUE(client_response_->complete()); + EXPECT_THAT(client_response_->headers(), Http::HttpStatusIs("200")); } // Send a body that's larger than the buffer limit and have the processor @@ -273,12 +633,8 @@ TEST_P(StreamingIntegrationTest, PostAndProcessBufferedRequestBodyTooBig) { test_processor_.start( [](grpc::ServerReaderWriter* stream) { ProcessingRequest header_req; - if (!stream->Read(&header_req)) { - return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "expected message"); - } - if (!header_req.has_request_headers()) { - return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "expected request headers"); - } + 
ASSERT_TRUE(stream->Read(&header_req)); + ASSERT_TRUE(header_req.has_request_headers()); ProcessingResponse response; response.mutable_request_headers(); @@ -288,27 +644,16 @@ TEST_P(StreamingIntegrationTest, PostAndProcessBufferedRequestBodyTooBig) { ProcessingRequest header_resp; if (stream->Read(&header_resp)) { - if (!header_resp.has_response_headers()) { - return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "expected response headers"); - } + ASSERT_TRUE(header_resp.has_response_headers()); } - return grpc::Status::OK; }); initializeConfig(); HttpIntegrationTest::initialize(); - auto& encoder = sendClientRequestHeaders([total_size](Http::HeaderMap& headers) { + sendPostRequest(num_chunks, chunk_size, [total_size](Http::HeaderMap& headers) { headers.addCopy(LowerCaseString("expect_request_size_bytes"), total_size); }); - for (uint32_t i = 0; i < num_chunks; i++) { - Buffer::OwnedImpl chunk; - TestUtility::feedBufferWithRandomCharacters(chunk, chunk_size); - codec_client_->sendData(encoder, chunk, false); - } - Buffer::OwnedImpl empty_chunk; - codec_client_->sendData(encoder, empty_chunk, true); - ASSERT_TRUE(client_response_->waitForEndStream()); EXPECT_TRUE(client_response_->complete()); EXPECT_THAT(client_response_->headers(), Http::HttpStatusIs("413")); diff --git a/test/extensions/filters/http/ext_proc/test_processor.cc b/test/extensions/filters/http/ext_proc/test_processor.cc index 7498f76ea24ec..735bfd707c1bf 100644 --- a/test/extensions/filters/http/ext_proc/test_processor.cc +++ b/test/extensions/filters/http/ext_proc/test_processor.cc @@ -9,6 +9,20 @@ namespace Extensions { namespace HttpFilters { namespace ExternalProcessing { +grpc::Status ProcessorWrapper::Process( + grpc::ServerContext*, + grpc::ServerReaderWriter* stream) { + callback_(stream); + if (testing::Test::HasFatalFailure()) { + // This is not strictly necessary, but it may help in troubleshooting to + // ensure that we return a bad gRPC status if an "ASSERT" failed in the + // 
processor. + return grpc::Status(grpc::StatusCode::INVALID_ARGUMENT, "Fatal test error"); + } + return grpc::Status::OK; +} + void TestProcessor::start(ProcessingFunc cb) { wrapper_ = std::make_unique(cb); grpc::ServerBuilder builder; diff --git a/test/extensions/filters/http/ext_proc/test_processor.h b/test/extensions/filters/http/ext_proc/test_processor.h index e6a027ca01f3e..b3b8ad2ed8bbf 100644 --- a/test/extensions/filters/http/ext_proc/test_processor.h +++ b/test/extensions/filters/http/ext_proc/test_processor.h @@ -14,10 +14,14 @@ namespace Extensions { namespace HttpFilters { namespace ExternalProcessing { -using ProcessingFunc = std::function*)>; +// An implementation of the ExternalProcessor service that may be included +// in integration tests. class ProcessorWrapper : public envoy::service::ext_proc::v3alpha::ExternalProcessor::Service { public: ProcessorWrapper(ProcessingFunc& cb) : callback_(cb) {} @@ -26,24 +30,28 @@ class ProcessorWrapper : public envoy::service::ext_proc::v3alpha::ExternalProce Process(grpc::ServerContext*, grpc::ServerReaderWriter* stream) - override { - return callback_(stream); - } + override; private: ProcessingFunc callback_; }; +// This class starts a gRPC server supporting the ExternalProcessor service. +// It delegates each gRPC stream to a method that can process the stream and +// use ASSERT_ and EXPECT_ macros to validate test results. class TestProcessor { public: // Start the processor listening on an ephemeral port (port 0) on 127.0.0.1. - // All new streams will be delegated to the specified function. + // All new streams will be delegated to the specified function. The function + // will be invoked in a background thread controlled by the gRPC server. void start(ProcessingFunc cb); - // Stop the processor from listening once all streams are closed. + // Stop the processor from listening once all streams are closed, and exit + // the listening threads. 
void shutdown(); - // Return the port that the processor is listening on. + // Return the port that the processor is listening on from the call to + // "start". int port() const { return listening_port_; } private: diff --git a/test/extensions/filters/http/ext_proc/utils.h b/test/extensions/filters/http/ext_proc/utils.h index e8b931dacc7a9..98274370d9451 100644 --- a/test/extensions/filters/http/ext_proc/utils.h +++ b/test/extensions/filters/http/ext_proc/utils.h @@ -35,6 +35,16 @@ MATCHER_P2(SingleHeaderValueIs, key, value, return hdr[0]->value() == value; } +MATCHER_P2(SingleProtoHeaderValueIs, key, value, + absl::StrFormat("Header \"%s\" equals \"%s\"", key, value)) { + for (const auto& hdr : arg.headers()) { + if (key == hdr.key()) { + return value == hdr.value(); + } + } + return false; +} + } // namespace ExternalProcessing } // namespace HttpFilters } // namespace Extensions From 284698959bb4f0d1e2c36a387a2e725b38f19a09 Mon Sep 17 00:00:00 2001 From: Keith Smiley Date: Wed, 21 Jul 2021 19:22:51 -0700 Subject: [PATCH 27/57] bazel: update deprecated flag name (#17446) This fixes: ``` WARNING: Option 'experimental_strict_action_env' is deprecated: Use --incompatible_strict_action_env instead ``` Signed-off-by: Keith Smiley --- .bazelrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.bazelrc b/.bazelrc index 7d5c4ce1e86bf..2a9ede51a1aaa 100644 --- a/.bazelrc +++ b/.bazelrc @@ -14,7 +14,7 @@ run --color=yes build --color=yes build --workspace_status_command="bash bazel/get_workspace_status" -build --experimental_strict_action_env=true +build --incompatible_strict_action_env build --host_force_python=PY3 build --host_javabase=@bazel_tools//tools/jdk:remote_jdk11 build --javabase=@bazel_tools//tools/jdk:remote_jdk11 From 2b716d1c6602ce0f0e38bcbe65378a65faaf6de1 Mon Sep 17 00:00:00 2001 From: RenjieTang Date: Thu, 22 Jul 2021 04:50:21 -0700 Subject: [PATCH 28/57] quic: add QUIC connection close stats to upstream connections (#17222) * add QUIC 
stats to upstream connections Signed-off-by: Renjie Tang * format Signed-off-by: Renjie Tang * minor ci fixes Signed-off-by: Renjie Tang * format Signed-off-by: Renjie Tang * fix compiler error Signed-off-by: Renjie Tang * address comments. Signed-off-by: Renjie Tang * clean up unused variables. Signed-off-by: Renjie Tang * remove unused includes. Signed-off-by: Renjie Tang * fix ci compile time option Signed-off-by: Renjie Tang * fix an unrelated Windows ci failure. Signed-off-by: Renjie Tang * add docs and fix a compile option issue. Signed-off-by: Renjie Tang * fix include Signed-off-by: Renjie Tang * fix windows CI failure in config_impl_test Signed-off-by: Renjie Tang * fix unsuccessful merge to upstream. Signed-off-by: Renjie Tang --- .../cluster_manager/cluster_stats.rst | 12 ++++++ source/common/http/BUILD | 1 + source/common/http/conn_pool_grid.cc | 8 ++-- source/common/http/conn_pool_grid.h | 7 ++- source/common/http/http3/conn_pool.cc | 11 +++-- source/common/http/http3/conn_pool.h | 3 +- source/common/quic/BUILD | 1 + .../quic/client_connection_factory_impl.cc | 5 ++- .../quic/client_connection_factory_impl.h | 3 +- .../common/quic/envoy_quic_client_session.cc | 7 ++- .../common/quic/envoy_quic_client_session.h | 6 ++- source/common/upstream/BUILD | 1 + .../common/upstream/cluster_manager_impl.cc | 6 ++- source/common/upstream/cluster_manager_impl.h | 24 +++++------ source/server/BUILD | 3 +- source/server/config_validation/BUILD | 1 + .../config_validation/cluster_manager.h | 10 ++--- source/server/config_validation/server.cc | 9 ++-- source/server/config_validation/server.h | 2 + source/server/listener_manager_impl.cc | 6 +-- source/server/listener_manager_impl.h | 5 ++- source/server/server.cc | 10 +++-- source/server/server.h | 4 ++ test/common/http/conn_manager_impl_test.cc | 2 +- test/common/http/conn_pool_grid_test.cc | 15 ++++--- test/common/http/http3/conn_pool_test.cc | 7 ++- .../client_connection_factory_impl_test.cc | 6 ++- 
.../quic/envoy_quic_client_session_test.cc | 43 +++++++++++-------- test/common/quic/test_utils.h | 7 ++- test/common/router/config_impl_test.cc | 4 +- test/config_test/config_test.cc | 6 +-- test/integration/BUILD | 1 + test/integration/http_integration.cc | 4 +- test/integration/http_integration.h | 2 +- .../integration/quic_http_integration_test.cc | 2 +- .../sds_dynamic_integration_test.cc | 3 +- test/integration/utility.cc | 7 ++- test/mocks/server/BUILD | 1 + test/mocks/server/instance.cc | 2 +- test/mocks/server/instance.h | 2 + test/server/api_listener_test.cc | 4 +- .../config_validation/cluster_manager_test.cc | 3 +- test/server/configuration_impl_test.cc | 3 +- test/server/listener_manager_impl_test.h | 5 ++- 44 files changed, 180 insertions(+), 94 deletions(-) diff --git a/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst b/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst index 055240a77172b..17b5e2da387ce 100644 --- a/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst +++ b/docs/root/configuration/upstream/cluster_manager/cluster_stats.rst @@ -105,6 +105,18 @@ Every cluster has a statistics tree rooted at *cluster..* with the followi assignment_timeout_received, Counter, Total assignments received with endpoint lease information. assignment_stale, Counter, Number of times the received assignments went stale before new assignments arrived. +HTTP/3 protocol statistics +-------------------------- + +HTTP/3 protocol stats are global with the following statistics: + +.. csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + .quic_connection_close_error_code_, Counter, A collection of counters that are lazily initialized to record each QUIC connection close's error code. 
+ + Health check statistics ----------------------- diff --git a/source/common/http/BUILD b/source/common/http/BUILD index db231bbd66b07..ebc5bdcf0cf67 100644 --- a/source/common/http/BUILD +++ b/source/common/http/BUILD @@ -190,6 +190,7 @@ envoy_cc_library( ":http3_status_tracker", ":mixed_conn_pool", "//source/common/http/http3:conn_pool_lib", + "//source/common/quic:quic_stat_names_lib", ], ) diff --git a/source/common/http/conn_pool_grid.cc b/source/common/http/conn_pool_grid.cc index 28ce8d1c89c30..071088f1b871e 100644 --- a/source/common/http/conn_pool_grid.cc +++ b/source/common/http/conn_pool_grid.cc @@ -193,11 +193,13 @@ ConnectivityGrid::ConnectivityGrid( const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, Upstream::ClusterConnectivityState& state, TimeSource& time_source, AlternateProtocolsCacheSharedPtr alternate_protocols, - std::chrono::milliseconds next_attempt_duration, ConnectivityOptions connectivity_options) + std::chrono::milliseconds next_attempt_duration, ConnectivityOptions connectivity_options, + Quic::QuicStatNames& quic_stat_names, Stats::Scope& scope) : dispatcher_(dispatcher), random_generator_(random_generator), host_(host), priority_(priority), options_(options), transport_socket_options_(transport_socket_options), state_(state), next_attempt_duration_(next_attempt_duration), time_source_(time_source), - http3_status_tracker_(dispatcher_), alternate_protocols_(alternate_protocols) { + http3_status_tracker_(dispatcher_), alternate_protocols_(alternate_protocols), + quic_stat_names_(quic_stat_names), scope_(scope) { // ProdClusterManagerFactory::allocateConnPool verifies the protocols are HTTP/1, HTTP/2 and // HTTP/3. // TODO(#15649) support v6/v4, WiFi/cellular. 
@@ -225,7 +227,7 @@ absl::optional ConnectivityGrid::createNextPool( if (pools_.empty()) { pools_.push_back(Http3::allocateConnPool(dispatcher_, random_generator_, host_, priority_, options_, transport_socket_options_, state_, - time_source_)); + time_source_, quic_stat_names_, scope_)); return pools_.begin(); } pools_.push_back(std::make_unique(dispatcher_, random_generator_, host_, diff --git a/source/common/http/conn_pool_grid.h b/source/common/http/conn_pool_grid.h index 5adf47dd4f7c3..e658b5ed1123c 100644 --- a/source/common/http/conn_pool_grid.h +++ b/source/common/http/conn_pool_grid.h @@ -3,6 +3,7 @@ #include "source/common/http/alternate_protocols_cache_impl.h" #include "source/common/http/conn_pool_base.h" #include "source/common/http/http3_status_tracker.h" +#include "source/common/quic/quic_stat_names.h" #include "absl/container/flat_hash_map.h" @@ -134,7 +135,8 @@ class ConnectivityGrid : public ConnectionPool::Instance, Upstream::ClusterConnectivityState& state, TimeSource& time_source, AlternateProtocolsCacheSharedPtr alternate_protocols, std::chrono::milliseconds next_attempt_duration, - ConnectivityOptions connectivity_options); + ConnectivityOptions connectivity_options, Quic::QuicStatNames& quic_stat_names, + Stats::Scope& scope); ~ConnectivityGrid() override; // Http::ConnPool::Instance @@ -210,6 +212,9 @@ class ConnectivityGrid : public ConnectionPool::Instance, // Wrapped callbacks are stashed in the wrapped_callbacks_ for ownership. 
std::list wrapped_callbacks_; + + Quic::QuicStatNames& quic_stat_names_; + Stats::Scope& scope_; }; } // namespace Http diff --git a/source/common/http/http3/conn_pool.cc b/source/common/http/http3/conn_pool.cc index ff7f39f21feeb..49ed2e1a7b3c4 100644 --- a/source/common/http/http3/conn_pool.cc +++ b/source/common/http/http3/conn_pool.cc @@ -59,10 +59,12 @@ allocateConnPool(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_ Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, - Upstream::ClusterConnectivityState& state, TimeSource& time_source) { + Upstream::ClusterConnectivityState& state, TimeSource& time_source, + Quic::QuicStatNames& quic_stat_names, Stats::Scope& scope) { return std::make_unique( host, priority, dispatcher, options, transport_socket_options, random_generator, state, - [](HttpConnPoolImplBase* pool) -> ::Envoy::ConnectionPool::ActiveClientPtr { + [&quic_stat_names, + &scope](HttpConnPoolImplBase* pool) -> ::Envoy::ConnectionPool::ActiveClientPtr { // If there's no ssl context, the secrets are not loaded. Fast-fail by returning null. 
auto factory = &pool->host()->transportSocketFactory(); ASSERT(dynamic_cast(factory) != nullptr); @@ -77,8 +79,9 @@ allocateConnPool(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_ if (!source_address.get()) { source_address = Network::Utility::getLocalAddress(host_address->ip()->version()); } - data.connection_ = Quic::createQuicNetworkConnection( - h3_pool->quicInfo(), pool->dispatcher(), host_address, source_address); + data.connection_ = + Quic::createQuicNetworkConnection(h3_pool->quicInfo(), pool->dispatcher(), host_address, + source_address, quic_stat_names, scope); return std::make_unique(*pool, data); }, [](Upstream::Host::CreateConnectionData& data, HttpConnPoolImplBase* pool) { diff --git a/source/common/http/http3/conn_pool.h b/source/common/http/http3/conn_pool.h index 3eaf625609db6..0886d4ddaf7ae 100644 --- a/source/common/http/http3/conn_pool.h +++ b/source/common/http/http3/conn_pool.h @@ -66,7 +66,8 @@ allocateConnPool(Event::Dispatcher& dispatcher, Random::RandomGenerator& random_ Upstream::HostConstSharedPtr host, Upstream::ResourcePriority priority, const Network::ConnectionSocket::OptionsSharedPtr& options, const Network::TransportSocketOptionsConstSharedPtr& transport_socket_options, - Upstream::ClusterConnectivityState& state, TimeSource& time_source); + Upstream::ClusterConnectivityState& state, TimeSource& time_source, + Quic::QuicStatNames& quic_stat_names, Stats::Scope& scope); } // namespace Http3 } // namespace Http diff --git a/source/common/quic/BUILD b/source/common/quic/BUILD index 435a1ac486b95..24ad4b8453ea9 100644 --- a/source/common/quic/BUILD +++ b/source/common/quic/BUILD @@ -265,6 +265,7 @@ envoy_cc_library( ":envoy_quic_stream_lib", ":envoy_quic_utils_lib", ":quic_filter_manager_connection_lib", + ":quic_stat_names_lib", "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", "//source/common/http:codes_lib", diff --git a/source/common/quic/client_connection_factory_impl.cc 
b/source/common/quic/client_connection_factory_impl.cc index e8af0afeb2d59..2e796745a4ca4 100644 --- a/source/common/quic/client_connection_factory_impl.cc +++ b/source/common/quic/client_connection_factory_impl.cc @@ -55,7 +55,8 @@ PersistentQuicInfoImpl::PersistentQuicInfoImpl( std::unique_ptr createQuicNetworkConnection(Http::PersistentQuicInfo& info, Event::Dispatcher& dispatcher, Network::Address::InstanceConstSharedPtr server_addr, - Network::Address::InstanceConstSharedPtr local_addr) { + Network::Address::InstanceConstSharedPtr local_addr, + QuicStatNames& quic_stat_names, Stats::Scope& scope) { // This flag fix a QUICHE issue which may crash Envoy during connection close. SetQuicReloadableFlag(quic_single_ack_in_packet2, true); PersistentQuicInfoImpl* info_impl = reinterpret_cast(&info); @@ -74,7 +75,7 @@ createQuicNetworkConnection(Http::PersistentQuicInfo& info, Event::Dispatcher& d auto ret = std::make_unique( info_impl->quic_config_, info_impl->supported_versions_, std::move(connection), info_impl->server_id_, std::move(config), &info_impl->push_promise_index_, dispatcher, - info_impl->buffer_limit_, info_impl->crypto_stream_factory_); + info_impl->buffer_limit_, info_impl->crypto_stream_factory_, quic_stat_names, scope); return ret; } diff --git a/source/common/quic/client_connection_factory_impl.h b/source/common/quic/client_connection_factory_impl.h index 269d4bb180733..5267c67b8fd0f 100644 --- a/source/common/quic/client_connection_factory_impl.h +++ b/source/common/quic/client_connection_factory_impl.h @@ -54,7 +54,8 @@ struct PersistentQuicInfoImpl : public Http::PersistentQuicInfo { std::unique_ptr createQuicNetworkConnection(Http::PersistentQuicInfo& info, Event::Dispatcher& dispatcher, Network::Address::InstanceConstSharedPtr server_addr, - Network::Address::InstanceConstSharedPtr local_addr); + Network::Address::InstanceConstSharedPtr local_addr, + QuicStatNames& quic_stat_names, Stats::Scope& scope); } // namespace Quic } // namespace Envoy 
diff --git a/source/common/quic/envoy_quic_client_session.cc b/source/common/quic/envoy_quic_client_session.cc index 6525b085bca1d..e300b820f873e 100644 --- a/source/common/quic/envoy_quic_client_session.cc +++ b/source/common/quic/envoy_quic_client_session.cc @@ -10,13 +10,15 @@ EnvoyQuicClientSession::EnvoyQuicClientSession( std::unique_ptr connection, const quic::QuicServerId& server_id, std::shared_ptr crypto_config, quic::QuicClientPushPromiseIndex* push_promise_index, Event::Dispatcher& dispatcher, - uint32_t send_buffer_limit, EnvoyQuicCryptoClientStreamFactoryInterface& crypto_stream_factory) + uint32_t send_buffer_limit, EnvoyQuicCryptoClientStreamFactoryInterface& crypto_stream_factory, + QuicStatNames& quic_stat_names, Stats::Scope& scope) : QuicFilterManagerConnectionImpl(*connection, connection->connection_id(), dispatcher, send_buffer_limit), quic::QuicSpdyClientSession(config, supported_versions, connection.release(), server_id, crypto_config.get(), push_promise_index), host_name_(server_id.host()), crypto_config_(crypto_config), - crypto_stream_factory_(crypto_stream_factory) {} + crypto_stream_factory_(crypto_stream_factory), quic_stat_names_(quic_stat_names), + scope_(scope) {} EnvoyQuicClientSession::~EnvoyQuicClientSession() { ASSERT(!connection()->connected()); @@ -35,6 +37,7 @@ void EnvoyQuicClientSession::connect() { void EnvoyQuicClientSession::OnConnectionClosed(const quic::QuicConnectionCloseFrame& frame, quic::ConnectionCloseSource source) { quic::QuicSpdyClientSession::OnConnectionClosed(frame, source); + quic_stat_names_.chargeQuicConnectionCloseStats(scope_, frame.quic_error_code, source, true); onConnectionCloseEvent(frame, source, version()); } diff --git a/source/common/quic/envoy_quic_client_session.h b/source/common/quic/envoy_quic_client_session.h index 955dbe0ef40f4..307a191e36225 100644 --- a/source/common/quic/envoy_quic_client_session.h +++ b/source/common/quic/envoy_quic_client_session.h @@ -17,6 +17,7 @@ #include 
"source/common/quic/envoy_quic_client_connection.h" #include "source/common/quic/quic_filter_manager_connection_impl.h" #include "source/common/quic/envoy_quic_crypto_stream_factory.h" +#include "source/common/quic/quic_stat_names.h" namespace Envoy { namespace Quic { @@ -38,7 +39,8 @@ class EnvoyQuicClientSession : public QuicFilterManagerConnectionImpl, std::shared_ptr crypto_config, quic::QuicClientPushPromiseIndex* push_promise_index, Event::Dispatcher& dispatcher, uint32_t send_buffer_limit, - EnvoyQuicCryptoClientStreamFactoryInterface& crypto_stream_factory); + EnvoyQuicCryptoClientStreamFactoryInterface& crypto_stream_factory, + QuicStatNames& quic_stat_names, Stats::Scope& scope); ~EnvoyQuicClientSession() override; @@ -102,6 +104,8 @@ class EnvoyQuicClientSession : public QuicFilterManagerConnectionImpl, const absl::string_view host_name_; std::shared_ptr crypto_config_; EnvoyQuicCryptoClientStreamFactoryInterface& crypto_stream_factory_; + QuicStatNames& quic_stat_names_; + Stats::Scope& scope_; }; } // namespace Quic diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index 901d530d882df..13dcb3846af3b 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -87,6 +87,7 @@ envoy_cc_library( "//source/common/tcp:conn_pool_lib", "//source/common/upstream:priority_conn_pool_map_impl_lib", "//source/common/upstream:upstream_lib", + "//source/common/quic:quic_stat_names_lib", "@envoy_api//envoy/admin/v3:pkg_cc_proto", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index 63d0cf5c798fd..24343d13af833 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -1550,7 +1550,8 @@ Http::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateConnPool( 
Envoy::Http::ConnectivityGrid::ConnectivityOptions coptions{protocols}; return std::make_unique( dispatcher, api_.randomGenerator(), host, priority, options, transport_socket_options, - state, source, alternate_protocols_cache, std::chrono::milliseconds(300), coptions); + state, source, alternate_protocols_cache, std::chrono::milliseconds(300), coptions, + quic_stat_names_, stats_); #else // Should be blocked by configuration checking at an earlier point. NOT_REACHED_GCOVR_EXCL_LINE; @@ -1571,7 +1572,8 @@ Http::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateConnPool( runtime_.snapshot().featureEnabled("upstream.use_http3", 100)) { #ifdef ENVOY_ENABLE_QUIC return Http::Http3::allocateConnPool(dispatcher, api_.randomGenerator(), host, priority, - options, transport_socket_options, state, source); + options, transport_socket_options, state, source, + quic_stat_names_, stats_); #else UNREFERENCED_PARAMETER(source); // Should be blocked by configuration checking at an earlier point. 
diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index c3d436eb3f9ac..d03ea3c439626 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -32,6 +32,7 @@ #include "source/common/http/alternate_protocols_cache_impl.h" #include "source/common/http/alternate_protocols_cache_manager_impl.h" #include "source/common/http/async_client_impl.h" +#include "source/common/quic/quic_stat_names.h" #include "source/common/upstream/load_stats_reporter.h" #include "source/common/upstream/priority_conn_pool_map.h" #include "source/common/upstream/upstream_impl.h" @@ -44,23 +45,21 @@ namespace Upstream { */ class ProdClusterManagerFactory : public ClusterManagerFactory { public: - ProdClusterManagerFactory(Server::Admin& admin, Runtime::Loader& runtime, Stats::Store& stats, - ThreadLocal::Instance& tls, Network::DnsResolverSharedPtr dns_resolver, - Ssl::ContextManager& ssl_context_manager, - Event::Dispatcher& main_thread_dispatcher, - const LocalInfo::LocalInfo& local_info, - Secret::SecretManager& secret_manager, - ProtobufMessage::ValidationContext& validation_context, Api::Api& api, - Http::Context& http_context, Grpc::Context& grpc_context, - Router::Context& router_context, - AccessLog::AccessLogManager& log_manager, - Singleton::Manager& singleton_manager, const Server::Options& options) + ProdClusterManagerFactory( + Server::Admin& admin, Runtime::Loader& runtime, Stats::Store& stats, + ThreadLocal::Instance& tls, Network::DnsResolverSharedPtr dns_resolver, + Ssl::ContextManager& ssl_context_manager, Event::Dispatcher& main_thread_dispatcher, + const LocalInfo::LocalInfo& local_info, Secret::SecretManager& secret_manager, + ProtobufMessage::ValidationContext& validation_context, Api::Api& api, + Http::Context& http_context, Grpc::Context& grpc_context, Router::Context& router_context, + AccessLog::AccessLogManager& log_manager, Singleton::Manager& 
singleton_manager, + const Server::Options& options, Quic::QuicStatNames& quic_stat_names) : main_thread_dispatcher_(main_thread_dispatcher), validation_context_(validation_context), api_(api), http_context_(http_context), grpc_context_(grpc_context), router_context_(router_context), admin_(admin), runtime_(runtime), stats_(stats), tls_(tls), dns_resolver_(dns_resolver), ssl_context_manager_(ssl_context_manager), local_info_(local_info), secret_manager_(secret_manager), log_manager_(log_manager), - singleton_manager_(singleton_manager), options_(options), + singleton_manager_(singleton_manager), options_(options), quic_stat_names_(quic_stat_names), alternate_protocols_cache_manager_factory_(singleton_manager, main_thread_dispatcher.timeSource(), tls_), alternate_protocols_cache_manager_(alternate_protocols_cache_manager_factory_.get()) {} @@ -108,6 +107,7 @@ class ProdClusterManagerFactory : public ClusterManagerFactory { AccessLog::AccessLogManager& log_manager_; Singleton::Manager& singleton_manager_; const Server::Options& options_; + Quic::QuicStatNames& quic_stat_names_; Http::AlternateProtocolsCacheManagerFactoryImpl alternate_protocols_cache_manager_factory_; Http::AlternateProtocolsCacheManagerSharedPtr alternate_protocols_cache_manager_; }; diff --git a/source/server/BUILD b/source/server/BUILD index 9c893e5444288..3e6f60ab88002 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -411,8 +411,8 @@ envoy_cc_library( "//source/common/network:utility_lib", "//source/common/protobuf:utility_lib", "//source/common/stream_info:stream_info_lib", - "//source/extensions/filters/network/http_connection_manager:config", "//source/common/quic:quic_stat_names_lib", + "//source/extensions/filters/network/http_connection_manager:config", "//source/extensions/upstreams/http/generic:config", "@envoy_api//envoy/admin/v3:pkg_cc_proto", "@envoy_api//envoy/config/core/v3:pkg_cc_proto", @@ -520,6 +520,7 @@ envoy_cc_library( "//source/common/memory:heap_shrinker_lib", 
"//source/common/memory:stats_lib", "//source/common/protobuf:utility_lib", + "//source/common/quic:quic_stat_names_lib", "//source/common/router:rds_lib", "//source/common/runtime:runtime_lib", "//source/common/secret:secret_manager_impl_lib", diff --git a/source/server/config_validation/BUILD b/source/server/config_validation/BUILD index f1c14f54e4f0d..f33cd523ce856 100644 --- a/source/server/config_validation/BUILD +++ b/source/server/config_validation/BUILD @@ -90,6 +90,7 @@ envoy_cc_library( "//source/common/grpc:common_lib", "//source/common/local_info:local_info_lib", "//source/common/protobuf:utility_lib", + "//source/common/quic:quic_stat_names_lib", "//source/common/router:context_lib", "//source/common/router:rds_lib", "//source/common/runtime:runtime_lib", diff --git a/source/server/config_validation/cluster_manager.h b/source/server/config_validation/cluster_manager.h index 60c801a5260da..0cfc3fa31dff6 100644 --- a/source/server/config_validation/cluster_manager.h +++ b/source/server/config_validation/cluster_manager.h @@ -28,11 +28,11 @@ class ValidationClusterManagerFactory : public ProdClusterManagerFactory { ProtobufMessage::ValidationContext& validation_context, Api::Api& api, Http::Context& http_context, Grpc::Context& grpc_context, Router::Context& router_context, AccessLog::AccessLogManager& log_manager, Singleton::Manager& singleton_manager, - const Server::Options& options) - : ProdClusterManagerFactory(admin, runtime, stats, tls, dns_resolver, ssl_context_manager, - main_thread_dispatcher, local_info, secret_manager, - validation_context, api, http_context, grpc_context, - router_context, log_manager, singleton_manager, options), + const Server::Options& options, Quic::QuicStatNames& quic_stat_names) + : ProdClusterManagerFactory( + admin, runtime, stats, tls, dns_resolver, ssl_context_manager, main_thread_dispatcher, + local_info, secret_manager, validation_context, api, http_context, grpc_context, + router_context, log_manager, 
singleton_manager, options, quic_stat_names), grpc_context_(grpc_context), router_context_(router_context) {} ClusterManagerPtr diff --git a/source/server/config_validation/server.cc b/source/server/config_validation/server.cc index 98014fbd614f6..3d37c7ac56c49 100644 --- a/source/server/config_validation/server.cc +++ b/source/server/config_validation/server.cc @@ -53,7 +53,8 @@ ValidationInstance::ValidationInstance( store), mutex_tracer_(nullptr), grpc_context_(stats_store_.symbolTable()), http_context_(stats_store_.symbolTable()), router_context_(stats_store_.symbolTable()), - time_system_(time_system), server_contexts_(*this) { + time_system_(time_system), server_contexts_(*this), + quic_stat_names_(stats_store_.symbolTable()) { TRY_ASSERT_MAIN_THREAD { initialize(options, local_address, component_factory); } END_TRY catch (const EnvoyException& e) { @@ -94,7 +95,8 @@ void ValidationInstance::initialize(const Options& options, Configuration::InitialImpl initial_config(bootstrap, options); initial_config.initAdminAccessLog(bootstrap, *this); admin_ = std::make_unique(initial_config.admin().address()); - listener_manager_ = std::make_unique(*this, *this, *this, false); + listener_manager_ = + std::make_unique(*this, *this, *this, false, quic_stat_names_); thread_local_.registerThread(*dispatcher_, true); runtime_singleton_ = std::make_unique( component_factory.createRuntime(*this, initial_config)); @@ -103,7 +105,8 @@ void ValidationInstance::initialize(const Options& options, cluster_manager_factory_ = std::make_unique( admin(), runtime(), stats(), threadLocal(), dnsResolver(), sslContextManager(), dispatcher(), localInfo(), *secret_manager_, messageValidationContext(), *api_, http_context_, - grpc_context_, router_context_, accessLogManager(), singletonManager(), options); + grpc_context_, router_context_, accessLogManager(), singletonManager(), options, + quic_stat_names_); config_.initialize(bootstrap, *this, *cluster_manager_factory_); 
runtime().initialize(clusterManager()); clusterManager().setInitializedCb([this]() -> void { init_manager_.initialize(init_watcher_); }); diff --git a/source/server/config_validation/server.h b/source/server/config_validation/server.h index a569f76a1fdf1..356769f2b5962 100644 --- a/source/server/config_validation/server.h +++ b/source/server/config_validation/server.h @@ -16,6 +16,7 @@ #include "source/common/common/random_generator.h" #include "source/common/grpc/common.h" #include "source/common/protobuf/message_validator_impl.h" +#include "source/common/quic/quic_stat_names.h" #include "source/common/router/context_impl.h" #include "source/common/router/rds_impl.h" #include "source/common/runtime/runtime_impl.h" @@ -213,6 +214,7 @@ class ValidationInstance final : Logger::Loggable, Router::ContextImpl router_context_; Event::TimeSystem& time_system_; ServerFactoryContextImpl server_contexts_; + Quic::QuicStatNames quic_stat_names_; }; } // namespace Server diff --git a/source/server/listener_manager_impl.cc b/source/server/listener_manager_impl.cc index 98814eea12918..5fd3db577d6b8 100644 --- a/source/server/listener_manager_impl.cc +++ b/source/server/listener_manager_impl.cc @@ -243,7 +243,8 @@ DrainingFilterChainsManager::DrainingFilterChainsManager(ListenerImplPtr&& drain ListenerManagerImpl::ListenerManagerImpl(Instance& server, ListenerComponentFactory& listener_factory, WorkerFactory& worker_factory, - bool enable_dispatcher_stats) + bool enable_dispatcher_stats, + Quic::QuicStatNames& quic_stat_names) : server_(server), factory_(listener_factory), scope_(server.stats().createScope("listener_manager.")), stats_(generateStats(*scope_)), config_tracker_entry_(server.admin().getConfigTracker().add( @@ -251,8 +252,7 @@ ListenerManagerImpl::ListenerManagerImpl(Instance& server, [this](const Matchers::StringMatcher& name_matcher) { return dumpListenerConfigs(name_matcher); })), - enable_dispatcher_stats_(enable_dispatcher_stats), - 
quic_stat_names_(server_.stats().symbolTable()) { + enable_dispatcher_stats_(enable_dispatcher_stats), quic_stat_names_(quic_stat_names) { for (uint32_t i = 0; i < server.options().concurrency(); i++) { workers_.emplace_back( worker_factory.createWorker(i, server.overloadManager(), absl::StrCat("worker_", i))); diff --git a/source/server/listener_manager_impl.h b/source/server/listener_manager_impl.h index a98e8fbfe56c4..ad85054671954 100644 --- a/source/server/listener_manager_impl.h +++ b/source/server/listener_manager_impl.h @@ -177,7 +177,8 @@ class DrainingFilterChainsManager { class ListenerManagerImpl : public ListenerManager, Logger::Loggable { public: ListenerManagerImpl(Instance& server, ListenerComponentFactory& listener_factory, - WorkerFactory& worker_factory, bool enable_dispatcher_stats); + WorkerFactory& worker_factory, bool enable_dispatcher_stats, + Quic::QuicStatNames& quic_stat_names); void onListenerWarmed(ListenerImpl& listener); void inPlaceFilterChainUpdate(ListenerImpl& listener); @@ -323,7 +324,7 @@ class ListenerManagerImpl : public ListenerManager, Logger::Loggable> error_state_tracker_; FailureStates overall_error_state_; - Quic::QuicStatNames quic_stat_names_; + Quic::QuicStatNames& quic_stat_names_; }; class ListenerFilterChainFactoryBuilder : public FilterChainFactoryBuilder { diff --git a/source/server/server.cc b/source/server/server.cc index b82655a58b360..d8bc98da42ef2 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -88,7 +88,8 @@ InstanceImpl::InstanceImpl( : nullptr), grpc_context_(store.symbolTable()), http_context_(store.symbolTable()), router_context_(store.symbolTable()), process_context_(std::move(process_context)), - hooks_(hooks), server_contexts_(*this), stats_flush_in_progress_(false) { + hooks_(hooks), quic_stat_names_(store.symbolTable()), server_contexts_(*this), + stats_flush_in_progress_(false) { TRY_ASSERT_MAIN_THREAD { if (!options.logPath().empty()) { TRY_ASSERT_MAIN_THREAD { @@ -517,8 
+518,9 @@ void InstanceImpl::initialize(const Options& options, } // Workers get created first so they register for thread local updates. - listener_manager_ = std::make_unique( - *this, listener_component_factory_, worker_factory_, bootstrap_.enable_dispatcher_stats()); + listener_manager_ = + std::make_unique(*this, listener_component_factory_, worker_factory_, + bootstrap_.enable_dispatcher_stats(), quic_stat_names_); // The main thread is also registered for thread local updates so that code that does not care // whether it runs on the main thread or on workers can still use TLS. @@ -592,7 +594,7 @@ void InstanceImpl::initialize(const Options& options, *admin_, Runtime::LoaderSingleton::get(), stats_store_, thread_local_, dns_resolver_, *ssl_context_manager_, *dispatcher_, *local_info_, *secret_manager_, messageValidationContext(), *api_, http_context_, grpc_context_, router_context_, - access_log_manager_, *singleton_manager_, options_); + access_log_manager_, *singleton_manager_, options_, quic_stat_names_); // Now the configuration gets parsed. The configuration may start setting // thread local data per above. 
See MainImpl::initialize() for why ConfigImpl diff --git a/source/server/server.h b/source/server/server.h index 945567740bb7e..36a017416317c 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -33,6 +33,7 @@ #include "source/common/init/manager_impl.h" #include "source/common/memory/heap_shrinker.h" #include "source/common/protobuf/message_validator_impl.h" +#include "source/common/quic/quic_stat_names.h" #include "source/common/router/context_impl.h" #include "source/common/runtime/runtime_impl.h" #include "source/common/secret/secret_manager_impl.h" @@ -288,6 +289,8 @@ class InstanceImpl final : Logger::Loggable, } bool enableReusePortDefault() override; + Quic::QuicStatNames& quicStatNames() { return quic_stat_names_; } + // ServerLifecycleNotifier ServerLifecycleNotifier::HandlePtr registerCallback(Stage stage, StageCallback callback) override; ServerLifecycleNotifier::HandlePtr @@ -382,6 +385,7 @@ class InstanceImpl final : Logger::Loggable, // whenever we have support for histogram merge across hot restarts. 
Stats::TimespanPtr initialization_timer_; ListenerHooks& hooks_; + Quic::QuicStatNames quic_stat_names_; ServerFactoryContextImpl server_contexts_; absl::optional enable_reuse_port_default_; diff --git a/test/common/http/conn_manager_impl_test.cc b/test/common/http/conn_manager_impl_test.cc index 69894daf187aa..c1fe56f0fbede 100644 --- a/test/common/http/conn_manager_impl_test.cc +++ b/test/common/http/conn_manager_impl_test.cc @@ -303,7 +303,7 @@ TEST_F(HttpConnectionManagerImplTest, PopulateStreamInfo) { EXPECT_EQ(requestIDExtension().get(), decoder_->streamInfo().getRequestIDProvider()); EXPECT_EQ(ssl_connection_, decoder_->streamInfo().downstreamSslConnection()); EXPECT_EQ(filter_callbacks_.connection_.id_, - decoder_->streamInfo().downstreamAddressProvider().connectionID()); + decoder_->streamInfo().downstreamAddressProvider().connectionID().value()); EXPECT_EQ(server_name_, decoder_->streamInfo().downstreamAddressProvider().requestedServerName()); // Clean up. diff --git a/test/common/http/conn_pool_grid_test.cc b/test/common/http/conn_pool_grid_test.cc index 945af22b86ab7..ec76c120984ec 100644 --- a/test/common/http/conn_pool_grid_test.cc +++ b/test/common/http/conn_pool_grid_test.cc @@ -101,10 +101,12 @@ class ConnectivityGridTestBase : public Event::TestUsingSimulatedTime, public te ConnectivityGridTestBase(bool use_alternate_protocols) : options_({Http::Protocol::Http11, Http::Protocol::Http2, Http::Protocol::Http3}), alternate_protocols_(maybeCreateAlternateProtocolsCacheImpl(use_alternate_protocols)), + quic_stat_names_(store_.symbolTable()), grid_(dispatcher_, random_, Upstream::makeTestHost(cluster_, "hostname", "tcp://127.0.0.1:9000", simTime()), Upstream::ResourcePriority::Default, socket_options_, transport_socket_options_, - state_, simTime(), alternate_protocols_, std::chrono::milliseconds(300), options_), + state_, simTime(), alternate_protocols_, std::chrono::milliseconds(300), options_, + quic_stat_names_, store_), host_(grid_.host()) { 
grid_.info_ = &info_; grid_.encoder_ = &encoder_; @@ -134,6 +136,8 @@ class ConnectivityGridTestBase : public Event::TestUsingSimulatedTime, public te std::shared_ptr cluster_{new NiceMock()}; NiceMock random_; AlternateProtocolsCacheSharedPtr alternate_protocols_; + Stats::IsolatedStoreImpl store_; + Quic::QuicStatNames quic_stat_names_; ConnectivityGridForTest grid_; Upstream::HostDescriptionConstSharedPtr host_; @@ -620,10 +624,11 @@ TEST_F(ConnectivityGridTest, RealGrid) { .WillRepeatedly( Return(Upstream::TransportSocketMatcher::MatchData(*factory, matcher.stats_, "test"))); - ConnectivityGrid grid( - dispatcher_, random_, Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:9000", simTime()), - Upstream::ResourcePriority::Default, socket_options_, transport_socket_options_, state_, - simTime(), alternate_protocols_, std::chrono::milliseconds(300), options_); + ConnectivityGrid grid(dispatcher_, random_, + Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:9000", simTime()), + Upstream::ResourcePriority::Default, socket_options_, + transport_socket_options_, state_, simTime(), alternate_protocols_, + std::chrono::milliseconds(300), options_, quic_stat_names_, store_); // Create the HTTP/3 pool. 
auto optional_it1 = ConnectivityGridForTest::forceCreateNextPool(grid); diff --git a/test/common/http/http3/conn_pool_test.cc b/test/common/http/http3/conn_pool_test.cc index fdb472e8c7665..ba4c3124cb38c 100644 --- a/test/common/http/http3/conn_pool_test.cc +++ b/test/common/http/http3/conn_pool_test.cc @@ -45,8 +45,9 @@ class Http3ConnPoolImplTest : public Event::TestUsingSimulatedTime, public testi new Event::MockSchedulableCallback(&dispatcher_); Network::ConnectionSocket::OptionsSharedPtr options; Network::TransportSocketOptionsConstSharedPtr transport_options; - pool_ = allocateConnPool(dispatcher_, random_, host_, Upstream::ResourcePriority::Default, - options, transport_options, state_, simTime()); + pool_ = + allocateConnPool(dispatcher_, random_, host_, Upstream::ResourcePriority::Default, options, + transport_options, state_, simTime(), quic_stat_names_, store_); } Upstream::MockHost& mockHost() { return static_cast(*host_); } @@ -62,6 +63,8 @@ class Http3ConnPoolImplTest : public Event::TestUsingSimulatedTime, public testi Quic::QuicClientTransportSocketFactory factory_{ std::unique_ptr(new NiceMock), context_}; + Stats::IsolatedStoreImpl store_; + Quic::QuicStatNames quic_stat_names_{store_.symbolTable()}; ConnectionPool::InstancePtr pool_; }; diff --git a/test/common/quic/client_connection_factory_impl_test.cc b/test/common/quic/client_connection_factory_impl_test.cc index 2995db8e98583..97220f85244a9 100644 --- a/test/common/quic/client_connection_factory_impl_test.cc +++ b/test/common/quic/client_connection_factory_impl_test.cc @@ -45,6 +45,8 @@ class QuicNetworkConnectionTest : public Event::TestUsingSimulatedTime, public t Network::Address::InstanceConstSharedPtr test_address_; NiceMock context_; std::unique_ptr factory_; + Stats::IsolatedStoreImpl store_; + QuicStatNames quic_stat_names_{store_.symbolTable()}; }; TEST_F(QuicNetworkConnectionTest, BufferLimits) { @@ -53,8 +55,8 @@ TEST_F(QuicNetworkConnectionTest, BufferLimits) { quic::QuicConfig 
config; PersistentQuicInfoImpl info{dispatcher_, *factory_, simTime(), test_address_, config, 45}; - std::unique_ptr client_connection = - createQuicNetworkConnection(info, dispatcher_, test_address_, test_address_); + std::unique_ptr client_connection = createQuicNetworkConnection( + info, dispatcher_, test_address_, test_address_, quic_stat_names_, store_); EnvoyQuicClientSession* session = static_cast(client_connection.get()); session->Initialize(); client_connection->connect(); diff --git a/test/common/quic/envoy_quic_client_session_test.cc b/test/common/quic/envoy_quic_client_session_test.cc index 62b9c66516736..812818016fe2f 100644 --- a/test/common/quic/envoy_quic_client_session_test.cc +++ b/test/common/quic/envoy_quic_client_session_test.cc @@ -83,13 +83,14 @@ class EnvoyQuicClientSessionTest : public testing::TestWithParam { quic_version_, *dispatcher_, createConnectionSocket(peer_addr_, self_addr_, nullptr))), crypto_config_(std::make_shared( quic::test::crypto_test_utils::ProofVerifierForTesting())), - envoy_quic_session_(quic_config_, quic_version_, - std::unique_ptr(quic_connection_), - quic::QuicServerId("example.com", 443, false), crypto_config_, nullptr, - *dispatcher_, - /*send_buffer_limit*/ 1024 * 1024, crypto_stream_factory_), - stats_({ALL_HTTP3_CODEC_STATS(POOL_COUNTER_PREFIX(scope_, "http3."), - POOL_GAUGE_PREFIX(scope_, "http3."))}), + quic_stat_names_(store_.symbolTable()), + envoy_quic_session_( + quic_config_, quic_version_, + std::unique_ptr(quic_connection_), + quic::QuicServerId("example.com", 443, false), crypto_config_, nullptr, *dispatcher_, + /*send_buffer_limit*/ 1024 * 1024, crypto_stream_factory_, quic_stat_names_, store_), + stats_({ALL_HTTP3_CODEC_STATS(POOL_COUNTER_PREFIX(store_, "http3."), + POOL_GAUGE_PREFIX(store_, "http3."))}), http_connection_(envoy_quic_session_, http_connection_callbacks_, stats_, http3_options_, 64 * 1024, 100) { EXPECT_EQ(time_system_.systemTime(), envoy_quic_session_.streamInfo().startTime()); @@ 
-148,6 +149,8 @@ class EnvoyQuicClientSessionTest : public testing::TestWithParam { quic::QuicConfig quic_config_; std::shared_ptr crypto_config_; TestQuicCryptoClientStreamFactory crypto_stream_factory_; + Stats::IsolatedStoreImpl store_; + QuicStatNames quic_stat_names_; EnvoyQuicClientSession envoy_quic_session_; Network::MockConnectionCallbacks network_connection_callbacks_; Http::MockServerConnectionCallbacks http_connection_callbacks_; @@ -155,7 +158,6 @@ class EnvoyQuicClientSessionTest : public testing::TestWithParam { testing::StrictMock read_current_; testing::StrictMock write_total_; testing::StrictMock write_current_; - Stats::IsolatedStoreImpl scope_; Http::Http3::CodecStats stats_; envoy::config::core::v3::Http3ProtocolOptions http3_options_; QuicHttpClientConnectionImpl http_connection_; @@ -257,6 +259,11 @@ TEST_P(EnvoyQuicClientSessionTest, ConnectionClose) { EXPECT_EQ(absl::StrCat(quic::QuicErrorCodeToString(error), " with details: ", error_details), envoy_quic_session_.transportFailureReason()); EXPECT_EQ(Network::Connection::State::Closed, envoy_quic_session_.state()); + + EXPECT_EQ( + 1U, TestUtility::findCounter( + store_, "http3.upstream.rx.quic_connection_close_error_code_QUIC_INVALID_FRAME_DATA") + ->value()); } TEST_P(EnvoyQuicClientSessionTest, ConnectionCloseWithActiveStream) { @@ -289,13 +296,14 @@ class EnvoyQuicClientSessionAllQuicVersionTest createConnectionSocket(peer_addr_, self_addr_, nullptr))), crypto_config_(std::make_shared( quic::test::crypto_test_utils::ProofVerifierForTesting())), - envoy_quic_session_(quic_config_, quic::test::SupportedVersions(GetParam()), - std::unique_ptr(quic_connection_), - quic::QuicServerId("example.com", 443, false), crypto_config_, nullptr, - *dispatcher_, - /*send_buffer_limit*/ 1024 * 1024, crypto_stream_factory_), - stats_({ALL_HTTP3_CODEC_STATS(POOL_COUNTER_PREFIX(scope_, "http3."), - POOL_GAUGE_PREFIX(scope_, "http3."))}), + quic_stat_names_(store_.symbolTable()), + envoy_quic_session_( + 
quic_config_, quic::test::SupportedVersions(GetParam()), + std::unique_ptr(quic_connection_), + quic::QuicServerId("example.com", 443, false), crypto_config_, nullptr, *dispatcher_, + /*send_buffer_limit*/ 1024 * 1024, crypto_stream_factory_, quic_stat_names_, store_), + stats_({ALL_HTTP3_CODEC_STATS(POOL_COUNTER_PREFIX(store_, "http3."), + POOL_GAUGE_PREFIX(store_, "http3."))}), http_connection_(envoy_quic_session_, http_connection_callbacks_, stats_, http3_options_, 64 * 1024, 100) { EXPECT_EQ(time_system_.systemTime(), envoy_quic_session_.streamInfo().startTime()); @@ -339,6 +347,8 @@ class EnvoyQuicClientSessionAllQuicVersionTest quic::QuicConfig quic_config_; std::shared_ptr crypto_config_; TestQuicCryptoClientStreamFactory crypto_stream_factory_; + Stats::IsolatedStoreImpl store_; + QuicStatNames quic_stat_names_; EnvoyQuicClientSession envoy_quic_session_; Network::MockConnectionCallbacks network_connection_callbacks_; Http::MockServerConnectionCallbacks http_connection_callbacks_; @@ -346,7 +356,6 @@ class EnvoyQuicClientSessionAllQuicVersionTest testing::StrictMock read_current_; testing::StrictMock write_total_; testing::StrictMock write_current_; - Stats::IsolatedStoreImpl scope_; Http::Http3::CodecStats stats_; envoy::config::core::v3::Http3ProtocolOptions http3_options_; QuicHttpClientConnectionImpl http_connection_; @@ -391,7 +400,7 @@ TEST_P(EnvoyQuicClientSessionAllQuicVersionTest, ConnectionClosePopulatesQuicVer break; } EXPECT_EQ(1U, TestUtility::findCounter( - scope_, absl::StrCat("http3.quic_version_", quic_version_stat_name)) + store_, absl::StrCat("http3.quic_version_", quic_version_stat_name)) ->value()); } diff --git a/test/common/quic/test_utils.h b/test/common/quic/test_utils.h index c5487c81538e0..192f0d4465c65 100644 --- a/test/common/quic/test_utils.h +++ b/test/common/quic/test_utils.h @@ -26,6 +26,7 @@ #include "source/common/quic/envoy_quic_utils.h" #include "source/common/quic/envoy_quic_client_session.h" #include 
"test/test_common/environment.h" +#include "source/common/stats/isolated_store_impl.h" namespace Envoy { namespace Quic { @@ -168,7 +169,8 @@ class MockEnvoyQuicClientSession : public EnvoyQuicClientSession { quic::QuicServerId("example.com", 443, false), std::make_shared( quic::test::crypto_test_utils::ProofVerifierForTesting()), - nullptr, dispatcher, send_buffer_limit, crypto_stream_factory) {} + nullptr, dispatcher, send_buffer_limit, crypto_stream_factory, + quic_stat_names_, stats_store_) {} void Initialize() override { EnvoyQuicClientSession::Initialize(); @@ -203,6 +205,9 @@ class MockEnvoyQuicClientSession : public EnvoyQuicClientSession { return initialized_ ? connection() : nullptr; } quic::QuicConnection* quicConnection() override { return initialized_ ? connection() : nullptr; } + + Stats::IsolatedStoreImpl stats_store_; + QuicStatNames quic_stat_names_{stats_store_.symbolTable()}; }; Buffer::OwnedImpl diff --git a/test/common/router/config_impl_test.cc b/test/common/router/config_impl_test.cc index fba1a9ab1f98d..3a40e003bc4bc 100644 --- a/test/common/router/config_impl_test.cc +++ b/test/common/router/config_impl_test.cc @@ -2224,7 +2224,7 @@ TEST_F(RouterMatcherHashPolicyTest, HashHeadersWithMultipleValues) { EXPECT_FALSE(generateHash({})); EXPECT_TRUE(generateHash({"bar"})); - EXPECT_NE(0UL, generateHash({"bar", "foo"})); + EXPECT_NE(0UL, generateHash({"bar", "foo"}).value()); EXPECT_EQ(generateHash({"bar", "foo"}), generateHash({"bar", "foo"})); // deterministic EXPECT_EQ(generateHash({"bar", "foo"}), generateHash({"foo", "bar"})); // order independent EXPECT_NE(generateHash({"abcd", "ef"}), generateHash({"abc", "def"})); @@ -2273,7 +2273,7 @@ TEST_F(RouterMatcherHashPolicyTest, HashHeadersRegexSubstitutionWithMultipleValu EXPECT_FALSE(generateHash({})); EXPECT_TRUE(generateHash({"/bar"})); - EXPECT_NE(0UL, generateHash({"/bar", "/foo"})); + EXPECT_NE(0UL, generateHash({"/bar", "/foo"}).value()); EXPECT_EQ(generateHash({"bar", "foo"}), 
generateHash({"/bar", "/foo"})); // deterministic EXPECT_EQ(generateHash({"bar", "foo"}), generateHash({"/foo", "/bar"})); // order independent EXPECT_NE(generateHash({"abcd", "ef"}), generateHash({"/abc", "/def"})); diff --git a/test/config_test/config_test.cc b/test/config_test/config_test.cc index 865c6b562ee4f..5dc666dd6f476 100644 --- a/test/config_test/config_test.cc +++ b/test/config_test/config_test.cc @@ -107,7 +107,7 @@ class ConfigTest { server_.dnsResolver(), ssl_context_manager_, server_.dispatcher(), server_.localInfo(), server_.secretManager(), server_.messageValidationContext(), *api_, server_.httpContext(), server_.grpcContext(), server_.routerContext(), server_.accessLogManager(), - server_.singletonManager(), server_.options()); + server_.singletonManager(), server_.options(), server_.quic_stat_names_); ON_CALL(server_, clusterManager()).WillByDefault(Invoke([&]() -> Upstream::ClusterManager& { return *main_config.clusterManager(); @@ -160,8 +160,8 @@ class ConfigTest { std::unique_ptr cluster_manager_factory_; NiceMock component_factory_; NiceMock worker_factory_; - Server::ListenerManagerImpl listener_manager_{server_, component_factory_, worker_factory_, - false}; + Server::ListenerManagerImpl listener_manager_{server_, component_factory_, worker_factory_, false, + server_.quic_stat_names_}; Random::RandomGeneratorImpl random_; std::shared_ptr snapshot_{ std::make_shared>()}; diff --git a/test/integration/BUILD b/test/integration/BUILD index 873ee7db78b20..544fed82743df 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -804,6 +804,7 @@ envoy_cc_test_library( "//source/common/http/http3:quic_client_connection_factory_lib", "//source/common/json:json_loader_lib", "//source/common/network:utility_lib", + "//source/common/quic:quic_stat_names_lib", "//source/common/stats:allocator_lib", "//source/common/stats:isolated_store_lib", "//source/common/thread_local:thread_local_lib", diff --git a/test/integration/http_integration.cc 
b/test/integration/http_integration.cc index 3f0f219e005eb..0430602910755 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -232,7 +232,7 @@ Network::ClientConnectionPtr HttpIntegrationTest::makeClientConnectionWithOption Network::Address::InstanceConstSharedPtr local_addr = Network::Test::getCanonicalLoopbackAddress(version_); return Quic::createQuicNetworkConnection(*quic_connection_persistent_info_, *dispatcher_, - server_addr, local_addr); + server_addr, local_addr, quic_stat_names_, stats_store_); #else ASSERT(false, "running a QUIC integration test without compiling QUIC"); return nullptr; @@ -302,7 +302,7 @@ HttpIntegrationTest::HttpIntegrationTest(Http::CodecType downstream_protocol, Network::Address::IpVersion version, const std::string& config) : BaseIntegrationTest(upstream_address_fn, version, config), - downstream_protocol_(downstream_protocol) { + downstream_protocol_(downstream_protocol), quic_stat_names_(stats_store_.symbolTable()) { // Legacy integration tests expect the default listener to be named "http" for // lookupPort calls. 
config_helper_.renameListener("http"); diff --git a/test/integration/http_integration.h b/test/integration/http_integration.h index 2d795cddba0a7..34a6ed6c82ee7 100644 --- a/test/integration/http_integration.h +++ b/test/integration/http_integration.h @@ -268,7 +268,7 @@ class HttpIntegrationTest : public BaseIntegrationTest { Http::CodecType downstream_protocol_{Http::CodecType::HTTP1}; std::string access_log_name_; testing::NiceMock random_; - + Quic::QuicStatNames quic_stat_names_; std::string san_to_match_{"spiffe://lyft.com/backend-team"}; }; diff --git a/test/integration/quic_http_integration_test.cc b/test/integration/quic_http_integration_test.cc index 3a38fea1fadba..a32fda3ddbee4 100644 --- a/test/integration/quic_http_integration_test.cc +++ b/test/integration/quic_http_integration_test.cc @@ -111,7 +111,7 @@ class QuicHttpIntegrationTest : public HttpIntegrationTest, public QuicMultiVers // Use smaller window than the default one to have test coverage of client codec buffer // exceeding high watermark. 
/*send_buffer_limit=*/2 * Http2::Utility::OptionsLimits::MIN_INITIAL_STREAM_WINDOW_SIZE, - persistent_info.crypto_stream_factory_); + persistent_info.crypto_stream_factory_, quic_stat_names_, stats_store_); return session; } diff --git a/test/integration/sds_dynamic_integration_test.cc b/test/integration/sds_dynamic_integration_test.cc index e29fc1e9cf4b9..8e7b7ad656f27 100644 --- a/test/integration/sds_dynamic_integration_test.cc +++ b/test/integration/sds_dynamic_integration_test.cc @@ -303,7 +303,8 @@ version_info: "0" local_address = std::make_shared("::1"); } return Quic::createQuicNetworkConnection(*quic_connection_persistent_info_, *dispatcher_, - Network::Utility::resolveUrl(url), local_address); + Network::Utility::resolveUrl(url), local_address, + quic_stat_names_, stats_store_); #else NOT_REACHED_GCOVR_EXCL_LINE; #endif diff --git a/test/integration/utility.cc b/test/integration/utility.cc index abf19d24a6439..c6746cae7869d 100644 --- a/test/integration/utility.cc +++ b/test/integration/utility.cc @@ -19,6 +19,7 @@ #include "source/common/http/http3/quic_client_connection_factory.h" #include "source/common/network/address_impl.h" #include "source/common/network/utility.h" +#include "source/common/quic/quic_stat_names.h" #include "source/common/upstream/upstream_impl.h" #ifdef ENVOY_ENABLE_QUIC @@ -182,6 +183,7 @@ IntegrationUtil::makeSingleRequest(const Network::Address::InstanceConstSharedPt const std::string& body, Http::CodecType type, const std::string& host, const std::string& content_type) { NiceMock mock_stats_store; + Quic::QuicStatNames quic_stat_names(mock_stats_store.symbolTable()); NiceMock random; Event::GlobalTimeSystem time_system; NiceMock random_generator; @@ -189,6 +191,7 @@ IntegrationUtil::makeSingleRequest(const Network::Address::InstanceConstSharedPt Filesystem::fileSystemForTest(), random_generator); Event::DispatcherPtr dispatcher(api.allocateDispatcher("test_thread")); TestConnectionCallbacks connection_callbacks(*dispatcher); + 
std::shared_ptr cluster{new NiceMock()}; Upstream::HostDescriptionConstSharedPtr host_description{Upstream::makeTestHostDescription( cluster, fmt::format("{}://127.0.0.1:80", (type == Http::CodecType::HTTP3 ? "udp" : "tcp")), @@ -221,8 +224,8 @@ IntegrationUtil::makeSingleRequest(const Network::Address::InstanceConstSharedPt // Docker only works with loopback v6 address. local_address = std::make_shared("::1"); } - Network::ClientConnectionPtr connection = - Quic::createQuicNetworkConnection(*persistent_info, *dispatcher, addr, local_address); + Network::ClientConnectionPtr connection = Quic::createQuicNetworkConnection( + *persistent_info, *dispatcher, addr, local_address, quic_stat_names, mock_stats_store); connection->addConnectionCallbacks(connection_callbacks); Http::CodecClientProd client(type, std::move(connection), host_description, *dispatcher, random); // Quic connection needs to finish handshake. diff --git a/test/mocks/server/BUILD b/test/mocks/server/BUILD index 679f3e6d1b2d2..c26d734bad79f 100644 --- a/test/mocks/server/BUILD +++ b/test/mocks/server/BUILD @@ -182,6 +182,7 @@ envoy_cc_mock( "//envoy/server:instance_interface", "//source/common/grpc:context_lib", "//source/common/http:context_lib", + "//source/common/quic:quic_stat_names_lib", "//source/common/router:context_lib", "//source/common/secret:secret_manager_impl_lib", "//source/common/singleton:manager_impl_lib", diff --git a/test/mocks/server/instance.cc b/test/mocks/server/instance.cc index 92be91025b9ed..f19f81dc4afcd 100644 --- a/test/mocks/server/instance.cc +++ b/test/mocks/server/instance.cc @@ -16,7 +16,7 @@ MockInstance::MockInstance() cluster_manager_(timeSource()), ssl_context_manager_(timeSource()), singleton_manager_(new Singleton::ManagerImpl(Thread::threadFactoryForTest())), grpc_context_(stats_store_.symbolTable()), http_context_(stats_store_.symbolTable()), - router_context_(stats_store_.symbolTable()), + router_context_(stats_store_.symbolTable()), 
quic_stat_names_(stats_store_.symbolTable()), stats_config_(std::make_shared>()), server_factory_context_( std::make_shared>()), diff --git a/test/mocks/server/instance.h b/test/mocks/server/instance.h index fe3f22fcc2305..455e82eebfec5 100644 --- a/test/mocks/server/instance.h +++ b/test/mocks/server/instance.h @@ -4,6 +4,7 @@ #include "source/common/grpc/context_impl.h" #include "source/common/http/context_impl.h" +#include "source/common/quic/quic_stat_names.h" #include "source/common/router/context_impl.h" #include "source/common/stats/symbol_table_impl.h" #include "source/extensions/transport_sockets/tls/context_manager_impl.h" @@ -122,6 +123,7 @@ class MockInstance : public Instance { Http::ContextImpl http_context_; envoy::config::bootstrap::v3::Bootstrap bootstrap_; Router::ContextImpl router_context_; + Quic::QuicStatNames quic_stat_names_; testing::NiceMock validation_context_; std::shared_ptr> stats_config_; std::shared_ptr> diff --git a/test/server/api_listener_test.cc b/test/server/api_listener_test.cc index f1c269a8f8c17..ee51447e4e404 100644 --- a/test/server/api_listener_test.cc +++ b/test/server/api_listener_test.cc @@ -22,8 +22,8 @@ namespace Server { class ApiListenerTest : public testing::Test { protected: ApiListenerTest() - : listener_manager_(std::make_unique(server_, listener_factory_, - worker_factory_, false)) {} + : listener_manager_(std::make_unique( + server_, listener_factory_, worker_factory_, false, server_.quic_stat_names_)) {} NiceMock server_; NiceMock listener_factory_; diff --git a/test/server/config_validation/cluster_manager_test.cc b/test/server/config_validation/cluster_manager_test.cc index 4a5ec1f4fd0d9..d0762c4425df7 100644 --- a/test/server/config_validation/cluster_manager_test.cc +++ b/test/server/config_validation/cluster_manager_test.cc @@ -45,13 +45,14 @@ TEST(ValidationClusterManagerTest, MockedMethods) { Http::ContextImpl http_context(stats_store.symbolTable()); Grpc::ContextImpl 
grpc_context(stats_store.symbolTable()); Router::ContextImpl router_context(stats_store.symbolTable()); + Quic::QuicStatNames quic_stat_names(stats_store.symbolTable()); AccessLog::MockAccessLogManager log_manager; Singleton::ManagerImpl singleton_manager{Thread::threadFactoryForTest()}; ValidationClusterManagerFactory factory( admin, runtime, stats_store, tls, dns_resolver, ssl_context_manager, dispatcher, local_info, secret_manager, validation_context, *api, http_context, grpc_context, router_context, - log_manager, singleton_manager, options); + log_manager, singleton_manager, options, quic_stat_names); const envoy::config::bootstrap::v3::Bootstrap bootstrap; ClusterManagerPtr cluster_manager = factory.clusterManagerFromProto(bootstrap); diff --git a/test/server/configuration_impl_test.cc b/test/server/configuration_impl_test.cc index 4e7de3b88dc1f..f2f02f385bc00 100644 --- a/test/server/configuration_impl_test.cc +++ b/test/server/configuration_impl_test.cc @@ -65,7 +65,8 @@ class ConfigurationImplTest : public testing::Test { server_.dnsResolver(), server_.sslContextManager(), server_.dispatcher(), server_.localInfo(), server_.secretManager(), server_.messageValidationContext(), *api_, server_.httpContext(), server_.grpcContext(), server_.routerContext(), - server_.accessLogManager(), server_.singletonManager(), server_.options()) {} + server_.accessLogManager(), server_.singletonManager(), server_.options(), + server_.quic_stat_names_) {} void addStatsdFakeClusterConfig(envoy::config::metrics::v3::StatsSink& sink) { envoy::config::metrics::v3::StatsdSink statsd_sink; diff --git a/test/server/listener_manager_impl_test.h b/test/server/listener_manager_impl_test.h index 856436d9c2423..077c221b4fc0b 100644 --- a/test/server/listener_manager_impl_test.h +++ b/test/server/listener_manager_impl_test.h @@ -73,8 +73,9 @@ class ListenerManagerImplTest : public testing::Test { .WillByDefault(ReturnRef(validation_visitor)); ON_CALL(server_.validation_context_, 
dynamicValidationVisitor()) .WillByDefault(ReturnRef(validation_visitor)); - manager_ = std::make_unique(server_, listener_factory_, worker_factory_, - enable_dispatcher_stats_); + manager_ = + std::make_unique(server_, listener_factory_, worker_factory_, + enable_dispatcher_stats_, server_.quic_stat_names_); // Use real filter loading by default. ON_CALL(listener_factory_, createNetworkFilterFactoryList(_, _)) From 2ece16147ad6722013b0b21edfdb8e0f97006104 Mon Sep 17 00:00:00 2001 From: htuch Date: Thu, 22 Jul 2021 09:41:10 -0400 Subject: [PATCH 29/57] ci: set --flaky_test_attempts=2 for coverage. (#17433) Signed-off-by: Harvey Tuch --- test/run_envoy_bazel_coverage.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/run_envoy_bazel_coverage.sh b/test/run_envoy_bazel_coverage.sh index f2f44ea651fa8..7a315a1416359 100755 --- a/test/run_envoy_bazel_coverage.sh +++ b/test/run_envoy_bazel_coverage.sh @@ -63,6 +63,9 @@ else "--test_tag_filters=-nocoverage,-fuzz_target") fi +# Don't block coverage on flakes. 
+BAZEL_BUILD_OPTIONS+=("--flaky_test_attempts=2") + bazel coverage "${BAZEL_BUILD_OPTIONS[@]}" "${COVERAGE_TARGETS[@]}" # Collecting profile and testlogs From 2cc9b69f28caa34bd0bc0d4371b669f027807ccd Mon Sep 17 00:00:00 2001 From: Ilya Lobkov Date: Thu, 22 Jul 2021 23:02:35 +0700 Subject: [PATCH 30/57] hds: reset specifier_hash on remoteClose (#17378) Signed-off-by: Ilya Lobkov --- .../upstream/health_discovery_service.cc | 1 + test/integration/hds_integration_test.cc | 55 +++++++++++++++++++ 2 files changed, 56 insertions(+) diff --git a/source/common/upstream/health_discovery_service.cc b/source/common/upstream/health_discovery_service.cc index c9f5a58649582..96f3c07580ac5 100644 --- a/source/common/upstream/health_discovery_service.cc +++ b/source/common/upstream/health_discovery_service.cc @@ -330,6 +330,7 @@ void HdsDelegate::onRemoteClose(Grpc::Status::GrpcStatus status, const std::stri hds_stream_response_timer_->disableTimer(); stream_ = nullptr; server_response_ms_ = 0; + specifier_hash_ = 0; handleFailure(); } diff --git a/test/integration/hds_integration_test.cc b/test/integration/hds_integration_test.cc index 15a3015807cab..b7938878f0352 100644 --- a/test/integration/hds_integration_test.cc +++ b/test/integration/hds_integration_test.cc @@ -1279,5 +1279,60 @@ TEST_P(HdsIntegrationTest, SingleEndpointUnhealthyHttpCustomPort) { cleanupHdsConnection(); } +// Tests Envoy keeps sending EndpointHealthResponses after the HDS server reconnection +TEST_P(HdsIntegrationTest, SingleEndpointHealthyHttpHdsReconnect) { + XDS_DEPRECATED_FEATURE_TEST_SKIP; + initialize(); + + // Server <--> Envoy + waitForHdsStream(); + ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, envoy_msg_)); + EXPECT_EQ(envoy_msg_.health_check_request().capability().health_check_protocols(0), + envoy::service::health::v3::Capability::HTTP); + + // Server asks for health checking + server_health_check_specifier_ = + makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, 
false); + hds_stream_->startGrpcStream(); + hds_stream_->sendGrpcMessage(server_health_check_specifier_); + test_server_->waitForCounterGe("hds_delegate.requests", ++hds_requests_); + + // Envoy sends a health check message to an endpoint + healthcheckEndpoints(); + + // Endpoint responds to the health check + host_stream_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + host_stream_->encodeData(1024, true); + + // Receive updates until the one we expect arrives + waitForEndpointHealthResponse(envoy::config::core::v3::HEALTHY); + + checkCounters(1, 2, 1, 0); + + // Simulate disconnection of HDS server + ASSERT_TRUE(hds_fake_connection_->close()); + ASSERT_TRUE(hds_fake_connection_->waitForDisconnect()); + + // Server <--> Envoy, connect once again + waitForHdsStream(); + ASSERT_TRUE(hds_stream_->waitForGrpcMessage(*dispatcher_, envoy_msg_)); + EXPECT_EQ(envoy_msg_.health_check_request().capability().health_check_protocols(0), + envoy::service::health::v3::Capability::HTTP); + + // Server asks for health checking + server_health_check_specifier_ = + makeHttpHealthCheckSpecifier(envoy::type::v3::CodecClientType::HTTP1, false); + hds_stream_->startGrpcStream(); + hds_stream_->sendGrpcMessage(server_health_check_specifier_); + test_server_->waitForCounterGe("hds_delegate.requests", ++hds_requests_); + + // Receive updates until the one we expect arrives + waitForEndpointHealthResponse(envoy::config::core::v3::HEALTHY); + + // Clean up connections + cleanupHostConnections(); + cleanupHdsConnection(); +} + } // namespace } // namespace Envoy From 27387070f8de03c94c7531feae7db9aa87f36740 Mon Sep 17 00:00:00 2001 From: Manish Kumar Date: Fri, 23 Jul 2021 00:35:55 +0530 Subject: [PATCH 31/57] Add Jwt cache. (#14341) * Add Jwt cache. * Added release docs. * Added unit test. 
Signed-off-by: Manish Kumar Co-authored-by: Wayne Zhang --- .../filters/http/jwt_authn/v3/config.proto | 12 +- .../http/jwt_authn/v4alpha/config.proto | 15 ++- bazel/repositories.bzl | 5 + bazel/repository_locations.bzl | 6 +- .../http/http_filters/jwt_authn_filter.rst | 1 + docs/root/version_history/current.rst | 2 + .../filters/http/jwt_authn/v3/config.proto | 12 +- .../http/jwt_authn/v4alpha/config.proto | 15 ++- .../extensions/filters/http/jwt_authn/BUILD | 15 +++ .../filters/http/jwt_authn/authenticator.cc | 40 +++++-- .../filters/http/jwt_authn/authenticator.h | 1 + .../filters/http/jwt_authn/jwks_cache.cc | 17 ++- .../filters/http/jwt_authn/jwks_cache.h | 6 + .../filters/http/jwt_authn/jwt_cache.cc | 79 +++++++++++++ .../filters/http/jwt_authn/jwt_cache.h | 46 ++++++++ test/extensions/filters/http/jwt_authn/BUILD | 13 +++ .../http/jwt_authn/authenticator_test.cc | 106 ++++++++++++++++++ .../filters/http/jwt_authn/jwt_cache_test.cc | 87 ++++++++++++++ test/extensions/filters/http/jwt_authn/mock.h | 48 ++++++++ tools/spelling/spelling_dictionary.txt | 1 + 20 files changed, 510 insertions(+), 17 deletions(-) create mode 100644 source/extensions/filters/http/jwt_authn/jwt_cache.cc create mode 100644 source/extensions/filters/http/jwt_authn/jwt_cache.h create mode 100644 test/extensions/filters/http/jwt_authn/jwt_cache_test.cc diff --git a/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto b/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto index db81f847d7bae..9e658ed8627ff 100644 --- a/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto +++ b/api/envoy/extensions/filters/http/jwt_authn/v3/config.proto @@ -52,7 +52,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // cache_duration: // seconds: 300 // -// [#next-free-field: 12] +// [#next-free-field: 13] message JwtProvider { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.jwt_authn.v2alpha.JwtProvider"; @@ -220,6 +220,16 
@@ message JwtProvider { // Specify the clock skew in seconds when verifying JWT time constraint, // such as `exp`, and `nbf`. If not specified, default is 60 seconds. uint32 clock_skew_seconds = 10; + + // Enables JWT cache, its size is specified by *jwt_cache_size*. + // Only valid JWT tokens are cached. + JwtCacheConfig jwt_cache_config = 12; +} + +// This message specifies JWT Cache configuration. +message JwtCacheConfig { + // The unit is number of JWT tokens, default to 100. + uint32 jwt_cache_size = 1; } // This message specifies how to fetch JWKS from remote and how to cache it. diff --git a/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto b/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto index 3a65b6a64cc8c..57c6630c940e7 100644 --- a/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto +++ b/api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto @@ -52,7 +52,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // cache_duration: // seconds: 300 // -// [#next-free-field: 12] +// [#next-free-field: 13] message JwtProvider { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.jwt_authn.v3.JwtProvider"; @@ -220,6 +220,19 @@ message JwtProvider { // Specify the clock skew in seconds when verifying JWT time constraint, // such as `exp`, and `nbf`. If not specified, default is 60 seconds. uint32 clock_skew_seconds = 10; + + // Enables JWT cache, its size is specified by *jwt_cache_size*. + // Only valid JWT tokens are cached. + JwtCacheConfig jwt_cache_config = 12; +} + +// This message specifies JWT Cache configuration. +message JwtCacheConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtCacheConfig"; + + // The unit is number of JWT tokens, default to 100. + uint32 jwt_cache_size = 1; } // This message specifies how to fetch JWKS from remote and how to cache it. 
diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 9967b86504382..be758fa4cebfd 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -880,6 +880,11 @@ def _com_github_google_jwt_verify(): actual = "@com_github_google_jwt_verify//:jwt_verify_lib", ) + native.bind( + name = "simple_lru_cache_lib", + actual = "@com_github_google_jwt_verify//:simple_lru_cache_lib", + ) + def _com_github_luajit_luajit(): external_http_archive( name = "com_github_luajit_luajit", diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 9339a4afdc800..2b2b53eba6688 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -467,13 +467,13 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "jwt_verify_lib", project_desc = "JWT verification library for C++", project_url = "https://github.com/google/jwt_verify_lib", - version = "28efec2e4df1072db0ed03597591360ec9f80aac", - sha256 = "7a5c35b7cbf633398503ae12cad8c2833e92b3a796eed68b6256d22d51ace5e1", + version = "e5d6cf7067495b0868787e1fd1e75cef3242a840", + sha256 = "0d294dc8697049a0d7f2aaa81d08713fea581061c5359d6edb229b3e7c6cf58e", strip_prefix = "jwt_verify_lib-{version}", urls = ["https://github.com/google/jwt_verify_lib/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = ["envoy.filters.http.jwt_authn"], - release_date = "2020-11-05", + release_date = "2021-03-05", cpe = "N/A", ), com_github_nodejs_http_parser = dict( diff --git a/docs/root/configuration/http/http_filters/jwt_authn_filter.rst b/docs/root/configuration/http/http_filters/jwt_authn_filter.rst index 814deed6b16c6..905aaa6aecc43 100644 --- a/docs/root/configuration/http/http_filters/jwt_authn_filter.rst +++ b/docs/root/configuration/http/http_filters/jwt_authn_filter.rst @@ -41,6 +41,7 @@ JwtProvider * *from_headers*: extract JWT from HTTP headers. * *from_params*: extract JWT from query parameters. 
* *forward_payload_header*: forward the JWT payload in the specified HTTP header. +* *jwt_cache_config*: Enables JWT cache, its size can be specified by *jwt_cache_size*. Only valid JWT tokens are cached. Default Extract Location ~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 5261f906b4ec3..3a8da48b481f6 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -47,6 +47,8 @@ New Features * http: added :ref:`string_match ` in the header matcher. * http: added support for :ref:`max_requests_per_connection ` for both upstream and downstream connections. +* jwt_authn: added support for :ref:`Jwt Cache ` and its size can be specified by :ref:`jwt_cache_size `. + Deprecated ---------- * cluster: :ref:`max_requests_per_connection ` is deprecated in favor of :ref:`max_requests_per_connection `. diff --git a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto index db81f847d7bae..9e658ed8627ff 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v3/config.proto @@ -52,7 +52,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // cache_duration: // seconds: 300 // -// [#next-free-field: 12] +// [#next-free-field: 13] message JwtProvider { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.jwt_authn.v2alpha.JwtProvider"; @@ -220,6 +220,16 @@ message JwtProvider { // Specify the clock skew in seconds when verifying JWT time constraint, // such as `exp`, and `nbf`. If not specified, default is 60 seconds. uint32 clock_skew_seconds = 10; + + // Enables JWT cache, its size is specified by *jwt_cache_size*. + // Only valid JWT tokens are cached. 
+ JwtCacheConfig jwt_cache_config = 12; +} + +// This message specifies JWT Cache configuration. +message JwtCacheConfig { + // The unit is number of JWT tokens, default to 100. + uint32 jwt_cache_size = 1; } // This message specifies how to fetch JWKS from remote and how to cache it. diff --git a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto index 3a65b6a64cc8c..57c6630c940e7 100644 --- a/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto +++ b/generated_api_shadow/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto @@ -52,7 +52,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // cache_duration: // seconds: 300 // -// [#next-free-field: 12] +// [#next-free-field: 13] message JwtProvider { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.jwt_authn.v3.JwtProvider"; @@ -220,6 +220,19 @@ message JwtProvider { // Specify the clock skew in seconds when verifying JWT time constraint, // such as `exp`, and `nbf`. If not specified, default is 60 seconds. uint32 clock_skew_seconds = 10; + + // Enables JWT cache, its size is specified by *jwt_cache_size*. + // Only valid JWT tokens are cached. + JwtCacheConfig jwt_cache_config = 12; +} + +// This message specifies JWT Cache configuration. +message JwtCacheConfig { + option (udpa.annotations.versioning).previous_message_type = + "envoy.extensions.filters.http.jwt_authn.v3.JwtCacheConfig"; + + // The unit is number of JWT tokens, default to 100. + uint32 jwt_cache_size = 1; } // This message specifies how to fetch JWKS from remote and how to cache it. 
diff --git a/source/extensions/filters/http/jwt_authn/BUILD b/source/extensions/filters/http/jwt_authn/BUILD index 86f2cb511839b..50f5952b2f707 100644 --- a/source/extensions/filters/http/jwt_authn/BUILD +++ b/source/extensions/filters/http/jwt_authn/BUILD @@ -56,6 +56,7 @@ envoy_cc_library( ], deps = [ "jwks_async_fetcher_lib", + ":jwt_cache_lib", "//source/common/config:datasource_lib", "@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto", ], @@ -143,3 +144,17 @@ envoy_cc_library( "@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto", ], ) + +envoy_cc_library( + name = "jwt_cache_lib", + srcs = ["jwt_cache.cc"], + hdrs = ["jwt_cache.h"], + external_deps = [ + "jwt_verify_lib", + "simple_lru_cache_lib", + ], + deps = [ + "//source/common/protobuf:utility_lib", + "@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/filters/http/jwt_authn/authenticator.cc b/source/extensions/filters/http/jwt_authn/authenticator.cc index f32bfac5dd78d..ef17e90fb1a6a 100644 --- a/source/extensions/filters/http/jwt_authn/authenticator.cc +++ b/source/extensions/filters/http/jwt_authn/authenticator.cc @@ -58,6 +58,9 @@ class AuthenticatorImpl : public Logger::Loggable, // Verify with a specific public key. void verifyKey(); + // Handle Good Jwt either Cache JWT or verified public key. + void handleGoodJwt(bool cache_hit); + // Calls the callback with status. void doneWithStatus(const Status& status); @@ -80,10 +83,9 @@ class AuthenticatorImpl : public Logger::Loggable, std::vector tokens_; JwtLocationConstPtr curr_token_; // The JWT object. 
- std::unique_ptr<::google::jwt_verify::Jwt> jwt_; + std::unique_ptr<::google::jwt_verify::Jwt> owned_jwt_; // The JWKS data object JwksCache::JwksData* jwks_data_{}; - // The HTTP request headers Http::HeaderMap* headers_{}; // The active span for the request @@ -99,6 +101,7 @@ class AuthenticatorImpl : public Logger::Loggable, const bool is_allow_failed_; const bool is_allow_missing_; TimeSource& time_source_; + ::google::jwt_verify::Jwt* jwt_{}; }; std::string AuthenticatorImpl::name() const { @@ -140,9 +143,20 @@ void AuthenticatorImpl::startVerify() { curr_token_ = std::move(tokens_.back()); tokens_.pop_back(); - jwt_ = std::make_unique<::google::jwt_verify::Jwt>(); + if (provider_ != absl::nullopt) { + jwks_data_ = jwks_cache_.findByProvider(provider_.value()); + jwt_ = jwks_data_->getJwtCache().lookup(curr_token_->token()); + if (jwt_ != nullptr) { + handleGoodJwt(/*cache_hit=*/true); + return; + } + } + ENVOY_LOG(debug, "{}: Parse Jwt {}", name(), curr_token_->token()); - Status status = jwt_->parseFromString(curr_token_->token()); + owned_jwt_ = std::make_unique<::google::jwt_verify::Jwt>(); + Status status = owned_jwt_->parseFromString(curr_token_->token()); + jwt_ = owned_jwt_.get(); + if (status != Status::Ok) { doneWithStatus(status); return; @@ -157,9 +171,10 @@ void AuthenticatorImpl::startVerify() { } } - // Check the issuer is configured or not. - jwks_data_ = provider_ ? jwks_cache_.findByProvider(provider_.value()) - : jwks_cache_.findByIssuer(jwt_->iss_); + // Issuer is configured + if (!provider_) { + jwks_data_ = jwks_cache_.findByIssuer(jwt_->iss_); + } // When `provider` is valid, findByProvider should never return nullptr. // Only when `allow_missing` or `allow_failed` is used, `provider` is invalid, // and this authenticator is checking tokens from all providers. In this case, @@ -200,6 +215,7 @@ void AuthenticatorImpl::startVerify() { // the key cached, if we do proceed to verify else try a new JWKS retrieval. 
// JWTs without a kid header field in the JWS we might be best to get each // time? This all only matters for remote JWKS. + verifyKey(); return; } @@ -246,11 +262,15 @@ void AuthenticatorImpl::onDestroy() { void AuthenticatorImpl::verifyKey() { const Status status = ::google::jwt_verify::verifyJwtWithoutTimeChecking(*jwt_, *jwks_data_->getJwksObj()); + if (status != Status::Ok) { doneWithStatus(status); return; } + handleGoodJwt(/*cache_hit=*/false); +} +void AuthenticatorImpl::handleGoodJwt(bool cache_hit) { // Forward the payload const auto& provider = jwks_data_->getJwtProvider(); @@ -274,7 +294,10 @@ void AuthenticatorImpl::verifyKey() { if (set_payload_cb_ && !provider.payload_in_metadata().empty()) { set_payload_cb_(provider.payload_in_metadata(), jwt_->payload_pb_); } - + if (provider_ && !cache_hit) { + // move the ownership of "owned_jwt_" into the function. + jwks_data_->getJwtCache().insert(curr_token_->token(), std::move(owned_jwt_)); + } doneWithStatus(Status::Ok); } @@ -301,6 +324,7 @@ void AuthenticatorImpl::doneWithStatus(const Status& status) { callback_ = nullptr; return; } + startVerify(); } diff --git a/source/extensions/filters/http/jwt_authn/authenticator.h b/source/extensions/filters/http/jwt_authn/authenticator.h index f8dd647c7f4f9..62803498d6941 100644 --- a/source/extensions/filters/http/jwt_authn/authenticator.h +++ b/source/extensions/filters/http/jwt_authn/authenticator.h @@ -4,6 +4,7 @@ #include "source/extensions/filters/http/jwt_authn/extractor.h" #include "source/extensions/filters/http/jwt_authn/jwks_cache.h" +#include "source/extensions/filters/http/jwt_authn/jwt_cache.h" #include "jwt_verify_lib/check_audience.h" #include "jwt_verify_lib/status.h" diff --git a/source/extensions/filters/http/jwt_authn/jwks_cache.cc b/source/extensions/filters/http/jwt_authn/jwks_cache.cc index 673b4f6823d8d..d69bff20ccc51 100644 --- a/source/extensions/filters/http/jwt_authn/jwks_cache.cc +++ 
b/source/extensions/filters/http/jwt_authn/jwks_cache.cc @@ -1,6 +1,7 @@ #include "source/extensions/filters/http/jwt_authn/jwks_cache.h" #include +#include #include "envoy/common/time.h" #include "envoy/extensions/filters/http/jwt_authn/v3/config.pb.h" @@ -34,8 +35,11 @@ class JwksDataImpl : public JwksCache::JwksData, public Logger::Loggable(audiences); - - tls_.set([](Envoy::Event::Dispatcher&) { return std::make_shared(); }); + bool enable_jwt_cache = jwt_provider_.has_jwt_cache_config(); + const auto& config = jwt_provider_.jwt_cache_config(); + tls_.set([enable_jwt_cache, config](Envoy::Event::Dispatcher& dispatcher) { + return std::make_shared(enable_jwt_cache, config, dispatcher.timeSource()); + }); const auto inline_jwks = Config::DataSource::read(jwt_provider_.local_jwks(), true, context.api()); @@ -77,10 +81,19 @@ class JwksDataImpl : public JwksCache::JwksData, public Logger::Loggablejwt_cache_; } + private: struct ThreadLocalCache : public ThreadLocal::ThreadLocalObject { + ThreadLocalCache(bool enable_jwt_cache, + const envoy::extensions::filters::http::jwt_authn::v3::JwtCacheConfig& config, + TimeSource& time_source) + : jwt_cache_(JwtCache::create(enable_jwt_cache, config, time_source)) {} + // The jwks object. JwksConstSharedPtr jwks_; + // The JwtCache object + const JwtCachePtr jwt_cache_; // The pubkey expiration time. 
MonotonicTime expire_; }; diff --git a/source/extensions/filters/http/jwt_authn/jwks_cache.h b/source/extensions/filters/http/jwt_authn/jwks_cache.h index 0efcb23931362..20ef525df1232 100644 --- a/source/extensions/filters/http/jwt_authn/jwks_cache.h +++ b/source/extensions/filters/http/jwt_authn/jwks_cache.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "envoy/api/api.h" #include "envoy/common/pure.h" #include "envoy/common/time.h" @@ -8,6 +10,7 @@ #include "source/extensions/filters/http/common/jwks_fetcher.h" #include "source/extensions/filters/http/jwt_authn/jwks_async_fetcher.h" +#include "source/extensions/filters/http/jwt_authn/jwt_cache.h" #include "source/extensions/filters/http/jwt_authn/stats.h" #include "jwt_verify_lib/jwks.h" @@ -65,6 +68,9 @@ class JwksCache { // Set a remote Jwks. virtual const ::google::jwt_verify::Jwks* setRemoteJwks(JwksConstPtr&& jwks) PURE; + + // Get Token Cache + virtual JwtCache& getJwtCache() PURE; }; // Lookup issuer cache map. The cache only stores Jwks specified in the config. diff --git a/source/extensions/filters/http/jwt_authn/jwt_cache.cc b/source/extensions/filters/http/jwt_authn/jwt_cache.cc new file mode 100644 index 0000000000000..186130bc1de9c --- /dev/null +++ b/source/extensions/filters/http/jwt_authn/jwt_cache.cc @@ -0,0 +1,79 @@ +#include "source/extensions/filters/http/jwt_authn/jwt_cache.h" + +#include "source/common/common/assert.h" + +#include "simple_lru_cache/simple_lru_cache_inl.h" + +using ::google::simple_lru_cache::SimpleLRUCache; + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace JwtAuthn { +namespace { + +// The default number of entries in JWT cache is 100. +constexpr int kJwtCacheDefaultSize = 100; +// The maximum size of JWT to be cached. 
+constexpr int kMaxJwtSizeForCache = 4 * 1024; // 4KiB + +class JwtCacheImpl : public JwtCache { +public: + JwtCacheImpl(bool enable_cache, const JwtCacheConfig& config, TimeSource& time_source) + : time_source_(time_source) { + if (enable_cache) { + // if cache_size is 0, it is not specified in the config, use default + cache_size_ = config.jwt_cache_size() == 0 ? kJwtCacheDefaultSize : config.jwt_cache_size(); + jwt_lru_cache_ = + std::make_unique>(cache_size_); + } + } + + ~JwtCacheImpl() override { + if (jwt_lru_cache_) { + jwt_lru_cache_->clear(); + } + } + + ::google::jwt_verify::Jwt* lookup(const std::string& token) override { + if (!jwt_lru_cache_) { + return nullptr; + } + ::google::jwt_verify::Jwt* found_jwt{}; + SimpleLRUCache::ScopedLookup lookup( + jwt_lru_cache_.get(), token); + if (lookup.found()) { + found_jwt = lookup.value(); + ASSERT(found_jwt != nullptr); + if (found_jwt->verifyTimeConstraint(DateUtil::nowToSeconds(time_source_)) == + ::google::jwt_verify::Status::JwtExpired) { + jwt_lru_cache_->remove(token); + found_jwt = nullptr; + } + } + return found_jwt; + } + + void insert(const std::string& token, std::unique_ptr<::google::jwt_verify::Jwt>&& jwt) override { + if (jwt_lru_cache_ && token.size() <= kMaxJwtSizeForCache) { + // pass the ownership of jwt to cache + jwt_lru_cache_->insert(token, jwt.release(), 1); + } + } + +private: + std::unique_ptr> jwt_lru_cache_; + TimeSource& time_source_; + int cache_size_{}; +}; +} // namespace + +JwtCachePtr JwtCache::create(bool enable_cache, const JwtCacheConfig& config, + TimeSource& time_source) { + return std::make_unique(enable_cache, config, time_source); +} + +} // namespace JwtAuthn +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/jwt_authn/jwt_cache.h b/source/extensions/filters/http/jwt_authn/jwt_cache.h new file mode 100644 index 0000000000000..cbf84091621c1 --- /dev/null +++ 
b/source/extensions/filters/http/jwt_authn/jwt_cache.h @@ -0,0 +1,46 @@ +#pragma once +#include +#include +#include + +#include "envoy/extensions/filters/http/jwt_authn/v3/config.pb.h" + +#include "source/common/common/utility.h" + +#include "jwt_verify_lib/jwt.h" +#include "jwt_verify_lib/verify.h" + +using envoy::extensions::filters::http::jwt_authn::v3::JwtCacheConfig; + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace JwtAuthn { + +// Cache key is the JWT string, value is parsed JWT struct. + +class JwtCache; +using JwtCachePtr = std::unique_ptr; + +class JwtCache { +public: + virtual ~JwtCache() = default; + + // Lookup a JWT token in the cache, if found return the pointer to its parsed jwt struct. + // If no found, return nullptr. + virtual ::google::jwt_verify::Jwt* lookup(const std::string& token) PURE; + + // Insert a JWT token and its parsed JWT struct to the cache. + // The function will take over the ownership of jwt object. + virtual void insert(const std::string& token, + std::unique_ptr<::google::jwt_verify::Jwt>&& jwt) PURE; + + // JwtCache factory function. 
+ static JwtCachePtr create(bool enable_cache, const JwtCacheConfig& config, + TimeSource& time_source); +}; + +} // namespace JwtAuthn +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/jwt_authn/BUILD b/test/extensions/filters/http/jwt_authn/BUILD index 5dbecda7c3145..1a7c3f0723247 100644 --- a/test/extensions/filters/http/jwt_authn/BUILD +++ b/test/extensions/filters/http/jwt_authn/BUILD @@ -106,6 +106,19 @@ envoy_extension_cc_test( ], ) +envoy_extension_cc_test( + name = "jwt_cache_test", + srcs = ["jwt_cache_test.cc"], + extension_names = ["envoy.filters.http.jwt_authn"], + deps = [ + "//source/extensions/filters/http/jwt_authn:jwt_cache_lib", + "//test/extensions/filters/http/jwt_authn:test_common_lib", + "//test/test_common:simulated_time_system_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/filters/http/jwt_authn/v3:pkg_cc_proto", + ], +) + envoy_extension_cc_test( name = "authenticator_test", srcs = ["authenticator_test.cc"], diff --git a/test/extensions/filters/http/jwt_authn/authenticator_test.cc b/test/extensions/filters/http/jwt_authn/authenticator_test.cc index 41224121bbe97..b2b92cf75cdd2 100644 --- a/test/extensions/filters/http/jwt_authn/authenticator_test.cc +++ b/test/extensions/filters/http/jwt_authn/authenticator_test.cc @@ -24,7 +24,9 @@ using ::google::jwt_verify::Jwks; using ::google::jwt_verify::Status; using ::testing::_; using ::testing::Invoke; +using ::testing::MockFunction; using ::testing::NiceMock; +using ::testing::Return; namespace Envoy { namespace Extensions { @@ -648,6 +650,110 @@ TEST_F(AuthenticatorTest, TestInvalidPubkeyKey) { expectVerifyStatus(Status::JwksPemBadBase64, headers); } +class AuthenticatorJwtCacheTest : public testing::Test { +public: + void SetUp() override { + jwks_ = Jwks::createFrom(PublicKey, Jwks::JWKS); + extractor_ = Extractor::create(jwks_cache_.jwks_data_.jwt_provider_); + // Not to use jwks_fetcher, mocked 
that JwksObj already has Jwks + EXPECT_CALL(jwks_cache_.jwks_data_, getJwksObj()).WillRepeatedly(Return(jwks_.get())); + EXPECT_CALL(mock_fetcher_, Call(_, _)).Times(0); + } + + void createAuthenticator(const absl::optional& provider) { + auth_ = Authenticator::create(nullptr, provider, false, false, jwks_cache_, cm_, + mock_fetcher_.AsStdFunction(), time_system_); + } + + void expectVerifyStatus(Status expected_status, Http::RequestHeaderMap& headers) { + std::function on_complete_cb = [&expected_status](const Status& status) { + ASSERT_EQ(status, expected_status); + }; + auto set_payload_cb = [this](const std::string& name, const ProtobufWkt::Struct& payload) { + out_name_ = name; + out_payload_ = payload; + }; + auto tokens = extractor_->extract(headers); + auth_->verify(headers, parent_span_, std::move(tokens), set_payload_cb, on_complete_cb); + } + + ::google::jwt_verify::JwksPtr jwks_; + NiceMock jwks_cache_; + MockFunction mock_fetcher_; + AuthenticatorPtr auth_; + NiceMock cm_; + Event::SimulatedTimeSystem time_system_; + ExtractorConstPtr extractor_; + NiceMock parent_span_; + std::string out_name_; + ProtobufWkt::Struct out_payload_; +}; + +TEST_F(AuthenticatorJwtCacheTest, TestNonProvider) { + createAuthenticator(absl::nullopt); + + // For invalid provider, jwt_cache is not called. + EXPECT_CALL(jwks_cache_.jwks_data_.jwt_cache_, lookup(_)).Times(0); + EXPECT_CALL(jwks_cache_.jwks_data_.jwt_cache_, insert(GoodToken, _)).Times(0); + + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; + expectVerifyStatus(Status::Ok, headers); +} + +TEST_F(AuthenticatorJwtCacheTest, TestCacheMissGoodToken) { + createAuthenticator("provider"); + + // jwt_cache miss: lookup return nullptr + EXPECT_CALL(jwks_cache_.jwks_data_.jwt_cache_, lookup(_)).WillOnce(Return(nullptr)); + // jwt_cache insert is called for a good jwt. 
+ EXPECT_CALL(jwks_cache_.jwks_data_.jwt_cache_, insert(GoodToken, _)); + + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; + expectVerifyStatus(Status::Ok, headers); +} + +TEST_F(AuthenticatorJwtCacheTest, TestCacheMissExpiredToken) { + createAuthenticator("provider"); + + // jwt_cache miss: lookup return nullptr + EXPECT_CALL(jwks_cache_.jwks_data_.jwt_cache_, lookup(_)).WillOnce(Return(nullptr)); + // jwt_cache insert is not called for a bad Jwt + EXPECT_CALL(jwks_cache_.jwks_data_.jwt_cache_, insert(_, _)).Times(0); + + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(ExpiredToken)}}; + expectVerifyStatus(Status::JwtExpired, headers); +} + +TEST_F(AuthenticatorJwtCacheTest, TestCacheHit) { + jwks_cache_.jwks_data_.jwt_provider_.set_forward_payload_header("jwt-payload"); + jwks_cache_.jwks_data_.jwt_provider_.set_forward(true); + jwks_cache_.jwks_data_.jwt_provider_.set_payload_in_metadata("my_payload"); + + createAuthenticator("provider"); + + ::google::jwt_verify::Jwt cached_jwt; + cached_jwt.parseFromString(GoodToken); + // jwt_cache hit: lookup return a cached jwt. + EXPECT_CALL(jwks_cache_.jwks_data_.jwt_cache_, lookup(_)).WillOnce(Return(&cached_jwt)); + // jwt_cache insert is not called. + EXPECT_CALL(jwks_cache_.jwks_data_.jwt_cache_, insert(_, _)).Times(0); + + Http::TestRequestHeaderMapImpl headers{{"Authorization", "Bearer " + std::string(GoodToken)}}; + expectVerifyStatus(Status::Ok, headers); + + // Verify post processing of a good Jwt with a cache hit. + EXPECT_EQ(headers.get_("jwt-payload"), ExpectedPayloadValue); + // Verify the token is not removed. 
+ EXPECT_TRUE(headers.has(Http::CustomHeaders::get().Authorization)); + + // Payload is set + EXPECT_EQ(out_name_, "my_payload"); + + ProtobufWkt::Struct expected_payload; + TestUtility::loadFromJson(ExpectedPayloadJSON, expected_payload); + EXPECT_TRUE(TestUtility::protoEqual(out_payload_, expected_payload)); +} + } // namespace } // namespace JwtAuthn } // namespace HttpFilters diff --git a/test/extensions/filters/http/jwt_authn/jwt_cache_test.cc b/test/extensions/filters/http/jwt_authn/jwt_cache_test.cc new file mode 100644 index 0000000000000..70d6f402679e8 --- /dev/null +++ b/test/extensions/filters/http/jwt_authn/jwt_cache_test.cc @@ -0,0 +1,87 @@ +#include + +#include "envoy/extensions/filters/http/jwt_authn/v3/config.pb.h" + +#include "source/common/protobuf/utility.h" +#include "source/extensions/filters/http/jwt_authn/jwt_cache.h" + +#include "test/extensions/filters/http/jwt_authn/test_common.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +using ::google::jwt_verify::Status; + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace JwtAuthn { +namespace { + +class JwtCacheTest : public testing::Test { +public: + void setupCache(bool enable) { + envoy::extensions::filters::http::jwt_authn::v3::JwtCacheConfig config; + config.set_jwt_cache_size(0); + cache_ = JwtCache::create(enable, config, time_system_); + } + + void loadJwt(const char* jwt_str) { + jwt_ = std::make_unique<::google::jwt_verify::Jwt>(); + Status status = jwt_->parseFromString(jwt_str); + EXPECT_EQ(status, Status::Ok); + } + + Event::SimulatedTimeSystem time_system_; + JwtCachePtr cache_; + std::unique_ptr<::google::jwt_verify::Jwt> jwt_; +}; + +TEST_F(JwtCacheTest, TestEnabledCache) { + // setup an enabled cache + setupCache(true); + loadJwt(GoodToken); + + auto* origin_jwt = jwt_.get(); + cache_->insert(GoodToken, std::move(jwt_)); + // jwt ownership is moved into the cache. 
+ EXPECT_FALSE(jwt_); + + auto* jwt1 = cache_->lookup(GoodToken); + EXPECT_TRUE(jwt1 != nullptr); + EXPECT_EQ(jwt1, origin_jwt); + + auto* jwt2 = cache_->lookup(ExpiredToken); + EXPECT_TRUE(jwt2 == nullptr); +} + +TEST_F(JwtCacheTest, TestDisabledCache) { + // setup a disabled cache + setupCache(false); + loadJwt(GoodToken); + + cache_->insert(GoodToken, std::move(jwt_)); + // jwt ownership is not moved into the cache. + EXPECT_TRUE(jwt_); + + auto* jwt = cache_->lookup(GoodToken); + // not found since cache is disabled. + EXPECT_TRUE(jwt == nullptr); +} + +TEST_F(JwtCacheTest, TestExpiredToken) { + // setup an enabled cache + setupCache(true); + loadJwt(ExpiredToken); + + cache_->insert(ExpiredToken, std::move(jwt_)); + + auto* jwt = cache_->lookup(ExpiredToken); + // not be found since it is expired. + EXPECT_TRUE(jwt == nullptr); +} + +} // namespace +} // namespace JwtAuthn +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/jwt_authn/mock.h b/test/extensions/filters/http/jwt_authn/mock.h index a4924033d4587..ff235c5aed00d 100644 --- a/test/extensions/filters/http/jwt_authn/mock.h +++ b/test/extensions/filters/http/jwt_authn/mock.h @@ -59,6 +59,54 @@ class MockExtractor : public Extractor { MOCK_METHOD(void, sanitizePayloadHeaders, (Http::HeaderMap & headers), (const)); }; +class MockJwtCache : public JwtCache { +public: + MOCK_METHOD(::google::jwt_verify::Jwt*, lookup, (const std::string&), ()); + MOCK_METHOD(void, insert, (const std::string&, std::unique_ptr<::google::jwt_verify::Jwt>&&), ()); +}; + +class MockJwksData : public JwksCache::JwksData { +public: + MockJwksData() { + ON_CALL(*this, areAudiencesAllowed(_)).WillByDefault(::testing::Return(true)); + ON_CALL(*this, getJwtProvider()).WillByDefault(::testing::ReturnRef(jwt_provider_)); + ON_CALL(*this, isExpired()).WillByDefault(::testing::Return(false)); + ON_CALL(*this, getJwtCache()).WillByDefault(::testing::ReturnRef(jwt_cache_)); + 
} + + MOCK_METHOD(bool, areAudiencesAllowed, (const std::vector&), (const)); + MOCK_METHOD(const envoy::extensions::filters::http::jwt_authn::v3::JwtProvider&, getJwtProvider, + (), (const)); + MOCK_METHOD(const ::google::jwt_verify::Jwks*, getJwksObj, (), (const)); + MOCK_METHOD(bool, isExpired, (), (const)); + MOCK_METHOD(const ::google::jwt_verify::Jwks*, setRemoteJwks, (JwksConstPtr &&), ()); + MOCK_METHOD(JwtCache&, getJwtCache, (), ()); + + envoy::extensions::filters::http::jwt_authn::v3::JwtProvider jwt_provider_; + ::testing::NiceMock jwt_cache_; +}; + +class MockJwksCache : public JwksCache { +public: + MockJwksCache() : stats_(generateMockStats(stats_store_)) { + ON_CALL(*this, findByIssuer(_)).WillByDefault(::testing::Return(&jwks_data_)); + ON_CALL(*this, findByProvider(_)).WillByDefault(::testing::Return(&jwks_data_)); + ON_CALL(*this, stats()).WillByDefault(::testing::ReturnRef(stats_)); + } + + JwtAuthnFilterStats generateMockStats(Stats::Scope& scope) { + return {ALL_JWT_AUTHN_FILTER_STATS(POOL_COUNTER_PREFIX(scope, ""))}; + } + + MOCK_METHOD(JwksData*, findByIssuer, (const std::string&), ()); + MOCK_METHOD(JwksData*, findByProvider, (const std::string&), ()); + MOCK_METHOD(JwtAuthnFilterStats&, stats, ()); + + NiceMock stats_store_; + JwtAuthnFilterStats stats_; + ::testing::NiceMock jwks_data_; +}; + // A mock HTTP upstream with response body. 
class MockUpstream { public: diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index cc700f7e12eef..cf48779ec8bac 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -32,6 +32,7 @@ ENOTCONN EPIPE HEXDIG HEXDIGIT +LRU OWS Preconnecting RCVBUF From 6c25d074113a4de00aad6d49fad624c3bc411498 Mon Sep 17 00:00:00 2001 From: Adam Kotwasinski Date: Thu, 22 Jul 2021 12:30:16 -0700 Subject: [PATCH 32/57] kafka: add stub classes for mesh filter (#17374) Signed-off-by: Adam Kotwasinski --- .../filters/network/kafka/mesh/BUILD | 64 ++++++ .../network/kafka/mesh/abstract_command.cc | 28 +++ .../network/kafka/mesh/abstract_command.h | 97 +++++++++ .../filters/network/kafka/mesh/filter.cc | 102 ++++++++++ .../filters/network/kafka/mesh/filter.h | 83 ++++++++ .../network/kafka/mesh/request_processor.cc | 31 +++ .../network/kafka/mesh/request_processor.h | 30 +++ .../filters/network/kafka/mesh/BUILD | 46 +++++ .../kafka/mesh/abstract_command_unit_test.cc | 55 ++++++ .../network/kafka/mesh/filter_unit_test.cc | 184 ++++++++++++++++++ .../kafka/mesh/request_processor_unit_test.cc | 45 +++++ 11 files changed, 765 insertions(+) create mode 100644 source/extensions/filters/network/kafka/mesh/BUILD create mode 100644 source/extensions/filters/network/kafka/mesh/abstract_command.cc create mode 100644 source/extensions/filters/network/kafka/mesh/abstract_command.h create mode 100644 source/extensions/filters/network/kafka/mesh/filter.cc create mode 100644 source/extensions/filters/network/kafka/mesh/filter.h create mode 100644 source/extensions/filters/network/kafka/mesh/request_processor.cc create mode 100644 source/extensions/filters/network/kafka/mesh/request_processor.h create mode 100644 test/extensions/filters/network/kafka/mesh/BUILD create mode 100644 test/extensions/filters/network/kafka/mesh/abstract_command_unit_test.cc create mode 100644 
test/extensions/filters/network/kafka/mesh/filter_unit_test.cc create mode 100644 test/extensions/filters/network/kafka/mesh/request_processor_unit_test.cc diff --git a/source/extensions/filters/network/kafka/mesh/BUILD b/source/extensions/filters/network/kafka/mesh/BUILD new file mode 100644 index 0000000000000..e707476b50891 --- /dev/null +++ b/source/extensions/filters/network/kafka/mesh/BUILD @@ -0,0 +1,64 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +# Kafka-mesh network filter. + +envoy_extension_package() + +envoy_cc_library( + name = "filter_lib", + srcs = ["filter.cc"], + hdrs = [ + "filter.h", + ], + tags = ["skip_on_windows"], + deps = [ + ":abstract_command_lib", + ":request_processor_lib", + "//envoy/buffer:buffer_interface", + "//envoy/network:connection_interface", + "//envoy/network:filter_interface", + "//source/common/common:assert_lib", + "//source/common/common:minimal_logger_lib", + "//source/extensions/filters/network/kafka:kafka_request_codec_lib", + "//source/extensions/filters/network/kafka:kafka_response_codec_lib", + ], +) + +envoy_cc_library( + name = "request_processor_lib", + srcs = [ + "request_processor.cc", + ], + hdrs = [ + "request_processor.h", + ], + tags = ["skip_on_windows"], + deps = [ + ":abstract_command_lib", + "//source/common/common:minimal_logger_lib", + "//source/extensions/filters/network/kafka:kafka_request_codec_lib", + "//source/extensions/filters/network/kafka:kafka_request_parser_lib", + ], +) + +envoy_cc_library( + name = "abstract_command_lib", + srcs = [ + "abstract_command.cc", + ], + hdrs = [ + "abstract_command.h", + ], + tags = ["skip_on_windows"], + deps = [ + "//source/common/common:minimal_logger_lib", + "//source/extensions/filters/network/kafka:kafka_response_lib", + "//source/extensions/filters/network/kafka:tagged_fields_lib", + ], +) diff --git 
a/source/extensions/filters/network/kafka/mesh/abstract_command.cc b/source/extensions/filters/network/kafka/mesh/abstract_command.cc new file mode 100644 index 0000000000000..eab6dbb47df5d --- /dev/null +++ b/source/extensions/filters/network/kafka/mesh/abstract_command.cc @@ -0,0 +1,28 @@ +#include "source/extensions/filters/network/kafka/mesh/abstract_command.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +void BaseInFlightRequest::abandon() { + ENVOY_LOG(trace, "Abandoning request"); + filter_active_ = false; +} + +void BaseInFlightRequest::notifyFilter() { + if (filter_active_) { + ENVOY_LOG(trace, "Notifying filter for request"); + filter_.onRequestReadyForAnswer(); + } else { + ENVOY_LOG(trace, "Request has been finished, but we are not doing anything, because filter has " + "been already destroyed"); + } +} + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/kafka/mesh/abstract_command.h b/source/extensions/filters/network/kafka/mesh/abstract_command.h new file mode 100644 index 0000000000000..40cbb18396f50 --- /dev/null +++ b/source/extensions/filters/network/kafka/mesh/abstract_command.h @@ -0,0 +1,97 @@ +#pragma once + +#include "source/common/common/logger.h" +#include "source/extensions/filters/network/kafka/kafka_response.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +/** + * Represents single downstream client request. + * Responsible for performing the work on multiple upstream clusters and aggregating the results. + */ +class InFlightRequest { +public: + virtual ~InFlightRequest() = default; + + /** + * Begins processing of given request with context provided. + */ + virtual void startProcessing() PURE; + + /** + * Whether the given request has finished processing. + * E.g. 
produce requests need to be forwarded upstream and get a response from Kafka cluster for + * this to be true. + */ + virtual bool finished() const PURE; + + /** + * Creates a Kafka answer object that can be sent downstream. + */ + virtual AbstractResponseSharedPtr computeAnswer() const PURE; + + /** + * Abandon this request. + * In-flight requests that have been abandoned are not going to cause any action after they have + * finished processing. + */ + virtual void abandon() PURE; +}; + +using InFlightRequestSharedPtr = std::shared_ptr; + +/** + * Callback to be implemented by entities that are interested when the request has finished and has + * answer ready. + */ +// Impl note: Filter implements this interface to keep track of requests coming to it. +class AbstractRequestListener { +public: + virtual ~AbstractRequestListener() = default; + + // Notifies the listener that a new request has been received. + virtual void onRequest(InFlightRequestSharedPtr request) PURE; + + // Notified the listener, that the request finally has an answer ready. + // Usually this means that the request has been sent to upstream Kafka clusters and we got answers + // (unless it's something that could be responded to locally). + // IMPL: we do not need to pass request here, as filters need to answer in-order. + // What means that we always need to check if first answer is ready, even if the latter are + // already finished. + virtual void onRequestReadyForAnswer() PURE; +}; + +/** + * Helper base class for all in flight requests. + * Binds request to its origin filter. + */ +class BaseInFlightRequest : public InFlightRequest, protected Logger::Loggable { +public: + BaseInFlightRequest(AbstractRequestListener& filter) : filter_{filter} {}; + void abandon() override; + +protected: + /** + * Notify the originating filter that this request has an answer ready. + * This method is to be invoked by each request after it has finished processing. 
+ * Obviously, if the filter is no longer active (connection got closed before we were ready to + * answer) nothing will happen. + */ + void notifyFilter(); + + // Filter that originated this request. + AbstractRequestListener& filter_; + + // Whether the filter_ reference is still viable. + bool filter_active_ = true; +}; + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/kafka/mesh/filter.cc b/source/extensions/filters/network/kafka/mesh/filter.cc new file mode 100644 index 0000000000000..f250046c849f7 --- /dev/null +++ b/source/extensions/filters/network/kafka/mesh/filter.cc @@ -0,0 +1,102 @@ +#include "source/extensions/filters/network/kafka/mesh/filter.h" + +#include "envoy/network/connection.h" + +#include "source/common/buffer/buffer_impl.h" +#include "source/extensions/filters/network/kafka/external/requests.h" +#include "source/extensions/filters/network/kafka/external/responses.h" +#include "source/extensions/filters/network/kafka/response_codec.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +KafkaMeshFilter::KafkaMeshFilter(RequestDecoderSharedPtr request_decoder) + : request_decoder_{request_decoder} {} + +KafkaMeshFilter::~KafkaMeshFilter() { abandonAllInFlightRequests(); } + +Network::FilterStatus KafkaMeshFilter::onNewConnection() { return Network::FilterStatus::Continue; } + +void KafkaMeshFilter::initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) { + read_filter_callbacks_ = &callbacks; + read_filter_callbacks_->connection().addConnectionCallbacks(*this); +} + +Network::FilterStatus KafkaMeshFilter::onData(Buffer::Instance& data, bool) { + try { + request_decoder_->onData(data); + data.drain(data.length()); // All the bytes have been copied to decoder. 
+ return Network::FilterStatus::StopIteration; + } catch (const EnvoyException& e) { + ENVOY_LOG(trace, "Could not process data from Kafka client: {}", e.what()); + request_decoder_->reset(); + // Something very wrong occurred, let's just close the connection. + read_filter_callbacks_->connection().close(Network::ConnectionCloseType::FlushWrite); + return Network::FilterStatus::StopIteration; + } +} + +void KafkaMeshFilter::onEvent(Network::ConnectionEvent event) { + if (Network::ConnectionEvent::RemoteClose == event || + Network::ConnectionEvent::LocalClose == event) { + // Connection is being closed but there might be some requests in flight, abandon them. + abandonAllInFlightRequests(); + } +} + +void KafkaMeshFilter::onAboveWriteBufferHighWatermark() {} + +void KafkaMeshFilter::onBelowWriteBufferLowWatermark() {} + +/** + * We have received a request we can actually process. + */ +void KafkaMeshFilter::onRequest(InFlightRequestSharedPtr request) { + requests_in_flight_.push_back(request); + request->startProcessing(); +} + +/** + * Our filter has been notified that a request that originated in this filter has an answer ready. + * Because the Kafka messages have ordering, we need to check all messages and can possibly send + * multiple answers in one go. This can happen if e.g. message 3 finishes first, then 2, then 1, + * what allows us to send 1, 2, 3 in one invocation. + */ +void KafkaMeshFilter::onRequestReadyForAnswer() { + while (!requests_in_flight_.empty()) { + InFlightRequestSharedPtr rq = requests_in_flight_.front(); + if (rq->finished()) { + // The request has been finished, so we no longer need to store it. + requests_in_flight_.erase(requests_in_flight_.begin()); + + // And write the response downstream. 
+ const AbstractResponseSharedPtr response = rq->computeAnswer(); + Buffer::OwnedImpl buffer; + ResponseEncoder encoder{buffer}; + encoder.encode(*response); + read_filter_callbacks_->connection().write(buffer, false); + } else { + break; + } + } +} + +void KafkaMeshFilter::abandonAllInFlightRequests() { + for (const auto& request : requests_in_flight_) { + request->abandon(); + } + requests_in_flight_.erase(requests_in_flight_.begin(), requests_in_flight_.end()); +} + +std::list& KafkaMeshFilter::getRequestsInFlightForTest() { + return requests_in_flight_; +} + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/kafka/mesh/filter.h b/source/extensions/filters/network/kafka/mesh/filter.h new file mode 100644 index 0000000000000..3fde92e6ca273 --- /dev/null +++ b/source/extensions/filters/network/kafka/mesh/filter.h @@ -0,0 +1,83 @@ +#pragma once + +#include "envoy/common/time.h" +#include "envoy/network/connection.h" +#include "envoy/network/filter.h" +#include "envoy/stats/scope.h" + +#include "source/common/common/logger.h" +#include "source/extensions/filters/network/kafka/external/requests.h" +#include "source/extensions/filters/network/kafka/mesh/abstract_command.h" +#include "source/extensions/filters/network/kafka/mesh/request_processor.h" +#include "source/extensions/filters/network/kafka/request_codec.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { +/** + * Main entry point. + * Decoded request bytes are passed to processor, that calls us back with enriched request. + * Request then gets invoked with upstream Kafka facade, which will (in future) maintain + * thread-local list of (enriched) Kafka producers. Filter is going to maintain a list of + * in-flight-request so it can send responses when they finish. 
+ * + * + * +----------------+ +-----------------------+ + * |RequestProcessor+----------------->AbstractInFlightRequest| + * +-------^--------+ +------^----------------+ + * | | + * | | + * +-------+-------+ | + * |KafkaMeshFilter+-------------------------+ + * +-------+-------+ + **/ +class KafkaMeshFilter : public Network::ReadFilter, + public Network::ConnectionCallbacks, + public AbstractRequestListener, + private Logger::Loggable { +public: + // Visible for testing. + KafkaMeshFilter(RequestDecoderSharedPtr request_decoder); + + // Non-trivial. See 'abandonAllInFlightRequests'. + ~KafkaMeshFilter() override; + + // Network::ReadFilter + Network::FilterStatus onNewConnection() override; + void initializeReadFilterCallbacks(Network::ReadFilterCallbacks& callbacks) override; + Network::FilterStatus onData(Buffer::Instance& data, bool end_stream) override; + + // Network::ConnectionCallbacks + void onEvent(Network::ConnectionEvent event) override; + void onAboveWriteBufferHighWatermark() override; + void onBelowWriteBufferLowWatermark() override; + + // AbstractRequestListener + void onRequest(InFlightRequestSharedPtr request) override; + void onRequestReadyForAnswer() override; + + std::list& getRequestsInFlightForTest(); + +private: + // Helper method invoked when connection gets dropped. + // Request references are going to be stored in 2 places: this filter (request's origin) and in + // UpstreamKafkaClient instances (to match pure-Kafka confirmations to the requests). Because + // filter can be destroyed before confirmations from Kafka are received, we are just going to mark + // related requests as abandoned, so they do not attempt to reference this filter anymore. + // Impl note: this is similar to what Redis filter does. 
+ void abandonAllInFlightRequests(); + + const RequestDecoderSharedPtr request_decoder_; + + Network::ReadFilterCallbacks* read_filter_callbacks_; + + std::list requests_in_flight_; +}; + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/kafka/mesh/request_processor.cc b/source/extensions/filters/network/kafka/mesh/request_processor.cc new file mode 100644 index 0000000000000..d19c8f9edd055 --- /dev/null +++ b/source/extensions/filters/network/kafka/mesh/request_processor.cc @@ -0,0 +1,31 @@ +#include "source/extensions/filters/network/kafka/mesh/request_processor.h" + +#include "envoy/common/exception.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +// Helper function. Throws a nice message. Filter will react by closing the connection. +static void throwOnUnsupportedRequest(const std::string& reason, const RequestHeader& header) { + throw EnvoyException(absl::StrCat(reason, " Kafka request (key=", header.api_key_, ", version=", + header.api_version_, ", cid=", header.correlation_id_)); +} + +void RequestProcessor::onMessage(AbstractRequestSharedPtr arg) { + // This will be replaced with switch on header's API key. + throwOnUnsupportedRequest("unsupported (bad client API invoked?)", arg->request_header_); +} + +// We got something that the parser could not handle. 
+void RequestProcessor::onFailedParse(RequestParseFailureSharedPtr arg) { + throwOnUnsupportedRequest("unknown", arg->request_header_); +} + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/kafka/mesh/request_processor.h b/source/extensions/filters/network/kafka/mesh/request_processor.h new file mode 100644 index 0000000000000..ae89ca8ae47be --- /dev/null +++ b/source/extensions/filters/network/kafka/mesh/request_processor.h @@ -0,0 +1,30 @@ +#pragma once + +#include "source/common/common/logger.h" +#include "source/extensions/filters/network/kafka/external/requests.h" +#include "source/extensions/filters/network/kafka/mesh/abstract_command.h" +#include "source/extensions/filters/network/kafka/request_codec.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { + +/** + * Processes (enriches) incoming requests and passes it back to origin. 
+ */ +class RequestProcessor : public RequestCallback, private Logger::Loggable { +public: + RequestProcessor() = default; + + // RequestCallback + void onMessage(AbstractRequestSharedPtr arg) override; + void onFailedParse(RequestParseFailureSharedPtr) override; +}; + +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/kafka/mesh/BUILD b/test/extensions/filters/network/kafka/mesh/BUILD new file mode 100644 index 0000000000000..ab7507ffef02c --- /dev/null +++ b/test/extensions/filters/network/kafka/mesh/BUILD @@ -0,0 +1,46 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "filter_unit_test", + srcs = ["filter_unit_test.cc"], + # This name needs to be changed after we have the mesh filter ready. + extension_names = ["envoy.filters.network.kafka_broker"], + tags = ["skip_on_windows"], + deps = [ + "//source/extensions/filters/network/kafka/mesh:filter_lib", + "//test/mocks/network:network_mocks", + ], +) + +envoy_extension_cc_test( + name = "request_processor_unit_test", + srcs = ["request_processor_unit_test.cc"], + # This name needs to be changed after we have the mesh filter ready. + extension_names = ["envoy.filters.network.kafka_broker"], + tags = ["skip_on_windows"], + deps = [ + "//source/extensions/filters/network/kafka/mesh:request_processor_lib", + ], +) + +envoy_extension_cc_test( + name = "abstract_command_unit_test", + srcs = ["abstract_command_unit_test.cc"], + # This name needs to be changed after we have the mesh filter ready. 
+ extension_names = ["envoy.filters.network.kafka_broker"], + tags = ["skip_on_windows"], + deps = [ + "//source/extensions/filters/network/kafka/mesh:abstract_command_lib", + ], +) diff --git a/test/extensions/filters/network/kafka/mesh/abstract_command_unit_test.cc b/test/extensions/filters/network/kafka/mesh/abstract_command_unit_test.cc new file mode 100644 index 0000000000000..e0ff2202f0971 --- /dev/null +++ b/test/extensions/filters/network/kafka/mesh/abstract_command_unit_test.cc @@ -0,0 +1,55 @@ +#include "source/extensions/filters/network/kafka/mesh/abstract_command.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { +namespace { + +class MockAbstractRequestListener : public AbstractRequestListener { +public: + MOCK_METHOD(void, onRequest, (InFlightRequestSharedPtr)); + MOCK_METHOD(void, onRequestReadyForAnswer, ()); +}; + +class Testee : public BaseInFlightRequest { +public: + Testee(AbstractRequestListener& filter) : BaseInFlightRequest{filter} {}; + void startProcessing() override { throw "not interesting"; } + bool finished() const override { throw "not interesting"; } + AbstractResponseSharedPtr computeAnswer() const override { throw "not interesting"; } + void finishRequest() { BaseInFlightRequest::notifyFilter(); } +}; + +TEST(AbstractCommandTest, shouldNotifyFilter) { + // given + MockAbstractRequestListener filter; + EXPECT_CALL(filter, onRequestReadyForAnswer()); + Testee testee = {filter}; + + // when + testee.finishRequest(); + + // then - filter got notified that a requested has finished processing. +} + +TEST(AbstractCommandTest, shouldHandleBeingAbandoned) { + // given + MockAbstractRequestListener filter; + Testee testee = {filter}; + testee.abandon(); + + // when, then - abandoned request does not notify filter even after it finishes. 
+ testee.finishRequest(); +} + +} // namespace +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/kafka/mesh/filter_unit_test.cc b/test/extensions/filters/network/kafka/mesh/filter_unit_test.cc new file mode 100644 index 0000000000000..07fffa2466a0e --- /dev/null +++ b/test/extensions/filters/network/kafka/mesh/filter_unit_test.cc @@ -0,0 +1,184 @@ +#include "source/extensions/filters/network/kafka/mesh/filter.h" + +#include "test/mocks/network/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::_; +using testing::Return; +using testing::Throw; + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { +namespace { + +class MockRequestDecoder : public RequestDecoder { +public: + MockRequestDecoder() : RequestDecoder{{}} {}; + MOCK_METHOD(void, onData, (Buffer::Instance&)); + MOCK_METHOD(void, reset, ()); +}; + +using MockRequestDecoderSharedPtr = std::shared_ptr; + +class MockInFlightRequest : public InFlightRequest { +public: + MOCK_METHOD(void, startProcessing, ()); + MOCK_METHOD(bool, finished, (), (const)); + MOCK_METHOD(AbstractResponseSharedPtr, computeAnswer, (), (const)); + MOCK_METHOD(void, abandon, ()); +}; + +using Request = std::shared_ptr; + +class MockResponse : public AbstractResponse { +public: + MockResponse() : AbstractResponse{ResponseMetadata{0, 0, 0}} {}; + MOCK_METHOD(uint32_t, computeSize, (), (const)); + MOCK_METHOD(uint32_t, encode, (Buffer::Instance & dst), (const)); +}; + +class FilterUnitTest : public testing::Test { +protected: + MockRequestDecoderSharedPtr request_decoder_ = std::make_shared(); + KafkaMeshFilter testee_{request_decoder_}; + + NiceMock filter_callbacks_; + + // Helper: computed response for any kind of request. 
+ std::shared_ptr computed_response_ = std::make_shared>(); + + void initialize() { + testee_.initializeReadFilterCallbacks(filter_callbacks_); + testee_.onNewConnection(); + } +}; + +TEST_F(FilterUnitTest, ShouldAcceptDataSentByKafkaClient) { + // given + Buffer::OwnedImpl data; + EXPECT_CALL(*request_decoder_, onData(_)); + + // when + initialize(); + const auto result = testee_.onData(data, false); + + // then + ASSERT_EQ(result, Network::FilterStatus::StopIteration); + // Also, request_decoder got invoked. +} + +TEST_F(FilterUnitTest, ShouldStopIterationIfProcessingDataFromKafkaClientFails) { + // given + Buffer::OwnedImpl data; + EXPECT_CALL(*request_decoder_, onData(_)).WillOnce(Throw(EnvoyException("boom"))); + EXPECT_CALL(*request_decoder_, reset()); + + // when + initialize(); + const auto result = testee_.onData(data, false); + + // then + ASSERT_EQ(result, Network::FilterStatus::StopIteration); +} + +TEST_F(FilterUnitTest, ShouldAcceptAndAbandonRequests) { + // given + initialize(); + Request request1 = std::make_shared(); + testee_.getRequestsInFlightForTest().push_back(request1); + EXPECT_CALL(*request1, abandon()); + Request request2 = std::make_shared(); + testee_.getRequestsInFlightForTest().push_back(request2); + EXPECT_CALL(*request2, abandon()); + + // when, then - requests get abandoned in destructor. +} + +TEST_F(FilterUnitTest, ShouldAcceptAndAbandonRequestsOnConnectionClose) { + // given + initialize(); + Request request1 = std::make_shared(); + testee_.getRequestsInFlightForTest().push_back(request1); + EXPECT_CALL(*request1, abandon()); + Request request2 = std::make_shared(); + testee_.getRequestsInFlightForTest().push_back(request2); + EXPECT_CALL(*request2, abandon()); + + // when + testee_.onEvent(Network::ConnectionEvent::LocalClose); + + // then - requests get abandoned (only once). 
+} + +TEST_F(FilterUnitTest, ShouldAcceptAndProcessRequests) { + // given + initialize(); + Request request = std::make_shared(); + EXPECT_CALL(*request, startProcessing()); + EXPECT_CALL(*request, finished()).WillOnce(Return(true)); + EXPECT_CALL(*request, computeAnswer()).WillOnce(Return(computed_response_)); + + EXPECT_CALL(filter_callbacks_.connection_, write(_, false)); + + // when - 1 + testee_.onRequest(request); + + // then - 1 + ASSERT_EQ(testee_.getRequestsInFlightForTest().size(), 1); + + // when - 2 + testee_.onRequestReadyForAnswer(); + + // then - 2 + ASSERT_EQ(testee_.getRequestsInFlightForTest().size(), 0); +} + +// This is important - we have two requests, but it is the second one that finishes processing +// first. As Kafka protocol uses sequence numbers, we need to wait until the first finishes. +TEST_F(FilterUnitTest, ShouldAcceptAndProcessRequestsInOrder) { + // given + initialize(); + Request request1 = std::make_shared(); + Request request2 = std::make_shared(); + testee_.getRequestsInFlightForTest().push_back(request1); + testee_.getRequestsInFlightForTest().push_back(request2); + + EXPECT_CALL(*request1, finished()).WillOnce(Return(false)).WillOnce(Return(true)); + EXPECT_CALL(*request2, finished()).WillOnce(Return(true)); + EXPECT_CALL(*request1, computeAnswer()).WillOnce(Return(computed_response_)); + EXPECT_CALL(*request2, computeAnswer()).WillOnce(Return(computed_response_)); + EXPECT_CALL(filter_callbacks_.connection_, write(_, false)).Times(2); + + // when - 1 + testee_.onRequestReadyForAnswer(); + + // then - 1 + ASSERT_EQ(testee_.getRequestsInFlightForTest().size(), 2); + + // when - 2 + testee_.onRequestReadyForAnswer(); + + // then - 2 + ASSERT_EQ(testee_.getRequestsInFlightForTest().size(), 0); +} + +TEST_F(FilterUnitTest, ShouldDoNothingOnBufferWatermarkEvents) { + // given + initialize(); + + // when, then - nothing happens. 
+ testee_.onBelowWriteBufferLowWatermark(); + testee_.onAboveWriteBufferHighWatermark(); +} + +} // namespace +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/network/kafka/mesh/request_processor_unit_test.cc b/test/extensions/filters/network/kafka/mesh/request_processor_unit_test.cc new file mode 100644 index 0000000000000..45639eb9b223b --- /dev/null +++ b/test/extensions/filters/network/kafka/mesh/request_processor_unit_test.cc @@ -0,0 +1,45 @@ +#include "source/extensions/filters/network/kafka/mesh/abstract_command.h" +#include "source/extensions/filters/network/kafka/mesh/request_processor.h" + +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Mesh { +namespace { + +class RequestProcessorTest : public testing::Test { +protected: + RequestProcessor testee_ = {}; +}; + +TEST_F(RequestProcessorTest, ShouldHandleUnsupportedRequest) { + // given + const RequestHeader header = {0, 0, 0, absl::nullopt}; + const ListOffsetRequest data = {0, {}}; + const auto message = std::make_shared>(header, data); + + // when, then - exception gets thrown. + EXPECT_THROW_WITH_REGEX(testee_.onMessage(message), EnvoyException, "unsupported"); +} + +TEST_F(RequestProcessorTest, ShouldHandleUnparseableRequest) { + // given + const RequestHeader header = {42, 42, 42, absl::nullopt}; + const auto arg = std::make_shared(header); + + // when, then - exception gets thrown. 
+ EXPECT_THROW_WITH_REGEX(testee_.onFailedParse(arg), EnvoyException, "unknown"); +} + +} // anonymous namespace +} // namespace Mesh +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy From 8e03ca4b64e387ed6697a9e82b72773ea445c36e Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Thu, 22 Jul 2021 15:30:45 -0400 Subject: [PATCH 33/57] envoy-mobile-hcm: removing an unnecessary try/catch (#17437) Signed-off-by: Alyssa Wilk --- source/server/api_listener_impl.cc | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/source/server/api_listener_impl.cc b/source/server/api_listener_impl.cc index e9963392cca5c..41f4d34195a64 100644 --- a/source/server/api_listener_impl.cc +++ b/source/server/api_listener_impl.cc @@ -32,17 +32,20 @@ void ApiListenerImplBase::SyntheticReadCallbacks::SyntheticConnection::raiseConn HttpApiListener::HttpApiListener(const envoy::config::listener::v3::Listener& config, ListenerManagerImpl& parent, const std::string& name) : ApiListenerImplBase(config, parent, name) { - TRY_ASSERT_MAIN_THREAD - auto typed_config = MessageUtil::anyConvertAndValidate< - envoy::extensions::filters::network::http_connection_manager::v3:: - EnvoyMobileHttpConnectionManager>(config.api_listener().api_listener(), - factory_context_.messageValidationVisitor()); + if (config.api_listener().api_listener().type_url() == + absl::StrCat("type.googleapis.com/", + envoy::extensions::filters::network::http_connection_manager::v3:: + EnvoyMobileHttpConnectionManager::descriptor() + ->full_name())) { + auto typed_config = MessageUtil::anyConvertAndValidate< + envoy::extensions::filters::network::http_connection_manager::v3:: + EnvoyMobileHttpConnectionManager>(config.api_listener().api_listener(), + factory_context_.messageValidationVisitor()); - http_connection_manager_factory_ = Envoy::Extensions::NetworkFilters::HttpConnectionManager:: - 
HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto( - typed_config.config(), factory_context_, read_callbacks_, false); - END_TRY - catch (const EnvoyException& e) { + http_connection_manager_factory_ = Envoy::Extensions::NetworkFilters::HttpConnectionManager:: + HttpConnectionManagerFactory::createHttpConnectionManagerFactoryFromProto( + typed_config.config(), factory_context_, read_callbacks_, false); + } else { auto typed_config = MessageUtil::anyConvertAndValidate< envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager>( config.api_listener().api_listener(), factory_context_.messageValidationVisitor()); From 9f05912d76edb2924325039c6a28b5b017ccd8dc Mon Sep 17 00:00:00 2001 From: Keith Smiley Date: Thu, 22 Jul 2021 12:31:18 -0700 Subject: [PATCH 34/57] bazel: add darwin_arm64 to apple config group (#17444) This is the CPU you target when building on an Apple Silicon mac. All the apple settings should apply. Signed-off-by: Keith Smiley --- bazel/BUILD | 1 + 1 file changed, 1 insertion(+) diff --git a/bazel/BUILD b/bazel/BUILD index 3d65037c27928..016482a577f3e 100644 --- a/bazel/BUILD +++ b/bazel/BUILD @@ -550,6 +550,7 @@ selects.config_setting_group( name = "apple", match_any = [ ":darwin", + ":darwin_arm64", ":darwin_x86_64", ":ios_arm64", ":ios_arm64e", From a8033fa8e44406ba6448cf3110037cb79f81ff39 Mon Sep 17 00:00:00 2001 From: Taylor Barrella Date: Thu, 22 Jul 2021 12:46:39 -0700 Subject: [PATCH 35/57] grpc stream: reduce log level depending on remote close status (#17300) Signed-off-by: Taylor Barrella --- docs/root/version_history/current.rst | 4 ++ source/common/config/grpc_stream.h | 99 ++++++++++++++++++++++++-- test/common/config/BUILD | 2 + test/common/config/grpc_stream_test.cc | 93 ++++++++++++++++++++++++ 4 files changed, 192 insertions(+), 6 deletions(-) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 3a8da48b481f6..38a869bf3c79d 100644 --- 
a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -9,6 +9,10 @@ Minor Behavior Changes ---------------------- *Changes that may cause incompatibilities for some users, but should not for most* +* config: reduced log level for "Unable to establish new stream" xDS logs to debug. The log level + for "gRPC config stream closed" is now reduced to debug when the status is ``Ok`` or has been + retriable (``DeadlineExceeded``, ``ResourceExhausted``, or ``Unavailable``) for less than 30 + seconds. * grpc: gRPC async client can be cached and shared accross filter instances in the same thread, this feature is turned off by default, can be turned on by setting runtime guard ``envoy.reloadable_features.enable_grpc_async_client_cache`` to true. * http: correct the use of the ``x-forwarded-proto`` header and the ``:scheme`` header. Where they differ (which is rare) ``:scheme`` will now be used for serving redirect URIs and cached content. This behavior diff --git a/source/common/config/grpc_stream.h b/source/common/config/grpc_stream.h index a682fef77d0ce..23ee9a0d287b2 100644 --- a/source/common/config/grpc_stream.h +++ b/source/common/config/grpc_stream.h @@ -15,6 +15,14 @@ namespace Envoy { namespace Config { +namespace { + +// TODO(htuch): Make this configurable. +constexpr uint32_t RetryInitialDelayMs = 500; +constexpr uint32_t RetryMaxDelayMs = 30000; // Do not cross more than 30s + +} // namespace + template using ResponseProtoPtr = std::unique_ptr; // Oversees communication for gRPC xDS implementations (parent to both regular xDS and delta @@ -45,9 +53,6 @@ class GrpcStream : public Grpc::AsyncStreamCallbacks, }); } - // TODO(htuch): Make this configurable. 
- static constexpr uint32_t RetryInitialDelayMs = 500; - static constexpr uint32_t RetryMaxDelayMs = 30000; // Do not cross more than 30s backoff_strategy_ = std::make_unique( RetryInitialDelayMs, RetryMaxDelayMs, random_); } @@ -60,7 +65,7 @@ class GrpcStream : public Grpc::AsyncStreamCallbacks, } stream_ = async_client_->start(service_method_, *this, Http::AsyncClient::StreamOptions()); if (stream_ == nullptr) { - ENVOY_LOG(warn, "Unable to establish new stream"); + ENVOY_LOG(debug, "Unable to establish new stream to configuration server"); callbacks_->onEstablishmentFailure(); setRetryTimer(); return; @@ -85,6 +90,9 @@ class GrpcStream : public Grpc::AsyncStreamCallbacks, void onReceiveMessage(ResponseProtoPtr&& message) override { // Reset here so that it starts with fresh backoff interval on next disconnect. backoff_strategy_->reset(); + // Clear here instead of on stream establishment in case streams are immediately closed + // repeatedly. + clearCloseStatus(); // Sometimes during hot restarts this stat's value becomes inconsistent and will continue to // have 0 until it is reconnected. Setting here ensures that it is consistent with the state of // management server connection. 
@@ -97,8 +105,7 @@ class GrpcStream : public Grpc::AsyncStreamCallbacks, } void onRemoteClose(Grpc::Status::GrpcStatus status, const std::string& message) override { - ENVOY_LOG(warn, "{} gRPC config stream closed: {}, {}", service_method_.name(), status, - message); + logClose(status, message); stream_ = nullptr; control_plane_stats_.connected_state_.set(0); callbacks_->onEstablishmentFailure(); @@ -131,11 +138,85 @@ class GrpcStream : public Grpc::AsyncStreamCallbacks, return false; } + absl::optional getCloseStatus() { return last_close_status_; } + private: void setRetryTimer() { retry_timer_->enableTimer(std::chrono::milliseconds(backoff_strategy_->nextBackOffMs())); } + // Log level should be reduced when the remote close failure is `Ok` or is retriable and has only + // been occurring for a short amount of time. + void logClose(Grpc::Status::GrpcStatus status, const std::string& message) { + if (Grpc::Status::WellKnownGrpcStatus::Ok == status) { + ENVOY_LOG(debug, "{} gRPC config stream closed: {}, {}", service_method_.name(), status, + message); + return; + } + + if (!isNonRetriableFailure(status)) { + // When the failure is considered non-retriable, warn. + ENVOY_LOG(warn, "{} gRPC config stream closed: {}, {}", service_method_.name(), status, + message); + return; + } + + if (!isCloseStatusSet()) { + // For the first failure, record its occurrence and log at the debug level. + ENVOY_LOG(debug, "{} gRPC config stream closed: {}, {}", service_method_.name(), status, + message); + setCloseStatus(status, message); + return; + } + + const uint64_t ms_since_first_close = std::chrono::duration_cast( + time_source_.monotonicTime() - last_close_time_) + .count(); + const Grpc::Status::GrpcStatus close_status = last_close_status_.value(); + + if (status != close_status) { + // This is a different failure. Warn on both statuses and remember the new one. 
+ ENVOY_LOG(warn, "{} gRPC config stream closed: {}, {} (previously {}, {} since {}ms ago)", + service_method_.name(), status, message, close_status, last_close_message_, + ms_since_first_close); + setCloseStatus(status, message); + return; + } + + if (ms_since_first_close > RetryMaxDelayMs) { + // Warn if we are over the time limit. + ENVOY_LOG(warn, "{} gRPC config stream closed since {}ms ago: {}, {}", service_method_.name(), + ms_since_first_close, close_status, last_close_message_); + return; + } + + // Failure is retriable and new enough to only log at the debug level. + ENVOY_LOG(debug, "{} gRPC config stream closed: {}, {}", service_method_.name(), status, + message); + } + + bool isNonRetriableFailure(Grpc::Status::GrpcStatus status) { + // Status codes from https://grpc.github.io/grpc/core/md_doc_statuscodes.html that potentially + // indicate a high likelihood of success after retrying with backoff. + // + // - DeadlineExceeded may be from a latency spike + // - ResourceExhausted may be from a rate limit with a short window or a transient period of too + // many connections + // - Unavailable is meant to be used for a transient downtime + return Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded == status || + Grpc::Status::WellKnownGrpcStatus::ResourceExhausted == status || + Grpc::Status::WellKnownGrpcStatus::Unavailable == status; + } + + void clearCloseStatus() { last_close_status_ = absl::nullopt; } + bool isCloseStatusSet() { return last_close_status_.has_value(); } + + void setCloseStatus(Grpc::Status::GrpcStatus status, const std::string& message) { + last_close_status_ = status; + last_close_time_ = time_source_.monotonicTime(); + last_close_message_ = message; + } + GrpcStreamCallbacks* const callbacks_; Grpc::AsyncClient async_client_; @@ -153,6 +234,12 @@ class GrpcStream : public Grpc::AsyncStreamCallbacks, TokenBucketPtr limit_request_; const bool rate_limiting_enabled_; Event::TimerPtr drain_request_timer_; + + // Records the initial 
message and timestamp of the most recent remote closes with the same + // status. + absl::optional last_close_status_; + std::string last_close_message_; + MonotonicTime last_close_time_; }; } // namespace Config diff --git a/test/common/config/BUILD b/test/common/config/BUILD index e710497bad9be..aa0e857d6a77b 100644 --- a/test/common/config/BUILD +++ b/test/common/config/BUILD @@ -206,6 +206,8 @@ envoy_cc_test( "//test/mocks/config:config_mocks", "//test/mocks/event:event_mocks", "//test/mocks/grpc:grpc_mocks", + "//test/test_common:logging_lib", + "//test/test_common:simulated_time_system_lib", "@envoy_api//envoy/service/discovery/v3:pkg_cc_proto", ], ) diff --git a/test/common/config/grpc_stream_test.cc b/test/common/config/grpc_stream_test.cc index 5128a21b2a3bc..1ae7e7a7c27b8 100644 --- a/test/common/config/grpc_stream_test.cc +++ b/test/common/config/grpc_stream_test.cc @@ -8,6 +8,8 @@ #include "test/mocks/config/mocks.h" #include "test/mocks/event/mocks.h" #include "test/mocks/grpc/mocks.h" +#include "test/test_common/logging.h" +#include "test/test_common/simulated_time_system.h" #include "test/test_common/utility.h" #include "gmock/gmock.h" @@ -38,6 +40,7 @@ class GrpcStreamTest : public testing::Test { NiceMock callbacks_; std::unique_ptr async_client_owner_; Grpc::MockAsyncClient* async_client_; + Event::SimulatedTimeSystem time_system_; GrpcStream @@ -73,6 +76,96 @@ TEST_F(GrpcStreamTest, EstablishStream) { } } +// Tests reducing log level depending on remote close status. +TEST_F(GrpcStreamTest, LogClose) { + // Failures with statuses that do not need special handling. They are always logged in the same + // way and so never saved. + { + EXPECT_FALSE(grpc_stream_.getCloseStatus().has_value()); + + // Benign status: debug. 
+ EXPECT_CALL(callbacks_, onEstablishmentFailure()); + EXPECT_LOG_CONTAINS("debug", "gRPC config stream closed", { + grpc_stream_.onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Ok, "Ok"); + }); + EXPECT_FALSE(grpc_stream_.getCloseStatus().has_value()); + + // Non-retriable failure: warn. + EXPECT_CALL(callbacks_, onEstablishmentFailure()); + EXPECT_LOG_CONTAINS("warn", "gRPC config stream closed", { + grpc_stream_.onRemoteClose(Grpc::Status::WellKnownGrpcStatus::NotFound, "Not Found"); + }); + EXPECT_FALSE(grpc_stream_.getCloseStatus().has_value()); + } + // Repeated failures that warn after enough time. + { + // Retriable failure: debug. + EXPECT_CALL(callbacks_, onEstablishmentFailure()); + EXPECT_LOG_CONTAINS("debug", "gRPC config stream closed", { + grpc_stream_.onRemoteClose(Grpc::Status::WellKnownGrpcStatus::Unavailable, "Unavailable"); + }); + EXPECT_EQ(grpc_stream_.getCloseStatus().value(), + Grpc::Status::WellKnownGrpcStatus::Unavailable); + + // Different retriable failure: warn. + time_system_.advanceTimeWait(std::chrono::milliseconds(1000)); + EXPECT_CALL(callbacks_, onEstablishmentFailure()); + EXPECT_LOG_CONTAINS( + "warn", "stream closed: 4, Deadline Exceeded (previously 14, Unavailable since 1000ms ago)", + { + grpc_stream_.onRemoteClose(Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded, + "Deadline Exceeded"); + }); + EXPECT_EQ(grpc_stream_.getCloseStatus().value(), + Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded); + + // Same retriable failure after a short amount of time: debug. + time_system_.advanceTimeWait(std::chrono::milliseconds(1000)); + EXPECT_CALL(callbacks_, onEstablishmentFailure()); + EXPECT_LOG_CONTAINS("debug", "gRPC config stream closed", { + grpc_stream_.onRemoteClose(Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded, + "Deadline Exceeded"); + }); + EXPECT_EQ(grpc_stream_.getCloseStatus().value(), + Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded); + + // Same retriable failure after a long time: warn. 
+ time_system_.advanceTimeWait(std::chrono::milliseconds(100000)); + EXPECT_CALL(callbacks_, onEstablishmentFailure()); + EXPECT_LOG_CONTAINS("warn", "gRPC config stream closed since 101000ms ago", { + grpc_stream_.onRemoteClose(Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded, + "Deadline Exceeded"); + }); + EXPECT_EQ(grpc_stream_.getCloseStatus().value(), + Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded); + + // Warn again. + time_system_.advanceTimeWait(std::chrono::milliseconds(1000)); + EXPECT_CALL(callbacks_, onEstablishmentFailure()); + EXPECT_LOG_CONTAINS("warn", "gRPC config stream closed since 102000ms ago", { + grpc_stream_.onRemoteClose(Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded, + "Deadline Exceeded"); + }); + EXPECT_EQ(grpc_stream_.getCloseStatus().value(), + Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded); + } + + // Successfully receiving a message clears close status. + { + EXPECT_CALL(*async_client_, startRaw(_, _, _, _)).WillOnce(Return(&async_stream_)); + EXPECT_CALL(callbacks_, onStreamEstablished()); + grpc_stream_.establishNewStream(); + EXPECT_TRUE(grpc_stream_.grpcStreamAvailable()); + // Status isn't cleared yet. + EXPECT_EQ(grpc_stream_.getCloseStatus().value(), + Grpc::Status::WellKnownGrpcStatus::DeadlineExceeded); + + auto response = std::make_unique(); + grpc_stream_.onReceiveMessage(std::move(response)); + EXPECT_FALSE(grpc_stream_.getCloseStatus().has_value()); + } +} + // A failure in the underlying gRPC machinery should result in grpcStreamAvailable() false. Calling // sendMessage would segfault. 
TEST_F(GrpcStreamTest, FailToEstablishNewStream) { From 3655b63747de362741dbb16a1b65fbd087355e88 Mon Sep 17 00:00:00 2001 From: Vamsee Lakamsani Date: Thu, 22 Jul 2021 15:52:46 -0700 Subject: [PATCH 36/57] docs: updated quickstart/run envoy doc to ensure a smoother first time experience (#17413) Signed-off-by: vamsee.lakamsani --- .gitignore | 2 ++ docs/root/start/quick-start/run-envoy.rst | 1 + 2 files changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 94d0778d92875..6a7b93663609b 100644 --- a/.gitignore +++ b/.gitignore @@ -38,3 +38,5 @@ cmake-build-debug bazel.output.txt *~ .coverage +**/.DS_Store +**/*.iml diff --git a/docs/root/start/quick-start/run-envoy.rst b/docs/root/start/quick-start/run-envoy.rst index dcdba9bcd5b46..721d43e996960 100644 --- a/docs/root/start/quick-start/run-envoy.rst +++ b/docs/root/start/quick-start/run-envoy.rst @@ -179,6 +179,7 @@ Save the following snippet to ``envoy-override.yaml``: admin: address: socket_address: + address: 127.0.0.1 port_value: 9902 Next, start the Envoy server using the override configuration: From 6738feaba24260d2c346e0bc04cd9c5cb84a6161 Mon Sep 17 00:00:00 2001 From: Yuchen Dai Date: Thu, 22 Jul 2021 15:53:37 -0700 Subject: [PATCH 37/57] listener: extract active_tcp_socket and active_stream_listener_base (#17355) Signed-off-by: Yuchen Dai --- source/server/BUILD | 85 +++++- source/server/active_stream_listener_base.cc | 62 ++++ source/server/active_stream_listener_base.h | 139 +++++++++ source/server/active_tcp_listener.cc | 281 ++++--------------- source/server/active_tcp_listener.h | 172 ++---------- source/server/active_tcp_socket.cc | 152 ++++++++++ source/server/active_tcp_socket.h | 102 +++++++ source/server/connection_handler_impl.cc | 3 +- source/server/connection_handler_impl.h | 4 +- test/server/active_tcp_listener_test.cc | 4 +- 10 files changed, 621 insertions(+), 383 deletions(-) create mode 100644 source/server/active_stream_listener_base.cc create mode 100644 
source/server/active_stream_listener_base.h create mode 100644 source/server/active_tcp_socket.cc create mode 100644 source/server/active_tcp_socket.h diff --git a/source/server/BUILD b/source/server/BUILD index 3e6f60ab88002..ee2c07b4e8380 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -64,9 +64,9 @@ envoy_cc_library( envoy_cc_library( name = "connection_handler_lib", deps = [ - "//source/server:active_tcp_listener", - "//source/server:active_udp_listener", - "//source/server:connection_handler_impl", + ":active_tcp_listener", + ":active_udp_listener", + ":connection_handler_impl", ], ) @@ -117,25 +117,20 @@ envoy_cc_library( "active_tcp_listener.h", ], deps = [ + ":active_stream_listener_base", + ":active_tcp_socket", "//envoy/common:time_interface", "//envoy/event:deferred_deletable", "//envoy/event:dispatcher_interface", - "//envoy/event:timer_interface", "//envoy/network:connection_handler_interface", "//envoy/network:connection_interface", - "//envoy/network:filter_interface", "//envoy/network:listen_socket_interface", "//envoy/network:listener_interface", "//envoy/server:listener_manager_interface", - "//envoy/stats:timespan_interface", "//source/common/common:assert_lib", "//source/common/common:linked_object", - "//source/common/common:non_copyable", - "//source/common/common:safe_memcpy_lib", - "//source/common/event:deferred_task", "//source/common/network:connection_lib", "//source/common/stats:timespan_lib", - "//source/common/stream_info:stream_info_lib", "//source/server:active_listener_base", ], ) @@ -158,6 +153,76 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "active_tcp_listener_headers", + hdrs = [ + "active_stream_listener_base.h", + "active_tcp_socket.h", + ], + deps = [ + ":active_listener_base", + "//envoy/common:time_interface", + "//envoy/event:deferred_deletable", + "//envoy/event:dispatcher_interface", + "//envoy/event:timer_interface", + "//envoy/network:connection_handler_interface", + 
"//envoy/network:connection_interface", + "//envoy/network:filter_interface", + "//envoy/network:listen_socket_interface", + "//envoy/network:listener_interface", + "//envoy/server:listener_manager_interface", + "//source/common/common:linked_object", + ], +) + +envoy_cc_library( + name = "active_tcp_socket", + srcs = ["active_tcp_socket.cc"], + hdrs = [ + "active_tcp_socket.h", + ], + deps = [ + ":active_listener_base", + ":active_tcp_listener_headers", + "//envoy/common:time_interface", + "//envoy/event:deferred_deletable", + "//envoy/event:dispatcher_interface", + "//envoy/event:timer_interface", + "//envoy/network:connection_handler_interface", + "//envoy/network:connection_interface", + "//envoy/network:filter_interface", + "//envoy/network:listen_socket_interface", + "//envoy/network:listener_interface", + "//source/common/common:linked_object", + "//source/common/network:connection_lib", + "//source/common/stream_info:stream_info_lib", + ], +) + +envoy_cc_library( + name = "active_stream_listener_base", + srcs = ["active_stream_listener_base.cc"], + hdrs = [ + "active_stream_listener_base.h", + ], + deps = [ + ":active_listener_base", + ":active_tcp_listener_headers", + "//envoy/common:time_interface", + "//envoy/event:deferred_deletable", + "//envoy/event:dispatcher_interface", + "//envoy/event:timer_interface", + "//envoy/network:connection_handler_interface", + "//envoy/network:connection_interface", + "//envoy/network:filter_interface", + "//envoy/network:listen_socket_interface", + "//envoy/network:listener_interface", + "//envoy/stream_info:stream_info_interface", + "//source/common/common:linked_object", + "//source/common/network:connection_lib", + ], +) + envoy_cc_library( name = "drain_manager_lib", srcs = ["drain_manager_impl.cc"], diff --git a/source/server/active_stream_listener_base.cc b/source/server/active_stream_listener_base.cc new file mode 100644 index 0000000000000..39d336034c5fc --- /dev/null +++ 
b/source/server/active_stream_listener_base.cc @@ -0,0 +1,62 @@ +#include "source/server/active_stream_listener_base.h" + +#include "envoy/network/filter.h" + +namespace Envoy { +namespace Server { + +ActiveStreamListenerBase::ActiveStreamListenerBase(Network::ConnectionHandler& parent, + Event::Dispatcher& dispatcher, + Network::ListenerPtr&& listener, + Network::ListenerConfig& config) + : ActiveListenerImplBase(parent, &config), parent_(parent), + listener_filters_timeout_(config.listenerFiltersTimeout()), + continue_on_listener_filters_timeout_(config.continueOnListenerFiltersTimeout()), + listener_(std::move(listener)), dispatcher_(dispatcher) {} + +void ActiveStreamListenerBase::emitLogs(Network::ListenerConfig& config, + StreamInfo::StreamInfo& stream_info) { + stream_info.onRequestComplete(); + for (const auto& access_log : config.accessLogs()) { + access_log->log(nullptr, nullptr, nullptr, stream_info); + } +} + +void ActiveStreamListenerBase::newConnection(Network::ConnectionSocketPtr&& socket, + std::unique_ptr stream_info) { + // Find matching filter chain. 
+ const auto filter_chain = config_->filterChainManager().findFilterChain(*socket); + if (filter_chain == nullptr) { + RELEASE_ASSERT(socket->addressProvider().remoteAddress() != nullptr, ""); + ENVOY_LOG(debug, "closing connection from {}: no matching filter chain found", + socket->addressProvider().remoteAddress()->asString()); + stats_.no_filter_chain_match_.inc(); + stream_info->setResponseFlag(StreamInfo::ResponseFlag::NoRouteFound); + stream_info->setResponseCodeDetails(StreamInfo::ResponseCodeDetails::get().FilterChainNotFound); + emitLogs(*config_, *stream_info); + socket->close(); + return; + } + stream_info->setFilterChainName(filter_chain->name()); + auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); + stream_info->setDownstreamSslConnection(transport_socket->ssl()); + auto server_conn_ptr = dispatcher().createServerConnection( + std::move(socket), std::move(transport_socket), *stream_info); + if (const auto timeout = filter_chain->transportSocketConnectTimeout(); + timeout != std::chrono::milliseconds::zero()) { + server_conn_ptr->setTransportSocketConnectTimeout(timeout); + } + server_conn_ptr->setBufferLimits(config_->perConnectionBufferLimitBytes()); + RELEASE_ASSERT(server_conn_ptr->addressProvider().remoteAddress() != nullptr, ""); + const bool empty_filter_chain = !config_->filterChainFactory().createNetworkFilterChain( + *server_conn_ptr, filter_chain->networkFilterFactories()); + if (empty_filter_chain) { + ENVOY_CONN_LOG(debug, "closing connection from {}: no filters", *server_conn_ptr, + server_conn_ptr->addressProvider().remoteAddress()->asString()); + server_conn_ptr->close(Network::ConnectionCloseType::NoFlush); + } + newActiveConnection(*filter_chain, std::move(server_conn_ptr), std::move(stream_info)); +} + +} // namespace Server +} // namespace Envoy diff --git a/source/server/active_stream_listener_base.h b/source/server/active_stream_listener_base.h new file mode 100644 index 
0000000000000..2d99e3965bf69 --- /dev/null +++ b/source/server/active_stream_listener_base.h @@ -0,0 +1,139 @@ +#pragma once + +#include +#include +#include +#include + +#include "envoy/common/time.h" +#include "envoy/event/dispatcher.h" +#include "envoy/network/connection.h" +#include "envoy/network/connection_handler.h" +#include "envoy/network/listener.h" +#include "envoy/stream_info/stream_info.h" + +#include "source/common/common/linked_object.h" +#include "source/server/active_listener_base.h" +#include "source/server/active_tcp_socket.h" + +namespace Envoy { +namespace Server { + +// The base class of the stream listener. It owns listener filter handling of active sockets. +// After the active socket passes all the listener filters, a server connection is created. The +// derived listener must override ``newActiveConnection`` to take the ownership of that server +// connection. +class ActiveStreamListenerBase : public ActiveListenerImplBase, + protected Logger::Loggable { +public: + ActiveStreamListenerBase(Network::ConnectionHandler& parent, Event::Dispatcher& dispatcher, + Network::ListenerPtr&& listener, Network::ListenerConfig& config); + static void emitLogs(Network::ListenerConfig& config, StreamInfo::StreamInfo& stream_info); + + Event::Dispatcher& dispatcher() { return dispatcher_; } + + /** + * Schedule to remove and destroy the active connections which are not tracked by listener + * config. Caution: The connection are not destroyed yet when function returns. + */ + void + deferredRemoveFilterChains(const std::list& draining_filter_chains) { + // Need to recover the original deleting state. + const bool was_deleting = is_deleting_; + is_deleting_ = true; + for (const auto* filter_chain : draining_filter_chains) { + removeFilterChain(filter_chain); + } + is_deleting_ = was_deleting; + } + + virtual void incNumConnections() PURE; + virtual void decNumConnections() PURE; + + /** + * Create a new connection from a socket accepted by the listener. 
+ */ + void newConnection(Network::ConnectionSocketPtr&& socket, + std::unique_ptr stream_info); + + /** + * Remove the socket from this listener. Should be called when the socket passes the listener + * filter. + * @return std::unique_ptr the exact same socket in the parameter but in the + * state that not owned by the listener. + */ + std::unique_ptr removeSocket(ActiveTcpSocket&& socket) { + return socket.removeFromList(sockets_); + } + + /** + * @return const std::list>& the sockets going through the + * listener filters. + */ + const std::list>& sockets() const { return sockets_; } + + /** + * Schedule removal and destruction of all active connections owned by a filter chain. + */ + virtual void removeFilterChain(const Network::FilterChain* filter_chain) PURE; + + virtual Network::BalancedConnectionHandlerOptRef + getBalancedHandlerByAddress(const Network::Address::Instance& address) PURE; + + void onSocketAccepted(std::unique_ptr active_socket) { + // Create and run the filters + config_->filterChainFactory().createListenerFilterChain(*active_socket); + active_socket->continueFilterChain(true); + + // Move active_socket to the sockets_ list if filter iteration needs to continue later. + // Otherwise we let active_socket be destructed when it goes out of scope. + if (active_socket->iter_ != active_socket->accept_filters_.end()) { + active_socket->startTimer(); + LinkedList::moveIntoListBack(std::move(active_socket), sockets_); + } else { + if (!active_socket->connected_) { + // If active_socket is about to be destructed, emit logs if a connection is not created. + if (active_socket->stream_info_ != nullptr) { + emitLogs(*config_, *active_socket->stream_info_); + } else { + // If the active_socket is not connected, this socket is not promoted to active + // connection. Thus the stream_info_ is owned by this active socket. 
+ ENVOY_BUG(active_socket->stream_info_ != nullptr, + "the unconnected active socket must have stream info."); + } + } + } + } + + // Below members are open to access by ActiveTcpSocket. + Network::ConnectionHandler& parent_; + const std::chrono::milliseconds listener_filters_timeout_; + const bool continue_on_listener_filters_timeout_; + +protected: + /** + * Create the active connection from server connection. This active listener owns the created + * active connection. + * + * @param filter_chain The network filter chain linking to the connection. + * @param server_conn_ptr The server connection. + * @param stream_info The stream info of the active connection. + */ + virtual void newActiveConnection(const Network::FilterChain& filter_chain, + Network::ServerConnectionPtr server_conn_ptr, + std::unique_ptr stream_info) PURE; + + std::list> sockets_; + Network::ListenerPtr listener_; + // True if the follow up connection deletion is raised by the connection collection deletion is + // performing. Otherwise, the collection should be deleted when the last connection in the + // collection is removed. This state is maintained in base class because this state is independent + // from concrete connection type. 
+ bool is_deleting_{false}; + +private: + Event::Dispatcher& dispatcher_; +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/active_tcp_listener.cc b/source/server/active_tcp_listener.cc index bed9854998b76..cee5494394436 100644 --- a/source/server/active_tcp_listener.cc +++ b/source/server/active_tcp_listener.cc @@ -3,12 +3,9 @@ #include #include "envoy/event/dispatcher.h" -#include "envoy/event/timer.h" -#include "envoy/network/filter.h" #include "envoy/stats/scope.h" #include "source/common/common/assert.h" -#include "source/common/event/deferred_task.h" #include "source/common/network/connection_impl.h" #include "source/common/network/utility.h" #include "source/common/stats/timespan_impl.h" @@ -16,29 +13,22 @@ namespace Envoy { namespace Server { -namespace { -void emitLogs(Network::ListenerConfig& config, StreamInfo::StreamInfo& stream_info) { - stream_info.onRequestComplete(); - for (const auto& access_log : config.accessLogs()) { - access_log->log(nullptr, nullptr, nullptr, stream_info); - } -} -} // namespace - ActiveTcpListener::ActiveTcpListener(Network::TcpConnectionHandler& parent, Network::ListenerConfig& config, uint32_t worker_index) - : ActiveTcpListener(parent, - parent.dispatcher().createListener( - config.listenSocketFactory().getListenSocket(worker_index), *this, - config.bindToPort()), - config) {} + : ActiveStreamListenerBase(parent, parent.dispatcher(), + parent.dispatcher().createListener( + config.listenSocketFactory().getListenSocket(worker_index), + *this, config.bindToPort()), + config), + tcp_conn_handler_(parent) { + config.connectionBalancer().registerHandler(*this); +} ActiveTcpListener::ActiveTcpListener(Network::TcpConnectionHandler& parent, Network::ListenerPtr&& listener, Network::ListenerConfig& config) - : ActiveListenerImplBase(parent, &config), parent_(parent), listener_(std::move(listener)), - listener_filters_timeout_(config.listenerFiltersTimeout()), - 
continue_on_listener_filters_timeout_(config.continueOnListenerFiltersTimeout()) { + : ActiveStreamListenerBase(parent, parent.dispatcher(), std::move(listener), config), + tcp_conn_handler_(parent) { config.connectionBalancer().registerHandler(*this); } @@ -49,18 +39,18 @@ ActiveTcpListener::~ActiveTcpListener() { // Purge sockets that have not progressed to connections. This should only happen when // a listener filter stops iteration and never resumes. while (!sockets_.empty()) { - ActiveTcpSocketPtr removed = sockets_.front()->removeFromList(sockets_); - parent_.dispatcher().deferredDelete(std::move(removed)); + auto removed = sockets_.front()->removeFromList(sockets_); + dispatcher().deferredDelete(std::move(removed)); } - for (auto& chain_and_connections : connections_by_context_) { - ASSERT(chain_and_connections.second != nullptr); - auto& connections = chain_and_connections.second->connections_; + for (auto& [chain, active_connections] : connections_by_context_) { + ASSERT(active_connections != nullptr); + auto& connections = active_connections->connections_; while (!connections.empty()) { connections.front()->connection_->close(Network::ConnectionCloseType::NoFlush); } } - parent_.dispatcher().clearDeferredDeleteList(); + dispatcher().clearDeferredDeleteList(); // By the time a listener is destroyed, in the common case, there should be no connections. 
// However, this is not always true if there is an in flight rebalanced connection that is @@ -75,15 +65,15 @@ ActiveTcpListener::~ActiveTcpListener() { void ActiveTcpListener::removeConnection(ActiveTcpConnection& connection) { ENVOY_CONN_LOG(debug, "adding to cleanup list", *connection.connection_); ActiveConnections& active_connections = connection.active_connections_; - ActiveTcpConnectionPtr removed = connection.removeFromList(active_connections.connections_); - parent_.dispatcher().deferredDelete(std::move(removed)); + auto removed = connection.removeFromList(active_connections.connections_); + dispatcher().deferredDelete(std::move(removed)); // Delete map entry only iff connections becomes empty. if (active_connections.connections_.empty()) { auto iter = connections_by_context_.find(&active_connections.filter_chain_); ASSERT(iter != connections_by_context_.end()); // To cover the lifetime of every single connection, Connections need to be deferred deleted // because the previously contained connection is deferred deleted. - parent_.dispatcher().deferredDelete(std::move(iter->second)); + dispatcher().deferredDelete(std::move(iter->second)); // The erase will break the iteration over the connections_by_context_ during the deletion. 
if (!is_deleting_) { connections_by_context_.erase(iter); @@ -97,114 +87,20 @@ void ActiveTcpListener::updateListenerConfig(Network::ListenerConfig& config) { config_ = &config; } -void ActiveTcpSocket::onTimeout() { - listener_.stats_.downstream_pre_cx_timeout_.inc(); - ASSERT(inserted()); - ENVOY_LOG(debug, "listener filter times out after {} ms", - listener_.listener_filters_timeout_.count()); - - if (listener_.continue_on_listener_filters_timeout_) { - ENVOY_LOG(debug, "fallback to default listener filter"); - newConnection(); - } - unlink(); -} - -void ActiveTcpSocket::startTimer() { - if (listener_.listener_filters_timeout_.count() > 0) { - timer_ = listener_.parent_.dispatcher().createTimer([this]() -> void { onTimeout(); }); - timer_->enableTimer(listener_.listener_filters_timeout_); - } -} - -void ActiveTcpSocket::unlink() { - ActiveTcpSocketPtr removed = removeFromList(listener_.sockets_); - if (removed->timer_ != nullptr) { - removed->timer_->disableTimer(); - } - // Emit logs if a connection is not established. - if (!connected_) { - emitLogs(*listener_.config_, *stream_info_); - } - listener_.parent_.dispatcher().deferredDelete(std::move(removed)); -} - -void ActiveTcpSocket::continueFilterChain(bool success) { - if (success) { - bool no_error = true; - if (iter_ == accept_filters_.end()) { - iter_ = accept_filters_.begin(); - } else { - iter_ = std::next(iter_); - } - - for (; iter_ != accept_filters_.end(); iter_++) { - Network::FilterStatus status = (*iter_)->onAccept(*this); - if (status == Network::FilterStatus::StopIteration) { - // The filter is responsible for calling us again at a later time to continue the filter - // chain from the next filter. - if (!socket().ioHandle().isOpen()) { - // break the loop but should not create new connection - no_error = false; - break; - } else { - // Blocking at the filter but no error - return; - } - } - } - // Successfully ran all the accept filters. 
- if (no_error) { - newConnection(); - } else { - // Signal the caller that no extra filter chain iteration is needed. - iter_ = accept_filters_.end(); - } - } - - // Filter execution concluded, unlink and delete this ActiveTcpSocket if it was linked. - if (inserted()) { - unlink(); - } -} - -void ActiveTcpSocket::setDynamicMetadata(const std::string& name, - const ProtobufWkt::Struct& value) { - stream_info_->setDynamicMetadata(name, value); -} - -void ActiveTcpSocket::newConnection() { - connected_ = true; - - // Check if the socket may need to be redirected to another listener. - Network::BalancedConnectionHandlerOptRef new_listener; - - if (hand_off_restored_destination_connections_ && - socket_->addressProvider().localAddressRestored()) { - // Find a listener associated with the original destination address. - new_listener = - listener_.parent_.getBalancedHandlerByAddress(*socket_->addressProvider().localAddress()); - } - if (new_listener.has_value()) { - // Hands off connections redirected by iptables to the listener associated with the - // original destination address. Pass 'hand_off_restored_destination_connections' as false to - // prevent further redirection. - // Leave the new listener to decide whether to execute re-balance. - // Note also that we must account for the number of connections properly across both listeners. - // TODO(mattklein123): See note in ~ActiveTcpSocket() related to making this accounting better. - listener_.decNumConnections(); - new_listener.value().get().onAcceptWorker(std::move(socket_), false, false); +void ActiveTcpListener::removeFilterChain(const Network::FilterChain* filter_chain) { + auto iter = connections_by_context_.find(filter_chain); + if (iter == connections_by_context_.end()) { + // It is possible when listener is stopping. } else { - // Set default transport protocol if none of the listener filters did it. 
- if (socket_->detectedTransportProtocol().empty()) { - socket_->setDetectedTransportProtocol("raw_buffer"); + auto& connections = iter->second->connections_; + while (!connections.empty()) { + connections.front()->connection_->close(Network::ConnectionCloseType::NoFlush); } - // Reset the file events which are registered by listener filter. - // reference https://github.com/envoyproxy/envoy/issues/8925. - socket_->ioHandle().resetFileEvents(); - accept_filters_.clear(); - // Create a new connection on this listener. - listener_.newConnection(std::move(socket_), std::move(stream_info_)); + // Since is_deleting_ is on, we need to manually remove the map value and drive the + // iterator. Defer delete connection container to avoid race condition in destroying + // connection. + dispatcher().deferredDelete(std::move(iter->second)); + connections_by_context_.erase(iter); } } @@ -247,28 +143,7 @@ void ActiveTcpListener::onAcceptWorker(Network::ConnectionSocketPtr&& socket, auto active_socket = std::make_unique(*this, std::move(socket), hand_off_restored_destination_connections); - // Create and run the filters. - config_->filterChainFactory().createListenerFilterChain(*active_socket); - active_socket->continueFilterChain(true); - - // Move active_socket to the sockets_ list if filter iteration needs to continue later. - // Otherwise we let active_socket be destructed when it goes out of scope. - if (active_socket->iter_ != active_socket->accept_filters_.end()) { - active_socket->startTimer(); - LinkedList::moveIntoListBack(std::move(active_socket), sockets_); - } else { - // If active_socket is about to be destructed, emit logs if a connection is not created. - if (!active_socket->connected_) { - if (active_socket->stream_info_ != nullptr) { - emitLogs(*config_, *active_socket->stream_info_); - } else { - // If the active_socket is not connected, this socket is not promoted to active connection. - // Thus the stream_info_ is owned by this active socket. 
- ENVOY_BUG(active_socket->stream_info_ != nullptr, - "the unconnected active socket must have stream info."); - } - } - } + onSocketAccepted(std::move(active_socket)); } void ActiveTcpListener::pauseListening() { @@ -283,49 +158,18 @@ void ActiveTcpListener::resumeListening() { } } -void ActiveTcpListener::newConnection(Network::ConnectionSocketPtr&& socket, - std::unique_ptr stream_info) { - - // Find matching filter chain. - const auto filter_chain = config_->filterChainManager().findFilterChain(*socket); - if (filter_chain == nullptr) { - RELEASE_ASSERT(socket->addressProvider().remoteAddress() != nullptr, ""); - ENVOY_LOG(debug, "closing connection from {}: no matching filter chain found", - socket->addressProvider().remoteAddress()->asString()); - stats_.no_filter_chain_match_.inc(); - stream_info->setResponseFlag(StreamInfo::ResponseFlag::NoRouteFound); - stream_info->setResponseCodeDetails(StreamInfo::ResponseCodeDetails::get().FilterChainNotFound); - emitLogs(*config_, *stream_info); - socket->close(); - return; - } - - stream_info->setFilterChainName(filter_chain->name()); - auto transport_socket = filter_chain->transportSocketFactory().createTransportSocket(nullptr); - stream_info->setDownstreamSslConnection(transport_socket->ssl()); - auto& active_connections = getOrCreateActiveConnections(*filter_chain); - auto server_conn_ptr = parent_.dispatcher().createServerConnection( - std::move(socket), std::move(transport_socket), *stream_info); - if (const auto timeout = filter_chain->transportSocketConnectTimeout(); - timeout != std::chrono::milliseconds::zero()) { - server_conn_ptr->setTransportSocketConnectTimeout(timeout); - } +Network::BalancedConnectionHandlerOptRef +ActiveTcpListener::getBalancedHandlerByAddress(const Network::Address::Instance& address) { + return tcp_conn_handler_.getBalancedHandlerByAddress(address); +} +void ActiveTcpListener::newActiveConnection(const Network::FilterChain& filter_chain, + Network::ServerConnectionPtr 
server_conn_ptr, + std::unique_ptr stream_info) { + auto& active_connections = getOrCreateActiveConnections(filter_chain); ActiveTcpConnectionPtr active_connection( new ActiveTcpConnection(active_connections, std::move(server_conn_ptr), - parent_.dispatcher().timeSource(), std::move(stream_info))); - active_connection->connection_->setBufferLimits(config_->perConnectionBufferLimitBytes()); - - RELEASE_ASSERT(active_connection->connection_->addressProvider().remoteAddress() != nullptr, ""); - - const bool empty_filter_chain = !config_->filterChainFactory().createNetworkFilterChain( - *active_connection->connection_, filter_chain->networkFilterFactories()); - if (empty_filter_chain) { - ENVOY_CONN_LOG(debug, "closing connection from {}: no filters", *active_connection->connection_, - active_connection->connection_->addressProvider().remoteAddress()->asString()); - active_connection->connection_->close(Network::ConnectionCloseType::NoFlush); - } - + dispatcher().timeSource(), std::move(stream_info))); // If the connection is already closed, we can just let this connection immediately die. if (active_connection->connection_->state() != Network::Connection::State::Closed) { ENVOY_CONN_LOG(debug, "new connection from {}", *active_connection->connection_, @@ -337,36 +181,13 @@ void ActiveTcpListener::newConnection(Network::ConnectionSocketPtr&& socket, ActiveConnections& ActiveTcpListener::getOrCreateActiveConnections(const Network::FilterChain& filter_chain) { - ActiveConnectionsPtr& connections = connections_by_context_[&filter_chain]; + ActiveConnectionCollectionPtr& connections = connections_by_context_[&filter_chain]; if (connections == nullptr) { connections = std::make_unique(*this, filter_chain); } return *connections; } -void ActiveTcpListener::deferredRemoveFilterChains( - const std::list& draining_filter_chains) { - // Need to recover the original deleting state. 
- const bool was_deleting = is_deleting_; - is_deleting_ = true; - for (const auto* filter_chain : draining_filter_chains) { - auto iter = connections_by_context_.find(filter_chain); - if (iter == connections_by_context_.end()) { - // It is possible when listener is stopping. - } else { - auto& connections = iter->second->connections_; - while (!connections.empty()) { - connections.front()->connection_->close(Network::ConnectionCloseType::NoFlush); - } - // Since is_deleting_ is on, we need to manually remove the map value and drive the iterator. - // Defer delete connection container to avoid race condition in destroying connection. - parent_.dispatcher().deferredDelete(std::move(iter->second)); - connections_by_context_.erase(iter); - } - } - is_deleting_ = was_deleting; -} - void ActiveTcpListener::post(Network::ConnectionSocketPtr&& socket) { // It is not possible to capture a unique_ptr because the post() API copies the lambda, so we must // bundle the socket inside a shared_ptr that can be captured. 
@@ -375,9 +196,10 @@ void ActiveTcpListener::post(Network::ConnectionSocketPtr&& socket) { RebalancedSocketSharedPtr socket_to_rebalance = std::make_shared(); socket_to_rebalance->socket = std::move(socket); - parent_.dispatcher().post([socket_to_rebalance, tag = config_->listenerTag(), &parent = parent_, - handoff = config_->handOffRestoredDestinationConnections()]() { - auto balanced_handler = parent.getBalancedHandlerByTag(tag); + dispatcher().post([socket_to_rebalance, tag = config_->listenerTag(), + &tcp_conn_handler = tcp_conn_handler_, + handoff = config_->handOffRestoredDestinationConnections()]() { + auto balanced_handler = tcp_conn_handler.getBalancedHandlerByTag(tag); if (balanced_handler.has_value()) { balanced_handler->get().onAcceptWorker(std::move(socket_to_rebalance->socket), handoff, true); return; @@ -418,7 +240,7 @@ ActiveTcpConnection::ActiveTcpConnection(ActiveConnections& active_connections, } ActiveTcpConnection::~ActiveTcpConnection() { - emitLogs(*active_connections_.listener_.config_, *stream_info_); + ActiveStreamListenerBase::emitLogs(*active_connections_.listener_.config_, *stream_info_); auto& listener = active_connections_.listener_; listener.stats_.downstream_cx_active_.dec(); listener.stats_.downstream_cx_destroy_.inc(); @@ -432,5 +254,14 @@ ActiveTcpConnection::~ActiveTcpConnection() { listener.parent_.decNumConnections(); } +void ActiveTcpConnection::onEvent(Network::ConnectionEvent event) { + ENVOY_LOG(trace, "[C{}] connection on event {}", connection_->id(), static_cast(event)); + // Any event leads to destruction of the connection. 
+ if (event == Network::ConnectionEvent::LocalClose || + event == Network::ConnectionEvent::RemoteClose) { + active_connections_.listener_.removeConnection(*this); + } +} + } // namespace Server } // namespace Envoy diff --git a/source/server/active_tcp_listener.h b/source/server/active_tcp_listener.h index 9898f3ae45909..9ea378f445395 100644 --- a/source/server/active_tcp_listener.h +++ b/source/server/active_tcp_listener.h @@ -1,22 +1,21 @@ #pragma once #include "envoy/event/dispatcher.h" -#include "envoy/event/timer.h" #include "envoy/stats/timespan.h" +#include "envoy/stream_info/stream_info.h" #include "source/common/common/linked_object.h" -#include "source/common/stream_info/stream_info_impl.h" #include "source/server/active_listener_base.h" +#include "source/server/active_stream_listener_base.h" +#include "source/server/active_tcp_socket.h" namespace Envoy { namespace Server { struct ActiveTcpConnection; using ActiveTcpConnectionPtr = std::unique_ptr; -struct ActiveTcpSocket; -using ActiveTcpSocketPtr = std::unique_ptr; class ActiveConnections; -using ActiveConnectionsPtr = std::unique_ptr; +using ActiveConnectionCollectionPtr = std::unique_ptr; namespace { // Structure used to allow a unique_ptr to be captured in a posted lambda. See below. @@ -30,22 +29,22 @@ using RebalancedSocketSharedPtr = std::shared_ptr; * Wrapper for an active tcp listener owned by this handler. 
*/ class ActiveTcpListener final : public Network::TcpListenerCallbacks, - public ActiveListenerImplBase, - public Network::BalancedConnectionHandler, - Logger::Loggable { + public ActiveStreamListenerBase, + public Network::BalancedConnectionHandler { public: ActiveTcpListener(Network::TcpConnectionHandler& parent, Network::ListenerConfig& config, uint32_t worker_index); ActiveTcpListener(Network::TcpConnectionHandler& parent, Network::ListenerPtr&& listener, Network::ListenerConfig& config); ~ActiveTcpListener() override; + bool listenerConnectionLimitReached() const { // TODO(tonya11en): Delegate enforcement of per-listener connection limits to overload // manager. return !config_->openConnections().canCreate(); } - void decNumConnections() { + void decNumConnections() override { ASSERT(num_listener_connections_ > 0); --num_listener_connections_; config_->openConnections().dec(); @@ -57,6 +56,9 @@ class ActiveTcpListener final : public Network::TcpListenerCallbacks, // ActiveListenerImplBase Network::Listener* listener() override { return listener_.get(); } + Network::BalancedConnectionHandlerOptRef + getBalancedHandlerByAddress(const Network::Address::Instance& address) override; + void pauseListening() override; void resumeListening() override; void shutdownListener() override { listener_.reset(); } @@ -71,49 +73,40 @@ class ActiveTcpListener final : public Network::TcpListenerCallbacks, void onAcceptWorker(Network::ConnectionSocketPtr&& socket, bool hand_off_restored_destination_connections, bool rebalanced) override; - /** - * Remove and destroy an active connection. - * @param connection supplies the connection to remove. - */ - void removeConnection(ActiveTcpConnection& connection); - - /** - * Create a new connection from a socket accepted by the listener. 
- */ - void newConnection(Network::ConnectionSocketPtr&& socket, - std::unique_ptr stream_info); + void newActiveConnection(const Network::FilterChain& filter_chain, + Network::ServerConnectionPtr server_conn_ptr, + std::unique_ptr stream_info) override; /** * Return the active connections container attached with the given filter chain. */ ActiveConnections& getOrCreateActiveConnections(const Network::FilterChain& filter_chain); - /** - * Schedule to remove and destroy the active connections which are not tracked by listener - * config. Caution: The connection are not destroyed yet when function returns. - */ - void - deferredRemoveFilterChains(const std::list& draining_filter_chains); - /** * Update the listener config. The follow up connections will see the new config. The existing * connections are not impacted. */ void updateListenerConfig(Network::ListenerConfig& config); - Network::TcpConnectionHandler& parent_; - Network::ListenerPtr listener_; - const std::chrono::milliseconds listener_filters_timeout_; - const bool continue_on_listener_filters_timeout_; - std::list sockets_; - absl::flat_hash_map connections_by_context_; + void removeFilterChain(const Network::FilterChain* filter_chain) override; + + /** + * Remove and destroy an active connection. + * @param connection supplies the connection to remove. + */ + void removeConnection(ActiveTcpConnection& connection); + + absl::flat_hash_map> + connections_by_context_; + Network::TcpConnectionHandler& tcp_conn_handler_; // The number of connections currently active on this listener. This is typically used for // connection balancing across per-handler listeners. std::atomic num_listener_connections_{}; - bool is_deleting_{false}; }; +using ActiveTcpListenerOptRef = absl::optional>; + /** * Wrapper for a group of active connections which are attached to the same filter chain context. 
*/ @@ -134,20 +127,15 @@ class ActiveConnections : public Event::DeferredDeletable { */ struct ActiveTcpConnection : LinkedObject, public Event::DeferredDeletable, - public Network::ConnectionCallbacks { + public Network::ConnectionCallbacks, + Logger::Loggable { ActiveTcpConnection(ActiveConnections& active_connections, Network::ConnectionPtr&& new_connection, TimeSource& time_system, std::unique_ptr&& stream_info); ~ActiveTcpConnection() override; // Network::ConnectionCallbacks - void onEvent(Network::ConnectionEvent event) override { - // Any event leads to destruction of the connection. - if (event == Network::ConnectionEvent::LocalClose || - event == Network::ConnectionEvent::RemoteClose) { - active_connections_.listener_.removeConnection(*this); - } - } + void onEvent(Network::ConnectionEvent event) override; void onAboveWriteBufferHighWatermark() override {} void onBelowWriteBufferLowWatermark() override {} @@ -157,105 +145,5 @@ struct ActiveTcpConnection : LinkedObject, Stats::TimespanPtr conn_length_; }; -/** - * Wrapper for an active accepted TCP socket owned by this handler. 
- */ -struct ActiveTcpSocket : public Network::ListenerFilterManager, - public Network::ListenerFilterCallbacks, - LinkedObject, - public Event::DeferredDeletable, - Logger::Loggable { - ActiveTcpSocket(ActiveTcpListener& listener, Network::ConnectionSocketPtr&& socket, - bool hand_off_restored_destination_connections) - : listener_(listener), socket_(std::move(socket)), - hand_off_restored_destination_connections_(hand_off_restored_destination_connections), - iter_(accept_filters_.end()), - stream_info_(std::make_unique( - listener_.parent_.dispatcher().timeSource(), socket_->addressProviderSharedPtr(), - StreamInfo::FilterState::LifeSpan::Connection)) { - listener_.stats_.downstream_pre_cx_active_.inc(); - } - ~ActiveTcpSocket() override { - accept_filters_.clear(); - listener_.stats_.downstream_pre_cx_active_.dec(); - - // If the underlying socket is no longer attached, it means that it has been transferred to - // an active connection. In this case, the active connection will decrement the number - // of listener connections. - // TODO(mattklein123): In general the way we account for the number of listener connections - // is incredibly fragile. Revisit this by potentially merging ActiveTcpSocket and - // ActiveTcpConnection, having a shared object which does accounting (but would require - // another allocation, etc.). 
- if (socket_ != nullptr) { - listener_.decNumConnections(); - } - } - - void onTimeout(); - void startTimer(); - void unlink(); - void newConnection(); - - class GenericListenerFilter : public Network::ListenerFilter { - public: - GenericListenerFilter(const Network::ListenerFilterMatcherSharedPtr& matcher, - Network::ListenerFilterPtr listener_filter) - : listener_filter_(std::move(listener_filter)), matcher_(std::move(matcher)) {} - Network::FilterStatus onAccept(ListenerFilterCallbacks& cb) override { - if (isDisabled(cb)) { - return Network::FilterStatus::Continue; - } - return listener_filter_->onAccept(cb); - } - /** - * Check if this filter filter should be disabled on the incoming socket. - * @param cb the callbacks the filter instance can use to communicate with the filter chain. - **/ - bool isDisabled(ListenerFilterCallbacks& cb) { - if (matcher_ == nullptr) { - return false; - } else { - return matcher_->matches(cb); - } - } - - private: - const Network::ListenerFilterPtr listener_filter_; - const Network::ListenerFilterMatcherSharedPtr matcher_; - }; - using ListenerFilterWrapperPtr = std::unique_ptr; - - // Network::ListenerFilterManager - void addAcceptFilter(const Network::ListenerFilterMatcherSharedPtr& listener_filter_matcher, - Network::ListenerFilterPtr&& filter) override { - accept_filters_.emplace_back( - std::make_unique(listener_filter_matcher, std::move(filter))); - } - - // Network::ListenerFilterCallbacks - Network::ConnectionSocket& socket() override { return *socket_.get(); } - Event::Dispatcher& dispatcher() override { return listener_.parent_.dispatcher(); } - void continueFilterChain(bool success) override; - void setDynamicMetadata(const std::string& name, const ProtobufWkt::Struct& value) override; - envoy::config::core::v3::Metadata& dynamicMetadata() override { - return stream_info_->dynamicMetadata(); - }; - const envoy::config::core::v3::Metadata& dynamicMetadata() const override { - return stream_info_->dynamicMetadata(); - 
}; - - StreamInfo::FilterState& filterState() override { return *stream_info_->filterState().get(); } - - ActiveTcpListener& listener_; - Network::ConnectionSocketPtr socket_; - const bool hand_off_restored_destination_connections_; - std::list accept_filters_; - std::list::iterator iter_; - Event::TimerPtr timer_; - std::unique_ptr stream_info_; - bool connected_{false}; -}; -using ActiveTcpListenerOptRef = absl::optional>; - } // namespace Server } // namespace Envoy diff --git a/source/server/active_tcp_socket.cc b/source/server/active_tcp_socket.cc new file mode 100644 index 0000000000000..e939bffc26128 --- /dev/null +++ b/source/server/active_tcp_socket.cc @@ -0,0 +1,152 @@ +#include "source/server/active_tcp_socket.h" + +#include "envoy/network/filter.h" + +#include "source/common/stream_info/stream_info_impl.h" +#include "source/server/active_stream_listener_base.h" + +namespace Envoy { +namespace Server { + +ActiveTcpSocket::ActiveTcpSocket(ActiveStreamListenerBase& listener, + Network::ConnectionSocketPtr&& socket, + bool hand_off_restored_destination_connections) + : listener_(listener), socket_(std::move(socket)), + hand_off_restored_destination_connections_(hand_off_restored_destination_connections), + iter_(accept_filters_.end()), + stream_info_(std::make_unique( + listener_.dispatcher().timeSource(), socket_->addressProviderSharedPtr(), + StreamInfo::FilterState::LifeSpan::Connection)) { + listener_.stats_.downstream_pre_cx_active_.inc(); +} + +ActiveTcpSocket::~ActiveTcpSocket() { + accept_filters_.clear(); + listener_.stats_.downstream_pre_cx_active_.dec(); + + // If the underlying socket is no longer attached, it means that it has been transferred to + // an active connection. In this case, the active connection will decrement the number + // of listener connections. + // TODO(mattklein123): In general the way we account for the number of listener connections + // is incredibly fragile. 
Revisit this by potentially merging ActiveTcpSocket and + // ActiveTcpConnection, having a shared object which does accounting (but would require + // another allocation, etc.). + if (socket_ != nullptr) { + listener_.decNumConnections(); + } +} + +Event::Dispatcher& ActiveTcpSocket::dispatcher() { return listener_.dispatcher(); } + +void ActiveTcpSocket::onTimeout() { + listener_.stats_.downstream_pre_cx_timeout_.inc(); + ASSERT(inserted()); + ENVOY_LOG(debug, "listener filter times out after {} ms", + listener_.listener_filters_timeout_.count()); + + if (listener_.continue_on_listener_filters_timeout_) { + ENVOY_LOG(debug, "fallback to default listener filter"); + newConnection(); + } + unlink(); +} + +void ActiveTcpSocket::startTimer() { + if (listener_.listener_filters_timeout_.count() > 0) { + timer_ = listener_.dispatcher().createTimer([this]() -> void { onTimeout(); }); + timer_->enableTimer(listener_.listener_filters_timeout_); + } +} + +void ActiveTcpSocket::unlink() { + auto removed = listener_.removeSocket(std::move(*this)); + if (removed->timer_ != nullptr) { + removed->timer_->disableTimer(); + } + // Emit logs if a connection is not established. + if (!connected_ && stream_info_ != nullptr) { + ActiveStreamListenerBase::emitLogs(*listener_.config_, *stream_info_); + } + listener_.dispatcher().deferredDelete(std::move(removed)); +} + +void ActiveTcpSocket::continueFilterChain(bool success) { + if (success) { + bool no_error = true; + if (iter_ == accept_filters_.end()) { + iter_ = accept_filters_.begin(); + } else { + iter_ = std::next(iter_); + } + + for (; iter_ != accept_filters_.end(); iter_++) { + Network::FilterStatus status = (*iter_)->onAccept(*this); + if (status == Network::FilterStatus::StopIteration) { + // The filter is responsible for calling us again at a later time to continue the filter + // chain from the next filter. 
+ if (!socket().ioHandle().isOpen()) { + // break the loop but should not create new connection + no_error = false; + break; + } else { + // Blocking at the filter but no error + return; + } + } + } + // Successfully ran all the accept filters. + if (no_error) { + newConnection(); + } else { + // Signal the caller that no extra filter chain iteration is needed. + iter_ = accept_filters_.end(); + } + } + + // Filter execution concluded, unlink and delete this ActiveTcpSocket if it was linked. + if (inserted()) { + unlink(); + } +} + +void ActiveTcpSocket::setDynamicMetadata(const std::string& name, + const ProtobufWkt::Struct& value) { + stream_info_->setDynamicMetadata(name, value); +} + +void ActiveTcpSocket::newConnection() { + connected_ = true; + + // Check if the socket may need to be redirected to another listener. + Network::BalancedConnectionHandlerOptRef new_listener; + + if (hand_off_restored_destination_connections_ && + socket_->addressProvider().localAddressRestored()) { + // Find a listener associated with the original destination address. + new_listener = + listener_.getBalancedHandlerByAddress(*socket_->addressProvider().localAddress()); + } + if (new_listener.has_value()) { + // Hands off connections redirected by iptables to the listener associated with the + // original destination address. Pass 'hand_off_restored_destination_connections' as false to + // prevent further redirection. + // Leave the new listener to decide whether to execute re-balance. + // Note also that we must account for the number of connections properly across both listeners. + // TODO(mattklein123): See note in ~ActiveTcpSocket() related to making this accounting better. + listener_.decNumConnections(); + new_listener.value().get().onAcceptWorker(std::move(socket_), false, false); + } else { + // Set default transport protocol if none of the listener filters did it. 
+ if (socket_->detectedTransportProtocol().empty()) { + socket_->setDetectedTransportProtocol("raw_buffer"); + } + // Reset the file events which are registered by listener filter. + // reference https://github.com/envoyproxy/envoy/issues/8925. + socket_->ioHandle().resetFileEvents(); + accept_filters_.clear(); + // Create a new connection on this listener. + listener_.newConnection(std::move(socket_), std::move(stream_info_)); + } +} +} // namespace Server +} // namespace Envoy diff --git a/source/server/active_tcp_socket.h b/source/server/active_tcp_socket.h new file mode 100644 index 0000000000000..9900bbd66b883 --- /dev/null +++ b/source/server/active_tcp_socket.h @@ -0,0 +1,102 @@ +#pragma once + +#include +#include +#include +#include + +#include "envoy/common/time.h" +#include "envoy/event/deferred_deletable.h" +#include "envoy/event/dispatcher.h" +#include "envoy/network/filter.h" +#include "envoy/network/listen_socket.h" +#include "envoy/network/listener.h" + +#include "source/common/common/linked_object.h" +#include "source/server/active_listener_base.h" + +namespace Envoy { +namespace Server { + +class ActiveStreamListenerBase; + +/** + * Wrapper for an active accepted socket owned by the active tcp listener. 
+ */
+struct ActiveTcpSocket : public Network::ListenerFilterManager,
+ public Network::ListenerFilterCallbacks,
+ LinkedObject,
+ public Event::DeferredDeletable,
+ Logger::Loggable {
+ ActiveTcpSocket(ActiveStreamListenerBase& listener, Network::ConnectionSocketPtr&& socket,
+ bool hand_off_restored_destination_connections);
+ ~ActiveTcpSocket() override;
+
+ void onTimeout();
+ void startTimer();
+ void unlink();
+ void newConnection();
+
+ class GenericListenerFilter : public Network::ListenerFilter {
+ public:
+ GenericListenerFilter(const Network::ListenerFilterMatcherSharedPtr& matcher,
+ Network::ListenerFilterPtr listener_filter)
+ : listener_filter_(std::move(listener_filter)), matcher_(std::move(matcher)) {}
+ Network::FilterStatus onAccept(ListenerFilterCallbacks& cb) override {
+ if (isDisabled(cb)) {
+ return Network::FilterStatus::Continue;
+ }
+ return listener_filter_->onAccept(cb);
+ }
+ /**
+ * Check if this filter should be disabled on the incoming socket.
+ * @param cb the callbacks the filter instance can use to communicate with the filter chain. 
+ **/ + bool isDisabled(ListenerFilterCallbacks& cb) { + if (matcher_ == nullptr) { + return false; + } else { + return matcher_->matches(cb); + } + } + + private: + const Network::ListenerFilterPtr listener_filter_; + const Network::ListenerFilterMatcherSharedPtr matcher_; + }; + using ListenerFilterWrapperPtr = std::unique_ptr; + + // Network::ListenerFilterManager + void addAcceptFilter(const Network::ListenerFilterMatcherSharedPtr& listener_filter_matcher, + Network::ListenerFilterPtr&& filter) override { + accept_filters_.emplace_back( + std::make_unique(listener_filter_matcher, std::move(filter))); + } + + // Network::ListenerFilterCallbacks + Network::ConnectionSocket& socket() override { return *socket_.get(); } + Event::Dispatcher& dispatcher() override; + void continueFilterChain(bool success) override; + void setDynamicMetadata(const std::string& name, const ProtobufWkt::Struct& value) override; + envoy::config::core::v3::Metadata& dynamicMetadata() override { + return stream_info_->dynamicMetadata(); + }; + const envoy::config::core::v3::Metadata& dynamicMetadata() const override { + return stream_info_->dynamicMetadata(); + }; + + StreamInfo::FilterState& filterState() override { return *stream_info_->filterState().get(); } + + // The owner of this ActiveTcpSocket. 
+ ActiveStreamListenerBase& listener_; + Network::ConnectionSocketPtr socket_; + const bool hand_off_restored_destination_connections_; + std::list accept_filters_; + std::list::iterator iter_; + Event::TimerPtr timer_; + std::unique_ptr stream_info_; + bool connected_{false}; +}; + +} // namespace Server +} // namespace Envoy diff --git a/source/server/connection_handler_impl.cc b/source/server/connection_handler_impl.cc index c79387f8c2033..4f64afc227b06 100644 --- a/source/server/connection_handler_impl.cc +++ b/source/server/connection_handler_impl.cc @@ -134,7 +134,8 @@ void ConnectionHandlerImpl::setListenerRejectFraction(UnitFloat reject_fraction) } } -ActiveTcpListenerOptRef ConnectionHandlerImpl::ActiveListenerDetails::tcpListener() { +ConnectionHandlerImpl::ActiveTcpListenerOptRef +ConnectionHandlerImpl::ActiveListenerDetails::tcpListener() { auto* val = absl::get_if>(&typed_listener_); return (val != nullptr) ? absl::make_optional(*val) : absl::nullopt; } diff --git a/source/server/connection_handler_impl.h b/source/server/connection_handler_impl.h index 24ff9cffafde0..f10424b7c1a2d 100644 --- a/source/server/connection_handler_impl.h +++ b/source/server/connection_handler_impl.h @@ -68,9 +68,7 @@ class ConnectionHandlerImpl : public Network::TcpConnectionHandler, // Strong pointer to the listener, whether TCP, UDP, QUIC, etc. Network::ConnectionHandler::ActiveListenerPtr listener_; - absl::variant, + absl::variant, std::reference_wrapper> typed_listener_; diff --git a/test/server/active_tcp_listener_test.cc b/test/server/active_tcp_listener_test.cc index baaf57421ec08..8284559cd5ca8 100644 --- a/test/server/active_tcp_listener_test.cc +++ b/test/server/active_tcp_listener_test.cc @@ -110,9 +110,9 @@ TEST_F(ActiveTcpListenerTest, PopulateSNIWhenActiveTcpSocketTimeout) { // calling the onAcceptWorker() to create the ActiveTcpSocket. 
active_listener->onAcceptWorker(std::move(accepted_socket), false, false); // get the ActiveTcpSocket pointer before unlink() removed from the link-list. - ActiveTcpSocket* tcp_socket = active_listener->sockets_.front().get(); + ActiveTcpSocket* tcp_socket = active_listener->sockets().front().get(); // trigger the onTimeout event manually, since the timer is fake. - active_listener->sockets_.front()->onTimeout(); + active_listener->sockets().front()->onTimeout(); EXPECT_EQ(server_name, tcp_socket->stream_info_->downstreamAddressProvider().requestedServerName()); From 2f134875b4024d8247ce1a4f15de71b38e80666f Mon Sep 17 00:00:00 2001 From: tyxia <72890320+tyxia@users.noreply.github.com> Date: Thu, 22 Jul 2021 20:52:56 -0400 Subject: [PATCH 38/57] Remove `hidden_envoy_deprecated_idle_timeout` (#17451) Risk Level: LOW Testing: CI Signed-off-by: Tianyu Xia --- .../network/http_connection_manager/config.cc | 6 ------ .../http_connection_manager/config_test.cc | 19 ------------------- 2 files changed, 25 deletions(-) diff --git a/source/extensions/filters/network/http_connection_manager/config.cc b/source/extensions/filters/network/http_connection_manager/config.cc index 4e5b9c4757d78..d1ebbddaea137 100644 --- a/source/extensions/filters/network/http_connection_manager/config.cc +++ b/source/extensions/filters/network/http_connection_manager/config.cc @@ -333,12 +333,6 @@ HttpConnectionManagerConfig::HttpConnectionManagerConfig( strip_trailing_host_dot_(config.strip_trailing_host_dot()), max_requests_per_connection_(PROTOBUF_GET_WRAPPED_OR_DEFAULT( config.common_http_protocol_options(), max_requests_per_connection, 0)) { - // If idle_timeout_ was not configured in common_http_protocol_options, use value in deprecated - // idle_timeout field. - // TODO(asraa): Remove when idle_timeout is removed. 
- if (!idle_timeout_) { - idle_timeout_ = PROTOBUF_GET_OPTIONAL_MS(config, hidden_envoy_deprecated_idle_timeout); - } if (!idle_timeout_) { idle_timeout_ = std::chrono::hours(1); } else if (idle_timeout_.value().count() == 0) { diff --git a/test/extensions/filters/network/http_connection_manager/config_test.cc b/test/extensions/filters/network/http_connection_manager/config_test.cc index bf073cd25a2a2..80065d021eb91 100644 --- a/test/extensions/filters/network/http_connection_manager/config_test.cc +++ b/test/extensions/filters/network/http_connection_manager/config_test.cc @@ -776,25 +776,6 @@ TEST_F(HttpConnectionManagerConfigTest, DisabledStreamIdleTimeout) { EXPECT_EQ(0, config.streamIdleTimeout().count()); } -// Validate that deprecated idle_timeout is still ingested. -TEST_F(HttpConnectionManagerConfigTest, DEPRECATED_FEATURE_TEST(IdleTimeout)) { - TestDeprecatedV2Api _deprecated_v2_api; - const std::string yaml_string = R"EOF( - stat_prefix: ingress_http - idle_timeout: 1s - route_config: - name: local_route - http_filters: - - name: envoy.filters.http.router - )EOF"; - - HttpConnectionManagerConfig config(parseHttpConnectionManagerFromYaml(yaml_string, false), - context_, date_provider_, route_config_provider_manager_, - scoped_routes_config_provider_manager_, http_tracer_manager_, - filter_config_provider_manager_); - EXPECT_EQ(1000, config.idleTimeout().value().count()); -} - // Validate that idle_timeout set in common_http_protocol_options is used. TEST_F(HttpConnectionManagerConfigTest, CommonHttpProtocolIdleTimeout) { const std::string yaml_string = R"EOF( From d977ce37fccbaca95706716c7d0df75855a21e5a Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Thu, 22 Jul 2021 17:56:07 -0700 Subject: [PATCH 39/57] access log: fix %UPSTREAM_CLUSTER% in upstream http access logs (#17453) This has been broken for several versions, possibly by commit 980c84d. 
Risk Level: Low Testing: Added new test Signed-off-by: Greg Greenway --- docs/root/version_history/current.rst | 1 + source/common/router/upstream_request.cc | 5 ++++ .../common/router/router_upstream_log_test.cc | 30 +++++++++++++++++++ 3 files changed, 36 insertions(+) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 38a869bf3c79d..c1081bb800819 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -36,6 +36,7 @@ Bug Fixes --------- *Changes expected to improve the state of the world and are unlikely to have negative effects* +* access log: fix `%UPSTREAM_CLUSTER%` when used in http upstream access logs. Previously, it was always logging as an unset value. * xray: fix the AWS X-Ray tracer bug where span's error, fault and throttle information was not reported properly as per the `AWS X-Ray documentation `_. Before this fix, server error was reported under 'annotations' section of the segment data. 
Removed Config or Runtime diff --git a/source/common/router/upstream_request.cc b/source/common/router/upstream_request.cc index 3dd9cf0394129..03efc074b9a96 100644 --- a/source/common/router/upstream_request.cc +++ b/source/common/router/upstream_request.cc @@ -62,6 +62,11 @@ UpstreamRequest::UpstreamRequest(RouterFilterInterface& parent, } stream_info_.healthCheck(parent_.callbacks()->streamInfo().healthCheck()); + absl::optional cluster_info = + parent_.callbacks()->streamInfo().upstreamClusterInfo(); + if (cluster_info.has_value()) { + stream_info_.setUpstreamClusterInfo(*cluster_info); + } } UpstreamRequest::~UpstreamRequest() { diff --git a/test/common/router/router_upstream_log_test.cc b/test/common/router/router_upstream_log_test.cc index e9d2260504d90..6831f32dd3963 100644 --- a/test/common/router/router_upstream_log_test.cc +++ b/test/common/router/router_upstream_log_test.cc @@ -83,6 +83,12 @@ class RouterUpstreamLogTest : public testing::Test { public: void init(absl::optional upstream_log) { envoy::extensions::filters::http::router::v3::Router router_proto; + static const std::string cluster_name = "cluster_0"; + + cluster_info_ = std::make_shared>(); + ON_CALL(*cluster_info_, name()).WillByDefault(ReturnRef(cluster_name)); + ON_CALL(*cluster_info_, observabilityName()).WillByDefault(ReturnRef(cluster_name)); + ON_CALL(callbacks_.stream_info_, upstreamClusterInfo()).WillByDefault(Return(cluster_info_)); if (upstream_log) { ON_CALL(*context_.access_log_manager_.file_, write(_)) @@ -160,6 +166,7 @@ class RouterUpstreamLogTest : public testing::Test { EXPECT_CALL(context_.cluster_manager_.thread_local_cluster_.conn_pool_.host_->outlier_detector_, putHttpResponseCode(response_code)); + // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage) response_decoder->decodeHeaders(std::move(response_headers), false); Http::ResponseTrailerMapPtr response_trailers( @@ -245,6 +252,7 @@ class RouterUpstreamLogTest : public testing::Test { NiceMock callbacks_; 
std::shared_ptr config_; std::shared_ptr router_; + std::shared_ptr> cluster_info_; NiceMock stream_info_; }; @@ -365,5 +373,27 @@ name: accesslog EXPECT_EQ(output_.front(), "110 49 41"); } +// Test UPSTREAM_CLUSTER log formatter. +TEST_F(RouterUpstreamLogTest, UpstreamCluster) { + const std::string yaml = R"EOF( +name: accesslog +typed_config: + "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog + log_format: + text_format_source: + inline_string: "%UPSTREAM_CLUSTER%" + path: "/dev/null" + )EOF"; + + envoy::config::accesslog::v3::AccessLog upstream_log; + TestUtility::loadFromYaml(yaml, upstream_log); + + init(absl::optional(upstream_log)); + run(); + + EXPECT_EQ(output_.size(), 1U); + EXPECT_EQ(output_.front(), "cluster_0"); +} + } // namespace Router } // namespace Envoy From 550e4ad0adee3793b4c4895593dd71873babfb6d Mon Sep 17 00:00:00 2001 From: Kateryna Nezdolii Date: Fri, 23 Jul 2021 17:43:28 +0200 Subject: [PATCH 40/57] Fix yaml example in api (#17462) Signed-off-by: Kateryna Nezdolii --- .../http_connection_manager/v3/http_connection_manager.proto | 1 + .../http_connection_manager/v3/http_connection_manager.proto | 1 + .../v4alpha/http_connection_manager.proto | 1 + 3 files changed, 3 insertions(+) diff --git a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 959906d880cd1..fa65ae4bcf757 100644 --- a/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -512,6 +512,7 @@ message HttpConnectionManager { // .. 
code-block:: yaml // // original_ip_detection_extensions: + // - name: envoy.http.original_ip_detection.xff // typed_config: // "@type": type.googleapis.com/envoy.extensions.http.original_ip_detection.xff.v3.XffConfig // xff_num_trusted_hops: 1 diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto index 533340aaf194b..f09aac839adc5 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto @@ -517,6 +517,7 @@ message HttpConnectionManager { // .. code-block:: yaml // // original_ip_detection_extensions: + // - name: envoy.http.original_ip_detection.xff // typed_config: // "@type": type.googleapis.com/envoy.extensions.http.original_ip_detection.xff.v3.XffConfig // xff_num_trusted_hops: 1 diff --git a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto index 8d2799e750d61..d2332a1c9bb91 100644 --- a/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto +++ b/generated_api_shadow/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto @@ -515,6 +515,7 @@ message HttpConnectionManager { // .. 
code-block:: yaml // // original_ip_detection_extensions: + // - name: envoy.http.original_ip_detection.xff // typed_config: // "@type": type.googleapis.com/envoy.extensions.http.original_ip_detection.xff.v3.XffConfig // xff_num_trusted_hops: 1 From de3f511b658e3a5d182177b49aca83d446896c3c Mon Sep 17 00:00:00 2001 From: Matt Klein Date: Fri, 23 Jul 2021 14:15:12 -0600 Subject: [PATCH 41/57] config: allow .yml files to load as YAML (#17455) This is a common source of user confusion. Signed-off-by: Matt Klein --- docs/root/version_history/current.rst | 2 + source/common/protobuf/utility.cc | 7 ++- source/common/protobuf/utility.h | 1 + test/common/protobuf/utility_test.cc | 59 ++++++++++++++++++- test/config/integration/BUILD | 2 +- ...ootstrap.yaml => server_xds.bootstrap.yml} | 0 .../dynamic_validation_integration_test.cc | 10 ++-- test/integration/xds_integration_test.cc | 4 +- test/test_common/environment.cc | 4 +- 9 files changed, 73 insertions(+), 16 deletions(-) rename test/config/integration/{server_xds.bootstrap.yaml => server_xds.bootstrap.yml} (100%) diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index c1081bb800819..9877d650b1233 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -9,6 +9,8 @@ Minor Behavior Changes ---------------------- *Changes that may cause incompatibilities for some users, but should not for most* +* config: configuration files ending in .yml now load as YAML. +* config: configuration file extensions now ignore case when deciding the file type. E.g., .JSON file load as JSON. * config: reduced log level for "Unable to establish new stream" xDS logs to debug. 
The log level for "gRPC config stream closed" is now reduced to debug when the status is ``Ok`` or has been retriable (``DeadlineExceeded``, ``ResourceExhausted``, or ``Unavailable``) for less than 30 diff --git a/source/common/protobuf/utility.cc b/source/common/protobuf/utility.cc index d611a3a69318b..04f5907db2cb1 100644 --- a/source/common/protobuf/utility.cc +++ b/source/common/protobuf/utility.cc @@ -423,7 +423,7 @@ void MessageUtil::loadFromFile(const std::string& path, Protobuf::Message& messa Api::Api& api, bool do_boosting) { const std::string contents = api.fileSystem().fileReadToEnd(path); // If the filename ends with .pb, attempt to parse it as a binary proto. - if (absl::EndsWith(path, FileExtensions::get().ProtoBinary)) { + if (absl::EndsWithIgnoreCase(path, FileExtensions::get().ProtoBinary)) { // Attempt to parse the binary format. auto read_proto_binary = [&contents, &validation_visitor](Protobuf::Message& message, MessageVersion message_version) { @@ -459,7 +459,7 @@ void MessageUtil::loadFromFile(const std::string& path, Protobuf::Message& messa } // If the filename ends with .pb_text, attempt to parse it as a text proto. 
- if (absl::EndsWith(path, FileExtensions::get().ProtoText)) { + if (absl::EndsWithIgnoreCase(path, FileExtensions::get().ProtoText)) { auto read_proto_text = [&contents, &path](Protobuf::Message& message, MessageVersion message_version) { if (Protobuf::TextFormat::ParseFromString(contents, &message)) { @@ -482,7 +482,8 @@ void MessageUtil::loadFromFile(const std::string& path, Protobuf::Message& messa } return; } - if (absl::EndsWith(path, FileExtensions::get().Yaml)) { + if (absl::EndsWithIgnoreCase(path, FileExtensions::get().Yaml) || + absl::EndsWithIgnoreCase(path, FileExtensions::get().Yml)) { loadFromYaml(contents, message, validation_visitor, do_boosting); } else { loadFromJson(contents, message, validation_visitor, do_boosting); diff --git a/source/common/protobuf/utility.h b/source/common/protobuf/utility.h index 0ba43337a8344..7ccc41394eafd 100644 --- a/source/common/protobuf/utility.h +++ b/source/common/protobuf/utility.h @@ -237,6 +237,7 @@ class MessageUtil { const std::string ProtoText = ".pb_text"; const std::string Json = ".json"; const std::string Yaml = ".yaml"; + const std::string Yml = ".yml"; }; using FileExtensions = ConstSingleton; diff --git a/test/common/protobuf/utility_test.cc b/test/common/protobuf/utility_test.cc index 070b6cecb46b0..bbb9c9c8a9130 100644 --- a/test/common/protobuf/utility_test.cc +++ b/test/common/protobuf/utility_test.cc @@ -293,8 +293,9 @@ TEST_F(ProtobufUtilityTest, LoadBinaryProtoFromFile) { ->mutable_source_address() ->set_address("1.1.1.1"); + // Test mixed case extension. 
const std::string filename = - TestEnvironment::writeStringToFileForTest("proto.pb", bootstrap.SerializeAsString()); + TestEnvironment::writeStringToFileForTest("proto.pB", bootstrap.SerializeAsString()); envoy::config::bootstrap::v3::Bootstrap proto_from_file; TestUtility::loadFromFile(filename, proto_from_file, *api_); @@ -302,6 +303,59 @@ TEST_F(ProtobufUtilityTest, LoadBinaryProtoFromFile) { EXPECT_TRUE(TestUtility::protoEqual(bootstrap, proto_from_file)); } +// Verify different YAML extensions using different cases. +TEST_F(ProtobufUtilityTest, YamlExtensions) { + const std::string bootstrap_yaml = R"EOF( +layered_runtime: + layers: + - name: static_layer + static_layer: + foo: true)EOF"; + + { + const std::string filename = + TestEnvironment::writeStringToFileForTest("proto.yAml", bootstrap_yaml); + + envoy::config::bootstrap::v3::Bootstrap proto_from_file; + TestUtility::loadFromFile(filename, proto_from_file, *api_); + TestUtility::validate(proto_from_file); + } + { + const std::string filename = + TestEnvironment::writeStringToFileForTest("proto.yMl", bootstrap_yaml); + + envoy::config::bootstrap::v3::Bootstrap proto_from_file; + TestUtility::loadFromFile(filename, proto_from_file, *api_); + TestUtility::validate(proto_from_file); + } +} + +// Verify different JSON extensions using different cases. +TEST_F(ProtobufUtilityTest, JsonExtensions) { + const std::string bootstrap_json = R"EOF( +{ + "layered_runtime": { + "layers": [ + { + "name": "static_layer", + "static_layer": { + "foo": true + } + } + ] + } +})EOF"; + + { + const std::string filename = + TestEnvironment::writeStringToFileForTest("proto.JSoN", bootstrap_json); + + envoy::config::bootstrap::v3::Bootstrap proto_from_file; + TestUtility::loadFromFile(filename, proto_from_file, *api_); + TestUtility::validate(proto_from_file); + } +} + // Verify that a config with a deprecated field can be loaded with runtime global override. 
TEST_F(ProtobufUtilityTest, DEPRECATED_FEATURE_TEST(LoadBinaryGlobalOverrideProtoFromFile)) { const std::string bootstrap_yaml = R"EOF( @@ -364,8 +418,9 @@ TEST_F(ProtobufUtilityTest, LoadTextProtoFromFile) { std::string bootstrap_text; ASSERT_TRUE(Protobuf::TextFormat::PrintToString(bootstrap, &bootstrap_text)); + // Test mixed case extension. const std::string filename = - TestEnvironment::writeStringToFileForTest("proto.pb_text", bootstrap_text); + TestEnvironment::writeStringToFileForTest("proto.pB_Text", bootstrap_text); envoy::config::bootstrap::v3::Bootstrap proto_from_file; TestUtility::loadFromFile(filename, proto_from_file, *api_); diff --git a/test/config/integration/BUILD b/test/config/integration/BUILD index 0c401ad01bb0a..94c9f35bd99d5 100644 --- a/test/config/integration/BUILD +++ b/test/config/integration/BUILD @@ -16,7 +16,7 @@ filegroup( name = "server_xds_files", srcs = [ "server_xds.bootstrap.udpa.yaml", - "server_xds.bootstrap.yaml", + "server_xds.bootstrap.yml", "server_xds.cds.with_unknown_field.yaml", "server_xds.cds.yaml", "server_xds.eds.ads_cluster.yaml", diff --git a/test/config/integration/server_xds.bootstrap.yaml b/test/config/integration/server_xds.bootstrap.yml similarity index 100% rename from test/config/integration/server_xds.bootstrap.yaml rename to test/config/integration/server_xds.bootstrap.yml diff --git a/test/integration/dynamic_validation_integration_test.cc b/test/integration/dynamic_validation_integration_test.cc index e6f7c5dddc53d..171b7bc22cf07 100644 --- a/test/integration/dynamic_validation_integration_test.cc +++ b/test/integration/dynamic_validation_integration_test.cc @@ -96,7 +96,7 @@ INSTANTIATE_TEST_SUITE_P( // Protocol options in CDS with unknown fields are rejected if and only if strict. 
TEST_P(DynamicValidationIntegrationTest, CdsProtocolOptionsRejected) { api_filesystem_config_ = { - "test/config/integration/server_xds.bootstrap.yaml", + "test/config/integration/server_xds.bootstrap.yml", "test/config/integration/server_xds.cds.with_unknown_field.yaml", "test/config/integration/server_xds.eds.yaml", "test/config/integration/server_xds.lds.yaml", @@ -122,7 +122,7 @@ TEST_P(DynamicValidationIntegrationTest, CdsProtocolOptionsRejected) { TEST_P(DynamicValidationIntegrationTest, LdsFilterRejected) { allow_lds_rejection_ = true; api_filesystem_config_ = { - "test/config/integration/server_xds.bootstrap.yaml", + "test/config/integration/server_xds.bootstrap.yml", "test/config/integration/server_xds.cds.yaml", "test/config/integration/server_xds.eds.yaml", "test/config/integration/server_xds.lds.with_unknown_field.yaml", @@ -153,7 +153,7 @@ TEST_P(DynamicValidationIntegrationTest, LdsFilterRejected) { TEST_P(DynamicValidationIntegrationTest, LdsFilterRejectedTypedStruct) { allow_lds_rejection_ = true; api_filesystem_config_ = { - "test/config/integration/server_xds.bootstrap.yaml", + "test/config/integration/server_xds.bootstrap.yml", "test/config/integration/server_xds.cds.yaml", "test/config/integration/server_xds.eds.yaml", "test/config/integration/server_xds.lds.with_unknown_field.typed_struct.yaml", @@ -182,7 +182,7 @@ TEST_P(DynamicValidationIntegrationTest, LdsFilterRejectedTypedStruct) { // Unknown fields in RDS cause config load failure if and only if strict. 
TEST_P(DynamicValidationIntegrationTest, RdsFailedBySubscription) { api_filesystem_config_ = { - "test/config/integration/server_xds.bootstrap.yaml", + "test/config/integration/server_xds.bootstrap.yml", "test/config/integration/server_xds.cds.yaml", "test/config/integration/server_xds.eds.yaml", "test/config/integration/server_xds.lds.yaml", @@ -210,7 +210,7 @@ TEST_P(DynamicValidationIntegrationTest, RdsFailedBySubscription) { // Unknown fields in EDS cause config load failure if and only if strict. TEST_P(DynamicValidationIntegrationTest, EdsFailedBySubscription) { api_filesystem_config_ = { - "test/config/integration/server_xds.bootstrap.yaml", + "test/config/integration/server_xds.bootstrap.yml", "test/config/integration/server_xds.cds.yaml", "test/config/integration/server_xds.eds.with_unknown_field.yaml", "test/config/integration/server_xds.lds.yaml", diff --git a/test/integration/xds_integration_test.cc b/test/integration/xds_integration_test.cc index 6a0cadb551b49..fe6a3cca164f4 100644 --- a/test/integration/xds_integration_test.cc +++ b/test/integration/xds_integration_test.cc @@ -27,7 +27,7 @@ class XdsIntegrationTest : public testing::TestWithParam Date: Fri, 23 Jul 2021 13:16:38 -0700 Subject: [PATCH 42/57] bazel: set PATH automatically on macOS (#17467) Previously there was a docs recommendation to set the path manually when building on macOS. Since we can use the `build:macos` config we can do this automatically. This also allows us to transparently solve PATH differences with Apple Silicon homebrew and intel homebrew, for the host configuration as well. This fixes this issue https://github.com/bazelbuild/rules_foreign_cc/issues/672 that intel homebrew didn't have because `--incompatible_strict_action_env` contains `/usr/local/bin` but not `/opt/homebrew/bin` This also reorders these so that we prefer pre-installed tools for stability. 
Signed-off-by: Keith Smiley --- .bazelrc | 2 ++ bazel/README.md | 11 ----------- ci/mac_ci_steps.sh | 1 - 3 files changed, 2 insertions(+), 12 deletions(-) diff --git a/.bazelrc b/.bazelrc index 2a9ede51a1aaa..2b2b0bad1f812 100644 --- a/.bazelrc +++ b/.bazelrc @@ -79,6 +79,8 @@ build:clang-asan --linkopt -fuse-ld=lld # macOS ASAN/UBSAN build:macos --cxxopt=-std=c++17 +build:macos --action_env=PATH=/usr/bin:/bin:/opt/homebrew/bin:/usr/local/bin:/opt/local/bin +build:macos --host_action_env=PATH=/usr/bin:/bin:/opt/homebrew/bin:/usr/local/bin:/opt/local/bin build:macos-asan --config=asan # Workaround, see https://github.com/bazelbuild/bazel/issues/6932 diff --git a/bazel/README.md b/bazel/README.md index 790c69e70d965..b8c2b0a03f8df 100644 --- a/bazel/README.md +++ b/bazel/README.md @@ -113,17 +113,6 @@ for how to update or override dependencies. Envoy compiles and passes tests with the version of clang installed by Xcode 11.1: Apple clang version 11.0.0 (clang-1100.0.33.8). - In order for bazel to be aware of the tools installed by brew, the PATH - variable must be set for bazel builds. This can be accomplished by setting - this in your `user.bazelrc` file: - - ``` - build --action_env=PATH="/usr/local/bin:/opt/local/bin:/usr/bin:/bin" - ``` - - Alternatively, you can pass `--action_env` on the command line when running - `bazel build`/`bazel test`. - Having the binutils keg installed in Brew is known to cause issues due to putting an incompatible version of `ar` on the PATH, so if you run into issues building third party code like luajit consider uninstalling binutils. 
diff --git a/ci/mac_ci_steps.sh b/ci/mac_ci_steps.sh index ce95312827d5d..844351d51d91e 100755 --- a/ci/mac_ci_steps.sh +++ b/ci/mac_ci_steps.sh @@ -25,7 +25,6 @@ BAZEL_BUILD_OPTIONS=( "--curses=no" --show_task_finish --verbose_failures - "--action_env=PATH=/usr/local/bin:/opt/local/bin:/usr/bin:/bin" "--test_output=all" "--flaky_test_attempts=integration@2" "--override_repository=envoy_build_config=${BUILD_CONFIG}" From b4027cf67516e094505d3e7d3489b3941c740a5a Mon Sep 17 00:00:00 2001 From: Greg Brail Date: Mon, 26 Jul 2021 05:27:22 -0700 Subject: [PATCH 43/57] ext_proc: Update docs landing page (#17431) This clarifies the language on the documentation page for the ext_proc filter to clarify the implementation status and to link to the reference page where it is kept up to date. Signed-off-by: Gregory Brail --- .../http/http_filters/ext_proc_filter.rst | 28 +++++++++++++------ 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/docs/root/configuration/http/http_filters/ext_proc_filter.rst b/docs/root/configuration/http/http_filters/ext_proc_filter.rst index 004e2744213a3..989413a878171 100644 --- a/docs/root/configuration/http/http_filters/ext_proc_filter.rst +++ b/docs/root/configuration/http/http_filters/ext_proc_filter.rst @@ -5,18 +5,28 @@ External Processing * :ref:`Http filter v3 API reference ` * This filter should be configured with the name *envoy.filters.http.ext_proc* -The external processing filter calls an external gRPC service to enable it to participate in -HTTP filter chain processing. The filter is called using a gRPC bidirectional stream, and allows -the filter to make decisions in real time about what parts of the HTTP request / response stream -are sent to the filter for processing. +The external processing filter connects an external service, called an "external processor," +to the filter chain. 
The processing service itself implements a gRPC interface that allows +it to respond to events in the lifecycle of an HTTP request / response by examining +and modifying the headers, body, and trailers of each message, or by returning a brand-new response. The protocol itself is based on a bidirectional gRPC stream. Envoy will send the -server +external processor :ref:`ProcessingRequest ` -messages, and the server must reply with -:ref:`ProcessingResponse `. - -This filter is a work in progress. In its current state, it actually does nothing. +messages, and the processor must reply with +:ref:`ProcessingResponse ` +messages. + +Configuration options are provided to control which events are sent to the processor. +This way, the processor may receive headers, body, and trailers for both +request and response in any combination. The processor may also change this configuration +on a message-by-message basis. This allows for the construction of sophisticated processors +that decide how to respond to each message individually to eliminate unnecessary +stream requests from the proxy. + +This filter is a work in progress. Most of the major bits of functionality +are complete. The updated list of supported features and implementation status may +be found on the :ref:`reference page `. Statistics ---------- From 507f5e6d27e5ed676b6c2e7ed794b27e0d9cc876 Mon Sep 17 00:00:00 2001 From: Le Yao Date: Mon, 26 Jul 2021 20:52:16 +0800 Subject: [PATCH 44/57] docs: Fix the confusing word in http3 upsteam documents (#17481) Signed-off-by: Le Yao --- source/docs/http3_upstream.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/docs/http3_upstream.md b/source/docs/http3_upstream.md index 456998e949ab4..f97103d1e3e44 100644 --- a/source/docs/http3_upstream.md +++ b/source/docs/http3_upstream.md @@ -26,7 +26,7 @@ HTTP/3 is marked as broken it will be for the initial 5 minutes. 
#### Alternate Protocols Cache The `AlternateProtocolsCache` is responsible for tracking servers which advertise HTTP/3. -These advertisements may arrive via HTTP Altnernate Service (alt-svc) or soon via the HTTPS +These advertisements may arrive via HTTP Alternative Service (alt-svc) or soon via the HTTPS DNS RR. Currently only advertisements which specify the same hostname and port are stored. #### Connectivity Grid From 63933a30aa079f1d5f0cdcbacd288bd6fabc1537 Mon Sep 17 00:00:00 2001 From: phlax Date: Mon, 26 Jul 2021 14:08:17 +0100 Subject: [PATCH 45/57] tooling: Cleanups and utils (#17477) Signed-off-by: Ryan Northey --- .github/dependabot.yml | 5 -- bazel/repositories_extra.bzl | 5 -- tools/base/BUILD | 7 +- tools/base/checker.py | 16 ++++- tools/base/requirements.txt | 31 +++++++++ tools/base/tests/test_checker.py | 39 +++++++++-- tools/base/tests/test_utils.py | 64 +++++++++++++++++-- tools/base/utils.py | 28 +++++++- tools/dependency/BUILD | 3 +- tools/dependency/pip_check.py | 7 +- tools/dependency/requirements.txt | 31 --------- tools/dependency/tests/test_pip_check.py | 15 ++--- tools/docs/BUILD | 4 +- .../docs/generate_extensions_security_rst.py | 6 +- tools/docs/requirements.txt | 31 --------- tools/docs/sphinx_runner.py | 8 +-- tools/docs/tests/test_sphinx_runner.py | 18 ++---- tools/extensions/BUILD | 3 +- tools/extensions/extensions_check.py | 7 +- tools/extensions/requirements.txt | 37 ----------- .../extensions/tests/test_extensions_check.py | 15 ++--- tools/protodoc/BUILD | 2 +- tools/protodoc/protodoc.py | 16 ++--- tools/protodoc/requirements.txt | 30 --------- 24 files changed, 203 insertions(+), 225 deletions(-) delete mode 100644 tools/extensions/requirements.txt diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 11e168f63a854..85f49c528d5d5 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -61,11 +61,6 @@ updates: schedule: interval: "daily" -- package-ecosystem: "pip" - directory: "/tools/extensions" - 
schedule: - interval: "daily" - - package-ecosystem: "pip" directory: "/tools/gpg" schedule: diff --git a/bazel/repositories_extra.bzl b/bazel/repositories_extra.bzl index d7d0a869a0c95..e04a4ac41d7a5 100644 --- a/bazel/repositories_extra.bzl +++ b/bazel/repositories_extra.bzl @@ -57,11 +57,6 @@ def _python_deps(): requirements = "@envoy//tools/dependency:requirements.txt", extra_pip_args = ["--require-hashes"], ) - pip_install( - name = "extensions_pip3", - requirements = "@envoy//tools/extensions:requirements.txt", - extra_pip_args = ["--require-hashes"], - ) pip_install( name = "git_pip3", requirements = "@envoy//tools/git:requirements.txt", diff --git a/tools/base/BUILD b/tools/base/BUILD index a6a9934c7b83f..f768786837847 100644 --- a/tools/base/BUILD +++ b/tools/base/BUILD @@ -16,7 +16,12 @@ envoy_py_library( ], ) -envoy_py_library("tools.base.utils") +envoy_py_library( + "tools.base.utils", + deps = [ + requirement("pyyaml"), + ], +) envoy_py_library( "tools.base.checker", diff --git a/tools/base/checker.py b/tools/base/checker.py index c7b03b7742611..8a73fef52dfe2 100644 --- a/tools/base/checker.py +++ b/tools/base/checker.py @@ -1,8 +1,9 @@ import argparse import asyncio +import logging import os from functools import cached_property -from typing import Sequence, Tuple, Type +from typing import Optional, Sequence, Tuple, Type from tools.base import runner @@ -13,6 +14,7 @@ class Checker(runner.Runner): Check methods should call the `self.warn`, `self.error` or `self.succeed` depending upon the outcome of the checks. """ + _active_check: Optional[str] = None checks: Tuple[str, ...] 
= () def __init__(self, *args): @@ -21,6 +23,10 @@ def __init__(self, *args): self.errors = {} self.warnings = {} + @property + def active_check(self) -> Optional[str]: + return self._active_check + @property def diff(self) -> bool: """Flag to determine whether the checker should print diffs to the console""" @@ -170,6 +176,8 @@ def add_arguments(self, parser: argparse.ArgumentParser) -> None: def error(self, name: str, errors: list, log: bool = True, log_type: str = "error") -> int: """Record (and log) errors for a check type""" + if not errors: + return 0 self.errors[name] = self.errors.get(name, []) self.errors[name].extend(errors) if not log: @@ -178,7 +186,9 @@ def error(self, name: str, errors: list, log: bool = True, log_type: str = "erro getattr(self.log, log_type)(f"[{name}] {message}") return 1 - def exit(self): + def exit(self) -> int: + self.log.handlers[0].setLevel(logging.FATAL) + self.stdout.handlers[0].setLevel(logging.FATAL) return self.error("exiting", ["Keyboard exit"], log_type="fatal") def get_checks(self) -> Sequence[str]: @@ -188,10 +198,12 @@ def get_checks(self) -> Sequence[str]: [check for check in self.args.check if check in self.checks]) def on_check_begin(self, check: str) -> None: + self._active_check = check self.log.notice(f"[{check}] Running check") def on_check_run(self, check: str) -> None: """Callback hook called after each check run""" + self._active_check = None if self.exiting: return elif check in self.errors: diff --git a/tools/base/requirements.txt b/tools/base/requirements.txt index f6de56917c002..f7f9ecb473a03 100644 --- a/tools/base/requirements.txt +++ b/tools/base/requirements.txt @@ -22,6 +22,37 @@ humanfriendly==9.2 \ # via # -r tools/base/requirements.txt # coloredlogs +pyyaml==5.4.1 \ + --hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \ + --hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \ + 
--hash=sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393 \ + --hash=sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77 \ + --hash=sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922 \ + --hash=sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5 \ + --hash=sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8 \ + --hash=sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10 \ + --hash=sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc \ + --hash=sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018 \ + --hash=sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e \ + --hash=sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253 \ + --hash=sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347 \ + --hash=sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183 \ + --hash=sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541 \ + --hash=sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb \ + --hash=sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185 \ + --hash=sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc \ + --hash=sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db \ + --hash=sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa \ + --hash=sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46 \ + --hash=sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122 \ + --hash=sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b \ + --hash=sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63 \ + --hash=sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df \ + --hash=sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc \ + 
--hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \ + --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \ + --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0 + # via -r tools/distribution/requirements.txt verboselogs==1.7 \ --hash=sha256:d63f23bf568295b95d3530c6864a0b580cec70e7ff974177dead1e4ffbc6ff49 \ --hash=sha256:e33ddedcdfdafcb3a174701150430b11b46ceb64c2a9a26198c76a156568e427 diff --git a/tools/base/tests/test_checker.py b/tools/base/tests/test_checker.py index 46fa8a152ca34..9b18a023187c6 100644 --- a/tools/base/tests/test_checker.py +++ b/tools/base/tests/test_checker.py @@ -1,3 +1,4 @@ +import logging from unittest.mock import MagicMock, patch, PropertyMock import pytest @@ -49,6 +50,9 @@ def test_checker_constructor(): == [('path1', 'path2', 'path3'), {}]) assert checker.summary_class == CheckerSummary + assert checker.active_check is None + assert "active_check" not in checker.__dict__ + def test_checker_diff(): checker = Checker("path1", "path2", "path3") @@ -352,20 +356,27 @@ def test_checker_add_arguments(patches): @pytest.mark.parametrize("log", [True, False]) @pytest.mark.parametrize("log_type", [None, "fatal"]) @pytest.mark.parametrize("errors", TEST_ERRORS) -def test_checker_error(log, log_type, errors): +@pytest.mark.parametrize("newerrors", [[], ["err1", "err2", "err3"]]) +def test_checker_error(log, log_type, errors, newerrors): checker = Checker("path1", "path2", "path3") log_mock = patch( "tools.base.checker.Checker.log", new_callable=PropertyMock) checker.errors = errors.copy() + result = 1 if newerrors else 0 with log_mock as m_log: if log_type: - assert checker.error("mycheck", ["err1", "err2", "err3"], log, log_type=log_type) == 1 + assert checker.error("mycheck", newerrors, log, log_type=log_type) == result else: - assert checker.error("mycheck", ["err1", "err2", "err3"], log) == 1 + assert checker.error("mycheck", newerrors, log) == result - assert 
checker.errors["mycheck"] == errors.get("mycheck", []) + ["err1", "err2", "err3"] + if not newerrors: + assert not m_log.called + assert "mycheck" not in checker.errors + return + + assert checker.errors["mycheck"] == errors.get("mycheck", []) + newerrors for k, v in errors.items(): if k != "mycheck": assert checker.errors[k] == v @@ -381,11 +392,25 @@ def test_checker_exit(patches): checker = Checker("path1", "path2", "path3") patched = patches( "Checker.error", + ("Checker.log", dict(new_callable=PropertyMock)), + ("Checker.stdout", dict(new_callable=PropertyMock)), prefix="tools.base.checker") - with patched as (m_error, ): + with patched as (m_error, m_log, m_stdout): assert checker.exit() == m_error.return_value + assert ( + list(m_log.return_value.handlers.__getitem__.call_args) + == [(0,), {}]) + assert ( + list(m_log.return_value.handlers.__getitem__.return_value.setLevel.call_args) + == [(logging.FATAL,), {}]) + assert ( + list(m_stdout.return_value.handlers.__getitem__.call_args) + == [(0,), {}]) + assert ( + list(m_stdout.return_value.handlers.__getitem__.return_value.setLevel.call_args) + == [(logging.FATAL,), {}]) assert ( list(m_error.call_args) == [('exiting', ['Keyboard exit']), {'log_type': 'fatal'}]) @@ -427,6 +452,7 @@ def test_checker_on_check_begin(patches): with patched as (m_log, ): assert not checker.on_check_begin("checkname") + assert checker.active_check == "checkname" assert ( list(m_log.return_value.notice.call_args) == [('[checkname] Running check',), {}]) @@ -445,11 +471,14 @@ def test_checker_on_check_run(patches, errors, warnings, exiting): check = "CHECK1" checker.errors = errors checker.warnings = warnings + checker._active_check = check with patched as (m_exit, m_log): m_exit.return_value = exiting assert not checker.on_check_run(check) + assert checker.active_check is None + if exiting: assert not m_log.called return diff --git a/tools/base/tests/test_utils.py b/tools/base/tests/test_utils.py index 43b14f33a59de..dc403ec999eb4 
100644 --- a/tools/base/tests/test_utils.py +++ b/tools/base/tests/test_utils.py @@ -119,22 +119,76 @@ def test_util_coverage_with_data_file(patches): == [(m_open.return_value.__enter__.return_value,), {}]) -def test_util_untar(patches): +def test_util_extract(patches): patched = patches( "tempfile.TemporaryDirectory", "tarfile.open", prefix="tools.base.utils") with patched as (m_tmp, m_open): + assert utils.extract("TARBALL", "PATH") == "PATH" + + assert ( + list(m_open.call_args) + == [('TARBALL',), {}]) + assert ( + list(m_open.return_value.__enter__.return_value.extractall.call_args) + == [(), {'path': "PATH"}]) + + +def test_util_untar(patches): + patched = patches( + "tempfile.TemporaryDirectory", + "extract", + prefix="tools.base.utils") + + with patched as (m_tmp, m_extract): with utils.untar("PATH") as tmpdir: - assert tmpdir == m_tmp.return_value.__enter__.return_value + assert tmpdir == m_extract.return_value assert ( list(m_tmp.call_args) == [(), {}]) + assert ( + list(m_extract.call_args) + == [('PATH', m_tmp.return_value.__enter__.return_value), {}]) + + +def test_util_from_yaml(patches): + patched = patches( + "open", + "yaml", + prefix="tools.base.utils") + + with patched as (m_open, m_yaml): + assert utils.from_yaml("PATH") == m_yaml.safe_load.return_value + assert ( list(m_open.call_args) - == [('PATH',), {}]) + == [("PATH", ), {}]) assert ( - list(m_open.return_value.__enter__.return_value.extractall.call_args) - == [(), {'path': tmpdir}]) + list(m_yaml.safe_load.call_args) + == [(m_open.return_value.__enter__.return_value.read.return_value, ), {}]) + assert ( + list(m_open.return_value.__enter__.return_value.read.call_args) + == [(), {}]) + + +def test_util_to_yaml(patches): + patched = patches( + "open", + "yaml", + prefix="tools.base.utils") + + with patched as (m_open, m_yaml): + assert utils.to_yaml("DATA", "PATH") == "PATH" + + assert ( + list(m_open.call_args) + == [("PATH", "w"), {}]) + assert ( + list(m_yaml.dump.call_args) + == 
[("DATA", ), {}]) + assert ( + list(m_open.return_value.__enter__.return_value.write.call_args) + == [(m_yaml.dump.return_value, ), {}]) diff --git a/tools/base/utils.py b/tools/base/utils.py index 3cf7409615896..379e8f4326333 100644 --- a/tools/base/utils.py +++ b/tools/base/utils.py @@ -10,6 +10,8 @@ from contextlib import ExitStack, contextmanager, redirect_stderr, redirect_stdout from typing import Callable, Iterator, List, Optional, Union +import yaml + # this is testing specific - consider moving to tools.testing.utils @contextmanager @@ -72,6 +74,12 @@ def buffered( stderr.extend(mangle(_stderr.read().strip().split("\n"))) +def extract(tarball: str, path: str) -> str: + with tarfile.open(tarball) as tarfiles: + tarfiles.extractall(path=path) + return path + + @contextmanager def untar(tarball: str) -> Iterator[str]: """Untar a tarball into a temporary directory @@ -94,6 +102,20 @@ def untar(tarball: str) -> Iterator[str]: """ with tempfile.TemporaryDirectory() as tmpdir: - with tarfile.open(tarball) as tarfiles: - tarfiles.extractall(path=tmpdir) - yield tmpdir + yield extract(tarball, tmpdir) + + +def from_yaml(path: str) -> Union[dict, list, str, int]: + """Returns the loaded python object from a yaml file given by `path`""" + with open(path) as f: + return yaml.safe_load(f.read()) + + +def to_yaml(data: Union[dict, list, str, int], path: str) -> str: + """For given `data` dumps as yaml to provided `path`. 
+ + Returns `path` + """ + with open(path, "w") as f: + f.write(yaml.dump(data)) + return path diff --git a/tools/dependency/BUILD b/tools/dependency/BUILD index 2e9942d3c2f83..f8945ed0136cc 100644 --- a/tools/dependency/BUILD +++ b/tools/dependency/BUILD @@ -1,5 +1,4 @@ load("@rules_python//python:defs.bzl", "py_binary", "py_library") -load("@deps_pip3//:requirements.bzl", "requirement") load("//bazel:envoy_build_system.bzl", "envoy_package") load("//tools/base:envoy_python.bzl", "envoy_py_binary") @@ -46,6 +45,6 @@ envoy_py_binary( name = "tools.dependency.pip_check", deps = [ "//tools/base:checker", - requirement("PyYaml"), + "//tools/base:utils", ], ) diff --git a/tools/dependency/pip_check.py b/tools/dependency/pip_check.py index c7165778cbf12..c924c44936549 100755 --- a/tools/dependency/pip_check.py +++ b/tools/dependency/pip_check.py @@ -15,9 +15,7 @@ import sys from functools import cached_property -import yaml - -from tools.base import checker +from tools.base import checker, utils DEPENDABOT_CONFIG = ".github/dependabot.yml" REQUIREMENTS_FILENAME = "requirements.txt" @@ -43,8 +41,7 @@ def config_requirements(self) -> set: @cached_property def dependabot_config(self) -> dict: """Parsed dependabot config""" - with open(os.path.join(self.path, self.dependabot_config_path)) as f: - return yaml.safe_load(f.read()) + return utils.from_yaml(os.path.join(self.path, self.dependabot_config_path)) @property def dependabot_config_path(self) -> str: diff --git a/tools/dependency/requirements.txt b/tools/dependency/requirements.txt index 9c8d43a9e9343..978eb224c37f9 100644 --- a/tools/dependency/requirements.txt +++ b/tools/dependency/requirements.txt @@ -121,37 +121,6 @@ pyparsing==2.4.7 \ # via # -r tools/dependency/requirements.txt # packaging -pyyaml==5.4.1 \ - --hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \ - --hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \ - 
--hash=sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393 \ - --hash=sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77 \ - --hash=sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922 \ - --hash=sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5 \ - --hash=sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8 \ - --hash=sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10 \ - --hash=sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc \ - --hash=sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018 \ - --hash=sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e \ - --hash=sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253 \ - --hash=sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347 \ - --hash=sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183 \ - --hash=sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541 \ - --hash=sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb \ - --hash=sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185 \ - --hash=sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc \ - --hash=sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db \ - --hash=sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa \ - --hash=sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46 \ - --hash=sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122 \ - --hash=sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b \ - --hash=sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63 \ - --hash=sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df \ - --hash=sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc \ - 
--hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \ - --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \ - --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0 - # via -r tools/dependency/requirements.txt requests==2.25.1 \ --hash=sha256:27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804 \ --hash=sha256:c210084e36a42ae6b9219e00e48287def368a26d03a048ddad7bfee44f75871e diff --git a/tools/dependency/tests/test_pip_check.py b/tools/dependency/tests/test_pip_check.py index 0e659676d8965..1fa7171573015 100644 --- a/tools/dependency/tests/test_pip_check.py +++ b/tools/dependency/tests/test_pip_check.py @@ -34,26 +34,19 @@ def test_pip_checker_config_requirements(): def test_pip_checker_dependabot_config(patches): checker = pip_check.PipChecker("path1", "path2", "path3") patched = patches( - "open", - "yaml.safe_load", + "utils", ("PipChecker.path", dict(new_callable=PropertyMock)), "os.path.join", prefix="tools.dependency.pip_check") - with patched as (m_open, m_yaml, m_path, m_join): - assert checker.dependabot_config == m_yaml.return_value + with patched as (m_utils, m_path, m_join): + assert checker.dependabot_config == m_utils.from_yaml.return_value assert ( list(m_join.call_args) == [(m_path.return_value, checker._dependabot_config), {}]) assert ( - list(m_yaml.call_args) - == [(m_open.return_value.__enter__.return_value.read.return_value,), {}]) - assert ( - list(m_open.return_value.__enter__.return_value.read.call_args,) - == [(), {}]) - assert ( - list(m_open.call_args) + list(m_utils.from_yaml.call_args) == [(m_join.return_value,), {}]) diff --git a/tools/docs/BUILD b/tools/docs/BUILD index db47c9f37d362..39049acc40cfa 100644 --- a/tools/docs/BUILD +++ b/tools/docs/BUILD @@ -12,7 +12,7 @@ py_binary( srcs = ["generate_extensions_security_rst.py"], data = ["//source/extensions:extensions_metadata.yaml"], deps = [ - requirement("pyyaml"), + "//tools/base:utils", ], ) 
@@ -38,6 +38,7 @@ envoy_py_binary( name = "tools.docs.sphinx_runner", deps = [ "//tools/base:runner", + "//tools/base:utils", requirement("alabaster"), requirement("Babel"), requirement("certifi"), @@ -54,7 +55,6 @@ envoy_py_binary( requirement("Pygments"), requirement("pyparsing"), requirement("pytz"), - requirement("pyyaml"), requirement("requests"), requirement("setuptools"), requirement("six"), diff --git a/tools/docs/generate_extensions_security_rst.py b/tools/docs/generate_extensions_security_rst.py index 4c3f649b18a8b..2dc6d8e84ee29 100644 --- a/tools/docs/generate_extensions_security_rst.py +++ b/tools/docs/generate_extensions_security_rst.py @@ -8,7 +8,7 @@ import sys import tarfile -import yaml +from tools.base import utils def format_item(extension, metadata): @@ -26,9 +26,7 @@ def main(): output_filename = sys.argv[2] generated_rst_dir = os.path.dirname(output_filename) security_rst_root = os.path.join(generated_rst_dir, "intro/arch_overview/security") - - with open(metadata_filepath) as f: - extension_db = yaml.safe_load(f.read()) + extension_db = utils.from_yaml(metadata_filepath) pathlib.Path(security_rst_root).mkdir(parents=True, exist_ok=True) diff --git a/tools/docs/requirements.txt b/tools/docs/requirements.txt index d322bbb1e62a4..5b0251375c9db 100644 --- a/tools/docs/requirements.txt +++ b/tools/docs/requirements.txt @@ -131,37 +131,6 @@ pytz==2021.1 \ # via # -r tools/docs/requirements.txt # babel -pyyaml==5.4.1 \ - --hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \ - --hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \ - --hash=sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393 \ - --hash=sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77 \ - --hash=sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922 \ - --hash=sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5 \ - 
--hash=sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8 \ - --hash=sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10 \ - --hash=sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc \ - --hash=sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018 \ - --hash=sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e \ - --hash=sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253 \ - --hash=sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347 \ - --hash=sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183 \ - --hash=sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541 \ - --hash=sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb \ - --hash=sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185 \ - --hash=sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc \ - --hash=sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db \ - --hash=sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa \ - --hash=sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46 \ - --hash=sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122 \ - --hash=sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b \ - --hash=sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63 \ - --hash=sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df \ - --hash=sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc \ - --hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \ - --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \ - --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0 - # via -r tools/docs/requirements.txt requests==2.25.1 \ 
--hash=sha256:27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804 \ --hash=sha256:c210084e36a42ae6b9219e00e48287def368a26d03a048ddad7bfee44f75871e diff --git a/tools/docs/sphinx_runner.py b/tools/docs/sphinx_runner.py index 4476b6ae3a5d0..2550bd7c57299 100644 --- a/tools/docs/sphinx_runner.py +++ b/tools/docs/sphinx_runner.py @@ -7,13 +7,11 @@ import tempfile from functools import cached_property -import yaml - from colorama import Fore, Style from sphinx.cmd.build import main as sphinx_build -from tools.base import runner +from tools.base import runner, utils class SphinxBuildError(Exception): @@ -51,9 +49,7 @@ def colors(self) -> dict: @cached_property def config_file(self) -> str: """Populates a config file with self.configs and returns the file path""" - with open(self.config_file_path, "w") as f: - f.write(yaml.dump(self.configs)) - return self.config_file_path + return utils.to_yaml(self.configs, self.config_file_path) @property def config_file_path(self) -> str: diff --git a/tools/docs/tests/test_sphinx_runner.py b/tools/docs/tests/test_sphinx_runner.py index 527b7ef44c12b..f85ddd4004342 100644 --- a/tools/docs/tests/test_sphinx_runner.py +++ b/tools/docs/tests/test_sphinx_runner.py @@ -68,27 +68,19 @@ def test_sphinx_runner_colors(patches): def test_sphinx_runner_config_file(patches): runner = sphinx_runner.SphinxRunner() patched = patches( - "open", - "yaml", + "utils", ("SphinxRunner.config_file_path", dict(new_callable=PropertyMock)), ("SphinxRunner.configs", dict(new_callable=PropertyMock)), prefix="tools.docs.sphinx_runner") - with patched as (m_open, m_yaml, m_fpath, m_configs): + with patched as (m_utils, m_fpath, m_configs): assert ( runner.config_file - == m_fpath.return_value) + == m_utils.to_yaml.return_value) assert ( - list(m_open.call_args) - == [(m_fpath.return_value, 'w'), {}]) - assert ( - list(m_yaml.dump.call_args) - == [(m_configs.return_value,), {}]) - assert ( - m_open.return_value.__enter__.return_value.write.call_args - 
== [(m_yaml.dump.return_value,), {}]) - + list(m_utils.to_yaml.call_args) + == [(m_configs.return_value, m_fpath.return_value), {}]) assert "config_file" in runner.__dict__ diff --git a/tools/extensions/BUILD b/tools/extensions/BUILD index 5afa137c1c6f0..b3f2dcbecaddb 100644 --- a/tools/extensions/BUILD +++ b/tools/extensions/BUILD @@ -1,5 +1,4 @@ load("//bazel:envoy_build_system.bzl", "envoy_package") -load("@extensions_pip3//:requirements.bzl", "requirement") load("//source/extensions:all_extensions.bzl", "envoy_all_extensions") load("//tools/base:envoy_python.bzl", "envoy_py_binary") @@ -16,6 +15,6 @@ envoy_py_binary( ] + envoy_all_extensions(), deps = [ "//tools/base:checker", - requirement("pyyaml"), + "//tools/base:utils", ], ) diff --git a/tools/extensions/extensions_check.py b/tools/extensions/extensions_check.py index 7f12c3e3de46f..c9cddf61a35e6 100644 --- a/tools/extensions/extensions_check.py +++ b/tools/extensions/extensions_check.py @@ -10,9 +10,7 @@ from importlib.machinery import SourceFileLoader from typing import Iterator -import yaml - -from tools.base import checker +from tools.base import checker, utils BUILD_CONFIG_PATH = "source/extensions/extensions_build_config.bzl" @@ -103,8 +101,7 @@ def fuzzed_count(self) -> int: @cached_property def metadata(self) -> dict: - with open(METADATA_PATH) as f: - return yaml.safe_load(f.read()) + return utils.from_yaml(METADATA_PATH) @property def robust_to_downstream_count(self) -> int: diff --git a/tools/extensions/requirements.txt b/tools/extensions/requirements.txt deleted file mode 100644 index 910e1e3519fe0..0000000000000 --- a/tools/extensions/requirements.txt +++ /dev/null @@ -1,37 +0,0 @@ -# -# This file is autogenerated by pip-compile -# To update, run: -# -# pip-compile --generate-hashes tools/extensions/requirements.txt -# -pyyaml==5.4.1 \ - --hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \ - 
--hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \ - --hash=sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393 \ - --hash=sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77 \ - --hash=sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922 \ - --hash=sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5 \ - --hash=sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8 \ - --hash=sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10 \ - --hash=sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc \ - --hash=sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018 \ - --hash=sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e \ - --hash=sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253 \ - --hash=sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347 \ - --hash=sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183 \ - --hash=sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541 \ - --hash=sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb \ - --hash=sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185 \ - --hash=sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc \ - --hash=sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db \ - --hash=sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa \ - --hash=sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46 \ - --hash=sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122 \ - --hash=sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b \ - --hash=sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63 \ - --hash=sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df \ - 
--hash=sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc \ - --hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \ - --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \ - --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0 - # via -r tools/extensions/requirements.txt diff --git a/tools/extensions/tests/test_extensions_check.py b/tools/extensions/tests/test_extensions_check.py index 95ccd9f2aa128..ed1ede98d5af9 100644 --- a/tools/extensions/tests/test_extensions_check.py +++ b/tools/extensions/tests/test_extensions_check.py @@ -91,24 +91,17 @@ def test_extensions_fuzzed_count(patches): def test_extensions_metadata(patches): checker = extensions_check.ExtensionsChecker() patched = patches( - "open", - "yaml", + "utils", prefix="tools.extensions.extensions_check") - with patched as (m_open, m_yaml): + with patched as (m_utils, ): assert ( checker.metadata - == m_yaml.safe_load.return_value) + == m_utils.from_yaml.return_value) assert ( - list(m_open.call_args) + list(m_utils.from_yaml.call_args) == [(extensions_check.METADATA_PATH,), {}]) - assert ( - list(m_open.return_value.__enter__.return_value.read.call_args) - == [(), {}]) - assert ( - list(m_yaml.safe_load.call_args) - == [(m_open.return_value.__enter__.return_value.read.return_value,), {}]) assert "metadata" in checker.__dict__ diff --git a/tools/protodoc/BUILD b/tools/protodoc/BUILD index b6cb29bccef13..8ac4ca1c43d60 100644 --- a/tools/protodoc/BUILD +++ b/tools/protodoc/BUILD @@ -31,12 +31,12 @@ py_binary( deps = [ ":manifest_proto_py_proto", "//tools/api_proto_plugin", + "//tools/base:utils", "//tools/config_validation:validate_fragment", "@com_envoyproxy_protoc_gen_validate//validate:validate_py", "@com_github_cncf_udpa//udpa/annotations:pkg_py_proto", "@com_google_protobuf//:protobuf_python", requirement("Jinja2"), - requirement("PyYAML"), ], ) diff --git a/tools/protodoc/protodoc.py 
b/tools/protodoc/protodoc.py index 2e6c5296bb1be..a95efe31b4b46 100755 --- a/tools/protodoc/protodoc.py +++ b/tools/protodoc/protodoc.py @@ -24,6 +24,7 @@ from tools.api_proto_plugin import annotations from tools.api_proto_plugin import plugin from tools.api_proto_plugin import visitor +from tools.base import utils from tools.config_validation import validate_fragment from tools.protodoc import manifest_pb2 @@ -115,8 +116,7 @@ r = runfiles.Create() -with open(r.Rlocation("envoy/source/extensions/extensions_metadata.yaml")) as f: - EXTENSION_DB = yaml.safe_load(f.read()) +EXTENSION_DB = utils.from_yaml(r.Rlocation("envoy/source/extensions/extensions_metadata.yaml")) # create an index of extension categories from extension db EXTENSION_CATEGORIES = {} @@ -666,12 +666,12 @@ def __init__(self): with open(r.Rlocation('envoy/docs/v2_mapping.json'), 'r') as f: self.v2_mapping = json.load(f) - with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f: - # Load as YAML, emit as JSON and then parse as proto to provide type - # checking. - protodoc_manifest_untyped = yaml.safe_load(f.read()) - self.protodoc_manifest = manifest_pb2.Manifest() - json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest) + # Load as YAML, emit as JSON and then parse as proto to provide type + # checking. 
+ protodoc_manifest_untyped = utils.from_yaml( + r.Rlocation('envoy/docs/protodoc_manifest.yaml')) + self.protodoc_manifest = manifest_pb2.Manifest() + json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest) def visit_enum(self, enum_proto, type_context): normal_enum_type = normalize_type_context_name(type_context.name) diff --git a/tools/protodoc/requirements.txt b/tools/protodoc/requirements.txt index 6540ec1621645..1cd69909b9962 100644 --- a/tools/protodoc/requirements.txt +++ b/tools/protodoc/requirements.txt @@ -1,36 +1,6 @@ Jinja2==3.0.1 \ --hash=sha256:1f06f2da51e7b56b8f238affdd6b4e2c61e39598a378cc49345bc1bd42a978a4 \ --hash=sha256:703f484b47a6af502e743c9122595cc812b0271f661722403114f71a79d0f5a4 -PyYAML==5.4.1 \ - --hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \ - --hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \ - --hash=sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393 \ - --hash=sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77 \ - --hash=sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922 \ - --hash=sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5 \ - --hash=sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8 \ - --hash=sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10 \ - --hash=sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc \ - --hash=sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018 \ - --hash=sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e \ - --hash=sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253 \ - --hash=sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347 \ - --hash=sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183 \ - --hash=sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541 \ 
- --hash=sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb \ - --hash=sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185 \ - --hash=sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc \ - --hash=sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db \ - --hash=sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa \ - --hash=sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46 \ - --hash=sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122 \ - --hash=sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b \ - --hash=sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63 \ - --hash=sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df \ - --hash=sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc \ - --hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \ - --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \ - --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0 MarkupSafe==2.0.1 \ --hash=sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51 \ --hash=sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff \ From 878393e267f7afeba6c6e644468c2720ad11e828 Mon Sep 17 00:00:00 2001 From: Long Dai Date: Mon, 26 Jul 2021 22:11:39 +0800 Subject: [PATCH 46/57] io_error: cleanup TODO by renaming member variable (#17060) Risk Level: low Testing: n/a Docs Changes: n/a Release Notes: n/a Signed-off-by: Long Dai --- envoy/api/io_error.h | 14 +-- envoy/api/os_sys_calls_common.h | 2 +- .../access_log/access_log_manager_impl.cc | 14 +-- source/common/api/win32/os_sys_calls_impl.cc | 22 ++-- source/common/event/win32/signal_impl.cc | 6 +- .../posix/directory_iterator_impl.cc | 2 +- .../filesystem/posix/filesystem_impl.cc | 11 +- .../filesystem/win32/filesystem_impl.cc | 
2 +- .../common/filesystem/win32/watcher_impl.cc | 10 +- .../formatter/substitution_formatter.cc | 2 +- source/common/network/connection_impl.cc | 21 ++-- .../common/network/io_socket_handle_impl.cc | 37 +++---- source/common/network/io_socket_handle_impl.h | 4 +- source/common/network/listen_socket_impl.cc | 2 +- source/common/network/listen_socket_impl.h | 2 +- source/common/network/raw_buffer_socket.cc | 10 +- source/common/network/socket_impl.cc | 6 +- .../common/network/socket_interface_impl.cc | 17 +-- source/common/network/socket_option_impl.cc | 2 +- source/common/network/udp_listener_impl.cc | 4 +- source/common/network/utility.cc | 28 ++--- .../win32_redirect_records_option_impl.cc | 2 +- .../common/quic/envoy_quic_packet_writer.cc | 2 +- source/exe/win32/platform_impl.cc | 2 +- source/exe/win32/service_base.cc | 2 +- .../listener/http_inspector/http_inspector.cc | 4 +- .../listener/original_dst/original_dst.cc | 2 +- .../listener/proxy_protocol/proxy_protocol.cc | 20 ++-- .../listener/tls_inspector/tls_inspector.cc | 8 +- .../extensions/tracers/xray/daemon_broker.cc | 2 +- .../proxy_protocol/proxy_protocol.cc | 4 +- .../transport_sockets/tls/io_handle_bio.cc | 4 +- source/server/hot_restart_impl.cc | 10 +- source/server/hot_restarting_base.cc | 6 +- source/server/options_impl_platform_linux.cc | 2 +- test/common/buffer/buffer_fuzz.cc | 19 ++-- test/common/buffer/owned_impl_test.cc | 46 ++++---- test/common/buffer/watermark_buffer_test.cc | 6 +- test/common/event/dispatcher_impl_test.cc | 2 +- test/common/event/file_event_impl_test.cc | 16 +-- .../common/filesystem/filesystem_impl_test.cc | 52 ++++----- test/common/network/address_impl_test.cc | 30 +++--- .../io_socket_handle_impl_integration_test.cc | 2 +- .../common/network/listen_socket_impl_test.cc | 7 +- .../common/network/socket_option_impl_test.cc | 2 +- test/common/network/socket_option_test.h | 6 +- .../udp_listener_impl_batch_writer_test.cc | 10 +- test/common/network/udp_listener_impl_test.cc | 
8 +- test/common/quic/active_quic_listener_test.cc | 4 +- .../quic/platform/quic_test_output_impl.cc | 4 +- .../listener/common/fuzz/fuzzed_input_test.cc | 14 +-- .../proxy_protocol/proxy_protocol_test.cc | 17 +-- .../filters/udp/dns_filter/dns_filter_test.cc | 2 +- .../udp/udp_proxy/udp_proxy_filter_test.cc | 2 +- .../user_space/io_handle_impl_test.cc | 102 +++++++++--------- .../common/statsd/udp_statsd_test.cc | 2 +- test/integration/fake_upstream.cc | 4 +- .../filters/test_socket_interface.cc | 12 +-- test/integration/overload_integration_test.cc | 3 +- test/integration/uds_integration_test.cc | 2 +- test/mocks/filesystem/mocks.cc | 4 +- test/server/listener_manager_impl_test.cc | 6 +- test/server/server_test.cc | 2 +- test/test_common/environment.cc | 4 +- test/test_common/network_utility.cc | 10 +- test/test_common/network_utility.h | 2 +- 66 files changed, 358 insertions(+), 332 deletions(-) diff --git a/envoy/api/io_error.h b/envoy/api/io_error.h index ae03b17dad865..ebb4f99b79854 100644 --- a/envoy/api/io_error.h +++ b/envoy/api/io_error.h @@ -50,20 +50,21 @@ using IoErrorPtr = std::unique_ptr; /** * Basic type for return result which has a return code and error code defined * according to different implementations. - * If the call succeeds, ok() should return true and |rc_| is valid. Otherwise |err_| + * If the call succeeds, ok() should return true and |return_value_| is valid. Otherwise |err_| * can be passed into IoError::getErrorCode() to extract the error. In this - * case, |rc_| is invalid. + * case, |return_value_| is invalid. 
*/ template struct IoCallResult { - IoCallResult(ReturnValue rc, IoErrorPtr err) : rc_(rc), err_(std::move(err)) {} + IoCallResult(ReturnValue return_value, IoErrorPtr err) + : return_value_(return_value), err_(std::move(err)) {} IoCallResult(IoCallResult&& result) noexcept - : rc_(result.rc_), err_(std::move(result.err_)) {} + : return_value_(result.return_value_), err_(std::move(result.err_)) {} virtual ~IoCallResult() = default; IoCallResult& operator=(IoCallResult&& result) noexcept { - rc_ = result.rc_; + return_value_ = result.return_value_; err_ = std::move(result.err_); return *this; } @@ -79,8 +80,7 @@ template struct IoCallResult { */ bool wouldBlock() const { return !ok() && err_->getErrorCode() == IoError::IoErrorCode::Again; } - // TODO(danzh): rename it to be more meaningful, i.e. return_value_. - ReturnValue rc_; + ReturnValue return_value_; IoErrorPtr err_; }; diff --git a/envoy/api/os_sys_calls_common.h b/envoy/api/os_sys_calls_common.h index c59469e53e6f4..e26bc125e3109 100644 --- a/envoy/api/os_sys_calls_common.h +++ b/envoy/api/os_sys_calls_common.h @@ -15,7 +15,7 @@ template struct SysCallResult { /** * The return code from the system call. */ - T rc_; + T return_value_; /** * The errno value as captured after the system call. 
diff --git a/source/common/access_log/access_log_manager_impl.cc b/source/common/access_log/access_log_manager_impl.cc index 9c5f83a189e74..5d50de4d74a5f 100644 --- a/source/common/access_log/access_log_manager_impl.cc +++ b/source/common/access_log/access_log_manager_impl.cc @@ -53,7 +53,7 @@ AccessLogFileImpl::AccessLogFileImpl(Filesystem::FilePtr&& file, Event::Dispatch thread_factory_(thread_factory), flush_interval_msec_(flush_interval_msec), stats_(stats) { flush_timer_->enableTimer(flush_interval_msec_); auto open_result = open(); - if (!open_result.rc_) { + if (!open_result.return_value_) { throw EnvoyException(fmt::format("unable to open file '{}': {}", file_->path(), open_result.err_->getErrorDetails())); } @@ -91,8 +91,8 @@ AccessLogFileImpl::~AccessLogFileImpl() { doWrite(flush_buffer_); } const Api::IoCallBoolResult result = file_->close(); - ASSERT(result.rc_, fmt::format("unable to close file '{}': {}", file_->path(), - result.err_->getErrorDetails())); + ASSERT(result.return_value_, fmt::format("unable to close file '{}': {}", file_->path(), + result.err_->getErrorDetails())); } } @@ -112,7 +112,7 @@ void AccessLogFileImpl::doWrite(Buffer::Instance& buffer) { for (const Buffer::RawSlice& slice : slices) { absl::string_view data(static_cast(slice.mem_), slice.len_); const Api::IoCallSizeResult result = file_->write(data); - if (result.ok() && result.rc_ == static_cast(slice.len_)) { + if (result.ok() && result.return_value_ == static_cast(slice.len_)) { stats_.write_completed_.inc(); } else { // Probably disk full. 
@@ -154,10 +154,10 @@ void AccessLogFileImpl::flushThreadFunc() { if (reopen_file_) { reopen_file_ = false; const Api::IoCallBoolResult result = file_->close(); - ASSERT(result.rc_, fmt::format("unable to close file '{}': {}", file_->path(), - result.err_->getErrorDetails())); + ASSERT(result.return_value_, fmt::format("unable to close file '{}': {}", file_->path(), + result.err_->getErrorDetails())); const Api::IoCallBoolResult open_result = open(); - if (!open_result.rc_) { + if (!open_result.return_value_) { stats_.reopen_failed_.inc(); return; } diff --git a/source/common/api/win32/os_sys_calls_impl.cc b/source/common/api/win32/os_sys_calls_impl.cc index d534e5e07cd02..3766c54a2100c 100644 --- a/source/common/api/win32/os_sys_calls_impl.cc +++ b/source/common/api/win32/os_sys_calls_impl.cc @@ -279,11 +279,11 @@ SysCallIntResult OsSysCallsImpl::socketpair(int domain, int type, int protocol, sv[0] = sv[1] = INVALID_SOCKET; SysCallSocketResult socket_result = socket(domain, type, protocol); - if (SOCKET_INVALID(socket_result.rc_)) { + if (SOCKET_INVALID(socket_result.return_value_)) { return {SOCKET_ERROR, socket_result.errno_}; } - os_fd_t listener = socket_result.rc_; + os_fd_t listener = socket_result.return_value_; typedef union { struct sockaddr_storage sa; @@ -313,44 +313,44 @@ SysCallIntResult OsSysCallsImpl::socketpair(int domain, int type, int protocol, }; SysCallIntResult int_result = bind(listener, reinterpret_cast(&a), sa_size); - if (int_result.rc_ == SOCKET_ERROR) { + if (int_result.return_value_ == SOCKET_ERROR) { onErr(); return int_result; } int_result = listen(listener, 1); - if (int_result.rc_ == SOCKET_ERROR) { + if (int_result.return_value_ == SOCKET_ERROR) { onErr(); return int_result; } socket_result = socket(domain, type, protocol); - if (SOCKET_INVALID(socket_result.rc_)) { + if (SOCKET_INVALID(socket_result.return_value_)) { onErr(); return {SOCKET_ERROR, socket_result.errno_}; } - sv[0] = socket_result.rc_; + sv[0] = 
socket_result.return_value_; a = {}; int_result = getsockname(listener, reinterpret_cast(&a), &sa_size); - if (int_result.rc_ == SOCKET_ERROR) { + if (int_result.return_value_ == SOCKET_ERROR) { onErr(); return int_result; } int_result = connect(sv[0], reinterpret_cast(&a), sa_size); - if (int_result.rc_ == SOCKET_ERROR) { + if (int_result.return_value_ == SOCKET_ERROR) { onErr(); return int_result; } - socket_result.rc_ = ::accept(listener, nullptr, nullptr); - if (SOCKET_INVALID(socket_result.rc_)) { + socket_result.return_value_ = ::accept(listener, nullptr, nullptr); + if (SOCKET_INVALID(socket_result.return_value_)) { socket_result.errno_ = ::WSAGetLastError(); onErr(); return {SOCKET_ERROR, socket_result.errno_}; } - sv[1] = socket_result.rc_; + sv[1] = socket_result.return_value_; ::closesocket(listener); return {0, 0}; diff --git a/source/common/event/win32/signal_impl.cc b/source/common/event/win32/signal_impl.cc index d2a96b0de6fe3..0466ee6b732f0 100644 --- a/source/common/event/win32/signal_impl.cc +++ b/source/common/event/win32/signal_impl.cc @@ -21,14 +21,14 @@ SignalEventImpl::SignalEventImpl(DispatcherImpl& dispatcher, signal_t signal_num os_fd_t socks[2]; Api::SysCallIntResult result = Api::OsSysCallsSingleton::get().socketpair(AF_INET, SOCK_STREAM, IPPROTO_TCP, socks); - ASSERT(result.rc_ == 0); + ASSERT(result.return_value_ == 0); read_handle_ = std::make_unique(socks[0], false, AF_INET); result = read_handle_->setBlocking(false); - ASSERT(result.rc_ == 0); + ASSERT(result.return_value_ == 0); auto write_handle = std::make_shared(socks[1], false, AF_INET); result = write_handle->setBlocking(false); - ASSERT(result.rc_ == 0); + ASSERT(result.return_value_ == 0); read_handle_->initializeFileEvent( dispatcher, diff --git a/source/common/filesystem/posix/directory_iterator_impl.cc b/source/common/filesystem/posix/directory_iterator_impl.cc index 65c1fe2bd7dfe..eb760c76cfdfd 100644 --- a/source/common/filesystem/posix/directory_iterator_impl.cc +++ 
b/source/common/filesystem/posix/directory_iterator_impl.cc @@ -55,7 +55,7 @@ FileType DirectoryIteratorImpl::fileType(const std::string& full_path, struct stat stat_buf; const Api::SysCallIntResult result = os_sys_calls.stat(full_path.c_str(), &stat_buf); - if (result.rc_ != 0) { + if (result.return_value_ != 0) { if (errno == ENOENT) { // Special case. This directory entity is likely to be a symlink, // but the reference is broken as the target could not be stat()'ed. diff --git a/source/common/filesystem/posix/filesystem_impl.cc b/source/common/filesystem/posix/filesystem_impl.cc index c89cacd6dafb4..95e103c7f3c74 100644 --- a/source/common/filesystem/posix/filesystem_impl.cc +++ b/source/common/filesystem/posix/filesystem_impl.cc @@ -25,8 +25,9 @@ namespace Filesystem { FileImplPosix::~FileImplPosix() { if (isOpen()) { + // NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.VirtualCall) const Api::IoCallBoolResult result = close(); - ASSERT(result.rc_); + ASSERT(result.return_value_); } } @@ -152,7 +153,7 @@ bool InstanceImplPosix::illegalPath(const std::string& path) { } const Api::SysCallStringResult canonical_path = canonicalPath(path); - if (canonical_path.rc_.empty()) { + if (canonical_path.return_value_.empty()) { ENVOY_LOG_MISC(debug, "Unable to determine canonical path for {}: {}", path, errorDetails(canonical_path.errno_)); return true; @@ -163,9 +164,9 @@ bool InstanceImplPosix::illegalPath(const std::string& path) { // platform in the future, growing these or relaxing some constraints (e.g. // there are valid reasons to go via /proc for file paths). // TODO(htuch): Optimize this as a hash lookup if we grow any further. 
- if (absl::StartsWith(canonical_path.rc_, "/dev") || - absl::StartsWith(canonical_path.rc_, "/sys") || - absl::StartsWith(canonical_path.rc_, "/proc")) { + if (absl::StartsWith(canonical_path.return_value_, "/dev") || + absl::StartsWith(canonical_path.return_value_, "/sys") || + absl::StartsWith(canonical_path.return_value_, "/proc")) { return true; } return false; diff --git a/source/common/filesystem/win32/filesystem_impl.cc b/source/common/filesystem/win32/filesystem_impl.cc index 9ec504375951d..0be9c219ef6b9 100644 --- a/source/common/filesystem/win32/filesystem_impl.cc +++ b/source/common/filesystem/win32/filesystem_impl.cc @@ -22,7 +22,7 @@ namespace Filesystem { FileImplWin32::~FileImplWin32() { if (isOpen()) { const Api::IoCallBoolResult result = close(); - ASSERT(result.rc_); + ASSERT(result.return_value_); } } diff --git a/source/common/filesystem/win32/watcher_impl.cc b/source/common/filesystem/win32/watcher_impl.cc index e4a3dd7e87f9e..ed89d98ceba95 100644 --- a/source/common/filesystem/win32/watcher_impl.cc +++ b/source/common/filesystem/win32/watcher_impl.cc @@ -11,14 +11,14 @@ WatcherImpl::WatcherImpl(Event::Dispatcher& dispatcher, Api::Api& api) : api_(api), os_sys_calls_(Api::OsSysCallsSingleton::get()) { os_fd_t socks[2]; Api::SysCallIntResult result = os_sys_calls_.socketpair(AF_INET, SOCK_STREAM, IPPROTO_TCP, socks); - ASSERT(result.rc_ == 0); + ASSERT(result.return_value_ == 0); read_handle_ = std::make_unique(socks[0], false, AF_INET); result = read_handle_->setBlocking(false); - ASSERT(result.rc_ == 0); + ASSERT(result.return_value_ == 0); write_handle_ = std::make_unique(socks[1], false, AF_INET); result = write_handle_->setBlocking(false); - ASSERT(result.rc_ == 0); + ASSERT(result.return_value_ == 0); read_handle_->initializeFileEvent( dispatcher, @@ -154,7 +154,7 @@ void WatcherImpl::endDirectoryWatch(Network::IoHandle& io_handle, HANDLE event_h constexpr absl::string_view data{"a"}; buffer.add(data); auto result = 
io_handle.write(buffer); - RELEASE_ASSERT(result.rc_ == 1, + RELEASE_ASSERT(result.return_value_ == 1, fmt::format("failed to write 1 byte: {}", result.err_->getErrorDetails())); } @@ -207,7 +207,7 @@ void WatcherImpl::directoryChangeCompletion(DWORD err, DWORD num_bytes, LPOVERLA // not in this completion routine Buffer::RawSlice buffer{(void*)data.data(), 1}; auto result = watcher->write_handle_->writev(&buffer, 1); - RELEASE_ASSERT(result.rc_ == 1, + RELEASE_ASSERT(result.return_value_ == 1, fmt::format("failed to write 1 byte: {}", result.err_->getErrorDetails())); } } diff --git a/source/common/formatter/substitution_formatter.cc b/source/common/formatter/substitution_formatter.cc index e1001871f038f..a50fef64617a8 100644 --- a/source/common/formatter/substitution_formatter.cc +++ b/source/common/formatter/substitution_formatter.cc @@ -93,7 +93,7 @@ const absl::optional SubstitutionFormatUtils::getHostname() { const Api::SysCallIntResult result = os_sys_calls.gethostname(name, len); absl::optional hostname; - if (result.rc_ == 0) { + if (result.return_value_ == 0) { hostname = name; } diff --git a/source/common/network/connection_impl.cc b/source/common/network/connection_impl.cc index cf9b86f35f72e..5325dab742648 100644 --- a/source/common/network/connection_impl.cc +++ b/source/common/network/connection_impl.cc @@ -288,13 +288,13 @@ void ConnectionImpl::noDelay(bool enable) { Api::SysCallIntResult result = socket_->setSocketOption(IPPROTO_TCP, TCP_NODELAY, &new_value, sizeof(new_value)); #if defined(__APPLE__) - if (SOCKET_FAILURE(result.rc_) && result.errno_ == SOCKET_ERROR_INVAL) { + if (SOCKET_FAILURE(result.return_value_) && result.errno_ == SOCKET_ERROR_INVAL) { // Sometimes occurs when the connection is not yet fully formed. Empirically, TCP_NODELAY is // enabled despite this result. 
return; } #elif defined(WIN32) - if (SOCKET_FAILURE(result.rc_) && + if (SOCKET_FAILURE(result.return_value_) && (result.errno_ == SOCKET_ERROR_AGAIN || result.errno_ == SOCKET_ERROR_INVAL)) { // Sometimes occurs when the connection is not yet fully formed. Empirically, TCP_NODELAY is // enabled despite this result. @@ -302,8 +302,9 @@ void ConnectionImpl::noDelay(bool enable) { } #endif - RELEASE_ASSERT(result.rc_ == 0, fmt::format("Failed to set TCP_NODELAY with error {}, {}", - result.errno_, errorDetails(result.errno_))); + RELEASE_ASSERT(result.return_value_ == 0, + fmt::format("Failed to set TCP_NODELAY with error {}, {}", result.errno_, + errorDetails(result.errno_))); } void ConnectionImpl::onRead(uint64_t read_buffer_size) { @@ -648,7 +649,7 @@ ConnectionImpl::unixSocketPeerCredentials() const { #else struct ucred ucred; socklen_t ucred_size = sizeof(ucred); - int rc = socket_->getSocketOption(SOL_SOCKET, SO_PEERCRED, &ucred, &ucred_size).rc_; + int rc = socket_->getSocketOption(SOL_SOCKET, SO_PEERCRED, &ucred, &ucred_size).return_value_; if (SOCKET_FAILURE(rc)) { return absl::nullopt; } @@ -663,8 +664,8 @@ void ConnectionImpl::onWriteReady() { if (connecting_) { int error; socklen_t error_size = sizeof(error); - RELEASE_ASSERT(socket_->getSocketOption(SOL_SOCKET, SO_ERROR, &error, &error_size).rc_ == 0, - ""); + RELEASE_ASSERT( + socket_->getSocketOption(SOL_SOCKET, SO_ERROR, &error, &error_size).return_value_ == 0, ""); if (error == 0) { ENVOY_CONN_LOG(debug, "connected", *this); @@ -846,7 +847,7 @@ ClientConnectionImpl::ClientConnectionImpl( if (*source != nullptr) { Api::SysCallIntResult result = socket_->bind(*source); - if (result.rc_ < 0) { + if (result.return_value_ < 0) { // TODO(lizan): consider add this error into transportFailureReason. ENVOY_LOG_MISC(debug, "Bind failure. 
Failed to bind to {}: {}", source->get()->asString(), errorDetails(result.errno_)); @@ -865,13 +866,13 @@ void ClientConnectionImpl::connect() { ENVOY_CONN_LOG(debug, "connecting to {}", *this, socket_->addressProvider().remoteAddress()->asString()); const Api::SysCallIntResult result = socket_->connect(socket_->addressProvider().remoteAddress()); - if (result.rc_ == 0) { + if (result.return_value_ == 0) { // write will become ready. ASSERT(connecting_); return; } - ASSERT(SOCKET_FAILURE(result.rc_)); + ASSERT(SOCKET_FAILURE(result.return_value_)); #ifdef WIN32 // winsock2 connect returns EWOULDBLOCK if the socket is non-blocking and the connection // cannot be completed immediately. We do not check for `EINPROGRESS` as that error is for diff --git a/source/common/network/io_socket_handle_impl.cc b/source/common/network/io_socket_handle_impl.cc index 8f2d2e0b56872..452f57711b5e4 100644 --- a/source/common/network/io_socket_handle_impl.cc +++ b/source/common/network/io_socket_handle_impl.cc @@ -74,7 +74,7 @@ Api::IoCallUint64Result IoSocketHandleImpl::close() { } ASSERT(SOCKET_VALID(fd_)); - const int rc = Api::OsSysCallsSingleton::get().close(fd_).rc_; + const int rc = Api::OsSysCallsSingleton::get().close(fd_).return_value_; SET_SOCKET_INVALID(fd_); return Api::IoCallUint64Result(rc, Api::IoErrorPtr(nullptr, IoSocketError::deleteIoError)); } @@ -117,7 +117,7 @@ Api::IoCallUint64Result IoSocketHandleImpl::read(Buffer::Instance& buffer, Buffer::Reservation reservation = buffer.reserveForRead(); Api::IoCallUint64Result result = readv(std::min(reservation.length(), max_length), reservation.slices(), reservation.numSlices()); - uint64_t bytes_to_commit = result.ok() ? result.rc_ : 0; + uint64_t bytes_to_commit = result.ok() ? 
result.return_value_ : 0; ASSERT(bytes_to_commit <= max_length); reservation.commit(bytes_to_commit); @@ -164,8 +164,8 @@ Api::IoCallUint64Result IoSocketHandleImpl::write(Buffer::Instance& buffer) { constexpr uint64_t MaxSlices = 16; Buffer::RawSliceVector slices = buffer.getRawSlices(MaxSlices); Api::IoCallUint64Result result = writev(slices.begin(), slices.size()); - if (result.ok() && result.rc_ > 0) { - buffer.drain(static_cast(result.rc_)); + if (result.ok() && result.return_value_ > 0) { + buffer.drain(static_cast(result.return_value_)); } // Emulated edge events need to registered if the socket operation did not complete @@ -342,7 +342,7 @@ Api::IoCallUint64Result IoSocketHandleImpl::recvmsg(Buffer::RawSlice* slices, hdr.msg_controllen = cmsg_space_; Api::SysCallSizeResult result = Api::OsSysCallsSingleton::get().recvmsg(fd_, &hdr, messageTruncatedOption()); - if (result.rc_ < 0) { + if (result.return_value_ < 0) { auto io_result = sysCallResultToIoCallResult(result); // Emulated edge events need to registered if the socket operation did not complete // because the socket would block. 
@@ -354,8 +354,8 @@ Api::IoCallUint64Result IoSocketHandleImpl::recvmsg(Buffer::RawSlice* slices, return io_result; } if ((hdr.msg_flags & MSG_TRUNC) != 0) { - ENVOY_LOG_MISC(debug, "Dropping truncated UDP packet with size: {}.", result.rc_); - result.rc_ = 0; + ENVOY_LOG_MISC(debug, "Dropping truncated UDP packet with size: {}.", result.return_value_); + result.return_value_ = 0; (*output.dropped_packets_)++; output.msg_[0].truncated_and_dropped_ = true; return sysCallResultToIoCallResult(result); @@ -441,7 +441,7 @@ Api::IoCallUint64Result IoSocketHandleImpl::recvmmsg(RawSliceArrays& slices, uin Api::OsSysCallsSingleton::get().recvmmsg(fd_, mmsg_hdr.data(), num_packets_per_mmsg_call, messageTruncatedOption() | MSG_WAITFORONE, nullptr); - if (result.rc_ <= 0) { + if (result.return_value_ <= 0) { auto io_result = sysCallResultToIoCallResult(result); // Emulated edge events need to registered if the socket operation did not complete // because the socket would block. @@ -453,7 +453,7 @@ Api::IoCallUint64Result IoSocketHandleImpl::recvmmsg(RawSliceArrays& slices, uin return io_result; } - int num_packets_read = result.rc_; + int num_packets_read = result.return_value_; for (int i = 0; i < num_packets_read; ++i) { msghdr& hdr = mmsg_hdr[i].msg_hdr; @@ -533,11 +533,11 @@ Api::SysCallIntResult IoSocketHandleImpl::listen(int backlog) { IoHandlePtr IoSocketHandleImpl::accept(struct sockaddr* addr, socklen_t* addrlen) { auto result = Api::OsSysCallsSingleton::get().accept(fd_, addr, addrlen); - if (SOCKET_INVALID(result.rc_)) { + if (SOCKET_INVALID(result.return_value_)) { return nullptr; } - return std::make_unique(result.rc_, socket_v6only_, domain_); + return std::make_unique(result.return_value_, socket_v6only_, domain_); } Api::SysCallIntResult IoSocketHandleImpl::connect(Address::InstanceConstSharedPtr address) { @@ -568,9 +568,10 @@ Api::SysCallIntResult IoSocketHandleImpl::setBlocking(bool blocking) { IoHandlePtr IoSocketHandleImpl::duplicate() { auto result = 
Api::OsSysCallsSingleton::get().duplicate(fd_); - RELEASE_ASSERT(result.rc_ != -1, fmt::format("duplicate failed for '{}': ({}) {}", fd_, - result.errno_, errorDetails(result.errno_))); - return std::make_unique(result.rc_, socket_v6only_, domain_); + RELEASE_ASSERT(result.return_value_ != -1, + fmt::format("duplicate failed for '{}': ({}) {}", fd_, result.errno_, + errorDetails(result.errno_))); + return std::make_unique(result.return_value_, socket_v6only_, domain_); } absl::optional IoSocketHandleImpl::domain() { return domain_; } @@ -581,7 +582,7 @@ Address::InstanceConstSharedPtr IoSocketHandleImpl::localAddress() { auto& os_sys_calls = Api::OsSysCallsSingleton::get(); Api::SysCallIntResult result = os_sys_calls.getsockname(fd_, reinterpret_cast(&ss), &ss_len); - if (result.rc_ != 0) { + if (result.return_value_ != 0) { throw EnvoyException(fmt::format("getsockname failed for '{}': ({}) {}", fd_, result.errno_, errorDetails(result.errno_))); } @@ -594,7 +595,7 @@ Address::InstanceConstSharedPtr IoSocketHandleImpl::peerAddress() { auto& os_sys_calls = Api::OsSysCallsSingleton::get(); Api::SysCallIntResult result = os_sys_calls.getpeername(fd_, reinterpret_cast(&ss), &ss_len); - if (result.rc_ != 0) { + if (result.return_value_ != 0) { throw EnvoyException( fmt::format("getpeername failed for '{}': {}", errorDetails(result.errno_))); } @@ -605,7 +606,7 @@ Address::InstanceConstSharedPtr IoSocketHandleImpl::peerAddress() { // mechanisms to hide things, of which there are many). 
ss_len = sizeof ss; result = os_sys_calls.getsockname(fd_, reinterpret_cast(&ss), &ss_len); - if (result.rc_ != 0) { + if (result.return_value_ != 0) { throw EnvoyException( fmt::format("getsockname failed for '{}': {}", fd_, errorDetails(result.errno_))); } @@ -643,7 +644,7 @@ Api::SysCallIntResult IoSocketHandleImpl::shutdown(int how) { absl::optional IoSocketHandleImpl::lastRoundTripTime() { Api::EnvoyTcpInfo info; auto result = Api::OsSysCallsSingleton::get().socketTcpInfo(fd_, &info); - if (!result.rc_) { + if (!result.return_value_) { return {}; } return std::chrono::duration_cast(info.tcpi_rtt); diff --git a/source/common/network/io_socket_handle_impl.h b/source/common/network/io_socket_handle_impl.h index 398ab1f554f5f..ae4129de73e5a 100644 --- a/source/common/network/io_socket_handle_impl.h +++ b/source/common/network/io_socket_handle_impl.h @@ -85,9 +85,9 @@ class IoSocketHandleImpl : public IoHandle, protected Logger::Loggable Api::IoCallUint64Result sysCallResultToIoCallResult(const Api::SysCallResult& result) { - if (result.rc_ >= 0) { + if (result.return_value_ >= 0) { // Return nullptr as IoError upon success. 
- return Api::IoCallUint64Result(result.rc_, + return Api::IoCallUint64Result(result.return_value_, Api::IoErrorPtr(nullptr, IoSocketError::deleteIoError)); } RELEASE_ASSERT(result.errno_ != SOCKET_ERROR_INVAL, "Invalid argument passed in."); diff --git a/source/common/network/listen_socket_impl.cc b/source/common/network/listen_socket_impl.cc index 283ef3fac76de..4c91190581e7c 100644 --- a/source/common/network/listen_socket_impl.cc +++ b/source/common/network/listen_socket_impl.cc @@ -22,7 +22,7 @@ Api::SysCallIntResult ListenSocketImpl::bind(Network::Address::InstanceConstShar address_provider_->setLocalAddress(address); const Api::SysCallIntResult result = SocketImpl::bind(address_provider_->localAddress()); - if (SOCKET_FAILURE(result.rc_)) { + if (SOCKET_FAILURE(result.return_value_)) { close(); throw SocketBindException(fmt::format("cannot bind '{}': {}", address_provider_->localAddress()->asString(), diff --git a/source/common/network/listen_socket_impl.h b/source/common/network/listen_socket_impl.h index b3f10a13c14d7..08eb48d0f2fbc 100644 --- a/source/common/network/listen_socket_impl.h +++ b/source/common/network/listen_socket_impl.h @@ -108,7 +108,7 @@ template class NetworkListenSocket : public ListenSocketImpl { #ifndef WIN32 int on = 1; auto status = setSocketOption(SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)); - RELEASE_ASSERT(status.rc_ != -1, "failed to set SO_REUSEADDR socket option"); + RELEASE_ASSERT(status.return_value_ != -1, "failed to set SO_REUSEADDR socket option"); #endif } }; diff --git a/source/common/network/raw_buffer_socket.cc b/source/common/network/raw_buffer_socket.cc index 788cdc7b1d38e..447da05eeb53c 100644 --- a/source/common/network/raw_buffer_socket.cc +++ b/source/common/network/raw_buffer_socket.cc @@ -21,13 +21,13 @@ IoResult RawBufferSocket::doRead(Buffer::Instance& buffer) { Api::IoCallUint64Result result = callbacks_->ioHandle().read(buffer, absl::nullopt); if (result.ok()) { - ENVOY_CONN_LOG(trace, "read returns: {}", 
callbacks_->connection(), result.rc_); - if (result.rc_ == 0) { + ENVOY_CONN_LOG(trace, "read returns: {}", callbacks_->connection(), result.return_value_); + if (result.return_value_ == 0) { // Remote close. end_stream = true; break; } - bytes_read += result.rc_; + bytes_read += result.return_value_; if (callbacks_->shouldDrainReadBuffer()) { callbacks_->setTransportSocketIsReadable(); break; @@ -64,8 +64,8 @@ IoResult RawBufferSocket::doWrite(Buffer::Instance& buffer, bool end_stream) { Api::IoCallUint64Result result = callbacks_->ioHandle().write(buffer); if (result.ok()) { - ENVOY_CONN_LOG(trace, "write returns: {}", callbacks_->connection(), result.rc_); - bytes_written += result.rc_; + ENVOY_CONN_LOG(trace, "write returns: {}", callbacks_->connection(), result.return_value_); + bytes_written += result.return_value_; } else { ENVOY_CONN_LOG(trace, "write error: {}", callbacks_->connection(), result.err_->getErrorDetails()); diff --git a/source/common/network/socket_impl.cc b/source/common/network/socket_impl.cc index 08505cb65b0a4..76d1bd47c37cf 100644 --- a/source/common/network/socket_impl.cc +++ b/source/common/network/socket_impl.cc @@ -56,9 +56,9 @@ Api::SysCallIntResult SocketImpl::bind(Network::Address::InstanceConstSharedPtr } // Not storing a reference to syscalls singleton because of unit test mocks bind_result = io_handle_->bind(address); - if (pipe->mode() != 0 && !abstract_namespace && bind_result.rc_ == 0) { + if (pipe->mode() != 0 && !abstract_namespace && bind_result.return_value_ == 0) { auto set_permissions = Api::OsSysCallsSingleton::get().chmod(pipe_sa->sun_path, pipe->mode()); - if (set_permissions.rc_ != 0) { + if (set_permissions.return_value_ != 0) { throw EnvoyException(fmt::format("Failed to create socket with mode {}: {}", std::to_string(pipe->mode()), errorDetails(set_permissions.errno_))); @@ -68,7 +68,7 @@ Api::SysCallIntResult SocketImpl::bind(Network::Address::InstanceConstSharedPtr } bind_result = io_handle_->bind(address); - 
if (bind_result.rc_ == 0 && address->ip()->port() == 0) { + if (bind_result.return_value_ == 0 && address->ip()->port() == 0) { address_provider_->setLocalAddress(io_handle_->localAddress()); } return bind_result; diff --git a/source/common/network/socket_interface_impl.cc b/source/common/network/socket_interface_impl.cc index 4847161321e5e..1d87e3295ee9c 100644 --- a/source/common/network/socket_interface_impl.cc +++ b/source/common/network/socket_interface_impl.cc @@ -47,13 +47,13 @@ IoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type, Address::Type } const Api::SysCallSocketResult result = Api::OsSysCallsSingleton::get().socket(domain, flags, 0); - RELEASE_ASSERT(SOCKET_VALID(result.rc_), + RELEASE_ASSERT(SOCKET_VALID(result.return_value_), fmt::format("socket(2) failed, got error: {}", errorDetails(result.errno_))); - IoHandlePtr io_handle = makeSocket(result.rc_, socket_v6only, domain); + IoHandlePtr io_handle = makeSocket(result.return_value_, socket_v6only, domain); #if defined(__APPLE__) || defined(WIN32) // Cannot set SOCK_NONBLOCK as a ::socket flag. - const int rc = io_handle->setBlocking(false).rc_; + const int rc = io_handle->setBlocking(false).return_value_; RELEASE_ASSERT(!SOCKET_FAILURE(rc), ""); #endif @@ -74,7 +74,7 @@ IoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type, // Setting IPV6_V6ONLY restricts the IPv6 socket to IPv6 connections only. 
const Api::SysCallIntResult result = io_handle->setOption( IPPROTO_IPV6, IPV6_V6ONLY, reinterpret_cast(&v6only), sizeof(v6only)); - RELEASE_ASSERT(!SOCKET_FAILURE(result.rc_), ""); + RELEASE_ASSERT(!SOCKET_FAILURE(result.return_value_), ""); } return io_handle; } @@ -82,11 +82,12 @@ IoHandlePtr SocketInterfaceImpl::socket(Socket::Type socket_type, bool SocketInterfaceImpl::ipFamilySupported(int domain) { Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); const Api::SysCallSocketResult result = os_sys_calls.socket(domain, SOCK_STREAM, 0); - if (SOCKET_VALID(result.rc_)) { - RELEASE_ASSERT(os_sys_calls.close(result.rc_).rc_ == 0, - fmt::format("Fail to close fd: response code {}", errorDetails(result.rc_))); + if (SOCKET_VALID(result.return_value_)) { + RELEASE_ASSERT( + os_sys_calls.close(result.return_value_).return_value_ == 0, + fmt::format("Fail to close fd: response code {}", errorDetails(result.return_value_))); } - return SOCKET_VALID(result.rc_); + return SOCKET_VALID(result.return_value_); } Server::BootstrapExtensionPtr diff --git a/source/common/network/socket_option_impl.cc b/source/common/network/socket_option_impl.cc index 53d3bc8efd1ff..929979c8fa128 100644 --- a/source/common/network/socket_option_impl.cc +++ b/source/common/network/socket_option_impl.cc @@ -22,7 +22,7 @@ bool SocketOptionImpl::setOption(Socket& socket, const Api::SysCallIntResult result = SocketOptionImpl::setSocketOption(socket, optname_, value_.data(), value_.size()); - if (result.rc_ != 0) { + if (result.return_value_ != 0) { ENVOY_LOG(warn, "Setting {} option on socket failed: {}", optname_.name(), errorDetails(result.errno_)); return false; diff --git a/source/common/network/udp_listener_impl.cc b/source/common/network/udp_listener_impl.cc index d21ebb0add8a8..f6dcb614d2788 100644 --- a/source/common/network/udp_listener_impl.cc +++ b/source/common/network/udp_listener_impl.cc @@ -112,9 +112,9 @@ Api::IoCallUint64Result UdpListenerImpl::send(const UdpSendData& 
send_data) { Api::IoCallUint64Result send_result = cb_.udpPacketWriter().writePacket(buffer, send_data.local_ip_, send_data.peer_address_); - // The send_result normalizes the rc_ value to 0 in error conditions. + // The send_result normalizes the return_value_ value to 0 in error conditions. // The drain call is hence 'safe' in success and failure cases. - buffer.drain(send_result.rc_); + buffer.drain(send_result.return_value_); return send_result; } diff --git a/source/common/network/utility.cc b/source/common/network/utility.cc index 08b830fd799ca..4145f3f4d5d68 100644 --- a/source/common/network/utility.cc +++ b/source/common/network/utility.cc @@ -110,7 +110,7 @@ Api::IoCallUint64Result receiveMessage(uint64_t max_rx_datagram_size, Buffer::In Api::IoCallUint64Result result = handle.recvmsg(&slice, 1, local_address.ip()->port(), output); if (result.ok()) { - reservation.commit(std::min(max_rx_datagram_size, result.rc_)); + reservation.commit(std::min(max_rx_datagram_size, result.return_value_)); } return result; @@ -405,9 +405,10 @@ Address::InstanceConstSharedPtr Utility::getOriginalDst(Socket& sock) { int status; if (*ipVersion == Address::IpVersion::v4) { - status = sock.getSocketOption(SOL_IP, SO_ORIGINAL_DST, &orig_addr, &addr_len).rc_; + status = sock.getSocketOption(SOL_IP, SO_ORIGINAL_DST, &orig_addr, &addr_len).return_value_; } else { - status = sock.getSocketOption(SOL_IPV6, IP6T_SO_ORIGINAL_DST, &orig_addr, &addr_len).rc_; + status = + sock.getSocketOption(SOL_IPV6, IP6T_SO_ORIGINAL_DST, &orig_addr, &addr_len).return_value_; } if (status != 0) { @@ -557,7 +558,7 @@ Api::IoCallUint64Result Utility::writeToSocket(IoHandle& handle, Buffer::RawSlic send_result.err_->getErrorCode() == Api::IoError::IoErrorCode::Interrupt); if (send_result.ok()) { - ENVOY_LOG_MISC(trace, "sendmsg bytes {}", send_result.rc_); + ENVOY_LOG_MISC(trace, "sendmsg bytes {}", send_result.return_value_); } else { ENVOY_LOG_MISC(debug, "sendmsg failed with error code {}: {}", 
static_cast(send_result.err_->getErrorCode()), @@ -607,13 +608,14 @@ Api::IoCallUint64Result Utility::readFromSocket(IoHandle& handle, } const uint64_t gso_size = output.msg_[0].gso_size_; - ENVOY_LOG_MISC(trace, "gro recvmsg bytes {} with gso_size as {}", result.rc_, gso_size); + ENVOY_LOG_MISC(trace, "gro recvmsg bytes {} with gso_size as {}", result.return_value_, + gso_size); // Skip gso segmentation and proceed as a single payload. if (gso_size == 0u) { - passPayloadToProcessor(result.rc_, std::move(buffer), std::move(output.msg_[0].peer_address_), - std::move(output.msg_[0].local_address_), udp_packet_processor, - receive_time); + passPayloadToProcessor( + result.return_value_, std::move(buffer), std::move(output.msg_[0].peer_address_), + std::move(output.msg_[0].local_address_), udp_packet_processor, receive_time); return result; } @@ -662,7 +664,7 @@ Api::IoCallUint64Result Utility::readFromSocket(IoHandle& handle, return result; } - uint64_t packets_read = result.rc_; + uint64_t packets_read = result.return_value_; ENVOY_LOG_MISC(trace, "recvmmsg read {} packets", packets_read); for (uint64_t i = 0; i < packets_read; ++i) { if (output.msg_[i].truncated_and_dropped_) { @@ -694,11 +696,11 @@ Api::IoCallUint64Result Utility::readFromSocket(IoHandle& handle, return result; } - ENVOY_LOG_MISC(trace, "recvmsg bytes {}", result.rc_); + ENVOY_LOG_MISC(trace, "recvmsg bytes {}", result.return_value_); - passPayloadToProcessor(result.rc_, std::move(buffer), std::move(output.msg_[0].peer_address_), - std::move(output.msg_[0].local_address_), udp_packet_processor, - receive_time); + passPayloadToProcessor( + result.return_value_, std::move(buffer), std::move(output.msg_[0].peer_address_), + std::move(output.msg_[0].local_address_), udp_packet_processor, receive_time); return result; } diff --git a/source/common/network/win32_redirect_records_option_impl.cc b/source/common/network/win32_redirect_records_option_impl.cc index 2786e85df1140..b6dfce8e54105 100644 --- 
a/source/common/network/win32_redirect_records_option_impl.cc +++ b/source/common/network/win32_redirect_records_option_impl.cc @@ -44,7 +44,7 @@ bool Win32RedirectRecordsOptionImpl::setOption( socket.ioctl(ENVOY_SIO_SET_WFP_CONNECTION_REDIRECT_RECORDS, const_cast(reinterpret_cast(redirect_records_.buf_)), redirect_records_.buf_size_, nullptr, 0, &size); - if (result.rc_ != 0) { + if (result.return_value_ != 0) { ENVOY_LOG(warn, "Setting WFP records on socket failed: {}", errorDetails(result.errno_)); return false; } diff --git a/source/common/quic/envoy_quic_packet_writer.cc b/source/common/quic/envoy_quic_packet_writer.cc index 5ee3b5da3a57f..6a3d358bae017 100644 --- a/source/common/quic/envoy_quic_packet_writer.cc +++ b/source/common/quic/envoy_quic_packet_writer.cc @@ -11,7 +11,7 @@ namespace { quic::WriteResult convertToQuicWriteResult(Api::IoCallUint64Result& result) { if (result.ok()) { - return {quic::WRITE_STATUS_OK, static_cast(result.rc_)}; + return {quic::WRITE_STATUS_OK, static_cast(result.return_value_)}; } quic::WriteStatus status = result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again ? 
quic::WRITE_STATUS_BLOCKED diff --git a/source/exe/win32/platform_impl.cc b/source/exe/win32/platform_impl.cc index 8b788a60e586d..7653541cafdcb 100644 --- a/source/exe/win32/platform_impl.cc +++ b/source/exe/win32/platform_impl.cc @@ -30,7 +30,7 @@ BOOL WINAPI CtrlHandler(DWORD fdwCtrlType) { char data[] = {'a'}; Buffer::RawSlice buffer{data, 1}; auto result = handler->writev(&buffer, 1); - RELEASE_ASSERT(result.rc_ == 1, + RELEASE_ASSERT(result.return_value_ == 1, fmt::format("failed to write 1 byte: {}", result.err_->getErrorDetails())); if (fdwCtrlType == CTRL_LOGOFF_EVENT || fdwCtrlType == CTRL_SHUTDOWN_EVENT) { diff --git a/source/exe/win32/service_base.cc b/source/exe/win32/service_base.cc index 0c226a8acf637..1f553b2a37779 100644 --- a/source/exe/win32/service_base.cc +++ b/source/exe/win32/service_base.cc @@ -111,7 +111,7 @@ void ServiceBase::Stop(DWORD control) { char data[] = {'a'}; Buffer::RawSlice buffer{data, 1}; auto result = handler->writev(&buffer, 1); - RELEASE_ASSERT(result.rc_ == 1, + RELEASE_ASSERT(result.return_value_ == 1, fmt::format("failed to write 1 byte: {}", result.err_->getErrorDetails())); } diff --git a/source/extensions/filters/listener/http_inspector/http_inspector.cc b/source/extensions/filters/listener/http_inspector/http_inspector.cc index 2a69f38c77714..59b28e1febcb3 100644 --- a/source/extensions/filters/listener/http_inspector/http_inspector.cc +++ b/source/extensions/filters/listener/http_inspector/http_inspector.cc @@ -98,7 +98,7 @@ Network::FilterStatus Filter::onAccept(Network::ListenerFilterCallbacks& cb) { ParseState Filter::onRead() { auto result = cb_->socket().ioHandle().recv(buf_, Config::MAX_INSPECT_SIZE, MSG_PEEK); - ENVOY_LOG(trace, "http inspector: recv: {}", result.rc_); + ENVOY_LOG(trace, "http inspector: recv: {}", result.return_value_); if (!result.ok()) { if (result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again) { return ParseState::Continue; @@ -108,7 +108,7 @@ ParseState Filter::onRead() { } 
const auto parse_state = - parseHttpHeader(absl::string_view(reinterpret_cast(buf_), result.rc_)); + parseHttpHeader(absl::string_view(reinterpret_cast(buf_), result.return_value_)); switch (parse_state) { case ParseState::Continue: // do nothing but wait for the next event diff --git a/source/extensions/filters/listener/original_dst/original_dst.cc b/source/extensions/filters/listener/original_dst/original_dst.cc index f1465fad803e2..1b737c2c2a1a4 100644 --- a/source/extensions/filters/listener/original_dst/original_dst.cc +++ b/source/extensions/filters/listener/original_dst/original_dst.cc @@ -38,7 +38,7 @@ Network::FilterStatus OriginalDstFilter::onAccept(Network::ListenerFilterCallbac auto status = socket.ioctl(SIO_QUERY_WFP_CONNECTION_REDIRECT_RECORDS, NULL, 0, redirect_records->buf_, sizeof(redirect_records->buf_), &redirect_records->buf_size_); - if (status.rc_ != 0) { + if (status.return_value_ != 0) { ENVOY_LOG(debug, "closing connection: cannot broker connection to original destination " "[Query redirect record failed] with error {}", diff --git a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc index 4a2c2db29c3b7..6e9fa236f38d0 100644 --- a/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc +++ b/source/extensions/filters/listener/proxy_protocol/proxy_protocol.cc @@ -313,10 +313,10 @@ ReadOrParseState Filter::parseExtensions(Network::IoHandle& io_handle, uint8_t* return ReadOrParseState::Error; } - proxy_protocol_header_.value().extensions_length_ -= recv_result.rc_; + proxy_protocol_header_.value().extensions_length_ -= recv_result.return_value_; if (nullptr != buf_off) { - *buf_off += recv_result.rc_; + *buf_off += recv_result.return_value_; } } @@ -427,7 +427,7 @@ ReadOrParseState Filter::readProxyHeader(Network::IoHandle& io_handle) { ENVOY_LOG(debug, "failed to read proxy protocol (no bytes read)"); return ReadOrParseState::Error; } - ssize_t 
nread = result.rc_; + ssize_t nread = result.return_value_; if (nread < 1) { ENVOY_LOG(debug, "failed to read proxy protocol (no bytes read)"); @@ -454,12 +454,12 @@ ReadOrParseState Filter::readProxyHeader(Network::IoHandle& io_handle) { if (buf_off_ < PROXY_PROTO_V2_HEADER_LEN) { ssize_t exp = PROXY_PROTO_V2_HEADER_LEN - buf_off_; const auto read_result = io_handle.recv(buf_ + buf_off_, exp, 0); - if (!result.ok() || read_result.rc_ != uint64_t(exp)) { + if (!result.ok() || read_result.return_value_ != uint64_t(exp)) { ENVOY_LOG(debug, "failed to read proxy protocol (remote closed)"); return ReadOrParseState::Error; } - buf_off_ += read_result.rc_; - nread -= read_result.rc_; + buf_off_ += read_result.return_value_; + nread -= read_result.return_value_; } absl::optional addr_len_opt = lenV2Address(buf_); if (!addr_len_opt.has_value()) { @@ -476,11 +476,11 @@ ReadOrParseState Filter::readProxyHeader(Network::IoHandle& io_handle) { if (ssize_t(buf_off_) + nread >= PROXY_PROTO_V2_HEADER_LEN + addr_len) { ssize_t missing = (PROXY_PROTO_V2_HEADER_LEN + addr_len) - buf_off_; const auto read_result = io_handle.recv(buf_ + buf_off_, missing, 0); - if (!result.ok() || read_result.rc_ != uint64_t(missing)) { + if (!result.ok() || read_result.return_value_ != uint64_t(missing)) { ENVOY_LOG(debug, "failed to read proxy protocol (remote closed)"); return ReadOrParseState::Error; } - buf_off_ += read_result.rc_; + buf_off_ += read_result.return_value_; // The TLV remain, they are read/discard in parseExtensions() which is called from the // parent (if needed). 
if (parseV2Header(buf_)) { @@ -490,7 +490,7 @@ ReadOrParseState Filter::readProxyHeader(Network::IoHandle& io_handle) { } } else { const auto result = io_handle.recv(buf_ + buf_off_, nread, 0); - nread = result.rc_; + nread = result.return_value_; if (!result.ok()) { ENVOY_LOG(debug, "failed to read proxy protocol (remote closed)"); return ReadOrParseState::Error; @@ -524,7 +524,7 @@ ReadOrParseState Filter::readProxyHeader(Network::IoHandle& io_handle) { } const auto result = io_handle.recv(buf_ + buf_off_, ntoread, 0); - nread = result.rc_; + nread = result.return_value_; ASSERT(result.ok() && size_t(nread) == ntoread); buf_off_ += nread; diff --git a/source/extensions/filters/listener/tls_inspector/tls_inspector.cc b/source/extensions/filters/listener/tls_inspector/tls_inspector.cc index c2798827f6c91..fb0f06e6c2d56 100644 --- a/source/extensions/filters/listener/tls_inspector/tls_inspector.cc +++ b/source/extensions/filters/listener/tls_inspector/tls_inspector.cc @@ -166,7 +166,7 @@ ParseState Filter::onRead() { // TODO(ggreenway): write an integration test to ensure the events work as expected on all // platforms. const auto result = cb_->socket().ioHandle().recv(buf_, config_->maxClientHelloSize(), MSG_PEEK); - ENVOY_LOG(trace, "tls inspector: recv: {}", result.rc_); + ENVOY_LOG(trace, "tls inspector: recv: {}", result.return_value_); if (!result.ok()) { if (result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again) { @@ -178,10 +178,10 @@ ParseState Filter::onRead() { // Because we're doing a MSG_PEEK, data we've seen before gets returned every time, so // skip over what we've already processed. 
- if (static_cast(result.rc_) > read_) { + if (static_cast(result.return_value_) > read_) { const uint8_t* data = buf_ + read_; - const size_t len = result.rc_ - read_; - read_ = result.rc_; + const size_t len = result.return_value_ - read_; + read_ = result.return_value_; return parseClientHello(data, len); } return ParseState::Continue; diff --git a/source/extensions/tracers/xray/daemon_broker.cc b/source/extensions/tracers/xray/daemon_broker.cc index 2734996be51f8..6d4406b5d51e0 100644 --- a/source/extensions/tracers/xray/daemon_broker.cc +++ b/source/extensions/tracers/xray/daemon_broker.cc @@ -42,7 +42,7 @@ void DaemonBrokerImpl::send(const std::string& data) const { const auto rc = Network::Utility::writeToSocket(*io_handle_, &buf, 1 /*num_slices*/, nullptr /*local_ip*/, *address_); - if (rc.rc_ != payload.length()) { + if (rc.return_value_ != payload.length()) { // TODO(marcomagdy): report this in stats ENVOY_LOG_TO_LOGGER(logger, debug, "Failed to send trace payload to the X-Ray daemon."); } diff --git a/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc index 66a93f45e9021..4488396f3a513 100644 --- a/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc +++ b/source/extensions/transport_sockets/proxy_protocol/proxy_protocol.cc @@ -85,8 +85,8 @@ Network::IoResult UpstreamProxyProtocolSocket::writeHeader() { Api::IoCallUint64Result result = callbacks_->ioHandle().write(header_buffer_); if (result.ok()) { - ENVOY_CONN_LOG(trace, "write returns: {}", callbacks_->connection(), result.rc_); - bytes_written += result.rc_; + ENVOY_CONN_LOG(trace, "write returns: {}", callbacks_->connection(), result.return_value_); + bytes_written += result.return_value_; } else { ENVOY_CONN_LOG(trace, "write error: {}", callbacks_->connection(), result.err_->getErrorDetails()); diff --git a/source/extensions/transport_sockets/tls/io_handle_bio.cc 
b/source/extensions/transport_sockets/tls/io_handle_bio.cc index f7ede00cd45a7..3b595f7062aa2 100644 --- a/source/extensions/transport_sockets/tls/io_handle_bio.cc +++ b/source/extensions/transport_sockets/tls/io_handle_bio.cc @@ -61,7 +61,7 @@ int io_handle_read(BIO* b, char* out, int outl) { } return -1; } - return result.rc_; + return result.return_value_; } // NOLINTNEXTLINE(readability-identifier-naming) @@ -78,7 +78,7 @@ int io_handle_write(BIO* b, const char* in, int inl) { } return -1; } - return result.rc_; + return result.return_value_; } // NOLINTNEXTLINE(readability-identifier-naming) diff --git a/source/server/hot_restart_impl.cc b/source/server/hot_restart_impl.cc index 3b2160c9eb63e..347167217b957 100644 --- a/source/server/hot_restart_impl.cc +++ b/source/server/hot_restart_impl.cc @@ -39,20 +39,20 @@ SharedMemory* attachSharedMemory(uint32_t base_id, uint32_t restart_epoch) { const Api::SysCallIntResult result = hot_restart_os_sys_calls.shmOpen(shmem_name.c_str(), flags, S_IRUSR | S_IWUSR); - if (result.rc_ == -1) { + if (result.return_value_ == -1) { PANIC(fmt::format("cannot open shared memory region {} check user permissions. 
Error: {}", shmem_name, errorDetails(result.errno_))); } if (restart_epoch == 0) { const Api::SysCallIntResult truncateRes = - os_sys_calls.ftruncate(result.rc_, sizeof(SharedMemory)); - RELEASE_ASSERT(truncateRes.rc_ != -1, ""); + os_sys_calls.ftruncate(result.return_value_, sizeof(SharedMemory)); + RELEASE_ASSERT(truncateRes.return_value_ != -1, ""); } const Api::SysCallPtrResult mmapRes = os_sys_calls.mmap( - nullptr, sizeof(SharedMemory), PROT_READ | PROT_WRITE, MAP_SHARED, result.rc_, 0); - SharedMemory* shmem = reinterpret_cast(mmapRes.rc_); + nullptr, sizeof(SharedMemory), PROT_READ | PROT_WRITE, MAP_SHARED, result.return_value_, 0); + SharedMemory* shmem = reinterpret_cast(mmapRes.return_value_); RELEASE_ASSERT(shmem != MAP_FAILED, ""); RELEASE_ASSERT((reinterpret_cast(shmem) % alignof(decltype(shmem))) == 0, ""); diff --git a/source/server/hot_restarting_base.cc b/source/server/hot_restarting_base.cc index 22e8286ca02d2..bc1e164b8effa 100644 --- a/source/server/hot_restarting_base.cc +++ b/source/server/hot_restarting_base.cc @@ -20,7 +20,7 @@ HotRestartingBase::~HotRestartingBase() { if (my_domain_socket_ != -1) { Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); Api::SysCallIntResult result = os_sys_calls.close(my_domain_socket_); - ASSERT(result.rc_ == 0); + ASSERT(result.return_value_ == 0); } } @@ -56,7 +56,7 @@ void HotRestartingBase::bindDomainSocket(uint64_t id, const std::string& role, unlink(address.sun_path); Api::SysCallIntResult result = os_sys_calls.bind(my_domain_socket_, reinterpret_cast(&address), sizeof(address)); - if (result.rc_ != 0) { + if (result.return_value_ != 0) { const auto msg = fmt::format( "unable to bind domain socket with base_id={}, id={}, errno={} (see --base-id option)", base_id_, id, result.errno_); @@ -119,7 +119,7 @@ void HotRestartingBase::sendHotRestartMessage(sockaddr_un& address, bool sent = false; for (int i = 0; i < SENDMSG_MAX_RETRIES; i++) { auto result = os_sys_calls.sendmsg(my_domain_socket_, 
&message, 0); - rc = result.rc_; + rc = result.return_value_; saved_errno = result.errno_; if (rc == static_cast(cur_chunk_size)) { diff --git a/source/server/options_impl_platform_linux.cc b/source/server/options_impl_platform_linux.cc index 9365614dc1840..f649f7a6f0305 100644 --- a/source/server/options_impl_platform_linux.cc +++ b/source/server/options_impl_platform_linux.cc @@ -22,7 +22,7 @@ uint32_t OptionsImplPlatformLinux::getCpuAffinityCount(unsigned int hw_threads) CPU_ZERO(&mask); const Api::SysCallIntResult result = linux_os_syscalls.sched_getaffinity(pid, sizeof(cpu_set_t), &mask); - if (result.rc_ == -1) { + if (result.return_value_ == -1) { // Fall back to number of hardware threads. return hw_threads; } diff --git a/test/common/buffer/buffer_fuzz.cc b/test/common/buffer/buffer_fuzz.cc index 46241032beff0..9d7cb96004e1c 100644 --- a/test/common/buffer/buffer_fuzz.cc +++ b/test/common/buffer/buffer_fuzz.cc @@ -361,7 +361,7 @@ uint32_t bufferAction(Context& ctxt, char insert_value, uint32_t max_alloc, Buff const ssize_t rc = ::write(pipe_fds[1], data.data(), max_length); FUZZ_ASSERT(rc > 0); Api::IoCallUint64Result result = io_handle.read(target_buffer, max_length); - FUZZ_ASSERT(result.rc_ == static_cast(rc)); + FUZZ_ASSERT(result.return_value_ == static_cast(rc)); FUZZ_ASSERT(::close(pipe_fds[1]) == 0); break; } @@ -371,23 +371,24 @@ uint32_t bufferAction(Context& ctxt, char insert_value, uint32_t max_alloc, Buff Network::IoSocketHandleImpl io_handle(pipe_fds[1]); FUZZ_ASSERT(::fcntl(pipe_fds[0], F_SETFL, O_NONBLOCK) == 0); FUZZ_ASSERT(::fcntl(pipe_fds[1], F_SETFL, O_NONBLOCK) == 0); - uint64_t rc; + uint64_t return_value; do { const bool empty = target_buffer.length() == 0; const std::string previous_data = target_buffer.toString(); const auto result = io_handle.write(target_buffer); FUZZ_ASSERT(result.ok()); - rc = result.rc_; - ENVOY_LOG_MISC(trace, "Write rc: {} errno: {}", rc, + return_value = result.return_value_; + ENVOY_LOG_MISC(trace, "Write 
return_value: {} errno: {}", return_value, result.err_ != nullptr ? result.err_->getErrorDetails() : "-"); if (empty) { - FUZZ_ASSERT(rc == 0); + FUZZ_ASSERT(return_value == 0); } else { - auto buf = std::make_unique(rc); - FUZZ_ASSERT(static_cast(::read(pipe_fds[0], buf.get(), rc)) == rc); - FUZZ_ASSERT(::memcmp(buf.get(), previous_data.data(), rc) == 0); + auto buf = std::make_unique(return_value); + FUZZ_ASSERT(static_cast(::read(pipe_fds[0], buf.get(), return_value)) == + return_value); + FUZZ_ASSERT(::memcmp(buf.get(), previous_data.data(), return_value) == 0); } - } while (rc > 0); + } while (return_value > 0); FUZZ_ASSERT(::close(pipe_fds[0]) == 0); break; } diff --git a/test/common/buffer/owned_impl_test.cc b/test/common/buffer/owned_impl_test.cc index ea5cf249ae1dd..55a877a01370e 100644 --- a/test/common/buffer/owned_impl_test.cc +++ b/test/common/buffer/owned_impl_test.cc @@ -263,44 +263,44 @@ TEST_F(OwnedImplTest, Write) { EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{7, 0})); Api::IoCallUint64Result result = io_handle.write(buffer); EXPECT_TRUE(result.ok()); - EXPECT_EQ(7, result.rc_); + EXPECT_EQ(7, result.return_value_); EXPECT_EQ(0, buffer.length()); buffer.add("example"); EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{6, 0})); result = io_handle.write(buffer); EXPECT_TRUE(result.ok()); - EXPECT_EQ(6, result.rc_); + EXPECT_EQ(6, result.return_value_); EXPECT_EQ(1, buffer.length()); EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{0, 0})); result = io_handle.write(buffer); EXPECT_TRUE(result.ok()); - EXPECT_EQ(0, result.rc_); + EXPECT_EQ(0, result.return_value_); EXPECT_EQ(1, buffer.length()); EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{-1, 0})); result = io_handle.write(buffer); EXPECT_EQ(Api::IoError::IoErrorCode::UnknownError, result.err_->getErrorCode()); - EXPECT_EQ(0, result.rc_); + EXPECT_EQ(0, 
result.return_value_); EXPECT_EQ(1, buffer.length()); EXPECT_CALL(os_sys_calls, writev(_, _, _)) .WillOnce(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_AGAIN})); result = io_handle.write(buffer); EXPECT_EQ(Api::IoError::IoErrorCode::Again, result.err_->getErrorCode()); - EXPECT_EQ(0, result.rc_); + EXPECT_EQ(0, result.return_value_); EXPECT_EQ(1, buffer.length()); EXPECT_CALL(os_sys_calls, writev(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{1, 0})); result = io_handle.write(buffer); EXPECT_TRUE(result.ok()); - EXPECT_EQ(1, result.rc_); + EXPECT_EQ(1, result.return_value_); EXPECT_EQ(0, buffer.length()); EXPECT_CALL(os_sys_calls, writev(_, _, _)).Times(0); result = io_handle.write(buffer); - EXPECT_EQ(0, result.rc_); + EXPECT_EQ(0, result.return_value_); EXPECT_EQ(0, buffer.length()); } @@ -313,14 +313,14 @@ TEST_F(OwnedImplTest, Read) { EXPECT_CALL(os_sys_calls, readv(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{0, 0})); Api::IoCallUint64Result result = io_handle.read(buffer, 100); EXPECT_TRUE(result.ok()); - EXPECT_EQ(0, result.rc_); + EXPECT_EQ(0, result.return_value_); EXPECT_EQ(0, buffer.length()); EXPECT_THAT(buffer.describeSlicesForTest(), testing::IsEmpty()); EXPECT_CALL(os_sys_calls, readv(_, _, _)).WillOnce(Return(Api::SysCallSizeResult{-1, 0})); result = io_handle.read(buffer, 100); EXPECT_EQ(Api::IoError::IoErrorCode::UnknownError, result.err_->getErrorCode()); - EXPECT_EQ(0, result.rc_); + EXPECT_EQ(0, result.return_value_); EXPECT_EQ(0, buffer.length()); EXPECT_THAT(buffer.describeSlicesForTest(), testing::IsEmpty()); @@ -328,13 +328,13 @@ TEST_F(OwnedImplTest, Read) { .WillOnce(Return(Api::SysCallSizeResult{-1, SOCKET_ERROR_AGAIN})); result = io_handle.read(buffer, 100); EXPECT_EQ(Api::IoError::IoErrorCode::Again, result.err_->getErrorCode()); - EXPECT_EQ(0, result.rc_); + EXPECT_EQ(0, result.return_value_); EXPECT_EQ(0, buffer.length()); EXPECT_THAT(buffer.describeSlicesForTest(), testing::IsEmpty()); EXPECT_CALL(os_sys_calls, readv(_, _, 
_)).Times(0); result = io_handle.read(buffer, 0); - EXPECT_EQ(0, result.rc_); + EXPECT_EQ(0, result.return_value_); EXPECT_EQ(0, buffer.length()); EXPECT_THAT(buffer.describeSlicesForTest(), testing::IsEmpty()); } @@ -1151,21 +1151,21 @@ TEST_F(OwnedImplTest, ReserveZeroCommit) { os_fd_t pipe_fds[2] = {0, 0}; auto& os_sys_calls = Api::OsSysCallsSingleton::get(); #ifdef WIN32 - ASSERT_EQ(os_sys_calls.socketpair(AF_INET, SOCK_STREAM, 0, pipe_fds).rc_, 0); + ASSERT_EQ(os_sys_calls.socketpair(AF_INET, SOCK_STREAM, 0, pipe_fds).return_value_, 0); #else ASSERT_EQ(pipe(pipe_fds), 0); #endif Network::IoSocketHandleImpl io_handle(pipe_fds[0]); - ASSERT_EQ(os_sys_calls.setsocketblocking(pipe_fds[0], false).rc_, 0); - ASSERT_EQ(os_sys_calls.setsocketblocking(pipe_fds[1], false).rc_, 0); + ASSERT_EQ(os_sys_calls.setsocketblocking(pipe_fds[0], false).return_value_, 0); + ASSERT_EQ(os_sys_calls.setsocketblocking(pipe_fds[1], false).return_value_, 0); const uint32_t max_length = 1953; std::string data(max_length, 'e'); - const ssize_t rc = os_sys_calls.write(pipe_fds[1], data.data(), max_length).rc_; + const ssize_t rc = os_sys_calls.write(pipe_fds[1], data.data(), max_length).return_value_; ASSERT_GT(rc, 0); const uint32_t previous_length = buf.length(); Api::IoCallUint64Result result = io_handle.read(buf, max_length); - ASSERT_EQ(result.rc_, static_cast(rc)); - ASSERT_EQ(os_sys_calls.close(pipe_fds[1]).rc_, 0); + ASSERT_EQ(result.return_value_, static_cast(rc)); + ASSERT_EQ(os_sys_calls.close(pipe_fds[1]).return_value_, 0); ASSERT_EQ(previous_length, buf.search(data.data(), rc, previous_length, 0)); EXPECT_EQ("bbbbb", buf.toString().substr(0, 5)); expectSlices({{5, 0, 4096}, {1953, 14431, 16384}}, buf); @@ -1179,21 +1179,21 @@ TEST_F(OwnedImplTest, ReadReserveAndCommit) { os_fd_t pipe_fds[2] = {0, 0}; auto& os_sys_calls = Api::OsSysCallsSingleton::get(); #ifdef WIN32 - ASSERT_EQ(os_sys_calls.socketpair(AF_INET, SOCK_STREAM, 0, pipe_fds).rc_, 0); + 
ASSERT_EQ(os_sys_calls.socketpair(AF_INET, SOCK_STREAM, 0, pipe_fds).return_value_, 0); #else ASSERT_EQ(pipe(pipe_fds), 0); #endif Network::IoSocketHandleImpl io_handle(pipe_fds[0]); - ASSERT_EQ(os_sys_calls.setsocketblocking(pipe_fds[0], false).rc_, 0); - ASSERT_EQ(os_sys_calls.setsocketblocking(pipe_fds[1], false).rc_, 0); + ASSERT_EQ(os_sys_calls.setsocketblocking(pipe_fds[0], false).return_value_, 0); + ASSERT_EQ(os_sys_calls.setsocketblocking(pipe_fds[1], false).return_value_, 0); const uint32_t read_length = 32768; std::string data = "e"; - const ssize_t rc = os_sys_calls.write(pipe_fds[1], data.data(), data.size()).rc_; + const ssize_t rc = os_sys_calls.write(pipe_fds[1], data.data(), data.size()).return_value_; ASSERT_GT(rc, 0); Api::IoCallUint64Result result = io_handle.read(buf, read_length); - ASSERT_EQ(result.rc_, static_cast(rc)); - ASSERT_EQ(os_sys_calls.close(pipe_fds[1]).rc_, 0); + ASSERT_EQ(result.return_value_, static_cast(rc)); + ASSERT_EQ(os_sys_calls.close(pipe_fds[1]).return_value_, 0); EXPECT_EQ("bbbbbe", buf.toString()); expectSlices({{6, 4090, 4096}}, buf); } diff --git a/test/common/buffer/watermark_buffer_test.cc b/test/common/buffer/watermark_buffer_test.cc index 548318a8065ab..071d9f1169067 100644 --- a/test/common/buffer/watermark_buffer_test.cc +++ b/test/common/buffer/watermark_buffer_test.cc @@ -247,7 +247,7 @@ TEST_F(WatermarkBufferTest, WatermarkFdFunctions) { os_fd_t pipe_fds[2] = {0, 0}; #ifdef WIN32 auto& os_sys_calls = Api::OsSysCallsSingleton::get(); - ASSERT_EQ(0, os_sys_calls.socketpair(AF_INET, SOCK_STREAM, 0, pipe_fds).rc_); + ASSERT_EQ(0, os_sys_calls.socketpair(AF_INET, SOCK_STREAM, 0, pipe_fds).return_value_); #else ASSERT_EQ(0, pipe(pipe_fds)); #endif @@ -264,7 +264,7 @@ TEST_F(WatermarkBufferTest, WatermarkFdFunctions) { if (!result.ok()) { ASSERT_EQ(Api::IoError::IoErrorCode::Again, result.err_->getErrorCode()); } else { - bytes_written_total += result.rc_; + bytes_written_total += result.return_value_; } } 
EXPECT_EQ(1, times_high_watermark_called_); @@ -275,7 +275,7 @@ TEST_F(WatermarkBufferTest, WatermarkFdFunctions) { Network::IoSocketHandleImpl io_handle2(pipe_fds[0]); while (bytes_read_total < 20) { Api::IoCallUint64Result result = io_handle2.read(buffer_, 20); - bytes_read_total += result.rc_; + bytes_read_total += result.return_value_; } EXPECT_EQ(2, times_high_watermark_called_); EXPECT_EQ(20, buffer_.length()); diff --git a/test/common/event/dispatcher_impl_test.cc b/test/common/event/dispatcher_impl_test.cc index 240a9485196fd..7538a8c495c07 100644 --- a/test/common/event/dispatcher_impl_test.cc +++ b/test/common/event/dispatcher_impl_test.cc @@ -1534,7 +1534,7 @@ TEST_F(DispatcherWithWatchdogTest, TouchBeforeTimer) { } TEST_F(DispatcherWithWatchdogTest, TouchBeforeFdEvent) { - os_fd_t fd = os_sys_calls_.socket(AF_INET6, SOCK_DGRAM, 0).rc_; + os_fd_t fd = os_sys_calls_.socket(AF_INET6, SOCK_DGRAM, 0).return_value_; ASSERT_TRUE(SOCKET_VALID(fd)); ReadyWatcher watcher; diff --git a/test/common/event/file_event_impl_test.cc b/test/common/event/file_event_impl_test.cc index 67986afab8da6..6bcd002d3c390 100644 --- a/test/common/event/file_event_impl_test.cc +++ b/test/common/event/file_event_impl_test.cc @@ -25,14 +25,14 @@ class FileEventImplTest : public testing::Test { void SetUp() override { #ifdef WIN32 - ASSERT_EQ(0, os_sys_calls_.socketpair(AF_INET, SOCK_STREAM, 0, fds_).rc_); + ASSERT_EQ(0, os_sys_calls_.socketpair(AF_INET, SOCK_STREAM, 0, fds_).return_value_); #else - ASSERT_EQ(0, os_sys_calls_.socketpair(AF_UNIX, SOCK_DGRAM, 0, fds_).rc_); + ASSERT_EQ(0, os_sys_calls_.socketpair(AF_UNIX, SOCK_DGRAM, 0, fds_).return_value_); #endif int data = 1; const Api::SysCallSizeResult result = os_sys_calls_.write(fds_[1], &data, sizeof(data)); - ASSERT_EQ(sizeof(data), static_cast(result.rc_)); + ASSERT_EQ(sizeof(data), static_cast(result.return_value_)); } void clearReadable() { @@ -42,8 +42,8 @@ class FileEventImplTest : public testing::Test { buffer, 
sizeof(buffer) }; const Api::SysCallSizeResult result = os_sys_calls_.readv(fds_[0], &vec, 1); - EXPECT_LT(0, static_cast(result.rc_)); - EXPECT_GT(sizeof(buffer), static_cast(result.rc_)); + EXPECT_LT(0, static_cast(result.return_value_)); + EXPECT_GT(sizeof(buffer), static_cast(result.return_value_)); } void TearDown() override { @@ -79,7 +79,7 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, FileEventImplActivateTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest())); TEST_P(FileEventImplActivateTest, Activate) { - os_fd_t fd = os_sys_calls_.socket(domain(), SOCK_STREAM, 0).rc_; + os_fd_t fd = os_sys_calls_.socket(domain(), SOCK_STREAM, 0).return_value_; ASSERT_TRUE(SOCKET_VALID(fd)); Api::ApiPtr api = Api::createApiForTest(); @@ -117,7 +117,7 @@ TEST_P(FileEventImplActivateTest, Activate) { } TEST_P(FileEventImplActivateTest, ActivateChaining) { - os_fd_t fd = os_sys_calls_.socket(domain(), SOCK_DGRAM, 0).rc_; + os_fd_t fd = os_sys_calls_.socket(domain(), SOCK_DGRAM, 0).return_value_; ASSERT_TRUE(SOCKET_VALID(fd)); Api::ApiPtr api = Api::createApiForTest(); @@ -181,7 +181,7 @@ TEST_P(FileEventImplActivateTest, ActivateChaining) { } TEST_P(FileEventImplActivateTest, SetEnableCancelsActivate) { - os_fd_t fd = os_sys_calls_.socket(domain(), SOCK_DGRAM, 0).rc_; + os_fd_t fd = os_sys_calls_.socket(domain(), SOCK_DGRAM, 0).return_value_; ASSERT_TRUE(SOCKET_VALID(fd)); Api::ApiPtr api = Api::createApiForTest(); diff --git a/test/common/filesystem/filesystem_impl_test.cc b/test/common/filesystem/filesystem_impl_test.cc index fe21296388147..a079d2e422e25 100644 --- a/test/common/filesystem/filesystem_impl_test.cc +++ b/test/common/filesystem/filesystem_impl_test.cc @@ -112,13 +112,15 @@ TEST_F(FileSystemImplTest, FileReadToEndDenylisted) { } #ifndef WIN32 -TEST_F(FileSystemImplTest, CanonicalPathSuccess) { EXPECT_EQ("/", canonicalPath("//").rc_); } +TEST_F(FileSystemImplTest, CanonicalPathSuccess) { + EXPECT_EQ("/", canonicalPath("//").return_value_); +} #endif #ifndef 
WIN32 TEST_F(FileSystemImplTest, CanonicalPathFail) { const Api::SysCallStringResult result = canonicalPath("/_some_non_existent_file"); - EXPECT_TRUE(result.rc_.empty()); + EXPECT_TRUE(result.return_value_.empty()); EXPECT_EQ("No such file or directory", errorDetails(result.errno_)); } #endif @@ -262,7 +264,7 @@ TEST_F(FileSystemImplTest, Open) { FilePathAndType new_file_info{Filesystem::DestinationType::File, new_file_path}; FilePtr file = file_system_.createFile(new_file_info); const Api::IoCallBoolResult result = file->open(DefaultFlags); - EXPECT_TRUE(result.rc_); + EXPECT_TRUE(result.return_value_); EXPECT_TRUE(file->isOpen()); } @@ -276,7 +278,7 @@ TEST_F(FileSystemImplTest, OpenReadOnly) { FilePathAndType new_file_info{Filesystem::DestinationType::File, new_file_path}; FilePtr file = file_system_.createFile(new_file_info); const Api::IoCallBoolResult result = file->open(ReadOnlyFlags); - EXPECT_TRUE(result.rc_); + EXPECT_TRUE(result.return_value_); EXPECT_TRUE(file->isOpen()); } @@ -290,13 +292,13 @@ TEST_F(FileSystemImplTest, OpenTwice) { const Api::IoCallBoolResult result1 = file->open(DefaultFlags); const filesystem_os_id_t initial_fd = getFd(file.get()); - EXPECT_TRUE(result1.rc_); + EXPECT_TRUE(result1.return_value_); EXPECT_TRUE(file->isOpen()); // check that we don't leak a file descriptor const Api::IoCallBoolResult result2 = file->open(DefaultFlags); EXPECT_EQ(initial_fd, getFd(file.get())); - EXPECT_TRUE(result2.rc_); + EXPECT_TRUE(result2.return_value_); EXPECT_TRUE(file->isOpen()); } @@ -304,7 +306,7 @@ TEST_F(FileSystemImplTest, OpenBadFilePath) { FilePathAndType new_file_info{Filesystem::DestinationType::File, ""}; FilePtr file = file_system_.createFile(new_file_info); const Api::IoCallBoolResult result = file->open(DefaultFlags); - EXPECT_FALSE(result.rc_); + EXPECT_FALSE(result.return_value_); } TEST_F(FileSystemImplTest, ExistingFile) { @@ -315,10 +317,10 @@ TEST_F(FileSystemImplTest, ExistingFile) { FilePathAndType 
new_file_info{Filesystem::DestinationType::File, file_path}; FilePtr file = file_system_.createFile(new_file_info); const Api::IoCallBoolResult open_result = file->open(DefaultFlags); - EXPECT_TRUE(open_result.rc_); + EXPECT_TRUE(open_result.return_value_); std::string data(" new data"); const Api::IoCallSizeResult result = file->write(data); - EXPECT_EQ(data.length(), result.rc_); + EXPECT_EQ(data.length(), result.return_value_); } auto contents = TestEnvironment::readFileToStringForTest(file_path); @@ -333,10 +335,10 @@ TEST_F(FileSystemImplTest, NonExistingFile) { FilePathAndType new_file_info{Filesystem::DestinationType::File, new_file_path}; FilePtr file = file_system_.createFile(new_file_info); const Api::IoCallBoolResult open_result = file->open(DefaultFlags); - EXPECT_TRUE(open_result.rc_); + EXPECT_TRUE(open_result.return_value_); std::string data(" new data"); const Api::IoCallSizeResult result = file->write(data); - EXPECT_EQ(data.length(), result.rc_); + EXPECT_EQ(data.length(), result.return_value_); } auto contents = TestEnvironment::readFileToStringForTest(new_file_path); @@ -347,22 +349,24 @@ TEST_F(FileSystemImplTest, StdOut) { FilePathAndType file_info{Filesystem::DestinationType::Stdout, ""}; FilePtr file = file_system_.createFile(file_info); const Api::IoCallBoolResult open_result = file->open(DefaultFlags); - EXPECT_TRUE(open_result.rc_); + EXPECT_TRUE(open_result.return_value_); EXPECT_TRUE(file->isOpen()); std::string data(" new data\n"); const Api::IoCallSizeResult result = file->write(data); - EXPECT_EQ(data.length(), result.rc_) << fmt::format("{}", result.err_->getErrorDetails()); + EXPECT_EQ(data.length(), result.return_value_) + << fmt::format("{}", result.err_->getErrorDetails()); } TEST_F(FileSystemImplTest, StdErr) { FilePathAndType file_info{Filesystem::DestinationType::Stderr, ""}; FilePtr file = file_system_.createFile(file_info); const Api::IoCallBoolResult open_result = file->open(DefaultFlags); - EXPECT_TRUE(open_result.rc_) << 
fmt::format("{}", open_result.err_->getErrorDetails()); + EXPECT_TRUE(open_result.return_value_) << fmt::format("{}", open_result.err_->getErrorDetails()); EXPECT_TRUE(file->isOpen()); std::string data(" new data\n"); const Api::IoCallSizeResult result = file->write(data); - EXPECT_EQ(data.length(), result.rc_) << fmt::format("{}", result.err_->getErrorDetails()); + EXPECT_EQ(data.length(), result.return_value_) + << fmt::format("{}", result.err_->getErrorDetails()); } #ifdef WIN32 @@ -375,7 +379,7 @@ TEST_F(FileSystemImplTest, Win32InvalidHandleThrows) { auto original_handle = GetStdHandle(STD_OUTPUT_HANDLE); EXPECT_TRUE(SetStdHandle(STD_OUTPUT_HANDLE, NULL)); const Api::IoCallBoolResult result = file->open(DefaultFlags); - EXPECT_FALSE(result.rc_); + EXPECT_FALSE(result.return_value_); EXPECT_TRUE(SetStdHandle(STD_OUTPUT_HANDLE, original_handle)); } #endif @@ -387,11 +391,11 @@ TEST_F(FileSystemImplTest, Close) { FilePathAndType new_file_info{Filesystem::DestinationType::File, new_file_path}; FilePtr file = file_system_.createFile(new_file_info); const Api::IoCallBoolResult result1 = file->open(DefaultFlags); - EXPECT_TRUE(result1.rc_); + EXPECT_TRUE(result1.return_value_); EXPECT_TRUE(file->isOpen()); const Api::IoCallBoolResult result2 = file->close(); - EXPECT_TRUE(result2.rc_); + EXPECT_TRUE(result2.return_value_); EXPECT_FALSE(file->isOpen()); } @@ -402,11 +406,11 @@ TEST_F(FileSystemImplTest, WriteAfterClose) { FilePathAndType new_file_info{Filesystem::DestinationType::File, new_file_path}; FilePtr file = file_system_.createFile(new_file_info); const Api::IoCallBoolResult bool_result1 = file->open(DefaultFlags); - EXPECT_TRUE(bool_result1.rc_); + EXPECT_TRUE(bool_result1.return_value_); const Api::IoCallBoolResult bool_result2 = file->close(); - EXPECT_TRUE(bool_result2.rc_); + EXPECT_TRUE(bool_result2.return_value_); const Api::IoCallSizeResult size_result = file->write(" new data"); - EXPECT_EQ(-1, size_result.rc_); + EXPECT_EQ(-1, 
size_result.return_value_); EXPECT_EQ(IoFileError::IoErrorCode::BadFd, size_result.err_->getErrorCode()); } @@ -418,7 +422,7 @@ TEST_F(FileSystemImplTest, NonExistingFileAndReadOnly) { FilePathAndType new_file_info{Filesystem::DestinationType::File, new_file_path}; FilePtr file = file_system_.createFile(new_file_info); const Api::IoCallBoolResult open_result = file->open(flag); - EXPECT_FALSE(open_result.rc_); + EXPECT_FALSE(open_result.return_value_); } TEST_F(FileSystemImplTest, ExistingReadOnlyFileAndWrite) { @@ -430,10 +434,10 @@ TEST_F(FileSystemImplTest, ExistingReadOnlyFileAndWrite) { FilePathAndType new_file_info{Filesystem::DestinationType::File, file_path}; FilePtr file = file_system_.createFile(new_file_info); const Api::IoCallBoolResult open_result = file->open(flag); - EXPECT_TRUE(open_result.rc_); + EXPECT_TRUE(open_result.return_value_); std::string data(" new data"); const Api::IoCallSizeResult result = file->write(data); - EXPECT_TRUE(result.rc_ < 0); + EXPECT_TRUE(result.return_value_ < 0); #ifdef WIN32 EXPECT_EQ(IoFileError::IoErrorCode::Permission, result.err_->getErrorCode()); #else diff --git a/test/common/network/address_impl_test.cc b/test/common/network/address_impl_test.cc index 3eb42caf7fd27..76f7e9e4c42f8 100644 --- a/test/common/network/address_impl_test.cc +++ b/test/common/network/address_impl_test.cc @@ -56,18 +56,21 @@ void testSocketBindAndConnect(Network::Address::IpVersion ip_version, bool v6onl if (addr_port->ip()->version() == IpVersion::v6) { int socket_v6only = 0; socklen_t size_int = sizeof(socket_v6only); - ASSERT_GE(sock.getSocketOption(IPPROTO_IPV6, IPV6_V6ONLY, &socket_v6only, &size_int).rc_, 0); + ASSERT_GE( + sock.getSocketOption(IPPROTO_IPV6, IPV6_V6ONLY, &socket_v6only, &size_int).return_value_, + 0); EXPECT_EQ(v6only, socket_v6only != 0); } // Bind the socket to the desired address and port. 
const Api::SysCallIntResult result = sock.bind(addr_port); - ASSERT_EQ(result.rc_, 0) << addr_port->asString() << "\nerror: " << errorDetails(result.errno_) - << "\nerrno: " << result.errno_; + ASSERT_EQ(result.return_value_, 0) + << addr_port->asString() << "\nerror: " << errorDetails(result.errno_) + << "\nerrno: " << result.errno_; // Do a bare listen syscall. Not bothering to accept connections as that would // require another thread. - ASSERT_EQ(sock.listen(128).rc_, 0); + ASSERT_EQ(sock.listen(128).return_value_, 0); auto client_connect = [](Address::InstanceConstSharedPtr addr_port) { // Create a client socket and connect to the server. @@ -79,12 +82,13 @@ void testSocketBindAndConnect(Network::Address::IpVersion ip_version, bool v6onl // operation of ::connect(), so connect returns with errno==EWOULDBLOCK before the tcp // handshake can complete. For testing convenience, re-enable blocking on the socket // so that connect will wait for the handshake to complete. - ASSERT_EQ(client_sock.setBlockingForTest(true).rc_, 0); + ASSERT_EQ(client_sock.setBlockingForTest(true).return_value_, 0); // Connect to the server. 
const Api::SysCallIntResult result = client_sock.connect(addr_port); - ASSERT_EQ(result.rc_, 0) << addr_port->asString() << "\nerror: " << errorDetails(result.errno_) - << "\nerrno: " << result.errno_; + ASSERT_EQ(result.return_value_, 0) + << addr_port->asString() << "\nerror: " << errorDetails(result.errno_) + << "\nerrno: " << result.errno_; }; auto client_addr_port = Network::Utility::parseInternetAddressAndPort( @@ -349,13 +353,14 @@ TEST(PipeInstanceTest, BasicPermission) { EXPECT_TRUE(sock.ioHandle().isOpen()) << pipe.asString(); Api::SysCallIntResult result = sock.bind(address); - ASSERT_EQ(result.rc_, 0) << pipe.asString() << "\nerror: " << errorDetails(result.errno_) - << "\terrno: " << result.errno_; + ASSERT_EQ(result.return_value_, 0) + << pipe.asString() << "\nerror: " << errorDetails(result.errno_) + << "\terrno: " << result.errno_; Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); struct stat stat_buf; result = os_sys_calls.stat(path.c_str(), &stat_buf); - EXPECT_EQ(result.rc_, 0); + EXPECT_EQ(result.return_value_, 0); // Get file permissions bits ASSERT_EQ(stat_buf.st_mode & 07777, mode) << path << std::oct << "\t" << (stat_buf.st_mode & 07777) << std::dec << "\t" @@ -449,8 +454,9 @@ TEST(PipeInstanceTest, UnlinksExistingFile) { const Api::SysCallIntResult result = sock.bind(address); - ASSERT_EQ(result.rc_, 0) << pipe.asString() << "\nerror: " << errorDetails(result.errno_) - << "\nerrno: " << result.errno_; + ASSERT_EQ(result.return_value_, 0) + << pipe.asString() << "\nerror: " << errorDetails(result.errno_) + << "\nerrno: " << result.errno_; }; const std::string path = TestEnvironment::unixDomainSocketPath("UnlinksExistingFile.sock"); diff --git a/test/common/network/io_socket_handle_impl_integration_test.cc b/test/common/network/io_socket_handle_impl_integration_test.cc index 31d8ac67de946..d438a636d19f6 100644 --- a/test/common/network/io_socket_handle_impl_integration_test.cc +++ 
b/test/common/network/io_socket_handle_impl_integration_test.cc @@ -26,7 +26,7 @@ TEST(IoSocketHandleImplIntegration, LastRoundTripIntegrationTest) { Address::InstanceConstSharedPtr addr(new Address::Ipv4Instance(&server)); auto socket_ = std::make_shared(addr, nullptr); socket_->setBlockingForTest(true); - EXPECT_TRUE(socket_->connect(addr).rc_ == 0); + EXPECT_TRUE(socket_->connect(addr).return_value_ == 0); EXPECT_TRUE(socket_->ioHandle().lastRoundTripTime() != absl::nullopt); } diff --git a/test/common/network/listen_socket_impl_test.cc b/test/common/network/listen_socket_impl_test.cc index 5c78af7a2e2c5..5468031588d41 100644 --- a/test/common/network/listen_socket_impl_test.cc +++ b/test/common/network/listen_socket_impl_test.cc @@ -96,7 +96,7 @@ class ListenSocketImplTest : public testing::TestWithParam { // instead of if block. auto os_sys_calls = Api::OsSysCallsSingleton::get(); if (NetworkSocketTrait::type == Socket::Type::Stream) { - EXPECT_EQ(0, socket1->listen(0).rc_); + EXPECT_EQ(0, socket1->listen(0).return_value_); } EXPECT_EQ(addr->ip()->port(), socket1->addressProvider().localAddress()->ip()->port()); @@ -118,8 +118,9 @@ class ListenSocketImplTest : public testing::TestWithParam { // Test createListenSocketPtr from IoHandlePtr's os_fd_t constructor int domain = version_ == Address::IpVersion::v4 ? 
AF_INET : AF_INET6; auto socket_result = os_sys_calls.socket(domain, SOCK_STREAM, 0); - EXPECT_TRUE(SOCKET_VALID(socket_result.rc_)); - Network::IoHandlePtr io_handle = std::make_unique(socket_result.rc_); + EXPECT_TRUE(SOCKET_VALID(socket_result.return_value_)); + Network::IoHandlePtr io_handle = + std::make_unique(socket_result.return_value_); auto socket3 = createListenSocketPtr(std::move(io_handle), addr, nullptr); EXPECT_EQ(socket3->addressProvider().localAddress()->asString(), addr->asString()); diff --git a/test/common/network/socket_option_impl_test.cc b/test/common/network/socket_option_impl_test.cc index 2bbf4eba6400e..c2736caed50d9 100644 --- a/test/common/network/socket_option_impl_test.cc +++ b/test/common/network/socket_option_impl_test.cc @@ -12,7 +12,7 @@ TEST_F(SocketOptionImplTest, BadFd) { absl::string_view zero("\0\0\0\0", 4); Api::SysCallIntResult result = SocketOptionImpl::setSocketOption(socket_, {}, zero.data(), zero.size()); - EXPECT_EQ(-1, result.rc_); + EXPECT_EQ(-1, result.return_value_); EXPECT_EQ(SOCKET_ERROR_NOT_SUP, result.errno_); } diff --git a/test/common/network/socket_option_test.h b/test/common/network/socket_option_test.h index c2a60bc1bf372..a13a9a76b61c9 100644 --- a/test/common/network/socket_option_test.h +++ b/test/common/network/socket_option_test.h @@ -42,13 +42,15 @@ class SocketOptionTest : public testing::Test { .Times(AnyNumber()) .WillRepeatedly(Invoke([this](os_fd_t sockfd, int level, int optname, const void* optval, socklen_t optlen) -> int { - return os_sys_calls_actual_.setsockopt(sockfd, level, optname, optval, optlen).rc_; + return os_sys_calls_actual_.setsockopt(sockfd, level, optname, optval, optlen) + .return_value_; })); EXPECT_CALL(os_sys_calls_, getsockopt_(_, _, _, _, _)) .Times(AnyNumber()) .WillRepeatedly(Invoke( [this](os_fd_t sockfd, int level, int optname, void* optval, socklen_t* optlen) -> int { - return os_sys_calls_actual_.getsockopt(sockfd, level, optname, optval, optlen).rc_; + return 
os_sys_calls_actual_.getsockopt(sockfd, level, optname, optval, optlen) + .return_value_; })); EXPECT_CALL(os_sys_calls_, getsockname(_, _, _)) .Times(AnyNumber()) diff --git a/test/common/network/udp_listener_impl_batch_writer_test.cc b/test/common/network/udp_listener_impl_batch_writer_test.cc index 817553ef10184..7e6f1ec296242 100644 --- a/test/common/network/udp_listener_impl_batch_writer_test.cc +++ b/test/common/network/udp_listener_impl_batch_writer_test.cc @@ -113,7 +113,7 @@ TEST_P(UdpListenerImplBatchWriterTest, SendData) { auto send_result = listener_->send(send_data); EXPECT_TRUE(send_result.ok()) << "send() failed : " << send_result.err_->getErrorDetails(); - EXPECT_EQ(send_result.rc_, payload.length()); + EXPECT_EQ(send_result.return_value_, payload.length()); // Verify udp_packet_writer stats for batch writing if (internal_buffer.length() == 0 || /* internal buffer is empty*/ @@ -202,7 +202,7 @@ TEST_P(UdpListenerImplBatchWriterTest, WriteBlocked) { auto send_result = listener_->send(initial_send_data); internal_buffer.append(initial_payload); EXPECT_TRUE(send_result.ok()); - EXPECT_EQ(send_result.rc_, initial_payload.length()); + EXPECT_EQ(send_result.return_value_, initial_payload.length()); EXPECT_FALSE(udp_packet_writer_->isWriteBlocked()); EXPECT_EQ(listener_config_.listenerScope() .gaugeFromString("internal_buffer_size", Stats::Gauge::ImportMode::NeverImport) @@ -229,12 +229,12 @@ TEST_P(UdpListenerImplBatchWriterTest, WriteBlocked) { // The following payload should get buffered if it is // shorter than initial payload EXPECT_TRUE(send_result.ok()); - EXPECT_EQ(send_result.rc_, following_payload.length()); + EXPECT_EQ(send_result.return_value_, following_payload.length()); EXPECT_TRUE(udp_packet_writer_->isWriteBlocked()); internal_buffer.append(following_payload); } else { EXPECT_FALSE(send_result.ok()); - EXPECT_EQ(send_result.rc_, 0); + EXPECT_EQ(send_result.return_value_, 0); } EXPECT_TRUE(udp_packet_writer_->isWriteBlocked()); 
EXPECT_EQ(listener_config_.listenerScope().counterFromString("total_bytes_sent").value(), @@ -253,7 +253,7 @@ TEST_P(UdpListenerImplBatchWriterTest, WriteBlocked) { })); auto flush_result = udp_packet_writer_->flush(); EXPECT_TRUE(flush_result.ok()); - EXPECT_EQ(flush_result.rc_, 0); + EXPECT_EQ(flush_result.return_value_, 0); EXPECT_FALSE(udp_packet_writer_->isWriteBlocked()); EXPECT_EQ(listener_config_.listenerScope() .gaugeFromString("internal_buffer_size", Stats::Gauge::ImportMode::NeverImport) diff --git a/test/common/network/udp_listener_impl_test.cc b/test/common/network/udp_listener_impl_test.cc index 1561d4ba05b6d..21e640de9c5a3 100644 --- a/test/common/network/udp_listener_impl_test.cc +++ b/test/common/network/udp_listener_impl_test.cc @@ -85,7 +85,7 @@ class UdpListenerImplTest : public UdpListenerImplTestBase { socklen_t int_size = static_cast(sizeof(get_recvbuf_size)); const Api::SysCallIntResult result2 = server_socket_->getSocketOption(SOL_SOCKET, SO_RCVBUF, &get_recvbuf_size, &int_size); - EXPECT_EQ(0, result2.rc_); + EXPECT_EQ(0, result2.return_value_); // Kernel increases the buffer size to allow bookkeeping overhead. 
if (get_recvbuf_size < 4 * 1024 * 1024) { recvbuf_large_enough_ = false; @@ -318,14 +318,14 @@ TEST_P(UdpListenerImplTest, UdpEcho) { 1, nullptr, *test_peer_address); if (send_rc.ok()) { - total_sent += send_rc.rc_; + total_sent += send_rc.return_value_; if (total_sent >= data_size) { break; } } else if (send_rc.err_->getErrorCode() != Api::IoError::IoErrorCode::Again) { break; } - } while (((send_rc.rc_ == 0) && + } while (((send_rc.return_value_ == 0) && (send_rc.err_->getErrorCode() == Api::IoError::IoErrorCode::Again)) || (total_sent < data_size)); @@ -460,7 +460,7 @@ TEST_P(UdpListenerImplTest, SendData) { // Verify External Flush is a No-op auto flush_result = udp_packet_writer_->flush(); EXPECT_TRUE(flush_result.ok()); - EXPECT_EQ(0, flush_result.rc_); + EXPECT_EQ(0, flush_result.return_value_); } /** diff --git a/test/common/quic/active_quic_listener_test.cc b/test/common/quic/active_quic_listener_test.cc index 100e60657e1c4..13da84735c546 100644 --- a/test/common/quic/active_quic_listener_test.cc +++ b/test/common/quic/active_quic_listener_test.cc @@ -219,7 +219,7 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { auto send_rc = Network::Utility::writeToSocket(client_sockets_.back()->ioHandle(), slice.data(), 1, nullptr, *listen_socket_->addressProvider().localAddress()); - ASSERT_EQ(slice[0].len_, send_rc.rc_); + ASSERT_EQ(slice[0].len_, send_rc.return_value_); #if defined(__APPLE__) // This sleep makes the tests pass more reliably. 
Some debugging showed that without this, @@ -242,7 +242,7 @@ class ActiveQuicListenerTest : public QuicMultiVersionTest { client_socket->ioHandle().read(*result_buffer, bytes_to_read - bytes_read); if (result.ok()) { - bytes_read += result.rc_; + bytes_read += result.return_value_; } else if (retry == 10 || result.err_->getErrorCode() != Api::IoError::IoErrorCode::Again) { break; } diff --git a/test/common/quic/platform/quic_test_output_impl.cc b/test/common/quic/platform/quic_test_output_impl.cc index 94290fffdcde4..45da4d97d7661 100644 --- a/test/common/quic/platform/quic_test_output_impl.cc +++ b/test/common/quic/platform/quic_test_output_impl.cc @@ -51,12 +51,12 @@ void quicRecordTestOutputToFile(const std::string& filename, absl::string_view d Envoy::Filesystem::FilePathAndType new_file_info{Envoy::Filesystem::DestinationType::File, output_path}; Envoy::Filesystem::FilePtr file = file_system.createFile(new_file_info); - if (!file->open(DefaultFlags).rc_) { + if (!file->open(DefaultFlags).return_value_) { QUIC_LOG(ERROR) << "Failed to open test output file: " << output_path; return; } - if (file->write(data).rc_ != static_cast(data.size())) { + if (file->write(data).return_value_ != static_cast(data.size())) { QUIC_LOG(ERROR) << "Failed to write to test output file: " << output_path; } else { QUIC_LOG(INFO) << "Recorded test output into " << output_path; diff --git a/test/extensions/filters/listener/common/fuzz/fuzzed_input_test.cc b/test/extensions/filters/listener/common/fuzz/fuzzed_input_test.cc index 54c3cb7314e7a..856980e1fc638 100644 --- a/test/extensions/filters/listener/common/fuzz/fuzzed_input_test.cc +++ b/test/extensions/filters/listener/common/fuzz/fuzzed_input_test.cc @@ -27,19 +27,19 @@ TEST(FuzzedInputStream, OneRead) { std::array read_data; // Test peeking - EXPECT_EQ(data.read(read_data.data(), 5, true).rc_, 5); + EXPECT_EQ(data.read(read_data.data(), 5, true).return_value_, 5); EXPECT_EQ(data.size(), 5); // Test length > data.size() - 
EXPECT_EQ(data.read(read_data.data(), 10, true).rc_, 5); + EXPECT_EQ(data.read(read_data.data(), 10, true).return_value_, 5); EXPECT_EQ(data.size(), 5); // Test non-peeking - EXPECT_EQ(data.read(read_data.data(), 3, false).rc_, 3); + EXPECT_EQ(data.read(read_data.data(), 3, false).return_value_, 3); EXPECT_EQ(data.size(), 2); // Test reaching end-of-stream - EXPECT_EQ(data.read(read_data.data(), 5, false).rc_, 2); + EXPECT_EQ(data.read(read_data.data(), 5, false).return_value_, 2); EXPECT_EQ(data.size(), 0); } @@ -54,7 +54,7 @@ TEST(FuzzedInputStream, MultipleReads) { std::array read_data; // Test peeking (first read) - EXPECT_EQ(data.read(read_data.data(), 5, true).rc_, 2); + EXPECT_EQ(data.read(read_data.data(), 5, true).return_value_, 2); EXPECT_EQ(data.size(), 2); data.next(); @@ -62,7 +62,7 @@ TEST(FuzzedInputStream, MultipleReads) { EXPECT_EQ(data.size(), 4); // Test non-peeking (second read) - EXPECT_EQ(data.read(read_data.data(), 3, false).rc_, 3); + EXPECT_EQ(data.read(read_data.data(), 3, false).return_value_, 3); EXPECT_EQ(data.size(), 1); data.next(); @@ -70,7 +70,7 @@ TEST(FuzzedInputStream, MultipleReads) { EXPECT_EQ(data.size(), 2); // Test non-peeking (third read) and reaching end-of-stream - EXPECT_EQ(data.read(read_data.data(), 5, false).rc_, 2); + EXPECT_EQ(data.read(read_data.data(), 5, false).return_value_, 2); EXPECT_EQ(data.size(), 0); } diff --git a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc index e9cc8f02ec022..5de9c4c9e4e7e 100644 --- a/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc +++ b/test/extensions/filters/listener/proxy_protocol/proxy_protocol_test.cc @@ -335,7 +335,8 @@ TEST_P(ProxyProtocolTest, ErrorRecv_2) { .Times(AnyNumber()) .WillRepeatedly(Invoke( [this](os_fd_t sockfd, int level, int optname, void* optval, socklen_t* optlen) -> int { - return os_sys_calls_actual_.getsockopt(sockfd, level, optname, 
optval, optlen).rc_; + return os_sys_calls_actual_.getsockopt(sockfd, level, optname, optval, optlen) + .return_value_; })); EXPECT_CALL(os_sys_calls, getsockname(_, _, _)) .Times(AnyNumber()) @@ -395,7 +396,8 @@ TEST_P(ProxyProtocolTest, ErrorRecv_1) { .Times(AnyNumber()) .WillRepeatedly(Invoke( [this](os_fd_t sockfd, int level, int optname, void* optval, socklen_t* optlen) -> int { - return os_sys_calls_actual_.getsockopt(sockfd, level, optname, optval, optlen).rc_; + return os_sys_calls_actual_.getsockopt(sockfd, level, optname, optval, optlen) + .return_value_; })); EXPECT_CALL(os_sys_calls, getsockname(_, _, _)) .Times(AnyNumber()) @@ -605,7 +607,7 @@ TEST_P(ProxyProtocolTest, V2ParseExtensionsRecvError) { .Times(AnyNumber()) .WillRepeatedly(Invoke([this](os_fd_t fd, void* buf, size_t n, int flags) { const Api::SysCallSizeResult x = os_sys_calls_actual_.recv(fd, buf, n, flags); - if (x.rc_ == sizeof(tlv)) { + if (x.return_value_ == sizeof(tlv)) { return Api::SysCallSizeResult{-1, 0}; } else { return x; @@ -630,7 +632,8 @@ TEST_P(ProxyProtocolTest, V2ParseExtensionsRecvError) { .Times(AnyNumber()) .WillRepeatedly(Invoke( [this](os_fd_t sockfd, int level, int optname, void* optval, socklen_t* optlen) -> int { - return os_sys_calls_actual_.getsockopt(sockfd, level, optname, optval, optlen).rc_; + return os_sys_calls_actual_.getsockopt(sockfd, level, optname, optval, optlen) + .return_value_; })); EXPECT_CALL(os_sys_calls, getsockname(_, _, _)) .Times(AnyNumber()) @@ -782,7 +785,8 @@ TEST_P(ProxyProtocolTest, V2Fragmented3Error) { .Times(AnyNumber()) .WillRepeatedly(Invoke( [this](os_fd_t sockfd, int level, int optname, void* optval, socklen_t* optlen) -> int { - return os_sys_calls_actual_.getsockopt(sockfd, level, optname, optval, optlen).rc_; + return os_sys_calls_actual_.getsockopt(sockfd, level, optname, optval, optlen) + .return_value_; })); EXPECT_CALL(os_sys_calls, getsockname(_, _, _)) .Times(AnyNumber()) @@ -848,7 +852,8 @@ TEST_P(ProxyProtocolTest, 
V2Fragmented4Error) { .Times(AnyNumber()) .WillRepeatedly(Invoke( [this](os_fd_t sockfd, int level, int optname, void* optval, socklen_t* optlen) -> int { - return os_sys_calls_actual_.getsockopt(sockfd, level, optname, optval, optlen).rc_; + return os_sys_calls_actual_.getsockopt(sockfd, level, optname, optval, optlen) + .return_value_; })); EXPECT_CALL(os_sys_calls, getsockname(_, _, _)) .Times(AnyNumber()) diff --git a/test/extensions/filters/udp/dns_filter/dns_filter_test.cc b/test/extensions/filters/udp/dns_filter/dns_filter_test.cc index 28a2c6fac665a..db9d906fff1fd 100644 --- a/test/extensions/filters/udp/dns_filter/dns_filter_test.cc +++ b/test/extensions/filters/udp/dns_filter/dns_filter_test.cc @@ -32,7 +32,7 @@ namespace { Api::IoCallUint64Result makeNoError(uint64_t rc) { auto no_error = Api::ioCallUint64ResultNoError(); - no_error.rc_ = rc; + no_error.return_value_ = rc; return no_error; } diff --git a/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc b/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc index 8e56df53c6cec..f59a51f7a833c 100644 --- a/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc +++ b/test/extensions/filters/udp/udp_proxy/udp_proxy_filter_test.cc @@ -42,7 +42,7 @@ class TestUdpProxyFilter : public UdpProxyFilter { Api::IoCallUint64Result makeNoError(uint64_t rc) { auto no_error = Api::ioCallUint64ResultNoError(); - no_error.rc_ = rc; + no_error.return_value_ = rc; return no_error; } diff --git a/test/extensions/io_socket/user_space/io_handle_impl_test.cc b/test/extensions/io_socket/user_space/io_handle_impl_test.cc index 83f8cd0ee3e72..fab734bb395d6 100644 --- a/test/extensions/io_socket/user_space/io_handle_impl_test.cc +++ b/test/extensions/io_socket/user_space/io_handle_impl_test.cc @@ -61,8 +61,8 @@ TEST_F(IoHandleImplTest, BasicRecv) { io_handle_peer_->write(buf_to_write); { auto result = io_handle_->recv(buf_.data(), buf_.size(), 0); - ASSERT_EQ(10, result.rc_); - ASSERT_EQ("0123456789", 
absl::string_view(buf_.data(), result.rc_)); + ASSERT_EQ(10, result.return_value_); + ASSERT_EQ("0123456789", absl::string_view(buf_.data(), result.return_value_)); } { auto result = io_handle_->recv(buf_.data(), buf_.size(), 0); @@ -84,22 +84,22 @@ TEST_F(IoHandleImplTest, RecvPeek) { { ::memset(buf_.data(), 1, buf_.size()); auto result = io_handle_->recv(buf_.data(), 5, MSG_PEEK); - ASSERT_EQ(5, result.rc_); - ASSERT_EQ("01234", absl::string_view(buf_.data(), result.rc_)); + ASSERT_EQ(5, result.return_value_); + ASSERT_EQ("01234", absl::string_view(buf_.data(), result.return_value_)); // The data beyond the boundary is untouched. ASSERT_EQ(std::string(buf_.size() - 5, 1), absl::string_view(buf_.data() + 5, buf_.size() - 5)); } { auto result = io_handle_->recv(buf_.data(), buf_.size(), MSG_PEEK); - ASSERT_EQ(10, result.rc_); - ASSERT_EQ("0123456789", absl::string_view(buf_.data(), result.rc_)); + ASSERT_EQ(10, result.return_value_); + ASSERT_EQ("0123456789", absl::string_view(buf_.data(), result.return_value_)); } { // Drain the pending buffer. auto recv_result = io_handle_->recv(buf_.data(), buf_.size(), 0); EXPECT_TRUE(recv_result.ok()); - EXPECT_EQ(10, recv_result.rc_); - ASSERT_EQ("0123456789", absl::string_view(buf_.data(), recv_result.rc_)); + EXPECT_EQ(10, recv_result.return_value_); + ASSERT_EQ("0123456789", absl::string_view(buf_.data(), recv_result.return_value_)); auto peek_result = io_handle_->recv(buf_.data(), buf_.size(), 0); // `EAGAIN`. EXPECT_FALSE(peek_result.ok()); @@ -109,7 +109,7 @@ TEST_F(IoHandleImplTest, RecvPeek) { // Peek upon shutdown. 
io_handle_->setWriteEnd(); auto result = io_handle_->recv(buf_.data(), buf_.size(), MSG_PEEK); - EXPECT_EQ(0, result.rc_); + EXPECT_EQ(0, result.return_value_); ASSERT(result.ok()); } } @@ -118,8 +118,8 @@ TEST_F(IoHandleImplTest, RecvPeekWhenPendingDataButShutdown) { Buffer::OwnedImpl buf_to_write("0123456789"); io_handle_peer_->write(buf_to_write); auto result = io_handle_->recv(buf_.data(), buf_.size(), MSG_PEEK); - ASSERT_EQ(10, result.rc_); - ASSERT_EQ("0123456789", absl::string_view(buf_.data(), result.rc_)); + ASSERT_EQ(10, result.return_value_); + ASSERT_EQ("0123456789", absl::string_view(buf_.data(), result.return_value_)); } TEST_F(IoHandleImplTest, MultipleRecvDrain) { @@ -128,13 +128,13 @@ TEST_F(IoHandleImplTest, MultipleRecvDrain) { { auto result = io_handle_->recv(buf_.data(), 1, 0); EXPECT_TRUE(result.ok()); - EXPECT_EQ(1, result.rc_); + EXPECT_EQ(1, result.return_value_); EXPECT_EQ("a", absl::string_view(buf_.data(), 1)); } { auto result = io_handle_->recv(buf_.data(), buf_.size(), 0); EXPECT_TRUE(result.ok()); - EXPECT_EQ(3, result.rc_); + EXPECT_EQ(3, result.return_value_); EXPECT_EQ("bcd", absl::string_view(buf_.data(), 3)); EXPECT_EQ(0, io_handle_->getWriteBuffer()->length()); @@ -150,7 +150,7 @@ TEST_F(IoHandleImplTest, ReadEmpty) { io_handle_->setWriteEnd(); result = io_handle_->read(buf, 10); EXPECT_TRUE(result.ok()); - EXPECT_EQ(0, result.rc_); + EXPECT_EQ(0, result.return_value_); } // Read allows max_length value 0 and returns no error. @@ -159,7 +159,7 @@ TEST_F(IoHandleImplTest, ReadWhileProvidingNoCapacity) { absl::optional max_length_opt{0}; auto result = io_handle_->read(buf, max_length_opt); EXPECT_TRUE(result.ok()); - EXPECT_EQ(0, result.rc_); + EXPECT_EQ(0, result.return_value_); } // Test read side effects. 
@@ -170,12 +170,12 @@ TEST_F(IoHandleImplTest, ReadContent) { Buffer::OwnedImpl buf; auto result = io_handle_->read(buf, 3); EXPECT_TRUE(result.ok()); - EXPECT_EQ(3, result.rc_); + EXPECT_EQ(3, result.return_value_); ASSERT_EQ(3, buf.length()); ASSERT_EQ(4, io_handle_->getWriteBuffer()->length()); result = io_handle_->read(buf, 10); EXPECT_TRUE(result.ok()); - EXPECT_EQ(4, result.rc_); + EXPECT_EQ(4, result.return_value_); ASSERT_EQ(7, buf.length()); ASSERT_EQ(0, io_handle_->getWriteBuffer()->length()); } @@ -194,7 +194,7 @@ TEST_F(IoHandleImplTest, ReadThrottling) { // Read at most 8 * FRAGMENT_SIZE to unlimited buffer. auto result0 = io_handle_->read(unlimited_buf, absl::nullopt); EXPECT_TRUE(result0.ok()); - EXPECT_EQ(result0.rc_, 8 * FRAGMENT_SIZE); + EXPECT_EQ(result0.return_value_, 8 * FRAGMENT_SIZE); EXPECT_EQ(unlimited_buf.length(), 8 * FRAGMENT_SIZE); EXPECT_EQ(unlimited_buf.toString(), std::string(8 * FRAGMENT_SIZE, 'a')); } @@ -205,7 +205,7 @@ TEST_F(IoHandleImplTest, ReadThrottling) { // Verify that read() populates the buf to high watermark. auto result = io_handle_->read(buf, 8 * FRAGMENT_SIZE + 1); EXPECT_TRUE(result.ok()); - EXPECT_EQ(result.rc_, FRAGMENT_SIZE + 1); + EXPECT_EQ(result.return_value_, FRAGMENT_SIZE + 1); EXPECT_EQ(buf.length(), FRAGMENT_SIZE + 1); EXPECT_FALSE(buf.highWatermarkTriggered()); EXPECT_EQ(buf.toString(), std::string(FRAGMENT_SIZE + 1, 'a')); @@ -215,7 +215,7 @@ TEST_F(IoHandleImplTest, ReadThrottling) { // Verify that read returns FRAGMENT_SIZE if the buf is over high watermark. 
auto result1 = io_handle_->read(buf, 8 * FRAGMENT_SIZE + 1); EXPECT_TRUE(result1.ok()); - EXPECT_EQ(result1.rc_, FRAGMENT_SIZE); + EXPECT_EQ(result1.return_value_, FRAGMENT_SIZE); EXPECT_EQ(buf.length(), 2 * FRAGMENT_SIZE + 1); EXPECT_TRUE(buf.highWatermarkTriggered()); EXPECT_EQ(buf.toString(), std::string(2 * FRAGMENT_SIZE + 1, 'a')); @@ -228,7 +228,7 @@ TEST_F(IoHandleImplTest, ReadThrottling) { EXPECT_TRUE(buf.highWatermarkTriggered()); auto result2 = io_handle_->read(buf, 8 * FRAGMENT_SIZE + 1); EXPECT_TRUE(result2.ok()); - EXPECT_EQ(result2.rc_, FRAGMENT_SIZE); + EXPECT_EQ(result2.return_value_, FRAGMENT_SIZE); EXPECT_TRUE(buf.highWatermarkTriggered()); EXPECT_EQ(buf.toString(), std::string(buf.highWatermark() - 1 + FRAGMENT_SIZE, 'a')); } @@ -243,7 +243,7 @@ TEST_F(IoHandleImplTest, ReadThrottling) { EXPECT_FALSE(buf.highWatermarkTriggered()); auto result3 = io_handle_->read(buf, 8 * FRAGMENT_SIZE + 1); EXPECT_TRUE(result3.ok()); - EXPECT_EQ(result3.rc_, FRAGMENT_SIZE); + EXPECT_EQ(result3.return_value_, FRAGMENT_SIZE); EXPECT_TRUE(buf.highWatermarkTriggered()); EXPECT_EQ(buf.toString(), std::string(buf.highWatermark() - 1 + FRAGMENT_SIZE, 'a')); } @@ -260,7 +260,7 @@ TEST_F(IoHandleImplTest, BasicReadv) { auto result = io_handle_->readv(1024, &slice, 1); EXPECT_TRUE(result.ok()); - EXPECT_EQ(3, result.rc_); + EXPECT_EQ(3, result.return_value_); result = io_handle_->readv(1024, &slice, 1); @@ -271,7 +271,7 @@ TEST_F(IoHandleImplTest, BasicReadv) { result = io_handle_->readv(1024, &slice, 1); // EOF EXPECT_TRUE(result.ok()); - EXPECT_EQ(0, result.rc_); + EXPECT_EQ(0, result.return_value_); } // Test readv on slices. 
@@ -286,7 +286,7 @@ TEST_F(IoHandleImplTest, ReadvMultiSlices) { EXPECT_EQ(absl::string_view(full_frag, 1024), std::string(1024, 'a')); EXPECT_TRUE(result.ok()); - EXPECT_EQ(1024, result.rc_); + EXPECT_EQ(1024, result.return_value_); } TEST_F(IoHandleImplTest, FlowControl) { @@ -318,7 +318,7 @@ TEST_F(IoHandleImplTest, FlowControl) { } auto result = io_handle_->recv(buf_.data(), 32, 0); EXPECT_TRUE(result.ok()); - EXPECT_EQ(32, result.rc_); + EXPECT_EQ(32, result.return_value_); } ASSERT_EQ(0, internal_buffer.length()); ASSERT_TRUE(writable_flipped); @@ -334,13 +334,13 @@ TEST_F(IoHandleImplTest, NoErrorWriteZeroDataToClosedIoHandle) { { Buffer::OwnedImpl buf; auto result = io_handle_->write(buf); - ASSERT_EQ(0, result.rc_); + ASSERT_EQ(0, result.return_value_); ASSERT(result.ok()); } { Buffer::RawSlice slice{nullptr, 0}; auto result = io_handle_->writev(&slice, 1); - ASSERT_EQ(0, result.rc_); + ASSERT_EQ(0, result.return_value_); ASSERT(result.ok()); } } @@ -382,8 +382,8 @@ TEST_F(IoHandleImplTest, ErrorOnClosedIoHandle) { } TEST_F(IoHandleImplTest, RepeatedShutdownWR) { - EXPECT_EQ(io_handle_peer_->shutdown(ENVOY_SHUT_WR).rc_, 0); - EXPECT_EQ(io_handle_peer_->shutdown(ENVOY_SHUT_WR).rc_, 0); + EXPECT_EQ(io_handle_peer_->shutdown(ENVOY_SHUT_WR).return_value_, 0); + EXPECT_EQ(io_handle_peer_->shutdown(ENVOY_SHUT_WR).return_value_, 0); } TEST_F(IoHandleImplTest, ShutDownOptionsNotSupported) { @@ -395,7 +395,7 @@ TEST_F(IoHandleImplTest, WriteByMove) { Buffer::OwnedImpl buf("0123456789"); auto result = io_handle_peer_->write(buf); EXPECT_TRUE(result.ok()); - EXPECT_EQ(10, result.rc_); + EXPECT_EQ(10, result.return_value_); EXPECT_EQ("0123456789", io_handle_->getWriteBuffer()->toString()); EXPECT_EQ(0, buf.length()); } @@ -425,7 +425,7 @@ TEST_F(IoHandleImplTest, PartialWrite) { // is not triggered. 
auto result = io_handle_->write(pending_data); EXPECT_TRUE(result.ok()); - EXPECT_EQ(result.rc_, FRAGMENT_SIZE + 1); + EXPECT_EQ(result.return_value_, FRAGMENT_SIZE + 1); EXPECT_EQ(pending_data.length(), INITIAL_SIZE - (FRAGMENT_SIZE + 1)); EXPECT_TRUE(io_handle_peer_->isWritable()); EXPECT_EQ(io_handle_peer_->getWriteBuffer()->toString(), std::string(FRAGMENT_SIZE + 1, 'a')); @@ -434,7 +434,7 @@ TEST_F(IoHandleImplTest, PartialWrite) { // Write another fragment since when high watermark is reached. auto result1 = io_handle_->write(pending_data); EXPECT_TRUE(result1.ok()); - EXPECT_EQ(result1.rc_, FRAGMENT_SIZE); + EXPECT_EQ(result1.return_value_, FRAGMENT_SIZE); EXPECT_EQ(pending_data.length(), INITIAL_SIZE - (FRAGMENT_SIZE + 1) - FRAGMENT_SIZE); EXPECT_FALSE(io_handle_peer_->isWritable()); EXPECT_EQ(io_handle_peer_->getWriteBuffer()->toString(), @@ -444,14 +444,14 @@ TEST_F(IoHandleImplTest, PartialWrite) { // Confirm that the further write return `EAGAIN`. auto result2 = io_handle_->write(pending_data); ASSERT_EQ(result2.err_->getErrorCode(), Api::IoError::IoErrorCode::Again); - ASSERT_EQ(result2.rc_, 0); + ASSERT_EQ(result2.return_value_, 0); } { // Make the peer writable again. 
Buffer::OwnedImpl black_hole_buffer; auto result_drain = io_handle_peer_->read(black_hole_buffer, FRAGMENT_SIZE + FRAGMENT_SIZE / 2 + 2); - ASSERT_EQ(result_drain.rc_, FRAGMENT_SIZE + FRAGMENT_SIZE / 2 + 2); + ASSERT_EQ(result_drain.return_value_, FRAGMENT_SIZE + FRAGMENT_SIZE / 2 + 2); EXPECT_TRUE(io_handle_peer_->isWritable()); } { @@ -461,7 +461,7 @@ TEST_F(IoHandleImplTest, PartialWrite) { EXPECT_LT(io_handle_peer_->getWriteBuffer()->highWatermark() - len, FRAGMENT_SIZE); EXPECT_GT(pending_data.length(), FRAGMENT_SIZE); auto result3 = io_handle_->write(pending_data); - EXPECT_EQ(result3.rc_, FRAGMENT_SIZE); + EXPECT_EQ(result3.return_value_, FRAGMENT_SIZE); EXPECT_FALSE(io_handle_peer_->isWritable()); EXPECT_EQ(io_handle_peer_->getWriteBuffer()->toString(), std::string(len + FRAGMENT_SIZE, 'a')); } @@ -511,8 +511,8 @@ TEST_F(IoHandleImplTest, PartialWritev) { EXPECT_EQ(3, slices.size()); auto result = io_handle_->writev(slices.data(), slices.size()); EXPECT_TRUE(result.ok()); - EXPECT_EQ(result.rc_, 256); - pending_data.drain(result.rc_); + EXPECT_EQ(result.return_value_, 256); + pending_data.drain(result.return_value_); EXPECT_EQ(pending_data.length(), 3); EXPECT_FALSE(io_handle_peer_->isWritable()); @@ -527,8 +527,8 @@ TEST_F(IoHandleImplTest, PartialWritev) { EXPECT_TRUE(io_handle_peer_->isWritable()); auto slices3 = pending_data.getRawSlices(); auto result3 = io_handle_->writev(slices3.data(), slices3.size()); - EXPECT_EQ(result3.rc_, 3); - pending_data.drain(result3.rc_); + EXPECT_EQ(result3.return_value_, 3); + pending_data.drain(result3.return_value_); EXPECT_EQ(0, pending_data.length()); } @@ -639,7 +639,7 @@ TEST_F(IoHandleImplTest, ReadAndWriteAreEdgeTriggered) { // Drain 1 bytes. 
auto result = io_handle_->recv(buf_.data(), 1, 0); EXPECT_TRUE(result.ok()); - EXPECT_EQ(1, result.rc_); + EXPECT_EQ(1, result.return_value_); ASSERT_FALSE(schedulable_cb->enabled_); io_handle_->resetFileEvents(); @@ -733,11 +733,11 @@ TEST_F(IoHandleImplTest, Close) { auto result = io_handle_->recv(buf_.data(), buf_.size(), 0); if (result.ok()) { // Read EOF. - if (result.rc_ == 0) { + if (result.return_value_ == 0) { should_close = true; break; } else { - accumulator += std::string(buf_.data(), result.rc_); + accumulator += std::string(buf_.data(), result.return_value_); } } else if (result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again) { ENVOY_LOG_MISC(debug, "read returns EAGAIN"); @@ -792,7 +792,7 @@ TEST_F(IoHandleImplTest, ShutDownRaiseEvent) { if (events & Event::FileReadyType::Read) { auto result = io_handle_->recv(buf_.data(), buf_.size(), 0); if (result.ok()) { - accumulator += std::string(buf_.data(), result.rc_); + accumulator += std::string(buf_.data(), result.return_value_); } else if (result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again) { ENVOY_LOG_MISC(debug, "read returns EAGAIN"); } else { @@ -832,7 +832,7 @@ TEST_F(IoHandleImplTest, WriteScheduleWritableEvent) { auto slice = reservation.slice(); auto result = handle->readv(1024, &slice, 1); if (result.ok()) { - accumulator += std::string(static_cast(slice.mem_), result.rc_); + accumulator += std::string(static_cast(slice.mem_), result.return_value_); } else if (result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again) { ENVOY_LOG_MISC(debug, "read returns EAGAIN"); } else { @@ -872,7 +872,7 @@ TEST_F(IoHandleImplTest, WritevScheduleWritableEvent) { auto slice = reservation.slice(); auto result = handle->readv(1024, &slice, 1); if (result.ok()) { - accumulator += std::string(static_cast(slice.mem_), result.rc_); + accumulator += std::string(static_cast(slice.mem_), result.return_value_); } else if (result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again) { 
ENVOY_LOG_MISC(debug, "read returns EAGAIN"); } else { @@ -913,10 +913,10 @@ TEST_F(IoHandleImplTest, ReadAfterShutdownWrite) { auto slice = reservation.slice(); auto result = handle->readv(1024, &slice, 1); if (result.ok()) { - if (result.rc_ == 0) { + if (result.return_value_ == 0) { should_close = true; } else { - accumulator += std::string(static_cast(slice.mem_), result.rc_); + accumulator += std::string(static_cast(slice.mem_), result.return_value_); } } else if (result.err_->getErrorCode() == Api::IoError::IoErrorCode::Again) { ENVOY_LOG_MISC(debug, "read returns EAGAIN"); @@ -966,13 +966,13 @@ TEST_F(IoHandleImplTest, NotifyWritableAfterShutdownWrite) { EXPECT_CALL(*schedulable_cb, scheduleCallbackNextIteration()).Times(0); auto result = io_handle_peer_->recv(buf_.data(), buf_.size(), 0); - EXPECT_EQ(256, result.rc_); + EXPECT_EQ(256, result.return_value_); // Readable event is not activated due to edge trigger type. EXPECT_FALSE(schedulable_cb->enabled_); // The `end of stream` is delivered. auto result_at_eof = io_handle_peer_->recv(buf_.data(), buf_.size(), 0); - EXPECT_EQ(0, result_at_eof.rc_); + EXPECT_EQ(0, result_at_eof.return_value_); // Also confirm `EOS` can triggered read ready event. 
EXPECT_CALL(*schedulable_cb, enabled()); @@ -1006,7 +1006,7 @@ TEST_F(IoHandleImplTest, DomainNullOpt) { EXPECT_FALSE(io_handle_->domain().has_ TEST_F(IoHandleImplTest, Connect) { auto address_is_ignored = std::make_shared("listener_id"); - EXPECT_EQ(0, io_handle_->connect(address_is_ignored).rc_); + EXPECT_EQ(0, io_handle_->connect(address_is_ignored).return_value_); } TEST_F(IoHandleImplTest, ActivateEvent) { diff --git a/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc b/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc index 919f37a97910c..ed2f816fee84c 100644 --- a/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc +++ b/test/extensions/stats_sinks/common/statsd/udp_statsd_test.cc @@ -64,7 +64,7 @@ TEST(UdpOverUdsStatsdSinkTest, InitWithPipeAddress) { // Start the server. Network::SocketImpl sock(Network::Socket::Type::Datagram, uds_address, nullptr); - RELEASE_ASSERT(sock.setBlockingForTest(false).rc_ != -1, ""); + RELEASE_ASSERT(sock.setBlockingForTest(false).return_value_ != -1, ""); sock.bind(uds_address); // Do the flush which should have somewhere to write now. 
diff --git a/test/integration/fake_upstream.cc b/test/integration/fake_upstream.cc index 52ee01dd3dc89..d0dd2ee4a41a2 100644 --- a/test/integration/fake_upstream.cc +++ b/test/integration/fake_upstream.cc @@ -749,7 +749,7 @@ void FakeUpstream::sendUdpDatagram(const std::string& buffer, dispatcher_->post([this, buffer, peer] { const auto rc = Network::Utility::writeToSocket(socket_->ioHandle(), Buffer::OwnedImpl(buffer), nullptr, *peer); - EXPECT_TRUE(rc.rc_ == buffer.length()); + EXPECT_TRUE(rc.return_value_ == buffer.length()); }); } @@ -770,7 +770,7 @@ testing::AssertionResult FakeUpstream::rawWriteConnection(uint32_t index, const void FakeUpstream::FakeListenSocketFactory::doFinalPreWorkerInit() { if (socket_->socketType() == Network::Socket::Type::Stream) { - ASSERT_EQ(0, socket_->ioHandle().listen(ENVOY_TCP_BACKLOG_SIZE).rc_); + ASSERT_EQ(0, socket_->ioHandle().listen(ENVOY_TCP_BACKLOG_SIZE).return_value_); } else { ASSERT(socket_->socketType() == Network::Socket::Type::Datagram); ASSERT_TRUE(Network::Socket::applyOptions(socket_->options(), *socket_, diff --git a/test/integration/filters/test_socket_interface.cc b/test/integration/filters/test_socket_interface.cc index cf0c1aa8a0de5..e32d5ace315ec 100644 --- a/test/integration/filters/test_socket_interface.cc +++ b/test/integration/filters/test_socket_interface.cc @@ -26,22 +26,22 @@ Api::IoCallUint64Result TestIoSocketHandle::writev(const Buffer::RawSlice* slice IoHandlePtr TestIoSocketHandle::accept(struct sockaddr* addr, socklen_t* addrlen) { auto result = Api::OsSysCallsSingleton::get().accept(fd_, addr, addrlen); - if (SOCKET_INVALID(result.rc_)) { + if (SOCKET_INVALID(result.return_value_)) { return nullptr; } - return std::make_unique(writev_override_, result.rc_, socket_v6only_, - domain_); + return std::make_unique(writev_override_, result.return_value_, + socket_v6only_, domain_); } IoHandlePtr TestIoSocketHandle::duplicate() { auto result = Api::OsSysCallsSingleton::get().duplicate(fd_); - if 
(result.rc_ == -1) { + if (result.return_value_ == -1) { throw EnvoyException(fmt::format("duplicate failed for '{}': ({}) {}", fd_, result.errno_, errorDetails(result.errno_))); } - return std::make_unique(writev_override_, result.rc_, socket_v6only_, - domain_); + return std::make_unique(writev_override_, result.return_value_, + socket_v6only_, domain_); } IoHandlePtr TestSocketInterface::makeSocket(int socket_fd, bool socket_v6only, diff --git a/test/integration/overload_integration_test.cc b/test/integration/overload_integration_test.cc index 33973434429c6..b9dfd3b6629cf 100644 --- a/test/integration/overload_integration_test.cc +++ b/test/integration/overload_integration_test.cc @@ -405,7 +405,8 @@ TEST_P(OverloadScaledTimerIntegrationTest, TlsHandshakeTimeout) { transport_callbacks->connection().dispatcher().exit(); // Read some amount of data; what's more important is whether the socket was remote-closed. That // needs to be propagated to the socket. - return Network::IoResult{transport_callbacks->ioHandle().read(buffer, 2 * 1024).rc_ == 0 + return Network::IoResult{transport_callbacks->ioHandle().read(buffer, 2 * 1024).return_value_ == + 0 ? 
Network::PostIoAction::Close : Network::PostIoAction::KeepOpen, 0, false}; diff --git a/test/integration/uds_integration_test.cc b/test/integration/uds_integration_test.cc index 11cfcd10efacb..3fb1ecfc70e11 100644 --- a/test/integration/uds_integration_test.cc +++ b/test/integration/uds_integration_test.cc @@ -105,7 +105,7 @@ TEST_P(UdsListenerIntegrationTest, TestSocketMode) { Api::OsSysCalls& os_sys_calls = Api::OsSysCallsSingleton::get(); struct stat listener_stat; - EXPECT_EQ(os_sys_calls.stat(getListenerSocketName().c_str(), &listener_stat).rc_, 0); + EXPECT_EQ(os_sys_calls.stat(getListenerSocketName().c_str(), &listener_stat).return_value_, 0); if (mode_ == 0) { EXPECT_NE(listener_stat.st_mode & 0777, 0); } else { diff --git a/test/mocks/filesystem/mocks.cc b/test/mocks/filesystem/mocks.cc index 9ce4201006d6e..bbc1e17413b76 100644 --- a/test/mocks/filesystem/mocks.cc +++ b/test/mocks/filesystem/mocks.cc @@ -13,7 +13,7 @@ Api::IoCallBoolResult MockFile::open(FlagSet flag) { Thread::LockGuard lock(open_mutex_); Api::IoCallBoolResult result = open_(flag); - is_open_ = result.rc_; + is_open_ = result.return_value_; num_opens_++; open_event_.notifyOne(); @@ -35,7 +35,7 @@ Api::IoCallSizeResult MockFile::write(absl::string_view buffer) { Api::IoCallBoolResult MockFile::close() { Api::IoCallBoolResult result = close_(); - is_open_ = !result.rc_; + is_open_ = !result.return_value_; return result; } diff --git a/test/server/listener_manager_impl_test.cc b/test/server/listener_manager_impl_test.cc index 20d61b146dee7..8a8139fa33275 100644 --- a/test/server/listener_manager_impl_test.cc +++ b/test/server/listener_manager_impl_test.cc @@ -1760,7 +1760,7 @@ name: foo )EOF"; auto syscall_result = os_sys_calls_actual_.socket(AF_INET, SOCK_STREAM, 0); - ASSERT_TRUE(SOCKET_VALID(syscall_result.rc_)); + ASSERT_TRUE(SOCKET_VALID(syscall_result.return_value_)); ListenerHandle* listener_foo = expectListenerCreate(true, true); EXPECT_CALL(listener_factory_, @@ -1799,7 +1799,7 @@ 
name: foo )EOF"; auto syscall_result = os_sys_calls_actual_.socket(AF_INET, SOCK_STREAM, 0); - ASSERT_TRUE(SOCKET_VALID(syscall_result.rc_)); + ASSERT_TRUE(SOCKET_VALID(syscall_result.return_value_)); // On Windows if the socket has not been bound to an address with bind // the call to getsockname fails with `WSAEINVAL`. To avoid that we make sure @@ -1808,7 +1808,7 @@ name: foo .WillByDefault(Invoke( [&](os_fd_t sockfd, const sockaddr* addr, socklen_t addrlen) -> Api::SysCallIntResult { Api::SysCallIntResult result = os_sys_calls_actual_.bind(sockfd, addr, addrlen); - ASSERT(result.rc_ >= 0); + ASSERT(result.return_value_ >= 0); return result; })); ListenerHandle* listener_foo = expectListenerCreate(true, true); diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 5081f1a05315c..5d83592881f3b 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -1271,7 +1271,7 @@ void bindAndListenTcpSocket(const Network::Address::InstanceConstSharedPtr& addr auto socket = std::make_unique(address, options, true); // Some kernels erroneously allow `bind` without SO_REUSEPORT for addresses // with some other socket already listening on it, see #7636. - if (SOCKET_FAILURE(socket->ioHandle().listen(1).rc_)) { + if (SOCKET_FAILURE(socket->ioHandle().listen(1).return_value_)) { // Mimic bind exception for the test simplicity. 
throw Network::SocketBindException(fmt::format("cannot listen: {}", errorDetails(errno)), errno); diff --git a/test/test_common/environment.cc b/test/test_common/environment.cc index 3dbaa9367a18d..be0dc2e76959a 100644 --- a/test/test_common/environment.cc +++ b/test/test_common/environment.cc @@ -412,9 +412,9 @@ std::string TestEnvironment::writeStringToFileForTest(const std::string& filenam const Filesystem::FlagSet flags{1 << Filesystem::File::Operation::Write | 1 << Filesystem::File::Operation::Create}; const Api::IoCallBoolResult open_result = file->open(flags); - EXPECT_TRUE(open_result.rc_); + EXPECT_TRUE(open_result.return_value_); const Api::IoCallSizeResult result = file->write(contents); - EXPECT_EQ(contents.length(), result.rc_); + EXPECT_EQ(contents.length(), result.return_value_); return out_path; } diff --git a/test/test_common/network_utility.cc b/test/test_common/network_utility.cc index 976f2e2205e65..6cf11038fbd77 100644 --- a/test/test_common/network_utility.cc +++ b/test/test_common/network_utility.cc @@ -34,12 +34,12 @@ Address::InstanceConstSharedPtr findOrCheckFreePort(Address::InstanceConstShared // to set REUSEADDR on listener sockets created by tests using an address validated by this means. Api::SysCallIntResult result = sock.bind(addr_port); const char* failing_fn = nullptr; - if (result.rc_ != 0) { + if (result.return_value_ != 0) { failing_fn = "bind"; } else if (type == Socket::Type::Stream) { // Try listening on the port also, if the type is TCP. 
result = sock.listen(1); - if (result.rc_ != 0) { + if (result.return_value_ != 0) { failing_fn = "listen"; } } @@ -171,7 +171,7 @@ bindFreeLoopbackPort(Address::IpVersion version, Socket::Type type, bool reuse_p envoy::config::core::v3::SocketOption::STATE_PREBIND); } Api::SysCallIntResult result = sock->bind(addr); - if (0 != result.rc_) { + if (0 != result.return_value_) { sock->close(); std::string msg = fmt::format("bind failed for address {} with error: {} ({})", addr->asString(), errorDetails(result.errno_), result.errno_); @@ -232,13 +232,13 @@ UdpSyncPeer::UdpSyncPeer(Network::Address::IpVersion version, uint64_t max_rx_da : socket_( std::make_unique(getCanonicalLoopbackAddress(version), nullptr, true)), max_rx_datagram_size_(max_rx_datagram_size) { - RELEASE_ASSERT(socket_->setBlockingForTest(true).rc_ != -1, ""); + RELEASE_ASSERT(socket_->setBlockingForTest(true).return_value_ != -1, ""); } void UdpSyncPeer::write(const std::string& buffer, const Network::Address::Instance& peer) { const auto rc = Network::Utility::writeToSocket(socket_->ioHandle(), Buffer::OwnedImpl(buffer), nullptr, peer); - ASSERT_EQ(rc.rc_, buffer.length()); + ASSERT_EQ(rc.return_value_, buffer.length()); } void UdpSyncPeer::recv(Network::UdpRecvData& datagram) { diff --git a/test/test_common/network_utility.h b/test/test_common/network_utility.h index 2eb6f82b2b180..ab60a012672cc 100644 --- a/test/test_common/network_utility.h +++ b/test/test_common/network_utility.h @@ -220,7 +220,7 @@ class TcpListenSocketImmediateListen : public Network::TcpListenSocket { TcpListenSocketImmediateListen(const Address::InstanceConstSharedPtr& address, const Network::Socket::OptionsSharedPtr& options = nullptr) : TcpListenSocket(address, options, true) { - EXPECT_EQ(0, io_handle_->listen(ENVOY_TCP_BACKLOG_SIZE).rc_); + EXPECT_EQ(0, io_handle_->listen(ENVOY_TCP_BACKLOG_SIZE).return_value_); } }; From 3bd1db47ec39f8ce4aa21cee7cb04c856a3077bf Mon Sep 17 00:00:00 2001 From: Greg Greenway Date: Mon, 26 
Jul 2021 09:51:46 -0700 Subject: [PATCH 47/57] Cleanup idle connection pools (disabled by default) (#17403) Delete connection pools when they have no connections anymore. This fixes unbounded memory use for cases where a new connection pool is needed for each downstream connection, such as when using upstream PROXY protocol. This reverts commit b7bc53945b4aefe4bff4d8fa498b3bf7933acd9e. This reverts PR #17319, by re-adding #17302 and #16948. Signed-off-by: Greg Greenway Co-authored-by: Craig Radcliffe --- .../other_features/ip_transparency.rst | 4 - docs/root/version_history/current.rst | 1 + envoy/common/conn_pool.h | 24 +- envoy/event/deferred_deletable.h | 6 + envoy/upstream/thread_local_cluster.h | 4 +- source/common/config/grpc_mux_impl.cc | 38 ++ source/common/config/grpc_mux_impl.h | 15 + source/common/config/new_grpc_mux_impl.cc | 41 +- source/common/config/new_grpc_mux_impl.h | 15 + source/common/conn_pool/conn_pool_base.cc | 50 ++- source/common/conn_pool/conn_pool_base.h | 23 +- source/common/event/dispatcher_impl.cc | 11 +- source/common/http/conn_pool_base.cc | 2 +- source/common/http/conn_pool_base.h | 7 +- source/common/http/conn_pool_grid.cc | 89 +++-- source/common/http/conn_pool_grid.h | 31 +- source/common/http/http1/conn_pool.cc | 2 +- source/common/runtime/runtime_features.cc | 3 + source/common/tcp/conn_pool.cc | 4 +- source/common/tcp/conn_pool.h | 7 +- source/common/tcp/original_conn_pool.cc | 41 +- source/common/tcp/original_conn_pool.h | 14 +- source/common/tcp_proxy/tcp_proxy.cc | 24 +- source/common/tcp_proxy/tcp_proxy.h | 1 + .../common/upstream/cluster_manager_impl.cc | 190 ++++++---- source/common/upstream/cluster_manager_impl.h | 16 +- source/common/upstream/conn_pool_map.h | 24 +- source/common/upstream/conn_pool_map_impl.h | 49 ++- .../common/upstream/priority_conn_pool_map.h | 20 +- .../upstream/priority_conn_pool_map_impl.h | 30 +- source/server/BUILD | 2 + source/server/server.cc | 6 + test/common/conn_pool/conn_pool_base_test.cc 
| 61 ++- test/common/http/conn_pool_grid_test.cc | 73 ++-- test/common/http/http1/conn_pool_test.cc | 15 +- test/common/http/http2/conn_pool_test.cc | 15 +- test/common/tcp/conn_pool_test.cc | 42 ++- .../upstream/cluster_manager_impl_test.cc | 355 ++++++++++++------ .../upstream/conn_pool_map_impl_test.cc | 65 ++-- .../priority_conn_pool_map_impl_test.cc | 35 +- test/integration/BUILD | 10 + .../http_conn_pool_integration_test.cc | 91 +++++ .../tcp_conn_pool_integration_test.cc | 134 ++++++- test/mocks/http/conn_pool.cc | 4 + test/mocks/http/conn_pool.h | 5 +- test/mocks/tcp/mocks.cc | 2 + test/mocks/tcp/mocks.h | 5 +- test/test_common/logging.h | 51 ++- 48 files changed, 1338 insertions(+), 419 deletions(-) create mode 100644 test/integration/http_conn_pool_integration_test.cc diff --git a/docs/root/intro/arch_overview/other_features/ip_transparency.rst b/docs/root/intro/arch_overview/other_features/ip_transparency.rst index 76ed11b5f5928..0e9fe81fc82d2 100644 --- a/docs/root/intro/arch_overview/other_features/ip_transparency.rst +++ b/docs/root/intro/arch_overview/other_features/ip_transparency.rst @@ -56,10 +56,6 @@ conjunction with the :ref:`Original Src Listener Filter `. Finally, Envoy supports generating this header using the :ref:`Proxy Protocol Transport Socket `. -IMPORTANT: There is currently a memory `issue `_ in Envoy where upstream connection pools are -not cleaned up after they are created. This heavily affects the usage of this transport socket as new pools are created for every downstream client -IP and port pair. Removing a cluster will clean up its associated connection pools, which could be used to mitigate this issue in the current state. - Here is an example config for setting up the socket: .. 
code-block:: yaml diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 9877d650b1233..582dcfdcd0320 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -39,6 +39,7 @@ Bug Fixes *Changes expected to improve the state of the world and are unlikely to have negative effects* * access log: fix `%UPSTREAM_CLUSTER%` when used in http upstream access logs. Previously, it was always logging as an unset value. +* cluster: delete pools when they're idle to fix unbounded memory use when using PROXY protocol upstream with tcp_proxy. This behavior can be temporarily reverted by setting the ``envoy.reloadable_features.conn_pool_delete_when_idle`` runtime guard to false. * xray: fix the AWS X-Ray tracer bug where span's error, fault and throttle information was not reported properly as per the `AWS X-Ray documentation `_. Before this fix, server error was reported under 'annotations' section of the segment data. Removed Config or Runtime diff --git a/envoy/common/conn_pool.h b/envoy/common/conn_pool.h index e05664288b673..17cf77eca6824 100644 --- a/envoy/common/conn_pool.h +++ b/envoy/common/conn_pool.h @@ -44,17 +44,27 @@ class Instance { virtual ~Instance() = default; /** - * Called when a connection pool has been drained of pending streams, busy connections, and - * ready connections. + * Called when a connection pool has no pending streams, busy connections, or ready connections. */ - using DrainedCb = std::function; + using IdleCb = std::function; /** - * Register a callback that gets called when the connection pool is fully drained and kicks - * off a drain. The owner of the connection pool is responsible for not creating any - * new streams. + * Register a callback that gets called when the connection pool is fully idle. 
*/ - virtual void addDrainedCallback(DrainedCb cb) PURE; + virtual void addIdleCallback(IdleCb cb) PURE; + + /** + * Returns true if the pool does not have any connections or pending requests. + */ + virtual bool isIdle() const PURE; + + /** + * Starts draining a pool, by gracefully completing all requests and gracefully closing all + * connections, in preparation for deletion. When the process completes, the function registered + * via `addIdleCallback()` is called. The callback may occur before this call returns if the pool + * can be immediately drained. + */ + virtual void startDrain() PURE; /** * Actively drain all existing connection pool connections. This method can be used in cases diff --git a/envoy/event/deferred_deletable.h b/envoy/event/deferred_deletable.h index c0e3dfee2835a..32b9398e322cd 100644 --- a/envoy/event/deferred_deletable.h +++ b/envoy/event/deferred_deletable.h @@ -13,6 +13,12 @@ namespace Event { class DeferredDeletable { public: virtual ~DeferredDeletable() = default; + + /** + * Called when an object is passed to `deferredDelete`. This signals that the object will soon + * be deleted. + */ + virtual void deleteIsPending() {} }; using DeferredDeletablePtr = std::unique_ptr; diff --git a/envoy/upstream/thread_local_cluster.h b/envoy/upstream/thread_local_cluster.h index 2efe626228475..16bba2855b0f3 100644 --- a/envoy/upstream/thread_local_cluster.h +++ b/envoy/upstream/thread_local_cluster.h @@ -31,9 +31,7 @@ class HttpPoolData { /** * See documentation of Envoy::ConnectionPool::Instance. 
*/ - void addDrainedCallback(ConnectionPool::Instance::DrainedCb cb) { - pool_->addDrainedCallback(cb); - }; + void addIdleCallback(ConnectionPool::Instance::IdleCb cb) { pool_->addIdleCallback(cb); }; Upstream::HostDescriptionConstSharedPtr host() const { return pool_->host(); } diff --git a/source/common/config/grpc_mux_impl.cc b/source/common/config/grpc_mux_impl.cc index 58807df00d2d3..060f0c845796b 100644 --- a/source/common/config/grpc_mux_impl.cc +++ b/source/common/config/grpc_mux_impl.cc @@ -14,6 +14,35 @@ namespace Envoy { namespace Config { +namespace { +class AllMuxesState { +public: + void insert(GrpcMuxImpl* mux) { + absl::WriterMutexLock locker(&lock_); + muxes_.insert(mux); + } + + void erase(GrpcMuxImpl* mux) { + absl::WriterMutexLock locker(&lock_); + muxes_.erase(mux); + } + + void shutdownAll() { + absl::WriterMutexLock locker(&lock_); + for (auto& mux : muxes_) { + mux->shutdown(); + } + } + +private: + absl::flat_hash_set muxes_ ABSL_GUARDED_BY(lock_); + + // TODO(ggreenway): can this lock be removed? Is this code only run on the main thread? 
+ absl::Mutex lock_; +}; +using AllMuxes = ThreadSafeSingleton; +} // namespace + GrpcMuxImpl::GrpcMuxImpl(const LocalInfo::LocalInfo& local_info, Grpc::RawAsyncClientPtr async_client, Event::Dispatcher& dispatcher, const Protobuf::MethodDescriptor& service_method, @@ -30,8 +59,13 @@ GrpcMuxImpl::GrpcMuxImpl(const LocalInfo::LocalInfo& local_info, onDynamicContextUpdate(resource_type_url); })) { Config::Utility::checkLocalInfo("ads", local_info); + AllMuxes::get().insert(this); } +GrpcMuxImpl::~GrpcMuxImpl() { AllMuxes::get().erase(this); } + +void GrpcMuxImpl::shutdownAll() { AllMuxes::get().shutdownAll(); } + void GrpcMuxImpl::onDynamicContextUpdate(absl::string_view resource_type_url) { auto api_state = api_state_.find(resource_type_url); if (api_state == api_state_.end()) { @@ -44,6 +78,10 @@ void GrpcMuxImpl::onDynamicContextUpdate(absl::string_view resource_type_url) { void GrpcMuxImpl::start() { grpc_stream_.establishNewStream(); } void GrpcMuxImpl::sendDiscoveryRequest(const std::string& type_url) { + if (shutdown_) { + return; + } + ApiState& api_state = apiStateFor(type_url); auto& request = api_state.request_; request.mutable_resource_names()->Clear(); diff --git a/source/common/config/grpc_mux_impl.h b/source/common/config/grpc_mux_impl.h index 585d028fe2b65..57b4946099373 100644 --- a/source/common/config/grpc_mux_impl.h +++ b/source/common/config/grpc_mux_impl.h @@ -39,6 +39,17 @@ class GrpcMuxImpl : public GrpcMux, Random::RandomGenerator& random, Stats::Scope& scope, const RateLimitSettings& rate_limit_settings, bool skip_subsequent_node); + ~GrpcMuxImpl() override; + + // Causes all GrpcMuxImpl objects to stop sending any messages on `grpc_stream_` to fix a crash + // on Envoy shutdown due to dangling pointers. This may not be the ideal fix; it is probably + // preferable for the `ServerImpl` to cause all configuration subscriptions to be shutdown, which + // would then cause all `GrpcMuxImpl` to be destructed. 
+ // TODO: figure out the correct fix: https://github.com/envoyproxy/envoy/issues/15072. + static void shutdownAll(); + + void shutdown() { shutdown_ = true; } + void start() override; // GrpcMux @@ -179,6 +190,10 @@ class GrpcMuxImpl : public GrpcMux, Event::Dispatcher& dispatcher_; Common::CallbackHandlePtr dynamic_update_callback_handle_; + + // True iff Envoy is shutting down; no messages should be sent on the `grpc_stream_` when this is + // true because it may contain dangling pointers. + std::atomic shutdown_{false}; }; using GrpcMuxImplPtr = std::unique_ptr; diff --git a/source/common/config/new_grpc_mux_impl.cc b/source/common/config/new_grpc_mux_impl.cc index 89a829167e273..d0e3db537d0b9 100644 --- a/source/common/config/new_grpc_mux_impl.cc +++ b/source/common/config/new_grpc_mux_impl.cc @@ -16,6 +16,35 @@ namespace Envoy { namespace Config { +namespace { +class AllMuxesState { +public: + void insert(NewGrpcMuxImpl* mux) { + absl::WriterMutexLock locker(&lock_); + muxes_.insert(mux); + } + + void erase(NewGrpcMuxImpl* mux) { + absl::WriterMutexLock locker(&lock_); + muxes_.erase(mux); + } + + void shutdownAll() { + absl::WriterMutexLock locker(&lock_); + for (auto& mux : muxes_) { + mux->shutdown(); + } + } + +private: + absl::flat_hash_set muxes_ ABSL_GUARDED_BY(lock_); + + // TODO(ggreenway): can this lock be removed? Is this code only run on the main thread? 
+ absl::Mutex lock_; +}; +using AllMuxes = ThreadSafeSingleton; +} // namespace + NewGrpcMuxImpl::NewGrpcMuxImpl(Grpc::RawAsyncClientPtr&& async_client, Event::Dispatcher& dispatcher, const Protobuf::MethodDescriptor& service_method, @@ -30,7 +59,13 @@ NewGrpcMuxImpl::NewGrpcMuxImpl(Grpc::RawAsyncClientPtr&& async_client, [this](absl::string_view resource_type_url) { onDynamicContextUpdate(resource_type_url); })), - transport_api_version_(transport_api_version), dispatcher_(dispatcher) {} + transport_api_version_(transport_api_version), dispatcher_(dispatcher) { + AllMuxes::get().insert(this); +} + +NewGrpcMuxImpl::~NewGrpcMuxImpl() { AllMuxes::get().erase(this); } + +void NewGrpcMuxImpl::shutdownAll() { AllMuxes::get().shutdownAll(); } void NewGrpcMuxImpl::onDynamicContextUpdate(absl::string_view resource_type_url) { auto sub = subscriptions_.find(resource_type_url); @@ -216,6 +251,10 @@ void NewGrpcMuxImpl::addSubscription(const std::string& type_url, const bool use } void NewGrpcMuxImpl::trySendDiscoveryRequests() { + if (shutdown_) { + return; + } + while (true) { // Do any of our subscriptions even want to send a request? absl::optional maybe_request_type = whoWantsToSendDiscoveryRequest(); diff --git a/source/common/config/new_grpc_mux_impl.h b/source/common/config/new_grpc_mux_impl.h index 4c2246fed813b..98ded0dec357b 100644 --- a/source/common/config/new_grpc_mux_impl.h +++ b/source/common/config/new_grpc_mux_impl.h @@ -38,6 +38,17 @@ class NewGrpcMuxImpl const RateLimitSettings& rate_limit_settings, const LocalInfo::LocalInfo& local_info); + ~NewGrpcMuxImpl() override; + + // Causes all NewGrpcMuxImpl objects to stop sending any messages on `grpc_stream_` to fix a crash + // on Envoy shutdown due to dangling pointers. This may not be the ideal fix; it is probably + // preferable for the `ServerImpl` to cause all configuration subscriptions to be shutdown, which + // would then cause all `NewGrpcMuxImpl` to be destructed. 
+ // TODO: figure out the correct fix: https://github.com/envoyproxy/envoy/issues/15072. + static void shutdownAll(); + + void shutdown() { shutdown_ = true; } + GrpcMuxWatchPtr addWatch(const std::string& type_url, const absl::flat_hash_set& resources, SubscriptionCallbacks& callbacks, @@ -170,6 +181,10 @@ class NewGrpcMuxImpl Common::CallbackHandlePtr dynamic_update_callback_handle_; const envoy::config::core::v3::ApiVersion transport_api_version_; Event::Dispatcher& dispatcher_; + + // True iff Envoy is shutting down; no messages should be sent on the `grpc_stream_` when this is + // true because it may contain dangling pointers. + std::atomic shutdown_{false}; }; using NewGrpcMuxImplPtr = std::unique_ptr; diff --git a/source/common/conn_pool/conn_pool_base.cc b/source/common/conn_pool/conn_pool_base.cc index f83c9164126ee..76950f3ccde71 100644 --- a/source/common/conn_pool/conn_pool_base.cc +++ b/source/common/conn_pool/conn_pool_base.cc @@ -28,9 +28,13 @@ ConnPoolImplBase::ConnPoolImplBase( upstream_ready_cb_(dispatcher_.createSchedulableCallback([this]() { onUpstreamReady(); })) {} ConnPoolImplBase::~ConnPoolImplBase() { - ASSERT(ready_clients_.empty()); - ASSERT(busy_clients_.empty()); - ASSERT(connecting_clients_.empty()); + ASSERT(isIdleImpl()); + ASSERT(connecting_stream_capacity_ == 0); +} + +void ConnPoolImplBase::deleteIsPendingImpl() { + deferred_deleting_ = true; + ASSERT(isIdleImpl()); ASSERT(connecting_stream_capacity_ == 0); } @@ -225,6 +229,8 @@ void ConnPoolImplBase::onStreamClosed(Envoy::ConnectionPool::ActiveClient& clien } ConnectionPool::Cancellable* ConnPoolImplBase::newStream(AttachContext& context) { + ASSERT(!deferred_deleting_); + ASSERT(static_cast(connecting_stream_capacity_) == connectingCapacity(connecting_clients_)); // O(n) debug check. 
if (!ready_clients_.empty()) { @@ -272,6 +278,7 @@ ConnectionPool::Cancellable* ConnPoolImplBase::newStream(AttachContext& context) } bool ConnPoolImplBase::maybePreconnect(float global_preconnect_ratio) { + ASSERT(!deferred_deleting_); return tryCreateNewConnection(global_preconnect_ratio) == ConnectionResult::CreatedNewConnection; } @@ -322,9 +329,11 @@ void ConnPoolImplBase::transitionActiveClientState(ActiveClient& client, } } -void ConnPoolImplBase::addDrainedCallbackImpl(Instance::DrainedCb cb) { - drained_callbacks_.push_back(cb); - checkForDrained(); +void ConnPoolImplBase::addIdleCallbackImpl(Instance::IdleCb cb) { idle_callbacks_.push_back(cb); } + +void ConnPoolImplBase::startDrainImpl() { + is_draining_ = true; + checkForIdleAndCloseIdleConnsIfDraining(); } void ConnPoolImplBase::closeIdleConnectionsForDrainingPool() { @@ -366,17 +375,19 @@ void ConnPoolImplBase::drainConnectionsImpl() { } } -void ConnPoolImplBase::checkForDrained() { - if (drained_callbacks_.empty()) { - return; - } +bool ConnPoolImplBase::isIdleImpl() const { + return pending_streams_.empty() && ready_clients_.empty() && busy_clients_.empty() && + connecting_clients_.empty(); +} - closeIdleConnectionsForDrainingPool(); +void ConnPoolImplBase::checkForIdleAndCloseIdleConnsIfDraining() { + if (is_draining_) { + closeIdleConnectionsForDrainingPool(); + } - if (pending_streams_.empty() && ready_clients_.empty() && busy_clients_.empty() && - connecting_clients_.empty()) { - ENVOY_LOG(debug, "invoking drained callbacks"); - for (const Instance::DrainedCb& cb : drained_callbacks_) { + if (isIdleImpl()) { + ENVOY_LOG(debug, "invoking idle callbacks - is_draining_={}", is_draining_); + for (const Instance::IdleCb& cb : idle_callbacks_) { cb(); } } @@ -439,9 +450,8 @@ void ConnPoolImplBase::onConnectionEvent(ActiveClient& client, absl::string_view client.releaseResources(); dispatcher_.deferredDelete(client.removeFromList(owningList(client.state()))); - if (incomplete_stream) { - 
checkForDrained(); - } + + checkForIdleAndCloseIdleConnsIfDraining(); client.setState(ActiveClient::State::CLOSED); @@ -459,7 +469,7 @@ void ConnPoolImplBase::onConnectionEvent(ActiveClient& client, absl::string_view // refer to client after this point. onConnected(client); onUpstreamReady(); - checkForDrained(); + checkForIdleAndCloseIdleConnsIfDraining(); } } @@ -529,7 +539,7 @@ void ConnPoolImplBase::onPendingStreamCancel(PendingStream& stream, } host_->cluster().stats().upstream_rq_cancelled_.inc(); - checkForDrained(); + checkForIdleAndCloseIdleConnsIfDraining(); } namespace { diff --git a/source/common/conn_pool/conn_pool_base.h b/source/common/conn_pool/conn_pool_base.h index f4822b7e77f64..8e06e8e68ed2b 100644 --- a/source/common/conn_pool/conn_pool_base.h +++ b/source/common/conn_pool/conn_pool_base.h @@ -144,6 +144,8 @@ class ConnPoolImplBase : protected Logger::Loggable { Upstream::ClusterConnectivityState& state); virtual ~ConnPoolImplBase(); + void deleteIsPendingImpl(); + // A helper function to get the specific context type from the base class context. template T& typedContext(AttachContext& context) { ASSERT(dynamic_cast(&context) != nullptr); @@ -160,7 +162,8 @@ class ConnPoolImplBase : protected Logger::Loggable { int64_t connecting_and_connected_capacity, float preconnect_ratio, bool anticipate_incoming_stream = false); - void addDrainedCallbackImpl(Instance::DrainedCb cb); + void addIdleCallbackImpl(Instance::IdleCb cb); + void startDrainImpl(); void drainConnectionsImpl(); // Closes and destroys all connections. This must be called in the destructor of @@ -192,8 +195,13 @@ class ConnPoolImplBase : protected Logger::Loggable { void onConnectionEvent(ActiveClient& client, absl::string_view failure_reason, Network::ConnectionEvent event); - // See if the drain process has started and/or completed. - void checkForDrained(); + + // Returns true if the pool is idle. + bool isIdleImpl() const; + + // See if the pool has gone idle. 
If we're draining, this will also close idle connections. + void checkForIdleAndCloseIdleConnsIfDraining(); + void scheduleOnUpstreamReady(); ConnectionPool::Cancellable* newStream(AttachContext& context); // Called if this pool is likely to be picked soon, to determine if it's worth preconnecting. @@ -299,7 +307,7 @@ class ConnPoolImplBase : protected Logger::Loggable { const Network::ConnectionSocket::OptionsSharedPtr socket_options_; const Network::TransportSocketOptionsConstSharedPtr transport_socket_options_; - std::list drained_callbacks_; + std::list idle_callbacks_; // When calling purgePendingStreams, this list will be used to hold the streams we are about // to purge. We need this if one cancelled streams cancels a different pending stream @@ -325,6 +333,13 @@ class ConnPoolImplBase : protected Logger::Loggable { // The number of streams currently attached to clients. uint32_t num_active_streams_{0}; + // Whether the connection pool is currently in the process of closing + // all connections so that it can be gracefully deleted. + bool is_draining_{false}; + + // True iff this object is in the deferred delete list. 
+ bool deferred_deleting_{false}; + void onUpstreamReady(); Event::SchedulableCallbackPtr upstream_ready_cb_; }; diff --git a/source/common/event/dispatcher_impl.cc b/source/common/event/dispatcher_impl.cc index c0c8118895872..c9c48cb31a657 100644 --- a/source/common/event/dispatcher_impl.cc +++ b/source/common/event/dispatcher_impl.cc @@ -247,10 +247,13 @@ TimerPtr DispatcherImpl::createTimerInternal(TimerCb cb) { void DispatcherImpl::deferredDelete(DeferredDeletablePtr&& to_delete) { ASSERT(isThreadSafe()); - current_to_delete_->emplace_back(std::move(to_delete)); - ENVOY_LOG(trace, "item added to deferred deletion list (size={})", current_to_delete_->size()); - if (current_to_delete_->size() == 1) { - deferred_delete_cb_->scheduleCallbackCurrentIteration(); + if (to_delete != nullptr) { + to_delete->deleteIsPending(); + current_to_delete_->emplace_back(std::move(to_delete)); + ENVOY_LOG(trace, "item added to deferred deletion list (size={})", current_to_delete_->size()); + if (current_to_delete_->size() == 1) { + deferred_delete_cb_->scheduleCallbackCurrentIteration(); + } } } diff --git a/source/common/http/conn_pool_base.cc b/source/common/http/conn_pool_base.cc index 601295b8f9665..4a67bea9da859 100644 --- a/source/common/http/conn_pool_base.cc +++ b/source/common/http/conn_pool_base.cc @@ -141,7 +141,7 @@ void MultiplexedActiveClientBase::onStreamDestroy() { // wait until the connection has been fully drained of streams and then check in the connection // event callback. 
if (!closed_with_active_rq_) { - parent().checkForDrained(); + parent().checkForIdleAndCloseIdleConnsIfDraining(); } } diff --git a/source/common/http/conn_pool_base.h b/source/common/http/conn_pool_base.h index 0d72454786204..e2f7494c0e38b 100644 --- a/source/common/http/conn_pool_base.h +++ b/source/common/http/conn_pool_base.h @@ -55,8 +55,13 @@ class HttpConnPoolImplBase : public Envoy::ConnectionPool::ConnPoolImplBase, std::vector protocols); ~HttpConnPoolImplBase() override; + // Event::DeferredDeletable + void deleteIsPending() override { deleteIsPendingImpl(); } + // ConnectionPool::Instance - void addDrainedCallback(DrainedCb cb) override { addDrainedCallbackImpl(cb); } + void addIdleCallback(IdleCb cb) override { addIdleCallbackImpl(cb); } + bool isIdle() const override { return isIdleImpl(); } + void startDrain() override { startDrainImpl(); } void drainConnections() override { drainConnectionsImpl(); } Upstream::HostDescriptionConstSharedPtr host() const override { return host_; } ConnectionPool::Cancellable* newStream(Http::ResponseDecoder& response_decoder, diff --git a/source/common/http/conn_pool_grid.cc b/source/common/http/conn_pool_grid.cc index 071088f1b871e..f5e38a1a9e2ef 100644 --- a/source/common/http/conn_pool_grid.cc +++ b/source/common/http/conn_pool_grid.cc @@ -207,7 +207,7 @@ ConnectivityGrid::ConnectivityGrid( } ConnectivityGrid::~ConnectivityGrid() { - // Ignore drained callbacks while the pools are destroyed below. + // Ignore idle callbacks while the pools are destroyed below. destroying_ = true; // Callbacks might have pending streams registered with the pools, so cancel and delete // the callback before deleting the pools. 
@@ -215,25 +215,41 @@ ConnectivityGrid::~ConnectivityGrid() { pools_.clear(); } +void ConnectivityGrid::deleteIsPending() { + deferred_deleting_ = true; + for (const auto& pool : pools_) { + pool->deleteIsPending(); + } +} + absl::optional ConnectivityGrid::createNextPool() { + ASSERT(!deferred_deleting_); // Pools are created by newStream, which should not be called during draining. - ASSERT(drained_callbacks_.empty()); + ASSERT(!draining_); // Right now, only H3 and TCP are supported, so if there are 2 pools we're done. - if (pools_.size() == 2 || !drained_callbacks_.empty()) { + if (pools_.size() == 2 || draining_) { return absl::nullopt; } // HTTP/3 is hard-coded as higher priority, H2 as secondary. + ConnectionPool::InstancePtr pool; if (pools_.empty()) { - pools_.push_back(Http3::allocateConnPool(dispatcher_, random_generator_, host_, priority_, - options_, transport_socket_options_, state_, - time_source_, quic_stat_names_, scope_)); - return pools_.begin(); - } - pools_.push_back(std::make_unique(dispatcher_, random_generator_, host_, - priority_, options_, - transport_socket_options_, state_)); - return std::next(pools_.begin()); + pool = Http3::allocateConnPool(dispatcher_, random_generator_, host_, priority_, options_, + transport_socket_options_, state_, time_source_, + quic_stat_names_, scope_); + } else { + pool = std::make_unique(dispatcher_, random_generator_, host_, priority_, + options_, transport_socket_options_, state_); + } + + setupPool(*pool); + pools_.push_back(std::move(pool)); + + return --pools_.end(); +} + +void ConnectivityGrid::setupPool(ConnectionPool::Instance& pool) { + pool.addIdleCallback([this]() { onIdleReceived(); }); } bool ConnectivityGrid::hasActiveConnections() const { @@ -248,6 +264,11 @@ bool ConnectivityGrid::hasActiveConnections() const { ConnectionPool::Cancellable* ConnectivityGrid::newStream(Http::ResponseDecoder& decoder, ConnectionPool::Callbacks& callbacks) { + ASSERT(!deferred_deleting_); + + // New streams 
should not be created during draining. + ASSERT(!draining_); + if (pools_.empty()) { createNextPool(); } @@ -269,22 +290,24 @@ ConnectionPool::Cancellable* ConnectivityGrid::newStream(Http::ResponseDecoder& return ret; } -void ConnectivityGrid::addDrainedCallback(DrainedCb cb) { +void ConnectivityGrid::addIdleCallback(IdleCb cb) { // Add the callback to the list of callbacks to be called when all drains are // complete. - drained_callbacks_.emplace_back(cb); + idle_callbacks_.emplace_back(cb); +} - if (drained_callbacks_.size() != 1) { +void ConnectivityGrid::startDrain() { + if (draining_) { + // A drain callback has already been set, and only needs to happen once. return; } - // If this is the first time a drained callback has been added, track the - // number of pools which need to be drained in order to pass drain-completion - // up to the callers. Note that no new pools can be created from this point on - // as createNextPool fast-fails if drained callbacks are present. - drains_needed_ = pools_.size(); + // Note that no new pools can be created from this point on + // as createNextPool fast-fails if `draining_` is true. + draining_ = true; + for (auto& pool : pools_) { - pool->addDrainedCallback([this]() -> void { onDrainReceived(); }); + pool->startDrain(); } } @@ -318,21 +341,25 @@ void ConnectivityGrid::markHttp3Broken() { http3_status_tracker_.markHttp3Broken void ConnectivityGrid::markHttp3Confirmed() { http3_status_tracker_.markHttp3Confirmed(); } -void ConnectivityGrid::onDrainReceived() { - // Don't do any work under the stack of ~ConnectivityGrid() - if (destroying_) { - return; +bool ConnectivityGrid::isIdle() const { + // This is O(n) but the function is constant and there are no plans for n > 8. + bool idle = true; + for (const auto& pool : pools_) { + idle &= pool->isIdle(); } + return idle; +} - // If not all the pools have drained, keep waiting. 
- ASSERT(drains_needed_ != 0); - if (--drains_needed_ != 0) { +void ConnectivityGrid::onIdleReceived() { + // Don't do any work under the stack of ~ConnectivityGrid() + if (destroying_) { return; } - // All the pools have drained. Notify drain subscribers. - for (auto& callback : drained_callbacks_) { - callback(); + if (isIdle()) { + for (auto& callback : idle_callbacks_) { + callback(); + } } } diff --git a/source/common/http/conn_pool_grid.h b/source/common/http/conn_pool_grid.h index e658b5ed1123c..c9b01a74e1622 100644 --- a/source/common/http/conn_pool_grid.h +++ b/source/common/http/conn_pool_grid.h @@ -139,11 +139,16 @@ class ConnectivityGrid : public ConnectionPool::Instance, Stats::Scope& scope); ~ConnectivityGrid() override; + // Event::DeferredDeletable + void deleteIsPending() override; + // Http::ConnPool::Instance bool hasActiveConnections() const override; ConnectionPool::Cancellable* newStream(Http::ResponseDecoder& response_decoder, ConnectionPool::Callbacks& callbacks) override; - void addDrainedCallback(DrainedCb cb) override; + void addIdleCallback(IdleCb cb) override; + bool isIdle() const override; + void startDrain() override; void drainConnections() override; Upstream::HostDescriptionConstSharedPtr host() const override; bool maybePreconnect(float preconnect_ratio) override; @@ -167,12 +172,16 @@ class ConnectivityGrid : public ConnectionPool::Instance, // event that HTTP/3 is marked broken again. void markHttp3Confirmed(); +protected: + // Set the required idle callback on the pool. + void setupPool(ConnectionPool::Instance& pool); + private: friend class ConnectivityGridForTest; - // Called by each pool as it drains. The grid is responsible for calling - // drained_callbacks_ once all pools have drained. - void onDrainReceived(); + // Called by each pool as it idles. The grid is responsible for calling + // idle_callbacks_ once all pools have idled. 
+ void onIdleReceived(); // Returns true if HTTP/3 should be attempted because there is an alternate protocol // that specifies HTTP/3 and HTTP/3 is not broken. @@ -196,20 +205,24 @@ class ConnectivityGrid : public ConnectionPool::Instance, // TODO(RyanTheOptimist): Make the alternate_protocols_ member non-optional. AlternateProtocolsCacheSharedPtr alternate_protocols_; - // Tracks how many drains are needed before calling drain callbacks. This is - // set to the number of pools when the first drain callbacks are added, and - // decremented as various pools drain. - uint32_t drains_needed_ = 0; + // True iff this pool is draining. No new streams or connections should be created + // in this state. + bool draining_{false}; + // Tracks the callbacks to be called on drain completion. - std::list drained_callbacks_; + std::list idle_callbacks_; // The connection pools to use to create new streams, ordered in the order of // desired use. std::list pools_; + // True iff under the stack of the destructor, to avoid calling drain // callbacks on deletion. bool destroying_{}; + // True iff this pool is being being defer deleted. + bool deferred_deleting_{}; + // Wrapped callbacks are stashed in the wrapped_callbacks_ for ownership. 
std::list wrapped_callbacks_; diff --git a/source/common/http/http1/conn_pool.cc b/source/common/http/http1/conn_pool.cc index c7e9af0970108..87f33935474cf 100644 --- a/source/common/http/http1/conn_pool.cc +++ b/source/common/http/http1/conn_pool.cc @@ -63,7 +63,7 @@ void ActiveClient::StreamWrapper::onDecodeComplete() { pool->scheduleOnUpstreamReady(); parent_.stream_wrapper_.reset(); - pool->checkForDrained(); + pool->checkForIdleAndCloseIdleConnsIfDraining(); } } diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index fec0f2bc740f6..11d268f22c7c4 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -111,6 +111,9 @@ constexpr const char* runtime_features[] = { constexpr const char* disabled_runtime_features[] = { // v2 is fatal-by-default. "envoy.test_only.broken_in_production.enable_deprecated_v2_api", + // Defaulting to off due to high risk. + // TODO(ggreenway): Move this to default-on during 1.20 release cycle. + "envoy.reloadable_features.conn_pool_delete_when_idle", // TODO(asraa) flip to true in a separate PR to enable the new JSON by default. "envoy.reloadable_features.remove_legacy_json", // Sentinel and test flag. 
diff --git a/source/common/tcp/conn_pool.cc b/source/common/tcp/conn_pool.cc index a4f33086956c6..38456bf4a5110 100644 --- a/source/common/tcp/conn_pool.cc +++ b/source/common/tcp/conn_pool.cc @@ -42,7 +42,7 @@ ActiveTcpClient::~ActiveTcpClient() { ASSERT(state() == ActiveClient::State::CLOSED); tcp_connection_data_->release(); parent_.onStreamClosed(*this, true); - parent_.checkForDrained(); + parent_.checkForIdleAndCloseIdleConnsIfDraining(); } } @@ -54,7 +54,7 @@ void ActiveTcpClient::clearCallbacks() { callbacks_ = nullptr; tcp_connection_data_ = nullptr; parent_.onStreamClosed(*this, true); - parent_.checkForDrained(); + parent_.checkForIdleAndCloseIdleConnsIfDraining(); } void ActiveTcpClient::onEvent(Network::ConnectionEvent event) { diff --git a/source/common/tcp/conn_pool.h b/source/common/tcp/conn_pool.h index e8c2f24c026e4..398254b498461 100644 --- a/source/common/tcp/conn_pool.h +++ b/source/common/tcp/conn_pool.h @@ -145,7 +145,12 @@ class ConnPoolImpl : public Envoy::ConnectionPool::ConnPoolImplBase, transport_socket_options, state) {} ~ConnPoolImpl() override { destructAllConnections(); } - void addDrainedCallback(DrainedCb cb) override { addDrainedCallbackImpl(cb); } + // Event::DeferredDeletable + void deleteIsPending() override { deleteIsPendingImpl(); } + + void addIdleCallback(IdleCb cb) override { addIdleCallbackImpl(cb); } + bool isIdle() const override { return isIdleImpl(); } + void startDrain() override { startDrainImpl(); } void drainConnections() override { drainConnectionsImpl(); // Legacy behavior for the TCP connection pool marks all connecting clients diff --git a/source/common/tcp/original_conn_pool.cc b/source/common/tcp/original_conn_pool.cc index 4f4da573b940d..cb4bf71b6735e 100644 --- a/source/common/tcp/original_conn_pool.cc +++ b/source/common/tcp/original_conn_pool.cc @@ -38,6 +38,7 @@ OriginalConnPoolImpl::~OriginalConnPoolImpl() { } void OriginalConnPoolImpl::drainConnections() { + ENVOY_LOG(debug, "draining connections"); 
while (!ready_conns_.empty()) { ready_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush); } @@ -67,9 +68,11 @@ void OriginalConnPoolImpl::closeConnections() { } } -void OriginalConnPoolImpl::addDrainedCallback(DrainedCb cb) { - drained_callbacks_.push_back(cb); - checkForDrained(); +void OriginalConnPoolImpl::addIdleCallback(IdleCb cb) { idle_callbacks_.push_back(cb); } + +void OriginalConnPoolImpl::startDrain() { + is_draining_ = true; + checkForIdleAndCloseIdleConnsIfDraining(); } void OriginalConnPoolImpl::assignConnection(ActiveConn& conn, @@ -81,14 +84,22 @@ void OriginalConnPoolImpl::assignConnection(ActiveConn& conn, conn.real_host_description_); } -void OriginalConnPoolImpl::checkForDrained() { - if (!drained_callbacks_.empty() && pending_requests_.empty() && busy_conns_.empty() && - pending_conns_.empty()) { - while (!ready_conns_.empty()) { - ready_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush); - } +bool OriginalConnPoolImpl::isIdle() const { + return pending_requests_.empty() && busy_conns_.empty() && pending_conns_.empty() && + ready_conns_.empty(); +} - for (const DrainedCb& cb : drained_callbacks_) { +void OriginalConnPoolImpl::checkForIdleAndCloseIdleConnsIfDraining() { + if (pending_requests_.empty() && busy_conns_.empty() && pending_conns_.empty() && + (is_draining_ || ready_conns_.empty())) { + if (is_draining_) { + ENVOY_LOG(debug, "in draining state"); + while (!ready_conns_.empty()) { + ready_conns_.front()->conn_->close(Network::ConnectionCloseType::NoFlush); + } + } + ENVOY_LOG(debug, "Calling idle callbacks - drained={}", is_draining_); + for (const IdleCb& cb : idle_callbacks_) { cb(); } } @@ -102,6 +113,8 @@ void OriginalConnPoolImpl::createNewConnection() { ConnectionPool::Cancellable* OriginalConnPoolImpl::newConnection(ConnectionPool::Callbacks& callbacks) { + ASSERT(!deferred_deleting_); + if (!ready_conns_.empty()) { ready_conns_.front()->moveBetweenLists(ready_conns_, busy_conns_); 
ENVOY_CONN_LOG(debug, "using existing connection", *busy_conns_.front()->conn_); @@ -196,8 +209,8 @@ void OriginalConnPoolImpl::onConnectionEvent(ActiveConn& conn, Network::Connecti createNewConnection(); } - if (check_for_drained) { - checkForDrained(); + if (check_for_drained || !is_draining_) { + checkForIdleAndCloseIdleConnsIfDraining(); } } @@ -232,7 +245,7 @@ void OriginalConnPoolImpl::onPendingRequestCancel(PendingRequest& request, pending_conns_.back()->conn_->close(Network::ConnectionCloseType::NoFlush); } - checkForDrained(); + checkForIdleAndCloseIdleConnsIfDraining(); } void OriginalConnPoolImpl::onConnReleased(ActiveConn& conn) { @@ -313,7 +326,7 @@ void OriginalConnPoolImpl::processIdleConnection(ActiveConn& conn, bool new_conn upstream_ready_cb_->scheduleCallbackCurrentIteration(); } - checkForDrained(); + checkForIdleAndCloseIdleConnsIfDraining(); } OriginalConnPoolImpl::ConnectionWrapper::ConnectionWrapper(ActiveConn& parent) : parent_(parent) { diff --git a/source/common/tcp/original_conn_pool.h b/source/common/tcp/original_conn_pool.h index b4c5f4bca5fd5..d5e79580c5b46 100644 --- a/source/common/tcp/original_conn_pool.h +++ b/source/common/tcp/original_conn_pool.h @@ -28,8 +28,13 @@ class OriginalConnPoolImpl : Logger::Loggable, public Connecti ~OriginalConnPoolImpl() override; + // Event::DeferredDeletable + void deleteIsPending() override { deferred_deleting_ = true; } + // ConnectionPool::Instance - void addDrainedCallback(DrainedCb cb) override; + void addIdleCallback(IdleCb cb) override; + bool isIdle() const override; + void startDrain() override; void drainConnections() override; void closeConnections() override; ConnectionPool::Cancellable* newConnection(ConnectionPool::Callbacks& callbacks) override; @@ -148,7 +153,7 @@ class OriginalConnPoolImpl : Logger::Loggable, public Connecti virtual void onConnDestroyed(ActiveConn& conn); void onUpstreamReady(); void processIdleConnection(ActiveConn& conn, bool new_connection, bool delay); - void 
checkForDrained(); + void checkForIdleAndCloseIdleConnsIfDraining(); Event::Dispatcher& dispatcher_; Upstream::HostConstSharedPtr host_; @@ -160,10 +165,13 @@ class OriginalConnPoolImpl : Logger::Loggable, public Connecti std::list ready_conns_; // conns ready for assignment std::list busy_conns_; // conns assigned std::list pending_requests_; - std::list drained_callbacks_; + std::list idle_callbacks_; Stats::TimespanPtr conn_connect_ms_; Event::SchedulableCallbackPtr upstream_ready_cb_; + bool upstream_ready_enabled_{false}; + bool is_draining_{false}; + bool deferred_deleting_{false}; }; } // namespace Tcp diff --git a/source/common/tcp_proxy/tcp_proxy.cc b/source/common/tcp_proxy/tcp_proxy.cc index ce3f18d7d65c2..a369af7358d16 100644 --- a/source/common/tcp_proxy/tcp_proxy.cc +++ b/source/common/tcp_proxy/tcp_proxy.cc @@ -711,14 +711,22 @@ void Filter::disableIdleTimer() { UpstreamDrainManager::~UpstreamDrainManager() { // If connections aren't closed before they are destructed an ASSERT fires, // so cancel all pending drains, which causes the connections to be closed. - while (!drainers_.empty()) { - auto begin = drainers_.begin(); - Drainer* key = begin->first; - begin->second->cancelDrain(); + if (!drainers_.empty()) { + auto& dispatcher = drainers_.begin()->second->dispatcher(); + while (!drainers_.empty()) { + auto begin = drainers_.begin(); + Drainer* key = begin->first; + begin->second->cancelDrain(); + + // cancelDrain() should cause that drainer to be removed from drainers_. + // ASSERT so that we don't end up in an infinite loop. + ASSERT(drainers_.find(key) == drainers_.end()); + } - // cancelDrain() should cause that drainer to be removed from drainers_. - // ASSERT so that we don't end up in an infinite loop. - ASSERT(drainers_.find(key) == drainers_.end()); + // This destructor is run when shutting down `ThreadLocal`. 
The destructor of some objects use + // earlier `ThreadLocal` slots (for accessing the runtime snapshot) so they must run before that + // slot is destructed. Clear the list to enforce that ordering. + dispatcher.clearDeferredDeleteList(); } } @@ -790,5 +798,7 @@ void Drainer::cancelDrain() { upstream_conn_data_->connection().close(Network::ConnectionCloseType::NoFlush); } +Event::Dispatcher& Drainer::dispatcher() { return upstream_conn_data_->connection().dispatcher(); } + } // namespace TcpProxy } // namespace Envoy diff --git a/source/common/tcp_proxy/tcp_proxy.h b/source/common/tcp_proxy/tcp_proxy.h index 06c5572a313f2..7e22c3273bc62 100644 --- a/source/common/tcp_proxy/tcp_proxy.h +++ b/source/common/tcp_proxy/tcp_proxy.h @@ -401,6 +401,7 @@ class Drainer : public Event::DeferredDeletable { void onIdleTimeout(); void onBytesSent(); void cancelDrain(); + Event::Dispatcher& dispatcher(); private: UpstreamDrainManager& parent_; diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index 24343d13af833..42b49a1cd751f 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -1116,6 +1116,9 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::~ThreadLocalClusterManagerImp } } thread_local_clusters_.clear(); + + // Ensure that all pools are completely destructed. 
+ thread_local_dispatcher_.clearDeferredDeleteList(); } void ClusterManagerImpl::ThreadLocalClusterManagerImpl::drainConnPools(const HostVector& hosts) { @@ -1129,7 +1132,7 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::drainConnPools(const Hos { auto container = host_tcp_conn_pool_map_.find(host); if (container != host_tcp_conn_pool_map_.end()) { - drainTcpConnPools(host, container->second); + drainTcpConnPools(container->second); } } } @@ -1137,80 +1140,37 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::drainConnPools(const Hos void ClusterManagerImpl::ThreadLocalClusterManagerImpl::drainConnPools( HostSharedPtr old_host, ConnPoolsContainer& container) { - container.drains_remaining_ += container.pools_->size(); - // Make a copy to protect against erasure in the callback. std::shared_ptr pools = container.pools_; - pools->addDrainedCallback([this, old_host]() -> void { - if (destroying_) { - // It is possible for a connection pool to fire drain callbacks during destruction. Instead - // of checking if old_host actually exists in the map, it's clearer and cleaner to keep - // track of destruction as a separate state and check for it here. This also allows us to - // do this check here versus inside every different connection pool implementation. - return; - } - - ConnPoolsContainer* to_clear = getHttpConnPoolsContainer(old_host); - if (to_clear == nullptr) { - // This could happen if we have cleaned out the host before iterating through every connection - // pool. Handle it by just continuing. - return; - } - - ASSERT(to_clear->drains_remaining_ > 0); - to_clear->drains_remaining_--; - if (to_clear->drains_remaining_ == 0 && to_clear->ready_to_drain_) { - clearContainer(old_host, *to_clear); - } - }); + container.draining_ = true; // We need to hold off on actually emptying out the container until we have finished processing - // `addDrainedCallback`. 
If we do not, then it's possible that the container could be erased in + // `addIdleCallback`. If we do not, then it's possible that the container could be erased in // the middle of its iteration, which leads to undefined behaviour. We handle that case by - // checking here to see if the drains have completed. - container.ready_to_drain_ = true; - if (container.drains_remaining_ == 0) { - clearContainer(old_host, container); - } -} + // guarding deletion with `do_not_delete_` in the registered idle callback, and then checking + // afterwards whether it is empty and deleting it if necessary. + container.do_not_delete_ = true; + pools->startDrain(); + container.do_not_delete_ = false; -void ClusterManagerImpl::ThreadLocalClusterManagerImpl::clearContainer( - HostSharedPtr old_host, ConnPoolsContainer& container) { - container.pools_->clear(); - host_http_conn_pool_map_.erase(old_host); + if (container.pools_->size() == 0) { + host_http_conn_pool_map_.erase(old_host); + } } void ClusterManagerImpl::ThreadLocalClusterManagerImpl::drainTcpConnPools( - HostSharedPtr old_host, TcpConnPoolsContainer& container) { - container.drains_remaining_ += container.pools_.size(); + TcpConnPoolsContainer& container) { + // Copy the pools so that it is safe for the completion callback to mutate `container.pools_`. + // `container` may be invalid after all calls to `startDrain()`. + std::vector pools; for (const auto& pair : container.pools_) { - pair.second->addDrainedCallback([this, old_host]() -> void { - if (destroying_) { - // It is possible for a connection pool to fire drain callbacks during destruction. Instead - // of checking if old_host actually exists in the map, it's clearer and cleaner to keep - // track of destruction as a separate state and check for it here. This also allows us to - // do this check here versus inside every different connection pool implementation. 
- return; - } - - TcpConnPoolsContainer& container = host_tcp_conn_pool_map_[old_host]; - ASSERT(container.drains_remaining_ > 0); - container.drains_remaining_--; - if (container.drains_remaining_ == 0) { - for (auto& pair : container.pools_) { - thread_local_dispatcher_.deferredDelete(std::move(pair.second)); - } - host_tcp_conn_pool_map_.erase(old_host); - } - }); + pools.push_back(pair.second.get()); + } - // The above addDrainedCallback() drain completion callback might execute immediately. This can - // then effectively nuke 'container', which means we can't continue to loop on its contents - // (we're done here). - if (host_tcp_conn_pool_map_.count(old_host) == 0) { - break; - } + container.draining_ = true; + for (auto pool : pools) { + pool->startDrain(); } } @@ -1271,7 +1231,13 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::onHostHealthFailure( { const auto container = getHttpConnPoolsContainer(host); if (container != nullptr) { + container->do_not_delete_ = true; container->pools_->drainConnections(); + container->do_not_delete_ = false; + + if (container->pools_->size() == 0) { + host_http_conn_pool_map_.erase(host); + } } } { @@ -1281,8 +1247,15 @@ void ClusterManagerImpl::ThreadLocalClusterManagerImpl::onHostHealthFailure( // active connections. const auto& container = host_tcp_conn_pool_map_.find(host); if (container != host_tcp_conn_pool_map_.end()) { + // Draining pools or closing connections can cause pool deletion if it becomes + // idle. Copy `pools_` so that we aren't iterating through a container that + // gets mutated by callbacks deleting from it. 
+ std::vector pools; for (const auto& pair : container->second.pools_) { - const Tcp::ConnectionPool::InstancePtr& pool = pair.second; + pools.push_back(pair.second.get()); + } + + for (auto* pool : pools) { if (host->cluster().features() & ClusterInfo::Features::CLOSE_CONNECTIONS_ON_HOST_HEALTH_FAILURE) { pool->closeConnections(); @@ -1460,11 +1433,16 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::connPool( // function. Otherwise, we'd need to capture a few of these variables by value. ConnPoolsContainer::ConnPools::PoolOptRef pool = container.pools_->getPool(priority, hash_key, [&]() { - return parent_.parent_.factory_.allocateConnPool( + auto pool = parent_.parent_.factory_.allocateConnPool( parent_.thread_local_dispatcher_, host, priority, upstream_protocols, alternate_protocol_options, !upstream_options->empty() ? upstream_options : nullptr, have_transport_socket_options ? context->upstreamTransportSocketOptions() : nullptr, parent_.parent_.time_source_, parent_.cluster_manager_state_); + + pool->addIdleCallback( + [this, host, priority, hash_key]() { httpConnPoolIsIdle(host, priority, hash_key); }); + + return pool; }); if (pool.has_value()) { @@ -1474,6 +1452,38 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::connPool( } } +void ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::httpConnPoolIsIdle( + HostConstSharedPtr host, ResourcePriority priority, const std::vector& hash_key) { + if (parent_.destroying_) { + // If the Cluster is being destroyed, this pool will be cleaned up by that + // process. + return; + } + + ConnPoolsContainer* container = parent_.getHttpConnPoolsContainer(host); + if (container == nullptr) { + // This could happen if we have cleaned out the host before iterating through every + // connection pool. Handle it by just continuing. 
+ return; + } + + if (container->draining_ || + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.conn_pool_delete_when_idle")) { + + ENVOY_LOG(trace, "Erasing idle pool for host {}", host); + container->pools_->erasePool(priority, hash_key); + + // Guard deletion of the container with `do_not_delete_` to avoid deletion while + // iterating through the container in `container->pools_->startDrain()`. See + // comment in `ClusterManagerImpl::ThreadLocalClusterManagerImpl::drainConnPools`. + if (!container->do_not_delete_ && container->pools_->size() == 0) { + ENVOY_LOG(trace, "Pool container empty for host {}, erasing host entry", host); + parent_.host_http_conn_pool_map_.erase( + host); // NOTE: `container` is erased after this point in the lambda. + } + } +} + Tcp::ConnectionPool::Instance* ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::tcpConnPool( ResourcePriority priority, LoadBalancerContext* context, bool peek) { @@ -1511,15 +1521,50 @@ ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::tcpConnPool( } TcpConnPoolsContainer& container = parent_.host_tcp_conn_pool_map_[host]; - if (!container.pools_[hash_key]) { - container.pools_[hash_key] = parent_.parent_.factory_.allocateTcpConnPool( - parent_.thread_local_dispatcher_, host, priority, - !upstream_options->empty() ? upstream_options : nullptr, - have_transport_socket_options ? context->upstreamTransportSocketOptions() : nullptr, - parent_.cluster_manager_state_); + auto pool_iter = container.pools_.find(hash_key); + if (pool_iter == container.pools_.end()) { + bool inserted; + std::tie(pool_iter, inserted) = container.pools_.emplace( + hash_key, + parent_.parent_.factory_.allocateTcpConnPool( + parent_.thread_local_dispatcher_, host, priority, + !upstream_options->empty() ? upstream_options : nullptr, + have_transport_socket_options ? 
context->upstreamTransportSocketOptions() : nullptr, + parent_.cluster_manager_state_)); + ASSERT(inserted); + pool_iter->second->addIdleCallback( + [this, host, hash_key]() { tcpConnPoolIsIdle(host, hash_key); }); + } + + return pool_iter->second.get(); +} + +void ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::tcpConnPoolIsIdle( + HostConstSharedPtr host, const std::vector& hash_key) { + if (parent_.destroying_) { + // If the Cluster is being destroyed, this pool will be cleaned up by that process. + return; } - return container.pools_[hash_key].get(); + auto it = parent_.host_tcp_conn_pool_map_.find(host); + if (it != parent_.host_tcp_conn_pool_map_.end()) { + TcpConnPoolsContainer& container = it->second; + + auto erase_iter = container.pools_.find(hash_key); + if (erase_iter != container.pools_.end()) { + if (container.draining_ || + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.conn_pool_delete_when_idle")) { + ENVOY_LOG(trace, "Idle pool, erasing pool for host {}", host); + parent_.thread_local_dispatcher_.deferredDelete(std::move(erase_iter->second)); + container.pools_.erase(erase_iter); + } + } + + if (container.pools_.empty()) { + parent_.host_tcp_conn_pool_map_.erase( + host); // NOTE: `container` is erased after this point in the lambda. 
+ } + } } ClusterManagerPtr ProdClusterManagerFactory::clusterManagerFromProto( @@ -1590,6 +1635,7 @@ Tcp::ConnectionPool::InstancePtr ProdClusterManagerFactory::allocateTcpConnPool( const Network::ConnectionSocket::OptionsSharedPtr& options, Network::TransportSocketOptionsConstSharedPtr transport_socket_options, ClusterConnectivityState& state) { + ENVOY_LOG_MISC(debug, "Allocating TCP conn pool"); if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.new_tcp_connection_pool")) { return std::make_unique(dispatcher, host, priority, options, transport_socket_options, state); diff --git a/source/common/upstream/cluster_manager_impl.h b/source/common/upstream/cluster_manager_impl.h index d03ea3c439626..8f66d055f49e5 100644 --- a/source/common/upstream/cluster_manager_impl.h +++ b/source/common/upstream/cluster_manager_impl.h @@ -359,15 +359,18 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable pools_; - bool ready_to_drain_{false}; - uint64_t drains_remaining_{}; + bool draining_{false}; + + // Protect from deletion while iterating through pools_. See comments and usage + // in `ClusterManagerImpl::ThreadLocalClusterManagerImpl::drainConnPools()`. + bool do_not_delete_{false}; }; struct TcpConnPoolsContainer { using ConnPools = std::map, Tcp::ConnectionPool::InstancePtr>; ConnPools pools_; - uint64_t drains_remaining_{}; + bool draining_{false}; }; // Holds an unowned reference to a connection, and watches for Closed events. 
If the connection @@ -409,6 +412,10 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable& hash_key); + void tcpConnPoolIsIdle(HostConstSharedPtr host, const std::vector& hash_key); + // Upstream::ThreadLocalCluster const PrioritySet& prioritySet() override { return priority_set_; } ClusterInfoConstSharedPtr info() override { return cluster_info_; } @@ -445,8 +452,7 @@ class ClusterManagerImpl : public ClusterManager, Logger::Loggable class ConnPoolMap { public: using PoolFactory = std::function()>; - using DrainedCb = std::function; + using IdleCb = typename POOL_TYPE::IdleCb; using PoolOptRef = absl::optional>; ConnPoolMap(Event::Dispatcher& dispatcher, const HostConstSharedPtr& host, @@ -31,7 +31,14 @@ template class ConnPoolMap { * possible for this to fail if a limit on the number of pools allowed is reached. * @return The pool corresponding to `key`, or `absl::nullopt`. */ - PoolOptRef getPool(KEY_TYPE key, const PoolFactory& factory); + PoolOptRef getPool(const KEY_TYPE& key, const PoolFactory& factory); + + /** + * Erases an existing pool mapped to `key`. + * + * @return true if the entry exists and was removed, false otherwise + */ + bool erasePool(const KEY_TYPE& key); /** * @return the number of pools. @@ -44,15 +51,20 @@ template class ConnPoolMap { void clear(); /** - * Adds a drain callback to all mapped pools. Any future mapped pools with have the callback + * Adds an idle callback to all mapped pools. Any future mapped pools with have the callback * automatically added. Be careful with the callback. If it itself calls into `this`, modifying * the state of `this`, there is a good chance it will cause corruption due to the callback firing * immediately. */ - void addDrainedCallback(const DrainedCb& cb); + void addIdleCallback(const IdleCb& cb); + + /** + * See `Envoy::ConnectionPool::Instance::startDrain()`. + */ + void startDrain(); /** - * Instructs each connection pool to drain its connections. 
+ * See `Envoy::ConnectionPool::Instance::drainConnections()`. */ void drainConnections(); @@ -70,7 +82,7 @@ template class ConnPoolMap { absl::flat_hash_map> active_pools_; Event::Dispatcher& thread_local_dispatcher_; - std::vector cached_callbacks_; + std::vector cached_callbacks_; Common::DebugRecursionChecker recursion_checker_; const HostConstSharedPtr host_; const ResourcePriority priority_; diff --git a/source/common/upstream/conn_pool_map_impl.h b/source/common/upstream/conn_pool_map_impl.h index b183f9d438bb7..18176d0a18ec8 100644 --- a/source/common/upstream/conn_pool_map_impl.h +++ b/source/common/upstream/conn_pool_map_impl.h @@ -21,7 +21,7 @@ template ConnPoolMap typename ConnPoolMap::PoolOptRef -ConnPoolMap::getPool(KEY_TYPE key, const PoolFactory& factory) { +ConnPoolMap::getPool(const KEY_TYPE& key, const PoolFactory& factory) { Common::AutoDebugRecursionChecker assert_not_in(recursion_checker_); // TODO(klarose): Consider how we will change the connection pool's configuration in the future. // The plan is to change the downstream socket options... 
We may want to take those as a parameter @@ -53,13 +53,28 @@ ConnPoolMap::getPool(KEY_TYPE key, const PoolFactory& facto auto new_pool = factory(); connPoolResource.inc(); for (const auto& cb : cached_callbacks_) { - new_pool->addDrainedCallback(cb); + new_pool->addIdleCallback(cb); } auto inserted = active_pools_.emplace(key, std::move(new_pool)); return std::ref(*inserted.first->second); } +template +bool ConnPoolMap::erasePool(const KEY_TYPE& key) { + Common::AutoDebugRecursionChecker assert_not_in(recursion_checker_); + auto pool_iter = active_pools_.find(key); + + if (pool_iter != active_pools_.end()) { + thread_local_dispatcher_.deferredDelete(std::move(pool_iter->second)); + active_pools_.erase(pool_iter); + host_->cluster().resourceManager(priority_).connectionPools().dec(); + return true; + } else { + return false; + } +} + template size_t ConnPoolMap::size() const { return active_pools_.size(); @@ -74,20 +89,42 @@ template void ConnPoolMap -void ConnPoolMap::addDrainedCallback(const DrainedCb& cb) { +void ConnPoolMap::addIdleCallback(const IdleCb& cb) { Common::AutoDebugRecursionChecker assert_not_in(recursion_checker_); for (auto& pool_pair : active_pools_) { - pool_pair.second->addDrainedCallback(cb); + pool_pair.second->addIdleCallback(cb); } cached_callbacks_.emplace_back(std::move(cb)); } +template +void ConnPoolMap::startDrain() { + // Copy the `active_pools_` so that it is safe for the call to result + // in deletion, and avoid iteration through a mutating container. + std::vector pools; + pools.reserve(active_pools_.size()); + for (auto& pool_pair : active_pools_) { + pools.push_back(pool_pair.second.get()); + } + + for (auto* pool : pools) { + pool->startDrain(); + } +} + template void ConnPoolMap::drainConnections() { - Common::AutoDebugRecursionChecker assert_not_in(recursion_checker_); + // Copy the `active_pools_` so that it is safe for the call to result + // in deletion, and avoid iteration through a mutating container. 
+ std::vector pools; + pools.reserve(active_pools_.size()); for (auto& pool_pair : active_pools_) { - pool_pair.second->drainConnections(); + pools.push_back(pool_pair.second.get()); + } + + for (auto* pool : pools) { + pool->drainConnections(); } } diff --git a/source/common/upstream/priority_conn_pool_map.h b/source/common/upstream/priority_conn_pool_map.h index 18ce2c52eb959..c43ba46c06ea5 100644 --- a/source/common/upstream/priority_conn_pool_map.h +++ b/source/common/upstream/priority_conn_pool_map.h @@ -15,7 +15,7 @@ template class PriorityConnPoolMap { public: using ConnPoolMapType = ConnPoolMap; using PoolFactory = typename ConnPoolMapType::PoolFactory; - using DrainedCb = typename ConnPoolMapType::DrainedCb; + using IdleCb = typename ConnPoolMapType::IdleCb; using PoolOptRef = typename ConnPoolMapType::PoolOptRef; PriorityConnPoolMap(Event::Dispatcher& dispatcher, const HostConstSharedPtr& host); @@ -26,7 +26,12 @@ template class PriorityConnPoolMap { * is reached. * @return The pool corresponding to `key`, or `absl::nullopt`. */ - PoolOptRef getPool(ResourcePriority priority, KEY_TYPE key, const PoolFactory& factory); + PoolOptRef getPool(ResourcePriority priority, const KEY_TYPE& key, const PoolFactory& factory); + + /** + * Erase a pool for the given priority and `key` if it exists and is idle. + */ + bool erasePool(ResourcePriority priority, const KEY_TYPE& key); /** * @return the number of pools across all priorities. @@ -44,14 +49,21 @@ template class PriorityConnPoolMap { * the state of `this`, there is a good chance it will cause corruption due to the callback firing * immediately. */ - void addDrainedCallback(const DrainedCb& cb); + void addIdleCallback(const IdleCb& cb); /** - * Instructs each connection pool to drain its connections. + * See `Envoy::ConnectionPool::Instance::startDrain()`. + */ + void startDrain(); + + /** + * See `Envoy::ConnectionPool::Instance::drainConnections()`. 
*/ void drainConnections(); private: + size_t getPriorityIndex(ResourcePriority priority) const; + std::array, NumResourcePriorities> conn_pool_maps_; }; diff --git a/source/common/upstream/priority_conn_pool_map_impl.h b/source/common/upstream/priority_conn_pool_map_impl.h index b1cb6f8c54d12..66cc9ff4407ea 100644 --- a/source/common/upstream/priority_conn_pool_map_impl.h +++ b/source/common/upstream/priority_conn_pool_map_impl.h @@ -20,11 +20,15 @@ PriorityConnPoolMap::~PriorityConnPoolMap() = default; template typename PriorityConnPoolMap::PoolOptRef -PriorityConnPoolMap::getPool(ResourcePriority priority, KEY_TYPE key, +PriorityConnPoolMap::getPool(ResourcePriority priority, const KEY_TYPE& key, const PoolFactory& factory) { - size_t index = static_cast(priority); - ASSERT(index < conn_pool_maps_.size()); - return conn_pool_maps_[index]->getPool(key, factory); + return conn_pool_maps_[getPriorityIndex(priority)]->getPool(key, factory); +} + +template +bool PriorityConnPoolMap::erasePool(ResourcePriority priority, + const KEY_TYPE& key) { + return conn_pool_maps_[getPriorityIndex(priority)]->erasePool(key); } template @@ -44,9 +48,16 @@ void PriorityConnPoolMap::clear() { } template -void PriorityConnPoolMap::addDrainedCallback(const DrainedCb& cb) { +void PriorityConnPoolMap::addIdleCallback(const IdleCb& cb) { for (auto& pool_map : conn_pool_maps_) { - pool_map->addDrainedCallback(cb); + pool_map->addIdleCallback(cb); + } +} + +template +void PriorityConnPoolMap::startDrain() { + for (auto& pool_map : conn_pool_maps_) { + pool_map->startDrain(); } } @@ -57,5 +68,12 @@ void PriorityConnPoolMap::drainConnections() { } } +template +size_t PriorityConnPoolMap::getPriorityIndex(ResourcePriority priority) const { + size_t index = static_cast(priority); + ASSERT(index < conn_pool_maps_.size()); + return index; +} + } // namespace Upstream } // namespace Envoy diff --git a/source/server/BUILD b/source/server/BUILD index ee2c07b4e8380..dc7f3173ae2f6 100644 --- 
a/source/server/BUILD +++ b/source/server/BUILD @@ -574,6 +574,8 @@ envoy_cc_library( "//source/common/common:logger_lib", "//source/common/common:mutex_tracer_lib", "//source/common/common:utility_lib", + "//source/common/config:grpc_mux_lib", + "//source/common/config:new_grpc_mux_lib", "//source/common/config:utility_lib", "//source/common/config:xds_resource_lib", "//source/common/grpc:async_client_manager_lib", diff --git a/source/server/server.cc b/source/server/server.cc index d8bc98da42ef2..189ecea07dad9 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -29,6 +29,8 @@ #include "source/common/common/enum_to_int.h" #include "source/common/common/mutex_tracer_impl.h" #include "source/common/common/utility.h" +#include "source/common/config/grpc_mux_impl.h" +#include "source/common/config/new_grpc_mux_impl.h" #include "source/common/config/utility.h" #include "source/common/config/version_converter.h" #include "source/common/config/xds_resource.h" @@ -837,6 +839,10 @@ void InstanceImpl::terminate() { // Before the workers start exiting we should disable stat threading. stats_store_.shutdownThreading(); + // TODO: figure out the correct fix: https://github.com/envoyproxy/envoy/issues/15072. + Config::GrpcMuxImpl::shutdownAll(); + Config::NewGrpcMuxImpl::shutdownAll(); + if (overload_manager_) { overload_manager_->stop(); } diff --git a/test/common/conn_pool/conn_pool_base_test.cc b/test/common/conn_pool/conn_pool_base_test.cc index 5d8d64d359db3..f1f53f69f9fd8 100644 --- a/test/common/conn_pool/conn_pool_base_test.cc +++ b/test/common/conn_pool/conn_pool_base_test.cc @@ -60,7 +60,8 @@ class ConnPoolImplBaseTest : public testing::Test { // connection resource limit for most tests. 
cluster_->resetResourceManager(1024, 1024, 1024, 1, 1); ON_CALL(pool_, instantiateActiveClient).WillByDefault(Invoke([&]() -> ActiveClientPtr { - auto ret = std::make_unique(pool_, stream_limit_, concurrent_streams_); + auto ret = + std::make_unique>(pool_, stream_limit_, concurrent_streams_); clients_.push_back(ret.get()); ret->real_host_description_ = descr_; return ret; @@ -88,7 +89,7 @@ class ConnPoolImplBaseTest : public testing::Test { Upstream::makeTestHost(cluster_, "tcp://127.0.0.1:80", dispatcher_.timeSource())}; TestConnPoolImplBase pool_; AttachContext context_; - std::vector clients_; + std::vector clients_; }; TEST_F(ConnPoolImplBaseTest, DumpState) { @@ -201,5 +202,61 @@ TEST_F(ConnPoolImplBaseTest, ExplicitPreconnectNotHealthy) { EXPECT_FALSE(pool_.maybePreconnect(1)); } +// Remote close simulates the peer closing the connection. +TEST_F(ConnPoolImplBaseTest, PoolIdleCallbackTriggeredRemoteClose) { + EXPECT_CALL(dispatcher_, createTimer_(_)).Times(AnyNumber()); + + // Create a new stream using the pool + EXPECT_CALL(pool_, instantiateActiveClient); + pool_.newStream(context_); + ASSERT_EQ(1, clients_.size()); + + // Emulate the new upstream connection establishment + EXPECT_CALL(pool_, onPoolReady); + clients_.back()->onEvent(Network::ConnectionEvent::Connected); + + // The pool now has no requests/streams, but has an open connection, so it is not yet idle. + clients_.back()->active_streams_ = 0; + pool_.onStreamClosed(*clients_.back(), false); + + // Now that the last connection is closed, while there are no requests, the pool becomes idle. 
+ testing::MockFunction idle_pool_callback; + EXPECT_CALL(idle_pool_callback, Call()); + pool_.addIdleCallbackImpl(idle_pool_callback.AsStdFunction()); + dispatcher_.clearDeferredDeleteList(); + clients_.back()->onEvent(Network::ConnectionEvent::RemoteClose); + + EXPECT_CALL(idle_pool_callback, Call()); + pool_.startDrainImpl(); +} + +// Local close simulates what would happen for an idle timeout on a connection. +TEST_F(ConnPoolImplBaseTest, PoolIdleCallbackTriggeredLocalClose) { + EXPECT_CALL(dispatcher_, createTimer_(_)).Times(AnyNumber()); + + // Create a new stream using the pool + EXPECT_CALL(pool_, instantiateActiveClient); + pool_.newStream(context_); + ASSERT_EQ(1, clients_.size()); + + // Emulate the new upstream connection establishment + EXPECT_CALL(pool_, onPoolReady); + clients_.back()->onEvent(Network::ConnectionEvent::Connected); + + // The pool now has no requests/streams, but has an open connection, so it is not yet idle. + clients_.back()->active_streams_ = 0; + pool_.onStreamClosed(*clients_.back(), false); + + // Now that the last connection is closed, while there are no requests, the pool becomes idle. 
+ testing::MockFunction idle_pool_callback; + EXPECT_CALL(idle_pool_callback, Call()); + pool_.addIdleCallbackImpl(idle_pool_callback.AsStdFunction()); + dispatcher_.clearDeferredDeleteList(); + clients_.back()->onEvent(Network::ConnectionEvent::LocalClose); + + EXPECT_CALL(idle_pool_callback, Call()); + pool_.startDrainImpl(); +} + } // namespace ConnectionPool } // namespace Envoy diff --git a/test/common/http/conn_pool_grid_test.cc b/test/common/http/conn_pool_grid_test.cc index ec76c120984ec..89affdb9a4d04 100644 --- a/test/common/http/conn_pool_grid_test.cc +++ b/test/common/http/conn_pool_grid_test.cc @@ -41,6 +41,7 @@ class ConnectivityGridForTest : public ConnectivityGrid { return absl::nullopt; } ConnectionPool::MockInstance* instance = new NiceMock(); + setupPool(*instance); pools_.push_back(ConnectionPool::InstancePtr{instance}); ON_CALL(*instance, newStream(_, _)) .WillByDefault( @@ -405,32 +406,28 @@ TEST_F(ConnectivityGridTest, DrainCallbacks) { grid_.createNextPool(); bool drain_received = false; - bool second_drain_received = false; - ConnectionPool::Instance::DrainedCb pool1_cb; - ConnectionPool::Instance::DrainedCb pool2_cb; - // The first time a drained callback is added, the Grid's callback should be - // added to both pools. + grid_.addIdleCallback([&]() { drain_received = true; }); + + // The first time a drain is started, both pools should start draining. { - EXPECT_CALL(*grid_.first(), addDrainedCallback(_)) - .WillOnce(Invoke(Invoke([&](ConnectionPool::Instance::DrainedCb cb) { pool1_cb = cb; }))); - EXPECT_CALL(*grid_.second(), addDrainedCallback(_)) - .WillOnce(Invoke(Invoke([&](ConnectionPool::Instance::DrainedCb cb) { pool2_cb = cb; }))); - grid_.addDrainedCallback([&drain_received]() -> void { drain_received = true; }); + EXPECT_CALL(*grid_.first(), startDrain()); + EXPECT_CALL(*grid_.second(), startDrain()); + grid_.startDrain(); } - // The second time a drained callback is added, the pools will not see any - // change. 
+ // The second time, the pools will not see any change. { - EXPECT_CALL(*grid_.first(), addDrainedCallback(_)).Times(0); - EXPECT_CALL(*grid_.second(), addDrainedCallback(_)).Times(0); - grid_.addDrainedCallback([&second_drain_received]() -> void { second_drain_received = true; }); + EXPECT_CALL(*grid_.first(), startDrain()).Times(0); + EXPECT_CALL(*grid_.second(), startDrain()).Times(0); + grid_.startDrain(); } { // Notify the grid the second pool has been drained. This should not be // passed up to the original callers. EXPECT_FALSE(drain_received); - (pool2_cb)(); + EXPECT_CALL(*grid_.second(), isIdle()).WillRepeatedly(Return(true)); + grid_.second()->idle_cb_(); EXPECT_FALSE(drain_received); } @@ -438,27 +435,57 @@ TEST_F(ConnectivityGridTest, DrainCallbacks) { // Notify the grid that another pool has been drained. Now that all pools are // drained, the original callers should be informed. EXPECT_FALSE(drain_received); - (pool1_cb)(); + EXPECT_CALL(*grid_.first(), isIdle()).WillRepeatedly(Return(true)); + grid_.first()->idle_cb_(); EXPECT_TRUE(drain_received); - EXPECT_TRUE(second_drain_received); } } +// Make sure idle callbacks work as expected. +TEST_F(ConnectivityGridTest, IdleCallbacks) { + // Synthetically create both pools. + grid_.createNextPool(); + grid_.createNextPool(); + + bool idle_received = false; + + grid_.addIdleCallback([&]() { idle_received = true; }); + EXPECT_FALSE(idle_received); + + // Notify the grid the second pool is idle. This should not be + // passed up to the original callers. + EXPECT_CALL(*grid_.second(), isIdle()).WillOnce(Return(true)); + EXPECT_CALL(*grid_.first(), isIdle()).WillOnce(Return(false)); + grid_.second()->idle_cb_(); + EXPECT_FALSE(idle_received); + + // Notify the grid that the first pool is idle, the but second no longer is. 
+ EXPECT_CALL(*grid_.first(), isIdle()).WillOnce(Return(true)); + EXPECT_CALL(*grid_.second(), isIdle()).WillOnce(Return(false)); + grid_.first()->idle_cb_(); + EXPECT_FALSE(idle_received); + + // Notify the grid that both are now idle. This should be passed up + // to the original caller. + EXPECT_CALL(*grid_.first(), isIdle()).WillOnce(Return(true)); + EXPECT_CALL(*grid_.second(), isIdle()).WillOnce(Return(true)); + grid_.first()->idle_cb_(); + EXPECT_TRUE(idle_received); +} + // Ensure drain callbacks aren't called during grid teardown. TEST_F(ConnectivityGridTest, NoDrainOnTeardown) { grid_.createNextPool(); bool drain_received = false; - ConnectionPool::Instance::DrainedCb pool1_cb; { - EXPECT_CALL(*grid_.first(), addDrainedCallback(_)) - .WillOnce(Invoke(Invoke([&](ConnectionPool::Instance::DrainedCb cb) { pool1_cb = cb; }))); - grid_.addDrainedCallback([&drain_received]() -> void { drain_received = true; }); + grid_.addIdleCallback([&drain_received]() -> void { drain_received = true; }); + grid_.startDrain(); } grid_.setDestroying(); // Fake being in the destructor. 
- (pool1_cb)(); + grid_.first()->idle_cb_(); EXPECT_FALSE(drain_received); } diff --git a/test/common/http/http1/conn_pool_test.cc b/test/common/http/http1/conn_pool_test.cc index c0493c7307b47..f603967b14c97 100644 --- a/test/common/http/http1/conn_pool_test.cc +++ b/test/common/http/http1/conn_pool_test.cc @@ -31,6 +31,7 @@ #include "gtest/gtest.h" using testing::_; +using testing::AtLeast; using testing::DoAll; using testing::InSequence; using testing::Invoke; @@ -946,16 +947,17 @@ TEST_F(Http1ConnPoolImplTest, DrainCallback) { InSequence s; ReadyWatcher drained; - EXPECT_CALL(drained, ready()); - conn_pool_->addDrainedCallback([&]() -> void { drained.ready(); }); - ActiveTestRequest r1(*this, 0, ActiveTestRequest::Type::CreateConnection); ActiveTestRequest r2(*this, 0, ActiveTestRequest::Type::Pending); + + conn_pool_->addIdleCallback([&]() -> void { drained.ready(); }); + conn_pool_->startDrain(); + r2.handle_->cancel(Envoy::ConnectionPool::CancelPolicy::Default); EXPECT_EQ(1U, cluster_->stats_.upstream_rq_total_.value()); conn_pool_->expectEnableUpstreamReady(); - EXPECT_CALL(drained, ready()); + EXPECT_CALL(drained, ready()).Times(AtLeast(1)); r1.startRequest(); r1.completeResponse(false); @@ -975,10 +977,11 @@ TEST_F(Http1ConnPoolImplTest, DrainWhileConnecting) { Http::ConnectionPool::Cancellable* handle = conn_pool_->newStream(outer_decoder, callbacks); EXPECT_NE(nullptr, handle); - conn_pool_->addDrainedCallback([&]() -> void { drained.ready(); }); + conn_pool_->addIdleCallback([&]() -> void { drained.ready(); }); + conn_pool_->startDrain(); EXPECT_CALL(*conn_pool_->test_clients_[0].connection_, close(Network::ConnectionCloseType::NoFlush)); - EXPECT_CALL(drained, ready()); + EXPECT_CALL(drained, ready()).Times(AtLeast(1)); handle->cancel(Envoy::ConnectionPool::CancelPolicy::Default); EXPECT_CALL(*conn_pool_, onClientDestroy()); diff --git a/test/common/http/http2/conn_pool_test.cc b/test/common/http/http2/conn_pool_test.cc index 
9ec7f843db07c..4f1b8e6f4bbec 100644 --- a/test/common/http/http2/conn_pool_test.cc +++ b/test/common/http/http2/conn_pool_test.cc @@ -23,6 +23,7 @@ #include "gtest/gtest.h" using testing::_; +using testing::AtLeast; using testing::DoAll; using testing::InSequence; using testing::Invoke; @@ -1089,7 +1090,8 @@ TEST_F(Http2ConnPoolImplTest, DrainDisconnectWithActiveRequest) { ->encodeHeaders(TestRequestHeaderMapImpl{{":path", "/"}, {":method", "GET"}}, true) .ok()); ReadyWatcher drained; - pool_->addDrainedCallback([&]() -> void { drained.ready(); }); + pool_->addIdleCallback([&]() -> void { drained.ready(); }); + pool_->startDrain(); EXPECT_CALL(dispatcher_, deferredDelete_(_)); EXPECT_CALL(drained, ready()); @@ -1125,7 +1127,8 @@ TEST_F(Http2ConnPoolImplTest, DrainDisconnectDrainingWithActiveRequest) { .ok()); ReadyWatcher drained; - pool_->addDrainedCallback([&]() -> void { drained.ready(); }); + pool_->addIdleCallback([&]() -> void { drained.ready(); }); + pool_->startDrain(); EXPECT_CALL(dispatcher_, deferredDelete_(_)); EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); @@ -1168,7 +1171,8 @@ TEST_F(Http2ConnPoolImplTest, DrainPrimary) { .ok()); ReadyWatcher drained; - pool_->addDrainedCallback([&]() -> void { drained.ready(); }); + pool_->addIdleCallback([&]() -> void { drained.ready(); }); + pool_->startDrain(); EXPECT_CALL(dispatcher_, deferredDelete_(_)); EXPECT_CALL(r2.decoder_, decodeHeaders_(_, true)); @@ -1178,7 +1182,7 @@ TEST_F(Http2ConnPoolImplTest, DrainPrimary) { dispatcher_.clearDeferredDeleteList(); EXPECT_CALL(dispatcher_, deferredDelete_(_)); - EXPECT_CALL(drained, ready()); + EXPECT_CALL(drained, ready()).Times(AtLeast(1)); EXPECT_CALL(r1.decoder_, decodeHeaders_(_, true)); r1.inner_decoder_->decodeHeaders( ResponseHeaderMapPtr{new TestResponseHeaderMapImpl{{":status", "200"}}}, true); @@ -1223,7 +1227,8 @@ TEST_F(Http2ConnPoolImplTest, DrainPrimaryNoActiveRequest) { ReadyWatcher drained; EXPECT_CALL(drained, ready()); - 
pool_->addDrainedCallback([&]() -> void { drained.ready(); }); + pool_->addIdleCallback([&]() -> void { drained.ready(); }); + pool_->startDrain(); EXPECT_CALL(*this, onClientDestroy()); dispatcher_.clearDeferredDeleteList(); diff --git a/test/common/tcp/conn_pool_test.cc b/test/common/tcp/conn_pool_test.cc index c1febd25d1558..8657f444a4834 100644 --- a/test/common/tcp/conn_pool_test.cc +++ b/test/common/tcp/conn_pool_test.cc @@ -25,6 +25,7 @@ using testing::_; using testing::AnyNumber; +using testing::AtLeast; using testing::Invoke; using testing::InvokeWithoutArgs; using testing::NiceMock; @@ -104,7 +105,9 @@ class ConnPoolBase : public Tcp::ConnectionPool::Instance { Network::TransportSocketOptionsConstSharedPtr transport_socket_options, bool test_new_connection_pool); - void addDrainedCallback(DrainedCb cb) override { conn_pool_->addDrainedCallback(cb); } + void addIdleCallback(IdleCb cb) override { conn_pool_->addIdleCallback(cb); } + bool isIdle() const override { return conn_pool_->isIdle(); } + void startDrain() override { return conn_pool_->startDrain(); } void drainConnections() override { conn_pool_->drainConnections(); } void closeConnections() override { conn_pool_->closeConnections(); } ConnectionPool::Cancellable* newConnection(Tcp::ConnectionPool::Callbacks& callbacks) override { @@ -154,6 +157,7 @@ class ConnPoolBase : public Tcp::ConnectionPool::Instance { Event::MockDispatcher& mock_dispatcher_; NiceMock* mock_upstream_ready_cb_; std::vector test_conns_; + Upstream::HostSharedPtr host_; Network::ConnectionCallbacks* callbacks_ = nullptr; bool test_new_connection_pool_; Network::ConnectionSocket::OptionsSharedPtr options_; @@ -973,16 +977,16 @@ TEST_P(TcpConnPoolImplTest, ConnectionStateWithConcurrentConnections) { TEST_P(TcpConnPoolImplTest, DrainCallback) { initialize(); ReadyWatcher drained; - EXPECT_CALL(drained, ready()); - conn_pool_->addDrainedCallback([&]() -> void { drained.ready(); }); + conn_pool_->addIdleCallback([&]() -> void { 
drained.ready(); }); + conn_pool_->startDrain(); ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection); ActiveTestConn c2(*this, 0, ActiveTestConn::Type::Pending); c2.handle_->cancel(ConnectionPool::CancelPolicy::Default); EXPECT_CALL(*conn_pool_, onConnReleasedForTest()); - EXPECT_CALL(drained, ready()); + EXPECT_CALL(drained, ready()).Times(AtLeast(1)); c1.releaseConn(); EXPECT_CALL(*conn_pool_, onConnDestroyedForTest()); @@ -1002,11 +1006,13 @@ TEST_P(TcpConnPoolImplTest, DrainWhileConnecting) { Tcp::ConnectionPool::Cancellable* handle = conn_pool_->newConnection(callbacks); EXPECT_NE(nullptr, handle); - conn_pool_->addDrainedCallback([&]() -> void { drained.ready(); }); + conn_pool_->addIdleCallback([&]() -> void { drained.ready(); }); + conn_pool_->startDrain(); + if (test_new_connection_pool_) { // The shared connection pool removes and closes connecting clients if there are no // pending requests. - EXPECT_CALL(drained, ready()); + EXPECT_CALL(drained, ready()).Times(AtLeast(1)); handle->cancel(ConnectionPool::CancelPolicy::Default); } else { handle->cancel(ConnectionPool::CancelPolicy::Default); @@ -1026,11 +1032,12 @@ TEST_P(TcpConnPoolImplTest, DrainOnClose) { initialize(); ReadyWatcher drained; EXPECT_CALL(drained, ready()); - conn_pool_->addDrainedCallback([&]() -> void { drained.ready(); }); + conn_pool_->addIdleCallback([&]() -> void { drained.ready(); }); + conn_pool_->startDrain(); ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection); - EXPECT_CALL(drained, ready()); + EXPECT_CALL(drained, ready()).Times(AtLeast(1)); EXPECT_CALL(c1.callbacks_.callbacks_, onEvent(Network::ConnectionEvent::RemoteClose)) .WillOnce(Invoke([&](Network::ConnectionEvent event) -> void { EXPECT_EQ(Network::ConnectionEvent::RemoteClose, event); @@ -1111,6 +1118,25 @@ TEST_P(TcpConnPoolImplTest, RequestCapacity) { conn_pool_->test_conns_[2].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); } +// Test that connections that are closed 
due to idle timeout causes the idle callback to be fired. +TEST_P(TcpConnPoolImplTest, TestIdleTimeout) { + initialize(); + testing::MockFunction idle_callback; + conn_pool_->addIdleCallback(idle_callback.AsStdFunction()); + + EXPECT_CALL(idle_callback, Call()); + ActiveTestConn c1(*this, 0, ActiveTestConn::Type::CreateConnection); + EXPECT_CALL(*conn_pool_, onConnReleasedForTest()); + c1.releaseConn(); + conn_pool_->test_conns_[0].connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); + + testing::MockFunction drained_callback; + EXPECT_CALL(idle_callback, Call()); + conn_pool_->startDrain(); + EXPECT_CALL(*conn_pool_, onConnDestroyedForTest()); + dispatcher_.clearDeferredDeleteList(); +} + // Test that maybePreconnect is passed up to the base class implementation. TEST_P(TcpConnPoolImplTest, TestPreconnect) { initialize(); diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index 0596f6348f6d4..a7ace7425ba85 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -55,6 +55,8 @@ using ::testing::ReturnNew; using ::testing::ReturnRef; using ::testing::SaveArg; +using namespace std::chrono_literals; + envoy::config::bootstrap::v3::Bootstrap parseBootstrapFromV3Yaml(const std::string& yaml, bool avoid_boosting = true) { envoy::config::bootstrap::v3::Bootstrap bootstrap; @@ -1593,12 +1595,14 @@ TEST_F(ClusterManagerImplTest, DynamicAddRemove) { EXPECT_EQ(1UL, cluster_manager_->clusters().active_clusters_.size()); Http::ConnectionPool::MockInstance* cp = new Http::ConnectionPool::MockInstance(); EXPECT_CALL(factory_, allocateConnPool_(_, _, _, _, _)).WillOnce(Return(cp)); + EXPECT_CALL(*cp, addIdleCallback(_)); EXPECT_EQ(cp, HttpPoolDataPeer::getPool(cluster_manager_->getThreadLocalCluster("fake_cluster") ->httpConnPool(ResourcePriority::Default, Http::Protocol::Http11, nullptr))); Tcp::ConnectionPool::MockInstance* cp2 = new 
Tcp::ConnectionPool::MockInstance(); EXPECT_CALL(factory_, allocateTcpConnPool_(_)).WillOnce(Return(cp2)); + EXPECT_CALL(*cp2, addIdleCallback(_)); EXPECT_EQ(cp2, TcpPoolDataPeer::getPool(cluster_manager_->getThreadLocalCluster("fake_cluster") ->tcpConnPool(ResourcePriority::Default, nullptr))); @@ -1614,11 +1618,9 @@ TEST_F(ClusterManagerImplTest, DynamicAddRemove) { // Now remove the cluster. This should drain the connection pools, but not affect // tcp connections. - Http::ConnectionPool::Instance::DrainedCb drained_cb; - Tcp::ConnectionPool::Instance::DrainedCb drained_cb2; EXPECT_CALL(*callbacks, onClusterRemoval(_)); - EXPECT_CALL(*cp, addDrainedCallback(_)).WillOnce(SaveArg<0>(&drained_cb)); - EXPECT_CALL(*cp2, addDrainedCallback(_)).WillOnce(SaveArg<0>(&drained_cb2)); + EXPECT_CALL(*cp, startDrain()); + EXPECT_CALL(*cp2, startDrain()); EXPECT_TRUE(cluster_manager_->removeCluster("fake_cluster")); EXPECT_EQ(nullptr, cluster_manager_->getThreadLocalCluster("fake_cluster")); EXPECT_EQ(0UL, cluster_manager_->clusters().active_clusters_.size()); @@ -1631,9 +1633,6 @@ TEST_F(ClusterManagerImplTest, DynamicAddRemove) { // Remove an unknown cluster. 
EXPECT_FALSE(cluster_manager_->removeCluster("foo")); - drained_cb(); - drained_cb2(); - checkStats(1 /*added*/, 1 /*modified*/, 1 /*removed*/, 0 /*active*/, 0 /*warming*/); EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get())); @@ -1733,8 +1732,8 @@ TEST_F(ClusterManagerImplTest, CloseHttpConnectionsOnHealthFailure) { Outlier::MockDetector outlier_detector; ON_CALL(*cluster1, outlierDetector()).WillByDefault(Return(&outlier_detector)); - Http::ConnectionPool::MockInstance* cp1 = new Http::ConnectionPool::MockInstance(); - Http::ConnectionPool::MockInstance* cp2 = new Http::ConnectionPool::MockInstance(); + Http::ConnectionPool::MockInstance* cp1 = new NiceMock(); + Http::ConnectionPool::MockInstance* cp2 = new NiceMock(); { InSequence s; @@ -1780,6 +1779,54 @@ TEST_F(ClusterManagerImplTest, CloseHttpConnectionsOnHealthFailure) { EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get())); } +// Test that we close all HTTP connection pool connections when there is a host health failure. +// Verify that the pool gets deleted if it is idle, and that a crash does not occur due to +// deleting a container while iterating through it (see `do_not_delete_` in +// `ClusterManagerImpl::ThreadLocalClusterManagerImpl::onHostHealthFailure()`). 
+TEST_F(ClusterManagerImplTest, CloseHttpConnectionsAndDeletePoolOnHealthFailure) { + const std::string json = fmt::sprintf("{\"static_resources\":{%s}}", + clustersJson({defaultStaticClusterJson("some_cluster")})); + std::shared_ptr cluster1(new NiceMock()); + cluster1->info_->name_ = "some_cluster"; + HostSharedPtr test_host = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80", time_system_); + cluster1->prioritySet().getMockHostSet(0)->hosts_ = {test_host}; + ON_CALL(*cluster1, initializePhase()).WillByDefault(Return(Cluster::InitializePhase::Primary)); + + MockHealthChecker health_checker; + ON_CALL(*cluster1, healthChecker()).WillByDefault(Return(&health_checker)); + + Outlier::MockDetector outlier_detector; + ON_CALL(*cluster1, outlierDetector()).WillByDefault(Return(&outlier_detector)); + + Http::ConnectionPool::MockInstance* cp1 = new NiceMock(); + + InSequence s; + + EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _)) + .WillOnce(Return(std::make_pair(cluster1, nullptr))); + EXPECT_CALL(health_checker, addHostCheckCompleteCb(_)); + EXPECT_CALL(outlier_detector, addChangedStateCb(_)); + EXPECT_CALL(*cluster1, initialize(_)) + .WillOnce(Invoke([cluster1](std::function initialize_callback) { + // Test inline init. 
+ initialize_callback(); + })); + create(parseBootstrapFromV3Json(json)); + + EXPECT_CALL(factory_, allocateConnPool_(_, _, _, _, _)).WillOnce(Return(cp1)); + cluster_manager_->getThreadLocalCluster("some_cluster") + ->httpConnPool(ResourcePriority::Default, Http::Protocol::Http11, nullptr); + + outlier_detector.runCallbacks(test_host); + health_checker.runCallbacks(test_host, HealthTransition::Unchanged); + + EXPECT_CALL(*cp1, drainConnections()).WillOnce(Invoke([&]() { cp1->idle_cb_(); })); + test_host->healthFlagSet(Host::HealthFlag::FAILED_OUTLIER_CHECK); + outlier_detector.runCallbacks(test_host); + + EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get())); +} + // Test that we close all TCP connection pool connections when there is a host health failure. TEST_F(ClusterManagerImplTest, CloseTcpConnectionPoolsOnHealthFailure) { const std::string json = fmt::sprintf("{\"static_resources\":{%s}}", @@ -1796,8 +1843,8 @@ TEST_F(ClusterManagerImplTest, CloseTcpConnectionPoolsOnHealthFailure) { Outlier::MockDetector outlier_detector; ON_CALL(*cluster1, outlierDetector()).WillByDefault(Return(&outlier_detector)); - Tcp::ConnectionPool::MockInstance* cp1 = new Tcp::ConnectionPool::MockInstance(); - Tcp::ConnectionPool::MockInstance* cp2 = new Tcp::ConnectionPool::MockInstance(); + Tcp::ConnectionPool::MockInstance* cp1 = new NiceMock(); + Tcp::ConnectionPool::MockInstance* cp2 = new NiceMock(); { InSequence s; @@ -2027,7 +2074,7 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemove) { EXPECT_CALL(factory_, allocateConnPool_(_, _, _, _, _)) .Times(4) - .WillRepeatedly(ReturnNew()); + .WillRepeatedly(ReturnNew>()); // This should provide us a CP for each of the above hosts. 
Http::ConnectionPool::MockInstance* cp1 = HttpPoolDataPeer::getPool( @@ -2047,14 +2094,9 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemove) { EXPECT_NE(cp1_high, cp2_high); EXPECT_NE(cp1, cp1_high); - Http::ConnectionPool::Instance::DrainedCb drained_cb; - EXPECT_CALL(*cp1, addDrainedCallback(_)).WillOnce(SaveArg<0>(&drained_cb)); - Http::ConnectionPool::Instance::DrainedCb drained_cb_high; - EXPECT_CALL(*cp1_high, addDrainedCallback(_)).WillOnce(SaveArg<0>(&drained_cb_high)); - - EXPECT_CALL(factory_, allocateTcpConnPool_(_)) + EXPECT_CALL(factory_, allocateTcpConnPool_) .Times(4) - .WillRepeatedly(ReturnNew()); + .WillRepeatedly(ReturnNew>()); // This should provide us a CP for each of the above hosts. Tcp::ConnectionPool::MockInstance* tcp1 = @@ -2074,24 +2116,19 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemove) { EXPECT_NE(tcp1_high, tcp2_high); EXPECT_NE(tcp1, tcp1_high); - Tcp::ConnectionPool::Instance::DrainedCb tcp_drained_cb; - EXPECT_CALL(*tcp1, addDrainedCallback(_)).WillOnce(SaveArg<0>(&tcp_drained_cb)); - Tcp::ConnectionPool::Instance::DrainedCb tcp_drained_cb_high; - EXPECT_CALL(*tcp1_high, addDrainedCallback(_)).WillOnce(SaveArg<0>(&tcp_drained_cb_high)); - // Remove the first host, this should lead to the first cp being drained. 
dns_timer_->invokeCallback(); dns_callback(Network::DnsResolver::ResolutionStatus::Success, TestUtility::makeDnsResponse({"127.0.0.2"})); - drained_cb(); - drained_cb = nullptr; - tcp_drained_cb(); - tcp_drained_cb = nullptr; - EXPECT_CALL(factory_.tls_.dispatcher_, deferredDelete_(_)).Times(4); - drained_cb_high(); - drained_cb_high = nullptr; - tcp_drained_cb_high(); - tcp_drained_cb_high = nullptr; + cp1->idle_cb_(); + cp1->idle_cb_ = nullptr; + tcp1->idle_cb_(); + tcp1->idle_cb_ = nullptr; + EXPECT_CALL(factory_.tls_.dispatcher_, deferredDelete_(_)).Times(2); + cp1_high->idle_cb_(); + cp1_high->idle_cb_ = nullptr; + tcp1_high->idle_cb_(); + tcp1_high->idle_cb_ = nullptr; // Make sure we get back the same connection pool for the 2nd host as we did before the change. Http::ConnectionPool::MockInstance* cp3 = HttpPoolDataPeer::getPool( @@ -2192,7 +2229,7 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemoveWithTls) { EXPECT_CALL(factory_, allocateConnPool_(_, _, _, _, _)) .Times(4) - .WillRepeatedly(ReturnNew()); + .WillRepeatedly(ReturnNew>()); // This should provide us a CP for each of the above hosts. 
Http::ConnectionPool::MockInstance* cp1 = HttpPoolDataPeer::getPool( @@ -2212,14 +2249,9 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemoveWithTls) { EXPECT_NE(cp1_high, cp2_high); EXPECT_NE(cp1, cp1_high); - Http::ConnectionPool::Instance::DrainedCb drained_cb; - EXPECT_CALL(*cp1, addDrainedCallback(_)).WillOnce(SaveArg<0>(&drained_cb)); - Http::ConnectionPool::Instance::DrainedCb drained_cb_high; - EXPECT_CALL(*cp1_high, addDrainedCallback(_)).WillOnce(SaveArg<0>(&drained_cb_high)); - - EXPECT_CALL(factory_, allocateTcpConnPool_(_)) + EXPECT_CALL(factory_, allocateTcpConnPool_) .Times(10) - .WillRepeatedly(ReturnNew()); + .WillRepeatedly(ReturnNew>()); // This should provide us a CP for each of the above hosts, and for different SNIs Tcp::ConnectionPool::MockInstance* tcp1 = @@ -2281,33 +2313,22 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemoveWithTls) { EXPECT_CALL(factory_.tls_.dispatcher_, deferredDelete_(_)).Times(6); - Tcp::ConnectionPool::Instance::DrainedCb tcp_drained_cb; - EXPECT_CALL(*tcp1, addDrainedCallback(_)).WillOnce(SaveArg<0>(&tcp_drained_cb)); - Tcp::ConnectionPool::Instance::DrainedCb tcp_drained_cb_high; - EXPECT_CALL(*tcp1_high, addDrainedCallback(_)).WillOnce(SaveArg<0>(&tcp_drained_cb_high)); - - Tcp::ConnectionPool::Instance::DrainedCb tcp_drained_cb_example_com; - EXPECT_CALL(*tcp1_example_com, addDrainedCallback(_)) - .WillOnce(SaveArg<0>(&tcp_drained_cb_example_com)); - Tcp::ConnectionPool::Instance::DrainedCb tcp_drained_cb_ibm_com; - EXPECT_CALL(*tcp1_ibm_com, addDrainedCallback(_)).WillOnce(SaveArg<0>(&tcp_drained_cb_ibm_com)); - // Remove the first host, this should lead to the first cp being drained. 
dns_timer_->invokeCallback(); dns_callback(Network::DnsResolver::ResolutionStatus::Success, TestUtility::makeDnsResponse({"127.0.0.2"})); - drained_cb(); - drained_cb = nullptr; - tcp_drained_cb(); - tcp_drained_cb = nullptr; - drained_cb_high(); - drained_cb_high = nullptr; - tcp_drained_cb_high(); - tcp_drained_cb_high = nullptr; - tcp_drained_cb_example_com(); - tcp_drained_cb_example_com = nullptr; - tcp_drained_cb_ibm_com(); - tcp_drained_cb_ibm_com = nullptr; + cp1->idle_cb_(); + cp1->idle_cb_ = nullptr; + tcp1->idle_cb_(); + tcp1->idle_cb_ = nullptr; + cp1_high->idle_cb_(); + cp1_high->idle_cb_ = nullptr; + tcp1_high->idle_cb_(); + tcp1_high->idle_cb_ = nullptr; + tcp1_example_com->idle_cb_(); + tcp1_example_com->idle_cb_ = nullptr; + tcp1_ibm_com->idle_cb_(); + tcp1_ibm_com->idle_cb_ = nullptr; // Make sure we get back the same connection pool for the 2nd host as we did before the change. Http::ConnectionPool::MockInstance* cp3 = HttpPoolDataPeer::getPool( @@ -2768,10 +2789,10 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemoveDefaultPriority) { TestUtility::makeDnsResponse({"127.0.0.2"})); EXPECT_CALL(factory_, allocateConnPool_(_, _, _, _, _)) - .WillOnce(ReturnNew()); + .WillOnce(ReturnNew>()); - EXPECT_CALL(factory_, allocateTcpConnPool_(_)) - .WillOnce(ReturnNew()); + EXPECT_CALL(factory_, allocateTcpConnPool_) + .WillOnce(ReturnNew>()); Http::ConnectionPool::MockInstance* cp = HttpPoolDataPeer::getPool( cluster_manager_->getThreadLocalCluster("cluster_1") @@ -2782,11 +2803,14 @@ TEST_F(ClusterManagerImplTest, DynamicHostRemoveDefaultPriority) { ->tcpConnPool(ResourcePriority::Default, nullptr)); // Immediate drain, since this can happen with the HTTP codecs. 
- EXPECT_CALL(*cp, addDrainedCallback(_)) - .WillOnce(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); })); - - EXPECT_CALL(*tcp, addDrainedCallback(_)) - .WillOnce(Invoke([](Tcp::ConnectionPool::Instance::DrainedCb cb) { cb(); })); + EXPECT_CALL(*cp, startDrain()).WillOnce(Invoke([&]() { + cp->idle_cb_(); + cp->idle_cb_ = nullptr; + })); + EXPECT_CALL(*tcp, startDrain()).WillOnce(Invoke([&]() { + tcp->idle_cb_(); + tcp->idle_cb_ = nullptr; + })); // Remove the first host, this should lead to the cp being drained, without // crash. @@ -2855,24 +2879,25 @@ TEST_F(ClusterManagerImplTest, ConnPoolDestroyWithDraining) { TestUtility::makeDnsResponse({"127.0.0.2"})); MockConnPoolWithDestroy* mock_cp = new MockConnPoolWithDestroy(); + Http::ConnectionPool::Instance::IdleCb drained_cb; EXPECT_CALL(factory_, allocateConnPool_(_, _, _, _, _)).WillOnce(Return(mock_cp)); + EXPECT_CALL(*mock_cp, addIdleCallback(_)).WillOnce(SaveArg<0>(&drained_cb)); + EXPECT_CALL(*mock_cp, startDrain()); - MockTcpConnPoolWithDestroy* mock_tcp = new MockTcpConnPoolWithDestroy(); - EXPECT_CALL(factory_, allocateTcpConnPool_(_)).WillOnce(Return(mock_tcp)); + MockTcpConnPoolWithDestroy* mock_tcp = new NiceMock(); + Tcp::ConnectionPool::Instance::IdleCb tcp_drained_cb; + EXPECT_CALL(factory_, allocateTcpConnPool_).WillOnce(Return(mock_tcp)); + EXPECT_CALL(*mock_tcp, addIdleCallback(_)).WillOnce(SaveArg<0>(&tcp_drained_cb)); + EXPECT_CALL(*mock_tcp, startDrain()); - Http::ConnectionPool::MockInstance* cp = HttpPoolDataPeer::getPool( + HttpPoolDataPeer::getPool( cluster_manager_->getThreadLocalCluster("cluster_1") ->httpConnPool(ResourcePriority::Default, Http::Protocol::Http11, nullptr)); - Tcp::ConnectionPool::MockInstance* tcp = - TcpPoolDataPeer::getPool(cluster_manager_->getThreadLocalCluster("cluster_1") - ->tcpConnPool(ResourcePriority::Default, nullptr)); + TcpPoolDataPeer::getPool(cluster_manager_->getThreadLocalCluster("cluster_1") + ->tcpConnPool(ResourcePriority::Default, 
nullptr)); // Remove the first host, this should lead to the cp being drained. - Http::ConnectionPool::Instance::DrainedCb drained_cb; - EXPECT_CALL(*cp, addDrainedCallback(_)).WillOnce(SaveArg<0>(&drained_cb)); - Tcp::ConnectionPool::Instance::DrainedCb tcp_drained_cb; - EXPECT_CALL(*tcp, addDrainedCallback(_)).WillOnce(SaveArg<0>(&tcp_drained_cb)); dns_timer_->invokeCallback(); dns_callback(Network::DnsResolver::ResolutionStatus::Success, TestUtility::makeDnsResponse({})); @@ -3310,6 +3335,7 @@ TEST_F(ClusterManagerImplTest, UpstreamSocketOptionsPassedToTcpConnPool) { EXPECT_CALL(context, upstreamSocketOptions()).WillOnce(Return(options_to_return)); EXPECT_CALL(factory_, allocateTcpConnPool_(_)).WillOnce(Return(to_create)); + EXPECT_CALL(*to_create, addIdleCallback(_)); auto opt_cp = cluster_manager_->getThreadLocalCluster("cluster_1") ->tcpConnPool(ResourcePriority::Default, &context); @@ -3320,7 +3346,8 @@ TEST_F(ClusterManagerImplTest, UpstreamSocketOptionsPassedToConnPool) { createWithLocalClusterUpdate(); NiceMock context; - Http::ConnectionPool::MockInstance* to_create = new Http::ConnectionPool::MockInstance(); + Http::ConnectionPool::MockInstance* to_create = + new NiceMock(); Network::Socket::OptionsSharedPtr options_to_return = Network::SocketOptionFactory::buildIpTransparentOptions(); @@ -3337,8 +3364,10 @@ TEST_F(ClusterManagerImplTest, UpstreamSocketOptionsUsedInConnPoolHash) { NiceMock context1; NiceMock context2; - Http::ConnectionPool::MockInstance* to_create1 = new Http::ConnectionPool::MockInstance(); - Http::ConnectionPool::MockInstance* to_create2 = new Http::ConnectionPool::MockInstance(); + Http::ConnectionPool::MockInstance* to_create1 = + new NiceMock(); + Http::ConnectionPool::MockInstance* to_create2 = + new NiceMock(); Network::Socket::OptionsSharedPtr options1 = Network::SocketOptionFactory::buildIpTransparentOptions(); Network::Socket::OptionsSharedPtr options2 = @@ -3378,7 +3407,8 @@ TEST_F(ClusterManagerImplTest, 
UpstreamSocketOptionsNullIsOkay) { createWithLocalClusterUpdate(); NiceMock context; - Http::ConnectionPool::MockInstance* to_create = new Http::ConnectionPool::MockInstance(); + Http::ConnectionPool::MockInstance* to_create = + new NiceMock(); Network::Socket::OptionsSharedPtr options_to_return = nullptr; EXPECT_CALL(context, upstreamSocketOptions()).WillOnce(Return(options_to_return)); @@ -3397,6 +3427,7 @@ TEST_F(ClusterManagerImplTest, HttpPoolDataForwardsCallsToConnectionPool) { Network::Socket::OptionsSharedPtr options_to_return = nullptr; EXPECT_CALL(factory_, allocateConnPool_(_, _, _, _, _)).WillOnce(Return(pool_mock)); + EXPECT_CALL(*pool_mock, addIdleCallback(_)); auto opt_cp = cluster_manager_->getThreadLocalCluster("cluster_1") ->httpConnPool(ResourcePriority::Default, Http::Protocol::Http11, &context); @@ -3405,9 +3436,9 @@ TEST_F(ClusterManagerImplTest, HttpPoolDataForwardsCallsToConnectionPool) { EXPECT_CALL(*pool_mock, hasActiveConnections()).WillOnce(Return(true)); opt_cp.value().hasActiveConnections(); - ConnectionPool::Instance::DrainedCb drained_cb = []() {}; - EXPECT_CALL(*pool_mock, addDrainedCallback(_)); - opt_cp.value().addDrainedCallback(drained_cb); + ConnectionPool::Instance::IdleCb drained_cb = []() {}; + EXPECT_CALL(*pool_mock, addIdleCallback(_)); + opt_cp.value().addIdleCallback(drained_cb); } class TestUpstreamNetworkFilter : public Network::WriteFilter { @@ -4319,11 +4350,11 @@ TEST_F(ClusterManagerImplTest, ConnPoolsDrainedOnHostSetChange) { EXPECT_CALL(factory_, allocateConnPool_(_, _, _, _, _)) .Times(3) - .WillRepeatedly(ReturnNew()); + .WillRepeatedly(ReturnNew>()); - EXPECT_CALL(factory_, allocateTcpConnPool_(_)) + EXPECT_CALL(factory_, allocateTcpConnPool_) .Times(3) - .WillRepeatedly(ReturnNew()); + .WillRepeatedly(ReturnNew>()); // This should provide us a CP for each of the above hosts. 
Http::ConnectionPool::MockInstance* cp1 = HttpPoolDataPeer::getPool( @@ -4345,17 +4376,22 @@ TEST_F(ClusterManagerImplTest, ConnPoolsDrainedOnHostSetChange) { EXPECT_NE(cp1, cp2); EXPECT_NE(tcp1, tcp2); - EXPECT_CALL(*cp2, addDrainedCallback(_)) - .WillOnce(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); })); - - EXPECT_CALL(*cp1, addDrainedCallback(_)) - .WillOnce(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); })); - - EXPECT_CALL(*tcp1, addDrainedCallback(_)) - .WillOnce(Invoke([](Tcp::ConnectionPool::Instance::DrainedCb cb) { cb(); })); - - EXPECT_CALL(*tcp2, addDrainedCallback(_)) - .WillOnce(Invoke([](Tcp::ConnectionPool::Instance::DrainedCb cb) { cb(); })); + EXPECT_CALL(*cp2, startDrain()).WillOnce(Invoke([&]() { + cp2->idle_cb_(); + cp2->idle_cb_ = nullptr; + })); + EXPECT_CALL(*cp1, startDrain()).WillOnce(Invoke([&]() { + cp1->idle_cb_(); + cp1->idle_cb_ = nullptr; + })); + EXPECT_CALL(*tcp1, startDrain()).WillOnce(Invoke([&]() { + tcp1->idle_cb_(); + tcp1->idle_cb_ = nullptr; + })); + EXPECT_CALL(*tcp2, startDrain()).WillOnce(Invoke([&]() { + tcp2->idle_cb_(); + tcp2->idle_cb_ = nullptr; + })); HostVector hosts_removed; hosts_removed.push_back(host2); @@ -4378,11 +4414,14 @@ TEST_F(ClusterManagerImplTest, ConnPoolsDrainedOnHostSetChange) { HostVector hosts_added; hosts_added.push_back(host3); - EXPECT_CALL(*cp1, addDrainedCallback(_)) - .WillOnce(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); })); - - EXPECT_CALL(*tcp1, addDrainedCallback(_)) - .WillOnce(Invoke([](Tcp::ConnectionPool::Instance::DrainedCb cb) { cb(); })); + EXPECT_CALL(*cp1, startDrain()).WillOnce(Invoke([&]() { + cp1->idle_cb_(); + cp1->idle_cb_ = nullptr; + })); + EXPECT_CALL(*tcp1, startDrain()).WillOnce(Invoke([&]() { + tcp1->idle_cb_(); + tcp1->idle_cb_ = nullptr; + })); // Adding host3 should drain connection pool for host1. 
cluster.prioritySet().updateHosts( @@ -4426,11 +4465,11 @@ TEST_F(ClusterManagerImplTest, ConnPoolsNotDrainedOnHostSetChange) { EXPECT_CALL(factory_, allocateConnPool_(_, _, _, _, _)) .Times(1) - .WillRepeatedly(ReturnNew()); + .WillRepeatedly(ReturnNew>()); - EXPECT_CALL(factory_, allocateTcpConnPool_(_)) + EXPECT_CALL(factory_, allocateTcpConnPool_) .Times(1) - .WillRepeatedly(ReturnNew()); + .WillRepeatedly(ReturnNew>()); // This should provide us a CP for each of the above hosts. Http::ConnectionPool::MockInstance* cp1 = HttpPoolDataPeer::getPool( @@ -4455,6 +4494,97 @@ TEST_F(ClusterManagerImplTest, ConnPoolsNotDrainedOnHostSetChange) { hosts_added, {}, 100); } +TEST_F(ClusterManagerImplTest, ConnPoolsIdleDeleted) { + TestScopedRuntime scoped_runtime; + Runtime::LoaderSingleton::getExisting()->mergeValues( + {{"envoy.reloadable_features.conn_pool_delete_when_idle", "true"}}); + + const std::string yaml = R"EOF( + static_resources: + clusters: + - name: cluster_1 + connect_timeout: 0.25s + lb_policy: ROUND_ROBIN + type: STATIC + )EOF"; + + ReadyWatcher initialized; + EXPECT_CALL(initialized, ready()); + create(parseBootstrapFromV3Yaml(yaml)); + + // Set up for an initialize callback. + cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); }); + + std::unique_ptr callbacks(new NiceMock()); + ClusterUpdateCallbacksHandlePtr cb = + cluster_manager_->addThreadLocalClusterUpdateCallbacks(*callbacks); + + Cluster& cluster = cluster_manager_->activeClusters().begin()->second; + + // Set up the HostSet. + HostSharedPtr host1 = makeTestHost(cluster.info(), "tcp://127.0.0.1:80", time_system_); + + HostVector hosts{host1}; + auto hosts_ptr = std::make_shared(hosts); + + // Sending non-mergeable updates. 
+ cluster.prioritySet().updateHosts( + 0, HostSetImpl::partitionHosts(hosts_ptr, HostsPerLocalityImpl::empty()), nullptr, hosts, {}, + 100); + + { + auto* cp1 = new NiceMock(); + EXPECT_CALL(factory_, allocateConnPool_(_, _, _, _, _)).WillOnce(Return(cp1)); + std::function idle_callback; + EXPECT_CALL(*cp1, addIdleCallback(_)).WillOnce(SaveArg<0>(&idle_callback)); + + EXPECT_EQ(cp1, HttpPoolDataPeer::getPool(cluster_manager_->getThreadLocalCluster("cluster_1") + ->httpConnPool(ResourcePriority::Default, + Http::Protocol::Http11, nullptr))); + // Request the same pool again and verify that it produces the same output + EXPECT_EQ(cp1, HttpPoolDataPeer::getPool(cluster_manager_->getThreadLocalCluster("cluster_1") + ->httpConnPool(ResourcePriority::Default, + Http::Protocol::Http11, nullptr))); + + // Trigger the idle callback so we remove the connection pool + idle_callback(); + + auto* cp2 = new NiceMock(); + EXPECT_CALL(factory_, allocateConnPool_(_, _, _, _, _)).WillOnce(Return(cp2)); + EXPECT_CALL(*cp2, addIdleCallback(_)); + + // This time we expect cp2 since cp1 will have been destroyed + EXPECT_EQ(cp2, HttpPoolDataPeer::getPool(cluster_manager_->getThreadLocalCluster("cluster_1") + ->httpConnPool(ResourcePriority::Default, + Http::Protocol::Http11, nullptr))); + } + + { + auto* tcp1 = new NiceMock(); + EXPECT_CALL(factory_, allocateTcpConnPool_).WillOnce(Return(tcp1)); + std::function idle_callback; + EXPECT_CALL(*tcp1, addIdleCallback(_)).WillOnce(SaveArg<0>(&idle_callback)); + EXPECT_EQ(tcp1, + TcpPoolDataPeer::getPool(cluster_manager_->getThreadLocalCluster("cluster_1") + ->tcpConnPool(ResourcePriority::Default, nullptr))); + // Request the same pool again and verify that it produces the same output + EXPECT_EQ(tcp1, + TcpPoolDataPeer::getPool(cluster_manager_->getThreadLocalCluster("cluster_1") + ->tcpConnPool(ResourcePriority::Default, nullptr))); + + // Trigger the idle callback so we remove the connection pool + idle_callback(); + + auto* tcp2 = new 
NiceMock(); + EXPECT_CALL(factory_, allocateTcpConnPool_).WillOnce(Return(tcp2)); + + // This time we expect tcp2 since tcp1 will have been destroyed + EXPECT_EQ(tcp2, + TcpPoolDataPeer::getPool(cluster_manager_->getThreadLocalCluster("cluster_1") + ->tcpConnPool(ResourcePriority::Default, nullptr))); + } +} + TEST_F(ClusterManagerImplTest, InvalidPriorityLocalClusterNameStatic) { std::string yaml = R"EOF( static_resources: @@ -4562,6 +4692,7 @@ TEST_F(ClusterManagerImplTest, ConnectionPoolPerDownstreamConnection) { std::vector conn_pool_vector; for (size_t i = 0; i < 3; ++i) { conn_pool_vector.push_back(new Http::ConnectionPool::MockInstance()); + EXPECT_CALL(*conn_pool_vector.back(), addIdleCallback(_)); EXPECT_CALL(factory_, allocateConnPool_(_, _, _, _, _)) .WillOnce(Return(conn_pool_vector.back())); EXPECT_CALL(downstream_connection, hashKey) @@ -4673,7 +4804,7 @@ TEST_F(PreconnectTest, PreconnectOn) { ->httpConnPool(ResourcePriority::Default, Http::Protocol::Http11, nullptr); http_handle.value().newStream(decoder_, http_callbacks_); - EXPECT_CALL(factory_, allocateTcpConnPool_(_)) + EXPECT_CALL(factory_, allocateTcpConnPool_) .Times(2) .WillRepeatedly(ReturnNew>()); auto tcp_handle = cluster_manager_->getThreadLocalCluster("cluster_1") diff --git a/test/common/upstream/conn_pool_map_impl_test.cc b/test/common/upstream/conn_pool_map_impl_test.cc index 6ce14d2becd6a..5b8cd99f2b78b 100644 --- a/test/common/upstream/conn_pool_map_impl_test.cc +++ b/test/common/upstream/conn_pool_map_impl_test.cc @@ -69,10 +69,10 @@ class ConnPoolMapImplTest : public testing::Test { }; } - TestMap::PoolFactory getFactoryExpectDrainedCb(Http::ConnectionPool::Instance::DrainedCb* cb) { + TestMap::PoolFactory getFactoryExpectIdleCb(Http::ConnectionPool::Instance::IdleCb* cb) { return [this, cb]() { auto pool = std::make_unique>(); - EXPECT_CALL(*pool, addDrainedCallback(_)).WillOnce(SaveArg<0>(cb)); + EXPECT_CALL(*pool, addIdleCallback(_)).WillOnce(SaveArg<0>(cb)); 
mock_pools_.push_back(pool.get()); return pool; }; @@ -153,13 +153,14 @@ TEST_F(ConnPoolMapImplTest, CallbacksPassedToPools) { test_map->getPool(1, getBasicFactory()); test_map->getPool(2, getBasicFactory()); - Http::ConnectionPool::Instance::DrainedCb cb1; - EXPECT_CALL(*mock_pools_[0], addDrainedCallback(_)).WillOnce(SaveArg<0>(&cb1)); - Http::ConnectionPool::Instance::DrainedCb cb2; - EXPECT_CALL(*mock_pools_[1], addDrainedCallback(_)).WillOnce(SaveArg<0>(&cb2)); + Http::ConnectionPool::Instance::IdleCb cb1; + EXPECT_CALL(*mock_pools_[0], addIdleCallback(_)).WillOnce(SaveArg<0>(&cb1)); + Http::ConnectionPool::Instance::IdleCb cb2; + EXPECT_CALL(*mock_pools_[1], addIdleCallback(_)).WillOnce(SaveArg<0>(&cb2)); ReadyWatcher watcher; - test_map->addDrainedCallback([&watcher] { watcher.ready(); }); + test_map->addIdleCallback([&watcher]() { watcher.ready(); }); + test_map->startDrain(); EXPECT_CALL(watcher, ready()).Times(2); cb1(); @@ -171,13 +172,14 @@ TEST_F(ConnPoolMapImplTest, CallbacksCachedAndPassedOnCreation) { TestMapPtr test_map = makeTestMap(); ReadyWatcher watcher; - test_map->addDrainedCallback([&watcher] { watcher.ready(); }); + test_map->addIdleCallback([&watcher]() { watcher.ready(); }); + test_map->startDrain(); - Http::ConnectionPool::Instance::DrainedCb cb1; - test_map->getPool(1, getFactoryExpectDrainedCb(&cb1)); + Http::ConnectionPool::Instance::IdleCb cb1; + test_map->getPool(1, getFactoryExpectIdleCb(&cb1)); - Http::ConnectionPool::Instance::DrainedCb cb2; - test_map->getPool(2, getFactoryExpectDrainedCb(&cb2)); + Http::ConnectionPool::Instance::IdleCb cb2; + test_map->getPool(2, getFactoryExpectIdleCb(&cb2)); EXPECT_CALL(watcher, ready()).Times(2); cb1(); @@ -205,7 +207,7 @@ TEST_F(ConnPoolMapImplTest, DrainConnectionsForwarded) { TEST_F(ConnPoolMapImplTest, ClearDefersDelete) { TestMapPtr test_map = makeTestMap(); - Http::ConnectionPool::Instance::DrainedCb cb1; + Http::ConnectionPool::Instance::IdleCb cb1; test_map->getPool(1, 
getBasicFactory()); test_map->getPool(2, getBasicFactory()); test_map->clear(); @@ -390,6 +392,19 @@ TEST_F(ConnPoolMapImplTest, CircuitBreakerUsesProvidedPriorityHigh) { test_map->getPool(2, getBasicFactory()); } +TEST_F(ConnPoolMapImplTest, ErasePool) { + TestMapPtr test_map = makeTestMap(); + auto* pool_ptr = &test_map->getPool(1, getBasicFactory()).value().get(); + EXPECT_EQ(1, test_map->size()); + EXPECT_EQ(pool_ptr, &test_map->getPool(1, getNeverCalledFactory()).value().get()); + EXPECT_EQ(1, test_map->size()); + EXPECT_FALSE(test_map->erasePool(2)); + EXPECT_EQ(1, test_map->size()); + EXPECT_TRUE(test_map->erasePool(1)); + EXPECT_EQ(0, test_map->size()); + EXPECT_NE(pool_ptr, &test_map->getPool(1, getBasicFactory()).value().get()); +} + // The following tests only die in debug builds, so don't run them if this isn't one. #if !defined(NDEBUG) class ConnPoolMapImplDeathTest : public ConnPoolMapImplTest {}; @@ -398,10 +413,10 @@ TEST_F(ConnPoolMapImplDeathTest, ReentryClearTripsAssert) { TestMapPtr test_map = makeTestMap(); test_map->getPool(1, getBasicFactory()); - ON_CALL(*mock_pools_[0], addDrainedCallback(_)) - .WillByDefault(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); })); + ON_CALL(*mock_pools_[0], addIdleCallback(_)) + .WillByDefault(Invoke([](Http::ConnectionPool::Instance::IdleCb cb) { cb(); })); - EXPECT_DEATH(test_map->addDrainedCallback([&test_map] { test_map->clear(); }), + EXPECT_DEATH(test_map->addIdleCallback([&test_map]() { test_map->clear(); }), ".*Details: A resource should only be entered once"); } @@ -409,11 +424,11 @@ TEST_F(ConnPoolMapImplDeathTest, ReentryGetPoolTripsAssert) { TestMapPtr test_map = makeTestMap(); test_map->getPool(1, getBasicFactory()); - ON_CALL(*mock_pools_[0], addDrainedCallback(_)) - .WillByDefault(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); })); + ON_CALL(*mock_pools_[0], addIdleCallback(_)) + .WillByDefault(Invoke([](Http::ConnectionPool::Instance::IdleCb cb) { cb(); })); 
EXPECT_DEATH( - test_map->addDrainedCallback([&test_map, this] { test_map->getPool(2, getBasicFactory()); }), + test_map->addIdleCallback([&test_map, this]() { test_map->getPool(2, getBasicFactory()); }), ".*Details: A resource should only be entered once"); } @@ -421,10 +436,10 @@ TEST_F(ConnPoolMapImplDeathTest, ReentryDrainConnectionsTripsAssert) { TestMapPtr test_map = makeTestMap(); test_map->getPool(1, getBasicFactory()); - ON_CALL(*mock_pools_[0], addDrainedCallback(_)) - .WillByDefault(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); })); + ON_CALL(*mock_pools_[0], addIdleCallback(_)) + .WillByDefault(Invoke([](Http::ConnectionPool::Instance::IdleCb cb) { cb(); })); - EXPECT_DEATH(test_map->addDrainedCallback([&test_map] { test_map->drainConnections(); }), + EXPECT_DEATH(test_map->addIdleCallback([&test_map]() { test_map->clear(); }), ".*Details: A resource should only be entered once"); } @@ -432,10 +447,10 @@ TEST_F(ConnPoolMapImplDeathTest, ReentryAddDrainedCallbackTripsAssert) { TestMapPtr test_map = makeTestMap(); test_map->getPool(1, getBasicFactory()); - ON_CALL(*mock_pools_[0], addDrainedCallback(_)) - .WillByDefault(Invoke([](Http::ConnectionPool::Instance::DrainedCb cb) { cb(); })); + ON_CALL(*mock_pools_[0], addIdleCallback(_)) + .WillByDefault(Invoke([](Http::ConnectionPool::Instance::IdleCb cb) { cb(); })); - EXPECT_DEATH(test_map->addDrainedCallback([&test_map] { test_map->addDrainedCallback([]() {}); }), + EXPECT_DEATH(test_map->addIdleCallback([&test_map]() { test_map->addIdleCallback([]() {}); }), ".*Details: A resource should only be entered once"); } #endif // !defined(NDEBUG) diff --git a/test/common/upstream/priority_conn_pool_map_impl_test.cc b/test/common/upstream/priority_conn_pool_map_impl_test.cc index 60252e332c58c..a7ade68348547 100644 --- a/test/common/upstream/priority_conn_pool_map_impl_test.cc +++ b/test/common/upstream/priority_conn_pool_map_impl_test.cc @@ -36,6 +36,13 @@ class PriorityConnPoolMapImplTest : 
public testing::Test { }; } + TestMap::PoolFactory getNeverCalledFactory() { + return []() { + EXPECT_TRUE(false); + return nullptr; + }; + } + protected: NiceMock dispatcher_; std::vector*> mock_pools_; @@ -104,6 +111,24 @@ TEST_F(PriorityConnPoolMapImplTest, TestClearEmptiesOut) { EXPECT_EQ(test_map->size(), 0); } +TEST_F(PriorityConnPoolMapImplTest, TestErase) { + TestMapPtr test_map = makeTestMap(); + + auto* pool_ptr = &test_map->getPool(ResourcePriority::High, 1, getBasicFactory()).value().get(); + EXPECT_EQ(1, test_map->size()); + EXPECT_EQ(pool_ptr, + &test_map->getPool(ResourcePriority::High, 1, getNeverCalledFactory()).value().get()); + EXPECT_FALSE(test_map->erasePool(ResourcePriority::Default, 1)); + EXPECT_NE(pool_ptr, + &test_map->getPool(ResourcePriority::Default, 1, getBasicFactory()).value().get()); + EXPECT_EQ(2, test_map->size()); + EXPECT_TRUE(test_map->erasePool(ResourcePriority::Default, 1)); + EXPECT_TRUE(test_map->erasePool(ResourcePriority::High, 1)); + EXPECT_EQ(0, test_map->size()); + EXPECT_NE(pool_ptr, + &test_map->getPool(ResourcePriority::High, 1, getBasicFactory()).value().get()); +} + // Show that the drained callback is invoked once for the high priority pool, and once for // the default priority pool. 
TEST_F(PriorityConnPoolMapImplTest, TestAddDrainedCbProxiedThrough) { @@ -112,13 +137,13 @@ TEST_F(PriorityConnPoolMapImplTest, TestAddDrainedCbProxiedThrough) { test_map->getPool(ResourcePriority::High, 0, getBasicFactory()); test_map->getPool(ResourcePriority::Default, 0, getBasicFactory()); - Http::ConnectionPool::Instance::DrainedCb cbHigh; - EXPECT_CALL(*mock_pools_[0], addDrainedCallback(_)).WillOnce(SaveArg<0>(&cbHigh)); - Http::ConnectionPool::Instance::DrainedCb cbDefault; - EXPECT_CALL(*mock_pools_[1], addDrainedCallback(_)).WillOnce(SaveArg<0>(&cbDefault)); + Http::ConnectionPool::Instance::IdleCb cbHigh; + EXPECT_CALL(*mock_pools_[0], addIdleCallback(_)).WillOnce(SaveArg<0>(&cbHigh)); + Http::ConnectionPool::Instance::IdleCb cbDefault; + EXPECT_CALL(*mock_pools_[1], addIdleCallback(_)).WillOnce(SaveArg<0>(&cbDefault)); ReadyWatcher watcher; - test_map->addDrainedCallback([&watcher] { watcher.ready(); }); + test_map->addIdleCallback([&watcher]() { watcher.ready(); }); EXPECT_CALL(watcher, ready()).Times(2); cbHigh(); diff --git a/test/integration/BUILD b/test/integration/BUILD index 544fed82743df..f1fdda3435d77 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -312,6 +312,16 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "http_conn_pool_integration_test", + srcs = ["http_conn_pool_integration_test.cc"], + deps = [ + ":http_protocol_integration_lib", + "//test/test_common:test_time_lib", + "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", + ], +) + envoy_cc_test( name = "http2_flood_integration_test", srcs = [ diff --git a/test/integration/http_conn_pool_integration_test.cc b/test/integration/http_conn_pool_integration_test.cc new file mode 100644 index 0000000000000..a1dd4e9fb4007 --- /dev/null +++ b/test/integration/http_conn_pool_integration_test.cc @@ -0,0 +1,91 @@ +#include "envoy/config/bootstrap/v3/bootstrap.pb.h" + +#include "test/integration/http_protocol_integration.h" + +namespace Envoy { +namespace { + +class 
HttpConnPoolIntegrationTest : public HttpProtocolIntegrationTest { +public: + void initialize() override { + config_helper_.addRuntimeOverride("envoy.reloadable_features.conn_pool_delete_when_idle", + "true"); + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + // Set pool limit so that the test can use it's stats to validate that + // the pool is deleted. + envoy::config::cluster::v3::CircuitBreakers circuit_breakers; + auto* threshold = circuit_breakers.mutable_thresholds()->Add(); + threshold->mutable_max_connection_pools()->set_value(1); + bootstrap.mutable_static_resources() + ->mutable_clusters(0) + ->mutable_circuit_breakers() + ->MergeFrom(circuit_breakers); + }); + HttpProtocolIntegrationTest::initialize(); + } +}; + +INSTANTIATE_TEST_SUITE_P(Protocols, HttpConnPoolIntegrationTest, + testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParams()), + HttpProtocolIntegrationTest::protocolTestParamsToString); + +// Tests that conn pools are cleaned up after becoming idle due to a LocalClose +TEST_P(HttpConnPoolIntegrationTest, PoolCleanupAfterLocalClose) { + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + // Make Envoy close the upstream connection after a single request. + ConfigHelper::HttpProtocolOptions protocol_options; + protocol_options.mutable_common_http_protocol_options() + ->mutable_max_requests_per_connection() + ->set_value(1); + ConfigHelper::setProtocolOptions(*bootstrap.mutable_static_resources()->mutable_clusters(0), + protocol_options); + }); + + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeRequestWithBody(default_request_headers_, 1024); + waitForNextUpstreamRequest(); + + // Validate that the circuit breaker config is setup as we expect. 
+ test_server_->waitForGaugeEq("cluster.cluster_0.circuit_breakers.default.cx_pool_open", 1); + + upstream_request_->encodeHeaders(default_response_headers_, false); + upstream_request_->encodeData(512, true); + ASSERT_TRUE(response->waitForEndStream()); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_TRUE(response->complete()); + + ASSERT_TRUE(fake_upstream_connection_->waitForDisconnect()); + + // Validate that the pool is deleted when it becomes idle. + test_server_->waitForGaugeEq("cluster.cluster_0.circuit_breakers.default.cx_pool_open", 0); +} + +// Tests that conn pools are cleaned up after becoming idle due to a RemoteClose +TEST_P(HttpConnPoolIntegrationTest, PoolCleanupAfterRemoteClose) { + initialize(); + + codec_client_ = makeHttpConnection(lookupPort("http")); + auto response = codec_client_->makeRequestWithBody(default_request_headers_, 1024); + waitForNextUpstreamRequest(); + + // Validate that the circuit breaker config is setup as we expect. + test_server_->waitForGaugeEq("cluster.cluster_0.circuit_breakers.default.cx_pool_open", 1); + + upstream_request_->encodeHeaders(default_response_headers_, false); + upstream_request_->encodeData(512, true); + ASSERT_TRUE(response->waitForEndStream()); + + EXPECT_TRUE(upstream_request_->complete()); + EXPECT_TRUE(response->complete()); + + ASSERT_TRUE(fake_upstream_connection_->close()); + + // Validate that the pool is deleted when it becomes idle. 
+ test_server_->waitForGaugeEq("cluster.cluster_0.circuit_breakers.default.cx_pool_open", 0); +} + +} // namespace +} // namespace Envoy diff --git a/test/integration/tcp_conn_pool_integration_test.cc b/test/integration/tcp_conn_pool_integration_test.cc index faea32638e77b..54245a2075929 100644 --- a/test/integration/tcp_conn_pool_integration_test.cc +++ b/test/integration/tcp_conn_pool_integration_test.cc @@ -109,11 +109,34 @@ class TestFilterConfigFactory : public Server::Configuration::NamedNetworkFilter } // namespace -class TcpConnPoolIntegrationTest : public testing::TestWithParam, +struct TcpConnPoolIntegrationTestParams { + Network::Address::IpVersion version; + bool test_original_version; +}; + +std::vector getProtocolTestParams() { + std::vector ret; + + for (auto ip_version : TestEnvironment::getIpVersionsForTest()) { + ret.push_back(TcpConnPoolIntegrationTestParams{ip_version, true}); + ret.push_back(TcpConnPoolIntegrationTestParams{ip_version, false}); + } + return ret; +} + +std::string protocolTestParamsToString( + const ::testing::TestParamInfo& params) { + return absl::StrCat( + (params.param.version == Network::Address::IpVersion::v4 ? "IPv4_" : "IPv6_"), + (params.param.test_original_version == true ? "OriginalConnPool" : "NewConnPool")); +} + +class TcpConnPoolIntegrationTest : public testing::TestWithParam, public BaseIntegrationTest { public: TcpConnPoolIntegrationTest() - : BaseIntegrationTest(GetParam(), tcp_conn_pool_config), filter_resolver_(config_factory_) {} + : BaseIntegrationTest(GetParam().version, tcp_conn_pool_config), + filter_resolver_(config_factory_) {} // Called once by the gtest framework before any tests are run. 
static void SetUpTestSuite() { // NOLINT(readability-identifier-naming) @@ -126,7 +149,15 @@ class TcpConnPoolIntegrationTest : public testing::TestWithParamclose(); } +TEST_P(TcpConnPoolIntegrationTest, PoolCleanupEnabled) { + // The test first does two requests concurrently, resulting in a single pool (it is never idle + // between the first two), followed by going idle, then another request, which should create a + // second pool, which is why the log message is expected 2 times. If the initial pool was not + // cleaned up, only 1 pool would be created. + EXPECT_LOG_CONTAINS_N_TIMES("debug", "Allocating TCP conn pool", 2, { + config_helper_.addRuntimeOverride("envoy.reloadable_features.conn_pool_delete_when_idle", + "true"); + initialize(); + + std::string request1("request1"); + std::string request2("request2"); + std::string request3("request3"); + std::string response1("response1"); + std::string response2("response2"); + std::string response3("response3"); + + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); + + // Send request 1. + ASSERT_TRUE(tcp_client->write(request1)); + FakeRawConnectionPtr fake_upstream_connection1; + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection1)); + std::string data; + ASSERT_TRUE(fake_upstream_connection1->waitForData(request1.size(), &data)); + EXPECT_EQ(request1, data); + + // Send request 2. + ASSERT_TRUE(tcp_client->write(request2)); + FakeRawConnectionPtr fake_upstream_connection2; + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection2)); + ASSERT_TRUE(fake_upstream_connection2->waitForData(request2.size(), &data)); + EXPECT_EQ(request2, data); + + test_server_->waitForGaugeEq("cluster.cluster_0.upstream_cx_active", 2); + + // Send response 2. + ASSERT_TRUE(fake_upstream_connection2->write(response2)); + ASSERT_TRUE(fake_upstream_connection2->close()); + tcp_client->waitForData(response2); + + // Send response 1. 
+ ASSERT_TRUE(fake_upstream_connection1->write(response1)); + ASSERT_TRUE(fake_upstream_connection1->close()); + tcp_client->waitForData(response1, false); + test_server_->waitForGaugeEq("cluster.cluster_0.upstream_cx_active", 0); + + // After both requests were completed, the pool went idle and was cleaned up. Request 3 causes a + // new pool to be created. Seeing a new pool created is a proxy for directly observing that an + // old pool was cleaned up. + // + // TODO(ggreenway): if pool circuit breakers are implemented for tcp pools, verify cleanup by + // looking at stats such as `cluster.cluster_0.circuit_breakers.default.cx_pool_open`. + + // Send request 3. + ASSERT_TRUE(tcp_client->write(request3)); + FakeRawConnectionPtr fake_upstream_connection3; + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection3)); + test_server_->waitForGaugeEq("cluster.cluster_0.upstream_cx_active", 1); + ASSERT_TRUE(fake_upstream_connection3->waitForData(request3.size(), &data)); + EXPECT_EQ(request3, data); + + ASSERT_TRUE(fake_upstream_connection3->write(response3)); + ASSERT_TRUE(fake_upstream_connection3->close()); + tcp_client->waitForData(response3, false); + + test_server_->waitForGaugeEq("cluster.cluster_0.upstream_cx_active", 0); + + tcp_client->close(); + }); +} + +TEST_P(TcpConnPoolIntegrationTest, ShutdownWithOpenConnections) { + initialize(); + + IntegrationTcpClientPtr tcp_client = makeTcpConnection(lookupPort("listener_0")); + + // Establish downstream and upstream connections. + ASSERT_TRUE(tcp_client->write("hello")); + FakeRawConnectionPtr fake_upstream_connection; + ASSERT_TRUE(fake_upstreams_[0]->waitForRawConnection(fake_upstream_connection)); + ASSERT_TRUE(fake_upstream_connection->waitForData(5)); + + test_server_.reset(); + ASSERT_TRUE(fake_upstream_connection->waitForDisconnect()); + tcp_client->waitForDisconnect(); + + // Success criteria is that no ASSERTs fire and there are no leaks. 
+} + } // namespace Envoy diff --git a/test/mocks/http/conn_pool.cc b/test/mocks/http/conn_pool.cc index 035f566a6b130..7c1ffa928c0fc 100644 --- a/test/mocks/http/conn_pool.cc +++ b/test/mocks/http/conn_pool.cc @@ -1,5 +1,8 @@ #include "test/mocks/http/conn_pool.h" +using testing::_; +using testing::SaveArg; + namespace Envoy { namespace Http { namespace ConnectionPool { @@ -7,6 +10,7 @@ namespace ConnectionPool { MockInstance::MockInstance() : host_{std::make_shared>()} { ON_CALL(*this, host()).WillByDefault(Return(host_)); + ON_CALL(*this, addIdleCallback(_)).WillByDefault(SaveArg<0>(&idle_cb_)); } MockInstance::~MockInstance() = default; diff --git a/test/mocks/http/conn_pool.h b/test/mocks/http/conn_pool.h index 2f3d5b9352de2..c1f55c2f92ef8 100644 --- a/test/mocks/http/conn_pool.h +++ b/test/mocks/http/conn_pool.h @@ -29,7 +29,9 @@ class MockInstance : public Instance { // Http::ConnectionPool::Instance MOCK_METHOD(Http::Protocol, protocol, (), (const)); - MOCK_METHOD(void, addDrainedCallback, (DrainedCb cb)); + MOCK_METHOD(void, addIdleCallback, (IdleCb cb)); + MOCK_METHOD(bool, isIdle, (), (const)); + MOCK_METHOD(void, startDrain, ()); MOCK_METHOD(void, drainConnections, ()); MOCK_METHOD(bool, hasActiveConnections, (), (const)); MOCK_METHOD(Cancellable*, newStream, (ResponseDecoder & response_decoder, Callbacks& callbacks)); @@ -38,6 +40,7 @@ class MockInstance : public Instance { MOCK_METHOD(absl::string_view, protocolDescription, (), (const)); std::shared_ptr> host_; + IdleCb idle_cb_; }; } // namespace ConnectionPool diff --git a/test/mocks/tcp/mocks.cc b/test/mocks/tcp/mocks.cc index d6828f046a147..9b1a4cff79905 100644 --- a/test/mocks/tcp/mocks.cc +++ b/test/mocks/tcp/mocks.cc @@ -7,6 +7,7 @@ using testing::ReturnRef; using testing::_; using testing::Invoke; using testing::ReturnRef; +using testing::SaveArg; namespace Envoy { namespace Tcp { @@ -27,6 +28,7 @@ MockInstance::MockInstance() { return newConnectionImpl(cb); })); ON_CALL(*this, 
host()).WillByDefault(Return(host_)); + ON_CALL(*this, addIdleCallback(_)).WillByDefault(SaveArg<0>(&idle_cb_)); } MockInstance::~MockInstance() = default; diff --git a/test/mocks/tcp/mocks.h b/test/mocks/tcp/mocks.h index 6b486918cb99c..75e79e7aea932 100644 --- a/test/mocks/tcp/mocks.h +++ b/test/mocks/tcp/mocks.h @@ -59,7 +59,9 @@ class MockInstance : public Instance { ~MockInstance() override; // Tcp::ConnectionPool::Instance - MOCK_METHOD(void, addDrainedCallback, (DrainedCb cb)); + MOCK_METHOD(void, addIdleCallback, (IdleCb cb)); + MOCK_METHOD(bool, isIdle, (), (const)); + MOCK_METHOD(void, startDrain, ()); MOCK_METHOD(void, drainConnections, ()); MOCK_METHOD(void, closeConnections, ()); MOCK_METHOD(Cancellable*, newConnection, (Tcp::ConnectionPool::Callbacks & callbacks)); @@ -75,6 +77,7 @@ class MockInstance : public Instance { std::list> handles_; std::list callbacks_; + IdleCb idle_cb_; std::shared_ptr> host_{ new NiceMock()}; diff --git a/test/test_common/logging.h b/test/test_common/logging.h index 124f553fe7666..2d6252b2c68ec 100644 --- a/test/test_common/logging.h +++ b/test/test_common/logging.h @@ -57,10 +57,14 @@ class LogRecordingSink : public Logger::SinkDelegate { void log(absl::string_view msg) override; void flush() override; - const std::vector& messages() const { return messages_; } + const std::vector messages() const { + absl::MutexLock ml(&mtx_); + std::vector copy(messages_); + return copy; + } private: - absl::Mutex mtx_; + mutable absl::Mutex mtx_; std::vector messages_ ABSL_GUARDED_BY(mtx_); }; @@ -98,24 +102,24 @@ using ExpectedLogMessages = std::vector; sink_ptr->setShouldEscape(escaped); \ Envoy::LogRecordingSink log_recorder(sink_ptr); \ stmt; \ - if (log_recorder.messages().empty()) { \ + auto messages = log_recorder.messages(); \ + if (messages.empty()) { \ FAIL() << "Expected message(s), but NONE was recorded."; \ } \ Envoy::ExpectedLogMessages failed_expectations; \ for (const Envoy::StringPair& expected : expected_messages) { 
\ const auto log_message = \ - std::find_if(log_recorder.messages().begin(), log_recorder.messages().end(), \ - [&expected](const std::string& message) { \ - return (message.find(expected.second) != std::string::npos) && \ - (message.find(expected.first) != std::string::npos); \ - }); \ - if (log_message == log_recorder.messages().end()) { \ + std::find_if(messages.begin(), messages.end(), [&expected](const std::string& message) { \ + return (message.find(expected.second) != std::string::npos) && \ + (message.find(expected.first) != std::string::npos); \ + }); \ + if (log_message == messages.end()) { \ failed_expectations.push_back(expected); \ } \ } \ if (!failed_expectations.empty()) { \ std::string failed_message; \ - absl::StrAppend(&failed_message, "\nLogs:\n ", absl::StrJoin(log_recorder.messages(), " "), \ + absl::StrAppend(&failed_message, "\nLogs:\n ", absl::StrJoin(messages, " "), \ "\n Do NOT contain:\n"); \ for (const auto& expectation : failed_expectations) { \ absl::StrAppend(&failed_message, " '", expectation.first, "', '", expectation.second, \ @@ -138,11 +142,12 @@ using ExpectedLogMessages = std::vector; Envoy::LogLevelSetter save_levels(spdlog::level::trace); \ Envoy::LogRecordingSink log_recorder(Envoy::Logger::Registry::getSink()); \ stmt; \ - for (const std::string& message : log_recorder.messages()) { \ + auto messages = log_recorder.messages(); \ + for (const std::string& message : messages) { \ if ((message.find(substr) != std::string::npos) && \ (message.find(loglevel) != std::string::npos)) { \ - FAIL() << "\nLogs:\n " << absl::StrJoin(log_recorder.messages(), " ") \ - << "\n Should NOT contain:\n '" << loglevel << "', '" << substr "'\n"; \ + FAIL() << "\nLogs:\n " << absl::StrJoin(messages, " ") << "\n Should NOT contain:\n '" \ + << loglevel << "', '" << substr "'\n"; \ } \ } \ } while (false) @@ -161,6 +166,24 @@ using ExpectedLogMessages = std::vector; EXPECT_LOG_CONTAINS_ALL_OF(message, stmt); \ } while (false) +// Validates that 
when stmt is executed, the supplied substring occurs exactly the specified +// number of times. +#define EXPECT_LOG_CONTAINS_N_TIMES(loglevel, substr, expected_occurrences, stmt) \ + do { \ + Envoy::LogLevelSetter save_levels(spdlog::level::trace); \ + Envoy::LogRecordingSink log_recorder(Envoy::Logger::Registry::getSink()); \ + stmt; \ + auto messages = log_recorder.messages(); \ + uint64_t actual_occurrences = 0; \ + for (const std::string& message : messages) { \ + if ((message.find(substr) != std::string::npos) && \ + (message.find(loglevel) != std::string::npos)) { \ + actual_occurrences++; \ + } \ + } \ + EXPECT_EQ(expected_occurrences, actual_occurrences); \ + } while (false) + // Validates that when stmt is executed, no logs will be emitted. // Expected equality of these values: // 0 @@ -174,7 +197,7 @@ using ExpectedLogMessages = std::vector; Envoy::LogLevelSetter save_levels(spdlog::level::trace); \ Envoy::LogRecordingSink log_recorder(Envoy::Logger::Registry::getSink()); \ stmt; \ - const std::vector& logs = log_recorder.messages(); \ + const std::vector logs = log_recorder.messages(); \ ASSERT_EQ(0, logs.size()) << " Logs:\n " << absl::StrJoin(logs, " "); \ } while (false) From 2f49895285bdcac60d3dc812233aa9115d726c2d Mon Sep 17 00:00:00 2001 From: Sotiris Nanopoulos Date: Mon, 26 Jul 2021 12:09:33 -0700 Subject: [PATCH 48/57] Remove unused powershell call from Windows docker file (#17472) Signed-off-by: Sotiris Nanopoulos --- ci/Dockerfile-envoy-windows | 2 -- 1 file changed, 2 deletions(-) diff --git a/ci/Dockerfile-envoy-windows b/ci/Dockerfile-envoy-windows index 159bbe7ec7466..6f9514569c1c4 100644 --- a/ci/Dockerfile-envoy-windows +++ b/ci/Dockerfile-envoy-windows @@ -13,8 +13,6 @@ ADD ["windows/amd64/envoy.exe", "C:/Program Files/envoy/"] RUN mkdir "C:\\ProgramData\\envoy" ADD ["configs/envoyproxy_io_proxy.yaml", "C:/ProgramData/envoy/envoy.yaml"] -# Replace temp path with Windows temp path -RUN powershell -Command "(cat 
C:\ProgramData\envoy\envoy.yaml -raw) -replace '/tmp/','C:\Windows\Temp\' | Set-Content -Encoding Ascii C:\ProgramData\envoy\envoy.yaml" EXPOSE 10000 From a4cec80da6194602df6b2247e7f90bba97cd35f2 Mon Sep 17 00:00:00 2001 From: Keith Smiley Date: Mon, 26 Jul 2021 12:13:29 -0700 Subject: [PATCH 49/57] bazel: remove old luajit workaround (#17466) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit According to https://luajit.org/install.html > Important: this relates to LuaJIT 2.0 only — use LuaJIT 2.1 to avoid these complications. Since we have updated past 2.1 we shouldn't need these anymore which is great since it breaks on Apple Silicon https://github.com/envoyproxy/envoy/issues/16482#issuecomment-846439439 Signed-off-by: Keith Smiley --- bazel/envoy_binary.bzl | 7 +------ bazel/envoy_test.bzl | 6 +----- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/bazel/envoy_binary.bzl b/bazel/envoy_binary.bzl index 43c52bf7e15bf..6ea24b9888cfb 100644 --- a/bazel/envoy_binary.bzl +++ b/bazel/envoy_binary.bzl @@ -54,12 +54,7 @@ def _envoy_select_exported_symbols(xs): # Compute the final linkopts based on various options. def _envoy_linkopts(): return select({ - # The macOS system library transitively links common libraries (e.g., pthread). - "@envoy//bazel:apple": [ - # See note here: https://luajit.org/install.html - "-pagezero_size 10000", - "-image_base 100000000", - ], + "@envoy//bazel:apple": [], "@envoy//bazel:windows_opt_build": [ "-DEFAULTLIB:ws2_32.lib", "-DEFAULTLIB:iphlpapi.lib", diff --git a/bazel/envoy_test.bzl b/bazel/envoy_test.bzl index f74eda3e8393d..7f45c554c54db 100644 --- a/bazel/envoy_test.bzl +++ b/bazel/envoy_test.bzl @@ -60,11 +60,7 @@ def _envoy_cc_test_infrastructure_library( # Compute the test linkopts based on various options. 
def _envoy_test_linkopts(): return select({ - "@envoy//bazel:apple": [ - # See note here: https://luajit.org/install.html - "-pagezero_size 10000", - "-image_base 100000000", - ], + "@envoy//bazel:apple": [], "@envoy//bazel:windows_x86_64": [ "-DEFAULTLIB:ws2_32.lib", "-DEFAULTLIB:iphlpapi.lib", From 0b395d151da3d5e35c0c10c9fe0845be1275ab9c Mon Sep 17 00:00:00 2001 From: Greg Brail Date: Mon, 26 Jul 2021 12:44:01 -0700 Subject: [PATCH 50/57] ext_proc: Fix problem with buffered body mode with empty or no body (#17430) Signed-off-by: Gregory Brail --- .../filters/http/ext_proc/ext_proc.cc | 1 + .../filters/http/ext_proc/processor_state.cc | 4 +- .../filters/http/ext_proc/processor_state.h | 5 +- .../ext_proc/ext_proc_integration_test.cc | 165 ++++++++++++++++++ 4 files changed, 173 insertions(+), 2 deletions(-) diff --git a/source/extensions/filters/http/ext_proc/ext_proc.cc b/source/extensions/filters/http/ext_proc/ext_proc.cc index 05e7efd5eac8e..711e377161cc8 100644 --- a/source/extensions/filters/http/ext_proc/ext_proc.cc +++ b/source/extensions/filters/http/ext_proc/ext_proc.cc @@ -73,6 +73,7 @@ FilterHeadersStatus Filter::onHeaders(ProcessorState& state, } state.setHeaders(&headers); + state.setHasNoBody(end_stream); ProcessingRequest req; auto* headers_req = state.mutableHeaders(req); MutationUtils::headersToProto(headers, *headers_req->mutable_headers()); diff --git a/source/extensions/filters/http/ext_proc/processor_state.cc b/source/extensions/filters/http/ext_proc/processor_state.cc index 9a20f72d817fa..303798657dd35 100644 --- a/source/extensions/filters/http/ext_proc/processor_state.cc +++ b/source/extensions/filters/http/ext_proc/processor_state.cc @@ -62,7 +62,9 @@ bool ProcessorState::handleHeadersResponse(const HeadersResponse& response) { clearWatermark(); } else { - if (body_mode_ == ProcessingMode::BUFFERED) { + if (no_body_) { + // Fall through if there was never a body in the first place. 
+ } else if (body_mode_ == ProcessingMode::BUFFERED) { if (complete_body_available_) { // If we get here, then all the body data came in before the header message // was complete, and the server wants the body. So, don't continue filter diff --git a/source/extensions/filters/http/ext_proc/processor_state.h b/source/extensions/filters/http/ext_proc/processor_state.h index d20e2bba5b59d..9ca9cbee66a7f 100644 --- a/source/extensions/filters/http/ext_proc/processor_state.h +++ b/source/extensions/filters/http/ext_proc/processor_state.h @@ -54,7 +54,7 @@ class ProcessorState : public Logger::Loggable { }; explicit ProcessorState(Filter& filter) - : filter_(filter), watermark_requested_(false), paused_(false), + : filter_(filter), watermark_requested_(false), paused_(false), no_body_(false), complete_body_available_(false), trailers_available_(false), body_replaced_(false), bytes_enqueued_(0) {} ProcessorState(const ProcessorState&) = delete; @@ -67,6 +67,7 @@ class ProcessorState : public Logger::Loggable { bool completeBodyAvailable() const { return complete_body_available_; } void setCompleteBodyAvailable(bool d) { complete_body_available_ = d; } + void setHasNoBody(bool b) { no_body_ = b; } void setTrailersAvailable(bool d) { trailers_available_ = d; } bool bodyReplaced() const { return body_replaced_; } @@ -132,6 +133,8 @@ class ProcessorState : public Logger::Loggable { // a "continue." 
bool paused_ : 1; + // If true, then there is not going to be a body + bool no_body_ : 1; // If true, then the filter received the complete body bool complete_body_available_ : 1; // If true, then the filter received the trailers diff --git a/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc b/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc index a730287d6519e..4d22ca5dba855 100644 --- a/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc +++ b/test/extensions/filters/http/ext_proc/ext_proc_integration_test.cc @@ -1002,4 +1002,169 @@ TEST_P(ExtProcIntegrationTest, ResponseMessageTimeoutIgnoreError) { verifyDownstreamResponse(*response, 200); } +// Test how the filter responds when asked to buffer a request body for a POST +// request with an empty body. We should get an empty body message because +// the Envoy filter stream received the body after all the headers. +TEST_P(ExtProcIntegrationTest, BufferBodyOverridePostWithEmptyBody) { + proto_config_.mutable_processing_mode()->set_request_body_mode(ProcessingMode::BUFFERED); + initializeConfig(); + HttpIntegrationTest::initialize(); + auto response = sendDownstreamRequestWithBody("", absl::nullopt); + + processRequestHeadersMessage(true, [](const HttpHeaders& headers, HeadersResponse&) { + EXPECT_FALSE(headers.end_of_stream()); + return true; + }); + // We should get an empty body message this time + processRequestBodyMessage(false, [](const HttpBody& body, BodyResponse&) { + EXPECT_TRUE(body.end_of_stream()); + EXPECT_EQ(body.body().size(), 0); + return true; + }); + + handleUpstreamRequest(); + processResponseHeadersMessage(false, absl::nullopt); + verifyDownstreamResponse(*response, 200); +} + +// Test how the filter responds when asked to buffer a response body for a POST +// request with an empty body. We should get an empty body message because +// the Envoy filter stream received the body after all the headers. 
+TEST_P(ExtProcIntegrationTest, BufferBodyOverrideGetWithEmptyResponseBody) { + proto_config_.mutable_processing_mode()->set_response_body_mode(ProcessingMode::BUFFERED); + initializeConfig(); + HttpIntegrationTest::initialize(); + auto response = sendDownstreamRequest(absl::nullopt); + + processRequestHeadersMessage(true, [](const HttpHeaders& headers, HeadersResponse&) { + EXPECT_TRUE(headers.end_of_stream()); + return true; + }); + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, false); + upstream_request_->encodeData(0, true); + processResponseHeadersMessage(false, [](const HttpHeaders& headers, HeadersResponse&) { + EXPECT_FALSE(headers.end_of_stream()); + return true; + }); + // We should get an empty body message this time + processResponseBodyMessage(false, [](const HttpBody& body, BodyResponse&) { + EXPECT_TRUE(body.end_of_stream()); + EXPECT_EQ(body.body().size(), 0); + return true; + }); + verifyDownstreamResponse(*response, 200); +} + +// Test how the filter responds when asked to buffer a response body for a POST +// request with no body. We should not get an empty body message because +// the Envoy filter stream received headers with no body. 
+TEST_P(ExtProcIntegrationTest, BufferBodyOverrideGetWithNoResponseBody) { + proto_config_.mutable_processing_mode()->set_response_body_mode(ProcessingMode::BUFFERED); + initializeConfig(); + HttpIntegrationTest::initialize(); + auto response = sendDownstreamRequest(absl::nullopt); + + processRequestHeadersMessage(true, [](const HttpHeaders& headers, HeadersResponse&) { + EXPECT_TRUE(headers.end_of_stream()); + return true; + }); + ASSERT_TRUE(fake_upstreams_[0]->waitForHttpConnection(*dispatcher_, fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + processResponseHeadersMessage(false, [](const HttpHeaders& headers, HeadersResponse&) { + EXPECT_TRUE(headers.end_of_stream()); + return true; + }); + verifyDownstreamResponse(*response, 200); +} + +// Test how the filter responds when asked to stream a request body for a POST +// request with an empty body. We should get an empty body message because +// the Envoy filter stream received the body after all the headers. 
+TEST_P(ExtProcIntegrationTest, BufferBodyOverridePostWithEmptyBodyStreamed) { + proto_config_.mutable_processing_mode()->set_request_body_mode(ProcessingMode::STREAMED); + initializeConfig(); + HttpIntegrationTest::initialize(); + auto response = sendDownstreamRequestWithBody("", absl::nullopt); + + processRequestHeadersMessage(true, [](const HttpHeaders& headers, HeadersResponse&) { + EXPECT_FALSE(headers.end_of_stream()); + return true; + }); + // We should get an empty body message this time + processRequestBodyMessage(false, [](const HttpBody& body, BodyResponse&) { + EXPECT_TRUE(body.end_of_stream()); + EXPECT_EQ(body.body().size(), 0); + return true; + }); + + handleUpstreamRequest(); + processResponseHeadersMessage(false, absl::nullopt); + verifyDownstreamResponse(*response, 200); +} + +// Test how the filter responds when asked to buffer a request body for a GET +// request with no body. We should receive no body message because the Envoy +// filter stream received the headers and end simultaneously. +TEST_P(ExtProcIntegrationTest, BufferBodyOverrideGetRequestNoBody) { + proto_config_.mutable_processing_mode()->set_request_body_mode(ProcessingMode::BUFFERED); + initializeConfig(); + HttpIntegrationTest::initialize(); + auto response = sendDownstreamRequest(absl::nullopt); + + processRequestHeadersMessage(true, [](const HttpHeaders& headers, HeadersResponse&) { + EXPECT_TRUE(headers.end_of_stream()); + return true; + }); + // We should not see a request body message here + handleUpstreamRequest(); + processResponseHeadersMessage(false, absl::nullopt); + verifyDownstreamResponse(*response, 200); +} + +// Test how the filter responds when asked to stream a request body for a GET +// request with no body. We should receive no body message because the Envoy +// filter stream received the headers and end simultaneously. 
+TEST_P(ExtProcIntegrationTest, BufferBodyOverrideGetRequestNoBodyStreaming) { + proto_config_.mutable_processing_mode()->set_request_body_mode(ProcessingMode::STREAMED); + initializeConfig(); + HttpIntegrationTest::initialize(); + auto response = sendDownstreamRequest(absl::nullopt); + + processRequestHeadersMessage(true, [](const HttpHeaders& headers, HeadersResponse&) { + EXPECT_TRUE(headers.end_of_stream()); + return true; + }); + // We should not see a request body message here + handleUpstreamRequest(); + processResponseHeadersMessage(false, absl::nullopt); + verifyDownstreamResponse(*response, 200); +} + +// Test how the filter responds when asked to buffer a request body for a POST +// request with a body. +TEST_P(ExtProcIntegrationTest, BufferBodyOverridePostWithRequestBody) { + proto_config_.mutable_processing_mode()->set_request_body_mode(ProcessingMode::BUFFERED); + initializeConfig(); + HttpIntegrationTest::initialize(); + auto response = sendDownstreamRequestWithBody("Testing", absl::nullopt); + + processRequestHeadersMessage(true, [](const HttpHeaders& headers, HeadersResponse&) { + EXPECT_FALSE(headers.end_of_stream()); + return true; + }); + processRequestBodyMessage(false, [](const HttpBody& body, BodyResponse&) { + EXPECT_TRUE(body.end_of_stream()); + EXPECT_EQ(body.body(), "Testing"); + return true; + }); + handleUpstreamRequest(); + processResponseHeadersMessage(false, absl::nullopt); + verifyDownstreamResponse(*response, 200); +} + } // namespace Envoy From a58fe011a399a545a1e247c114900b97db9cf1f2 Mon Sep 17 00:00:00 2001 From: Sotiris Nanopoulos Date: Mon, 26 Jul 2021 22:18:27 -0700 Subject: [PATCH 51/57] Increase buffer size of `Win32RedirectRecords` (#17471) While testing the feature I noticed that we can not retrieve the redirect records for the following curl request: `curl.exe -s -o NUL -D - -I -w StatusCode:%{http_code} -L http://edition.cnn.com` Increase the buffer size to 2kb to resolve the issue. 
Signed-off-by: Sotiris Nanopoulos --- envoy/network/io_handle.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/envoy/network/io_handle.h b/envoy/network/io_handle.h index 990c5cb5cd209..644228762fc51 100644 --- a/envoy/network/io_handle.h +++ b/envoy/network/io_handle.h @@ -29,7 +29,7 @@ namespace Network { struct Win32RedirectRecords { // The size of the buffer is selected based on: // https://docs.microsoft.com/en-us/windows-hardware/drivers/network/sio-query-wfp-connection-redirect-records - uint8_t buf_[1024]; + uint8_t buf_[2048]; unsigned long buf_size_; }; From aa7b49773aa58536b82010e2bfdb612c202e9577 Mon Sep 17 00:00:00 2001 From: alyssawilk Date: Tue, 27 Jul 2021 09:09:16 -0400 Subject: [PATCH 52/57] improving tracer coverage (#17493) Adding unit tests and bumping tracer coverage Risk Level: n/a (test only) Testing: unit tests Docs Changes: n/a Release Notes: n/a Signed-off-by: Alyssa Wilk --- .../tracers/opencensus/opencensus_tracer_impl.cc | 5 ----- source/extensions/tracers/zipkin/zipkin_core_types.h | 10 ---------- test/extensions/tracers/xray/tracer_test.cc | 1 + test/per_file_coverage.sh | 1 - 4 files changed, 1 insertion(+), 16 deletions(-) diff --git a/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc b/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc index 108a04b7dcef2..5a8e85dd3f43f 100644 --- a/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc +++ b/source/extensions/tracers/opencensus/opencensus_tracer_impl.cc @@ -103,7 +103,6 @@ startSpanHelper(const std::string& name, bool traced, const Tracing::TraceContex } break; } - case OpenCensusConfig::GRPC_TRACE_BIN: { const auto entry = trace_context.getTraceContext(Constants::get().GRPC_TRACE_BIN); if (entry.has_value()) { @@ -114,7 +113,6 @@ startSpanHelper(const std::string& name, bool traced, const Tracing::TraceContex } break; } - case OpenCensusConfig::CLOUD_TRACE_CONTEXT: { const auto entry = 
trace_context.getTraceContext(Constants::get().X_CLOUD_TRACE_CONTEXT); if (entry.has_value()) { @@ -212,20 +210,17 @@ void Span::injectContext(Tracing::TraceContext& trace_context) { trace_context.setTraceContextReferenceKey( Constants::get().TRACEPARENT, ::opencensus::trace::propagation::ToTraceParentHeader(ctx)); break; - case OpenCensusConfig::GRPC_TRACE_BIN: { std::string val = ::opencensus::trace::propagation::ToGrpcTraceBinHeader(ctx); val = Base64::encode(val.data(), val.size(), /*add_padding=*/false); trace_context.setTraceContextReferenceKey(Constants::get().GRPC_TRACE_BIN, val); break; } - case OpenCensusConfig::CLOUD_TRACE_CONTEXT: trace_context.setTraceContextReferenceKey( Constants::get().X_CLOUD_TRACE_CONTEXT, ::opencensus::trace::propagation::ToCloudTraceContextHeader(ctx)); break; - case OpenCensusConfig::B3: trace_context.setTraceContextReferenceKey( Constants::get().X_B3_TRACEID, ::opencensus::trace::propagation::ToB3TraceIdHeader(ctx)); diff --git a/source/extensions/tracers/zipkin/zipkin_core_types.h b/source/extensions/tracers/zipkin/zipkin_core_types.h index ae70ddff7adc7..949311fb82edf 100644 --- a/source/extensions/tracers/zipkin/zipkin_core_types.h +++ b/source/extensions/tracers/zipkin/zipkin_core_types.h @@ -251,11 +251,6 @@ class BinaryAnnotation : public ZipkinBase { */ AnnotationType annotationType() const { return annotation_type_; } - /** - * Sets the binary's annotation type. - */ - void setAnnotationType(AnnotationType annotation_type) { annotation_type_ = annotation_type; } - /** * @return the annotation's endpoint attribute. */ @@ -266,11 +261,6 @@ class BinaryAnnotation : public ZipkinBase { */ void setEndpoint(const Endpoint& endpoint) { endpoint_ = endpoint; } - /** - * Sets the annotation's endpoint attribute (move semantics). - */ - void setEndpoint(const Endpoint&& endpoint) { endpoint_ = endpoint; } - /** * @return true if the endpoint attribute is set, or false otherwise. 
*/ diff --git a/test/extensions/tracers/xray/tracer_test.cc b/test/extensions/tracers/xray/tracer_test.cc index cbac645b0d496..8f81c9138b478 100644 --- a/test/extensions/tracers/xray/tracer_test.cc +++ b/test/extensions/tracers/xray/tracer_test.cc @@ -276,6 +276,7 @@ TEST_F(XRayTracerTest, SerializeSpanTestWithStatusCodeNotANumber) { span->setTag(Tracing::Tags::get().UserAgent, expected_->user_agent); span->setTag(Tracing::Tags::get().HttpStatusCode, expected_status_code); span->setTag(Tracing::Tags::get().ResponseSize, expected_content_length); + span->setTag("", ""); span->finishSpan(); } diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index 967c1f81a94ef..dc8fdd47c1ce1 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -48,7 +48,6 @@ declare -a KNOWN_LOW_COVERAGE=( "source/extensions/stat_sinks/common/statsd:96.5" "source/extensions/stat_sinks/graphite_statsd:85.7" "source/extensions/stat_sinks/statsd:85.2" -"source/extensions/tracers:96.5" "source/extensions/tracers/opencensus:92.5" "source/extensions/tracers/xray:94.0" "source/extensions/transport_sockets:95.7" From 57bfbda2a50d2f026cf64c82383fe3bebe1e0860 Mon Sep 17 00:00:00 2001 From: Snow Pettersen Date: Tue, 27 Jul 2021 10:18:14 -0400 Subject: [PATCH 53/57] deps: update yaml-cpp to latest master (#17489) Signed-off-by: Snow Pettersen --- bazel/repository_locations.bzl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index 2b2b53eba6688..a4b339cc7951a 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -440,14 +440,14 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "yaml-cpp", project_desc = "YAML parser and emitter in C++ matching the YAML 1.2 spec", project_url = "https://github.com/jbeder/yaml-cpp", - version = "98acc5a8874faab28b82c28936f4b400b389f5d6", - sha256 = "79ab7069ef1c7c3632e7ffe095f7185d4c77b64d8035db3c085c239d4fe96d5f", + version 
= "db6deedcd301754723065e0bbb1b75927c5b49c7", + sha256 = "387d7f25467312ca59068081f9a25bbab02bb6af32fd3e0aec1bd59163558171", strip_prefix = "yaml-cpp-{version}", urls = ["https://github.com/jbeder/yaml-cpp/archive/{version}.tar.gz"], # YAML is also used for runtime as well as controlplane. It shouldn't appear on the # dataplane but we can't verify this automatically due to code structure today. use_category = ["controlplane", "dataplane_core"], - release_date = "2020-07-27", + release_date = "2021-07-23", cpe = "cpe:2.3:a:yaml-cpp_project:yaml-cpp:*", ), com_github_msgpack_msgpack_c = dict( From 6ace04d0fb8f0b258f4fba2bc62f8014de795bc8 Mon Sep 17 00:00:00 2001 From: code Date: Tue, 27 Jul 2021 23:34:28 +0800 Subject: [PATCH 54/57] http: make custom inline headers bootstrap configurable (#17330) Signed-off-by: wbpcode --- api/envoy/config/bootstrap/v3/bootstrap.proto | 49 +++++++++- .../config/bootstrap/v4alpha/bootstrap.proto | 52 ++++++++++- docs/root/version_history/current.rst | 1 + .../envoy/config/bootstrap/v3/bootstrap.proto | 49 +++++++++- .../config/bootstrap/v4alpha/bootstrap.proto | 52 ++++++++++- source/server/BUILD | 1 + source/server/server.cc | 47 +++++++++- test/server/server_test.cc | 92 +++++++++++++++++++ .../server/bootstrap_inline_headers.yaml | 9 ++ .../bootstrap_inline_headers_error.yaml | 3 + 10 files changed, 349 insertions(+), 6 deletions(-) create mode 100644 test/server/test_data/server/bootstrap_inline_headers.yaml create mode 100644 test/server/test_data/server/bootstrap_inline_headers_error.yaml diff --git a/api/envoy/config/bootstrap/v3/bootstrap.proto b/api/envoy/config/bootstrap/v3/bootstrap.proto index b29e8e9c24e13..0e8de36633354 100644 --- a/api/envoy/config/bootstrap/v3/bootstrap.proto +++ b/api/envoy/config/bootstrap/v3/bootstrap.proto @@ -40,7 +40,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // ` for more detail. // Bootstrap :ref:`configuration overview `. 
-// [#next-free-field: 32] +// [#next-free-field: 33] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Bootstrap"; @@ -322,6 +322,13 @@ message Bootstrap { // field. // [#not-implemented-hide:] map certificate_provider_instances = 25; + + // Specifies a set of headers that need to be registered as inline header. This configuration + // allows users to customize the inline headers on-demand at Envoy startup without modifying + // Envoy's source code. + // + // Note that the 'set-cookie' header cannot be registered as inline header. + repeated CustomInlineHeader inline_headers = 32; } // Administration interface :ref:`operations documentation @@ -595,3 +602,43 @@ message LayeredRuntime { // such that later layers in the list overlay earlier entries. repeated RuntimeLayer layers = 1; } + +// Used to specify the header that needs to be registered as an inline header. +// +// If request or response contain multiple headers with the same name and the header +// name is registered as an inline header. Then multiple headers will be folded +// into one, and multiple header values will be concatenated by a suitable delimiter. +// The delimiter is generally a comma. +// +// For example, if 'foo' is registered as an inline header, and the headers contains +// the following two headers: +// +// .. code-block:: text +// +// foo: bar +// foo: eep +// +// Then they will eventually be folded into: +// +// .. code-block:: text +// +// foo: bar, eep +// +// Inline headers provide O(1) search performance, but each inline header imposes +// an additional memory overhead on all instances of the corresponding type of +// HeaderMap or TrailerMap. +message CustomInlineHeader { + enum InlineHeaderType { + REQUEST_HEADER = 0; + REQUEST_TRAILER = 1; + RESPONSE_HEADER = 2; + RESPONSE_TRAILER = 3; + } + + // The name of the header that is expected to be set as the inline header. 
+ string inline_header_name = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // The type of the header that is expected to be set as the inline header. + InlineHeaderType inline_header_type = 2 [(validate.rules).enum = {defined_only: true}]; +} diff --git a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto index 1bcc2ff78e9e1..5c45b8f7dbce9 100644 --- a/api/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ b/api/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -37,7 +37,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 32] +// [#next-free-field: 33] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap"; @@ -289,6 +289,13 @@ message Bootstrap { // field. // [#not-implemented-hide:] map certificate_provider_instances = 25; + + // Specifies a set of headers that need to be registered as inline header. This configuration + // allows users to customize the inline headers on-demand at Envoy startup without modifying + // Envoy's source code. + // + // Note that the 'set-cookie' header cannot be registered as inline header. + repeated CustomInlineHeader inline_headers = 32; } // Administration interface :ref:`operations documentation @@ -568,3 +575,46 @@ message LayeredRuntime { // such that later layers in the list overlay earlier entries. repeated RuntimeLayer layers = 1; } + +// Used to specify the header that needs to be registered as an inline header. +// +// If request or response contain multiple headers with the same name and the header +// name is registered as an inline header. Then multiple headers will be folded +// into one, and multiple header values will be concatenated by a suitable delimiter. +// The delimiter is generally a comma. 
+// +// For example, if 'foo' is registered as an inline header, and the headers contains +// the following two headers: +// +// .. code-block:: text +// +// foo: bar +// foo: eep +// +// Then they will eventually be folded into: +// +// .. code-block:: text +// +// foo: bar, eep +// +// Inline headers provide O(1) search performance, but each inline header imposes +// an additional memory overhead on all instances of the corresponding type of +// HeaderMap or TrailerMap. +message CustomInlineHeader { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.CustomInlineHeader"; + + enum InlineHeaderType { + REQUEST_HEADER = 0; + REQUEST_TRAILER = 1; + RESPONSE_HEADER = 2; + RESPONSE_TRAILER = 3; + } + + // The name of the header that is expected to be set as the inline header. + string inline_header_name = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // The type of the header that is expected to be set as the inline header. + InlineHeaderType inline_header_type = 2 [(validate.rules).enum = {defined_only: true}]; +} diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 582dcfdcd0320..0a4f5b399f64f 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -52,6 +52,7 @@ Removed Config or Runtime New Features ------------ +* bootstrap: added :ref:`inline_headers ` in the bootstrap to make custom inline headers bootstrap configurable. * http: added :ref:`string_match ` in the header matcher. * http: added support for :ref:`max_requests_per_connection ` for both upstream and downstream connections. 
diff --git a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto index 813cfa9e2a595..9171d066a4302 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v3/bootstrap.proto @@ -40,7 +40,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 32] +// [#next-free-field: 33] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Bootstrap"; @@ -321,6 +321,13 @@ message Bootstrap { // [#not-implemented-hide:] map certificate_provider_instances = 25; + // Specifies a set of headers that need to be registered as inline header. This configuration + // allows users to customize the inline headers on-demand at Envoy startup without modifying + // Envoy's source code. + // + // Note that the 'set-cookie' header cannot be registered as inline header. + repeated CustomInlineHeader inline_headers = 32; + Runtime hidden_envoy_deprecated_runtime = 11 [ deprecated = true, (envoy.annotations.deprecated_at_minor_version) = "3.0", @@ -599,3 +606,43 @@ message LayeredRuntime { // such that later layers in the list overlay earlier entries. repeated RuntimeLayer layers = 1; } + +// Used to specify the header that needs to be registered as an inline header. +// +// If request or response contain multiple headers with the same name and the header +// name is registered as an inline header. Then multiple headers will be folded +// into one, and multiple header values will be concatenated by a suitable delimiter. +// The delimiter is generally a comma. +// +// For example, if 'foo' is registered as an inline header, and the headers contains +// the following two headers: +// +// .. 
code-block:: text +// +// foo: bar +// foo: eep +// +// Then they will eventually be folded into: +// +// .. code-block:: text +// +// foo: bar, eep +// +// Inline headers provide O(1) search performance, but each inline header imposes +// an additional memory overhead on all instances of the corresponding type of +// HeaderMap or TrailerMap. +message CustomInlineHeader { + enum InlineHeaderType { + REQUEST_HEADER = 0; + REQUEST_TRAILER = 1; + RESPONSE_HEADER = 2; + RESPONSE_TRAILER = 3; + } + + // The name of the header that is expected to be set as the inline header. + string inline_header_name = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // The type of the header that is expected to be set as the inline header. + InlineHeaderType inline_header_type = 2 [(validate.rules).enum = {defined_only: true}]; +} diff --git a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto index 6e4fc1a4d8ff6..b21acabe686fc 100644 --- a/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto +++ b/generated_api_shadow/envoy/config/bootstrap/v4alpha/bootstrap.proto @@ -39,7 +39,7 @@ option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSIO // ` for more detail. // Bootstrap :ref:`configuration overview `. -// [#next-free-field: 32] +// [#next-free-field: 33] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap"; @@ -318,6 +318,13 @@ message Bootstrap { // field. // [#not-implemented-hide:] map certificate_provider_instances = 25; + + // Specifies a set of headers that need to be registered as inline header. This configuration + // allows users to customize the inline headers on-demand at Envoy startup without modifying + // Envoy's source code. + // + // Note that the 'set-cookie' header cannot be registered as inline header. 
+ repeated CustomInlineHeader inline_headers = 32; } // Administration interface :ref:`operations documentation @@ -600,3 +607,46 @@ message LayeredRuntime { // such that later layers in the list overlay earlier entries. repeated RuntimeLayer layers = 1; } + +// Used to specify the header that needs to be registered as an inline header. +// +// If request or response contain multiple headers with the same name and the header +// name is registered as an inline header. Then multiple headers will be folded +// into one, and multiple header values will be concatenated by a suitable delimiter. +// The delimiter is generally a comma. +// +// For example, if 'foo' is registered as an inline header, and the headers contains +// the following two headers: +// +// .. code-block:: text +// +// foo: bar +// foo: eep +// +// Then they will eventually be folded into: +// +// .. code-block:: text +// +// foo: bar, eep +// +// Inline headers provide O(1) search performance, but each inline header imposes +// an additional memory overhead on all instances of the corresponding type of +// HeaderMap or TrailerMap. +message CustomInlineHeader { + option (udpa.annotations.versioning).previous_message_type = + "envoy.config.bootstrap.v3.CustomInlineHeader"; + + enum InlineHeaderType { + REQUEST_HEADER = 0; + REQUEST_TRAILER = 1; + RESPONSE_HEADER = 2; + RESPONSE_TRAILER = 3; + } + + // The name of the header that is expected to be set as the inline header. + string inline_header_name = 1 + [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; + + // The type of the header that is expected to be set as the inline header. 
+ InlineHeaderType inline_header_type = 2 [(validate.rules).enum = {defined_only: true}]; +} diff --git a/source/server/BUILD b/source/server/BUILD index dc7f3173ae2f6..1efb9e42b30f0 100644 --- a/source/server/BUILD +++ b/source/server/BUILD @@ -582,6 +582,7 @@ envoy_cc_library( "//source/common/grpc:context_lib", "//source/common/http:codes_lib", "//source/common/http:context_lib", + "//source/common/http:headers_lib", "//source/common/init:manager_lib", "//source/common/local_info:local_info_lib", "//source/common/memory:heap_shrinker_lib", diff --git a/source/server/server.cc b/source/server/server.cc index 189ecea07dad9..edb9c03045503 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -35,6 +35,7 @@ #include "source/common/config/version_converter.h" #include "source/common/config/xds_resource.h" #include "source/common/http/codes.h" +#include "source/common/http/headers.h" #include "source/common/local_info/local_info_impl.h" #include "source/common/memory/stats.h" #include "source/common/network/address_impl.h" @@ -295,6 +296,46 @@ void loadBootstrap(absl::optional bootstrap_version, throw EnvoyException(fmt::format("Unknown bootstrap version {}.", *bootstrap_version)); } } + +bool canBeRegisteredAsInlineHeader(const Http::LowerCaseString& header_name) { + // 'set-cookie' cannot currently be registered as an inline header. 
+ if (header_name == Http::Headers::get().SetCookie) { + return false; + } + return true; +} + +void registerCustomInlineHeadersFromBootstrap( + const envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + for (const auto& inline_header : bootstrap.inline_headers()) { + const Http::LowerCaseString lower_case_name(inline_header.inline_header_name()); + if (!canBeRegisteredAsInlineHeader(lower_case_name)) { + throw EnvoyException(fmt::format("Header {} cannot be registered as an inline header.", + inline_header.inline_header_name())); + } + switch (inline_header.inline_header_type()) { + case envoy::config::bootstrap::v3::CustomInlineHeader::REQUEST_HEADER: + Http::CustomInlineHeaderRegistry::registerInlineHeader< + Http::RequestHeaderMap::header_map_type>(lower_case_name); + break; + case envoy::config::bootstrap::v3::CustomInlineHeader::REQUEST_TRAILER: + Http::CustomInlineHeaderRegistry::registerInlineHeader< + Http::RequestTrailerMap::header_map_type>(lower_case_name); + break; + case envoy::config::bootstrap::v3::CustomInlineHeader::RESPONSE_HEADER: + Http::CustomInlineHeaderRegistry::registerInlineHeader< + Http::ResponseHeaderMap::header_map_type>(lower_case_name); + break; + case envoy::config::bootstrap::v3::CustomInlineHeader::RESPONSE_TRAILER: + Http::CustomInlineHeaderRegistry::registerInlineHeader< + Http::ResponseTrailerMap::header_map_type>(lower_case_name); + break; + default: + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } + } +} + } // namespace void InstanceUtil::loadBootstrapConfig(envoy::config::bootstrap::v3::Bootstrap& bootstrap, @@ -356,8 +397,10 @@ void InstanceImpl::initialize(const Options& options, // setPrefix has a release assert verifying that setPrefix() is not called after prefix() ThreadSafeSingleton::get().setPrefix(bootstrap_.header_prefix().c_str()); } - // TODO(mattklein123): Custom O(1) headers can be registered at this point for creating/finalizing - // any header maps. + + // Register Custom O(1) headers from bootstrap. 
+ registerCustomInlineHeadersFromBootstrap(bootstrap_); + ENVOY_LOG(info, "HTTP header map info:"); for (const auto& info : Http::HeaderMapImplUtility::getAllHeaderMapImplInfo()) { ENVOY_LOG(info, " {}: {} bytes: {}", info.name_, info.size_, diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 5d83592881f3b..115953df50d49 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -334,6 +334,98 @@ INSTANTIATE_TEST_SUITE_P(IpVersions, ServerInstanceImplTest, testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), TestUtility::ipTestParamsToString); +// The finalize() of Http::CustomInlineHeaderRegistry will be called When InstanceImpl is +// instantiated. For this reason, the test case for inline headers must precede all other test +// cases. + +// Test the case where inline headers contain names that cannot be registered as inline headers. +TEST_P(ServerInstanceImplTest, WithErrorCustomInlineHeaders) { + EXPECT_THROW_WITH_MESSAGE( + initialize("test/server/test_data/server/bootstrap_inline_headers_error.yaml"), + EnvoyException, "Header set-cookie cannot be registered as an inline header."); +} + +TEST_P(ServerInstanceImplTest, WithDeathCustomInlineHeaders) { +#if !defined(NDEBUG) + // The finalize() of Http::CustomInlineHeaderRegistry will be called after the first successful + // instantiation of InstanceImpl. If InstanceImpl is instantiated again and the inline header is + // registered again, the assertion will be triggered. + EXPECT_DEATH( + { + initialize("test/server/test_data/server/bootstrap_inline_headers.yaml"); + initialize("test/server/test_data/server/bootstrap_inline_headers.yaml"); + }, + ""); +#endif // !defined(NDEBUG) +} + +// Test whether the custom inline headers can be registered correctly. 
+TEST_P(ServerInstanceImplTest, WithCustomInlineHeaders) { + static bool is_registered = false; + + if (!is_registered) { + // Avoid repeated registration of custom inline headers in the current process after the + // finalize() of Http::CustomInlineHeaderRegistry has already been called. + EXPECT_NO_THROW(initialize("test/server/test_data/server/bootstrap_inline_headers.yaml")); + is_registered = true; + } + + EXPECT_TRUE( + Http::CustomInlineHeaderRegistry::getInlineHeader( + Http::LowerCaseString("test1")) + .has_value()); + EXPECT_TRUE( + Http::CustomInlineHeaderRegistry::getInlineHeader( + Http::LowerCaseString("test2")) + .has_value()); + EXPECT_TRUE( + Http::CustomInlineHeaderRegistry::getInlineHeader( + Http::LowerCaseString("test3")) + .has_value()); + EXPECT_TRUE( + Http::CustomInlineHeaderRegistry::getInlineHeader( + Http::LowerCaseString("test4")) + .has_value()); + + { + Http::TestRequestHeaderMapImpl headers{ + {"test1", "test1_value1"}, + {"test1", "test1_value2"}, + {"test3", "test3_value1"}, + {"test3", "test3_value2"}, + }; + + // 'test1' is registered as the inline request header. + auto test1_headers = headers.get(Http::LowerCaseString("test1")); + EXPECT_EQ(1, test1_headers.size()); + EXPECT_EQ("test1_value1,test1_value2", headers.get_("test1")); + + // 'test3' is not registered as an inline request header. + auto test3_headers = headers.get(Http::LowerCaseString("test3")); + EXPECT_EQ(2, test3_headers.size()); + EXPECT_EQ("test3_value1", headers.get_("test3")); + } + + { + Http::TestResponseHeaderMapImpl headers{ + {"test1", "test1_value1"}, + {"test1", "test1_value2"}, + {"test3", "test3_value1"}, + {"test3", "test3_value2"}, + }; + + // 'test1' is not registered as the inline response header. + auto test1_headers = headers.get(Http::LowerCaseString("test1")); + EXPECT_EQ(2, test1_headers.size()); + EXPECT_EQ("test1_value1", headers.get_("test1")); + + // 'test3' is registered as an inline response header. 
+ auto test3_headers = headers.get(Http::LowerCaseString("test3")); + EXPECT_EQ(1, test3_headers.size()); + EXPECT_EQ("test3_value1,test3_value2", headers.get_("test3")); + } +} + // Validates that server stats are flushed even when server is stuck with initialization. TEST_P(ServerInstanceImplTest, StatsFlushWhenServerIsStillInitializing) { CustomStatsSinkFactory factory; diff --git a/test/server/test_data/server/bootstrap_inline_headers.yaml b/test/server/test_data/server/bootstrap_inline_headers.yaml new file mode 100644 index 0000000000000..7d5e05c47f9b4 --- /dev/null +++ b/test/server/test_data/server/bootstrap_inline_headers.yaml @@ -0,0 +1,9 @@ +inline_headers: +- inline_header_name: test1 + inline_header_type: REQUEST_HEADER +- inline_header_name: test2 + inline_header_type: REQUEST_TRAILER +- inline_header_name: test3 + inline_header_type: RESPONSE_HEADER +- inline_header_name: test4 + inline_header_type: RESPONSE_TRAILER diff --git a/test/server/test_data/server/bootstrap_inline_headers_error.yaml b/test/server/test_data/server/bootstrap_inline_headers_error.yaml new file mode 100644 index 0000000000000..d3d884fd3e8e6 --- /dev/null +++ b/test/server/test_data/server/bootstrap_inline_headers_error.yaml @@ -0,0 +1,3 @@ +inline_headers: +- inline_header_name: set-cookie + inline_header_type: RESPONSE_HEADER From dc202cdce075c383a808bbd5aac13d9e82305832 Mon Sep 17 00:00:00 2001 From: Crypt Keeper <64215+codefromthecrypt@users.noreply.github.com> Date: Wed, 28 Jul 2021 01:48:00 +1000 Subject: [PATCH 55/57] updates links to jaegertracing-plugin.tar.gz (#17497) This moves links used by example Docker configuration to a 1st party repository. 
Fixes #16866 Signed-off-by: Adrian Cole --- examples/front-proxy/Dockerfile-jaeger-service | 2 +- examples/jaeger-native-tracing/Dockerfile-frontenvoy | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/front-proxy/Dockerfile-jaeger-service b/examples/front-proxy/Dockerfile-jaeger-service index 8c3fe1bd42761..f8bbf13b03e03 100644 --- a/examples/front-proxy/Dockerfile-jaeger-service +++ b/examples/front-proxy/Dockerfile-jaeger-service @@ -11,7 +11,7 @@ RUN chmod u+x /usr/local/bin/start_service.sh # https://github.com/envoyproxy/envoy/issues/11382#issuecomment-638012072 # RUN echo "4a7d17d4724ee890490bcd6cfdedb12a02316a3d33214348d30979abd201f1ca /usr/local/lib/libjaegertracing_plugin.so" > /tmp/checksum \ - && curl -Ls https://github.com/tetratelabs/getenvoy-package/files/3518103/getenvoy-centos-jaegertracing-plugin.tar.gz \ + && curl -Ls https://github.com/envoyproxy/misc/releases/download/jaegertracing-plugin/jaegertracing-plugin-centos.tar.gz \ | tar zxf - -C /usr/local/lib \ && mv /usr/local/lib/libjaegertracing.so.0.4.2 /usr/local/lib/libjaegertracing_plugin.so \ && sha256sum -c /tmp/checksum \ diff --git a/examples/jaeger-native-tracing/Dockerfile-frontenvoy b/examples/jaeger-native-tracing/Dockerfile-frontenvoy index ef40a9365ed25..fbb826bf0ef92 100644 --- a/examples/jaeger-native-tracing/Dockerfile-frontenvoy +++ b/examples/jaeger-native-tracing/Dockerfile-frontenvoy @@ -12,7 +12,7 @@ COPY ./front-envoy-jaeger.yaml /etc/front-envoy.yaml # https://github.com/envoyproxy/envoy/issues/11382#issuecomment-638012072 # RUN echo "4a7d17d4724ee890490bcd6cfdedb12a02316a3d33214348d30979abd201f1ca /usr/local/lib/libjaegertracing_plugin.so" > /tmp/checksum \ - && curl -Ls https://github.com/tetratelabs/getenvoy-package/files/3518103/getenvoy-centos-jaegertracing-plugin.tar.gz \ + && curl -Ls https://github.com/envoyproxy/misc/releases/download/jaegertracing-plugin/jaegertracing-plugin-centos.tar.gz \ | tar zxf - -C /usr/local/lib \ && mv 
/usr/local/lib/libjaegertracing.so.0.4.2 /usr/local/lib/libjaegertracing_plugin.so \ && sha256sum -c /tmp/checksum \ From cdbeb5f127443e934e7f1dfe43dff9da3e26eefb Mon Sep 17 00:00:00 2001 From: Manish Kumar Date: Tue, 27 Jul 2021 23:05:23 +0530 Subject: [PATCH 56/57] docs: Fixed FaultDelay docs. (#17495) Signed-off-by: Manish Kumar --- api/envoy/extensions/filters/common/fault/v3/fault.proto | 9 ++++----- .../envoy/extensions/filters/common/fault/v3/fault.proto | 9 ++++----- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/api/envoy/extensions/filters/common/fault/v3/fault.proto b/api/envoy/extensions/filters/common/fault/v3/fault.proto index b5b1dbd463f0d..62da059e26484 100644 --- a/api/envoy/extensions/filters/common/fault/v3/fault.proto +++ b/api/envoy/extensions/filters/common/fault/v3/fault.proto @@ -18,7 +18,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common fault injection types] // Delay specification is used to inject latency into the -// HTTP/gRPC/Mongo/Redis operation or delay proxying of TCP connections. +// HTTP/Mongo operation. // [#next-free-field: 6] message FaultDelay { option (udpa.annotations.versioning).previous_message_type = @@ -46,10 +46,9 @@ message FaultDelay { // Add a fixed delay before forwarding the operation upstream. See // https://developers.google.com/protocol-buffers/docs/proto3#json for - // the JSON/YAML Duration mapping. For HTTP/Mongo/Redis, the specified - // delay will be injected before a new request/operation. For TCP - // connections, the proxying of the connection upstream will be delayed - // for the specified period. This is required if type is FIXED. + // the JSON/YAML Duration mapping. For HTTP/Mongo, the specified + // delay will be injected before a new request/operation. + // This is required if type is FIXED. 
google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration = {gt {}}]; // Fault delays are controlled via an HTTP header (if applicable). diff --git a/generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto b/generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto index f1bdb571c4803..bcb5bdf9bbf55 100644 --- a/generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto +++ b/generated_api_shadow/envoy/extensions/filters/common/fault/v3/fault.proto @@ -19,7 +19,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common fault injection types] // Delay specification is used to inject latency into the -// HTTP/gRPC/Mongo/Redis operation or delay proxying of TCP connections. +// HTTP/Mongo operation. // [#next-free-field: 6] message FaultDelay { option (udpa.annotations.versioning).previous_message_type = @@ -45,10 +45,9 @@ message FaultDelay { // Add a fixed delay before forwarding the operation upstream. See // https://developers.google.com/protocol-buffers/docs/proto3#json for - // the JSON/YAML Duration mapping. For HTTP/Mongo/Redis, the specified - // delay will be injected before a new request/operation. For TCP - // connections, the proxying of the connection upstream will be delayed - // for the specified period. This is required if type is FIXED. + // the JSON/YAML Duration mapping. For HTTP/Mongo, the specified + // delay will be injected before a new request/operation. + // This is required if type is FIXED. google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration = {gt {}}]; // Fault delays are controlled via an HTTP header (if applicable). 
From 7cf1f9bd40ef869103dec7b6a1521adbb8d5219b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ra=C3=BAl=20Guti=C3=A9rrez=20Segal=C3=A9s?= Date: Tue, 27 Jul 2021 18:50:02 -0400 Subject: [PATCH 57/57] thrift proxy: move UpstreamRequest into its own file (#17498) This is needed so that it can be reused by the upcoming shadowing implementation, which will introduce a dependency from Router to ShadowRouter to UpstreamRequest, so this breaks the circular dependency. Risk Level: low (refactor) Unit Tests: existing Docs Changes: n/a Release Notes: n/a Signed-off-by: Raul Gutierrez Segales --- .../filters/network/thrift_proxy/router/BUILD | 16 ++ .../thrift_proxy/router/router_impl.cc | 185 ---------------- .../network/thrift_proxy/router/router_impl.h | 45 +--- .../thrift_proxy/router/upstream_request.cc | 199 ++++++++++++++++++ .../thrift_proxy/router/upstream_request.h | 65 ++++++ 5 files changed, 281 insertions(+), 229 deletions(-) create mode 100644 source/extensions/filters/network/thrift_proxy/router/upstream_request.cc create mode 100644 source/extensions/filters/network/thrift_proxy/router/upstream_request.h diff --git a/source/extensions/filters/network/thrift_proxy/router/BUILD b/source/extensions/filters/network/thrift_proxy/router/BUILD index ff8048656105e..7eb78e9adb33d 100644 --- a/source/extensions/filters/network/thrift_proxy/router/BUILD +++ b/source/extensions/filters/network/thrift_proxy/router/BUILD @@ -45,6 +45,21 @@ envoy_cc_library( ], ) +envoy_cc_library( + name = "upstream_request_lib", + srcs = ["upstream_request.cc"], + hdrs = ["upstream_request.h"], + deps = [ + ":router_interface", + "//envoy/tcp:conn_pool_interface", + "//source/extensions/filters/network/thrift_proxy:app_exception_lib", + "//source/extensions/filters/network/thrift_proxy:conn_manager_lib", + "//source/extensions/filters/network/thrift_proxy:thrift_object_interface", + "//source/extensions/filters/network/thrift_proxy:transport_interface", + 
"//source/extensions/filters/network/thrift_proxy/filters:filter_interface", + ], +) + envoy_cc_library( name = "router_lib", srcs = ["router_impl.cc"], @@ -52,6 +67,7 @@ envoy_cc_library( deps = [ ":router_interface", ":router_ratelimit_lib", + ":upstream_request_lib", "//envoy/tcp:conn_pool_interface", "//envoy/upstream:cluster_manager_interface", "//envoy/upstream:load_balancer_interface", diff --git a/source/extensions/filters/network/thrift_proxy/router/router_impl.cc b/source/extensions/filters/network/thrift_proxy/router/router_impl.cc index 77e2e18970304..f0a5ef11bb1af 100644 --- a/source/extensions/filters/network/thrift_proxy/router/router_impl.cc +++ b/source/extensions/filters/network/thrift_proxy/router/router_impl.cc @@ -434,191 +434,6 @@ const Network::Connection* Router::downstreamConnection() const { void Router::cleanup() { upstream_request_.reset(); } -Router::UpstreamRequest::UpstreamRequest(RequestOwner& parent, Upstream::TcpPoolData& pool_data, - MessageMetadataSharedPtr& metadata, - TransportType transport_type, ProtocolType protocol_type) - : parent_(parent), conn_pool_data_(pool_data), metadata_(metadata), - transport_(NamedTransportConfigFactory::getFactory(transport_type).createTransport()), - protocol_(NamedProtocolConfigFactory::getFactory(protocol_type).createProtocol()), - request_complete_(false), response_started_(false), response_complete_(false) {} - -Router::UpstreamRequest::~UpstreamRequest() { - if (conn_pool_handle_) { - conn_pool_handle_->cancel(Tcp::ConnectionPool::CancelPolicy::Default); - } -} - -FilterStatus Router::UpstreamRequest::start() { - Tcp::ConnectionPool::Cancellable* handle = conn_pool_data_.newConnection(*this); - if (handle) { - // Pause while we wait for a connection. - conn_pool_handle_ = handle; - return FilterStatus::StopIteration; - } - - if (upgrade_response_ != nullptr) { - // Pause while we wait for an upgrade response. 
- return FilterStatus::StopIteration; - } - - if (upstream_host_ == nullptr) { - return FilterStatus::StopIteration; - } - - return FilterStatus::Continue; -} - -void Router::UpstreamRequest::releaseConnection(const bool close) { - if (conn_pool_handle_) { - conn_pool_handle_->cancel(Tcp::ConnectionPool::CancelPolicy::Default); - conn_pool_handle_ = nullptr; - } - - conn_state_ = nullptr; - - // The event triggered by close will also release this connection so clear conn_data_ before - // closing. - auto conn_data = std::move(conn_data_); - if (close && conn_data != nullptr) { - conn_data->connection().close(Network::ConnectionCloseType::NoFlush); - } -} - -void Router::UpstreamRequest::resetStream() { releaseConnection(true); } - -void Router::UpstreamRequest::onPoolFailure(ConnectionPool::PoolFailureReason reason, - absl::string_view, - Upstream::HostDescriptionConstSharedPtr host) { - conn_pool_handle_ = nullptr; - - // Mimic an upstream reset. - onUpstreamHostSelected(host); - onResetStream(reason); -} - -void Router::UpstreamRequest::onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_data, - Upstream::HostDescriptionConstSharedPtr host) { - // Only invoke continueDecoding if we'd previously stopped the filter chain. 
- bool continue_decoding = conn_pool_handle_ != nullptr; - - onUpstreamHostSelected(host); - host->outlierDetector().putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess); - - conn_data_ = std::move(conn_data); - conn_data_->addUpstreamCallbacks(parent_.upstreamCallbacks()); - conn_pool_handle_ = nullptr; - - conn_state_ = conn_data_->connectionStateTyped(); - if (conn_state_ == nullptr) { - conn_data_->setConnectionState(std::make_unique()); - conn_state_ = conn_data_->connectionStateTyped(); - } - - if (protocol_->supportsUpgrade()) { - auto& buffer = parent_.buffer(); - upgrade_response_ = protocol_->attemptUpgrade(*transport_, *conn_state_, buffer); - if (upgrade_response_ != nullptr) { - parent_.addSize(buffer.length()); - conn_data_->connection().write(buffer, false); - return; - } - } - - onRequestStart(continue_decoding); -} - -void Router::UpstreamRequest::onRequestStart(bool continue_decoding) { - auto& buffer = parent_.buffer(); - parent_.initProtocolConverter(*protocol_, buffer); - - metadata_->setSequenceId(conn_state_->nextSequenceId()); - parent_.convertMessageBegin(metadata_); - - if (continue_decoding) { - parent_.continueDecoding(); - } -} - -void Router::UpstreamRequest::onRequestComplete() { - Event::Dispatcher& dispatcher = parent_.dispatcher(); - downstream_request_complete_time_ = dispatcher.timeSource().monotonicTime(); - request_complete_ = true; -} - -void Router::UpstreamRequest::onResponseComplete() { - chargeResponseTiming(); - response_complete_ = true; - conn_state_ = nullptr; - conn_data_.reset(); -} - -void Router::UpstreamRequest::onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) { - upstream_host_ = host; -} - -void Router::UpstreamRequest::onResetStream(ConnectionPool::PoolFailureReason reason) { - if (metadata_->messageType() == MessageType::Oneway) { - // For oneway requests, we should not attempt a response. Reset the downstream to signal - // an error. 
- parent_.resetDownstreamConnection(); - return; - } - - chargeResponseTiming(); - - switch (reason) { - case ConnectionPool::PoolFailureReason::Overflow: - parent_.sendLocalReply(AppException(AppExceptionType::InternalError, - "thrift upstream request: too many connections"), - true); - break; - case ConnectionPool::PoolFailureReason::LocalConnectionFailure: - upstream_host_->outlierDetector().putResult( - Upstream::Outlier::Result::LocalOriginConnectFailed); - // Should only happen if we closed the connection, due to an error condition, in which case - // we've already handled any possible downstream response. - parent_.resetDownstreamConnection(); - break; - case ConnectionPool::PoolFailureReason::RemoteConnectionFailure: - case ConnectionPool::PoolFailureReason::Timeout: - if (reason == ConnectionPool::PoolFailureReason::Timeout) { - upstream_host_->outlierDetector().putResult(Upstream::Outlier::Result::LocalOriginTimeout); - } else if (reason == ConnectionPool::PoolFailureReason::RemoteConnectionFailure) { - upstream_host_->outlierDetector().putResult( - Upstream::Outlier::Result::LocalOriginConnectFailed); - } - - // TODO(zuercher): distinguish between these cases where appropriate (particularly timeout) - if (!response_started_) { - parent_.sendLocalReply(AppException(AppExceptionType::InternalError, - fmt::format("connection failure '{}'", - (upstream_host_ != nullptr) - ? upstream_host_->address()->asString() - : "to upstream")), - true); - return; - } - - // Error occurred after a partial response, propagate the reset to the downstream. 
- parent_.resetDownstreamConnection(); - break; - default: - NOT_REACHED_GCOVR_EXCL_LINE; - } -} - -void Router::UpstreamRequest::chargeResponseTiming() { - if (charged_response_timing_ || !request_complete_) { - return; - } - charged_response_timing_ = true; - Event::Dispatcher& dispatcher = parent_.dispatcher(); - const std::chrono::milliseconds response_time = - std::chrono::duration_cast( - dispatcher.timeSource().monotonicTime() - downstream_request_complete_time_); - parent_.recordResponseDuration(response_time.count(), Stats::Histogram::Unit::Milliseconds); -} - } // namespace Router } // namespace ThriftProxy } // namespace NetworkFilters diff --git a/source/extensions/filters/network/thrift_proxy/router/router_impl.h b/source/extensions/filters/network/thrift_proxy/router/router_impl.h index 502a2659cff6b..5410028323b88 100644 --- a/source/extensions/filters/network/thrift_proxy/router/router_impl.h +++ b/source/extensions/filters/network/thrift_proxy/router/router_impl.h @@ -18,6 +18,7 @@ #include "source/extensions/filters/network/thrift_proxy/filters/filter.h" #include "source/extensions/filters/network/thrift_proxy/router/router.h" #include "source/extensions/filters/network/thrift_proxy/router/router_ratelimit_impl.h" +#include "source/extensions/filters/network/thrift_proxy/router/upstream_request.h" #include "source/extensions/filters/network/thrift_proxy/thrift_object.h" #include "absl/types/optional.h" @@ -209,50 +210,6 @@ class Router : public Tcp::ConnectionPool::UpstreamCallbacks, void onBelowWriteBufferLowWatermark() override {} private: - struct UpstreamRequest : public Tcp::ConnectionPool::Callbacks { - UpstreamRequest(RequestOwner& parent, Upstream::TcpPoolData& pool_data, - MessageMetadataSharedPtr& metadata, TransportType transport_type, - ProtocolType protocol_type); - ~UpstreamRequest() override; - - FilterStatus start(); - void resetStream(); - void releaseConnection(bool close); - - // Tcp::ConnectionPool::Callbacks - void 
onPoolFailure(ConnectionPool::PoolFailureReason reason, - absl::string_view transport_failure_reason, - Upstream::HostDescriptionConstSharedPtr host) override; - void onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn, - Upstream::HostDescriptionConstSharedPtr host) override; - - void onRequestStart(bool continue_decoding); - void onRequestComplete(); - void onResponseComplete(); - void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host); - void onResetStream(ConnectionPool::PoolFailureReason reason); - void chargeResponseTiming(); - - RequestOwner& parent_; - Upstream::TcpPoolData& conn_pool_data_; - MessageMetadataSharedPtr metadata_; - - Tcp::ConnectionPool::Cancellable* conn_pool_handle_{}; - Tcp::ConnectionPool::ConnectionDataPtr conn_data_; - Upstream::HostDescriptionConstSharedPtr upstream_host_; - ThriftConnectionState* conn_state_{}; - TransportPtr transport_; - ProtocolPtr protocol_; - ThriftObjectPtr upgrade_response_; - - bool request_complete_ : 1; - bool response_started_ : 1; - bool response_complete_ : 1; - - bool charged_response_timing_{false}; - MonotonicTime downstream_request_complete_time_; - }; - void cleanup(); ThriftFilters::DecoderFilterCallbacks* callbacks_{}; diff --git a/source/extensions/filters/network/thrift_proxy/router/upstream_request.cc b/source/extensions/filters/network/thrift_proxy/router/upstream_request.cc new file mode 100644 index 0000000000000..8ac684414efbe --- /dev/null +++ b/source/extensions/filters/network/thrift_proxy/router/upstream_request.cc @@ -0,0 +1,199 @@ +#include "source/extensions/filters/network/thrift_proxy/router/upstream_request.h" + +#include "source/extensions/filters/network/thrift_proxy/app_exception_impl.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace ThriftProxy { +namespace Router { + +UpstreamRequest::UpstreamRequest(RequestOwner& parent, Upstream::TcpPoolData& pool_data, + MessageMetadataSharedPtr& metadata, TransportType 
transport_type, + ProtocolType protocol_type) + : parent_(parent), conn_pool_data_(pool_data), metadata_(metadata), + transport_(NamedTransportConfigFactory::getFactory(transport_type).createTransport()), + protocol_(NamedProtocolConfigFactory::getFactory(protocol_type).createProtocol()), + request_complete_(false), response_started_(false), response_complete_(false) {} + +UpstreamRequest::~UpstreamRequest() { + if (conn_pool_handle_) { + conn_pool_handle_->cancel(Tcp::ConnectionPool::CancelPolicy::Default); + } +} + +FilterStatus UpstreamRequest::start() { + Tcp::ConnectionPool::Cancellable* handle = conn_pool_data_.newConnection(*this); + if (handle) { + // Pause while we wait for a connection. + conn_pool_handle_ = handle; + return FilterStatus::StopIteration; + } + + if (upgrade_response_ != nullptr) { + // Pause while we wait for an upgrade response. + return FilterStatus::StopIteration; + } + + if (upstream_host_ == nullptr) { + return FilterStatus::StopIteration; + } + + return FilterStatus::Continue; +} + +void UpstreamRequest::releaseConnection(const bool close) { + if (conn_pool_handle_) { + conn_pool_handle_->cancel(Tcp::ConnectionPool::CancelPolicy::Default); + conn_pool_handle_ = nullptr; + } + + conn_state_ = nullptr; + + // The event triggered by close will also release this connection so clear conn_data_ before + // closing. + auto conn_data = std::move(conn_data_); + if (close && conn_data != nullptr) { + conn_data->connection().close(Network::ConnectionCloseType::NoFlush); + } +} + +void UpstreamRequest::resetStream() { releaseConnection(true); } + +void UpstreamRequest::onPoolFailure(ConnectionPool::PoolFailureReason reason, absl::string_view, + Upstream::HostDescriptionConstSharedPtr host) { + conn_pool_handle_ = nullptr; + + // Mimic an upstream reset. 
+ onUpstreamHostSelected(host); + onResetStream(reason); +} + +void UpstreamRequest::onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn_data, + Upstream::HostDescriptionConstSharedPtr host) { + // Only invoke continueDecoding if we'd previously stopped the filter chain. + bool continue_decoding = conn_pool_handle_ != nullptr; + + onUpstreamHostSelected(host); + host->outlierDetector().putResult(Upstream::Outlier::Result::LocalOriginConnectSuccess); + + conn_data_ = std::move(conn_data); + conn_data_->addUpstreamCallbacks(parent_.upstreamCallbacks()); + conn_pool_handle_ = nullptr; + + conn_state_ = conn_data_->connectionStateTyped(); + if (conn_state_ == nullptr) { + conn_data_->setConnectionState(std::make_unique()); + conn_state_ = conn_data_->connectionStateTyped(); + } + + if (protocol_->supportsUpgrade()) { + auto& buffer = parent_.buffer(); + upgrade_response_ = protocol_->attemptUpgrade(*transport_, *conn_state_, buffer); + if (upgrade_response_ != nullptr) { + parent_.addSize(buffer.length()); + conn_data_->connection().write(buffer, false); + return; + } + } + + onRequestStart(continue_decoding); +} + +void UpstreamRequest::onRequestStart(bool continue_decoding) { + auto& buffer = parent_.buffer(); + parent_.initProtocolConverter(*protocol_, buffer); + + metadata_->setSequenceId(conn_state_->nextSequenceId()); + parent_.convertMessageBegin(metadata_); + + if (continue_decoding) { + parent_.continueDecoding(); + } +} + +void UpstreamRequest::onRequestComplete() { + Event::Dispatcher& dispatcher = parent_.dispatcher(); + downstream_request_complete_time_ = dispatcher.timeSource().monotonicTime(); + request_complete_ = true; +} + +void UpstreamRequest::onResponseComplete() { + chargeResponseTiming(); + response_complete_ = true; + conn_state_ = nullptr; + conn_data_.reset(); +} + +void UpstreamRequest::onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host) { + upstream_host_ = host; +} + +void 
UpstreamRequest::onResetStream(ConnectionPool::PoolFailureReason reason) { + if (metadata_->messageType() == MessageType::Oneway) { + // For oneway requests, we should not attempt a response. Reset the downstream to signal + // an error. + parent_.resetDownstreamConnection(); + return; + } + + chargeResponseTiming(); + + switch (reason) { + case ConnectionPool::PoolFailureReason::Overflow: + parent_.sendLocalReply(AppException(AppExceptionType::InternalError, + "thrift upstream request: too many connections"), + true); + break; + case ConnectionPool::PoolFailureReason::LocalConnectionFailure: + upstream_host_->outlierDetector().putResult( + Upstream::Outlier::Result::LocalOriginConnectFailed); + // Should only happen if we closed the connection, due to an error condition, in which case + // we've already handled any possible downstream response. + parent_.resetDownstreamConnection(); + break; + case ConnectionPool::PoolFailureReason::RemoteConnectionFailure: + case ConnectionPool::PoolFailureReason::Timeout: + if (reason == ConnectionPool::PoolFailureReason::Timeout) { + upstream_host_->outlierDetector().putResult(Upstream::Outlier::Result::LocalOriginTimeout); + } else if (reason == ConnectionPool::PoolFailureReason::RemoteConnectionFailure) { + upstream_host_->outlierDetector().putResult( + Upstream::Outlier::Result::LocalOriginConnectFailed); + } + + // TODO(zuercher): distinguish between these cases where appropriate (particularly timeout) + if (!response_started_) { + parent_.sendLocalReply(AppException(AppExceptionType::InternalError, + fmt::format("connection failure '{}'", + (upstream_host_ != nullptr) + ? upstream_host_->address()->asString() + : "to upstream")), + true); + return; + } + + // Error occurred after a partial response, propagate the reset to the downstream. 
+ parent_.resetDownstreamConnection(); + break; + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } +} + +void UpstreamRequest::chargeResponseTiming() { + if (charged_response_timing_ || !request_complete_) { + return; + } + charged_response_timing_ = true; + Event::Dispatcher& dispatcher = parent_.dispatcher(); + const std::chrono::milliseconds response_time = + std::chrono::duration_cast( + dispatcher.timeSource().monotonicTime() - downstream_request_complete_time_); + parent_.recordResponseDuration(response_time.count(), Stats::Histogram::Unit::Milliseconds); +} + +} // namespace Router +} // namespace ThriftProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/network/thrift_proxy/router/upstream_request.h b/source/extensions/filters/network/thrift_proxy/router/upstream_request.h new file mode 100644 index 0000000000000..f5129ce583184 --- /dev/null +++ b/source/extensions/filters/network/thrift_proxy/router/upstream_request.h @@ -0,0 +1,65 @@ +#pragma once + +#include "envoy/common/time.h" +#include "envoy/tcp/conn_pool.h" + +#include "source/extensions/filters/network/thrift_proxy/decoder_events.h" +#include "source/extensions/filters/network/thrift_proxy/metadata.h" +#include "source/extensions/filters/network/thrift_proxy/router/router.h" +#include "source/extensions/filters/network/thrift_proxy/thrift.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace ThriftProxy { +namespace Router { + +struct UpstreamRequest : public Tcp::ConnectionPool::Callbacks { + UpstreamRequest(RequestOwner& parent, Upstream::TcpPoolData& pool_data, + MessageMetadataSharedPtr& metadata, TransportType transport_type, + ProtocolType protocol_type); + ~UpstreamRequest() override; + + FilterStatus start(); + void resetStream(); + void releaseConnection(bool close); + + // Tcp::ConnectionPool::Callbacks + void onPoolFailure(ConnectionPool::PoolFailureReason reason, + absl::string_view 
transport_failure_reason, + Upstream::HostDescriptionConstSharedPtr host) override; + void onPoolReady(Tcp::ConnectionPool::ConnectionDataPtr&& conn, + Upstream::HostDescriptionConstSharedPtr host) override; + + void onRequestStart(bool continue_decoding); + void onRequestComplete(); + void onResponseComplete(); + void onUpstreamHostSelected(Upstream::HostDescriptionConstSharedPtr host); + void onResetStream(ConnectionPool::PoolFailureReason reason); + void chargeResponseTiming(); + + RequestOwner& parent_; + Upstream::TcpPoolData& conn_pool_data_; + MessageMetadataSharedPtr metadata_; + + Tcp::ConnectionPool::Cancellable* conn_pool_handle_{}; + Tcp::ConnectionPool::ConnectionDataPtr conn_data_; + Upstream::HostDescriptionConstSharedPtr upstream_host_; + ThriftConnectionState* conn_state_{}; + TransportPtr transport_; + ProtocolPtr protocol_; + ThriftObjectPtr upgrade_response_; + + bool request_complete_ : 1; + bool response_started_ : 1; + bool response_complete_ : 1; + + bool charged_response_timing_{false}; + MonotonicTime downstream_request_complete_time_; +}; + +} // namespace Router +} // namespace ThriftProxy +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy