diff --git a/pytest.ini b/pytest.ini index 14991b78a5ed3..be97f8e60e6a7 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,4 +1,4 @@ [pytest] -addopts = -raq --ignore=tools/testing/external/*,__init__.py,testing/conf --color=yes --cov-append -p tools.testing.plugin --cov-config=.coveragerc -vv tools +addopts = -raq --ignore=tools/testing/external/*,__init__.py,testing/conf --color=yes --cov-append -p tools.testing.plugin --cov-config=.coveragerc -Werror -vv tools testpaths = tests diff --git a/tools/base/BUILD b/tools/base/BUILD index f768786837847..59461e1f66e05 100644 --- a/tools/base/BUILD +++ b/tools/base/BUILD @@ -16,10 +16,13 @@ envoy_py_library( ], ) +envoy_py_library("tools.base.functional") + envoy_py_library( "tools.base.utils", deps = [ requirement("pyyaml"), + requirement("setuptools"), ], ) diff --git a/tools/base/checker.py b/tools/base/checker.py index 8a73fef52dfe2..6a8f19000c377 100644 --- a/tools/base/checker.py +++ b/tools/base/checker.py @@ -1,20 +1,20 @@ import argparse import asyncio import logging -import os +import pathlib from functools import cached_property -from typing import Optional, Sequence, Tuple, Type +from typing import Any, Iterable, Optional, Sequence, Tuple, Type from tools.base import runner -class Checker(runner.Runner): +class BaseChecker(runner.Runner): """Runs check methods prefixed with `check_` and named in `self.checks` Check methods should call the `self.warn`, `self.error` or `self.succeed` depending upon the outcome of the checks. """ - _active_check: Optional[str] = None + _active_check = "" checks: Tuple[str, ...] 
= () def __init__(self, *args): @@ -24,7 +24,7 @@ def __init__(self, *args): self.warnings = {} @property - def active_check(self) -> Optional[str]: + def active_check(self) -> str: return self._active_check @property @@ -58,14 +58,14 @@ def has_failed(self) -> bool: return bool(self.failed or self.warned) @cached_property - def path(self) -> str: + def path(self) -> pathlib.Path: """The "path" - usually Envoy src dir. This is used for finding configs for the tooling and should be a dir""" try: - path = self.args.path or self.args.paths[0] + path = pathlib.Path(self.args.path or self.args.paths[0]) except IndexError: raise self.parser.error( "Missing path: `path` must be set either as an arg or with --path") - if not os.path.isdir(path): + if not path.is_dir(): raise self.parser.error( "Incorrect path: `path` must be a directory, set either as first arg or with --path" ) @@ -174,7 +174,12 @@ def add_arguments(self, parser: argparse.ArgumentParser) -> None: "Paths to check. At least one path must be specified, or the `path` argument should be provided" ) - def error(self, name: str, errors: list, log: bool = True, log_type: str = "error") -> int: + def error( + self, + name: str, + errors: Optional[Iterable[str]], + log: bool = True, + log_type: str = "error") -> int: """Record (and log) errors for a check type""" if not errors: return 0 @@ -197,13 +202,13 @@ def get_checks(self) -> Sequence[str]: self.checks if not self.args.check else [check for check in self.args.check if check in self.checks]) - def on_check_begin(self, check: str) -> None: + def on_check_begin(self, check: str) -> Any: self._active_check = check self.log.notice(f"[{check}] Running check") - def on_check_run(self, check: str) -> None: + def on_check_run(self, check: str) -> Any: """Callback hook called after each check run""" - self._active_check = None + self._active_check = "" if self.exiting: return elif check in self.errors: @@ -213,11 +218,11 @@ def on_check_run(self, check: str) -> None: 
else: self.log.success(f"[{check}] Check completed successfully") - def on_checks_begin(self) -> None: + def on_checks_begin(self) -> Any: """Callback hook called before all checks""" pass - def on_checks_complete(self) -> int: + def on_checks_complete(self) -> Any: """Callback hook called after all checks have run, and returning the final outcome of a checks_run""" if self.show_summary: self.summary.print_summary() @@ -257,6 +262,21 @@ def warn(self, name: str, warnings: list, log: bool = True) -> None: self.log.warning(f"[{name}] {message}") +class Checker(BaseChecker): + + def on_check_begin(self, check: str) -> None: + super().on_check_begin(check) + + def on_check_run(self, check: str) -> None: + super().on_check_run(check) + + def on_checks_begin(self) -> None: + super().on_checks_begin() + + def on_checks_complete(self) -> int: + return super().on_checks_complete() + + class ForkingChecker(runner.ForkingRunner, Checker): pass @@ -267,7 +287,7 @@ class BazelChecker(runner.BazelRunner, Checker): class CheckerSummary(object): - def __init__(self, checker: Checker): + def __init__(self, checker: BaseChecker): self.checker = checker @property @@ -319,7 +339,7 @@ def _section(self, message: str, lines: list = None) -> list: return section -class AsyncChecker(Checker): +class AsyncChecker(BaseChecker): """Async version of the Checker class for use with asyncio""" async def _run(self) -> int: diff --git a/tools/base/functional.py b/tools/base/functional.py new file mode 100644 index 0000000000000..75772e6e11508 --- /dev/null +++ b/tools/base/functional.py @@ -0,0 +1,68 @@ +# +# Functional utilities +# + +from typing import Any, Callable, Optional + + +class NoCache(Exception): + pass + + +class async_property: # noqa: N801 + name = None + cache_name = "__async_prop_cache__" + _instance = None + + # If the decorator is called with `kwargs` then `fun` is `None` + # and instead `__call__` is triggered with `fun` + def __init__(self, fun: Optional[Callable] = None, 
cache: bool = False): + self.cache = cache + self._fun = fun + self.name = getattr(fun, "__name__", None) + self.__doc__ = getattr(fun, '__doc__') + + def __call__(self, fun: Callable) -> 'async_property': + self._fun = fun + self.name = self.name or fun.__name__ + self.__doc__ = getattr(fun, '__doc__') + return self + + def __get__(self, instance: Any, cls=None) -> Any: + if instance is None: + return self + self._instance = instance + return self.async_result() + + def fun(self, *args, **kwargs): + if self._fun: + return self._fun(*args, **kwargs) + + @property + def prop_cache(self) -> dict: + return getattr(self._instance, self.cache_name, {}) + + # An async wrapper function to return the result + # This is returned when the prop is called + async def async_result(self) -> Any: + # retrieve the value from cache if available + try: + return self.get_cached_prop() + except (NoCache, KeyError): + pass + + # derive the result, set the cache if required, and return the result + return self.set_prop_cache(await self.fun(self._instance)) + + def get_cached_prop(self) -> Any: + if not self.cache: + raise NoCache + return self.prop_cache[self.name] + + def set_prop_cache(self, result: Any) -> Any: + if not self.cache: + return result + cache = self.prop_cache + cache[self.name] = result + setattr(self._instance, self.cache_name, cache) + return result diff --git a/tools/base/requirements.txt b/tools/base/requirements.txt index f7f9ecb473a03..20ae6d1dc142d 100644 --- a/tools/base/requirements.txt +++ b/tools/base/requirements.txt @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile --generate-hashes tools/base/requirements.txt +# pip-compile --allow-unsafe --generate-hashes tools/base/requirements.txt # colorama==0.4.4 \ --hash=sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b \ @@ -52,8 +52,14 @@ pyyaml==5.4.1 \ --hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \ 
--hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \ --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0 - # via -r tools/distribution/requirements.txt + # via -r tools/base/requirements.txt verboselogs==1.7 \ --hash=sha256:d63f23bf568295b95d3530c6864a0b580cec70e7ff974177dead1e4ffbc6ff49 \ --hash=sha256:e33ddedcdfdafcb3a174701150430b11b46ceb64c2a9a26198c76a156568e427 # via -r tools/base/requirements.txt + +# The following packages are considered to be unsafe in a requirements file: +setuptools==57.4.0 \ + --hash=sha256:6bac238ffdf24e8806c61440e755192470352850f3419a52f26ffe0a1a64f465 \ + --hash=sha256:a49230977aa6cfb9d933614d2f7b79036e9945c4cdd7583163f4e920b83418d6 + # via -r tools/base/requirements.txt diff --git a/tools/base/runner.py b/tools/base/runner.py index 5f4d18af6cf6a..bada801b0e253 100644 --- a/tools/base/runner.py +++ b/tools/base/runner.py @@ -3,24 +3,25 @@ # import argparse +import inspect import logging -import os +import pathlib import subprocess import sys from functools import cached_property, wraps -from typing import Callable, Tuple, Optional, Union +from typing import Callable, Optional, Tuple, Type, Union from frozendict import frozendict -import coloredlogs -import verboselogs +import coloredlogs # type:ignore +import verboselogs # type:ignore LOG_LEVELS = (("debug", logging.DEBUG), ("info", logging.INFO), ("warn", logging.WARN), ("error", logging.ERROR)) -LOG_FIELD_STYLES = frozendict( +LOG_FIELD_STYLES: frozendict = frozendict( name=frozendict(color="blue"), levelname=frozendict(color="cyan", bold=True)) LOG_FMT = "%(name)s %(levelname)s %(message)s" -LOG_LEVEL_STYLES = frozendict( +LOG_LEVEL_STYLES: frozendict = frozendict( critical=frozendict(bold=True, color="red"), debug=frozendict(color="green"), error=frozendict(color="red", bold=True), @@ -32,7 +33,7 @@ warning=frozendict(color="yellow", bold=True)) -def catches(errors: Union[Tuple[Exception], Exception]) -> Callable: +def 
catches(errors: Union[Type[Exception], Tuple[Type[Exception], ...]]) -> Callable: """Method decorator to catch specified errors logs and returns 1 for sys.exit if error/s are caught @@ -48,6 +49,7 @@ def run(self): self.myrun() ``` + Can work with `async` methods too. """ def wrapper(fun: Callable) -> Callable: @@ -60,7 +62,15 @@ def wrapped(self, *args, **kwargs) -> Optional[int]: self.log.error(str(e) or repr(e)) return 1 - return wrapped + @wraps(fun) + async def async_wrapped(self, *args, **kwargs) -> Optional[int]: + try: + return await fun(self, *args, **kwargs) + except errors as e: + self.log.error(str(e) or repr(e)) + return 1 + + return async_wrapped if inspect.iscoroutinefunction(fun) else wrapped return wrapper @@ -103,7 +113,7 @@ def log_level_styles(self): return LOG_LEVEL_STYLES @cached_property - def log(self) -> logging.Logger: + def log(self) -> verboselogs.VerboseLogger: """Instantiated logger""" verboselogs.install() logger = logging.getLogger(self.name) @@ -135,8 +145,8 @@ def parser(self) -> argparse.ArgumentParser: return parser @cached_property - def path(self) -> str: - return os.getcwd() + def path(self) -> pathlib.Path: + return pathlib.Path(".") @cached_property def stdout(self) -> logging.Logger: diff --git a/tools/base/tests/test_checker.py b/tools/base/tests/test_checker.py index 9b18a023187c6..7c29d8cef1454 100644 --- a/tools/base/tests/test_checker.py +++ b/tools/base/tests/test_checker.py @@ -3,7 +3,8 @@ import pytest -from tools.base.checker import AsyncChecker, BazelChecker, Checker, CheckerSummary, ForkingChecker +from tools.base.checker import ( + AsyncChecker, BaseChecker, BazelChecker, Checker, CheckerSummary, ForkingChecker) from tools.base.runner import BazelRunner, ForkingRunner @@ -50,7 +51,7 @@ def test_checker_constructor(): == [('path1', 'path2', 'path3'), {}]) assert checker.summary_class == CheckerSummary - assert checker.active_check is None + assert checker.active_check == "" assert "active_check" not in 
checker.__dict__ @@ -129,16 +130,16 @@ class DummyError(Exception): pass checker = Checker("path1", "path2", "path3") patched = patches( + "pathlib", ("Checker.args", dict(new_callable=PropertyMock)), ("Checker.parser", dict(new_callable=PropertyMock)), - "os.path.isdir", prefix="tools.base.checker") - with patched as (m_args, m_parser, m_isdir): + with patched as (m_plib, m_args, m_parser): m_parser.return_value.error = DummyError m_args.return_value.path = path m_args.return_value.paths = paths - m_isdir.return_value = isdir + m_plib.Path.return_value.is_dir.return_value = isdir if not path and not paths: with pytest.raises(DummyError) as e: checker.path @@ -152,12 +153,15 @@ class DummyError(Exception): e.value.args == ('Incorrect path: `path` must be a directory, set either as first arg or with --path',)) else: - assert checker.path == path or paths[0] + assert checker.path == m_plib.Path.return_value + assert ( + list(m_plib.Path.call_args) + == [(path or paths[0],), {}]) assert "path" in checker.__dict__ if path or paths: assert ( - list(m_isdir.call_args) - == [(path or paths[0],), {}]) + list(m_plib.Path.return_value.is_dir.call_args) + == [(), {}]) @pytest.mark.parametrize("paths", [[], ["path1", "path2"]]) @@ -345,7 +349,7 @@ def test_checker_add_arguments(patches): 'help': 'Paths to check. 
At least one path must be specified, or the `path` argument should be provided'}]]) -TEST_ERRORS = ( +TEST_ERRORS: tuple = ( {}, dict(myerror=[]), dict(myerror=["a", "b", "c"]), @@ -416,7 +420,7 @@ def test_checker_exit(patches): == [('exiting', ['Keyboard exit']), {'log_type': 'fatal'}]) -TEST_CHECKS = ( +TEST_CHECKS: tuple = ( None, (), ("check1", ), @@ -477,7 +481,7 @@ def test_checker_on_check_run(patches, errors, warnings, exiting): m_exit.return_value = exiting assert not checker.on_check_run(check) - assert checker.active_check is None + assert checker.active_check == "" if exiting: assert not m_log.called @@ -598,7 +602,7 @@ def test_checker_run(patches, raises): == [(), {}]) -TEST_WARNS = ( +TEST_WARNS: tuple = ( {}, dict(mywarn=[]), dict(mywarn=["a", "b", "c"]), @@ -630,7 +634,7 @@ def test_checker_warn(patches, log, warns): assert not m_log.return_value.warn.called -TEST_SUCCESS = ( +TEST_SUCCESS: tuple = ( {}, dict(mysuccess=[]), dict(mysuccess=["a", "b", "c"]), @@ -702,7 +706,7 @@ def test_checker_summary_print_summary(patches): assert m_status.called -TEST_SECTIONS = ( +TEST_SECTIONS: tuple = ( ("MSG1", ["a", "b", "c"]), ("MSG2", []), ("MSG3", None)) @@ -831,7 +835,7 @@ def test_bazelchecker_constructor(): def test_asynchecker_constructor(): checker = AsyncChecker() - assert isinstance(checker, Checker) + assert isinstance(checker, BaseChecker) @pytest.mark.parametrize("raises", [None, KeyboardInterrupt, Exception]) @@ -840,7 +844,7 @@ def test_asynchecker_run(patches, raises): patched = patches( "asyncio", - "Checker.exit", + "BaseChecker.exit", ("AsyncChecker._run", dict(new_callable=MagicMock)), ("AsyncChecker.on_checks_complete", dict(new_callable=MagicMock)), prefix="tools.base.checker") @@ -892,7 +896,7 @@ def test_asynchecker_run(patches, raises): async def test_asynchecker_on_check_begin(patches): checker = AsyncChecker() patched = patches( - "Checker.on_check_begin", + "BaseChecker.on_check_begin", prefix="tools.base.checker") with patched as 
(m_super, ): @@ -907,7 +911,7 @@ async def test_asynchecker_on_check_begin(patches): async def test_asynchecker_on_check_run(patches): checker = AsyncChecker() patched = patches( - "Checker.on_check_run", + "BaseChecker.on_check_run", prefix="tools.base.checker") with patched as (m_super, ): @@ -922,7 +926,7 @@ async def test_asynchecker_on_check_run(patches): async def test_asynchecker_on_checks_begin(patches): checker = AsyncChecker() patched = patches( - "Checker.on_checks_begin", + "BaseChecker.on_checks_begin", prefix="tools.base.checker") with patched as (m_super, ): @@ -938,7 +942,7 @@ async def test_asynchecker_on_checks_complete(patches): checker = AsyncChecker() patched = patches( - "Checker.on_checks_complete", + "BaseChecker.on_checks_complete", prefix="tools.base.checker") with patched as (m_complete, ): @@ -976,8 +980,8 @@ class SomeError(Exception): checker = AsyncCheckerWithChecks() patched = patches( - "Checker.log", - "Checker.get_checks", + "BaseChecker.log", + "BaseChecker.get_checks", "AsyncChecker.on_checks_begin", "AsyncChecker.on_check_begin", "AsyncChecker.on_check_run", diff --git a/tools/base/tests/test_functional.py b/tools/base/tests/test_functional.py new file mode 100644 index 0000000000000..d2b87aae7cbb2 --- /dev/null +++ b/tools/base/tests/test_functional.py @@ -0,0 +1,80 @@ +from unittest.mock import AsyncMock + +import pytest + +from tools.base import functional + + +@pytest.mark.asyncio +@pytest.mark.parametrize("cache", [None, True, False]) +@pytest.mark.parametrize("raises", [True, False]) +@pytest.mark.parametrize("result", [None, False, "X", 23]) +async def test_functional_async_property(cache, raises, result): + m_async = AsyncMock(return_value=result) + + class SomeError(Exception): + pass + + if cache is None: + decorator = functional.async_property + else: + decorator = functional.async_property(cache=cache) + + class Klass: + + @decorator + async def prop(self): + """This prop deserves some docs""" + if raises: + await 
m_async() + raise SomeError("AN ERROR OCCURRED") + else: + return await m_async() + + klass = Klass() + + # The class.prop should be an instance of async_prop + # and should have the name and docs of the wrapped method. + assert isinstance( + type(klass).prop, + functional.async_property) + assert ( + type(klass).prop.__doc__ + == "This prop deserves some docs") + assert ( + type(klass).prop.name + == "prop") + + if raises: + with pytest.raises(SomeError) as e: + await klass.prop + + assert ( + e.value.args[0] + == 'AN ERROR OCCURRED') + assert ( + list(m_async.call_args) + == [(), {}]) + return + + # results can be repeatedly awaited + assert await klass.prop == result + assert await klass.prop == result + + if not cache: + assert ( + list(list(c) for c in m_async.call_args_list) + == [[(), {}], [(), {}]]) + assert not hasattr(klass, functional.async_property.cache_name) + return + + # with cache we can keep awaiting the result but the fun + # is still only called once + assert await klass.prop == result + assert await klass.prop == result + assert ( + list(list(c) for c in m_async.call_args_list) + == [[(), {}]]) + assert ( + getattr(klass, functional.async_property.cache_name) + == dict(prop=m_async.return_value)) diff --git a/tools/base/tests/test_runner.py b/tools/base/tests/test_runner.py index a1e4e4fd434e8..54693ab486e99 100644 --- a/tools/base/tests/test_runner.py +++ b/tools/base/tests/test_runner.py @@ -40,6 +40,10 @@ class Error2(Exception): def _failing_runner(errors): class DummyFailingRunner(object): + # this dummy runner calls the _runner mock + # when its run/run_async methods are called + # and optionally raises some type of error + # to ensure they are caught as expected log = PropertyMock() _runner = MagicMock() @@ -54,9 +58,18 @@ def run(self, *args, **kwargs): raise self.raises("AN ERROR OCCURRED") return result + @runner.catches(errors) + async def run_async(self, *args, **kwargs): + result = self._runner(*args, **kwargs) + if self.raises: + 
raise self.raises("AN ERROR OCCURRED") + return result + return DummyFailingRunner +@pytest.mark.asyncio +@pytest.mark.parametrize("async_fun", [True, False]) @pytest.mark.parametrize( "errors", [Error1, (Error1, Error2)]) @@ -69,7 +82,7 @@ def run(self, *args, **kwargs): @pytest.mark.parametrize( "kwargs", [{}, dict(key1="VAL1", key2="VAL2")]) -def test_catches(errors, raises, args, kwargs): +async def test_catches(errors, async_fun, raises, args, kwargs): run = _failing_runner(errors)(raises) should_fail = ( raises @@ -81,9 +94,9 @@ def test_catches(errors, raises, args, kwargs): if should_fail: result = 1 with pytest.raises(raises): - run.run(*args, **kwargs) + run.run(*args, **kwargs) if not async_fun else await run.run_async(*args, **kwargs) else: - result = run.run(*args, **kwargs) + result = run.run(*args, **kwargs) if not async_fun else await run.run_async(*args, **kwargs) assert ( list(run._runner.call_args) @@ -234,19 +247,21 @@ def test_runner_parser(patches): assert "parser" in run.__dict__ -def test_checker_path(): +def test_runner_path(patches): run = runner.Runner("path1", "path2", "path3") - cwd_mock = patch("tools.base.runner.os.getcwd") + patched = patches( + "pathlib", + prefix="tools.base.runner") - with cwd_mock as m_cwd: - assert run.path == m_cwd.return_value + with patched as (m_plib, ): + assert run.path == m_plib.Path.return_value assert ( - list(m_cwd.call_args) - == [(), {}]) + list(m_plib.Path.call_args) + == [(".", ), {}]) -def test_checker_stdout(patches): +def test_runner_stdout(patches): run = runner.Runner("path1", "path2", "path3") patched = patches( diff --git a/tools/base/tests/test_utils.py b/tools/base/tests/test_utils.py index bfbde71a2fd47..5ea95efb4ac3a 100644 --- a/tools/base/tests/test_utils.py +++ b/tools/base/tests/test_utils.py @@ -54,7 +54,6 @@ def test_util_buffered_stdout_stderr(): def test_util_buffered_no_stdout_stderr(): - with pytest.raises(utils.BufferUtilError): with utils.buffered(): pass @@ -127,19 +126,19 
@@ def test_util_coverage_with_data_file(patches): def test_util_extract(patches, tarballs): patched = patches( "nested", + "pathlib", "tarfile.open", prefix="tools.base.utils") - with patched as (m_nested, m_open): + with patched as (m_nested, m_plib, m_open): _extractions = [MagicMock(), MagicMock()] m_nested.return_value.__enter__.return_value = _extractions if tarballs: - assert utils.extract("PATH", *tarballs) == "PATH" + assert utils.extract("PATH", *tarballs) == m_plib.Path.return_value else: - with pytest.raises(utils.ExtractError) as e: - utils.extract("PATH", *tarballs) == "PATH" + utils.extract("PATH", *tarballs) if not tarballs: assert ( @@ -151,6 +150,10 @@ def test_util_extract(patches, tarballs): assert not _extract.extractall.called return + assert ( + list(m_plib.Path.call_args) + == [("PATH", ), {}]) + for _extract in _extractions: assert ( list(_extract.extractall.call_args) @@ -187,39 +190,39 @@ def test_util_untar(patches, tarballs): def test_util_from_yaml(patches): patched = patches( - "open", + "pathlib", "yaml", prefix="tools.base.utils") - with patched as (m_open, m_yaml): + with patched as (m_plib, m_yaml): assert utils.from_yaml("PATH") == m_yaml.safe_load.return_value assert ( - list(m_open.call_args) + list(m_plib.Path.call_args) == [("PATH", ), {}]) assert ( list(m_yaml.safe_load.call_args) - == [(m_open.return_value.__enter__.return_value.read.return_value, ), {}]) + == [(m_plib.Path.return_value.read_text.return_value, ), {}]) assert ( - list(m_open.return_value.__enter__.return_value.read.call_args) + list(m_plib.Path.return_value.read_text.call_args) == [(), {}]) def test_util_to_yaml(patches): patched = patches( - "open", + "pathlib", "yaml", prefix="tools.base.utils") - with patched as (m_open, m_yaml): - assert utils.to_yaml("DATA", "PATH") == "PATH" + with patched as (m_plib, m_yaml): + assert utils.to_yaml("DATA", "PATH") == m_plib.Path.return_value - assert ( - list(m_open.call_args) - == [("PATH", "w"), {}]) assert ( 
list(m_yaml.dump.call_args) == [("DATA", ), {}]) assert ( - list(m_open.return_value.__enter__.return_value.write.call_args) + list(m_plib.Path.return_value.write_text.call_args) == [(m_yaml.dump.return_value, ), {}]) + assert ( + list(m_plib.Path.call_args) + == [("PATH", ), {}]) diff --git a/tools/base/utils.py b/tools/base/utils.py index 01813dbb0eea9..cd7526b16271e 100644 --- a/tools/base/utils.py +++ b/tools/base/utils.py @@ -79,8 +79,7 @@ def buffered( stderr.extend(mangle(_stderr.read().strip().split("\n"))) -def extract(path: Union[pathlib.Path, str], *tarballs: Union[pathlib.Path, - str]) -> Union[pathlib.Path, str]: +def extract(path: Union[pathlib.Path, str], *tarballs: Union[pathlib.Path, str]) -> pathlib.Path: if not tarballs: raise ExtractError(f"No tarballs specified for extraction to {path}") openers = nested(*tuple(tarfile.open(tarball) for tarball in tarballs)) @@ -88,11 +87,11 @@ def extract(path: Union[pathlib.Path, str], *tarballs: Union[pathlib.Path, with openers as tarfiles: for tar in tarfiles: tar.extractall(path=path) - return path + return pathlib.Path(path) @contextmanager -def untar(*tarballs: str) -> Iterator[str]: +def untar(*tarballs: Union[pathlib.Path, str]) -> Iterator[pathlib.Path]: """Untar a tarball into a temporary directory for example to list the contents of a tarball: @@ -116,17 +115,16 @@ def untar(*tarballs: str) -> Iterator[str]: yield extract(tmpdir, *tarballs) -def from_yaml(path: str) -> Union[dict, list, str, int]: +def from_yaml(path: Union[pathlib.Path, str]) -> Union[dict, list, str, int]: """Returns the loaded python object from a yaml file given by `path`""" - with open(path) as f: - return yaml.safe_load(f.read()) + return yaml.safe_load(pathlib.Path(path).read_text()) -def to_yaml(data: Union[dict, list, str, int], path: str) -> str: +def to_yaml(data: Union[dict, list, str, int], path: Union[pathlib.Path, str]) -> pathlib.Path: """For given `data` dumps as yaml to provided `path`. 
Returns `path` """ - with open(path, "w") as f: - f.write(yaml.dump(data)) + path = pathlib.Path(path) + path.write_text(yaml.dump(data)) return path diff --git a/tools/code_format/python_check.py b/tools/code_format/python_check.py index 64f1e296f9560..9fabcfb49bafa 100755 --- a/tools/code_format/python_check.py +++ b/tools/code_format/python_check.py @@ -13,13 +13,15 @@ # python requires: flake8, yapf # -import os +import argparse +import pathlib import sys from functools import cached_property +from typing import Iterable, List, Optional, Tuple -from flake8.main.application import Application as Flake8Application +from flake8.main.application import Application as Flake8Application # type:ignore -import yapf +import yapf # type:ignore from tools.base import checker, utils @@ -34,8 +36,8 @@ class PythonChecker(checker.ForkingChecker): checks = ("flake8", "yapf") @property - def diff_file_path(self) -> str: - return self.args.diff_file + def diff_file_path(self) -> Optional[pathlib.Path]: + return pathlib.Path(self.args.diff_file) if self.args.diff_file else None @cached_property def flake8_app(self) -> Flake8Application: @@ -44,12 +46,12 @@ def flake8_app(self) -> Flake8Application: return flake8_app @property - def flake8_args(self) -> list: - return ["--config", self.flake8_config_path, self.path] + def flake8_args(self) -> Tuple[str, ...]: + return ("--config", str(self.flake8_config_path), str(self.path)) @property - def flake8_config_path(self) -> str: - return os.path.join(self.path, FLAKE8_CONFIG) + def flake8_config_path(self) -> pathlib.Path: + return self.path.joinpath(FLAKE8_CONFIG) @property def recurse(self) -> bool: @@ -57,17 +59,17 @@ def recurse(self) -> bool: return self.args.recurse @property - def yapf_config_path(self) -> str: - return os.path.join(self.path, YAPF_CONFIG) + def yapf_config_path(self) -> pathlib.Path: + return self.path.joinpath(YAPF_CONFIG) @property - def yapf_files(self): + def yapf_files(self) -> List[str]: return 
yapf.file_resources.GetCommandLineFiles( self.args.paths, recursive=self.recurse, - exclude=yapf.file_resources.GetExcludePatternsForDir(self.path)) + exclude=yapf.file_resources.GetExcludePatternsForDir(str(self.path))) - def add_arguments(self, parser) -> None: + def add_arguments(self, parser: argparse.ArgumentParser) -> None: super().add_arguments(parser) parser.add_argument( "--recurse", @@ -80,7 +82,7 @@ def add_arguments(self, parser) -> None: def check_flake8(self) -> None: """Run flake8 on files and/or repo""" - errors = [] + errors: List[str] = [] with utils.buffered(stdout=errors, mangle=self._strip_lines): self.flake8_app.run_checks() self.flake8_app.report() @@ -99,14 +101,13 @@ def on_check_run(self, check: str) -> None: def on_checks_complete(self) -> int: if self.diff_file_path and self.has_failed: result = self.subproc_run(["git", "diff", "HEAD"]) - with open(self.diff_file_path, "wb") as f: - f.write(result.stdout) + self.diff_file_path.write_bytes(result.stdout) return super().on_checks_complete() def yapf_format(self, python_file: str) -> tuple: return yapf.yapf_api.FormatFile( python_file, - style_config=self.yapf_config_path, + style_config=str(self.yapf_config_path), in_place=self.fix, print_diff=not self.fix) @@ -120,14 +121,14 @@ def yapf_run(self, python_file: str) -> None: return self.warn("yapf", [f"{python_file}: diff\n{reformatted_source}"]) self.error("yapf", [python_file]) - def _strip_line(self, line) -> str: - return line[len(self.path) + 1:] if line.startswith(f"{self.path}/") else line + def _strip_line(self, line: str) -> str: + return line[len(str(self.path)) + 1:] if line.startswith(f"{self.path}/") else line - def _strip_lines(self, lines) -> list: + def _strip_lines(self, lines: Iterable[str]) -> List[str]: return [self._strip_line(line) for line in lines if line] -def main(*args: list) -> None: +def main(*args: str) -> int: return PythonChecker(*args).run() diff --git a/tools/code_format/tests/test_python_check.py 
b/tools/code_format/tests/test_python_check.py index ab22c8f9089e6..de72bd2c8298e 100644 --- a/tools/code_format/tests/test_python_check.py +++ b/tools/code_format/tests/test_python_check.py @@ -12,12 +12,24 @@ def test_python_checker_constructor(): assert checker.args.paths == ['path1', 'path2', 'path3'] -def test_python_diff_path(): +@pytest.mark.parametrize("diff_path", ["", None, "PATH"]) +def test_python_diff_path(patches, diff_path): checker = python_check.PythonChecker("path1", "path2", "path3") - args_mock = patch("tools.code_format.python_check.PythonChecker.args", new_callable=PropertyMock) + patched = patches( + "pathlib", + ("PythonChecker.args", dict(new_callable=PropertyMock)), + prefix="tools.code_format.python_check") - with args_mock as m_args: - assert checker.diff_file_path == m_args.return_value.diff_file + with patched as (m_plib, m_args): + m_args.return_value.diff_file = diff_path + assert checker.diff_file_path == (m_plib.Path.return_value if diff_path else None) + + if diff_path: + assert ( + list(m_plib.Path.call_args) + == [(m_args.return_value.diff_file, ), {}]) + else: + assert not m_plib.Path.called def test_python_flake8_app(patches): @@ -48,38 +60,37 @@ def test_python_flake8_args(patches): with patched as (m_flake8_config, m_path): assert ( checker.flake8_args - == ['--config', - m_flake8_config.return_value, m_path.return_value]) + == ('--config', + str(m_flake8_config.return_value), + str(m_path.return_value))) def test_python_flake8_config_path(patches): checker = python_check.PythonChecker("path1", "path2", "path3") patched = patches( ("PythonChecker.path", dict(new_callable=PropertyMock)), - "os.path.join", prefix="tools.code_format.python_check") - with patched as (m_path, m_join): - assert checker.flake8_config_path == m_join.return_value + with patched as (m_path, ): + assert checker.flake8_config_path == m_path.return_value.joinpath.return_value assert ( - list(m_join.call_args) - == [(m_path.return_value, 
python_check.FLAKE8_CONFIG), {}]) + list(m_path.return_value.joinpath.call_args) + == [(python_check.FLAKE8_CONFIG, ), {}]) def test_python_yapf_config_path(patches): checker = python_check.PythonChecker("path1", "path2", "path3") patched = patches( ("PythonChecker.path", dict(new_callable=PropertyMock)), - "os.path.join", prefix="tools.code_format.python_check") - with patched as (m_path, m_join): - assert checker.yapf_config_path == m_join.return_value + with patched as (m_path, ): + assert checker.yapf_config_path == m_path.return_value.joinpath.return_value assert ( - list(m_join.call_args) - == [(m_path.return_value, python_check.YAPF_CONFIG), {}]) + list(m_path.return_value.joinpath.call_args) + == [(python_check.YAPF_CONFIG, ), {}]) def test_python_yapf_files(patches): @@ -102,7 +113,7 @@ def test_python_yapf_files(patches): 'exclude': m_yapf_exclude.return_value}]) assert ( list(m_yapf_exclude.call_args) - == [(m_path.return_value,), {}]) + == [(str(m_path.return_value),), {}]) def test_python_add_arguments(patches): @@ -194,7 +205,7 @@ def test_python_check_yapf(patches): == [[('file1',), {}], [('file2',), {}], [('file3',), {}]]) -TEST_CHECK_RESULTS = ( +TEST_CHECK_RESULTS: tuple = ( ("check1", [], []), ("check1", ["check2", "check3"], ["check4", "check5"]), ("check1", ["check1", "check3"], ["check4", "check5"]), @@ -238,15 +249,15 @@ def test_python_on_checks_complete(patches, results): checker = python_check.PythonChecker("path1", "path2", "path3") diff_path, failed = results patched = patches( - "open", "checker.ForkingChecker.subproc_run", "checker.Checker.on_checks_complete", ("PythonChecker.diff_file_path", dict(new_callable=PropertyMock)), ("PythonChecker.has_failed", dict(new_callable=PropertyMock)), prefix="tools.code_format.python_check") - with patched as (m_open, m_fork, m_super, m_diff, m_failed): - m_diff.return_value = diff_path + with patched as (m_fork, m_super, m_diff, m_failed): + if not diff_path: + m_diff.return_value = None 
m_failed.return_value = failed assert checker.on_checks_complete() == m_super.return_value @@ -255,14 +266,10 @@ def test_python_on_checks_complete(patches, results): list(m_fork.call_args) == [(['git', 'diff', 'HEAD'],), {}]) assert ( - list(m_open.call_args) - == [(diff_path, 'wb'), {}]) - assert ( - list(m_open.return_value.__enter__.return_value.write.call_args) + list(m_diff.return_value.write_bytes.call_args) == [(m_fork.return_value.stdout,), {}]) else: assert not m_fork.called - assert not m_open.called assert ( list(m_super.call_args) @@ -285,7 +292,7 @@ def test_python_yapf_format(patches, fix): assert ( list(m_format.call_args) == [('FILENAME',), - {'style_config': m_config.return_value, + {'style_config': str(m_config.return_value), 'in_place': fix, 'print_diff': not fix}]) assert ( diff --git a/tools/dependency/pip_check.py b/tools/dependency/pip_check.py index c924c44936549..91a8456fc2854 100755 --- a/tools/dependency/pip_check.py +++ b/tools/dependency/pip_check.py @@ -11,9 +11,9 @@ # ./tools/dependency/pip_check.py -h # -import os import sys from functools import cached_property +from typing import Iterable, Set from tools.base import checker, utils @@ -25,6 +25,10 @@ # - pip-compile formatting +class PipConfigurationError(Exception): + pass + + class PipChecker(checker.Checker): checks = ("dependabot",) _dependabot_config = DEPENDABOT_CONFIG @@ -41,19 +45,22 @@ def config_requirements(self) -> set: @cached_property def dependabot_config(self) -> dict: """Parsed dependabot config""" - return utils.from_yaml(os.path.join(self.path, self.dependabot_config_path)) + result = utils.from_yaml(self.path.joinpath(self.dependabot_config_path)) + if not isinstance(result, dict): + raise PipConfigurationError( + f"Unable to parse dependabot config: {self.dependabot_config_path}") + return result @property def dependabot_config_path(self) -> str: return self._dependabot_config @cached_property - def requirements_dirs(self) -> set: + def requirements_dirs(self) 
-> Set[str]: """Set of found directories in the repo containing requirements.txt""" return set( - root[len(self.path):] - for root, dirs, files in os.walk(self.path) - if self.requirements_filename in files) + f"/{f.parent.relative_to(self.path)}" for f in self.path.glob("**/*") + if f.name == self.requirements_filename) @property def requirements_filename(self) -> str: @@ -75,12 +82,12 @@ def check_dependabot(self) -> None: missing_config, f"Missing dependabot config for {self.requirements_filename} in dir") - def dependabot_success(self, correct: list) -> None: + def dependabot_success(self, correct: Iterable) -> None: self.succeed( "dependabot", ([f"{self.requirements_filename}: {dirname}" for dirname in sorted(correct)])) - def dependabot_errors(self, missing: list, msg: str) -> None: + def dependabot_errors(self, missing: Iterable, msg: str) -> None: for dirname in sorted(missing): self.error("dependabot", [f"{msg}: {dirname}"]) diff --git a/tools/dependency/tests/test_pip_check.py b/tools/dependency/tests/test_pip_check.py index 1fa7171573015..0c3458626cc89 100644 --- a/tools/dependency/tests/test_pip_check.py +++ b/tools/dependency/tests/test_pip_check.py @@ -1,4 +1,4 @@ -from unittest.mock import patch, PropertyMock +from unittest.mock import MagicMock, patch, PropertyMock import pytest @@ -31,51 +31,67 @@ def test_pip_checker_config_requirements(): == [('updates',), {}]) -def test_pip_checker_dependabot_config(patches): +@pytest.mark.parametrize("isdict", [True, False]) +def test_pip_checker_dependabot_config(patches, isdict): checker = pip_check.PipChecker("path1", "path2", "path3") patched = patches( "utils", ("PipChecker.path", dict(new_callable=PropertyMock)), - "os.path.join", prefix="tools.dependency.pip_check") - with patched as (m_utils, m_path, m_join): - assert checker.dependabot_config == m_utils.from_yaml.return_value + with patched as (m_utils, m_path): + if isdict: + m_utils.from_yaml.return_value = {} + + if isdict: + assert 
checker.dependabot_config == m_utils.from_yaml.return_value + else: + with pytest.raises(pip_check.PipConfigurationError) as e: + checker.dependabot_config + + assert ( + e.value.args[0] + == f'Unable to parse dependabot config: {checker.dependabot_config_path}') assert ( - list(m_join.call_args) - == [(m_path.return_value, checker._dependabot_config), {}]) + list(m_path.return_value.joinpath.call_args) + == [(checker._dependabot_config, ), {}]) assert ( list(m_utils.from_yaml.call_args) - == [(m_join.return_value,), {}]) + == [(m_path.return_value.joinpath.return_value,), {}]) def test_pip_checker_requirements_dirs(patches): checker = pip_check.PipChecker("path1", "path2", "path3") - - dummy_walker = [ - ["ROOT1", ["DIR1", "DIR2"], ["FILE1", "FILE2", "FILE3"]], - ["ROOT2", ["DIR1", "DIR2"], ["FILE1", "FILE2", "REQUIREMENTS_FILE", "FILE3"]], - ["ROOT3", ["DIR1", "DIR2"], ["FILE1", "FILE2", "REQUIREMENTS_FILE", "FILE3"]], - ["ROOT4", ["DIR1", "DIR2"], ["FILE1", "FILE2", "FILE3"]]] - + dummy_glob = [ + "FILE1", "FILE2", "FILE3", + "REQUIREMENTS_FILE", "FILE4", + "REQUIREMENTS_FILE", "FILE5"] patched = patches( ("PipChecker.requirements_filename", dict(new_callable=PropertyMock)), ("PipChecker.path", dict(new_callable=PropertyMock)), - "os.walk", prefix="tools.dependency.pip_check") + expected = [] - with patched as (m_reqs, m_path, m_walk): + with patched as (m_reqs, m_path): m_reqs.return_value = "REQUIREMENTS_FILE" - m_path.return_value = "ROO" - m_walk.return_value = dummy_walker - assert checker.requirements_dirs == {'T3', 'T2'} + _glob = [] - assert m_reqs.called - assert m_path.called - assert ( - list(m_walk.call_args) - == [('ROO',), {}]) + for fname in dummy_glob: + _mock = MagicMock() + _mock.name = fname + if fname == "REQUIREMENTS_FILE": + expected.append(_mock) + _glob.append(_mock) + + m_path.return_value.glob.return_value = _glob + assert checker.requirements_dirs == {f"/{f.parent.relative_to.return_value}" for f in expected} + + for exp in expected: + 
assert ( + list(exp.parent.relative_to.call_args) + == [(m_path.return_value,), {}]) + assert "requirements_dirs" in checker.__dict__ TEST_REQS = ( diff --git a/tools/distribution/distrotest.py b/tools/distribution/distrotest.py index 750a27e034537..265c99ba89817 100644 --- a/tools/distribution/distrotest.py +++ b/tools/distribution/distrotest.py @@ -4,9 +4,9 @@ import shutil from functools import cached_property from itertools import chain -from typing import Callable, Iterable, List, Optional, Tuple, Type, Union +from typing import Callable, Iterable, List, Optional, Tuple, Type -import verboselogs +import verboselogs # type:ignore import aiodocker @@ -91,7 +91,10 @@ def config(self) -> dict: This contains build information for different types of test image, and some defaults for specific test configuration. """ - return utils.from_yaml(self.config_path) + result = utils.from_yaml(self.config_path) + if not isinstance(result, dict): + raise ConfigurationError(f"Unable to parse configuration: {self.config_path}") + return result @cached_property def config_path(self) -> pathlib.Path: @@ -149,7 +152,8 @@ def keyfile(self) -> pathlib.Path: Copies the keyfile to the path on first access. """ # Add the keyfile and return the path - return shutil.copyfile(self._keyfile, self.ctx_keyfile) + shutil.copyfile(self._keyfile, self.ctx_keyfile) + return self.ctx_keyfile @cached_property def keyfile_img_path(self) -> pathlib.PurePosixPath: @@ -164,7 +168,8 @@ def packages_dir(self) -> pathlib.Path: Packages are extracted on first access """ - return utils.extract(self.rel_ctx_packages, self.tarball) + utils.extract(self.rel_ctx_packages, self.tarball) + return self.rel_ctx_packages @cached_property def testfile(self) -> pathlib.Path: @@ -173,7 +178,8 @@ def testfile(self) -> pathlib.Path: Copies the testfile to the path on first access. 
""" # Add the testfile - distrotest.sh - and return the path - return shutil.copyfile(self._testfile, self.ctx_testfile) + shutil.copyfile(self._testfile, self.ctx_testfile) + return self.ctx_testfile @cached_property def testfile_img_path(self) -> pathlib.PurePosixPath: @@ -437,7 +443,7 @@ def __init__( self.distro = name self.build_image = image self.rebuild = rebuild - self._failures = [] + self._failures: List[str] = [] @property def config(self) -> dict: @@ -573,7 +579,7 @@ async def exec(self, container: aiodocker.containers.DockerContainer) -> None: elif _out: self.handle_test_output(_out) - def error(self, errors: Union[list, tuple]) -> int: + def error(self, errors: Optional[Iterable[str]]) -> int: """Fail a test and log the errors""" return self.checker.error(self.checker.active_check, errors) @@ -641,8 +647,9 @@ async def logs(self, container: aiodocker.containers.DockerContainer) -> str: """Return the concatenated container logs, only called if the container fails to start""" return ''.join(await container.log(stdout=True, stderr=True)) - async def on_test_complete(self, container: aiodocker.containers.DockerContainer, - failed: bool) -> Optional[Tuple[str]]: + async def on_test_complete( + self, container: Optional[aiodocker.containers.DockerContainer], + failed: bool) -> Optional[Tuple[str]]: """Stop the container and record the results""" self.log_failures() await self.stop(container) @@ -684,7 +691,7 @@ async def stop(self, container: Optional[aiodocker.containers.DockerContainer] = await container.delete() self.run_log("Container stopped", test=self.package_name) - async def _run(self) -> Optional[Tuple[str]]: + async def _run(self) -> Optional[Tuple[str, ...]]: container = None # As `finally` is always called, regardless of any errors being # raised, we assume that something failed, unless build/start/exec diff --git a/tools/distribution/sign.py b/tools/distribution/sign.py index e9830f0871255..2fa35b882c78a 100644 --- a/tools/distribution/sign.py 
+++ b/tools/distribution/sign.py @@ -20,16 +20,16 @@ # import argparse -import os +import pathlib import shutil import subprocess import sys import tarfile from functools import cached_property from itertools import chain -from typing import Iterator, Optional, Type +from typing import Iterator, Optional, Tuple, Type, Union -import verboselogs +import verboselogs # type:ignore from tools.base import runner, utils from tools.gpg import identity @@ -55,17 +55,17 @@ class SigningError(Exception): class DirectorySigningUtil(object): """Base class for signing utils - eg for deb or rpm packages""" - command_name = None - _package_type = None - ext = None + command_name = "" + _package_type = "" + ext = "" def __init__( self, - path: str, + path: Union[pathlib.Path, str], maintainer: identity.GPGIdentity, log: verboselogs.VerboseLogger, command: Optional[str] = ""): - self.path = path + self._path = path self.maintainer = maintainer self.log = log self._command = command @@ -87,34 +87,35 @@ def package_type(self) -> str: return self._package_type or self.ext @property - def pkg_files(self) -> tuple: + def path(self) -> pathlib.Path: + return pathlib.Path(self._path) + + @property + def pkg_files(self) -> Tuple[pathlib.Path, ...]: """Tuple of paths to package files to sign""" # TODO?(phlax): check maintainer/packager field matches key id return tuple( - os.path.join(self.path, filename) - for filename in os.listdir(self.path) - if filename.endswith(f".{self.ext}")) + pkg_file for pkg_file in self.path.glob("*") if pkg_file.name.endswith(f".{self.ext}")) def sign(self) -> None: """Sign the packages""" for pkg in self.pkg_files: self.sign_pkg(pkg) - def sign_command(self, pkg_file: str) -> tuple: + def sign_command(self, pkg_file: pathlib.Path) -> tuple: """Tuple of command parts to sign a specific package""" - return (self.command,) + self.command_args + (pkg_file,) + return (self.command,) + self.command_args + (str(pkg_file),) - def sign_pkg(self, pkg_file: str) -> None: 
+ def sign_pkg(self, pkg_file: pathlib.Path) -> None: """Sign a specific package file""" - pkg_name = os.path.basename(pkg_file) - self.log.notice(f"Sign package ({self.package_type}): {pkg_name}") + self.log.notice(f"Sign package ({self.package_type}): {pkg_file.name}") response = subprocess.run( self.sign_command(pkg_file), capture_output=True, encoding="utf-8") if response.returncode: raise SigningError(response.stdout + response.stderr) - self.log.success(f"Signed package ({self.package_type}): {pkg_name}") + self.log.success(f"Signed package ({self.package_type}): {pkg_file.name}") # Runner @@ -161,12 +162,12 @@ def package_type(self) -> str: return self.args.package_type @property - def path(self) -> str: + def path(self) -> pathlib.Path: """Path to the packages directory""" - return self.args.path + return pathlib.Path(self.args.path) @property - def tar(self) -> bool: + def tar(self) -> str: return self.args.tar @cached_property @@ -198,33 +199,32 @@ def add_arguments(self, parser: argparse.ArgumentParser) -> None: default="", help="Maintainer email to match when searching for a GPG key to match with") - def archive(self, path: str) -> None: + def archive(self, path: Union[pathlib.Path, str]) -> None: with tarfile.open(self.tar, "w") as tar: tar.add(path, arcname=".") - def get_signing_util(self, package_type: str, path: str) -> DirectorySigningUtil: - return self.signing_utils[package_type](path, self.maintainer, self.log) + def get_signing_util(self, path: pathlib.Path) -> DirectorySigningUtil: + return self.signing_utils[path.name](path, self.maintainer, self.log) @runner.catches((identity.GPGError, SigningError)) - def run(self) -> Optional[int]: + def run(self) -> None: if self.extract: self.sign_tarball() else: self.sign_directory() self.log.success("Successfully signed packages") - def sign(self, package_type: str, path: str) -> None: - self.log.notice(f"Signing {package_type}s ({self.maintainer}) {path}") - self.get_signing_util(package_type, 
path).sign() + def sign(self, path: pathlib.Path) -> None: + self.log.notice(f"Signing {path.name}s ({self.maintainer}) {str(path)}") + self.get_signing_util(path).sign() - def sign_all(self, path: str) -> None: - for package_type in os.listdir(path): - if package_type in self.signing_utils: - target = os.path.join(path, package_type) - self.sign(package_type, target) + def sign_all(self, path: pathlib.Path) -> None: + for directory in path.glob("*"): + if directory.name in self.signing_utils: + self.sign(directory) def sign_directory(self) -> None: - self.sign(self.package_type, self.path) + self.sign(self.path) if self.tar: self.archive(self.path) @@ -244,20 +244,24 @@ class RPMMacro(object): _macro_filename = ".rpmmacros" - def __init__(self, home: str, overwrite: bool = False, **kwargs): - self.home = home + def __init__(self, home: Union[pathlib.Path, str], overwrite: bool = False, **kwargs): + self._home = home self.overwrite = bool(overwrite) self.kwargs = kwargs @property - def path(self) -> str: - return os.path.join(self.home, self._macro_filename) + def home(self) -> pathlib.Path: + return pathlib.Path(self._home) + + @property + def path(self) -> pathlib.Path: + return self.home.joinpath(self._macro_filename) @property def macro(self) -> str: macro = self.template for k, v in self.kwargs.items(): - macro = macro.replace(f"__{k.upper()}__", v) + macro = macro.replace(f"__{k.upper()}__", str(v)) return macro @property @@ -265,10 +269,9 @@ def template(self) -> str: return RPMMACRO_TEMPLATE def write(self) -> None: - if not self.overwrite and os.path.exists(self.path): + if not self.overwrite and self.path.exists(): return - with open(self.path, "w") as f: - f.write(self.macro) + self.path.write_text(self.macro) class RPMSigningUtil(DirectorySigningUtil): @@ -283,7 +286,7 @@ def __init__(self, *args, **kwargs): @cached_property def command(self) -> str: - if not os.path.basename(self.maintainer.gpg_bin) == "gpg2": + if not (self.maintainer.gpg_bin and 
self.maintainer.gpg_bin.name == "gpg2"): raise SigningError("GPG2 is required to sign RPM packages") return super().command @@ -303,8 +306,8 @@ def setup(self) -> None: gpg_bin=self.maintainer.gpg_bin, gpg_config=self.maintainer.gnupg_home).write() - def sign_pkg(self, pkg_file: str) -> None: - os.chmod(pkg_file, 0o755) + def sign_pkg(self, pkg_file: pathlib.Path) -> None: + pkg_file.chmod(0o755) super().sign_pkg(pkg_file) @@ -333,13 +336,13 @@ class DebChangesFiles(object): def __init__(self, src): self.src = src - def __iter__(self) -> Iterator[str]: + def __iter__(self) -> Iterator[pathlib.Path]: """Iterate the required changes files, creating them, yielding the paths of the newly created files, and deleting the original """ for path in self.files: yield path - os.unlink(self.src) + self.src.unlink() @cached_property def distributions(self) -> str: @@ -354,22 +357,20 @@ def distributions(self) -> str: raise SigningError(f"Did not find Distribution field in changes file {self.src}") @property - def files(self) -> Iterator[str]: + def files(self) -> Iterator[pathlib.Path]: """Create changes files for each distro, yielding the paths""" for distro in self.distributions.split(): yield self.changes_file(distro) - def changes_file(self, distro: str) -> str: + def changes_file(self, distro: str) -> pathlib.Path: """Create a `changes` file for a specific distro""" target = self.changes_file_path(distro) - with open(target, "w") as df: - with open(self.src) as f: - df.write(f.read().replace(self.distributions, distro)) + target.write_text(self.src.read_text().replace(self.distributions, distro)) return target - def changes_file_path(self, distro: str) -> str: + def changes_file_path(self, distro: str) -> pathlib.Path: """Path to write the new changes file to""" - return ".".join([os.path.splitext(self.src)[0], distro, "changes"]) + return self.src.with_suffix(f".{distro}.changes") class DebSigningUtil(DirectorySigningUtil): diff --git 
a/tools/distribution/tests/test_distrotest.py b/tools/distribution/tests/test_distrotest.py index 77072ca165d62..0ad033ddd99d9 100644 --- a/tools/distribution/tests/test_distrotest.py +++ b/tools/distribution/tests/test_distrotest.py @@ -46,7 +46,8 @@ def test_config_dunder_getitem(patches): # props -def test_config_config(patches): +@pytest.mark.parametrize("isdict", [True, False]) +def test_config_config(patches, isdict): config = distrotest.DistroTestConfig( "DOCKER", "KEYFILE", "PATH", "TARBALL", "TESTFILE", "MAINTAINER", "VERSION") patched = patches( @@ -55,12 +56,24 @@ def test_config_config(patches): prefix="tools.distribution.distrotest") with patched as (m_utils, m_path): - assert config.config == m_utils.from_yaml.return_value + if isdict: + m_utils.from_yaml.return_value = {} + + if isdict: + assert config.config == m_utils.from_yaml.return_value + else: + with pytest.raises(distrotest.ConfigurationError) as e: + config.config assert ( list(m_utils.from_yaml.call_args) == [(m_path.return_value,), {}]) - assert "config" in config.__dict__ + if isdict: + assert "config" in config.__dict__ + else: + assert ( + e.value.args[0] + == f"Unable to parse configuration: {m_path.return_value}") @pytest.mark.parametrize("config_path", [None, "CONFIG_PATH"]) @@ -173,7 +186,7 @@ def test_config_keyfile(patches): prefix="tools.distribution.distrotest") with patched as (m_shutil, m_key): - assert config.keyfile == m_shutil.copyfile.return_value + assert config.keyfile == m_key.return_value assert ( list(m_shutil.copyfile.call_args) @@ -206,7 +219,7 @@ def test_config_packages_dir(patches): prefix="tools.distribution.distrotest") with patched as (m_utils, m_packages): - assert config.packages_dir == m_utils.extract.return_value + assert config.packages_dir == m_packages.return_value assert ( list(m_utils.extract.call_args) @@ -223,7 +236,7 @@ def test_config_testfile(patches): prefix="tools.distribution.distrotest") with patched as (m_shutil, m_key): - assert 
config.testfile == m_shutil.copyfile.return_value + assert config.testfile == m_key.return_value assert ( list(m_shutil.copyfile.call_args) @@ -301,7 +314,6 @@ def test_config_get_package_type(patches, pkg_type, pkg_types): if pkg_type in pkg_types: assert config.get_package_type("IMAGE") == "Y" else: - with pytest.raises(distrotest.ConfigurationError) as e: config.get_package_type("IMAGE") @@ -1355,7 +1367,6 @@ async def test_distrotest_start(patches, running): if running: assert await dtest.start() == m_create.return_value else: - with pytest.raises(distrotest.ContainerError) as e: await dtest.start() diff --git a/tools/distribution/tests/test_sign.py b/tools/distribution/tests/test_sign.py index e2c80d8af5b36..8cc367e9be41c 100644 --- a/tools/distribution/tests/test_sign.py +++ b/tools/distribution/tests/test_sign.py @@ -18,7 +18,7 @@ def test_util_constructor(command): if command is not None: args += (command, ) util = sign.DirectorySigningUtil(*args) - assert util.path == "PATH" + assert util._path == "PATH" assert util.maintainer == maintainer assert util.log == "LOG" assert util._command == (command or "") @@ -48,10 +48,10 @@ def test_util_command(patches, command_name, command, which): assert ( list(m_shutil.which.call_args) - == [(command_name,), {}]) + == [(command_name or "",), {}]) assert ( e.value.args[0] - == f"Signing software missing ({m_type.return_value}): {command_name}") + == f"Signing software missing ({m_type.return_value}): {command_name or ''}") return result = util.command @@ -66,7 +66,7 @@ def test_util_command(patches, command_name, command, which): assert ( list(m_shutil.which.call_args) - == [(command_name,), {}]) + == [(command_name or "",), {}]) assert result == m_shutil.which.return_value @@ -111,33 +111,29 @@ def test_util_sign_pkg(patches, returncode): packager = sign.PackageSigningRunner("x", "y", "z") maintainer = identity.GPGIdentity(packager) util = sign.DirectorySigningUtil("PATH", maintainer, "LOG") + util.log = MagicMock() + 
pkg_file = MagicMock() patched = patches( - "os", "subprocess", "DirectorySigningUtil.sign_command", ("PackageSigningRunner.log", dict(new_callable=PropertyMock)), ("DirectorySigningUtil.package_type", dict(new_callable=PropertyMock)), prefix="tools.distribution.sign") - util.log = MagicMock() - - with patched as (m_os, m_subproc, m_command, m_log, m_type): + with patched as (m_subproc, m_command, m_log, m_type): m_subproc.run.return_value.returncode = returncode if returncode: with pytest.raises(sign.SigningError) as e: - util.sign_pkg("PACKAGE") + util.sign_pkg(pkg_file) else: - assert not util.sign_pkg("PACKAGE") + assert not util.sign_pkg(pkg_file) - assert ( - list(m_os.path.basename.call_args) - == [('PACKAGE',), {}]) assert ( list(util.log.notice.call_args) - == [(f"Sign package ({m_type.return_value}): {m_os.path.basename.return_value}",), {}]) + == [(f"Sign package ({m_type.return_value}): {pkg_file.name}",), {}]) assert ( list(m_command.call_args) - == [('PACKAGE',), {}]) + == [(pkg_file,), {}]) assert ( list(m_subproc.run.call_args) == [(m_command.return_value,), @@ -147,7 +143,7 @@ def test_util_sign_pkg(patches, returncode): if not returncode: assert ( list(util.log.success.call_args) - == [(f"Signed package ({m_type.return_value}): {m_os.path.basename.return_value}",), {}]) + == [(f"Signed package ({m_type.return_value}): {pkg_file.name}",), {}]) return assert e.value.args[0] == m_subproc.run.return_value.stdout + m_subproc.run.return_value.stderr @@ -163,6 +159,21 @@ def test_util_package_type(ext, package_type): assert util.package_type == package_type or ext +def test_util_path(patches): + packager = sign.PackageSigningRunner("x", "y", "z") + maintainer = identity.GPGIdentity(packager) + util = sign.DirectorySigningUtil("PATH", maintainer, "LOG") + patched = patches( + "pathlib", + prefix="tools.distribution.sign") + with patched as (m_plib, ): + assert util.path == m_plib.Path.return_value + + assert ( + list(m_plib.Path.call_args) + == 
[(util._path,), {}]) + + @pytest.mark.parametrize( "files", [[], @@ -174,34 +185,30 @@ def test_util_pkg_files(patches, files): maintainer = identity.GPGIdentity(packager) util = sign.DirectorySigningUtil("PATH", maintainer, "LOG") patched = patches( - "os", ("DirectorySigningUtil.ext", dict(new_callable=PropertyMock)), + ("DirectorySigningUtil.path", dict(new_callable=PropertyMock)), prefix="tools.distribution.sign") - with patched as (m_os, m_ext): + with patched as (m_ext, m_path): + _glob = {} + + for _path in files: + _mock = MagicMock() + _mock.name = _path + _glob[_path] = _mock + m_path.return_value.glob.return_value = _glob.values() + m_ext.return_value = "EXT" - m_os.listdir.return_value = files result = util.pkg_files expected = [fname for fname in files if fname.endswith(".EXT")] assert ( - list(m_os.listdir.call_args) - == [("PATH",), {}]) - if not expected: - assert not m_os.path.join.called - assert not result - else: - assert ( - result - == tuple( - m_os.path.join.return_value - for fname in expected)) - assert ( - list(list(c) for c in m_os.path.join.call_args_list) - == [[("PATH", fname), {}] - for fname in expected]) - + list(m_path.return_value.glob.call_args) + == [("*",), {}]) assert "pkg_files" not in util.__dict__ + assert ( + result + == tuple(_glob[k] for k in expected)) # PackageSigningRunner @@ -305,14 +312,17 @@ def test_packager_package_type(patches): def test_packager_path(patches): packager = sign.PackageSigningRunner("x", "y", "z") - patched = patches( + "pathlib", ("PackageSigningRunner.args", dict(new_callable=PropertyMock)), prefix="tools.distribution.sign") - with patched as (m_args, ): - assert packager.path == m_args.return_value.path + with patched as (m_plib, m_args): + assert packager.path == m_plib.Path.return_value + assert ( + list(m_plib.Path.call_args) + == [(m_args.return_value.path, ), {}]) assert "path" not in packager.__dict__ @@ -390,16 +400,17 @@ def test_packager_get_signing_util(patches): 
("PackageSigningRunner.maintainer", dict(new_callable=PropertyMock)), ("PackageSigningRunner.signing_utils", dict(new_callable=PropertyMock)), prefix="tools.distribution.sign") + path = MagicMock() with patched as (m_log, m_maintainer, m_utils): - assert packager.get_signing_util("UTIL", "PATH") == m_utils.return_value.__getitem__.return_value.return_value + assert packager.get_signing_util(path) == m_utils.return_value.__getitem__.return_value.return_value assert ( list(m_utils.return_value.__getitem__.call_args) - == [("UTIL",), {}]) + == [(path.name,), {}]) assert ( list(m_utils.return_value.__getitem__.return_value.call_args) - == [("PATH", m_maintainer.return_value, m_log.return_value), {}]) + == [(path, m_maintainer.return_value, m_log.return_value), {}]) @pytest.mark.parametrize("extract", [True, False]) @@ -460,16 +471,17 @@ def test_packager_sign(patches): ("PackageSigningRunner.log", dict(new_callable=PropertyMock)), ("PackageSigningRunner.maintainer", dict(new_callable=PropertyMock)), prefix="tools.distribution.sign") + path = MagicMock() with patched as (m_util, m_log, m_maintainer): - assert not packager.sign("PACKAGE_TYPE", "PATH") + assert not packager.sign(path) assert ( list(m_log.return_value.notice.call_args) - == [(f"Signing PACKAGE_TYPEs ({m_maintainer.return_value}) PATH",), {}]) + == [(f"Signing {path.name}s ({m_maintainer.return_value}) {path}",), {}]) assert ( list(m_util.call_args) - == [('PACKAGE_TYPE', 'PATH'), {}]) + == [(path, ), {}]) assert ( list(m_util.return_value.sign.call_args) == [(), {}]) @@ -480,25 +492,29 @@ def test_packager_sign(patches): def test_packager_sign_all(patches, listdir, utils): packager = sign.PackageSigningRunner("x", "y", "z") patched = patches( - "os", "PackageSigningRunner.sign", ("PackageSigningRunner.signing_utils", dict(new_callable=PropertyMock)), prefix="tools.distribution.sign") + path = MagicMock() + + with patched as (m_sign, m_utils): + _glob = {} - with patched as (m_os, m_sign, m_utils): - 
m_os.listdir.return_value = listdir + for _path in listdir: + _mock = MagicMock() + _mock.name = _path + _glob[_path] = _mock + path.glob.return_value = _glob.values() m_utils.return_value = utils - assert not packager.sign_all("PATH") + assert not packager.sign_all(path) + assert ( - list(m_os.listdir.call_args) - == [('PATH',), {}]) + list(path.glob.call_args) + == [('*',), {}]) expected = [x for x in listdir if x in utils] - assert ( - list(list(c) for c in m_os.path.join.call_args_list) - == [[('PATH', k), {}] for k in expected]) assert ( list(list(c) for c in m_sign.call_args_list) - == [[(k, m_os.path.join.return_value), {}] for k in expected]) + == [[(_glob[k], ), {}] for k in expected]) @pytest.mark.parametrize("tar", [True, False]) @@ -507,18 +523,17 @@ def test_packager_sign_directory(patches, tar): patched = patches( "PackageSigningRunner.archive", "PackageSigningRunner.sign", - ("PackageSigningRunner.package_type", dict(new_callable=PropertyMock)), ("PackageSigningRunner.path", dict(new_callable=PropertyMock)), ("PackageSigningRunner.tar", dict(new_callable=PropertyMock)), prefix="tools.distribution.sign") - with patched as (m_archive, m_sign, m_type, m_path, m_tar): + with patched as (m_archive, m_sign, m_path, m_tar): m_tar.return_value = tar assert not packager.sign_directory() assert ( list(m_sign.call_args) - == [(m_type.return_value, m_path.return_value), {}]) + == [(m_path.return_value, ), {}]) if not tar: assert not m_archive.called return @@ -577,23 +592,36 @@ def test_rpmmacro_constructor(patches, overwrite, kwargs): if overwrite != [] else sign.RPMMacro("HOME", **kwargs)) assert rpmmacro._macro_filename == ".rpmmacros" - assert rpmmacro.home == "HOME" + assert rpmmacro._home == "HOME" assert rpmmacro.overwrite == bool(overwrite or False) assert rpmmacro.kwargs == kwargs assert rpmmacro.template == sign.RPMMACRO_TEMPLATE +def test_rpmmacro_home(patches): + rpmmacro = sign.RPMMacro("HOME") + patched = patches( + "pathlib", + 
prefix="tools.distribution.sign") + with patched as (m_plib, ): + assert rpmmacro.home == m_plib.Path.return_value + + assert ( + list(m_plib.Path.call_args) + == [(rpmmacro._home,), {}]) + + def test_rpmmacro_path(patches): rpmmacro = sign.RPMMacro("HOME") patched = patches( - "os", + ("RPMMacro.home", dict(new_callable=PropertyMock)), prefix="tools.distribution.sign") - with patched as (m_os, ): - assert rpmmacro.path == m_os.path.join.return_value + with patched as (m_home, ): + assert rpmmacro.path == m_home.return_value.joinpath.return_value assert ( - list(m_os.path.join.call_args) - == [('HOME', rpmmacro._macro_filename), {}]) + list(m_home.return_value.joinpath.call_args) + == [(rpmmacro._macro_filename, ), {}]) @pytest.mark.parametrize("kwargs", [{}, dict(K1="V1", K2="V2")]) @@ -621,34 +649,28 @@ def test_rpmmacro_macro(patches, kwargs): def test_rpmmacro_write(patches, overwrite, exists): rpmmacro = sign.RPMMacro("HOME") patched = patches( - "open", - "os", ("RPMMacro.macro", dict(new_callable=PropertyMock)), ("RPMMacro.path", dict(new_callable=PropertyMock)), prefix="tools.distribution.sign") rpmmacro.overwrite = overwrite - with patched as (m_open, m_os, m_macro, m_path): - m_os.path.exists.return_value = exists + with patched as (m_macro, m_path): + m_path.return_value.exists.return_value = exists assert not rpmmacro.write() if not overwrite: assert ( - list(m_os.path.exists.call_args) - == [(m_path.return_value,), {}]) + list(m_path.return_value.exists.call_args) + == [(), {}]) else: - assert not m_os.path.join.called - assert not m_os.exists.join.called + assert not m_path.return_value.exists.join.called if not overwrite and exists: - assert not m_open.called + assert not m_path.return_value.write_text.called return assert ( - list(m_open.call_args) - == [(m_path.return_value, 'w'), {}]) - assert ( - list(m_open.return_value.__enter__.return_value.write.call_args) + list(m_path.return_value.write_text.call_args) == [(m_macro.return_value,), {}]) @@ 
-683,14 +705,13 @@ def test_rpmsign_constructor(patches, args, kwargs): def test_rpmsign_command(patches, gpg2): maintainer = identity.GPGIdentity() patched = patches( - "os", "RPMSigningUtil.__init__", ("DirectorySigningUtil.command", dict(new_callable=PropertyMock)), ("identity.GPGIdentity.gpg_bin", dict(new_callable=PropertyMock)), prefix="tools.distribution.sign") - with patched as (m_os, m_init, m_super, m_gpg): - m_os.path.basename.return_value = "gpg2" if gpg2 else "notgpg2" + with patched as (m_init, m_super, m_gpg): + m_gpg.return_value.name = "gpg2" if gpg2 else "notgpg2" m_init.return_value = None rpmsign = sign.RPMSigningUtil("PATH", maintainer, "LOG") rpmsign.maintainer = maintainer @@ -705,9 +726,6 @@ def test_rpmsign_command(patches, gpg2): e.value.args[0] == 'GPG2 is required to sign RPM packages') - assert ( - list(m_os.path.basename.call_args) - == [(m_gpg.return_value,), {}]) if gpg2: assert "command" in rpmsign.__dict__ else: @@ -735,7 +753,7 @@ def test_rpmsign_command_args(patches): class DummyRPMSigningUtil(sign.RPMSigningUtil): def __init__(self, path, maintainer): - self.path = path + self._path = path self.maintainer = maintainer @@ -765,19 +783,19 @@ def test_rpmsign_sign_pkg(patches): maintainer = identity.GPGIdentity(packager) rpmsign = DummyRPMSigningUtil("PATH", maintainer) patched = patches( - "os", "DirectorySigningUtil.sign_pkg", prefix="tools.distribution.sign") + file = MagicMock() - with patched as (m_os, m_sign): - assert not rpmsign.sign_pkg("FILE") + with patched as (m_sign, ): + assert not rpmsign.sign_pkg(file) assert ( - list(m_os.chmod.call_args) - == [('FILE', 0o755), {}]) + list(file.chmod.call_args) + == [(0o755, ), {}]) assert ( list(m_sign.call_args) - == [('FILE',), {}]) + == [(file,), {}]) # DebChangesFiles @@ -788,23 +806,23 @@ def test_changes_constructor(): def test_changes_dunder_iter(patches): - changes = sign.DebChangesFiles("SRC") + path = MagicMock() + changes = sign.DebChangesFiles(path) patched = patches( 
- "os", ("DebChangesFiles.files", dict(new_callable=PropertyMock)), prefix="tools.distribution.sign") _files = ["FILE1", "FILE2", "FILE3"] - with patched as (m_os, m_files): + with patched as (m_files, ): m_files.return_value = _files result = changes.__iter__() assert list(result) == _files assert isinstance(result, types.GeneratorType) assert ( - list(m_os.unlink.call_args) - == [('SRC',), {}]) + list(path.unlink.call_args) + == [(), {}]) @pytest.mark.parametrize( @@ -885,14 +903,14 @@ def test_changes_files(patches): def test_changes_changes_file(patches): - changes = sign.DebChangesFiles("SRC") + path = MagicMock() + changes = sign.DebChangesFiles(path) patched = patches( - "open", "DebChangesFiles.changes_file_path", ("DebChangesFiles.distributions", dict(new_callable=PropertyMock)), prefix="tools.distribution.sign") - with patched as (m_open, m_path, m_distros): + with patched as (m_path, m_distros): assert ( changes.changes_file("DISTRO") == m_path.return_value) @@ -901,29 +919,23 @@ def test_changes_changes_file(patches): list(m_path.call_args) == [('DISTRO',), {}]) assert ( - list(list(c) for c in m_open.call_args_list) - == [[(m_path.return_value, 'w'), {}], - [('SRC',), {}]]) - assert ( - list(m_open.return_value.__enter__.return_value.write.call_args) - == [(m_open.return_value.__enter__.return_value.read.return_value.replace.return_value,), {}]) + list(m_path.return_value.write_text.call_args) + == [(path.read_text.return_value.replace.return_value,), {}]) assert ( - list(m_open.return_value.__enter__.return_value.read.call_args) + list(path.read_text.call_args) == [(), {}]) assert ( - list(m_open.return_value.__enter__.return_value.read.return_value.replace.call_args) - == [(m_distros.return_value, 'DISTRO'), {}]) + list(path.read_text.return_value.replace.call_args) + == [(m_distros.return_value, "DISTRO"), {}]) -@pytest.mark.parametrize( - "path", - [("SRC", "SRC.DISTRO.changes"), - ("SRC.changes", "SRC.DISTRO.changes"), - ("SRC.FOO.BAR.changes", 
"SRC.FOO.BAR.DISTRO.changes")]) -def test_changes_file_path(path): - path, expected = path +def test_changes_file_path(): + path = MagicMock() changes = sign.DebChangesFiles(path) - assert changes.changes_file_path("DISTRO") == expected + assert changes.changes_file_path("DISTRO") == path.with_suffix.return_value + assert ( + list(path.with_suffix.call_args) + == [('.DISTRO.changes',), {}]) # DebSigningUtil @@ -939,7 +951,7 @@ def test_debsign_constructor(patches, args): assert debsign.command_name == "debsign" assert debsign._package_type == "deb" assert debsign.changes_files == sign.DebChangesFiles - assert debsign.path == "PATH" + assert debsign._path == "PATH" assert debsign.maintainer == maintainer assert debsign.log == "LOG" diff --git a/tools/docker/utils.py b/tools/docker/utils.py index 93ca70592040b..b28cce2876f2d 100644 --- a/tools/docker/utils.py +++ b/tools/docker/utils.py @@ -1,7 +1,7 @@ import tarfile import tempfile from contextlib import asynccontextmanager -from typing import Callable, Iterator, Optional +from typing import AsyncIterator, Callable, Optional import aiodocker @@ -11,7 +11,7 @@ class BuildError(Exception): async def _build_image( - tar: tempfile.NamedTemporaryFile, + tar, #: IO[bytes] (`docker.images.build` expects `BinaryIO`) docker: aiodocker.Docker, context: str, tag: str, @@ -75,7 +75,7 @@ async def myimage(): @asynccontextmanager -async def docker_client(url: Optional[str] = "") -> Iterator[aiodocker.Docker]: +async def docker_client(url: Optional[str] = "") -> AsyncIterator[aiodocker.Docker]: """Aiodocker client For example to dump the docker image data: diff --git a/tools/docs/sphinx_runner.py b/tools/docs/sphinx_runner.py index cd211bdf1a996..1baf597f839ab 100644 --- a/tools/docs/sphinx_runner.py +++ b/tools/docs/sphinx_runner.py @@ -1,15 +1,17 @@ import argparse import os +import pathlib import platform import re import sys import tarfile import tempfile from functools import cached_property +from typing import Tuple -from 
colorama import Fore, Style +from colorama import Fore, Style # type:ignore -from sphinx.cmd.build import main as sphinx_build +from sphinx.cmd.build import main as sphinx_build # type:ignore from tools.base import runner, utils @@ -32,9 +34,9 @@ def blob_sha(self) -> str: return self.docs_tag or self.build_sha @property - def build_dir(self) -> str: + def build_dir(self) -> pathlib.Path: """Returns current build_dir - most likely a temp directory""" - return self._build_dir + return pathlib.Path(self.tempdir.name) @property def build_sha(self) -> str: @@ -47,17 +49,17 @@ def colors(self) -> dict: return dict(chrome=Fore.LIGHTYELLOW_EX, key=Fore.LIGHTCYAN_EX, value=Fore.LIGHTMAGENTA_EX) @cached_property - def config_file(self) -> str: + def config_file(self) -> pathlib.Path: """Populates a config file with self.configs and returns the file path""" return utils.to_yaml(self.configs, self.config_file_path) @property - def config_file_path(self) -> str: + def config_file_path(self) -> pathlib.Path: """Path to a (temporary) build config""" - return os.path.join(self.build_dir, "build.yaml") + return self.build_dir.joinpath("build.yaml") @cached_property - def configs(self) -> str: + def configs(self) -> dict: """Build configs derived from provided args""" _configs = dict( version_string=self.version_string, @@ -66,15 +68,15 @@ def configs(self) -> str: version_number=self.version_number, docker_image_tag_name=self.docker_image_tag_name) if self.validator_path: - _configs["validator_path"] = self.validator_path + _configs["validator_path"] = str(self.validator_path) if self.descriptor_path: - _configs["descriptor_path"] = self.descriptor_path + _configs["descriptor_path"] = str(self.descriptor_path) return _configs @property - def descriptor_path(self) -> str: + def descriptor_path(self) -> pathlib.Path: """Path to a descriptor file for config validation""" - return os.path.abspath(self.args.descriptor_path) + return pathlib.Path(self.args.descriptor_path) @property def 
docker_image_tag_name(self) -> str: @@ -87,14 +89,14 @@ def docs_tag(self) -> str: return self.args.docs_tag @cached_property - def html_dir(self) -> str: + def html_dir(self) -> pathlib.Path: """Path to (temporary) directory for outputting html""" - return os.path.join(self.build_dir, "generated/html") + return self.build_dir.joinpath("generated", "html") @property - def output_filename(self) -> str: + def output_filename(self) -> pathlib.Path: """Path to tar file for saving generated html docs""" - return self.args.output_filename + return pathlib.Path(self.args.output_filename) @property def py_compatible(self) -> bool: @@ -107,40 +109,44 @@ def release_level(self) -> str: return "tagged" if self.docs_tag else "pre-release" @cached_property - def rst_dir(self) -> str: + def rst_dir(self) -> pathlib.Path: """Populates an rst directory with contents of given rst tar, and returns the path to the directory """ - rst_dir = os.path.join(self.build_dir, "generated/rst") + rst_dir = self.build_dir.joinpath("generated", "rst") if self.rst_tar: utils.extract(rst_dir, self.rst_tar) return rst_dir @property - def rst_tar(self) -> str: + def rst_tar(self) -> pathlib.Path: """Path to the rst tarball""" - return self.args.rst_tar + return pathlib.Path(self.args.rst_tar) @property - def sphinx_args(self) -> list: + def sphinx_args(self) -> Tuple[str, ...]: """Command args for sphinx""" - return ["-W", "--keep-going", "--color", "-b", "html", self.rst_dir, self.html_dir] + return ( + "-W", "--keep-going", "--color", "-b", "html", str(self.rst_dir), str(self.html_dir)) + + @cached_property + def tempdir(self) -> tempfile.TemporaryDirectory: + return tempfile.TemporaryDirectory() @property - def validator_path(self) -> str: + def validator_path(self) -> pathlib.Path: """Path to validator utility for validating snippets""" - return os.path.abspath(self.args.validator_path) + return pathlib.Path(self.args.validator_path) @property - def version_file(self) -> str: + def 
version_file(self) -> pathlib.Path: """Path to version files for deriving docs version""" - return self.args.version_file + return pathlib.Path(self.args.version_file) @cached_property def version_number(self) -> str: """Semantic version""" - with open(self.version_file) as f: - return f.read().strip() + return self.version_file.read_text().strip() @property def version_string(self) -> str: @@ -182,25 +188,32 @@ def check_env(self) -> None: raise SphinxEnvError( "Given git tag does not match the VERSION file content:" f"{self.docs_tag} vs v{self.version_number}") - with open(os.path.join(self.rst_dir, "version_history/current.rst")) as f: - if not self.version_number in f.read(): - raise SphinxEnvError( - f"Git tag ({self.version_number}) not found in version_history/current.rst") + # this should probs only check the first line + version_current = self.rst_dir.joinpath("version_history", "current.rst").read_text() + if not self.version_number in version_current: + raise SphinxEnvError( + f"Git tag ({self.version_number}) not found in version_history/current.rst") + + def cleanup(self): + if "tempdir" in self.__dict__: + self.tempdir.cleanup() + del self.__dict__["tempdir"] def create_tarball(self) -> None: with tarfile.open(self.output_filename, "w") as tar: tar.add(self.html_dir, arcname=".") def run(self) -> int: - with tempfile.TemporaryDirectory() as build_dir: - return self._run(build_dir) + try: + return self._run() + finally: + self.cleanup() def _color(self, msg, name=None): return f"{self.colors[name or 'chrome']}{msg}{Style.RESET_ALL}" - def _run(self, build_dir): - self._build_dir = build_dir - os.environ["ENVOY_DOCS_BUILD_CONFIG"] = self.config_file + def _run(self): + os.environ["ENVOY_DOCS_BUILD_CONFIG"] = str(self.config_file) try: self.check_env() except SphinxEnvError as e: diff --git a/tools/docs/tests/test_sphinx_runner.py b/tools/docs/tests/test_sphinx_runner.py index 9e2f1df491aab..86a16b1eddc2d 100644 --- 
a/tools/docs/tests/test_sphinx_runner.py +++ b/tools/docs/tests/test_sphinx_runner.py @@ -7,9 +7,6 @@ def test_sphinx_runner_constructor(): runner = sphinx_runner.SphinxRunner() - assert runner.build_dir == "." - runner._build_dir = "foo" - assert runner.build_dir == "foo" assert runner._build_sha == "UNKNOWN" assert "blob_dir" not in runner.__dict__ @@ -31,6 +28,22 @@ def test_sphinx_runner_blob_sha(patches, docs_tag): assert "blob_sha" not in runner.__dict__ +def test_sphinx_runner_build_dir(patches): + runner = sphinx_runner.SphinxRunner() + patched = patches( + "pathlib", + ("SphinxRunner.tempdir", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_plib, m_temp): + assert runner.build_dir == m_plib.Path.return_value + + assert ( + list(m_plib.Path.call_args) + == [(m_temp.return_value.name, ), {}]) + assert "build_dir" not in runner.__dict__ + + @pytest.mark.parametrize("build_sha", [None, "", "SOME_BUILD_SHA"]) def test_sphinx_runner_build_sha(patches, build_sha): runner = sphinx_runner.SphinxRunner() @@ -87,16 +100,15 @@ def test_sphinx_runner_config_file(patches): def test_sphinx_runner_config_file_path(patches): runner = sphinx_runner.SphinxRunner() patched = patches( - "os.path", ("SphinxRunner.build_dir", dict(new_callable=PropertyMock)), prefix="tools.docs.sphinx_runner") - with patched as (m_path, m_build): - assert runner.config_file_path == m_path.join.return_value + with patched as (m_build, ): + assert runner.config_file_path == m_build.return_value.joinpath.return_value assert ( - list(m_path.join.call_args) - == [(m_build.return_value, 'build.yaml',), {}]) + list(m_build.return_value.joinpath.call_args) + == [('build.yaml',), {}]) assert "config_file_path" not in runner.__dict__ @@ -120,7 +132,10 @@ def test_sphinx_runner_configs(patches): _configs = {} for k, v in mapping.items(): - _configs[k] = _mocks[list(mapping.values()).index(v)] + _v = _mocks[list(mapping.values()).index(v)] + if k in 
["validator_path", "descriptor_path"]: + _v = str(_v) + _configs[k] = _v assert result == _configs assert "configs" in runner.__dict__ @@ -128,17 +143,17 @@ def test_sphinx_runner_configs(patches): def test_sphinx_runner_descriptor_path(patches): runner = sphinx_runner.SphinxRunner() patched = patches( - "os.path", + "pathlib", ("SphinxRunner.args", dict(new_callable=PropertyMock)), prefix="tools.docs.sphinx_runner") - with patched as (m_path, m_args): + with patched as (m_plib, m_args): assert ( runner.descriptor_path - == m_path.abspath.return_value) + == m_plib.Path.return_value) assert ( - list(m_path.abspath.call_args) + list(m_plib.Path.call_args) == [(m_args.return_value.descriptor_path,), {}]) assert "descriptor_path" not in runner.__dict__ @@ -177,29 +192,31 @@ def test_sphinx_runner_docs_tag(patches): def test_sphinx_runner_html_dir(patches): runner = sphinx_runner.SphinxRunner() patched = patches( - "os.path", ("SphinxRunner.build_dir", dict(new_callable=PropertyMock)), - ("SphinxRunner.args", dict(new_callable=PropertyMock)), prefix="tools.docs.sphinx_runner") - with patched as (m_path, m_build, m_args): - assert runner.html_dir == m_path.join.return_value + with patched as (m_build, ): + assert runner.html_dir == m_build.return_value.joinpath.return_value assert ( - list(m_path.join.call_args) - == [(m_build.return_value, 'generated/html'), {}]) - + list(m_build.return_value.joinpath.call_args) + == [('generated', 'html'), {}]) assert "html_dir" in runner.__dict__ def test_sphinx_runner_output_filename(patches): runner = sphinx_runner.SphinxRunner() patched = patches( + "pathlib", ("SphinxRunner.args", dict(new_callable=PropertyMock)), prefix="tools.docs.sphinx_runner") - with patched as (m_args, ): - assert runner.output_filename == m_args.return_value.output_filename + with patched as (m_plib, m_args): + assert runner.output_filename == m_plib.Path.return_value + + assert ( + list(m_plib.Path.call_args) + == [(m_args.return_value.output_filename, ), 
{}]) assert "output_filename" not in runner.__dict__ @@ -246,24 +263,24 @@ def test_sphinx_runner_release_level(patches, docs_tag): def test_sphinx_runner_rst_dir(patches, rst_tar): runner = sphinx_runner.SphinxRunner() patched = patches( - "os.path", + "pathlib", "utils", ("SphinxRunner.build_dir", dict(new_callable=PropertyMock)), ("SphinxRunner.rst_tar", dict(new_callable=PropertyMock)), prefix="tools.docs.sphinx_runner") - with patched as (m_path, m_utils, m_dir, m_rst): + with patched as (m_plib, m_utils, m_dir, m_rst): m_rst.return_value = rst_tar - assert runner.rst_dir == m_path.join.return_value + assert runner.rst_dir == m_dir.return_value.joinpath.return_value assert ( - list(m_path.join.call_args) - == [(m_dir.return_value, 'generated/rst'), {}]) + list(m_dir.return_value.joinpath.call_args) + == [('generated', 'rst'), {}]) if rst_tar: assert ( list(m_utils.extract.call_args) - == [(m_path.join.return_value, rst_tar), {}]) + == [(m_dir.return_value.joinpath.return_value, rst_tar), {}]) else: assert not m_utils.extract.called assert "rst_dir" in runner.__dict__ @@ -272,12 +289,16 @@ def test_sphinx_runner_rst_dir(patches, rst_tar): def test_sphinx_runner_rst_tar(patches): runner = sphinx_runner.SphinxRunner() patched = patches( + "pathlib", ("SphinxRunner.args", dict(new_callable=PropertyMock)), prefix="tools.docs.sphinx_runner") - with patched as (m_args, ): - assert runner.rst_tar == m_args.return_value.rst_tar + with patched as (m_plib, m_args): + assert runner.rst_tar == m_plib.Path.return_value + assert ( + list(m_plib.Path.call_args) + == [(m_args.return_value.rst_tar, ), {}]) assert "rst_tar" not in runner.__dict__ @@ -291,27 +312,42 @@ def test_sphinx_runner_sphinx_args(patches): with patched as (m_html, m_rst): assert ( runner.sphinx_args - == ['-W', '--keep-going', '--color', '-b', 'html', - m_rst.return_value, - m_html.return_value]) + == ('-W', '--keep-going', '--color', '-b', 'html', + str(m_rst.return_value), + str(m_html.return_value))) 
assert "sphinx_args" not in runner.__dict__ +def test_sphinx_runner_tempdir(patches): + runner = sphinx_runner.SphinxRunner() + patched = patches( + "tempfile", + prefix="tools.docs.sphinx_runner") + + with patched as (m_temp, ): + assert runner.tempdir == m_temp.TemporaryDirectory.return_value + + assert ( + list(m_temp.TemporaryDirectory.call_args) + == [(), {}]) + assert "tempdir" in runner.__dict__ + + def test_sphinx_runner_validator_path(patches): runner = sphinx_runner.SphinxRunner() patched = patches( - "os.path", + "pathlib", ("SphinxRunner.args", dict(new_callable=PropertyMock)), prefix="tools.docs.sphinx_runner") - with patched as (m_path, m_args): + with patched as (m_plib, m_args): assert ( runner.validator_path - == m_path.abspath.return_value) + == m_plib.Path.return_value) assert ( - list(m_path.abspath.call_args) + list(m_plib.Path.call_args) == [(m_args.return_value.validator_path,), {}]) assert "validator_path" not in runner.__dict__ @@ -319,35 +355,35 @@ def test_sphinx_runner_validator_path(patches): def test_sphinx_runner_version_file(patches): runner = sphinx_runner.SphinxRunner() patched = patches( + "pathlib", ("SphinxRunner.args", dict(new_callable=PropertyMock)), prefix="tools.docs.sphinx_runner") - with patched as (m_args, ): - assert runner.version_file == m_args.return_value.version_file + with patched as (m_plib, m_args): + assert runner.version_file == m_plib.Path.return_value + assert ( + list(m_plib.Path.call_args) + == [(m_args.return_value.version_file, ), {}]) assert "version_file" not in runner.__dict__ def test_sphinx_runner_version_number(patches): runner = sphinx_runner.SphinxRunner() patched = patches( - "open", ("SphinxRunner.version_file", dict(new_callable=PropertyMock)), prefix="tools.docs.sphinx_runner") - with patched as (m_open, m_file): + with patched as (m_file, ): assert ( runner.version_number - == m_open.return_value.__enter__.return_value.read.return_value.strip.return_value) + == 
m_file.return_value.read_text.return_value.strip.return_value) assert ( - list(m_open.call_args) - == [(m_file.return_value,), {}]) - assert ( - list(m_open.return_value.__enter__.return_value.read.call_args) + list(m_file.return_value.read_text.call_args) == [(), {}]) assert ( - list(m_open.return_value.__enter__.return_value.read.return_value.strip.call_args) + list(m_file.return_value.read_text.return_value.strip.call_args) == [(), {}]) assert "version_number" in runner.__dict__ @@ -461,8 +497,6 @@ def test_sphinx_runner_build_summary(patches): def test_sphinx_runner_check_env(patches, py_compat, release_level, version_number, docs_tag, current): runner = sphinx_runner.SphinxRunner() patched = patches( - "open", - "os.path", "platform", ("SphinxRunner.configs", dict(new_callable=PropertyMock)), ("SphinxRunner.version_number", dict(new_callable=PropertyMock)), @@ -477,12 +511,12 @@ def test_sphinx_runner_check_env(patches, py_compat, release_level, version_numb and (f"v{version_number}" != docs_tag or version_number not in current))) - with patched as (m_open, m_path, m_platform, m_configs, m_version, m_tag, m_py, m_rst): + with patched as (m_platform, m_configs, m_version, m_tag, m_py, m_rst): m_py.return_value = py_compat m_configs.return_value.__getitem__.return_value = release_level m_version.return_value = version_number m_tag.return_value = docs_tag - m_open.return_value.__enter__.return_value.read.return_value = current + m_rst.return_value.joinpath.return_value.read_text.return_value = current if fails: with pytest.raises(sphinx_runner.SphinxEnvError) as e: @@ -495,15 +529,12 @@ def test_sphinx_runner_check_env(patches, py_compat, release_level, version_numb e.value.args == ("ERROR: python version must be >= 3.8, " f"you have {m_platform.python_version.return_value}", )) - assert not m_open.called return if release_level != "tagged": - assert not m_open.called return if f"v{version_number}" != docs_tag: - assert not m_open.called assert ( e.value.args == 
("Given git tag does not match the VERSION file content:" @@ -511,14 +542,8 @@ def test_sphinx_runner_check_env(patches, py_compat, release_level, version_numb return assert ( - list(m_open.call_args) - == [(m_path.join.return_value,), {}]) - assert ( - list(m_path.join.call_args) - == [(m_rst.return_value, "version_history/current.rst"), {}]) - assert ( - list(m_open.return_value.__enter__.return_value.read.call_args) - == [(), {}]) + list(m_rst.return_value.joinpath.call_args) + == [("version_history", "current.rst"), {}]) if version_number not in current: assert ( @@ -526,6 +551,27 @@ def test_sphinx_runner_check_env(patches, py_compat, release_level, version_numb == (f"Git tag ({version_number}) not found in version_history/current.rst", )) +@pytest.mark.parametrize("exists", [True, False]) +def test_sphinx_runner_cleanup(patches, exists): + runner = sphinx_runner.SphinxRunner() + patched = patches( + ("SphinxRunner.tempdir", dict(new_callable=PropertyMock)), + prefix="tools.docs.sphinx_runner") + + with patched as (m_temp, ): + if exists: + runner.__dict__["tempdir"] = m_temp.return_value + assert not runner.cleanup() + + assert not "tempdir" in runner.__dict__ + if exists: + assert ( + list(m_temp.return_value.cleanup.call_args) + == [(), {}]) + else: + assert not m_temp.called + + def test_sphinx_runner_create_tarball(patches): runner = sphinx_runner.SphinxRunner() patched = patches( @@ -545,19 +591,30 @@ def test_sphinx_runner_create_tarball(patches): == [(m_html.return_value,), {'arcname': '.'}]) -def test_sphinx_runner_run(patches): +@pytest.mark.parametrize("raises", [True, False]) +def test_sphinx_runner_run(patches, raises): runner = sphinx_runner.SphinxRunner() patched = patches( - "tempfile", + "SphinxRunner.cleanup", "SphinxRunner._run", prefix="tools.docs.sphinx_runner") - with patched as (m_tmp, m_run): - assert runner.run() == m_run.return_value + with patched as (m_cleanup, m_run): + if raises: + m_run.side_effect = Exception("AN ERROR 
OCCURRED") + + if not raises: + assert runner.run() == m_run.return_value + else: + with pytest.raises(Exception): + runner.run() assert ( list(m_run.call_args) - == [(m_tmp.TemporaryDirectory.return_value.__enter__.return_value,), {}]) + == [(), {}]) + assert ( + list(m_cleanup.call_args) + == [(), {}]) @pytest.mark.parametrize("color", [None, "COLOR"]) @@ -601,17 +658,14 @@ def _raise(error): if build_fails: _build_error = sphinx_runner.SphinxBuildError("BUILD FAILED") m_build.side_effect = lambda: _raise(_build_error) - assert runner._run("BUILD_DIR") == (1 if (check_fails or build_fails) else None) + assert runner._run() == (1 if (check_fails or build_fails) else None) - assert ( - runner._build_dir - == "BUILD_DIR") assert ( list(m_check.call_args) == [(), {}]) assert ( list(m_os.environ.__setitem__.call_args) - == [('ENVOY_DOCS_BUILD_CONFIG', m_config.return_value), {}]) + == [('ENVOY_DOCS_BUILD_CONFIG', str(m_config.return_value)), {}]) if check_fails: assert ( diff --git a/tools/extensions/extensions_check.py b/tools/extensions/extensions_check.py index c9cddf61a35e6..7a4b50fa52d83 100644 --- a/tools/extensions/extensions_check.py +++ b/tools/extensions/extensions_check.py @@ -6,8 +6,9 @@ import re import sys from functools import cached_property +from importlib.abc import Loader from importlib.util import spec_from_loader, module_from_spec -from importlib.machinery import SourceFileLoader +from importlib.machinery import ModuleSpec, SourceFileLoader from typing import Iterator from tools.base import checker, utils @@ -71,6 +72,10 @@ METADATA_PATH = "source/extensions/extensions_metadata.yaml" +class ExtensionsConfigurationError(Exception): + pass + + class ExtensionsChecker(checker.Checker): checks = ("registered", "fuzzed", "metadata") extension_categories = EXTENSION_CATEGORIES @@ -88,9 +93,16 @@ def configured_extensions(self) -> dict: _extensions_build_config_spec = spec_from_loader( "extensions_build_config", 
SourceFileLoader("extensions_build_config", BUILD_CONFIG_PATH)) + + if not isinstance(_extensions_build_config_spec, ModuleSpec): + raise ExtensionsConfigurationError(f"Unable to parse build config {BUILD_CONFIG_PATH}") extensions_build_config = module_from_spec(_extensions_build_config_spec) + + if not isinstance(_extensions_build_config_spec.loader, Loader): + raise ExtensionsConfigurationError(f"Unable to parse build config {BUILD_CONFIG_PATH}") + _extensions_build_config_spec.loader.exec_module(extensions_build_config) - return extensions_build_config.EXTENSIONS + return getattr(extensions_build_config, "EXTENSIONS") @property def fuzzed_count(self) -> int: @@ -101,7 +113,10 @@ def fuzzed_count(self) -> int: @cached_property def metadata(self) -> dict: - return utils.from_yaml(METADATA_PATH) + result = utils.from_yaml(METADATA_PATH) + if not isinstance(result, dict): + raise ExtensionsConfigurationError(f"Unable to parse configuration: {METADATA_PATH}") + return result @property def robust_to_downstream_count(self) -> int: diff --git a/tools/extensions/tests/test_extensions_check.py b/tools/extensions/tests/test_extensions_check.py index ed1ede98d5af9..12c7ef9a83814 100644 --- a/tools/extensions/tests/test_extensions_check.py +++ b/tools/extensions/tests/test_extensions_check.py @@ -1,4 +1,5 @@ import types +from importlib.machinery import ModuleSpec from unittest.mock import patch, PropertyMock import pytest @@ -29,18 +30,32 @@ def test_extensions_checker_all_extensions(): assert "all_extensions" in checker.__dict__ -def test_extensions_checker_configured_extensions(patches): +@pytest.mark.parametrize("is_module", [True, False]) +@pytest.mark.parametrize("is_loader", [True, False]) +def test_extensions_checker_configured_extensions(patches, is_module, is_loader): checker = extensions_check.ExtensionsChecker() patched = patches( + "isinstance", "spec_from_loader", "SourceFileLoader", "module_from_spec", prefix="tools.extensions.extensions_check") - with patched 
as (m_spec, m_loader, m_module): - assert ( - checker.configured_extensions - == m_module.return_value.EXTENSIONS) + def _is_instance(obj, types): + if types == ModuleSpec: + return is_module + return is_loader + + with patched as (m_inst, m_spec, m_loader, m_module): + m_inst.side_effect = _is_instance + + if is_module and is_loader: + assert ( + checker.configured_extensions + == m_module.return_value.EXTENSIONS) + else: + with pytest.raises(extensions_check.ExtensionsConfigurationError) as e: + checker.configured_extensions assert ( list(m_spec.call_args) @@ -48,9 +63,20 @@ def test_extensions_checker_configured_extensions(patches): assert ( list(m_loader.call_args) == [('extensions_build_config', extensions_check.BUILD_CONFIG_PATH), {}]) + + if not is_module: + assert not m_module.called + assert not m_spec.return_value.loader.exec_module.called + return + assert ( list(m_module.call_args) == [(m_spec.return_value,), {}]) + + if not is_loader: + assert not m_spec.return_value.loader.exec_module.called + return + assert ( list(m_spec.return_value.loader.exec_module.call_args) == [(m_module.return_value,), {}]) @@ -88,20 +114,34 @@ def test_extensions_fuzzed_count(patches): assert "fuzzed_count" not in checker.__dict__ -def test_extensions_metadata(patches): +@pytest.mark.parametrize("is_dict", [True, False]) +def test_extensions_metadata(patches, is_dict): checker = extensions_check.ExtensionsChecker() patched = patches( + "isinstance", "utils", prefix="tools.extensions.extensions_check") - with patched as (m_utils, ): - assert ( - checker.metadata - == m_utils.from_yaml.return_value) + with patched as (m_inst, m_utils): + m_inst.return_value = is_dict + + if is_dict: + assert ( + checker.metadata + == m_utils.from_yaml.return_value) + else: + with pytest.raises(extensions_check.ExtensionsConfigurationError) as e: + checker.metadata assert ( list(m_utils.from_yaml.call_args) == [(extensions_check.METADATA_PATH,), {}]) + + if not is_dict: + assert ( + 
e.value.args[0] + == f'Unable to parse configuration: {extensions_check.METADATA_PATH}') + return assert "metadata" in checker.__dict__ diff --git a/tools/gpg/identity.py b/tools/gpg/identity.py index 9d8a51aa278ce..6d9c99607fa2f 100644 --- a/tools/gpg/identity.py +++ b/tools/gpg/identity.py @@ -1,12 +1,13 @@ import logging import os +import pathlib import pwd import shutil from functools import cached_property from email.utils import formataddr, parseaddr -from typing import Optional +from typing import Iterable, Optional -import gnupg +import gnupg # type:ignore class GPGError(Exception): @@ -47,35 +48,37 @@ def gpg(self) -> gnupg.GPG: return gnupg.GPG() @cached_property - def gpg_bin(self) -> str: - return shutil.which("gpg2") or shutil.which("gpg") + def gpg_bin(self) -> Optional[pathlib.Path]: + gpg_bin = shutil.which("gpg2") or shutil.which("gpg") + return pathlib.Path(gpg_bin) if gpg_bin else None @property - def gnupg_home(self) -> str: - return os.path.join(self.home, ".gnupg") + def gnupg_home(self) -> pathlib.Path: + return self.home.joinpath(".gnupg") @cached_property - def home(self) -> str: + def home(self) -> pathlib.Path: """Gets *and sets if required* the `HOME` env var""" - os.environ["HOME"] = os.environ.get("HOME", pwd.getpwuid(os.getuid()).pw_dir) - return os.environ["HOME"] + home_dir = os.environ.get("HOME", pwd.getpwuid(os.getuid()).pw_dir) + os.environ["HOME"] = home_dir + return pathlib.Path(home_dir) @cached_property def log(self) -> logging.Logger: return self._log or logging.getLogger(self.__class__.__name__) @property - def provided_email(self) -> Optional[str]: + def provided_email(self) -> str: """Provided email for the identity""" - return self._provided_email + return self._provided_email or "" @cached_property def provided_id(self) -> Optional[str]: """Provided name and/or email for the identity""" if not (self.provided_name or self.provided_email): - return + return None return ( - formataddr(self.provided_name, 
self.provided_email) if + formataddr((self.provided_name, self.provided_email)) if (self.provided_name and self.provided_email) else (self.provided_name or self.provided_email)) @@ -122,13 +125,13 @@ def match(self, key: dict) -> Optional[dict]: key["uid"] = key["uids"][0] return key - def _match_email(self, uids: list) -> Optional[str]: + def _match_email(self, uids: Iterable) -> Optional[str]: """Match only the email""" for uid in uids: if parseaddr(uid)[1] == self.provided_email: return uid - def _match_key(self, uids: dict) -> Optional[str]: + def _match_key(self, uids: Iterable) -> Optional[str]: """If either/both name or email are supplied it tries to match either/both""" if self.provided_name and self.provided_email: return self._match_uid(uids) @@ -137,12 +140,12 @@ def _match_key(self, uids: dict) -> Optional[str]: elif self.provided_email: return self._match_email(uids) - def _match_name(self, uids: list) -> Optional[str]: + def _match_name(self, uids: Iterable) -> Optional[str]: """Match only the name""" for uid in uids: if parseaddr(uid)[0] == self.provided_name: return uid - def _match_uid(self, uids: list) -> Optional[str]: + def _match_uid(self, uids: Iterable) -> Optional[str]: """Match the whole uid - ie `Name `""" return self.provided_id if self.provided_id in uids else None diff --git a/tools/gpg/tests/test_identity.py b/tools/gpg/tests/test_identity.py index 7e191d929cf0a..0cd3086f5a6ee 100644 --- a/tools/gpg/tests/test_identity.py +++ b/tools/gpg/tests/test_identity.py @@ -11,7 +11,7 @@ def test_identity_constructor(name, email, log): gpg = identity.GPGIdentity(name, email, log) assert gpg.provided_name == name - assert gpg.provided_email == email + assert gpg.provided_email == (email or "") assert gpg._log == log @@ -80,37 +80,47 @@ def test_identity_gpg(patches): def test_identity_gnupg_home(patches): gpg = identity.GPGIdentity() patched = patches( - "os", ("GPGIdentity.home", dict(new_callable=PropertyMock)), prefix="tools.gpg.identity") - 
with patched as (m_os, m_home): - assert gpg.gnupg_home == m_os.path.join.return_value + with patched as (m_home, ): + assert gpg.gnupg_home == m_home.return_value.joinpath.return_value assert ( - list(m_os.path.join.call_args) - == [(m_home.return_value, '.gnupg'), {}]) + list(m_home.return_value.joinpath.call_args) + == [('.gnupg', ), {}]) assert "gnupg_home" not in gpg.__dict__ -@pytest.mark.parametrize("gpg", [None, "GPG"]) +@pytest.mark.parametrize("gpg1", [None, "GPG"]) @pytest.mark.parametrize("gpg2", [None, "GPG2"]) -def test_identity_gpg_bin(patches, gpg, gpg2): +def test_identity_gpg_bin(patches, gpg1, gpg2): gpg = identity.GPGIdentity() patched = patches( + "pathlib", "shutil", prefix="tools.gpg.identity") def _get_bin(_cmd): if _cmd == "gpg2" and gpg2: return gpg2 - if _cmd == "gpg" and gpg: - return gpg + if _cmd == "gpg" and gpg1: + return gpg1 - with patched as (m_shutil, ): + with patched as (m_plib, m_shutil): m_shutil.which.side_effect = _get_bin - assert gpg.gpg_bin == gpg2 or gpg + if gpg2 or gpg1: + assert gpg.gpg_bin == m_plib.Path.return_value + else: + assert not gpg.gpg_bin + + if gpg2 or gpg1: + assert ( + list(m_plib.Path.call_args) + == [(gpg2 or gpg1, ), {}]) + else: + assert not m_plib.Path.called if gpg2: assert ( @@ -126,18 +136,17 @@ def test_identity_home(patches): gpg = identity.GPGIdentity() patched = patches( "os", + "pathlib", "pwd", prefix="tools.gpg.identity") - with patched as (m_os, m_pwd): - assert gpg.home == m_os.environ.__getitem__.return_value + with patched as (m_os, m_plib, m_pwd): + assert gpg.home == m_plib.Path.return_value + # m_os.environ.__getitem__.return_value assert ( - list(m_os.environ.__getitem__.call_args) - == [('HOME', ), {}]) - assert ( - list(m_os.environ.__setitem__.call_args) - == [('HOME', m_os.environ.get.return_value), {}]) + list(m_plib.Path.call_args) + == [(m_os.environ.get.return_value, ), {}]) assert ( list(m_os.environ.get.call_args) == [('HOME', m_pwd.getpwuid.return_value.pw_dir), {}]) 
@@ -191,7 +200,7 @@ def test_identity_identity_id(patches, name, email): if name and email: assert ( list(m_format.call_args) - == [('NAME', 'EMAIL'), {}]) + == [(('NAME', 'EMAIL'),), {}]) assert result == m_format.return_value return diff --git a/tools/testing/all_pytests.py b/tools/testing/all_pytests.py index df94b8175f871..4225add80659a 100644 --- a/tools/testing/all_pytests.py +++ b/tools/testing/all_pytests.py @@ -10,6 +10,7 @@ import os import sys from functools import cached_property +from typing import Optional from tools.base import checker, runner @@ -51,7 +52,7 @@ def add_arguments(self, parser): default=None, help="Specify a path to collect html coverage with") - def check_pytests(self) -> int: + def check_pytests(self) -> None: for target in self.pytest_targets: try: self.bazel.run(target, *self.pytest_bazel_args) @@ -71,7 +72,7 @@ def on_checks_complete(self): return super().on_checks_complete() -def main(*args: list) -> None: +def main(*args: str) -> Optional[int]: return PytestChecker(*args).run() diff --git a/tools/testing/plugin.py b/tools/testing/plugin.py index 2eb61ec6792d2..da3ca38ad15ee 100644 --- a/tools/testing/plugin.py +++ b/tools/testing/plugin.py @@ -2,20 +2,21 @@ # This is pytest plugin providing fixtures for tests. 
# +import functools from contextlib import contextmanager, ExitStack -from typing import ContextManager, Iterator +from typing import Callable, ContextManager, Iterator from unittest.mock import patch import pytest @contextmanager -def nested(*contexts) -> Iterator[list]: +def nested(*contexts) -> Iterator[tuple]: with ExitStack() as stack: - yield [stack.enter_context(context) for context in contexts] + yield tuple(stack.enter_context(context) for context in contexts) -def _patches(*args, prefix: str = "") -> ContextManager: +def _patches(*args: str, prefix: str = "") -> ContextManager[tuple]: """Takes a list of module/class paths to patch and an optional prefix The prefix is used to prefix all of the paths @@ -37,20 +38,40 @@ def _patches(*args, prefix: str = "") -> ContextManager: @pytest.fixture -def patches(): +def patches() -> Callable[..., ContextManager[tuple]]: return _patches -def _command_main(main, handler, args=("arg0", "arg1", "arg2")): - class_mock = patch(handler) +def _async_command_main(patches, main: Callable, handler: str, args: tuple) -> None: + parts = handler.split(".") + patched = patches("asyncio.run", parts.pop(), prefix=".".join(parts)) - with class_mock as m_class: - assert (main(*args) == m_class.return_value.run.return_value) + with patched as (m_run, m_handler): + assert main(*args) == m_run.return_value - assert (list(m_class.call_args) == [args, {}]) - assert (list(m_class.return_value.run.call_args) == [(), {}]) + assert list(m_run.call_args) == [(m_handler.return_value.run.return_value,), {}] + assert list(m_handler.call_args) == [args, {}] + assert list(m_handler.return_value.run.call_args) == [(), {}] + + +def _command_main( + patches, + main: Callable, + handler: str, + args=("arg0", "arg1", "arg2"), + async_run: bool = False) -> None: + if async_run: + return _async_command_main(patches, main, handler, args=args) + + patched = patches(handler) + + with patched as (m_handler,): + assert main(*args) == 
m_handler.return_value.run.return_value + + assert list(m_handler.call_args) == [args, {}] + assert list(m_handler.return_value.run.call_args) == [(), {}] @pytest.fixture -def command_main(): - return _command_main +def command_main(patches) -> Callable: + return functools.partial(_command_main, patches) diff --git a/tools/testing/tests/test_python_pytest.py b/tools/testing/tests/test_python_pytest.py index a25464c9c3564..c1056546f29b8 100644 --- a/tools/testing/tests/test_python_pytest.py +++ b/tools/testing/tests/test_python_pytest.py @@ -81,3 +81,125 @@ def test_pytest_main(command_main): command_main( python_pytest.main, "tools.testing.python_pytest.PytestRunner") + + +def test_plugin_command_main(patches): + patched = patches( + "functools", + "_command_main", + prefix="tools.testing.plugin") + + with patched as (m_funct, m_command): + assert plugin.command_main._pytestfixturefunction.scope == "function" + assert plugin.command_main._pytestfixturefunction.autouse is False + assert ( + plugin.command_main.__pytest_wrapped__.obj(patches) + == m_funct.partial.return_value) + + assert ( + list(m_funct.partial.call_args) + == [(m_command, patches), {}]) + + +@pytest.mark.parametrize("args", [None, (), tuple(f"ARG{i}" for i in range(0, 3))]) +@pytest.mark.parametrize("async_run", [None, True, False]) +@pytest.mark.parametrize("raises", [None, "main", "handler", "run"]) +def test_plugin__command_main(patches, args, async_run, raises): + patched = patches( + "_async_command_main", + prefix="tools.testing.plugin") + _args = ("arg0", "arg1", "arg2") if args is None else args + _m_handler = MagicMock() + _patches = MagicMock() + _patches.return_value.__enter__.return_value = (_m_handler, ) + main = MagicMock() + handler = MagicMock() + kwargs = {} + if args is not None: + kwargs["args"] = args + if async_run is not None: + kwargs["async_run"] = async_run + if raises != "main": + main.return_value = _m_handler.return_value.run.return_value + if raises != "handler": + 
_m_handler(*_args) + else: + _m_handler("SOMETHING", "ELSE") + if raises != "run": + _m_handler.return_value.run() + else: + _m_handler.return_value.run("NOT", "RUN") + + with patched as (m_command, ): + if not raises or async_run: + result = plugin._command_main(_patches, main, handler, **kwargs) + else: + with pytest.raises(AssertionError) as e: + plugin._command_main(_patches, main, handler, **kwargs) + + if async_run: + assert result == m_command.return_value + assert ( + list(m_command.call_args) + == [(_patches, + main, + handler), + {'args': _args}]) + assert not _patches.called + assert not main.called + return + + assert not m_command.called + assert ( + list(_patches.call_args) + == [(handler,), {}]) + assert ( + list(main.call_args) + == [_args, {}]) + + if not raises: + assert not result + + +@pytest.mark.parametrize("raises", [None, "main", "aiorun", "handler", "run"]) +def test_plugin__async_command_main(raises): + _m_run = MagicMock() + _m_handler = MagicMock() + _patches = MagicMock() + _patches.return_value.__enter__.return_value = (_m_run, _m_handler) + main = MagicMock() + handler = MagicMock() + handler.split.return_value = [f"PART{i}" for i in range(0, 3)] + args = ("arg0", "arg1", "arg2") + + if raises != "main": + main.return_value = _m_run.return_value + + if raises != "aiorun": + _m_run(_m_handler.return_value.run.return_value) + else: + _m_run("NOT", "AIORUN") + if raises != "handler": + _m_handler(*args) + else: + _m_handler("SOMETHING", "ELSE") + if raises != "run": + _m_handler.return_value.run() + else: + _m_handler.return_value.run("NOT", "RUN") + + if not raises: + assert not plugin._async_command_main(_patches, main, handler, args) + else: + with pytest.raises(AssertionError): + plugin._async_command_main(_patches, main, handler, args) + + assert ( + list(_patches.call_args) + == [('asyncio.run', 'PART2'), {'prefix': 'PART0.PART1'}]) + assert ( + list(handler.split.call_args) + == [('.',), {}]) + assert ( + list(main.call_args) + == 
[args, {}])