diff --git a/README.md b/README.md index 83b79df..fd5afb0 100644 --- a/README.md +++ b/README.md @@ -153,6 +153,55 @@ collected 58 items Logging shows that the plugin first listed all tracks and then generated test functions for each track-challenge combination it found. +#### Filter tests based on tracks + +Marking classes or functions with @pytest.mark.track("track_name") like the following test class member + +``` python +class TestCustomParameters: + @pytest.mark.track("tsdb") + def test_tsdb_esql(self, es_cluster, rally): + ret = rally.race( + track="tsdb", + track_params={"run_esql_aggs": True, "index_mode": "time_series"}, + ) + assert ret == 0 +``` +can be used for the --track-filter option. If given that option with a comma-separated list of track-names +pytest will generate tests only for those tracks. Example: + +`pytest --log-cli-level=INFO --track-filter=big5,tsdb it/test_security.py it/test_custom_parameters.py` + +will skip the tests for security because they are marked to be using the 'elastic/security' track, while +testing custom parameters only contains a function marked to be using 'tsdb' track which is included in the +--track-filter option. + +``` +========================================================= test session starts ========================================================== +platform darwin -- Python 3.12.8, pytest-7.1.2, pluggy-1.6.0 +cachedir: .pytest_cache +benchmark: 3.4.1 (defaults: timer=time.perf_counter disable_gc=False min_rounds=5 min_time=0.000005 max_time=1.0 calibration_precision=10 warmup=False warmup_iterations=100000) +rootdir: , configfile: pyproject.toml +plugins: benchmark-3.4.1, httpserver-1.0.5, asyncio-0.19.0, rally-0.0.1, anyio-4.10.0 +asyncio: mode=Mode.STRICT +collected 5 items + +it/test_security.py::TestSecurity::test_security_indexing SKIPPED (Skipping test for tracks ['elastic/security'] not in trac...) 
[ 20%] +it/test_security.py::TestSecurity::test_security_indexing_querying SKIPPED (Skipping test for tracks ['elastic/security'] no...) [ 40%] +it/test_security.py::TestSecurity::test_security_indexing_querying_logsdb SKIPPED (Skipping test for tracks ['elastic/securi...) [ 60%] +it/test_security.py::TestSecurity::test_security_generate_alerts_source_events SKIPPED (Skipping test for tracks ['elastic/s...) [ 80%] +it/test_custom_parameters.py::TestCustomParameters::test_tsdb_esql +------------------------------------------------------------ live log setup ------------------------------------------------------------ +INFO pytest_rally.elasticsearch:elasticsearch.py:84 Installing Elasticsearch: [esrally install --quiet --http-port=19200 --node=rally-node --master-nodes=rally-node --car=4gheap,trial-license,x-pack-ml,lean-watermarks --seed-hosts="127.0.0.1:19300" --revision=current] +INFO pytest_rally.elasticsearch:elasticsearch.py:93 Starting Elasticsearch: [esrally start --runtime-jdk=bundled --installation-id=1ee3c852-b86c-4126-ad1e-ac2088e30335 --race-id=1d107ad1-28a0-4446-b71d-3787ae56fd19] +------------------------------------------------------------ live log call ------------------------------------------------------------- +INFO pytest_rally.rally:rally.py:144 Running command: [esrally race --track="tsdb" --track-repository="/Users/nikosdris/Projects/rally-tracks" --track-revision="improved-filtered-tests" --configuration-name="pytest" --enable-assertions --kill-running-processes --on-error="abort" --pipeline="benchmark-only" --target-hosts="127.0.0.1:19200" --test-mode --track-params="run_esql_aggs:True,index_mode:time_series"] +PASSED [100%] +---------------------------------------------------------- live log teardown ----------------------------------------------------------- +INFO pytest_rally.rally:rally.py:91 Removing Rally config from [/Users/nikosdris/.rally/rally-pytest.ini] +INFO pytest_rally.elasticsearch:elasticsearch.py:104 Stopping 
Elasticsearch: [esrally stop --installation-id=1ee3c852-b86c-4126-ad1e-ac2088e30335] +==================================================== 1 passed, 4 skipped in 40.22s ===================================================== +``` #### Test execution Because our `test_autogenerated` function uses the [`es_cluster` fixture](#es_cluster), `pytest-rally` will install and start an Elasticsearch cluster during setup and stop it during teardown. All of our autogenerated tests will run their races with this cluster as their benchmark candidate. @@ -301,3 +350,14 @@ The plugin includes the CLI option `--debug-rally`. If provided, the plugin will ## Skipping autogenerated tests The plugin [marks](https://docs.pytest.org/en/6.2.x/mark.html#mark) all autogenerated tests with `autogenerated`, a custom marker. If you would like to skip running tests generated by the plugin, simply pass `--skip-autogenerated-tests`. The plugin will then skip all tests with this marker. Note that this does not affect test collection. + +# Testing + +``` +python -mvenv .venv +source .venv/bin/activate +# provide full path to Rally source repository +pip install -e "//rally[develop]" +pip install -e . 
+pytest +``` diff --git a/pytest.ini b/pytest.ini index 9c911ee..7ea459c 100644 --- a/pytest.ini +++ b/pytest.ini @@ -6,3 +6,5 @@ addopts = --verbose --color=yes testpaths = tests junit_family = xunit2 junit_logging = all +markers = + track(tracklist): mark test with a comma-separated list of track names for filtering diff --git a/pytest_rally/plugin.py b/pytest_rally/plugin.py index fd0c2c1..5290661 100644 --- a/pytest_rally/plugin.py +++ b/pytest_rally/plugin.py @@ -52,6 +52,18 @@ def pytest_addoption(parser): action="store_true", default=False, help=("If provided, Rally commands will just be logged, not executed.")) + group.addoption("--track-filter", + action="store", + default="", + help="Comma-separated list of track names to filter tests with (e.g., --track-filter=track1,track2)") + group.addoption("--track-repository", + action="store", + default=None, + help="Path to a local track repository. If not provided, the rootdir of the pytest run is used.") + group.addoption("--track-revision", + action="store", + default=None, + help="Git revision of the track repository to use. 
If not provided, the current branch or commit is used.") @pytest.hookimpl def pytest_cmdline_main(config): @@ -66,15 +78,25 @@ def current_branch(repo): else: return current.split()[1].strip() - repo = config.getoption("--track-repository", str(config.rootdir)) - rev = config.getoption("--track-revision", current_branch(repo)) - + repo = config.getoption("--track-repository") + if repo is None: + repo = str(config.rootdir) + rev = config.getoption("--track-revision") + if rev is None: + rev = current_branch(repo) + tfilter = config.getoption("--track-filter") + if tfilter: + tfilter = [t.strip() for t in tfilter.split(",") if t.strip()] + else: + tfilter = [] + config.option.track_repository = repo config.option.track_revision = rev + config.option.track_filter = tfilter def validate_options(config): if config.option.distribution_version and config.option.revision: - pytest.fail(msg="--distribution-version and --es-revision are mutually exclusive.", pytrace=False) + pytest.fail(msg="--distribution-version and --revision are mutually exclusive.", pytrace=False) def configure_markers(config): config.addinivalue_line("markers", "autogenerated: mark test as autogenerated") @@ -97,8 +119,7 @@ def default_params(track, challenge): @pytest.hookimpl def pytest_generate_tests(metafunc): - repo = metafunc.config.getoption('track_repository') - rev = metafunc.config.getoption('track_revision') + tfilter = metafunc.config.getoption('track_filter') current_class = metafunc.cls desired_class = metafunc.config.option.test_class @@ -108,13 +129,26 @@ def pytest_generate_tests(metafunc): params = [] tracks_and_challenges = r.all_tracks_and_challenges() for track, challenges in tracks_and_challenges: - params += [(default_params(track, challenge)) for challenge in challenges] + if not tfilter or track in tfilter: + params += [(default_params(track, challenge)) for challenge in challenges] metafunc.parametrize("track,challenge,rally_options", params) 
metafunc.definition.parent.add_marker("autogenerated") @pytest.hookimpl -def pytest_runtest_setup(item): - markers = [m.name for m in item.iter_markers()] - if "autogenerated" in markers: - if item.config.getoption("--skip-autogenerated-tests"): - pytest.skip(msg="--skip-autogenerated-tests flag was set") +def pytest_collection_modifyitems(session,config,items): + for item in items: + markers = [m.name for m in item.iter_markers()] + if "autogenerated" in markers: + if item.config.getoption("--skip-autogenerated-tests"): + item.add_marker(pytest.mark.skip(reason="--skip-autogenerated-tests flag was set")) + + track_filter = item.config.getoption("track_filter") + if track_filter: + track_marker = item.get_closest_marker("track") + if track_marker: + # Support marker as a list: @pytest.mark.track(["track1", "track2"]) + marker_tracks = track_marker.args[0] + if isinstance(marker_tracks, str): + marker_tracks = marker_tracks.split(",") + if not any(track in track_filter for track in marker_tracks): + item.add_marker(pytest.mark.skip(reason=f"Skipping test for tracks {marker_tracks} not in track_filter {track_filter}")) diff --git a/setup.py b/setup.py index 6238120..1c6ddda 100644 --- a/setup.py +++ b/setup.py @@ -20,8 +20,8 @@ setup( name="pytest-rally", packages=["pytest_rally"], - version="0.0.1", + version="0.0.2", include_package_data=True, - entry_points={"pytest11": ["name_of_plugin = pytest_rally.plugin"]}, + entry_points={"pytest11": ["rally = pytest_rally.plugin"]}, classifiers=["Framework :: Pytest"], ) diff --git a/tests/conftest.py b/tests/conftest.py index db64fab..7e6d6ec 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -33,32 +33,36 @@ def resources(request): def repo(resources): yield Path(resources).joinpath("track-repo") -@pytest.fixture(scope="function", autouse=True) -def make_conftest(pytester, repo): - conftest_str = """ - import pytest - - def pytest_addoption(parser): - group = parser.getgroup("rally") - 
group.addoption("--track-repository", action="store", default=None) - group.addoption("--track-revision", action="store", default="main") - """ - pytester.makeconftest(conftest_str) - @pytest.fixture(scope="function", autouse=True) def temp_repo(pytester, repo): temp_repo = pytester.mkdir("track-repo") copytree(repo, temp_repo, dirs_exist_ok=True) prefix = f"git -C {temp_repo}" - commands = ["init", "add .", "commit -am 'test'"] + commands = ["init -b main", "add .", "commit -am 'test'"] for command in commands: run_command_with_return_code(f"{prefix} {command}") yield temp_repo @pytest.fixture(scope="function") def example(pytester): - yield pytester.copy_example("race.py") + examples_dir = Path(__file__).parent.joinpath("examples") + example_files = examples_dir.glob("*.py") + examples={} + for f in example_files: + examples.update({f.name[:-3]: pytester.copy_example(f.name)}) + yield examples @pytest.fixture(scope="function") def run(pytester, temp_repo, example): - yield partial(pytester.runpytest, "--debug-rally", f"--track-repository={temp_repo}", example) + yield partial(pytester.runpytest, "--debug-rally", f"--track-repository={temp_repo}", example["all_tracks_and_challenges"]) + +@pytest.fixture(scope="function") +def run_with_filter(pytester, temp_repo): + def _run_with_filter(track_filter, test_module): + return pytester.runpytest( + "--debug-rally", + f"--track-repository={temp_repo}", + f"--track-filter={track_filter}", + test_module + ) + yield _run_with_filter diff --git a/tests/examples/race.py b/tests/examples/all_tracks_and_challenges.py similarity index 100% rename from tests/examples/race.py rename to tests/examples/all_tracks_and_challenges.py diff --git a/tests/examples/marked_tracks.py b/tests/examples/marked_tracks.py new file mode 100644 index 0000000..50f8643 --- /dev/null +++ b/tests/examples/marked_tracks.py @@ -0,0 +1,46 @@ +# Licensed to Elasticsearch B.V. under one or more contributor +# license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch B.V. licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import pytest + +# Marks at module level for tests with track filters that don't include the listed track names +pytestmark = pytest.mark.track("test-track", "test-track2", "test-track3") + +class TestMarkedFunctions: + @pytest.mark.track("test-track","test-track2") + def test_mark_track(self, es_cluster, rally): + rally.race(track="test-track",challenge="index-only") + rally.race(track="test-track2",challenge="force-merge") + + @pytest.mark.track("test-track2") + def test_mark_track2(self, es_cluster, rally): + rally.race(track="test-track2",challenge="cluster-health") + + @pytest.mark.track("test-track3") + def test_mark_track3(self, es_cluster, rally): + rally.race(track="test-track3",challenge="index-only") + +@pytest.mark.track("test-track") +class TestMarkedClass: + @pytest.mark.track("test-track","test-track2") + def test_mark_track(self, es_cluster, rally): + rally.race(track="test-track",challenge="index-only") + rally.race(track="test-track2",challenge="force-merge") + + @pytest.mark.track("test-track3") + def test_mark_track3(self, es_cluster, rally): + rally.race(track="test-track3",challenge="index-only") diff --git a/tests/resources/track-repo/test-track/track.json b/tests/resources/track-repo/test-track/track.json index a1528f8..c7bb2dc 100644 --- 
a/tests/resources/track-repo/test-track/track.json +++ b/tests/resources/track-repo/test-track/track.json @@ -1,6 +1,6 @@ { "version": 2, - "description": "Skeleton track for testing pytest-rally", + "description": "Track 1 for testing pytest-rally", "indices": [ { "name": "test", diff --git a/tests/resources/track-repo/test-track2/index.json b/tests/resources/track-repo/test-track2/index.json new file mode 100644 index 0000000..c36b03e --- /dev/null +++ b/tests/resources/track-repo/test-track2/index.json @@ -0,0 +1,28 @@ +{ + "settings": { + "index.number_of_replicas": 0 + }, + "mappings": { + "dynamic": "strict", + "properties": { + "geonameid": { + "type": "long" + }, + "name": { + "type": "text" + }, + "latitude": { + "type": "double" + }, + "longitude": { + "type": "double" + }, + "country_code": { + "type": "text" + }, + "population": { + "type": "long" + } + } + } +} diff --git a/tests/resources/track-repo/test-track2/track.json b/tests/resources/track-repo/test-track2/track.json new file mode 100644 index 0000000..fc04352 --- /dev/null +++ b/tests/resources/track-repo/test-track2/track.json @@ -0,0 +1,109 @@ +{ + "version": 2, + "description": "Track 2 for testing pytest-rally", + "indices": [ + { + "name": "test", + "body": "index.json" + } + ], + "corpora": [ + { + "name": "test-track2", + "documents": [ + { + "source-file": "documents.json", + "document-count": 100, + "uncompressed-bytes": 100 + } + ] + } + ], + "challenges": [ + { + "name": "index-and-query", + "default": true, + "schedule": [ + { + "operation": { + "operation-type": "delete-index" + } + }, + { + "operation": { + "operation-type": "create-index" + } + }, + { + "operation": { + "operation-type": "cluster-health", + "request-params": { + "wait_for_status": "green" + }, + "retry-until-success": true + } + }, + { + "operation": { + "operation-type": "bulk", + "bulk-size": 5000 + }, + "warmup-time-period": 120, + "clients": 8 + }, + { + "operation": { + "operation-type": "force-merge" + } + 
}, + { + "operation": { + "name": "query-match-all", + "operation-type": "search", + "body": { + "query": { + "match_all": {} + } + } + }, + "clients": 8, + "warmup-iterations": 1000, + "iterations": 1000, + "target-throughput": 100 + } + ] + }, + { + "name": "index-only", + "schedule": [ + { + "operation": { + "operation-type": "delete-index" + } + }, + { + "operation": { + "operation-type": "create-index" + } + }, + { + "operation": { + "operation-type": "cluster-health", + "request-params": { + "wait_for_status": "green" + }, + "retry-until-success": true + } + }, + { + "operation": { + "operation-type": "bulk", + "bulk-size": 5000 + }, + "warmup-time-period": 120, + "clients": 8 + } + ] + } + ] +} diff --git a/tests/resources/track-repo/test-track3/index.json b/tests/resources/track-repo/test-track3/index.json new file mode 100644 index 0000000..c36b03e --- /dev/null +++ b/tests/resources/track-repo/test-track3/index.json @@ -0,0 +1,28 @@ +{ + "settings": { + "index.number_of_replicas": 0 + }, + "mappings": { + "dynamic": "strict", + "properties": { + "geonameid": { + "type": "long" + }, + "name": { + "type": "text" + }, + "latitude": { + "type": "double" + }, + "longitude": { + "type": "double" + }, + "country_code": { + "type": "text" + }, + "population": { + "type": "long" + } + } + } +} diff --git a/tests/resources/track-repo/test-track3/track.json b/tests/resources/track-repo/test-track3/track.json new file mode 100644 index 0000000..5beb2f7 --- /dev/null +++ b/tests/resources/track-repo/test-track3/track.json @@ -0,0 +1,109 @@ +{ + "version": 2, + "description": "Track 3 for testing pytest-rally", + "indices": [ + { + "name": "test", + "body": "index.json" + } + ], + "corpora": [ + { + "name": "test-track3", + "documents": [ + { + "source-file": "documents.json", + "document-count": 100, + "uncompressed-bytes": 100 + } + ] + } + ], + "challenges": [ + { + "name": "index-and-query", + "default": true, + "schedule": [ + { + "operation": { + 
"operation-type": "delete-index" + } + }, + { + "operation": { + "operation-type": "create-index" + } + }, + { + "operation": { + "operation-type": "cluster-health", + "request-params": { + "wait_for_status": "green" + }, + "retry-until-success": true + } + }, + { + "operation": { + "operation-type": "bulk", + "bulk-size": 5000 + }, + "warmup-time-period": 120, + "clients": 8 + }, + { + "operation": { + "operation-type": "force-merge" + } + }, + { + "operation": { + "name": "query-match-all", + "operation-type": "search", + "body": { + "query": { + "match_all": {} + } + } + }, + "clients": 8, + "warmup-iterations": 1000, + "iterations": 1000, + "target-throughput": 100 + } + ] + }, + { + "name": "index-only", + "schedule": [ + { + "operation": { + "operation-type": "delete-index" + } + }, + { + "operation": { + "operation-type": "create-index" + } + }, + { + "operation": { + "operation-type": "cluster-health", + "request-params": { + "wait_for_status": "green" + }, + "retry-until-success": true + } + }, + { + "operation": { + "operation-type": "bulk", + "bulk-size": 5000 + }, + "warmup-time-period": 120, + "clients": 8 + } + ] + } + ] +} diff --git a/tests/test_plugin.py b/tests/test_plugin.py index 5187d53..b87175e 100644 --- a/tests/test_plugin.py +++ b/tests/test_plugin.py @@ -16,43 +16,113 @@ # under the License. 
import pytest +import re -def test_generates_tests_from_list_tracks(pytester, example, temp_repo): - expected = [ - "test_track_challenge[test-track-index-and-query]", - "test_track_challenge[test-track-index-only]", - ] - generated, _ = pytester.inline_genitems(example, f"--track-repository={temp_repo}") - assert [func.name for func in generated] == expected - -def test_runs_correct_race_commands(caplog, temp_repo, run): - def expected_log_line(track, challenge): - command = ( - f'esrally race --track="{track}" --challenge="{challenge}" ' - f'--track-repository="{temp_repo}" --track-revision="main" ' - '--configuration-name="pytest" --enable-assertions --kill-running-processes ' - '--on-error="abort" --pipeline="benchmark-only" --target-hosts="127.0.0.1:19200" --test-mode' - ) +# metafunc.parametrize returns this string when given an empty list. +# Ref1: https://github.com/elastic/pytest-rally/blob/60042c441fc0ca2d6aafe0e298fd7f08e3c30334/pytest_rally/plugin.py#L134 +# Ref2: https://docs.pytest.org/en/7.1.x/reference/reference.html#pytest.Metafunc.parametrize +DEFAULT_TRACK_AND_CHALLENGE="track0-challenge0-rally_options0" + +class TestPlugin: + # this should be sorted as per Rally list tracks output + tracks = ["test-track", "test-track2", "test-track3"] + challenges = ["index-and-query", "index-only"] + + def test_generates_tests_from_list_tracks(self, pytester, example, temp_repo): + expected = [ + f"test_track_challenge[{track}-{challenge}]" for track in self.tracks for challenge in self.challenges + ] + generated, _ = pytester.inline_genitems(example["all_tracks_and_challenges"], f"--track-repository={temp_repo}") + assert [func.name for func in generated] == expected + + def test_runs_correct_race_commands(self, caplog, temp_repo, run): + def expected_log_line(track, challenge): + command = ( + f'esrally race --track="{track}" --challenge="{challenge}" ' + f'--track-repository="{temp_repo}" --track-revision="main" ' + '--configuration-name="pytest" 
--enable-assertions --kill-running-processes ' + '--on-error="abort" --pipeline="benchmark-only" --target-hosts="127.0.0.1:19200" --test-mode' + ) + + return ("pytest_rally.rally", "INFO", f'Running command: [{command}]') + + challenges = [ + "index-and-query", + "index-only", + ] - return ("pytest_rally.rally", "INFO", f'Running command: [{command}]') - - challenges = [ - "index-and-query", - "index-only", - ] - - expected = [expected_log_line("test-track", challenge) for challenge in challenges] - res = run() - actual = [(r.name, r.levelname, r.message) for r in caplog.records if "esrally race" in r.message] - assert actual == expected - -def test_runs_correct_install_command(caplog, temp_repo, run): - expected = [ - ("pytest_rally.elasticsearch", "DEBUG", 'Installing Elasticsearch: ' - '[esrally install --quiet --http-port=19200 --node=rally-node --master-nodes=rally-node ' - '--car=4gheap,trial-license,x-pack-ml,lean-watermarks --seed-hosts="127.0.0.1:19300" ' - '--revision=current]') - ] - res = run() - actual = [(r.name, r.levelname, r.message) for r in caplog.records if "esrally install" in r.message] - assert actual == expected + expected = [expected_log_line(track, challenge) for track in self.tracks for challenge in challenges] + res = run() + actual = [(r.name, r.levelname, r.message) for r in caplog.records if "esrally race" in r.message] + assert actual == expected + + def test_runs_correct_install_command(self, caplog, temp_repo, run): + expected = [ + ("pytest_rally.elasticsearch", "DEBUG", 'Installing Elasticsearch: ' + '[esrally install --quiet --http-port=19200 --node=rally-node --master-nodes=rally-node ' + '--car=4gheap,trial-license,x-pack-ml,lean-watermarks --seed-hosts="127.0.0.1:19300" ' + '--revision=current]') + ] + res = run() + actual = [(r.name, r.levelname, r.message) for r in caplog.records if "esrally install" in r.message] + assert actual == expected + + def test_track_filter_limits_autogenerated_tracks(self, pytester, example, 
temp_repo): + def expected_test_names(track_filter): + filter_items = None if track_filter == "" else [t.strip() for t in track_filter.split(",")] + if filter_items and all(f not in self.tracks for f in filter_items): + result = [f"test_track_challenge[{DEFAULT_TRACK_AND_CHALLENGE}]"] + else: + result = [ + f"test_track_challenge[{track}-{challenge}]" + for track in self.tracks if not filter_items or track in filter_items + for challenge in self.challenges + ] + return result + + test_track_filters = ["","test-track2", "test-track,test-track2", "test-track500"] + for track_filter in test_track_filters: + expected = expected_test_names(track_filter) + generated, _ = pytester.inline_genitems( + example["all_tracks_and_challenges"], + f"--track-repository={temp_repo}", + f"--track-filter={track_filter}" + ) + assert [func.name for func in generated] == expected + + def test_track_filter_skips_tracks(self, caplog, temp_repo, example, run_with_filter): + def expected_tracks_filtered(track_filter): + filter_items = None if track_filter == "" else [t.strip() for t in track_filter.split(",")] + return set([track for track in self.tracks if not filter_items or track in filter_items]) + + test_track_filters = ["", "test-track2", "test-track2,test-track", "test-track500"] + for track_filter in test_track_filters: + caplog.clear() + run_function = run_with_filter(track_filter, example["marked_tracks"]) + races = [r for r in caplog.records if "esrally race" in r.message] + raced_tracks = [] + for r in races: + track_match = re.search(r'--track="([^"]+)"', r.message) + if track_match: + raced_tracks.append(track_match.group(1)) + expected_tracks = expected_tracks_filtered(track_filter) + actual_tracks = set(raced_tracks) + assert actual_tracks == expected_tracks + + def test_skip_autogenerated_tests_option(self, pytester, example, temp_repo): + expected_all = [ + f"test_track_challenge[{track}-{challenge}]" + for track in self.tracks + for challenge in self.challenges + ] + # 
Without the flag all autogenerated tests should run + generated, _ = pytester.inline_genitems(example["all_tracks_and_challenges"], f"--track-repository={temp_repo}") + assert [func.name for func in generated] == expected_all + + # With flag generated items should be marked as skipped + generated, _ = pytester.inline_genitems( + example["all_tracks_and_challenges"], + f"--track-repository={temp_repo}", + "--skip-autogenerated-tests" + ) + assert all("skip" in [m.name for m in func.iter_markers()] for func in generated)