From 65ee2e1e8d7218ba23554ae92831cff988db3157 Mon Sep 17 00:00:00 2001 From: mloubout Date: Fri, 5 Apr 2024 09:39:35 -0400 Subject: [PATCH] CI: revamp parallel marker --- conftest.py | 121 ++++++++++++--------------- tests/test_autotuner.py | 2 +- tests/test_benchmark.py | 2 +- tests/test_builtins.py | 8 +- tests/test_data.py | 44 +++++----- tests/test_dle.py | 2 +- tests/test_dse.py | 2 +- tests/test_gpu_common.py | 6 +- tests/test_gpu_openacc.py | 4 +- tests/test_gpu_openmp.py | 6 +- tests/test_linearize.py | 7 +- tests/test_mpi.py | 170 +++++++++++++++++++------------------- tests/test_operator.py | 2 +- tests/test_pickle.py | 8 +- tests/test_sparse.py | 4 +- tests/test_subdomains.py | 4 +- 16 files changed, 189 insertions(+), 203 deletions(-) diff --git a/conftest.py b/conftest.py index 3a4d6d4ef4..bc2526c3d7 100644 --- a/conftest.py +++ b/conftest.py @@ -122,7 +122,7 @@ def EVAL(exprs, *args): return processed[0] if isinstance(exprs, str) else processed -def parallel(item): +def parallel(item, m): """ Run a test in parallel. Readapted from: @@ -131,47 +131,44 @@ def parallel(item): mpi_exec = 'mpiexec' mpi_distro = sniff_mpi_distro(mpi_exec) - marker = item.get_closest_marker("parallel") - mode = as_tuple(marker.kwargs.get("mode", 2)) - for m in mode: - # Parse the `mode` - if isinstance(m, int): - nprocs = m - scheme = 'basic' - else: - if len(m) == 2: - nprocs, scheme = m - else: - raise ValueError("Can't run test: unexpected mode `%s`" % m) - - pyversion = sys.executable - # Only spew tracebacks on rank 0. - # Run xfailing tests to ensure that errors are reported to calling process - if item.cls is not None: - testname = "%s::%s::%s" % (item.fspath, item.cls.__name__, item.name) - else: - testname = "%s::%s" % (item.fspath, item.name) - args = ["-n", "1", pyversion, "-m", "pytest", "--runxfail", "-s", - "-q", testname] - if nprocs > 1: - args.extend([":", "-n", "%d" % (nprocs - 1), pyversion, "-m", "pytest", - "--runxfail", "--tb=no", "-q", testname]) - # OpenMPI requires an explicit flag for oversubscription. We need it as some - # of the MPI tests will spawn lots of processes - if mpi_distro == 'OpenMPI': - call = [mpi_exec, '--oversubscribe', '--timeout', '300'] + args + # Parse the `mode` + if isinstance(m, int): + nprocs = m + scheme = 'basic' + else: + if len(m) == 2: + nprocs, scheme = m else: - call = [mpi_exec] + args + raise ValueError("Can't run test: unexpected mode `%s`" % m) - # Tell the MPI ranks that they are running a parallel test - os.environ['DEVITO_MPI'] = scheme - try: - check_call(call) - return True - except: - return False - finally: - os.environ['DEVITO_MPI'] = '0' + pyversion = sys.executable + # Only spew tracebacks on rank 0. + # Run xfailing tests to ensure that errors are reported to calling process + if item.cls is not None: + testname = "%s::%s::%s" % (item.fspath, item.cls.__name__, item.name) + else: + testname = "%s::%s" % (item.fspath, item.name) + args = ["-n", "1", pyversion, "-m", "pytest", "--runxfail", "-q", testname] + if nprocs > 1: + args.extend([":", "-n", "%d" % (nprocs - 1), pyversion, "-m", "pytest", + "--runxfail", "--tb=no", "-q", testname]) + # OpenMPI requires an explicit flag for oversubscription. 
We need it as some + # of the MPI tests will spawn lots of processes + if mpi_distro == 'OpenMPI': + call = [mpi_exec, '--oversubscribe', '--timeout', '300'] + args + else: + call = [mpi_exec] + args + + # Tell the MPI ranks that they are running a parallel test + os.environ['DEVITO_MPI'] = scheme + try: + check_call(call) + res = True + except: + res = False + finally: + os.environ['DEVITO_MPI'] = '0' + return res def pytest_configure(config): @@ -182,55 +179,45 @@ def pytest_configure(config): ) -def pytest_runtest_setup(item): - partest = os.environ.get('DEVITO_MPI', 0) - try: - partest = int(partest) - except ValueError: - pass - if item.get_closest_marker("parallel"): - if MPI is None: - pytest.skip("mpi4py/MPI not installed") - else: - # Blow away function arg in "master" process, to ensure - # this test isn't run on only one process - dummy_test = lambda *args, **kwargs: True - # For pytest <7 - if item.cls is not None: - attr = item.originalname or item.name - setattr(item.cls, attr, dummy_test) - else: - item.obj = dummy_test - # For pytest >= 7 - setattr(item, '_obj', dummy_test) +def pytest_generate_tests(metafunc): + # Process custom parallel marker as a parametrize to avoid + # running a single test for all modes + if 'mode' in metafunc.fixturenames: + markers = metafunc.definition.iter_markers() + for marker in markers: + if marker.name == 'parallel': + mode = list(as_tuple(marker.kwargs.get('mode', 2))) + metafunc.parametrize("mode", mode) +@pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_call(item): partest = os.environ.get('DEVITO_MPI', 0) try: partest = int(partest) except ValueError: pass + if item.get_closest_marker("parallel") and not partest: # Spawn parallel processes to run test - passed = parallel(item) - if not passed: - pytest.fail(f"{item} failed in parallel execution") + outcome = parallel(item, item.funcargs['mode']) + if outcome: + pytest.skip(f"{item} success in parallel") else: - pytest.skip(f"{item}t passed in parallel execution") + pytest.fail(f"{item} failed in parallel") + else: + outcome = yield @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item, call): outcome = yield result = outcome.get_result() - partest = os.environ.get('DEVITO_MPI', 0) try: partest = int(partest) except ValueError: pass - if item.get_closest_marker("parallel") and not partest: if call.when == 'call' and result.outcome == 'skipped': result.outcome = 'passed' diff --git a/tests/test_autotuner.py b/tests/test_autotuner.py index 72233d3fa0..ca1644316c 100644 --- a/tests/test_autotuner.py +++ b/tests/test_autotuner.py @@ -181,7 +181,7 @@ def test_discarding_runs(): @pytest.mark.parallel(mode=[(2, 'diag'), (2, 'full')]) -def test_at_w_mpi(): +def test_at_w_mpi(mode): """Make sure autotuning works in presence of MPI. MPI ranks work in isolation to determine the best block size, locally.""" grid = Grid(shape=(8, 8)) diff --git a/tests/test_benchmark.py b/tests/test_benchmark.py index 2b0988fc33..92ae2b36ed 100644 --- a/tests/test_benchmark.py +++ b/tests/test_benchmark.py @@ -71,7 +71,7 @@ def test_bench(mode, problem, op): @pytest.mark.parallel(mode=2) @switchconfig(profiling='advanced') -def test_run_mpi(): +def test_run_mpi(mode): """ Test the `run` mode over MPI, with all key arguments used. 
""" diff --git a/tests/test_builtins.py b/tests/test_builtins.py index d086415376..8bb68d976c 100644 --- a/tests/test_builtins.py +++ b/tests/test_builtins.py @@ -92,7 +92,7 @@ def test_assign_subsampled_timefunction(self): assert np.all(f.data == 1) @pytest.mark.parallel(mode=4) - def test_assign_parallel(self): + def test_assign_parallel(self, mode): a = np.arange(64).reshape((8, 8)) grid = Grid(shape=a.shape) @@ -174,7 +174,7 @@ def test_gs_2d_float(self, sigma): assert np.amax(np.abs(sp_smoothed - np.array(dv_smoothed))) <= 1e-5 @pytest.mark.parallel(mode=[(4, 'full')]) - def test_gs_parallel(self): + def test_gs_parallel(self, mode): a = np.arange(64).reshape((8, 8)) grid = Grid(shape=a.shape) @@ -236,7 +236,7 @@ def test_nbl_zero(self): assert np.all(a[:] - np.array(f.data[:]) == 0) @pytest.mark.parallel(mode=4) - def test_if_parallel(self): + def test_if_parallel(self, mode): a = np.arange(36).reshape((6, 6)) grid = Grid(shape=(18, 18)) x, y = grid.dimensions @@ -292,7 +292,7 @@ def test_if_halo(self, ndim, nbl): @pytest.mark.parametrize('nbl', [0, 2]) @pytest.mark.parallel(mode=4) - def test_if_halo_mpi(self, nbl): + def test_if_halo_mpi(self, nbl, mode): """ Test that FD halo is padded as well. """ diff --git a/tests/test_data.py b/tests/test_data.py index 232aff9c97..a0833722fe 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -492,7 +492,7 @@ class TestDataDistributed(object): """ @pytest.mark.parallel(mode=4) - def test_localviews(self): + def test_localviews(self, mode): grid = Grid(shape=(4, 4)) x, y = grid.dimensions glb_pos_map = grid.distributor.glb_pos_map @@ -520,7 +520,7 @@ def test_localviews(self): assert np.all(u.data_ro_with_halo._local[2] == 0.) @pytest.mark.parallel(mode=4) - def test_trivial_insertion(self): + def test_trivial_insertion(self, mode): grid = Grid(shape=(4, 4)) u = Function(name='u', grid=grid, space_order=0) v = Function(name='v', grid=grid, space_order=1) @@ -536,7 +536,7 @@ def test_trivial_insertion(self): assert np.all(v.data_with_halo._local == 1.) 
@pytest.mark.parallel(mode=4) - def test_indexing(self): + def test_indexing(self, mode): grid = Grid(shape=(4, 4)) x, y = grid.dimensions glb_pos_map = grid.distributor.glb_pos_map @@ -567,7 +567,7 @@ def test_indexing(self): assert np.all(u.data[:, 2] == [myrank, myrank]) @pytest.mark.parallel(mode=4) - def test_slicing(self): + def test_slicing(self, mode): grid = Grid(shape=(4, 4)) x, y = grid.dimensions glb_pos_map = grid.distributor.glb_pos_map @@ -594,7 +594,7 @@ def test_slicing(self): assert u.data[:2, 2:].size == u.data[2:, :2].size == u.data[:2, :2].size == 0 @pytest.mark.parallel(mode=4) - def test_slicing_ns(self): + def test_slicing_ns(self, mode): # Test slicing with a negative step grid = Grid(shape=(4, 4)) x, y = grid.dimensions @@ -619,7 +619,7 @@ def test_slicing_ns(self): assert np.all(u.data == [[5, 4], [1, 0]]) @pytest.mark.parallel(mode=4) - def test_getitem(self): + def test_getitem(self, mode): # __getitem__ mpi slicing tests: grid = Grid(shape=(8, 8)) x, y = grid.dimensions @@ -697,7 +697,7 @@ def test_getitem(self): assert np.all(result4 == [[28, 27, 26]]) @pytest.mark.parallel(mode=4) - def test_big_steps(self): + def test_big_steps(self, mode): # Test slicing with a step size > 1 grid = Grid(shape=(8, 8)) x, y = grid.dimensions @@ -749,7 +749,7 @@ def test_big_steps(self): assert np.all(r3 == [[0]]) @pytest.mark.parallel(mode=4) - def test_setitem(self): + def test_setitem(self, mode): # __setitem__ mpi slicing tests grid = Grid(shape=(12, 12)) x, y = grid.dimensions @@ -810,7 +810,7 @@ def test_setitem(self): [0, 0, 0, 0, 0, 0]]) @pytest.mark.parallel(mode=4) - def test_hd_slicing(self): + def test_hd_slicing(self, mode): # Test higher dimension slices grid = Grid(shape=(4, 4, 4)) x, y, z = grid.dimensions @@ -889,7 +889,7 @@ def test_hd_slicing(self): [63]]) @pytest.mark.parallel(mode=4) - def test_niche_slicing(self): + def test_niche_slicing(self, mode): grid0 = Grid(shape=(8, 8)) x0, y0 = grid0.dimensions glb_pos_map0 = grid0.distributor.glb_pos_map @@ -1029,7 +1029,7 @@ def test_niche_slicing(self): ((8, 8, 8), (slice(None, None, 1), 5, slice(None, None, 1)), (slice(None, None, 1), 1, slice(None, None, 1)), (slice(None, None, 1), 7, slice(None, None, 1)))]) - def test_niche_slicing2(self, shape, slice0, slice1, slice2): + def test_niche_slicing2(self, shape, slice0, slice1, slice2, mode): grid = Grid(shape=shape) f = Function(name='f', grid=grid) f.data[:] = 1 @@ -1063,7 +1063,7 @@ def test_empty_slicing(self): assert(g.data[1:1, 0:0, 1:1].shape == (0, 0, 0)) @pytest.mark.parallel(mode=4) - def test_neg_start_stop(self): + def test_neg_start_stop(self, mode): grid0 = Grid(shape=(8, 8)) f = Function(name='f', grid=grid0, space_order=0, dtype=np.int32) dat = np.arange(64, dtype=np.int32) @@ -1094,7 +1094,7 @@ def test_neg_start_stop(self): assert np.count_nonzero(h.data[:]) == 0 @pytest.mark.parallel(mode=4) - def test_indexing_in_views(self): + def test_indexing_in_views(self, mode): grid = Grid(shape=(4, 4)) x, y = grid.dimensions glb_pos_map = grid.distributor.glb_pos_map @@ -1158,7 +1158,7 @@ def test_indexing_in_views(self): assert view2.size == 0 @pytest.mark.parallel(mode=4) - def test_from_replicated_to_distributed(self): + def test_from_replicated_to_distributed(self, mode): shape = (4, 4) grid = Grid(shape=shape) x, y = grid.dimensions @@ -1207,7 +1207,7 @@ def test_from_replicated_to_distributed(self): assert False @pytest.mark.parallel(mode=4) - def test_misc_setup(self): + def test_misc_setup(self, mode): """Test setup of Functions with mixed 
distributed/replicated Dimensions.""" grid = Grid(shape=(4, 4)) _, y = grid.dimensions @@ -1248,7 +1248,7 @@ def test_misc_setup(self): assert True @pytest.mark.parallel(mode=4) - def test_misc_data(self): + def test_misc_data(self, mode): """ Test data insertion/indexing for Functions with mixed distributed/replicated Dimensions. @@ -1294,7 +1294,7 @@ def test_misc_data(self): (slice(None, None, -1), slice(0, 1, 1), slice(None, None, -1)), (0, slice(None, None, -1), slice(None, None, -1)), (slice(0, 1, 1), slice(None, None, -1), slice(None, None, -1))]) - def test_inversions(self, gslice): + def test_inversions(self, gslice, mode): """ Test index flipping along different axes.""" nx = 8 ny = 8 @@ -1337,7 +1337,7 @@ def test_inversions(self, gslice): assert res.shape == vdat[tuple(sl)].shape @pytest.mark.parallel(mode=4) - def test_setitem_shorthands(self): + def test_setitem_shorthands(self, mode): # Test setitem with various slicing shorthands nx = 8 ny = 8 @@ -1387,7 +1387,7 @@ class TestDataGather(object): @pytest.mark.parallel(mode=4) @pytest.mark.parametrize('rank', [0, 1, 2, 3]) - def test_simple_gather(self, rank): + def test_simple_gather(self, rank, mode): """ Test a simple gather on various ranks.""" grid = Grid(shape=(10, 10), extent=(9, 9)) f = Function(name='f', grid=grid, dtype=np.int32) @@ -1408,7 +1408,7 @@ def test_simple_gather(self, rank): (None, None, -2), (1, 8, 3), ((0, 4), None, (2, 1))]) - def test_sliced_gather_2D(self, start, stop, step): + def test_sliced_gather_2D(self, start, stop, step, mode): """ Test gather for various 2D slices.""" grid = Grid(shape=(10, 10), extent=(9, 9)) f = Function(name='f', grid=grid, dtype=np.int32) @@ -1442,7 +1442,7 @@ def test_sliced_gather_2D(self, start, stop, step): (None, None, -2), (1, 8, 3), ((0, 4, 4), None, (2, 1, 1))]) - def test_sliced_gather_3D(self, start, stop, step): + def test_sliced_gather_3D(self, start, stop, step, mode): """ Test gather for various 3D slices.""" grid = Grid(shape=(10, 10, 10), extent=(9, 9, 9)) f = Function(name='f', grid=grid, dtype=np.int32) @@ -1469,7 +1469,7 @@ def test_sliced_gather_3D(self, start, stop, step): assert ans == np.array(None) @pytest.mark.parallel(mode=[4, 6]) - def test_gather_time_function(self): + def test_gather_time_function(self, mode): """ Test gathering of TimeFunction objects. """ grid = Grid(shape=(11, 11)) f = TimeFunction(name='f', grid=grid, save=11) diff --git a/tests/test_dle.py b/tests/test_dle.py index b2896dac4f..42e52297c7 100644 --- a/tests/test_dle.py +++ b/tests/test_dle.py @@ -148,7 +148,7 @@ def test_cache_blocking_structure_subdims(): @pytest.mark.parallel(mode=[(1, 'full')]) # Shortcut to put loops in nested efuncs -def test_cache_blocking_structure_distributed(): +def test_cache_blocking_structure_distributed(mode): """ Test cache blocking in multiple nested elemental functions. 
""" diff --git a/tests/test_dse.py b/tests/test_dse.py index 9435d58c54..871a575fce 100644 --- a/tests/test_dse.py +++ b/tests/test_dse.py @@ -2801,7 +2801,7 @@ def test_fullopt(self): @switchconfig(profiling='advanced') @pytest.mark.parallel(mode=[(1, 'full')]) - def test_fullopt_w_mpi(self): + def test_fullopt_w_mpi(self, mode): tti_noopt = self.tti_operator(opt=None) rec0, u0, v0, _ = tti_noopt.forward() tti_agg = self.tti_operator(opt='advanced') diff --git a/tests/test_gpu_common.py b/tests/test_gpu_common.py index 7c06ce9bf1..1204eb2c13 100644 --- a/tests/test_gpu_common.py +++ b/tests/test_gpu_common.py @@ -1184,7 +1184,7 @@ def test_streaming_split_noleak(self): @pytest.mark.skip(reason="Unsupported MPI + .dx when streaming backwards") @pytest.mark.parallel(mode=4) @switchconfig(safe_math=True) # Or NVC will crash - def test_streaming_w_mpi(self): + def test_streaming_w_mpi(self, mode): nt = 5 grid = Grid(shape=(16, 16)) @@ -1382,7 +1382,7 @@ def test_deviceid(self): @skipif('device-openmp') @pytest.mark.parallel(mode=1) - def test_deviceid_w_mpi(self): + def test_deviceid_w_mpi(self, mode): self.check_deviceid() def test_devicerm(self): @@ -1503,7 +1503,7 @@ def test_empty_arrays(self): @skipif('device-openmp') @pytest.mark.parallel(mode=4) - def test_degenerate_subdomainset(self): + def test_degenerate_subdomainset(self, mode): """ MFE for issue #1766 """ diff --git a/tests/test_gpu_openacc.py b/tests/test_gpu_openacc.py index 5bb9424b86..c3056cb5cf 100644 --- a/tests/test_gpu_openacc.py +++ b/tests/test_gpu_openacc.py @@ -248,7 +248,7 @@ def test_iso_acoustic(self, opt): class TestMPI(object): @pytest.mark.parallel(mode=2) - def test_basic(self): + def test_basic(self, mode): grid = Grid(shape=(6, 6)) x, y = grid.dimensions t = grid.stepping_dim @@ -276,5 +276,5 @@ def test_basic(self): [11., 16., 17., 17., 16., 11.]]) @pytest.mark.parallel(mode=2) - def test_iso_ac(self): + def test_iso_ac(self, mode): TestOperator().iso_acoustic(opt='advanced') diff --git a/tests/test_gpu_openmp.py b/tests/test_gpu_openmp.py index ebda431a37..5f500c02c3 100644 --- a/tests/test_gpu_openmp.py +++ b/tests/test_gpu_openmp.py @@ -24,7 +24,7 @@ def test_init_omp_env(self): 'if (deviceid != -1)\n{\n omp_set_default_device(deviceid);\n}' @pytest.mark.parallel(mode=1) - def test_init_omp_env_w_mpi(self): + def test_init_omp_env_w_mpi(self, mode): grid = Grid(shape=(3, 3, 3)) u = TimeFunction(name='u', grid=grid) @@ -321,7 +321,7 @@ def test_iso_acoustic(self, opt): class TestMPI(object): @pytest.mark.parallel(mode=[2, 4]) - def test_mpi_nocomms(self): + def test_mpi_nocomms(self, mode): grid = Grid(shape=(3, 3, 3)) u = TimeFunction(name='u', grid=grid, dtype=np.int32) @@ -337,5 +337,5 @@ def test_mpi_nocomms(self): assert np.all(np.array(u.data[0, :, :, :]) == time_steps) @pytest.mark.parallel(mode=[2, 4]) - def test_iso_ac(self): + def test_iso_ac(self, mode): TestOperator().iso_acoustic(opt='advanced') diff --git a/tests/test_linearize.py b/tests/test_linearize.py index 7f87eecedd..b236170e9d 100644 --- a/tests/test_linearize.py +++ b/tests/test_linearize.py @@ -31,7 +31,7 @@ def test_basic(): @pytest.mark.parallel(mode=[(1, 'basic'), (1, 'diag2'), (1, 'full')]) -def test_mpi(): +def test_mpi(mode): grid = Grid(shape=(4, 4)) u = TimeFunction(name='u', grid=grid, space_order=2) @@ -153,15 +153,13 @@ def test_interpolation_msf(): @pytest.mark.parallel(mode=[(1, 'diag2')]) -def test_codegen_quality0(): +def test_codegen_quality0(mode): grid = Grid(shape=(4, 4)) - u = TimeFunction(name='u', grid=grid, 
space_order=2) eqn = Eq(u.forward, u.dx2 + 1.) op = Operator(eqn, opt=('advanced', {'linearize': True})) - assert 'uL0' in str(op) exprs = FindNodes(Expression).visit(op) @@ -172,6 +170,7 @@ def test_codegen_quality0(): # for the efunc args # (the other three obviously are _POSIX_C_SOURCE, START, STOP) assert len(op._headers) == 6 + return "bonjour" def test_codegen_quality1(): diff --git a/tests/test_mpi.py b/tests/test_mpi.py index d6cb431a90..dabd82dda7 100644 --- a/tests/test_mpi.py +++ b/tests/test_mpi.py @@ -23,7 +23,7 @@ class TestDistributor(object): @pytest.mark.parallel(mode=[2, 4]) - def test_partitioning(self): + def test_partitioning(self, mode): grid = Grid(shape=(15, 15)) f = Function(name='f', grid=grid) @@ -37,7 +37,7 @@ def test_partitioning(self): assert distributor.nprocs_local == distributor.nprocs @pytest.mark.parallel(mode=[2, 4]) - def test_partitioning_fewer_dims(self): + def test_partitioning_fewer_dims(self, mode): """Test domain decomposition for Functions defined over a strict subset of grid-decomposed dimensions.""" size_x, size_y = 16, 16 @@ -55,7 +55,7 @@ def test_partitioning_fewer_dims(self): assert f.shape == expected[distributor.nprocs][distributor.myrank] @pytest.mark.parallel(mode=[2, 4]) - def test_partitioning_fewer_dims_timefunc(self): + def test_partitioning_fewer_dims_timefunc(self, mode): """Test domain decomposition for Functions defined over a strict subset of grid-decomposed dimensions.""" size_x, size_y = 16, 16 @@ -80,7 +80,7 @@ def test_partitioning_fewer_dims_timefunc(self): assert f.shape[1:] == expected[distributor.nprocs][distributor.myrank] @pytest.mark.parallel(mode=9) - def test_neighborhood_horizontal_2d(self): + def test_neighborhood_horizontal_2d(self, mode): grid = Grid(shape=(3, 3)) x, y = grid.dimensions @@ -111,7 +111,7 @@ def test_neighborhood_horizontal_2d(self): assert expected[distributor.myrank][y] == distributor.neighborhood[y] @pytest.mark.parallel(mode=9) - def test_neighborhood_diagonal_2d(self): + def test_neighborhood_diagonal_2d(self, mode): grid = Grid(shape=(3, 3)) x, y = grid.dimensions @@ -142,7 +142,7 @@ def test_neighborhood_diagonal_2d(self): for i in [(LEFT, LEFT), (LEFT, RIGHT), (RIGHT, LEFT), (RIGHT, RIGHT)]) @pytest.mark.parallel(mode=[2, 4]) - def test_ctypes_neighborhood(self): + def test_ctypes_neighborhood(self, mode): grid = Grid(shape=(4, 4)) distributor = grid.distributor @@ -163,7 +163,7 @@ def test_ctypes_neighborhood(self): assert all(getattr(value._obj, k) == v for k, v in mapper.items()) @pytest.mark.parallel(mode=[4]) - def test_custom_topology(self): + def test_custom_topology(self, mode): grid = Grid(shape=(15, 15)) f = Function(name='f', grid=grid) @@ -222,7 +222,7 @@ def test_custom_topology(self): (256, ('*', 32, 2), (4, 32, 2)), ]) @pytest.mark.parallel(mode=[2]) - def test_custom_topology_v2(self, comm_size, topology, dist_topology): + def test_custom_topology_v2(self, comm_size, topology, dist_topology, mode): dummy_comm = Bunch(size=comm_size) custom_topology = CustomTopology(topology, dummy_comm) assert custom_topology == dist_topology @@ -231,7 +231,7 @@ def test_custom_topology_v2(self, comm_size, topology, dist_topology): class TestFunction(object): @pytest.mark.parallel(mode=2) - def test_halo_exchange_bilateral(self): + def test_halo_exchange_bilateral(self, mode): """ Test halo exchange between two processes organised in a 2x1 cartesian grid. 
@@ -282,7 +282,7 @@ def test_halo_exchange_bilateral(self): ((1, 0), (0, 1)), ]) @pytest.mark.parallel(mode=2) - def test_halo_exchange_bilateral_asymmetric(self, paddings): + def test_halo_exchange_bilateral_asymmetric(self, paddings, mode): """ Test halo exchange between two processes organised in a 2x1 cartesian grid. @@ -332,7 +332,7 @@ def test_halo_exchange_bilateral_asymmetric(self, paddings): assert np.all(f._data_ro_with_inhalo[:, -2:] == 0.) @pytest.mark.parallel(mode=4) - def test_halo_exchange_quadrilateral(self): + def test_halo_exchange_quadrilateral(self, mode): """ Test halo exchange between four processes organised in a 2x2 cartesian grid. @@ -412,7 +412,7 @@ def test_halo_exchange_quadrilateral(self): ((15, 15), [((0, 8), (0, 8)), ((0, 8), (8, 15)), ((8, 15), (0, 8)), ((8, 15), (8, 15))]), ]) - def test_local_indices(self, shape, expected): + def test_local_indices(self, shape, expected, mode): grid = Grid(shape=shape) f = Function(name='f', grid=grid) @@ -421,7 +421,7 @@ def test_local_indices(self, shape, expected): @pytest.mark.parallel(mode=4) @pytest.mark.parametrize('shape', [(1,), (2, 3), (4, 5, 6)]) - def test_mpi4py_nodevmpi(self, shape): + def test_mpi4py_nodevmpi(self, shape, mode): with switchconfig(mpi=False): # Mimic external mpi init @@ -443,7 +443,7 @@ class TestSparseFunction(object): ((8, ), ((1.,), (3.,), (5.,), (7.,)), 1), ((8, ), ((1.,), (2.,), (3.,), (4.,), (5.,), (6.,), (7.,), (8.,)), 2) ]) - def test_ownership(self, shape, coords, points): + def test_ownership(self, shape, coords, points, mode): """Given a sparse point ``p`` with known coordinates, this test checks that the MPI rank owning ``p`` is retrieved correctly.""" grid = Grid(shape=shape, extent=shape) @@ -463,7 +463,7 @@ def test_ownership(self, shape, coords, points): ([(1.5, 1.5), ], [[], [], [], [0.]], [(slice(0, -1), ), (slice(0, -1), ), (slice(0, -1), ), (slice(0, 1), )]) ]) - def test_local_indices(self, coords, expected, expectedinds): + def test_local_indices(self, coords, expected, expectedinds, mode): grid = Grid(shape=(4, 4), extent=(3.0, 3.0)) data = np.array([0., 1., 2., 3.]) @@ -483,7 +483,7 @@ def test_local_indices(self, coords, expected, expectedinds): assert sf.local_indices == expectedinds @pytest.mark.parallel(mode=4) - def test_scatter_gather(self): + def test_scatter_gather(self, mode): """ Test scattering and gathering of sparse data from and to a single MPI rank. 
@@ -533,7 +533,7 @@ def test_scatter_gather(self): @pytest.mark.parallel(mode=4) @switchconfig(condition=isinstance(configuration['compiler'], (OneapiCompiler)), safe_math=True) - def test_sparse_coords(self): + def test_sparse_coords(self, mode): grid = Grid(shape=(21, 31, 21), extent=(20, 30, 20)) x, y, z = grid.dimensions @@ -556,7 +556,7 @@ def test_sparse_coords(self): assert sf.data[i] == coords_loc @pytest.mark.parallel(mode=4) - def test_sparse_coords_issue1823(self): + def test_sparse_coords_issue1823(self, mode): grid = Grid((101, 101, 101), extent=(1000, 1000, 1000)) coords = np.array([[1000., 0., 900.], [1000., 300., 700.], [1000., 500., 500.], [1000., 700., 300.], @@ -573,7 +573,7 @@ def test_sparse_coords_issue1823(self): @pytest.mark.parallel(mode=4) @pytest.mark.parametrize('r', [2]) - def test_precomputed_sparse(self, r): + def test_precomputed_sparse(self, r, mode): grid = Grid(shape=(4, 4), extent=(3.0, 3.0)) coords = np.array([(1.0, 1.0), (2.0, 2.0), (1.0, 2.0), (2.0, 1.0)]) @@ -601,7 +601,7 @@ def test_precomputed_sparse(self, r): assert np.all(sf1.data == 4) @pytest.mark.parallel(mode=4) - def test_sparse_first(self): + def test_sparse_first(self, mode): """ Tests custom sprase function with sparse dimension as first index. """ @@ -639,7 +639,7 @@ class SparseFirst(SparseFunction): assert np.allclose(s.data, expected) @pytest.mark.parallel(mode=[(4, 'diag2')]) - def test_no_grid_dim_slow(self): + def test_no_grid_dim_slow(self, mode): shape = (12, 13, 14) nfreq = 5 nrec = 2 @@ -669,7 +669,7 @@ class CoordSlowSparseFunction(SparseFunction): assert np.all(s.data == 1) @pytest.mark.parallel(mode=4) - def test_no_grid_dim_slow_time(self): + def test_no_grid_dim_slow_time(self, mode): shape = (12, 13, 14) nfreq = 5 nrec = 2 @@ -702,7 +702,7 @@ class CoordSlowSparseFunction(SparseTimeFunction): class TestOperatorSimple(object): @pytest.mark.parallel(mode=[2, 4, 8]) - def test_trivial_eq_1d(self): + def test_trivial_eq_1d(self, mode): grid = Grid(shape=(32,)) x = grid.dimensions[0] t = grid.stepping_dim @@ -724,7 +724,7 @@ def test_trivial_eq_1d(self): assert np.all(f.data_ro_domain[0] == 7.) @pytest.mark.parallel(mode=[2]) - def test_trivial_eq_1d_asymmetric(self): + def test_trivial_eq_1d_asymmetric(self, mode): grid = Grid(shape=(32,)) x = grid.dimensions[0] t = grid.stepping_dim @@ -743,7 +743,7 @@ def test_trivial_eq_1d_asymmetric(self): assert f.data_ro_domain[0, -1] == 2. 
@pytest.mark.parallel(mode=2) - def test_trivial_eq_1d_save(self): + def test_trivial_eq_1d_save(self, mode): grid = Grid(shape=(32,)) x = grid.dimensions[0] time = grid.time_dim @@ -765,7 +765,7 @@ def test_trivial_eq_1d_save(self): @pytest.mark.parallel(mode=[(4, 'basic'), (4, 'diag'), (4, 'overlap'), (4, 'overlap2'), (4, 'diag2'), (4, 'full')]) - def test_trivial_eq_2d(self): + def test_trivial_eq_2d(self, mode): grid = Grid(shape=(8, 8,)) x, y = grid.dimensions t = grid.stepping_dim @@ -801,7 +801,7 @@ def test_trivial_eq_2d(self): @pytest.mark.parallel(mode=[(8, 'basic'), (8, 'diag'), (8, 'overlap'), (8, 'overlap2'), (8, 'diag2'), (8, 'full')]) - def test_trivial_eq_3d(self): + def test_trivial_eq_3d(self, mode): grid = Grid(shape=(8, 8, 8)) x, y, z = grid.dimensions t = grid.stepping_dim @@ -842,7 +842,7 @@ def test_trivial_eq_3d(self): assert np.all(f.data_ro_domain[0, 1:-1, 1:-1, 1:-1] == interior) @pytest.mark.parallel(mode=[(4, 'basic'), (4, 'diag')]) - def test_multiple_eqs_funcs(self): + def test_multiple_eqs_funcs(self, mode): grid = Grid(shape=(12,)) x = grid.dimensions[0] t = grid.stepping_dim @@ -874,7 +874,7 @@ def test_multiple_eqs_funcs(self): assert calls[0].ncomps == 2 @pytest.mark.parallel(mode=2) - def test_reapply_with_different_functions(self): + def test_reapply_with_different_functions(self, mode): grid1 = Grid(shape=(30, 30, 30)) f1 = Function(name='f', grid=grid1, space_order=4) @@ -894,7 +894,7 @@ def test_reapply_with_different_functions(self): class TestCodeGeneration(object): @pytest.mark.parallel(mode=1) - def test_avoid_haloupdate_as_nostencil_basic(self): + def test_avoid_haloupdate_as_nostencil_basic(self, mode): grid = Grid(shape=(12,)) f = TimeFunction(name='f', grid=grid) @@ -907,7 +907,7 @@ def test_avoid_haloupdate_as_nostencil_basic(self): assert len(calls) == 0 @pytest.mark.parallel(mode=1) - def test_avoid_haloupdate_as_nostencil_advanced(self): + def test_avoid_haloupdate_as_nostencil_advanced(self, mode): grid = Grid(shape=(4, 4)) u = TimeFunction(name='u', grid=grid, space_order=4, time_order=2, save=None) v = TimeFunction(name='v', grid=grid, space_order=0, time_order=0, save=5) @@ -927,7 +927,7 @@ def test_avoid_haloupdate_as_nostencil_advanced(self): assert len(calls) == 0 @pytest.mark.parallel(mode=1) - def test_avoid_redundant_haloupdate(self): + def test_avoid_redundant_haloupdate(self, mode): grid = Grid(shape=(12,)) x = grid.dimensions[0] t = grid.stepping_dim @@ -946,7 +946,7 @@ def test_avoid_redundant_haloupdate(self): assert len(calls) == 1 @pytest.mark.parallel(mode=1) - def test_avoid_haloupdate_if_distr_but_sequential(self): + def test_avoid_haloupdate_if_distr_but_sequential(self, mode): grid = Grid(shape=(12,)) x = grid.dimensions[0] t = grid.stepping_dim @@ -970,7 +970,7 @@ def test_avoid_haloupdate_if_distr_but_sequential(self): assert len(calls) == 0 @pytest.mark.parallel(mode=1) - def test_avoid_haloupdate_with_subdims(self): + def test_avoid_haloupdate_with_subdims(self, mode): grid = Grid(shape=(4,)) x = grid.dimensions[0] t = grid.stepping_dim @@ -996,7 +996,7 @@ def test_avoid_haloupdate_with_subdims(self): assert len(calls) == 1 @pytest.mark.parallel(mode=1) - def test_avoid_haloupdate_with_constant_index(self): + def test_avoid_haloupdate_with_constant_index(self, mode): grid = Grid(shape=(4,)) x = grid.dimensions[0] t = grid.stepping_dim @@ -1010,7 +1010,7 @@ def test_avoid_haloupdate_with_constant_index(self): assert len(calls) == 0 @pytest.mark.parallel(mode=1) - def 
test_do_haloupdate_with_constant_locindex(self): + def test_do_haloupdate_with_constant_locindex(self, mode): """ Like `test_avoid_haloupdate_with_constant_index`, there is again a constant index, but this time along a loc-index (`t` Dimension), @@ -1029,7 +1029,7 @@ def test_do_haloupdate_with_constant_locindex(self): assert len(calls) == 1 @pytest.mark.parallel(mode=1) - def test_hoist_haloupdate_if_no_flowdep(self): + def test_hoist_haloupdate_if_no_flowdep(self, mode): grid = Grid(shape=(12,)) x = grid.dimensions[0] t = grid.stepping_dim @@ -1055,7 +1055,7 @@ def test_hoist_haloupdate_if_no_flowdep(self): assert len(calls) == 2 @pytest.mark.parallel(mode=1) - def test_hoist_haloupdate_with_subdims(self): + def test_hoist_haloupdate_with_subdims(self, mode): """ This test stems from https://github.com/devitocodes/devito/issues/1119 @@ -1088,7 +1088,7 @@ def test_hoist_haloupdate_with_subdims(self): assert len(calls) == 0 @pytest.mark.parallel(mode=1) - def test_hoist_haloupdate_from_innerloop(self): + def test_hoist_haloupdate_from_innerloop(self, mode): grid = Grid(shape=(4, 4, 4)) x, y, z = grid.dimensions @@ -1108,7 +1108,7 @@ def test_hoist_haloupdate_from_innerloop(self): assert op.body.body[-1].body[1].body[0].body[0].body[1].is_Iteration @pytest.mark.parallel(mode=2) - def test_unhoist_haloupdate_if_invariant(self): + def test_unhoist_haloupdate_if_invariant(self, mode): """ Test an Operator that computes coupled equations in which the first one *does require* a halo update on a Dimension-invariant Function. @@ -1140,7 +1140,7 @@ def test_unhoist_haloupdate_if_invariant(self): assert np.allclose(f.data_ro_domain[5:], [67., 67., 62., 56., 30.], rtol=R) @pytest.mark.parallel(mode=[(2, 'basic'), (2, 'diag')]) - def test_redo_haloupdate_due_to_antidep(self): + def test_redo_haloupdate_due_to_antidep(self, mode): grid = Grid(shape=(12,)) x = grid.dimensions[0] t = grid.stepping_dim @@ -1163,7 +1163,7 @@ def test_redo_haloupdate_due_to_antidep(self): assert np.all(g.data_ro_domain[1, :-1] == 2.) @pytest.mark.parallel(mode=[(1, 'full')]) - def test_avoid_fullmode_if_crossloop_dep(self): + def test_avoid_fullmode_if_crossloop_dep(self, mode): grid = Grid(shape=(4, 4)) x, y = grid.dimensions @@ -1184,7 +1184,7 @@ def test_avoid_fullmode_if_crossloop_dep(self): assert np.all(f.data[:] == 2.) 
@pytest.mark.parallel(mode=2) - def test_avoid_haloudate_if_flowdep_along_other_dim(self): + def test_avoid_haloudate_if_flowdep_along_other_dim(self, mode): grid = Grid(shape=(10,)) x = grid.dimensions[0] t = grid.stepping_dim @@ -1219,7 +1219,7 @@ def test_avoid_haloudate_if_flowdep_along_other_dim(self): assert np.allclose(g.data_ro_domain[0, 5:], [4.8, 4.8, 4.8, 4.8, 2.], rtol=R) @pytest.mark.parallel(mode=2) - def test_unmerge_haloupdate_if_no_locindices(self): + def test_unmerge_haloupdate_if_no_locindices(self, mode): grid = Grid(shape=(10,)) x = grid.dimensions[0] t = grid.stepping_dim @@ -1260,7 +1260,7 @@ def test_unmerge_haloupdate_if_no_locindices(self): assert np.allclose(g.data_ro_domain[0, 5:], [16., 16., 14., 13., 6.], rtol=R) @pytest.mark.parallel(mode=1) - def test_merge_haloupdate_if_diff_locindices_v0(self): + def test_merge_haloupdate_if_diff_locindices_v0(self, mode): grid = Grid(shape=(101, 101)) x, y = grid.dimensions t = grid.stepping_dim @@ -1281,7 +1281,7 @@ def test_merge_haloupdate_if_diff_locindices_v0(self): op.cfunction @pytest.mark.parallel(mode=2) - def test_merge_haloupdate_if_diff_locindices_v1(self): + def test_merge_haloupdate_if_diff_locindices_v1(self, mode): """ This test is a revisited, more complex version of `test_merge_haloupdate_if_diff_locindices_v0`. And in addition to @@ -1335,7 +1335,7 @@ def test_merge_haloupdate_if_diff_locindices_v1(self): @pytest.mark.parallel(mode=1) @switchconfig(autopadding=True) - def test_process_but_avoid_haloupdate_along_replicated(self): + def test_process_but_avoid_haloupdate_along_replicated(self, mode): dx = Dimension(name='dx') grid = Grid(shape=(10, 10)) x, y = grid.dimensions @@ -1357,7 +1357,7 @@ def test_process_but_avoid_haloupdate_along_replicated(self): assert calls[0].arguments[0] is u @pytest.mark.parallel(mode=1) - def test_conditional_dimension(self): + def test_conditional_dimension(self, mode): """ Test the case of Functions in the condition of a ConditionalDimension. """ @@ -1381,7 +1381,7 @@ def test_conditional_dimension(self): assert len(calls) == 0 @pytest.mark.parallel(mode=1) - def test_conditional_dimension_v2(self): + def test_conditional_dimension_v2(self, mode): """ Make sure optimizations don't move around halo exchanges if embedded within conditionals. @@ -1414,7 +1414,7 @@ def test_conditional_dimension_v2(self): ('f[t,x-1,y-1] + f[t,x+1,y+1]', {'cr', 'rr', 'rc', 'cl', 'll', 'lc'}), ]) @pytest.mark.parallel(mode=[(1, 'diag')]) - def test_diag_comm_scheme(self, expr, expected): + def test_diag_comm_scheme(self, expr, expected, mode): """ Check that the 'diag' mode does not generate more communications than strictly necessary. 
@@ -1432,7 +1432,7 @@ def test_diag_comm_scheme(self, expr, expected): assert destinations == expected @pytest.mark.parallel(mode=[(1, 'full')]) - def test_poke_progress(self): + def test_poke_progress(self, mode): grid = Grid(shape=(4, 4)) x, y = grid.dimensions t = grid.stepping_dim @@ -1485,7 +1485,7 @@ def test_poke_progress(self): assert call._single_thread @pytest.mark.parallel(mode=[(1, 'diag2')]) - def test_diag2_quality(self): + def test_diag2_quality(self, mode): grid = Grid(shape=(10, 10, 10)) f = TimeFunction(name='f', grid=grid, space_order=2) @@ -1509,7 +1509,7 @@ def test_diag2_quality(self): (1, 'diag2'), (1, 'full'), ]) - def test_min_code_size(self): + def test_min_code_size(self, mode): grid = Grid(shape=(10, 10, 10)) f = TimeFunction(name='f', grid=grid, space_order=2) @@ -1550,7 +1550,7 @@ def test_min_code_size(self): assert len(FindNodes(ComputeCall).visit(op)) == 1 @pytest.mark.parallel(mode=[(1, 'diag2')]) - def test_many_functions(self): + def test_many_functions(self, mode): grid = Grid(shape=(10, 10, 10)) eqns = [] @@ -1570,7 +1570,7 @@ def test_many_functions(self): @pytest.mark.parallel(mode=[ (1, 'full'), ]) - def test_profiled_regions(self): + def test_profiled_regions(self, mode): grid = Grid(shape=(10, 10, 10)) f = TimeFunction(name='f', grid=grid, space_order=2) @@ -1584,7 +1584,7 @@ def test_profiled_regions(self): 'remainder0', 'compute0'] @pytest.mark.parallel(mode=1) - def test_enforce_haloupdate_if_unwritten_function(self): + def test_enforce_haloupdate_if_unwritten_function(self, mode): grid = Grid(shape=(16, 16)) u = TimeFunction(name='u', grid=grid) @@ -1607,7 +1607,7 @@ def test_enforce_haloupdate_if_unwritten_function(self): class TestOperatorAdvanced(object): @pytest.mark.parallel(mode=4) - def test_injection_wodup(self): + def test_injection_wodup(self, mode): """ Test injection operator when the sparse points don't need to be replicated ("wodup" -> w/o duplication) over multiple MPI ranks. @@ -1640,7 +1640,7 @@ def test_injection_wodup(self): @pytest.mark.parallel(mode=4) @switchconfig(condition=isinstance(configuration['compiler'], (OneapiCompiler)), safe_math=True) - def test_injection_wodup_wtime(self): + def test_injection_wodup_wtime(self, mode): """ Just like ``test_injection_wodup``, but using a SparseTimeFunction instead of a SparseFunction. Hence, the data scattering/gathering now @@ -1666,7 +1666,7 @@ def test_injection_wodup_wtime(self): assert np.all(f.data[2] == 3.25) @pytest.mark.parallel(mode=4) - def test_injection_dup(self): + def test_injection_dup(self, mode): """ Test injection operator when the sparse points are replicated over multiple MPI ranks. @@ -1721,7 +1721,7 @@ def test_injection_dup(self): assert np.all(f.data_ro_domain == [[3.75, 1.25], [1.25, 0.]]) @pytest.mark.parallel(mode=4) - def test_interpolation_wodup(self): + def test_interpolation_wodup(self, mode): grid = Grid(shape=(4, 4), extent=(3.0, 3.0)) f = Function(name='f', grid=grid, space_order=1) @@ -1748,7 +1748,7 @@ def test_interpolation_wodup(self): assert np.all(sf.data == 4.) @pytest.mark.parallel(mode=4) - def test_interpolation_dup(self): + def test_interpolation_dup(self, mode): """ Test interpolation operator when the sparse points are replicated over multiple MPI ranks. 
@@ -1800,7 +1800,7 @@ def test_interpolation_dup(self): assert np.all(sf.data == [1.5, 2.5, 2.5, 3.5][grid.distributor.myrank]) @pytest.mark.parallel(mode=2) - def test_subsampling(self): + def test_subsampling(self, mode): grid = Grid(shape=(40,)) x = grid.dimensions[0] t = grid.stepping_dim @@ -1837,7 +1837,7 @@ def test_subsampling(self): assert len(FindNodes(Call).visit(conditional[0])) == 0 @pytest.mark.parallel(mode=2) - def test_arguments_subrange(self): + def test_arguments_subrange(self, mode): """ Test op.apply when a subrange is specified for a distributed dimension. """ @@ -1858,7 +1858,7 @@ def test_arguments_subrange(self): assert np.all(f.data_ro_domain[1, -4:] == 0.) @pytest.mark.parallel(mode=2) - def test_bcs_basic(self): + def test_bcs_basic(self, mode): """ Test MPI in presence of boundary condition loops. Here, no halo exchange is expected (as there is no stencil in the computed expression) but we @@ -1897,7 +1897,7 @@ def test_bcs_basic(self): assert np.all(u.data_ro_domain[0, -thickness:] == range(2, thickness+2)) @pytest.mark.parallel(mode=2) - def test_interior_w_stencil(self): + def test_interior_w_stencil(self, mode): grid = Grid(shape=(10,)) x = grid.dimensions[0] t = grid.stepping_dim @@ -1916,7 +1916,7 @@ def test_interior_w_stencil(self): assert np.all(u.data_ro_domain[0, :-2] == 3.) @pytest.mark.parallel(mode=4) - def test_misc_dims(self): + def test_misc_dims(self, mode): """ Test MPI in presence of Functions with mixed distributed/replicated Dimensions, with only a strict subset of the Grid dimensions used. @@ -1959,7 +1959,7 @@ def test_misc_dims(self): assert(np.all(u.data[1, 3, :] == 8.0)) @pytest.mark.parallel(mode=9) - def test_nontrivial_operator(self): + def test_nontrivial_operator(self, mode): """ Test MPI in a non-trivial scenario: :: @@ -2040,7 +2040,7 @@ def test_nontrivial_operator(self): assert np.all(u.data_ro_domain[1] == 3) @pytest.mark.parallel(mode=[(4, 'basic'), (4, 'overlap'), (4, 'full')]) - def test_coupled_eqs_mixed_dims(self): + def test_coupled_eqs_mixed_dims(self, mode): """ Test an Operator that computes coupled equations over partly disjoint sets of Dimensions (e.g., one Eq over [x, y, z], the other Eq over [x, yi, zi]). @@ -2089,7 +2089,7 @@ def test_coupled_eqs_mixed_dims(self): assert np.all(v.data_ro_domain[1, :, 3] == 0.) @pytest.mark.parallel(mode=2) - def test_haloupdate_same_timestep(self): + def test_haloupdate_same_timestep(self, mode): """ Test an Operator that computes coupled equations in which the second one requires a halo update right after the computation of the first one. @@ -2112,7 +2112,7 @@ def test_haloupdate_same_timestep(self): assert np.all(v.data_ro_domain[-1, :, 1:-1] == 6.) @pytest.mark.parallel(mode=2) - def test_haloupdate_same_timestep_v2(self): + def test_haloupdate_same_timestep_v2(self, mode): """ Similar to test_haloupdate_same_timestep, but switching the expression that writes to subsequent time step. Also checks halo update call placement. @@ -2143,7 +2143,7 @@ def test_haloupdate_same_timestep_v2(self): assert np.all(v.data_ro_domain[-1, :, 1:-1] == 6.) @pytest.mark.parallel(mode=4) - def test_haloupdate_multi_op(self): + def test_haloupdate_multi_op(self, mode): """ Test that halo updates are carried out correctly when multiple operators are applied consecutively. 
@@ -2168,7 +2168,7 @@ def test_haloupdate_multi_op(self): assert (np.isclose(norm(f), 17.24904, atol=1e-4, rtol=0)) @pytest.mark.parallel(mode=1) - def test_haloupdate_issue_1613(self): + def test_haloupdate_issue_1613(self, mode): """ Test the HaloScheme construction and generation when using u.dt2. @@ -2194,7 +2194,7 @@ def test_haloupdate_issue_1613(self): assert dims[0].origin is t @pytest.mark.parallel(mode=[(4, 'basic'), (4, 'diag2'), (4, 'overlap2')]) - def test_cire(self): + def test_cire(self, mode): """ Check correctness when the DSE extracts aliases and places them into offset-ed loop (nest). For example, the compiler may generate: @@ -2236,7 +2236,7 @@ def test_cire(self): assert u0_norm == u1_norm @pytest.mark.parallel(mode=[(4, 'overlap2'), (4, 'diag2')]) - def test_cire_with_shifted_diagonal_halo_touch(self): + def test_cire_with_shifted_diagonal_halo_touch(self, mode): """ Like ``test_cire`` but now the diagonal halos required to compute the aliases are shifted due to the iteration space. Basically, this @@ -2274,7 +2274,7 @@ def test_cire_with_shifted_diagonal_halo_touch(self): {'cire-rotate': True}, # Issue #1490 {'min-storage': True}, # Issue #1491 ]) - def test_cire_options(self, opt_options): + def test_cire_options(self, opt_options, mode): """ MFEs for issues #1490 and #1491. """ @@ -2309,7 +2309,7 @@ def test_cire_options(self, opt_options): assert np.allclose(p.data, p1.data, rtol=10e-11) @pytest.mark.parallel(mode=[(4, 'full')]) - def test_staggering(self): + def test_staggering(self, mode): """ Test MPI in presence of staggered grids. @@ -2339,7 +2339,7 @@ def test_staggering(self): assert np.isclose(norm(uxy), 61427.853, rtol=1.e-4) @pytest.mark.parallel(mode=2) - def test_op_new_dist(self): + def test_op_new_dist(self, mode): """ Test that an operator made with one distributor produces correct results when executed with a different distributor. @@ -2367,7 +2367,7 @@ def test_op_new_dist(self): assert abs(norm(u) - norm(u2)) < 1.e-3 @pytest.mark.parallel(mode=[(4, 'full')]) - def test_misc_subdims(self): + def test_misc_subdims(self, mode): """ Test MPI full mode with an Operator having: @@ -2410,7 +2410,7 @@ def test_misc_subdims(self): assert np.all(u.data[1, :, 1:] == 1.) @pytest.mark.parallel(mode=[(4, 'basic'), (4, 'full')]) - def test_misc_subdims_3D(self): + def test_misc_subdims_3D(self, mode): """ Test `SubDims` in 3D (so that spatial blocking is introduced). @@ -2447,7 +2447,7 @@ def test_misc_subdims_3D(self): assert np.all(u.data[1, :, :, -2:] == 1.) @pytest.mark.parallel(mode=[(4, 'full')]) - def test_custom_subdomain(self): + def test_custom_subdomain(self, mode): """ This test uses a custom SubDomain such that we end up with two loop nests with a data dependence across them inducing two halo exchanges, @@ -2497,7 +2497,7 @@ def define(self, dimensions): assert np.isclose(norm(v), 21.14994, atol=1e-5, rtol=0) @pytest.mark.parallel(mode=2) - def test_overriding_from_different_grid(self): + def test_overriding_from_different_grid(self, mode): """ MFE for issue #1629. """ @@ -2523,7 +2523,7 @@ def test_overriding_from_different_grid(self): assert np.all(u3.data[0, 3:-3, 3:-3] == 1.) 
@pytest.mark.parallel(mode=4) - def test_fission_due_to_antidep(self): + def test_fission_due_to_antidep(self, mode): grid = Grid(shape=(16, 16, 64), dtype=np.float64) u = TimeFunction(name='u', grid=grid, space_order=4) @@ -2604,7 +2604,7 @@ def norms(self): ((60, 70), 'OT2', 8, False), ]) @pytest.mark.parallel(mode=1) - def test_adjoint_codegen(self, shape, kernel, space_order, save): + def test_adjoint_codegen(self, shape, kernel, space_order, save, mode): solver = acoustic_setup(shape=shape, spacing=[15. for _ in shape], kernel=kernel, tn=500, space_order=space_order, nrec=130, preset='layers-isotropic', dtype=np.float64) @@ -2653,12 +2653,12 @@ def run_adjoint_F(self, nd): @pytest.mark.parametrize('nd', [1, 2, 3]) @pytest.mark.parallel(mode=[(4, 'basic'), (4, 'diag'), (4, 'overlap'), (4, 'overlap2'), (4, 'full')]) - def test_adjoint_F(self, nd): + def test_adjoint_F(self, nd, mode): self.run_adjoint_F(nd) @pytest.mark.parallel(mode=[(8, 'diag2'), (8, 'full')]) @switchconfig(openmp=False) - def test_adjoint_F_no_omp(self): + def test_adjoint_F_no_omp(self, mode): """ ``run_adjoint_F`` with OpenMP disabled. By disabling OpenMP, we can practically scale up to higher process counts. diff --git a/tests/test_operator.py b/tests/test_operator.py index 5698f8f913..82f39bfea4 100644 --- a/tests/test_operator.py +++ b/tests/test_operator.py @@ -1208,7 +1208,7 @@ def test_incomplete_override(self): assert False @pytest.mark.parallel(mode=1) - def test_new_distributor(self): + def test_new_distributor(self, mode): """ Test that `comm` and `nb` are correctly updated when a different distributor from that it was originally built with is required by an operator. diff --git a/tests/test_pickle.py b/tests/test_pickle.py index 1089ca5bf2..cad0e03a63 100644 --- a/tests/test_pickle.py +++ b/tests/test_pickle.py @@ -636,7 +636,7 @@ def test_elemental(self, pickle): assert str(op) == str(new_op) @pytest.mark.parallel(mode=[1]) - def test_mpi_objects(self, pickle): + def test_mpi_objects(self, pickle, mode): grid = Grid(shape=(4, 4, 4)) # Neighbours @@ -684,7 +684,7 @@ def test_threadid(self, pickle): assert tid.symbolic_max.name == new_tid.symbolic_max.name @pytest.mark.parallel(mode=[2]) - def test_mpi_grid(self, pickle): + def test_mpi_grid(self, pickle, mode): grid = Grid(shape=(4, 4, 4)) pkl_grid = pickle.dumps(grid) @@ -704,7 +704,7 @@ def test_mpi_grid(self, pickle): MPI.COMM_WORLD.Barrier() @pytest.mark.parallel(mode=[(1, 'full')]) - def test_mpi_fullmode_objects(self, pickle): + def test_mpi_fullmode_objects(self, pickle, mode): grid = Grid(shape=(4, 4, 4)) x, y, _ = grid.dimensions @@ -743,7 +743,7 @@ def test_mpi_fullmode_objects(self, pickle): assert v[1] == Min(d.symbolic_max, d.symbolic_min) @pytest.mark.parallel(mode=[(1, 'basic'), (1, 'full')]) - def test_mpi_operator(self, pickle): + def test_mpi_operator(self, pickle, mode): grid = Grid(shape=(4,)) f = TimeFunction(name='f', grid=grid) diff --git a/tests/test_sparse.py b/tests/test_sparse.py index 31b6af0fdd..136d1ef9e3 100644 --- a/tests/test_sparse.py +++ b/tests/test_sparse.py @@ -343,7 +343,7 @@ def test_precomputed_subpoints_inject_dt2(self): assert m.data[0, 40, 39] == pytest.approx(2.0) @pytest.mark.parallel(mode=4) - def test_mpi(self): + def test_mpi(self, mode): # Shape chosen to get a source in multiple ranks shape = (91, 91) grid = Grid(shape=shape) @@ -458,7 +458,7 @@ def test_subs(self, sptype): @switchconfig(safe_math=True) @pytest.mark.parallel(mode=[1, 4]) - def test_mpi_no_data(self): + def test_mpi_no_data(self, mode): 
grid = Grid((11, 11), extent=(10, 10)) time = grid.time_dim # Base object diff --git a/tests/test_subdomains.py b/tests/test_subdomains.py index 61441dea65..839cb2299d 100644 --- a/tests/test_subdomains.py +++ b/tests/test_subdomains.py @@ -168,7 +168,7 @@ def define(self, dimensions): @pytest.mark.parametrize('spec', sd_specs) @pytest.mark.parallel(mode=[2, 3]) - def test_subdomains_mpi(self, spec): + def test_subdomains_mpi(self, spec, mode): class sd0(SubDomain): name = 'd0' @@ -362,7 +362,7 @@ class MySubdomains2(SubDomainSet): assert((np.array(f.data[:]+g.data[:]) == expected).all()) @pytest.mark.parallel(mode=[(4, 'basic'), (4, 'overlap')]) - def test_subdomainset_mpi(self): + def test_subdomainset_mpi(self, mode): n_domains = 5
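
Note: the hunks above all make the same mechanical change — every `@pytest.mark.parallel(...)` test now takes a `mode` argument, which `pytest_generate_tests` in conftest.py fills in by parametrizing over the marker's `mode` entries, while the new `pytest_runtest_call` hookwrapper re-launches each resulting item under `mpiexec` with `DEVITO_MPI` set. What follows is a minimal, self-contained sketch of just the collection-time expansion, under the assumption of a project-specific `parallel(mode=...)` marker like the one in this patch; the names (e.g. `test_something_mpi`) are illustrative and the `mpiexec`/subprocess side shown in the diff is deliberately omitted:

    # conftest.py (sketch)
    import pytest

    def pytest_configure(config):
        config.addinivalue_line(
            "markers",
            "parallel(mode): run the test once per MPI configuration in `mode`")

    def pytest_generate_tests(metafunc):
        # Expand the custom `parallel` marker into a parametrization of the
        # `mode` fixture, so each MPI configuration becomes its own test item
        # rather than a single item looping over all modes internally.
        if "mode" in metafunc.fixturenames:
            for marker in metafunc.definition.iter_markers(name="parallel"):
                modes = marker.kwargs.get("mode", 2)
                if not isinstance(modes, (list, tuple)):
                    modes = [modes]
                metafunc.parametrize("mode", list(modes))

A test written against this pattern simply declares the fixture in its signature, exactly as the test files in this patch now do:

    @pytest.mark.parallel(mode=[2, (4, 'diag')])
    def test_something_mpi(mode):
        ...  # collected twice, once per entry in `mode`

Because pytest does not unpack tuples when a single argument name is parametrized, an entry such as (4, 'diag') reaches the test item intact as a (nprocs, scheme) pair, which is the form `parallel(item, m)` in conftest.py then parses before spawning the MPI run.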