diff --git a/lib/iris/tests/_shared_utils.py b/lib/iris/tests/_shared_utils.py index 91a2556cf5..207e2dd90a 100644 --- a/lib/iris/tests/_shared_utils.py +++ b/lib/iris/tests/_shared_utils.py @@ -802,7 +802,7 @@ def assert_equal_and_kind(value, expected): @contextlib.contextmanager -def cube_save_test( +def pp_cube_save_test( reference_txt_path, reference_cubes=None, reference_pp_path=None, @@ -827,7 +827,7 @@ def cube_save_test( into which the PP data to be tested should be saved. Example:: - with self.cube_save_test(reference_txt_path, reference_cubes=cubes) as temp_pp_path: + with self.pp_cube_save_test(reference_txt_path, reference_cubes=cubes) as temp_pp_path: iris.save(cubes, temp_pp_path) """ diff --git a/lib/iris/tests/test_pp_cf.py b/lib/iris/tests/test_pp_cf.py index 8b0af5a5c3..538ce7c385 100644 --- a/lib/iris/tests/test_pp_cf.py +++ b/lib/iris/tests/test_pp_cf.py @@ -3,16 +3,16 @@ # This file is part of Iris and is released under the BSD license. # See LICENSE in the root of the repository for full licensing details. -# import iris tests first so that some things can be initialised before importing anything else -import iris.tests as tests # isort:skip - import os import tempfile +import pytest + import iris import iris.coords from iris.fileformats.netcdf import _thread_safe_nc from iris.fileformats.pp import STASH +from iris.tests import _shared_utils import iris.util @@ -62,10 +62,14 @@ def callback_aaxzc_n10r13xy_b_pp(cube, field, filename): cube.add_aux_coord(height_coord) -@tests.skip_data -class TestAll(tests.IrisTest, tests.PPTest): +@_shared_utils.skip_data +class TestAll: _ref_dir = ("usecases", "pp_to_cf_conversion") + @pytest.fixture(autouse=True) + def _setup(self, request): + self.request = request + def _test_file(self, name): """Main test routine that is called for each of the files listed below.""" pp_path = self._src_pp_path(name) @@ -80,7 +84,9 @@ def _test_file(self, name): else: fname_name = name - self.assertCML(cubes, self._ref_dir + ("from_pp", fname_name + ".cml")) + _shared_utils.assert_CML( + self.request, cubes, self._ref_dir + ("from_pp", fname_name + ".cml") + ) # 2) Save the Cube and check the netCDF nc_filenames = [] @@ -99,7 +105,8 @@ def _test_file(self, name): ) # Check the netCDF file against CDL expected output. - self.assertCDL( + _shared_utils.assert_CDL( + self.request, file_nc, self._ref_dir + ("to_netcdf", "%s_%d.cdl" % (fname_name, index)), ) @@ -109,7 +116,8 @@ def _test_file(self, name): for index, nc_filename in enumerate(nc_filenames): # Read netCDF to Cube. 
cube = iris.load_cube(nc_filename) - self.assertCML( + _shared_utils.assert_CML( + self.request, cube, self._ref_dir + ("from_netcdf", "%s_%d.cml" % (fname_name, index)), ) @@ -122,15 +130,15 @@ def _test_file(self, name): self._test_pp_save(cubes, name) def _src_pp_path(self, name): - return tests.get_data_path(("PP", "cf_processing", name)) + return _shared_utils.get_data_path(("PP", "cf_processing", name)) def _test_pp_save(self, cubes, name): # If there's no existing reference file then make it from the *source* data - reference_txt_path = tests.get_result_path( + reference_txt_path = _shared_utils.get_result_path( self._ref_dir + ("to_pp", name + ".txt") ) reference_pp_path = self._src_pp_path(name) - with self.cube_save_test( + with _shared_utils.pp_cube_save_test( reference_txt_path, reference_pp_path=reference_pp_path ) as temp_pp_path: iris.save(cubes, temp_pp_path) @@ -187,7 +195,3 @@ def attach_tests(): attach_tests() - - -if __name__ == "__main__": - tests.main() diff --git a/lib/iris/tests/test_pp_module.py b/lib/iris/tests/test_pp_module.py index 3a8e988a4d..72ed851275 100644 --- a/lib/iris/tests/test_pp_module.py +++ b/lib/iris/tests/test_pp_module.py @@ -3,58 +3,54 @@ # This file is part of Iris and is released under the BSD license. # See LICENSE in the root of the repository for full licensing details. -# import iris tests first so that some things can be initialised before importing anything else -import iris.tests as tests # isort:skip - from copy import deepcopy import os from types import GeneratorType -import unittest -from unittest import mock import cftime from numpy.testing import assert_array_equal +import pytest -import iris.fileformats import iris.fileformats.pp as pp -import iris.util +from iris.tests import _shared_utils -@tests.skip_data -class TestPPCopy(tests.IrisTest): - def setUp(self): - self.filename = tests.get_data_path(("PP", "aPPglob1", "global.pp")) +@_shared_utils.skip_data +class TestPPCopy: + @pytest.fixture(autouse=True) + def _setup(self): + self.filename = _shared_utils.get_data_path(("PP", "aPPglob1", "global.pp")) def test_copy_field_deferred(self): field = next(pp.load(self.filename)) clone = field.copy() - self.assertEqual(field, clone) + assert field == clone clone.lbyr = 666 - self.assertNotEqual(field, clone) + assert field != clone def test_deepcopy_field_deferred(self): field = next(pp.load(self.filename)) clone = deepcopy(field) - self.assertEqual(field, clone) + assert field == clone clone.lbyr = 666 - self.assertNotEqual(field, clone) + assert field != clone def test_copy_field_non_deferred(self): field = next(pp.load(self.filename, True)) clone = field.copy() - self.assertEqual(field, clone) + assert field == clone clone.data[0][0] = 666 - self.assertNotEqual(field, clone) + assert field != clone def test_deepcopy_field_non_deferred(self): field = next(pp.load(self.filename, True)) clone = deepcopy(field) - self.assertEqual(field, clone) + assert field == clone clone.data[0][0] = 666 - self.assertNotEqual(field, clone) + assert field != clone -class IrisPPTest(tests.IrisTest): +class IrisPPTest: def check_pp(self, pp_fields, reference_filename): """Checks the given iterable of PPField objects matches the reference file, or creates the reference file if it doesn't exist. 
@@ -68,11 +64,11 @@ def check_pp(self, pp_fields, reference_filename): pp_field.data test_string = str(pp_fields) - reference_path = tests.get_result_path(reference_filename) + reference_path = _shared_utils.get_result_path(reference_filename) if os.path.isfile(reference_path): with open(reference_path, "r") as reference_fh: reference = "".join(reference_fh.readlines()) - self._assert_str_same( + _shared_utils._assert_str_same( reference + "\n", test_string + "\n", reference_filename, @@ -83,48 +79,49 @@ def check_pp(self, pp_fields, reference_filename): reference_fh.writelines(test_string) -class TestPPHeaderDerived(tests.IrisTest): - def setUp(self): +class TestPPHeaderDerived: + @pytest.fixture(autouse=True) + def _setup(self): self.pp = pp.PPField2() self.pp.lbuser = (0, 1, 2, 3, 4, 5, 6) self.pp.lbtim = 11 self.pp.lbproc = 65539 def test_standard_access(self): - self.assertEqual(self.pp.lbtim, 11) + assert self.pp.lbtim == 11 def test_lbtim_access(self): - self.assertEqual(self.pp.lbtim[0], 1) - self.assertEqual(self.pp.lbtim.ic, 1) + assert self.pp.lbtim[0] == 1 + assert self.pp.lbtim.ic == 1 def test_lbtim_setter(self): self.pp.lbtim[4] = 4 self.pp.lbtim[0] = 4 - self.assertEqual(self.pp.lbtim[0], 4) - self.assertEqual(self.pp.lbtim.ic, 4) + assert self.pp.lbtim[0] == 4 + assert self.pp.lbtim.ic == 4 self.pp.lbtim.ib = 9 - self.assertEqual(self.pp.lbtim.ib, 9) - self.assertEqual(self.pp.lbtim[1], 9) + assert self.pp.lbtim.ib == 9 + assert self.pp.lbtim[1] == 9 def test_set_lbuser(self): self.pp.stash = "m02s12i003" - self.assertEqual(self.pp.stash, pp.STASH(2, 12, 3)) + assert self.pp.stash == pp.STASH(2, 12, 3) self.pp.lbuser[6] = 5 - self.assertEqual(self.pp.stash, pp.STASH(5, 12, 3)) + assert self.pp.stash == pp.STASH(5, 12, 3) self.pp.lbuser[3] = 4321 - self.assertEqual(self.pp.stash, pp.STASH(5, 4, 321)) + assert self.pp.stash == pp.STASH(5, 4, 321) def test_set_stash(self): self.pp.stash = "m02s12i003" - self.assertEqual(self.pp.stash, pp.STASH(2, 12, 3)) + assert self.pp.stash == pp.STASH(2, 12, 3) self.pp.stash = pp.STASH(3, 13, 4) - self.assertEqual(self.pp.stash, pp.STASH(3, 13, 4)) - self.assertEqual(self.pp.lbuser[3], self.pp.stash.lbuser3()) - self.assertEqual(self.pp.lbuser[6], self.pp.stash.lbuser6()) + assert self.pp.stash == pp.STASH(3, 13, 4) + assert self.pp.lbuser[3] == self.pp.stash.lbuser3() + assert self.pp.lbuser[6] == self.pp.stash.lbuser6() - with self.assertRaises(ValueError): + with pytest.raises(ValueError): self.pp.stash = (4, 15, 5) def test_lbproc_bad_access(self): @@ -133,141 +130,144 @@ def test_lbproc_bad_access(self): except AttributeError: pass except Exception as err: - self.fail("Should return a better error: " + str(err)) + pytest.fail("Should return a better error: " + str(err)) -@tests.skip_data +@_shared_utils.skip_data class TestPPField_GlobalTemperature(IrisPPTest): - def setUp(self): - self.original_pp_filepath = tests.get_data_path(("PP", "aPPglob1", "global.pp")) + @pytest.fixture(autouse=True) + def _setup(self): + self.original_pp_filepath = _shared_utils.get_data_path( + ("PP", "aPPglob1", "global.pp") + ) self.r = list(pp.load(self.original_pp_filepath)) def test_full_file(self): self.check_pp(self.r[0:10], ("PP", "global_test.pp.txt")) def test_lbtim_access(self): - self.assertEqual(self.r[0].lbtim[0], 2) - self.assertEqual(self.r[0].lbtim.ic, 2) + assert self.r[0].lbtim[0] == 2 + assert self.r[0].lbtim.ic == 2 def test_t1_t2_access(self): field = self.r[0] calendar = "360_day" - self.assertEqual( - field.t1.timetuple(), - 
cftime.datetime(1994, 12, 1, 0, 0, calendar=calendar).timetuple(), + assert ( + field.t1.timetuple() + == cftime.datetime(1994, 12, 1, 0, 0, calendar=calendar).timetuple() ) - def test_save_single(self): - temp_filename = iris.util.create_temp_filename(".pp") + def test_save_single(self, tmp_path): + temp_filename = tmp_path / "foo.pp" with open(temp_filename, "wb") as temp_fh: self.r[0].save(temp_fh) - self.assertEqual( - self.file_checksum(temp_filename), - self.file_checksum(self.original_pp_filepath), - ) - os.remove(temp_filename) + assert _shared_utils.file_checksum( + temp_filename + ) == _shared_utils.file_checksum(self.original_pp_filepath) - def test_save_api(self): + def test_save_api(self, tmp_path): filepath = self.original_pp_filepath f = next(pp.load(filepath)) - temp_filename = iris.util.create_temp_filename(".pp") + temp_filename = tmp_path / "foo.pp" with open(temp_filename, "wb") as temp_fh: f.save(temp_fh) - self.assertEqual( - self.file_checksum(temp_filename), self.file_checksum(filepath) - ) + assert _shared_utils.file_checksum( + temp_filename + ) == _shared_utils.file_checksum(filepath) - os.remove(temp_filename) - -@tests.skip_data +@_shared_utils.skip_data class TestPackedPP(IrisPPTest): - def test_wgdos(self): - filepath = tests.get_data_path( + def test_wgdos(self, mocker, tmp_path): + filepath = _shared_utils.get_data_path( ("PP", "wgdos_packed", "nae.20100104-06_0001.pp") ) r = pp.load(filepath) # Check that the result is a generator and convert to a list so that we # can index and get the first one - self.assertEqual(type(r), GeneratorType) + assert isinstance(r, GeneratorType) r = list(r) self.check_pp(r, ("PP", "nae_unpacked.pp.txt")) # check that trying to save this field again raises an error # (we cannot currently write WGDOS packed fields without mo_pack) - temp_filename = iris.util.create_temp_filename(".pp") - with mock.patch("iris.fileformats.pp.mo_pack", None): - with self.assertRaises(NotImplementedError): - with open(temp_filename, "wb") as temp_fh: - r[0].save(temp_fh) - os.remove(temp_filename) - - @unittest.skipIf(pp.mo_pack is None, "Requires mo_pack.") - def test_wgdos_mo_pack(self): - filepath = tests.get_data_path( + temp_filename = tmp_path / "foo.pp" + mocker.patch("iris.fileformats.pp.mo_pack", None) + with pytest.raises(NotImplementedError): + with open(temp_filename, "wb") as temp_fh: + r[0].save(temp_fh) + + @pytest.mark.skipif(pp.mo_pack is None, reason="Requires mo_pack.") + def test_wgdos_mo_pack(self, tmp_path): + filepath = _shared_utils.get_data_path( ("PP", "wgdos_packed", "nae.20100104-06_0001.pp") ) orig_fields = pp.load(filepath) - with self.temp_filename(".pp") as temp_filename: - with open(temp_filename, "wb") as fh: - for field in orig_fields: - field.save(fh) - saved_fields = pp.load(temp_filename) - for orig_field, saved_field in zip(orig_fields, saved_fields): - assert_array_equal(orig_field.data, saved_field.data) + temp_filename = tmp_path / "foo.pp" + with open(temp_filename, "wb") as fh: + for field in orig_fields: + field.save(fh) + saved_fields = pp.load(temp_filename) + for orig_field, saved_field in zip(orig_fields, saved_fields): + assert_array_equal(orig_field.data, saved_field.data) - def test_rle(self): - r = pp.load(tests.get_data_path(("PP", "ocean_rle", "ocean_rle.pp"))) + def test_rle(self, tmp_path): + r = pp.load(_shared_utils.get_data_path(("PP", "ocean_rle", "ocean_rle.pp"))) # Check that the result is a generator and convert to a list so that we # can index and get the first one - 
self.assertEqual(type(r), GeneratorType) + assert isinstance(r, GeneratorType) r = list(r) self.check_pp(r, ("PP", "rle_unpacked.pp.txt")) # check that trying to save this field again raises an error # (we cannot currently write RLE packed fields) - with self.temp_filename(".pp") as temp_filename: - with self.assertRaises(NotImplementedError): - with open(temp_filename, "wb") as temp_fh: - r[0].save(temp_fh) + temp_filename = tmp_path / "foo.pp" + with pytest.raises(NotImplementedError): + with open(temp_filename, "wb") as temp_fh: + r[0].save(temp_fh) -@tests.skip_data +@_shared_utils.skip_data class TestPPFile(IrisPPTest): def test_lots_of_extra_data(self): r = pp.load( - tests.get_data_path( + _shared_utils.get_data_path( ("PP", "cf_processing", "HadCM2_ts_SAT_ann_18602100.b.pp") ) ) r = list(r) - self.assertEqual(r[0].lbcode.ix, 13) - self.assertEqual(r[0].lbcode.iy, 23) - self.assertEqual(len(r[0].lbcode), 5) + assert r[0].lbcode.ix == 13 + assert r[0].lbcode.iy == 23 + assert len(r[0].lbcode) == 5 self.check_pp(r, ("PP", "extra_data_time_series.pp.txt")) -@tests.skip_data +@_shared_utils.skip_data class TestPPFileExtraXData(IrisPPTest): - def setUp(self): - self.original_pp_filepath = tests.get_data_path(("PP", "ukV1", "ukVpmslont.pp")) + @pytest.fixture(autouse=True) + def _setup(self): + self.original_pp_filepath = _shared_utils.get_data_path( + ("PP", "ukV1", "ukVpmslont.pp") + ) self.r = list(pp.load(self.original_pp_filepath))[0:5] def test_full_file(self): self.check_pp(self.r, ("PP", "extra_x_data.pp.txt")) - def test_save_single(self): - filepath = tests.get_data_path(("PP", "ukV1", "ukVpmslont_first_field.pp")) + def test_save_single(self, tmp_path): + filepath = _shared_utils.get_data_path( + ("PP", "ukV1", "ukVpmslont_first_field.pp") + ) f = next(pp.load(filepath)) - temp_filename = iris.util.create_temp_filename(".pp") + temp_filename = tmp_path / "foo.pp" with open(temp_filename, "wb") as temp_fh: f.save(temp_fh) @@ -275,36 +275,36 @@ def test_save_single(self): # force the data to be loaded (this was done for f when save was run) s.data - self._assert_str_same( + _shared_utils._assert_str_same( str(s) + "\n", str(f) + "\n", "", type_comparison_name="PP files" ) - self.assertEqual( - self.file_checksum(temp_filename), self.file_checksum(filepath) - ) - os.remove(temp_filename) + assert _shared_utils.file_checksum( + temp_filename + ) == _shared_utils.file_checksum(filepath) -@tests.skip_data +@_shared_utils.skip_data class TestPPFileWithExtraCharacterData(IrisPPTest): - def setUp(self): - self.original_pp_filepath = tests.get_data_path( + @pytest.fixture(autouse=True) + def _setup(self): + self.original_pp_filepath = _shared_utils.get_data_path( ("PP", "globClim1", "dec_subset.pp") ) self.r = pp.load(self.original_pp_filepath) self.r_loaded_data = pp.load(self.original_pp_filepath, read_data=True) # Check that the result is a generator and convert to a list so that we can index and get the first one - self.assertEqual(type(self.r), GeneratorType) + assert isinstance(self.r, GeneratorType) self.r = list(self.r) - self.assertEqual(type(self.r_loaded_data), GeneratorType) + assert isinstance(self.r_loaded_data, GeneratorType) self.r_loaded_data = list(self.r_loaded_data) def test_extra_field_title(self): - self.assertEqual( - self.r[0].field_title, - "AJHQA Time mean !C Atmos u compnt of wind after timestep at 9.998 metres !C 01/12/2007 00:00 -> 01/01/2008 00:00", + assert ( + self.r[0].field_title + == "AJHQA Time mean !C Atmos u compnt of wind after timestep at 9.998 
metres !C 01/12/2007 00:00 -> 01/01/2008 00:00" ) def test_full_file(self): @@ -314,11 +314,13 @@ def test_full_file(self): ("PP", "extra_char_data.w_data_loaded.pp.txt"), ) - def test_save_single(self): - filepath = tests.get_data_path(("PP", "model_comp", "dec_first_field.pp")) + def test_save_single(self, tmp_path): + filepath = _shared_utils.get_data_path( + ("PP", "model_comp", "dec_first_field.pp") + ) f = next(pp.load(filepath)) - temp_filename = iris.util.create_temp_filename(".pp") + temp_filename = tmp_path / "foo.pp" with open(temp_filename, "wb") as temp_fh: f.save(temp_fh) @@ -326,60 +328,59 @@ def test_save_single(self): # force the data to be loaded (this was done for f when save was run) s.data - self._assert_str_same( + _shared_utils._assert_str_same( str(s) + "\n", str(f) + "\n", "", type_comparison_name="PP files" ) - self.assertEqual( - self.file_checksum(temp_filename), self.file_checksum(filepath) - ) - os.remove(temp_filename) + assert _shared_utils.file_checksum( + temp_filename + ) == _shared_utils.file_checksum(filepath) -class TestSplittableInt(tests.IrisTest): +class TestSplittableInt: def test_3(self): t = pp.SplittableInt(3) - self.assertEqual(t[0], 3) + assert t[0] == 3 def test_grow_str_list(self): t = pp.SplittableInt(3) t[1] = 3 - self.assertEqual(t[1], 3) + assert t[1] == 3 t[5] = 4 - self.assertEqual(t[5], 4) + assert t[5] == 4 - self.assertEqual(int(t), 400033) + assert int(t) == 400033 - self.assertEqual(t, 400033) - self.assertNotEqual(t, 33) + assert t == 400033 + assert t != 33 - self.assertTrue(t >= 400033) - self.assertFalse(t >= 400034) + assert t >= 400033 + assert not t >= 400034 - self.assertTrue(t <= 400033) - self.assertFalse(t <= 400032) + assert t <= 400033 + assert not t <= 400032 - self.assertTrue(t > 400032) - self.assertFalse(t > 400034) + assert t > 400032 + assert not t > 400034 - self.assertTrue(t < 400034) - self.assertFalse(t < 400032) + assert t < 400034 + assert not t < 400032 def test_name_mapping(self): t = pp.SplittableInt(33214, {"ones": 0, "tens": 1, "hundreds": 2}) - self.assertEqual(t.ones, 4) - self.assertEqual(t.tens, 1) - self.assertEqual(t.hundreds, 2) + assert t.ones == 4 + assert t.tens == 1 + assert t.hundreds == 2 t.ones = 9 t.tens = 4 t.hundreds = 0 - self.assertEqual(t.ones, 9) - self.assertEqual(t.tens, 4) - self.assertEqual(t.hundreds, 0) + assert t.ones == 9 + assert t.tens == 4 + assert t.hundreds == 0 def test_name_mapping_multi_index(self): t = pp.SplittableInt( @@ -390,69 +391,66 @@ def test_name_mapping_multi_index(self): "backwards": slice(None, None, -1), }, ) - self.assertEqual(t.weird_number, 324) - self.assertEqual(t.last_few, 13) - self.assertRaises(ValueError, setattr, t, "backwards", 1) - self.assertRaises(ValueError, setattr, t, "last_few", 1) - self.assertEqual(t.backwards, 41233) - self.assertEqual(t, 33214) + assert t.weird_number == 324 + assert t.last_few == 13 + pytest.raises(ValueError, setattr, t, "backwards", 1) + pytest.raises(ValueError, setattr, t, "last_few", 1) + assert t.backwards == 41233 + assert t == 33214 t.weird_number = 99 # notice that this will zero the 5th number - self.assertEqual(t, 3919) + assert t == 3919 t.weird_number = 7899 - self.assertEqual(t, 7083919) + assert t == 7083919 t.foo = 1 t = pp.SplittableInt(33214, {"ix": slice(None, 2), "iy": slice(2, 4)}) - self.assertEqual(t.ix, 14) - self.assertEqual(t.iy, 32) + assert t.ix == 14 + assert t.iy == 32 t.ix = 21 - self.assertEqual(t, 33221) + assert t == 33221 t = pp.SplittableInt(33214, {"ix": slice(-1, 2)}) - 
self.assertEqual(t.ix, 0) + assert t.ix == 0 t = pp.SplittableInt(4, {"ix": slice(None, 2), "iy": slice(2, 4)}) - self.assertEqual(t.ix, 4) - self.assertEqual(t.iy, 0) + assert t.ix == 4 + assert t.iy == 0 def test_33214(self): t = pp.SplittableInt(33214) - self.assertEqual(t[4], 3) - self.assertEqual(t[3], 3) - self.assertEqual(t[2], 2) - self.assertEqual(t[1], 1) - self.assertEqual(t[0], 4) + assert t[4] == 3 + assert t[3] == 3 + assert t[2] == 2 + assert t[1] == 1 + assert t[0] == 4 # The rest should be zero for i in range(5, 100): - self.assertEqual(t[i], 0) + assert t[i] == 0 def test_negative_number(self): - self.assertRaises(ValueError, pp.SplittableInt, -5) - try: + with pytest.raises( + ValueError, + match="Negative numbers not supported with splittable integers object", + ): _ = pp.SplittableInt(-5) - except ValueError as err: - self.assertEqual( - str(err), - "Negative numbers not supported with splittable integers object", - ) -class TestSplittableIntEquality(tests.IrisTest): +class TestSplittableIntEquality: def test_not_implemented(self): class Terry: pass sin = pp.SplittableInt(0) - self.assertIs(sin.__eq__(Terry()), NotImplemented) - self.assertIs(sin.__ne__(Terry()), NotImplemented) + assert sin.__eq__(Terry()) is NotImplemented + assert sin.__ne__(Terry()) is NotImplemented -class TestPPDataProxyEquality(tests.IrisTest): +class TestPPDataProxyEquality: def test_not_implemented(self): class Terry: pass @@ -467,19 +465,15 @@ class Terry: "beans", "eggs", ) - self.assertIs(pox.__eq__(Terry()), NotImplemented) - self.assertIs(pox.__ne__(Terry()), NotImplemented) + assert pox.__eq__(Terry()) is NotImplemented + assert pox.__ne__(Terry()) is NotImplemented -class TestPPFieldEquality(tests.IrisTest): +class TestPPFieldEquality: def test_not_implemented(self): class Terry: pass pox = pp.PPField3() - self.assertIs(pox.__eq__(Terry()), NotImplemented) - self.assertIs(pox.__ne__(Terry()), NotImplemented) - - -if __name__ == "__main__": - tests.main() + assert pox.__eq__(Terry()) is NotImplemented + assert pox.__ne__(Terry()) is NotImplemented diff --git a/lib/iris/tests/test_pp_stash.py b/lib/iris/tests/test_pp_stash.py index e5b6953bf3..8123336c03 100644 --- a/lib/iris/tests/test_pp_stash.py +++ b/lib/iris/tests/test_pp_stash.py @@ -3,104 +3,98 @@ # This file is part of Iris and is released under the BSD license. # See LICENSE in the root of the repository for full licensing details. -# import iris tests first so that some things can be initialised before importing anything else -import iris.tests as tests # isort:skip +import pytest import iris import iris.fileformats.pp import iris.io +from iris.tests import _shared_utils import iris.tests.stock import iris.util -class TestPPStash(tests.IrisTest): - @tests.skip_data +class TestPPStash: + @_shared_utils.skip_data def test_cube_attributes(self): - cube = tests.stock.simple_pp() - self.assertEqual("m01s16i203", cube.attributes["STASH"]) - self.assertNotEqual("m01s16i999", cube.attributes["STASH"]) - self.assertEqual(cube.attributes["STASH"], "m01s16i203") - self.assertNotEqual(cube.attributes["STASH"], "m01s16i999") - - @tests.skip_data + cube = iris.tests.stock.simple_pp() + assert "m01s16i203" == cube.attributes["STASH"] + assert "m01s16i999" != cube.attributes["STASH"] + # Also exercise iris.fileformats.pp.STASH eq and ne methods. 
+ assert cube.attributes["STASH"] == "m01s16i203" + assert cube.attributes["STASH"] != "m01s16i999" + + @_shared_utils.skip_data def test_ppfield(self): - data_path = tests.get_data_path(("PP", "simple_pp", "global.pp")) + data_path = _shared_utils.get_data_path(("PP", "simple_pp", "global.pp")) pps = iris.fileformats.pp.load(data_path) for pp in pps: - self.assertEqual("m01s16i203", pp.stash) - self.assertNotEqual("m01s16i999", pp.stash) - self.assertEqual(pp.stash, "m01s16i203") - self.assertNotEqual(pp.stash, "m01s16i999") + assert "m01s16i203" == pp.stash + assert "m01s16i999" != pp.stash + # Also exercise iris.fileformats.pp.STASH eq and ne methods. + assert pp.stash == "m01s16i203" + assert pp.stash != "m01s16i999" def test_stash_against_stash(self): - self.assertEqual( - iris.fileformats.pp.STASH(1, 2, 3), - iris.fileformats.pp.STASH(1, 2, 3), - ) - self.assertNotEqual( - iris.fileformats.pp.STASH(1, 2, 3), - iris.fileformats.pp.STASH(2, 3, 4), - ) + assert iris.fileformats.pp.STASH(1, 2, 3) == iris.fileformats.pp.STASH(1, 2, 3) + assert iris.fileformats.pp.STASH(1, 2, 3) != iris.fileformats.pp.STASH(2, 3, 4) def test_stash_against_str(self): - self.assertEqual(iris.fileformats.pp.STASH(1, 2, 3), "m01s02i003") - self.assertEqual("m01s02i003", iris.fileformats.pp.STASH(1, 2, 3)) - self.assertNotEqual(iris.fileformats.pp.STASH(1, 2, 3), "m02s03i004") - self.assertNotEqual("m02s03i004", iris.fileformats.pp.STASH(1, 2, 3)) + # Also exercise iris.fileformats.pp.STASH eq and ne methods. + assert iris.fileformats.pp.STASH(1, 2, 3) == "m01s02i003" + assert "m01s02i003" == iris.fileformats.pp.STASH(1, 2, 3) + assert iris.fileformats.pp.STASH(1, 2, 3) != "m02s03i004" + assert "m02s03i004" != iris.fileformats.pp.STASH(1, 2, 3) def test_irregular_stash_str(self): - self.assertEqual(iris.fileformats.pp.STASH(1, 2, 3), "m01s02i0000000003") - self.assertEqual(iris.fileformats.pp.STASH(1, 2, 3), "m01s02i3") - self.assertEqual(iris.fileformats.pp.STASH(1, 2, 3), "m01s2i3") - self.assertEqual(iris.fileformats.pp.STASH(1, 2, 3), "m1s2i3") - - self.assertEqual("m01s02i0000000003", iris.fileformats.pp.STASH(1, 2, 3)) - self.assertEqual("m01s02i3", iris.fileformats.pp.STASH(1, 2, 3)) - self.assertEqual("m01s2i3", iris.fileformats.pp.STASH(1, 2, 3)) - self.assertEqual("m1s2i3", iris.fileformats.pp.STASH(1, 2, 3)) - - self.assertNotEqual(iris.fileformats.pp.STASH(2, 3, 4), "m01s02i0000000003") - self.assertNotEqual(iris.fileformats.pp.STASH(2, 3, 4), "m01s02i3") - self.assertNotEqual(iris.fileformats.pp.STASH(2, 3, 4), "m01s2i3") - self.assertNotEqual(iris.fileformats.pp.STASH(2, 3, 4), "m1s2i3") - - self.assertNotEqual("m01s02i0000000003", iris.fileformats.pp.STASH(2, 3, 4)) - self.assertNotEqual("m01s02i3", iris.fileformats.pp.STASH(2, 3, 4)) - self.assertNotEqual("m01s2i3", iris.fileformats.pp.STASH(2, 3, 4)) - self.assertNotEqual("m1s2i3", iris.fileformats.pp.STASH(2, 3, 4)) - - self.assertEqual(iris.fileformats.pp.STASH.from_msi("M01s02i003"), "m01s02i003") - self.assertEqual("m01s02i003", iris.fileformats.pp.STASH.from_msi("M01s02i003")) + # Also exercise iris.fileformats.pp.STASH eq and ne methods. 
+ assert iris.fileformats.pp.STASH(1, 2, 3) == "m01s02i0000000003" + assert iris.fileformats.pp.STASH(1, 2, 3) == "m01s02i3" + assert iris.fileformats.pp.STASH(1, 2, 3) == "m01s2i3" + assert iris.fileformats.pp.STASH(1, 2, 3) == "m1s2i3" + + assert "m01s02i0000000003" == iris.fileformats.pp.STASH(1, 2, 3) + assert "m01s02i3" == iris.fileformats.pp.STASH(1, 2, 3) + assert "m01s2i3" == iris.fileformats.pp.STASH(1, 2, 3) + assert "m1s2i3" == iris.fileformats.pp.STASH(1, 2, 3) + + assert iris.fileformats.pp.STASH(2, 3, 4) != "m01s02i0000000003" + assert iris.fileformats.pp.STASH(2, 3, 4) != "m01s02i3" + assert iris.fileformats.pp.STASH(2, 3, 4) != "m01s2i3" + assert iris.fileformats.pp.STASH(2, 3, 4) != "m1s2i3" + + assert "m01s02i0000000003" != iris.fileformats.pp.STASH(2, 3, 4) + assert "m01s02i3" != iris.fileformats.pp.STASH(2, 3, 4) + assert "m01s2i3" != iris.fileformats.pp.STASH(2, 3, 4) + assert "m1s2i3" != iris.fileformats.pp.STASH(2, 3, 4) + + assert iris.fileformats.pp.STASH.from_msi("M01s02i003") == "m01s02i003" + assert "m01s02i003" == iris.fileformats.pp.STASH.from_msi("M01s02i003") def test_illegal_stash_str_range(self): - self.assertEqual(iris.fileformats.pp.STASH(0, 2, 3), "m??s02i003") - self.assertNotEqual(iris.fileformats.pp.STASH(0, 2, 3), "m01s02i003") + # Also exercise iris.fileformats.pp.STASH eq and ne methods. + assert iris.fileformats.pp.STASH(0, 2, 3) == "m??s02i003" + assert iris.fileformats.pp.STASH(0, 2, 3) != "m01s02i003" - self.assertEqual("m??s02i003", iris.fileformats.pp.STASH(0, 2, 3)) - self.assertNotEqual("m01s02i003", iris.fileformats.pp.STASH(0, 2, 3)) + assert "m??s02i003" == iris.fileformats.pp.STASH(0, 2, 3) + assert "m01s02i003" != iris.fileformats.pp.STASH(0, 2, 3) - self.assertEqual(iris.fileformats.pp.STASH(0, 2, 3), "m??s02i003") - self.assertEqual(iris.fileformats.pp.STASH(0, 2, 3), "m00s02i003") - self.assertEqual("m??s02i003", iris.fileformats.pp.STASH(0, 2, 3)) - self.assertEqual("m00s02i003", iris.fileformats.pp.STASH(0, 2, 3)) + assert iris.fileformats.pp.STASH(0, 2, 3) == "m??s02i003" + assert iris.fileformats.pp.STASH(0, 2, 3) == "m00s02i003" + assert "m??s02i003" == iris.fileformats.pp.STASH(0, 2, 3) + assert "m00s02i003" == iris.fileformats.pp.STASH(0, 2, 3) - self.assertEqual(iris.fileformats.pp.STASH(100, 2, 3), "m??s02i003") - self.assertEqual(iris.fileformats.pp.STASH(100, 2, 3), "m100s02i003") - self.assertEqual("m??s02i003", iris.fileformats.pp.STASH(100, 2, 3)) - self.assertEqual("m100s02i003", iris.fileformats.pp.STASH(100, 2, 3)) + assert iris.fileformats.pp.STASH(100, 2, 3) == "m??s02i003" + assert iris.fileformats.pp.STASH(100, 2, 3) == "m100s02i003" + assert "m??s02i003" == iris.fileformats.pp.STASH(100, 2, 3) + assert "m100s02i003" == iris.fileformats.pp.STASH(100, 2, 3) def test_illegal_stash_stash_range(self): - self.assertEqual( - iris.fileformats.pp.STASH(0, 2, 3), - iris.fileformats.pp.STASH(0, 2, 3), - ) - self.assertEqual( - iris.fileformats.pp.STASH(100, 2, 3), - iris.fileformats.pp.STASH(100, 2, 3), + assert iris.fileformats.pp.STASH(0, 2, 3) == iris.fileformats.pp.STASH(0, 2, 3) + assert iris.fileformats.pp.STASH(100, 2, 3) == iris.fileformats.pp.STASH( + 100, 2, 3 ) - self.assertEqual( - iris.fileformats.pp.STASH(100, 2, 3), - iris.fileformats.pp.STASH(999, 2, 3), + assert iris.fileformats.pp.STASH(100, 2, 3) == iris.fileformats.pp.STASH( + 999, 2, 3 ) def test_illegal_stash_format(self): @@ -112,9 +106,9 @@ def test_illegal_stash_format(self): for test_value, reference in test_values: msg = "Expected STASH code .* 
{!r}".format(test_value) - with self.assertRaisesRegex(ValueError, msg): + with pytest.raises(ValueError, match=msg): test_value == iris.fileformats.pp.STASH(*reference) - with self.assertRaisesRegex(ValueError, msg): + with pytest.raises(ValueError, match=msg): iris.fileformats.pp.STASH(*reference) == test_value def test_illegal_stash_type(self): @@ -125,16 +119,12 @@ def test_illegal_stash_type(self): for test_value, reference in test_values: msg = "Expected STASH code .* {!r}".format(test_value) - with self.assertRaisesRegex(TypeError, msg): + with pytest.raises(TypeError, match=msg): iris.fileformats.pp.STASH.from_msi(test_value) == reference - with self.assertRaisesRegex(TypeError, msg): + with pytest.raises(TypeError, match=msg): reference == iris.fileformats.pp.STASH.from_msi(test_value) def test_stash_lbuser(self): stash = iris.fileformats.pp.STASH(2, 32, 456) - self.assertEqual(stash.lbuser6(), 2) - self.assertEqual(stash.lbuser3(), 32456) - - -if __name__ == "__main__": - tests.main() + assert stash.lbuser6() == 2 + assert stash.lbuser3() == 32456 diff --git a/lib/iris/tests/test_pp_to_cube.py b/lib/iris/tests/test_pp_to_cube.py index da49ff8188..eeee76273f 100644 --- a/lib/iris/tests/test_pp_to_cube.py +++ b/lib/iris/tests/test_pp_to_cube.py @@ -3,26 +3,28 @@ # This file is part of Iris and is released under the BSD license. # See LICENSE in the root of the repository for full licensing details. -# import iris tests first so that some things can be initialised before importing anything else -import iris.tests as tests # isort:skip +from uuid import uuid4 -import os +import pytest import iris import iris.fileformats.pp import iris.fileformats.pp_load_rules import iris.fileformats.rules import iris.io +from iris.tests import _shared_utils import iris.tests.stock import iris.util -@tests.skip_data -class TestPPLoadCustom(tests.IrisTest): - def setUp(self): +@_shared_utils.skip_data +class TestPPLoadCustom: + @pytest.fixture(autouse=True) + def _setup(self, request): self.subcubes = iris.cube.CubeList() - filename = tests.get_data_path(("PP", "aPPglob1", "global.pp")) + filename = _shared_utils.get_data_path(("PP", "aPPglob1", "global.pp")) self.template = next(iris.fileformats.pp.load(filename)) + self.request = request def _field_to_cube(self, field): cube, _, _ = iris.fileformats.rules._make_cube( @@ -38,7 +40,7 @@ def test_lbtim_2(self): cube = self._field_to_cube(field) self.subcubes.append(cube) cube = self.subcubes.merge()[0] - self.assertCML(cube, ("pp_load_rules", "lbtim_2.cml")) + _shared_utils.assert_CML(self.request, cube, ("pp_load_rules", "lbtim_2.cml")) def _ocean_depth(self, bounded=False): lbuser = list(self.template.lbuser) @@ -62,16 +64,21 @@ def _ocean_depth(self, bounded=False): def test_ocean_depth(self): self._ocean_depth() cube = self.subcubes.merge()[0] - self.assertCML(cube, ("pp_load_rules", "ocean_depth.cml")) + _shared_utils.assert_CML( + self.request, cube, ("pp_load_rules", "ocean_depth.cml") + ) def test_ocean_depth_bounded(self): self._ocean_depth(bounded=True) cube = self.subcubes.merge()[0] - self.assertCML(cube, ("pp_load_rules", "ocean_depth_bounded.cml")) + _shared_utils.assert_CML( + self.request, cube, ("pp_load_rules", "ocean_depth_bounded.cml") + ) -class TestReferences(tests.IrisTest): - def setUp(self): +class TestReferences: + @pytest.fixture(autouse=True) + def _setup(self): target = iris.tests.stock.simple_2d() target.data = target.data.astype("f4") self.target = target @@ -82,7 +89,7 @@ def test_regrid_missing_coord(self): # coords, 
ensure the re-grid fails nicely - i.e. returns None. self.target.remove_coord("bar") new_ref = iris.fileformats.rules._ensure_aligned({}, self.ref, self.target) - self.assertIsNone(new_ref) + assert new_ref is None def test_regrid_codimension(self): # If the target cube has two of the source dimension coords @@ -93,48 +100,56 @@ def test_regrid_codimension(self): new_foo.rename("foo") self.target.add_aux_coord(new_foo, 0) new_ref = iris.fileformats.rules._ensure_aligned({}, self.ref, self.target) - self.assertIsNone(new_ref) + assert new_ref is None def test_regrid_identity(self): new_ref = iris.fileformats.rules._ensure_aligned({}, self.ref, self.target) # Bounds don't make it through the re-grid process self.ref.coord("bar").bounds = None self.ref.coord("foo").bounds = None - self.assertEqual(new_ref, self.ref) + assert new_ref == self.ref -@tests.skip_data -class TestPPLoading(tests.IrisTest): - def test_simple(self): +@_shared_utils.skip_data +class TestPPLoading: + def test_simple(self, request): cube = iris.tests.stock.simple_pp() - self.assertCML(cube, ("cube_io", "pp", "load", "global.cml")) + _shared_utils.assert_CML(request, cube, ("cube_io", "pp", "load", "global.cml")) + +@_shared_utils.skip_data +class TestPPLoadRules: + @pytest.fixture(autouse=True) + def _setup(self, request): + self.request = request -@tests.skip_data -class TestPPLoadRules(tests.IrisTest): def test_pp_load_rules(self): # Test PP loading and rule evaluation. cube = iris.tests.stock.simple_pp() - self.assertCML(cube, ("pp_load_rules", "global.cml")) + _shared_utils.assert_CML(self.request, cube, ("pp_load_rules", "global.cml")) - data_path = tests.get_data_path(("PP", "rotated_uk", "rotated_uk.pp")) + data_path = _shared_utils.get_data_path(("PP", "rotated_uk", "rotated_uk.pp")) cube = iris.load(data_path)[0] - self.assertCML(cube, ("pp_load_rules", "rotated_uk.cml")) + _shared_utils.assert_CML( + self.request, cube, ("pp_load_rules", "rotated_uk.cml") + ) def test_lbproc(self): - data_path = tests.get_data_path( + data_path = _shared_utils.get_data_path( ("PP", "meanMaxMin", "200806081200__qwpb.T24.pp") ) # Set up standard name and T+24 constraint constraint = iris.Constraint("air_temperature", forecast_period=24) cubes = iris.load(data_path, constraint) cubes = iris.cube.CubeList([cubes[0], cubes[3], cubes[1], cubes[2], cubes[4]]) - self.assertCML(cubes, ("pp_load_rules", "lbproc_mean_max_min.cml")) + _shared_utils.assert_CML( + self.request, cubes, ("pp_load_rules", "lbproc_mean_max_min.cml") + ) - def test_cell_methods(self): + def test_cell_methods(self, tmp_path): # Test cell methods are created for correct values of lbproc - orig_file = tests.get_data_path(("PP", "aPPglob1", "global.pp")) + orig_file = _shared_utils.get_data_path(("PP", "aPPglob1", "global.pp")) # Values that result in cell methods being created cell_method_values = { @@ -158,7 +173,7 @@ def test_cell_methods(self): f.lbproc = value # set value # Write out pp file - temp_filename = iris.util.create_temp_filename(".pp") + temp_filename = (tmp_path / str(uuid4())).with_suffix(".pp") with open(temp_filename, "wb") as temp_fh: f.save(temp_fh) @@ -167,16 +182,14 @@ def test_cell_methods(self): if value in cell_method_values: # Check for cell method on cube - self.assertEqual(cube.cell_methods[0].method, cell_method_values[value]) + assert cube.cell_methods[0].method == cell_method_values[value] else: # Check no cell method was created for values other than 128, 4096, 8192 - self.assertEqual(len(cube.cell_methods), 0) + assert 
len(cube.cell_methods) == 0 - os.remove(temp_filename) - - def test_process_flags(self): + def test_process_flags(self, tmp_path): # Test that process flags are created for correct values of lbproc - orig_file = tests.get_data_path(("PP", "aPPglob1", "global.pp")) + orig_file = _shared_utils.get_data_path(("PP", "aPPglob1", "global.pp")) # Values that result in process flags attribute NOT being created omit_process_flags_values = (64, 128, 4096, 8192) @@ -187,7 +200,7 @@ def test_process_flags(self): f.lbproc = value # set value # Write out pp file - temp_filename = iris.util.create_temp_filename(".pp") + temp_filename = (tmp_path / str(uuid4())).with_suffix(".pp") with open(temp_filename, "wb") as temp_fh: f.save(temp_fh) @@ -196,16 +209,14 @@ def test_process_flags(self): if value in omit_process_flags_values: # Check ukmo__process_flags attribute not created - self.assertEqual(cube.attributes.get("ukmo__process_flags", None), None) + assert cube.attributes.get("ukmo__process_flags", None) is None else: # Check ukmo__process_flags attribute contains correct values - self.assertIn( - iris.fileformats.pp.lbproc_map[value], - cube.attributes["ukmo__process_flags"], + assert ( + iris.fileformats.pp.lbproc_map[value] + in cube.attributes["ukmo__process_flags"] ) - os.remove(temp_filename) - # Test multiple flag values multiple_bit_values = ((128, 32), (4096, 1024), (8192, 1024)) @@ -220,7 +231,7 @@ def test_process_flags(self): f.lbproc = sum(bit_values) # set value # Write out pp file - temp_filename = iris.util.create_temp_filename(".pp") + temp_filename = (tmp_path / str(uuid4())).with_suffix(".pp") with open(temp_filename, "wb") as temp_fh: f.save(temp_fh) @@ -228,14 +239,6 @@ def test_process_flags(self): cube = iris.load_cube(temp_filename) # Check the process flags created - self.assertEqual( - set(cube.attributes["ukmo__process_flags"]), - set(multiple_map[sum(bit_values)]), - "Mismatch between expected and actual process flags.", - ) - - os.remove(temp_filename) - - -if __name__ == "__main__": - tests.main() + assert set(cube.attributes["ukmo__process_flags"]) == set( + multiple_map[sum(bit_values)] + ), "Mismatch between expected and actual process flags." diff --git a/lib/iris/tests/test_util.py b/lib/iris/tests/test_util.py index 56774f89f8..dabe90581c 100644 --- a/lib/iris/tests/test_util.py +++ b/lib/iris/tests/test_util.py @@ -4,30 +4,28 @@ # See LICENSE in the root of the repository for full licensing details. 
"""Test iris.util.""" -# import iris tests first so that some things can be initialised before -# importing anything else -import iris.tests as tests # isort:skip - import inspect from io import StringIO import cf_units import numpy as np +import pytest import iris.analysis import iris.coords +from iris.tests import _shared_utils import iris.tests.stock as stock import iris.util -class TestMonotonic(tests.IrisTest): - def assertMonotonic(self, array, direction=None, **kwargs): +class TestMonotonic: + def assert_monotonic(self, array, direction=None, **kwargs): if direction is not None: mono, dir = iris.util.monotonic(array, return_direction=True, **kwargs) if not mono: - self.fail("Array was not monotonic:/n %r" % array) + pytest.fail("Array was not monotonic:/n %r" % array) if dir != np.sign(direction): - self.fail( + pytest.fail( "Array was monotonic but not in the direction expected:" "/n + requested direction: %s/n + resultant direction: %s" % (direction, dir) @@ -35,51 +33,52 @@ def assertMonotonic(self, array, direction=None, **kwargs): else: mono = iris.util.monotonic(array, **kwargs) if not mono: - self.fail("Array was not monotonic:/n %r" % array) + pytest.fail("Array was not monotonic:/n %r" % array) - def assertNotMonotonic(self, array, **kwargs): + def assert_not_monotonic(self, array, **kwargs): mono = iris.util.monotonic(array, **kwargs) if mono: - self.fail("Array was monotonic when it shouldn't be:/n %r" % array) + pytest.fail("Array was monotonic when it shouldn't be:/n %r" % array) def test_monotonic_pve(self): a = np.array([3, 4, 5.3]) - self.assertMonotonic(a) - self.assertMonotonic(a, direction=1) + self.assert_monotonic(a) + self.assert_monotonic(a, direction=1) # test the reverse for negative monotonic. a = a[::-1] - self.assertMonotonic(a) - self.assertMonotonic(a, direction=-1) + self.assert_monotonic(a) + self.assert_monotonic(a, direction=-1) def test_not_monotonic(self): b = np.array([3, 5.3, 4]) - self.assertNotMonotonic(b) + self.assert_not_monotonic(b) def test_monotonic_strict(self): b = np.array([3, 5.3, 4]) - self.assertNotMonotonic(b, strict=True) - self.assertNotMonotonic(b) + self.assert_not_monotonic(b, strict=True) + self.assert_not_monotonic(b) b = np.array([3, 5.3, 5.3]) - self.assertNotMonotonic(b, strict=True) - self.assertMonotonic(b, direction=1) + self.assert_not_monotonic(b, strict=True) + self.assert_monotonic(b, direction=1) b = b[::-1] - self.assertNotMonotonic(b, strict=True) - self.assertMonotonic(b, direction=-1) + self.assert_not_monotonic(b, strict=True) + self.assert_monotonic(b, direction=-1) b = np.array([0.0]) - self.assertRaises(ValueError, iris.util.monotonic, b) - self.assertRaises(ValueError, iris.util.monotonic, b, strict=True) + pytest.raises(ValueError, iris.util.monotonic, b) + pytest.raises(ValueError, iris.util.monotonic, b, strict=True) b = np.array([0.0, 0.0]) - self.assertNotMonotonic(b, strict=True) - self.assertMonotonic(b) + self.assert_not_monotonic(b, strict=True) + self.assert_monotonic(b) -class TestClipString(tests.IrisTest): - def setUp(self): +class TestClipString: + @pytest.fixture(autouse=True) + def _setup(self): self.test_string = "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum." self.rider = "**^^**$$..--__" # A good chance at being unique and not in the string to be tested! @@ -90,43 +89,32 @@ def test_oversize_string(self): result = iris.util.clip_string(self.test_string, clip_length, self.rider) # Check the length is between what we requested ( + rider length) and the length of the original string - self.assertTrue( - clip_length + len(self.rider) <= len(result) < len(self.test_string), - "String was not clipped.", - ) + fail_message = "String was not clipped." + clip_rider_len = clip_length + len(self.rider) + assert clip_rider_len <= len(result) < len(self.test_string), fail_message # Also test the rider was added - self.assertTrue( - self.rider in result, - "Rider was not added to the string when it should have been.", - ) + fail_message = "Rider was not added to the string when it should have been." + assert self.rider in result, fail_message def test_undersize_string(self): # Test with a clip length that is longer than the string clip_length = 10999 result = iris.util.clip_string(self.test_string, clip_length, self.rider) - self.assertEqual( - len(result), - len(self.test_string), - "String was clipped when it should not have been.", - ) + fail_message = "String was clipped when it should not have been." + assert len(result) == len(self.test_string), fail_message # Also test that no rider was added on the end if the string was not clipped - self.assertFalse( - self.rider in result, - "Rider was adding to the string when it should not have been.", - ) + fail_message = "Rider was adding to the string when it should not have been." + assert self.rider not in result, fail_message def test_invalid_clip_lengths(self): # Clip values less than or equal to zero are not valid for clip_length in [0, -100]: result = iris.util.clip_string(self.test_string, clip_length, self.rider) - self.assertEqual( - len(result), - len(self.test_string), - "String was clipped when it should not have been.", - ) + fail_message = "String was clipped when it should not have been." + assert len(result) == len(self.test_string), fail_message def test_default_values(self): # Get the default values specified in the function @@ -137,12 +125,10 @@ def test_default_values(self): self.test_string, arg_dict["clip_length"], arg_dict["rider"] ) - self.assertLess(len(result), len(self.test_string), "String was not clipped.") + assert len(result) < len(self.test_string), "String was not clipped." rider_returned = result[-len(arg_dict["rider"]) :] - self.assertEqual( - rider_returned, arg_dict["rider"], "Default rider was not applied." - ) + assert rider_returned == arg_dict["rider"], "Default rider was not applied." def test_trim_string_with_no_spaces(self): clip_length = 200 @@ -155,16 +141,18 @@ def test_trim_string_with_no_spaces(self): expected_length = clip_length + len(self.rider) # Check the length of the returned string is equal to clip length + length of rider - self.assertEqual( - len(result), - expected_length, + assert len(result) == expected_length, ( "Mismatch in expected length of clipped string. 
Length was %s, " - "expected value is %s" % (len(result), expected_length), + "expected value is %s" % (len(result), expected_length) ) -@tests.skip_data -class TestDescribeDiff(iris.tests.IrisTest): +@_shared_utils.skip_data +class TestDescribeDiff: + @pytest.fixture(autouse=True) + def _setup(self, request): + self.request = request + def test_identical(self): test_cube_a = stock.realistic_4d() test_cube_b = stock.realistic_4d() @@ -173,7 +161,9 @@ def test_identical(self): iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_sio) return_str = return_sio.getvalue() - self.assertString(return_str, "compatible_cubes.str.txt") + _shared_utils.assert_string( + self.request, return_str, "compatible_cubes.str.txt" + ) def test_different(self): # test incompatible attributes @@ -187,7 +177,9 @@ def test_different(self): iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_sio) return_str = return_sio.getvalue() - self.assertString(return_str, "incompatible_attr.str.txt") + _shared_utils.assert_string( + self.request, return_str, "incompatible_attr.str.txt" + ) # test incompatible names test_cube_a = stock.realistic_4d() @@ -199,7 +191,9 @@ def test_different(self): iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_sio) return_str = return_sio.getvalue() - self.assertString(return_str, "incompatible_name.str.txt") + _shared_utils.assert_string( + self.request, return_str, "incompatible_name.str.txt" + ) # test incompatible unit test_cube_a = stock.realistic_4d() @@ -211,7 +205,9 @@ def test_different(self): iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_sio) return_str = return_sio.getvalue() - self.assertString(return_str, "incompatible_unit.str.txt") + _shared_utils.assert_string( + self.request, return_str, "incompatible_unit.str.txt" + ) # test incompatible methods test_cube_a = stock.realistic_4d() @@ -223,9 +219,11 @@ def test_different(self): iris.util.describe_diff(test_cube_a, test_cube_b, output_file=return_sio) return_str = return_sio.getvalue() - self.assertString(return_str, "incompatible_meth.str.txt") + _shared_utils.assert_string( + self.request, return_str, "incompatible_meth.str.txt" + ) - def test_output_file(self): + def test_output_file(self, tmp_path): # test incompatible attributes test_cube_a = stock.realistic_4d() test_cube_b = stock.realistic_4d().collapsed( @@ -237,13 +235,8 @@ def test_output_file(self): test_cube_a.standard_name = "relative_humidity" test_cube_a.units = cf_units.Unit("m") - with self.temp_filename() as filename: - with open(filename, "w") as f: - iris.util.describe_diff(test_cube_a, test_cube_b, output_file=f) - f.close() - self.assertFilesEqual(filename, "incompatible_cubes.str.txt") - - -if __name__ == "__main__": - tests.main() + filename = tmp_path / "tmp" + with open(filename, "w") as f: + iris.util.describe_diff(test_cube_a, test_cube_b, output_file=f) + _shared_utils.assert_files_equal(filename, "incompatible_cubes.str.txt")
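
For reference, the conversion pattern is the same in every file touched above: a unittest setUp becomes an autouse pytest fixture that keeps hold of request, the IrisTest assertion methods become plain assert statements or the request-taking helpers in iris.tests._shared_utils, and temporary output files come from pytest's built-in tmp_path fixture instead of iris.util.create_temp_filename. The sketch below shows a class written to that pattern; the class name, test names and the ("example", "example.cml") reference path are illustrative only, while the _shared_utils helpers, decorator and data path are the ones used in this diff.

import pytest

import iris
from iris.tests import _shared_utils


@_shared_utils.skip_data
class TestExamplePattern:
    # Replaces unittest's setUp(): runs automatically before every test method.
    @pytest.fixture(autouse=True)
    def _setup(self, request):
        # request is stored so the _shared_utils result-comparison helpers
        # (assert_CML, assert_CDL, assert_string, ...) can be called from tests.
        self.request = request
        self.filename = _shared_utils.get_data_path(("PP", "simple_pp", "global.pp"))

    def test_load(self):
        cube = iris.load_cube(self.filename)
        # Plain asserts replace assertEqual / assertNotEqual.
        assert cube.attributes["STASH"] == "m01s16i203"
        # Result-file helpers now take the pytest request as their first argument.
        _shared_utils.assert_CML(self.request, cube, ("example", "example.cml"))

    def test_save(self, tmp_path):
        # tmp_path replaces iris.util.create_temp_filename(); cleanup is automatic.
        cube = iris.load_cube(self.filename)
        out_path = tmp_path / "out.pp"
        iris.save(cube, str(out_path))
        assert out_path.exists()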