diff --git a/.travis.yml b/.travis.yml
index 28fc24ca19..ec9e57df36 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -83,7 +83,6 @@ install:
fi
# prepare iris build directory
- - python setup.py --with-unpack build_ext --include-dirs=${PREFIX}/include --library-dirs=${PREFIX}/lib
- if [[ $TEST_TARGET -ne 'coding' ]]; then
IRIS=$(ls -d1 build/lib*/iris);
mkdir $IRIS/etc;
@@ -107,8 +106,8 @@ install:
fi
# iris
- - python setup.py --quiet --with-unpack build
- - python setup.py --quiet --with-unpack install
+ - python setup.py --quiet build
+ - python setup.py --quiet install
script:
- if [[ $TEST_TARGET == 'default' ]]; then
diff --git a/docs/iris/src/userguide/saving_iris_cubes.rst b/docs/iris/src/userguide/saving_iris_cubes.rst
index d1658ef70e..ecf2210810 100644
--- a/docs/iris/src/userguide/saving_iris_cubes.rst
+++ b/docs/iris/src/userguide/saving_iris_cubes.rst
@@ -82,7 +82,7 @@ For example, a GRIB2 message with a particular known long_name may need to be sa
Similarly a PP field may need to be written out with a specific value for LBEXP. This can be achieved by::
def tweaked_fields(cube):
- for cube, field in iris.fileformats.pp.as_pairs(cube):
+ for cube, field in iris.fileformats.pp.save_pairs_from_cube(cube):
# post process the PP field, prior to saving
if cube.name() == 'air_pressure':
field.lbexp = 'meaxp'
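
For context, a complete round trip with the renamed function might look like the
sketch below; the input/output paths and cube are placeholders, and
``save_fields`` is the companion saver in ``iris.fileformats.pp``::

    import iris
    import iris.fileformats.pp

    cube = iris.load_cube('input.pp')  # placeholder input file
    fields = []
    for sub_cube, field in iris.fileformats.pp.save_pairs_from_cube(cube):
        # Post-process each PP field, prior to saving.
        if sub_cube.name() == 'air_pressure':
            field.lbexp = 'meaxp'
        fields.append(field)
    # Write the tweaked fields out together.
    iris.fileformats.pp.save_fields(fields, 'output.pp')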
diff --git a/docs/iris/src/whatsnew/contributions_v2.0.0/incompatiblechange_2017-Oct-17_rename-pp-rules.txt b/docs/iris/src/whatsnew/contributions_v2.0.0/incompatiblechange_2017-Oct-17_rename-pp-rules.txt
new file mode 100644
index 0000000000..b3ff33c7f6
--- /dev/null
+++ b/docs/iris/src/whatsnew/contributions_v2.0.0/incompatiblechange_2017-Oct-17_rename-pp-rules.txt
@@ -0,0 +1,2 @@
+`iris.fileformats.pp_rules` has been renamed to `iris.fileformats.pp_load_rules`.
+This has been done for the sake of clarity following the introduction of `iris.fileformats.pp_save_rules`.
\ No newline at end of file
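
The rename is a pure module move, so migration is a one-line import change
(sketch)::

    # Before (Iris < 2.0):
    # from iris.fileformats import pp_rules
    # After:
    from iris.fileformats import pp_load_rules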
diff --git a/lib/iris/__init__.py b/lib/iris/__init__.py
index 88c66aa336..77fd7cda5a 100644
--- a/lib/iris/__init__.py
+++ b/lib/iris/__init__.py
@@ -454,11 +454,8 @@ def sample_data_path(*path_to_join):
if iris_sample_data is not None:
target = os.path.join(iris_sample_data.path, target)
else:
- wmsg = ("iris.config.SAMPLE_DATA_DIR was deprecated in v1.10.0 and "
- "will be removed in a future Iris release. Install the "
- "'iris_sample_data' package.")
- warn_deprecated(wmsg)
- target = os.path.join(iris.config.SAMPLE_DATA_DIR, target)
+ raise ImportError("Please install the 'iris_sample_data' package to "
+ "access sample data.")
if not glob.glob(target):
raise ValueError('Sample data file(s) at {!r} not found.\n'
'NB. This function is only for locating files in the '
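
Callers that previously relied on the ``SAMPLE_DATA_DIR`` fallback now get an
``ImportError`` rather than a deprecation warning; a defensive caller might
look like this sketch::

    import iris

    try:
        fname = iris.sample_data_path('air_temp.pp')
    except ImportError:
        # The 'iris_sample_data' package is not installed.
        fname = None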
diff --git a/lib/iris/analysis/__init__.py b/lib/iris/analysis/__init__.py
index a1386b2e2f..0b1d94fbc5 100644
--- a/lib/iris/analysis/__init__.py
+++ b/lib/iris/analysis/__init__.py
@@ -1015,7 +1015,8 @@ def post_process(self, collapsed_cube, data_result, coords, **kwargs):
return result
-def _percentile(data, axis, percent, **kwargs):
+def _percentile(data, axis, percent, fast_percentile_method=False,
+ **kwargs):
"""
The percentile aggregator is an additive operation. This means that
it *may* introduce a new dimension to the data for the statistic being
@@ -1024,18 +1025,34 @@ def _percentile(data, axis, percent, **kwargs):
If a new additive dimension is formed, then it will always be the last
dimension of the resulting percentile data payload.
+ Kwargs:
+
+ * fast_percentile_method (boolean):
+ When set to True, uses the numpy.percentile method as a faster
+ alternative to the scipy.stats.mstats.mquantiles method. Does not
+ handle masked arrays.
+
"""
# Ensure that the target axis is the last dimension.
data = np.rollaxis(data, axis, start=data.ndim)
- quantiles = np.array(percent) / 100.
shape = data.shape[:-1]
# Flatten any leading dimensions.
if shape:
data = data.reshape([np.prod(shape), data.shape[-1]])
# Perform the percentile calculation.
- result = scipy.stats.mstats.mquantiles(data, quantiles, axis=-1, **kwargs)
+ if fast_percentile_method:
+ msg = 'Cannot use fast np.percentile method with masked array.'
+ if ma.isMaskedArray(data):
+ raise TypeError(msg)
+ result = np.percentile(data, percent, axis=-1)
+ result = result.T
+ else:
+ quantiles = np.array(percent) / 100.
+ result = scipy.stats.mstats.mquantiles(data, quantiles, axis=-1,
+ **kwargs)
if not ma.isMaskedArray(data) and not ma.is_masked(result):
result = np.asarray(result)
+
# Ensure to unflatten any leading dimensions.
if shape:
if not isinstance(percent, collections.Iterable):
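
The new keyword is forwarded through the aggregator, so unmasked data can opt
in to the faster NumPy path when collapsing; masked input raises ``TypeError``
as above. A sketch, with a placeholder cube and coordinate name::

    import iris
    import iris.analysis

    cube = iris.load_cube('input.pp')  # placeholder
    result = cube.collapsed('time', iris.analysis.PERCENTILE,
                            percent=95, fast_percentile_method=True)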
@@ -2477,8 +2494,3 @@ def regridder(self, src_cube, target_grid):
from iris.analysis.trajectory import \
UnstructuredNearestNeigbourRegridder
return UnstructuredNearestNeigbourRegridder(src_cube, target_grid)
-
-
-# Import "iris.analysis.interpolate" to replicate older automatic imports.
-# NOTE: do this at end, as otherwise its import of 'Linear' will fail.
-from . import _interpolate_backdoor as interpolate
diff --git a/lib/iris/analysis/_interpolate_backdoor.py b/lib/iris/analysis/_interpolate_backdoor.py
deleted file mode 100644
index 70f9f657b9..0000000000
--- a/lib/iris/analysis/_interpolate_backdoor.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# (C) British Crown Copyright 2010 - 2017, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""
-Interpolation and re-gridding routines.
-
-The core definitions of the now deprecated 'iris.analysis.interpolate', with
-added deprecation wrappers.
-
-These contents are exposed as 'iris.analysis.interpolate', which is
-automatically available when 'iris.analysis' is imported.
-This is provided *only* because removing the automatic import broke some user
-code -- even though reliance on automatic imports is accepted bad practice.
-
-The "real" module 'iris.analysis.interpolate' can also be explicitly
-imported, and provides exactly the same definitions.
-The only difference is that the explicit import *itself* emits a deprecation
-warning.
-
-"""
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-import six
-
-import collections
-from functools import wraps
-
-import numpy as np
-import scipy
-import scipy.spatial
-from scipy.interpolate.interpolate import interp1d
-
-from iris._deprecation import (warn_deprecated as iris_warn_deprecated,
- ClassWrapperSameDocstring)
-from iris.analysis import Linear
-import iris.cube
-import iris.coord_systems
-import iris.coords
-import iris.exceptions
-from . import _interpolate_private as _interp
-
-
-_INTERPOLATE_DEPRECATION_WARNING = \
- "The module 'iris.analysis.interpolate' is deprecated."
-
-
-# Define a common callpoint for deprecation warnings.
-def _warn_deprecated(msg=None):
- if msg is None:
- msg = _INTERPOLATE_DEPRECATION_WARNING
- iris_warn_deprecated(msg)
-
-
-def nearest_neighbour_indices(cube, sample_points):
- msg = (_INTERPOLATE_DEPRECATION_WARNING + '\n' +
- 'Please replace usage of '
- 'iris.analysis.interpolate.nearest_neighbour_indices() '
- 'with iris.coords.Coord.nearest_neighbour_index()).')
- _warn_deprecated(msg)
- return _interp.nearest_neighbour_indices(cube, sample_points)
-
-nearest_neighbour_indices.__doc__ = _interp.nearest_neighbour_indices.__doc__
-
-
-def extract_nearest_neighbour(cube, sample_points):
- msg = (_INTERPOLATE_DEPRECATION_WARNING + '\n' +
- 'Please replace usage of '
- 'iris.analysis.interpolate.extract_nearest_neighbour() with '
- 'iris.cube.Cube.interpolate(..., scheme=iris.analysis.Nearest()).')
- _warn_deprecated(msg)
- return _interp.extract_nearest_neighbour(cube, sample_points)
-
-extract_nearest_neighbour.__doc__ = _interp.extract_nearest_neighbour.__doc__
-
-
-def nearest_neighbour_data_value(cube, sample_points):
- msg = (_INTERPOLATE_DEPRECATION_WARNING + '\n' +
- 'Please replace usage of '
- 'iris.analysis.interpolate.nearest_neighbour_data_value() with '
- 'iris.cube.Cube.interpolate(..., scheme=iris.analysis.Nearest()).')
- _warn_deprecated(msg)
- return _interp.nearest_neighbour_data_value(cube, sample_points)
-
-nearest_neighbour_data_value.__doc__ = \
- _interp.nearest_neighbour_data_value.__doc__
-
-
-def regrid(source_cube, grid_cube, mode='bilinear', **kwargs):
- msg = (_INTERPOLATE_DEPRECATION_WARNING + '\n' +
- 'Please replace usage of iris.analysis.interpolate.regrid() '
- 'with iris.cube.Cube.regrid().')
- _warn_deprecated(msg)
- return _interp.regrid(source_cube, grid_cube, mode=mode, **kwargs)
-
-regrid.__doc__ = _interp.regrid.__doc__
-
-
-def regrid_to_max_resolution(cubes, **kwargs):
- msg = (_INTERPOLATE_DEPRECATION_WARNING + '\n' +
- 'Please replace usage of '
- 'iris.analysis.interpolate.regrid_to_max_resolution() '
- 'with iris.cube.Cube.regrid().')
- _warn_deprecated(msg)
- return _interp.regrid_to_max_resolution(cubes, **kwargs)
-
-regrid_to_max_resolution.__doc__ = _interp.regrid_to_max_resolution.__doc__
-
-
-def linear(cube, sample_points, extrapolation_mode='linear'):
- msg = (_INTERPOLATE_DEPRECATION_WARNING + '\n' +
- 'Please replace usage of iris.analysis.interpolate.linear() with '
- 'iris.cube.Cube.interpolate(..., scheme=iris.analysis.Linear()).')
- _warn_deprecated(msg)
- return _interp.linear(cube, sample_points,
- extrapolation_mode=extrapolation_mode)
-
-linear.__doc__ = _interp.linear.__doc__
-
-
-class Linear1dExtrapolator(six.with_metaclass(ClassWrapperSameDocstring,
- _interp.Linear1dExtrapolator)):
- @wraps(_interp.Linear1dExtrapolator.__init__)
- def __init__(self, interpolator):
- _warn_deprecated()
- super(Linear1dExtrapolator, self).__init__(interpolator)
diff --git a/lib/iris/analysis/_interpolate_private.py b/lib/iris/analysis/_interpolate_private.py
deleted file mode 100644
index 3990fb6414..0000000000
--- a/lib/iris/analysis/_interpolate_private.py
+++ /dev/null
@@ -1,873 +0,0 @@
-# (C) British Crown Copyright 2010 - 2017, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""
-This is the 'original' content of :mod:`iris.analysis.interpolate`, which has
-now been deprecated.
-
-A rename was essential to provide a deprecation warning on import of the
-original name, while still providing this code for internal usage (for now)
-without triggering the deprecation notice.
-
-"""
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-import six
-
-import collections
-import warnings
-
-import numpy as np
-import scipy
-import scipy.spatial
-from scipy.interpolate.interpolate import interp1d
-
-from iris.analysis import Linear
-import iris.cube
-import iris.coord_systems
-import iris.coords
-from iris._deprecation import warn_deprecated
-import iris.exceptions
-
-
-def _ll_to_cart(lon, lat):
- # Based on cartopy.img_transform.ll_to_cart()
- x = np.sin(np.deg2rad(90 - lat)) * np.cos(np.deg2rad(lon))
- y = np.sin(np.deg2rad(90 - lat)) * np.sin(np.deg2rad(lon))
- z = np.cos(np.deg2rad(90 - lat))
- return (x, y, z)
-
-def _cartesian_sample_points(sample_points, sample_point_coord_names):
- # Replace geographic latlon with cartesian xyz.
- # Generates coords suitable for nearest point calculations with scipy.spatial.cKDTree.
- #
- # Input:
- # sample_points[coord][datum] : list of sample_positions for each datum, formatted for fast use of _ll_to_cart()
- # sample_point_coord_names[coord] : list of n coord names
- #
- # Output:
- # list of [x,y,z,t,etc] positions, formatted for kdtree
-
- # Find lat and lon coord indices
- i_lat = i_lon = None
- i_non_latlon = list(range(len(sample_point_coord_names)))
- for i, name in enumerate(sample_point_coord_names):
- if "latitude" in name:
- i_lat = i
- i_non_latlon.remove(i_lat)
- if "longitude" in name:
- i_lon = i
- i_non_latlon.remove(i_lon)
-
- if i_lat is None or i_lon is None:
- return sample_points.transpose()
-
- num_points = len(sample_points[0])
- cartesian_points = [None] * num_points
-
- # Get the point coordinates without the latlon
- for p in range(num_points):
- cartesian_points[p] = [sample_points[c][p] for c in i_non_latlon]
-
- # Add cartesian xyz coordinates from latlon
- x, y, z = _ll_to_cart(sample_points[i_lon], sample_points[i_lat])
- for p in range(num_points):
- cartesian_point = cartesian_points[p]
- cartesian_point.append(x[p])
- cartesian_point.append(y[p])
- cartesian_point.append(z[p])
-
- return cartesian_points
-
-
-def nearest_neighbour_indices(cube, sample_points):
- """
- Returns the indices to select the data value(s) closest to the given coordinate point values.
-
- The sample_points mapping does not have to include coordinate values corresponding to all data
- dimensions. Any dimensions unspecified will default to a full slice.
-
- For example:
-
- >>> cube = iris.load_cube(iris.sample_data_path('ostia_monthly.nc'))
- >>> iris.analysis.interpolate.nearest_neighbour_indices(cube, [('latitude', 0), ('longitude', 10)])
- (slice(None, None, None), 9, 12)
- >>> iris.analysis.interpolate.nearest_neighbour_indices(cube, [('latitude', 0)])
- (slice(None, None, None), 9, slice(None, None, None))
-
- Args:
-
- * cube:
- An :class:`iris.cube.Cube`.
- * sample_points
- A list of tuple pairs mapping coordinate instances or unique coordinate names in the cube to point values.
-
- Returns:
- The tuple of indices which will select the point in the cube closest to the supplied coordinate values.
-
- .. note::
-
- Nearest neighbour interpolation of multidimensional coordinates is not
- yet supported.
-
- .. deprecated:: 1.10
-
- The module :mod:`iris.analysis.interpolate` is deprecated.
- Please replace usage of
- :func:`iris.analysis.interpolate.nearest_neighbour_indices`
- with :meth:`iris.coords.Coord.nearest_neighbour_index`.
-
- """
- if isinstance(sample_points, dict):
- msg = ('Providing a dictionary to specify points is deprecated. '
- 'Please provide a list of (coordinate, values) pairs.')
- warn_deprecated(msg)
- sample_points = list(sample_points.items())
-
- if sample_points:
- try:
- coord, values = sample_points[0]
- except ValueError:
- raise ValueError('Sample points must be a list of (coordinate, value) pairs. Got %r.' % sample_points)
-
- points = []
- for coord, values in sample_points:
- if isinstance(coord, six.string_types):
- coord = cube.coord(coord)
- else:
- coord = cube.coord(coord)
- points.append((coord, values))
- sample_points = points
-
- # Build up a list of indices to span the cube.
- indices = [slice(None, None)] * cube.ndim
-
- # Build up a dictionary which maps the cube's data dimensions to a list (which will later
- # be populated by coordinates in the sample points list)
- dim_to_coord_map = {}
- for i in range(cube.ndim):
- dim_to_coord_map[i] = []
-
- # Iterate over all of the specifications provided by sample_points
- for coord, point in sample_points:
- data_dim = cube.coord_dims(coord)
-
- # If no data dimension then we don't need to make any modifications to indices.
- if not data_dim:
- continue
- elif len(data_dim) > 1:
- raise iris.exceptions.CoordinateMultiDimError("Nearest neighbour interpolation of multidimensional "
- "coordinates is not supported.")
- data_dim = data_dim[0]
-
- dim_to_coord_map[data_dim].append(coord)
-
- #calculate the nearest neighbour
- min_index = coord.nearest_neighbour_index(point)
-
- if getattr(coord, 'circular', False):
- warnings.warn("Nearest neighbour on a circular coordinate may not be picking the nearest point.", DeprecationWarning)
-
- # If the dimension has already been interpolated then assert that the index from this coordinate
- # agrees with the index already calculated, otherwise we have a contradicting specification
- if indices[data_dim] != slice(None, None) and min_index != indices[data_dim]:
- raise ValueError('The coordinates provided (%s) over specify dimension %s.' %
- (', '.join([coord.name() for coord in dim_to_coord_map[data_dim]]), data_dim))
-
- indices[data_dim] = min_index
-
- return tuple(indices)
-
-
-def _nearest_neighbour_indices_ndcoords(cube, sample_points, cache=None):
- """
- See documentation for :func:`iris.analysis.interpolate.nearest_neighbour_indices`.
-
- 'sample_points' is of the form [[coord-or-coord-name, point-value(s)]*].
- The lengths of all the point-values sequences must be equal.
-
- This function is adapted for points sampling a multi-dimensional coord,
- and can currently only do nearest neighbour interpolation.
-
- Because this function can be slow for multidimensional coordinates,
- a 'cache' dictionary can be provided by the calling code.
-
- .. Note::
-
- If the points are longitudes/latitudes, these are handled correctly as
- points on the sphere, but the values must be in 'degrees'.
-
- """
-
- # Developer notes:
- # A "sample space cube" is made which only has the coords and dims we are sampling on.
- # We get the nearest neighbour using this sample space cube.
-
- if isinstance(sample_points, dict):
- msg = ('Providing a dictionary to specify points is deprecated. '
- 'Please provide a list of (coordinate, values) pairs.')
- warn_deprecated(msg)
- sample_points = list(sample_points.items())
-
- if sample_points:
- try:
- coord, value = sample_points[0]
- except ValueError:
- raise ValueError('Sample points must be a list of (coordinate, value) pairs. Got %r.' % sample_points)
-
- # Convert names to coords in sample_point
- # Reformat sample point values for use in _cartesian_sample_points(), below.
- coord_values = []
- sample_point_coords = []
- sample_point_coord_names = []
- ok_coord_ids = set(map(id, cube.dim_coords + cube.aux_coords))
- for coord, value in sample_points:
- coord = cube.coord(coord)
- if id(coord) not in ok_coord_ids:
- msg = ('Invalid sample coordinate {!r}: derived coordinates are'
- ' not allowed.'.format(coord.name()))
- raise ValueError(msg)
- sample_point_coords.append(coord)
- sample_point_coord_names.append(coord.name())
- value = np.array(value, ndmin=1)
- coord_values.append(value)
-
- coord_point_lens = np.array([len(value) for value in coord_values])
- if not np.all(coord_point_lens == coord_point_lens[0]):
- msg = 'All coordinates must have the same number of sample points.'
- raise ValueError(msg)
-
- coord_values = np.array(coord_values)
-
- # Which dims are we sampling?
- sample_dims = set()
- for coord in sample_point_coords:
- for dim in cube.coord_dims(coord):
- sample_dims.add(dim)
- sample_dims = sorted(list(sample_dims))
-
- # Extract a sub cube that lives in just the sampling space.
- sample_space_slice = [0] * cube.ndim
- for sample_dim in sample_dims:
- sample_space_slice[sample_dim] = slice(None, None)
- sample_space_slice = tuple(sample_space_slice)
- sample_space_cube = cube[sample_space_slice]
-
- #...with just the sampling coords
- for coord in sample_space_cube.coords():
- if not coord.name() in sample_point_coord_names:
- sample_space_cube.remove_coord(coord)
-
- # Order the sample point coords according to the sample space cube coords
- sample_space_coord_names = [coord.name() for coord in sample_space_cube.coords()]
- new_order = [sample_space_coord_names.index(name) for name in sample_point_coord_names]
- coord_values = np.array([coord_values[i] for i in new_order])
- sample_point_coord_names = [sample_point_coord_names[i] for i in new_order]
-
- sample_space_coords = sample_space_cube.dim_coords + sample_space_cube.aux_coords
- sample_space_coords_and_dims = [(coord, sample_space_cube.coord_dims(coord)) for coord in sample_space_coords]
-
- if cache is not None and cube in cache:
- kdtree = cache[cube]
- else:
- # Create a "sample space position" for each datum: sample_space_data_positions[coord_index][datum_index]
- sample_space_data_positions = np.empty((len(sample_space_coords_and_dims), sample_space_cube.data.size), dtype=float)
- for d, ndi in enumerate(np.ndindex(sample_space_cube.data.shape)):
- for c, (coord, coord_dims) in enumerate(sample_space_coords_and_dims):
- # Index of this datum along this coordinate (could be nD).
- keys = tuple(ndi[ind] for ind in coord_dims) if coord_dims else slice(None, None)
- # Position of this datum along this coordinate.
- sample_space_data_positions[c][d] = coord.points[keys]
-
- # Convert to cartesian coordinates. Flatten for kdtree compatibility.
- cartesian_space_data_coords = _cartesian_sample_points(sample_space_data_positions, sample_point_coord_names)
-
- # Create a kdtree for the nearest-distance lookup to these 3d points.
- kdtree = scipy.spatial.cKDTree(cartesian_space_data_coords)
- # This can find the nearest datum point to any given target point,
- # which is the goal of this function.
-
- # Update cache
- if cache is not None:
- cache[cube] = kdtree
-
- # Convert the sample points to cartesian (3d) coords.
- # If there is no latlon within the coordinate there will be no change.
- # Otherwise, geographic latlon is replaced with cartesian xyz.
- cartesian_sample_points = _cartesian_sample_points(
- coord_values, sample_point_coord_names)
-
- # Use kdtree to get the nearest sourcepoint index for each target point.
- _, datum_index_lists = kdtree.query(cartesian_sample_points)
-
- # Convert flat indices back into multidimensional sample-space indices.
- sample_space_dimension_indices = np.unravel_index(
- datum_index_lists, sample_space_cube.data.shape)
- # Convert this from "pointwise list of index arrays for each dimension",
- # to "list of cube indices for each point".
- sample_space_ndis = np.array(sample_space_dimension_indices).transpose()
-
- # For the returned result, we must convert these indices into the source
- # (sample-space) cube, to equivalent indices into the target 'cube'.
-
- # Make a result array: (cube.ndim * ), per sample point.
- n_points = coord_values.shape[-1]
- main_cube_slices = np.empty((n_points, cube.ndim), dtype=object)
- # Initialise so all unused indices are ":".
- main_cube_slices[:] = slice(None)
-
- # Move result indices according to the source (sample) and target (cube)
- # dimension mappings.
- for sample_coord, sample_coord_dims in sample_space_coords_and_dims:
- # Find the coord in the main cube
- main_coord = cube.coord(sample_coord.name())
- main_coord_dims = cube.coord_dims(main_coord)
- # Fill nearest-point data indices for each coord dimension.
- for sample_i, main_i in zip(sample_coord_dims, main_coord_dims):
- main_cube_slices[:, main_i] = sample_space_ndis[:, sample_i]
-
- # Return as a list of **tuples** : required for correct indexing usage.
- result = [tuple(inds) for inds in main_cube_slices]
- return result
-
-
-def extract_nearest_neighbour(cube, sample_points):
- """
- Returns a new cube using data value(s) closest to the given coordinate point values.
-
- The sample_points mapping does not have to include coordinate values corresponding to all data
- dimensions. Any dimensions unspecified will default to a full slice.
-
- For example:
-
- >>> cube = iris.load_cube(iris.sample_data_path('ostia_monthly.nc'))
- >>> iris.analysis.interpolate.extract_nearest_neighbour(cube, [('latitude', 0), ('longitude', 10)])
-
- >>> iris.analysis.interpolate.extract_nearest_neighbour(cube, [('latitude', 0)])
-
-
- Args:
-
- * cube:
- An :class:`iris.cube.Cube`.
- * sample_points
- A list of tuple pairs mapping coordinate instances or unique coordinate names in the cube to point values.
-
- Returns:
- A cube that represents uninterpolated data as near to the given points as possible.
-
- .. deprecated:: 1.10
-
- The module :mod:`iris.analysis.interpolate` is deprecated.
- Please replace usage of
- :func:`iris.analysis.interpolate.extract_nearest_neighbour`
- with :meth:`iris.cube.Cube.interpolate` using the scheme
- :class:`iris.analysis.Nearest`.
-
- """
- return cube[nearest_neighbour_indices(cube, sample_points)]
-
-
-def nearest_neighbour_data_value(cube, sample_points):
- """
- Returns the data value closest to the given coordinate point values.
-
- The sample_points mapping must include coordinate values corresponding to all data
- dimensions.
-
- For example:
-
- >>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
- >>> iris.analysis.interpolate.nearest_neighbour_data_value(cube, [('latitude', 0), ('longitude', 10)])
- 299.21564
- >>> iris.analysis.interpolate.nearest_neighbour_data_value(cube, [('latitude', 0)])
- Traceback (most recent call last):
- ...
- ValueError: The sample points [('latitude', 0)] was not specific enough to return a single value from the cube.
-
-
- Args:
-
- * cube:
- An :class:`iris.cube.Cube`.
- * sample_points
- A list of tuple pairs mapping coordinate instances or unique coordinate names in the cube to point values.
-
- Returns:
- The data value at the point in the cube closest to the supplied coordinate values.
-
- .. deprecated:: 1.10
-
- The module :mod:`iris.analysis.interpolate` is deprecated.
- Please replace usage of
- :func:`iris.analysis.interpolate.nearest_neighbour_data_value`
- with :meth:`iris.cube.Cube.interpolate` using the scheme
- :class:`iris.analysis.Nearest`.
-
- """
- indices = nearest_neighbour_indices(cube, sample_points)
- for ind in indices:
- if isinstance(ind, slice):
- raise ValueError('The sample points given (%s) were not specific enough to return a '
- 'single value from the cube.' % sample_points)
-
- return cube.data[indices]
-
-
-def regrid(source_cube, grid_cube, mode='bilinear', **kwargs):
- """
- Returns a new cube with values derived from the source_cube on the horizontal grid specified
- by the grid_cube.
-
- Fundamental input requirements:
- 1) Both cubes must have a CoordSystem.
- 2) The source 'x' and 'y' coordinates must not share data dimensions with any other coordinates.
-
- In addition, the algorithm currently used requires:
- 3) Both CS instances must be compatible:
- i.e. of the same type, with the same attribute values, and with compatible coordinates.
- 4) No new data dimensions can be created.
- 5) Source cube coordinates to map to a single dimension.
-
- Args:
-
- * source_cube:
- An instance of :class:`iris.cube.Cube` which supplies the source data and metadata.
- * grid_cube:
- An instance of :class:`iris.cube.Cube` which supplies the horizontal grid definition.
-
- Kwargs:
-
- * mode (string):
- Regridding interpolation algorithm to be applied, which may be one of the following:
-
- * 'bilinear' for bi-linear interpolation (default), see :func:`iris.analysis.interpolate.linear`.
- * 'nearest' for nearest neighbour interpolation.
-
- Returns:
- A new :class:`iris.cube.Cube` instance.
-
- .. note::
-
- The masked status of values are currently ignored. See :func:\
-`~iris.experimental.regrid.regrid_bilinear_rectilinear_src_and_grid`
- for regrid support with mask awareness.
-
- .. deprecated:: 1.10
-
- Please use :meth:`iris.cube.Cube.regrid` instead, with an appropriate
- regridding scheme:
-
- * For mode='bilinear', simply use the :class:`~iris.analysis.Linear`
- scheme.
-
- * For mode='nearest', use the :class:`~iris.analysis.Nearest` scheme,
- with extrapolation_mode='extrapolate', but be aware of the
- following possible differences:
-
- * Any missing result points, i.e. those which match source points
- which are masked or NaN, are returned as as NaN values by this
- routine. The 'Nearest' scheme, however, represents missing
- results as masked points in a masked array.
- *Which* points are missing is unchanged.
-
- * Longitude wrapping for this routine is controlled by the
- 'circular' property of the x coordinate.
- The 'Nearest' scheme, however, *always* wraps any coords with
- modular units, such as (correctly formed) longitudes.
- Thus, behaviour can be different if "x_coord.circular" is
- False : In that case, if the original non-longitude-wrapped
- operation is required, it can be replicated by converting all
- X and Y coordinates' units to '1' and removing their coordinate
- systems.
-
- """
- if mode == 'bilinear':
- scheme = Linear(**kwargs)
- return source_cube.regrid(grid_cube, scheme)
-
- # Condition 1
- source_cs = source_cube.coord_system(iris.coord_systems.CoordSystem)
- grid_cs = grid_cube.coord_system(iris.coord_systems.CoordSystem)
- if (source_cs is None) != (grid_cs is None):
- raise ValueError("The source and grid cubes must both have a CoordSystem or both have None.")
-
- # Condition 2: We can only have one x coordinate and one y coordinate with the source CoordSystem, and those coordinates
- # must be the only ones occupying their respective dimension
- source_x = source_cube.coord(axis='x', coord_system=source_cs)
- source_y = source_cube.coord(axis='y', coord_system=source_cs)
-
- source_x_dims = source_cube.coord_dims(source_x)
- source_y_dims = source_cube.coord_dims(source_y)
-
- source_x_dim = None
- if source_x_dims:
- if len(source_x_dims) > 1:
- raise ValueError('The source x coordinate may not describe more than one data dimension.')
- source_x_dim = source_x_dims[0]
- dim_sharers = ', '.join([coord.name() for coord in source_cube.coords(contains_dimension=source_x_dim) if coord is not source_x])
- if dim_sharers:
- raise ValueError('No coordinates may share a dimension (dimension %s) with the x '
- 'coordinate, but (%s) do.' % (source_x_dim, dim_sharers))
-
- source_y_dim = None
- if source_y_dims:
- if len(source_y_dims) > 1:
- raise ValueError('The source y coordinate may not describe more than one data dimension.')
- source_y_dim = source_y_dims[0]
- dim_sharers = ', '.join([coord.name() for coord in source_cube.coords(contains_dimension=source_y_dim) if coord is not source_y])
- if dim_sharers:
- raise ValueError('No coordinates may share a dimension (dimension %s) with the y '
- 'coordinate, but (%s) do.' % (source_y_dim, dim_sharers))
-
- if source_x_dim is not None and source_y_dim == source_x_dim:
- raise ValueError('The source x and y coords may not describe the same data dimension.')
-
-
- # Condition 3
- # Check for compatible horizontal CSs. Currently that means they're exactly the same except for the coordinate
- # values.
- # The same kind of CS ...
- compatible = (source_cs == grid_cs)
- if compatible:
- grid_x = grid_cube.coord(axis='x', coord_system=grid_cs)
- grid_y = grid_cube.coord(axis='y', coord_system=grid_cs)
- compatible = source_x.is_compatible(grid_x) and \
- source_y.is_compatible(grid_y)
- if not compatible:
- raise ValueError("The new grid must be defined on the same coordinate system, and have the same coordinate "
- "metadata, as the source.")
-
- # Condition 4
- if grid_cube.coord_dims(grid_x) and not source_x_dims or \
- grid_cube.coord_dims(grid_y) and not source_y_dims:
- raise ValueError("The new grid must not require additional data dimensions.")
-
- x_coord = grid_x.copy()
- y_coord = grid_y.copy()
-
-
- #
- # Adjust the data array to match the new grid.
- #
-
- # get the new shape of the data
- new_shape = list(source_cube.shape)
- if source_x_dims:
- new_shape[source_x_dims[0]] = grid_x.shape[0]
- if source_y_dims:
- new_shape[source_y_dims[0]] = grid_y.shape[0]
-
- new_data = np.empty(new_shape, dtype=source_cube.data.dtype)
-
- # Prepare the index pattern which will be used to insert a single "column" of data.
- # NB. A "column" is a slice constrained to a single XY point, which therefore extends over *all* the other axes.
- # For an XYZ cube this means a column only extends over Z and corresponds to the normal definition of "column".
- indices = [slice(None, None)] * new_data.ndim
-
- if mode == 'bilinear':
- # Perform bilinear interpolation, passing through any keywords.
- points_dict = [(source_x, list(x_coord.points)), (source_y, list(y_coord.points))]
- new_data = linear(source_cube, points_dict, **kwargs).data
- else:
- # Perform nearest neighbour interpolation on each column in turn.
- for iy, y in enumerate(y_coord.points):
- for ix, x in enumerate(x_coord.points):
- column_pos = [(source_x, x), (source_y, y)]
- column_data = extract_nearest_neighbour(source_cube, column_pos).data
- if source_y_dim is not None:
- indices[source_y_dim] = iy
- if source_x_dim is not None:
- indices[source_x_dim] = ix
- new_data[tuple(indices)] = column_data
-
- # Special case to make 0-dimensional results take the same form as NumPy
- if new_data.shape == ():
- new_data = new_data.flat[0]
-
- # Start with just the metadata and the re-sampled data...
- new_cube = iris.cube.Cube(new_data)
- new_cube.metadata = source_cube.metadata
-
- # ... and then copy across all the unaffected coordinates.
-
- # Record a mapping from old coordinate IDs to new coordinates,
- # for subsequent use in creating updated aux_factories.
- coord_mapping = {}
-
- def copy_coords(source_coords, add_method):
- for coord in source_coords:
- if coord is source_x or coord is source_y:
- continue
- dims = source_cube.coord_dims(coord)
- new_coord = coord.copy()
- add_method(new_coord, dims)
- coord_mapping[id(coord)] = new_coord
-
- copy_coords(source_cube.dim_coords, new_cube.add_dim_coord)
- copy_coords(source_cube.aux_coords, new_cube.add_aux_coord)
-
- for factory in source_cube.aux_factories:
- new_cube.add_aux_factory(factory.updated(coord_mapping))
-
- # Add the new coords
- if source_x in source_cube.dim_coords:
- new_cube.add_dim_coord(x_coord, source_x_dim)
- else:
- new_cube.add_aux_coord(x_coord, source_x_dims)
-
- if source_y in source_cube.dim_coords:
- new_cube.add_dim_coord(y_coord, source_y_dim)
- else:
- new_cube.add_aux_coord(y_coord, source_y_dims)
-
- return new_cube
-
-
-def regrid_to_max_resolution(cubes, **kwargs):
- """
- Returns all the cubes re-gridded to the highest horizontal resolution.
-
- Horizontal resolution is defined by the number of grid points/cells covering the horizontal plane.
- See :func:`iris.analysis.interpolation.regrid` regarding mode of interpolation.
-
- Args:
-
- * cubes:
- An iterable of :class:`iris.cube.Cube` instances.
-
- Returns:
- A list of new :class:`iris.cube.Cube` instances.
-
- .. deprecated:: 1.10
-
- The module :mod:`iris.analysis.interpolate` is deprecated.
- Please replace usage of :func:`regrid_to_max_resolution` with
- :meth:`iris.cube.Cube.regrid`.
-
- """
- # TODO: This could be significantly improved for readability and functionality.
- resolution = lambda cube_: (cube_.shape[cube_.coord_dims(cube_.coord(axis="x"))[0]]) * (cube_.shape[cube_.coord_dims(cube_.coord(axis="y"))[0]])
- grid_cube = max(cubes, key=resolution)
- return [cube.regridded(grid_cube, **kwargs) for cube in cubes]
-
-
-def linear(cube, sample_points, extrapolation_mode='linear'):
- """
- Return a cube of the linearly interpolated points given the desired
- sample points.
-
- Given a list of tuple pairs mapping coordinates (or coordinate names)
- to their desired values, return a cube with linearly interpolated values.
- If more than one coordinate is specified, the linear interpolation will be
- carried out in sequence, thus providing n-linear interpolation
- (bi-linear, tri-linear, etc.).
-
- If the input cube's data is masked, the result cube will have a data
- mask interpolated to the new sample points
-
- .. testsetup::
-
- import numpy as np
-
- For example:
-
- >>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
- >>> sample_points = [('latitude', np.linspace(-90, 90, 10)),
- ... ('longitude', np.linspace(-180, 180, 20))]
- >>> iris.analysis.interpolate.linear(cube, sample_points)
-
-
- .. note::
-
- By definition, linear interpolation requires all coordinates to
- be 1-dimensional.
-
- .. note::
-
- If a specified coordinate is single valued its value will be
- extrapolated to the desired sample points by assuming a gradient of
- zero.
-
- Args:
-
- * cube
- The cube to be interpolated.
-
- * sample_points
- List of one or more tuple pairs mapping coordinate to desired
- points to interpolate. Points may be a scalar or a numpy array
- of values. Multi-dimensional coordinates are not supported.
-
- Kwargs:
-
- * extrapolation_mode - string - one of 'linear', 'nan' or 'error'
-
- * If 'linear' the point will be calculated by extending the
- gradient of closest two points.
- * If 'nan' the extrapolation point will be put as a NaN.
- * If 'error' a value error will be raised notifying of the
- attempted extrapolation.
-
- .. note::
-
- If the source cube's data, or any of its resampled coordinates,
- have an integer data type they will be promoted to a floating
- point data type in the result.
-
- .. deprecated:: 1.10
-
- The module :mod:`iris.analysis.interpolate` is deprecated.
- Please replace usage of
- :func:`iris.analysis.interpolate.linear`
- with :meth:`iris.cube.Cube.interpolate` using the scheme
- :class:`iris.analysis.Linear`.
-
- """
- if isinstance(sample_points, dict):
- sample_points = list(sample_points.items())
-
- # catch the case where a user passes a single (coord/name, value) pair rather than a list of pairs
- if sample_points and not (isinstance(sample_points[0], collections.Container) and not isinstance(sample_points[0], six.string_types)):
- raise TypeError('Expecting the sample points to be a list of tuple pairs representing (coord, points), got a list of %s.' % type(sample_points[0]))
-
- scheme = Linear(extrapolation_mode)
- return cube.interpolate(sample_points, scheme)
-
-
-def _interp1d_rolls_y():
- """
- Determines if :class:`scipy.interpolate.interp1d` rolls its array `y` by
- comparing the shape of y passed into interp1d to the shape of its internal
- representation of y.
-
- SciPy v0.13.x+ no longer rolls the axis of its internal representation
- of y so we test for this occurring to prevent us subsequently
- extrapolating along the wrong axis.
-
- For further information on this change see, for example:
- * https://github.com/scipy/scipy/commit/0d906d0fc54388464603c63119b9e35c9a9c4601
- (the commit that introduced the change in behaviour).
- * https://github.com/scipy/scipy/issues/2621
- (a discussion on the change - note the issue is not resolved
- at time of writing).
-
- """
- y = np.arange(12).reshape(3, 4)
- f = interp1d(np.arange(3), y, axis=0)
- # If the initial shape of y and the shape internal to interp1d are *not*
- # the same then scipy.interp1d rolls y.
- return y.shape != f.y.shape
-
-
-class Linear1dExtrapolator(object):
- """
- Extension class to :class:`scipy.interpolate.interp1d` to provide linear extrapolation.
-
- See also: :mod:`scipy.interpolate`.
-
- .. deprecated :: 1.10
-
- """
- roll_y = _interp1d_rolls_y()
-
- def __init__(self, interpolator):
- """
- Given an already created :class:`scipy.interpolate.interp1d` instance, return a callable object
- which supports linear extrapolation.
-
- .. deprecated :: 1.10
-
- """
- self._interpolator = interpolator
- self.x = interpolator.x
- # Store the y values given to the interpolator.
- self.y = interpolator.y
- """
- The y values given to the interpolator object.
-
- .. note:: These are stored with the interpolator.axis last.
-
- """
- # Roll interpolator.axis to the end if scipy no longer does it for us.
- if not self.roll_y:
- self.y = np.rollaxis(self.y, self._interpolator.axis, self.y.ndim)
-
- def all_points_in_range(self, requested_x):
- """Given the x points, do all of the points sit inside the interpolation range."""
- test = (requested_x >= self.x[0]) & (requested_x <= self.x[-1])
- if isinstance(test, np.ndarray):
- test = test.all()
- return test
-
- def __call__(self, requested_x):
- if not self.all_points_in_range(requested_x):
- # cast requested_x to a numpy array if it is not already.
- if not isinstance(requested_x, np.ndarray):
- requested_x = np.array(requested_x)
-
- # we need to catch the special case of providing a single value...
- remember_that_i_was_0d = requested_x.ndim == 0
-
- requested_x = requested_x.flatten()
-
- gt = np.where(requested_x > self.x[-1])[0]
- lt = np.where(requested_x < self.x[0])[0]
- ok = np.where( (requested_x >= self.x[0]) & (requested_x <= self.x[-1]) )[0]
-
- data_shape = list(self.y.shape)
- data_shape[-1] = len(requested_x)
- result = np.empty(data_shape, dtype=self._interpolator(self.x[0]).dtype)
-
- # Make a variable to represent the slice into the resultant data. (This will be updated in each of gt, lt & ok)
- interpolator_result_index = [slice(None, None)] * self.y.ndim
-
- if len(ok) != 0:
- interpolator_result_index[-1] = ok
-
- r = self._interpolator(requested_x[ok])
- # Reshape the properly formed array to put the interpolator.axis last i.e. dims 0, 1, 2 -> 0, 2, 1 if axis = 1
- axes = list(range(r.ndim))
- del axes[self._interpolator.axis]
- axes.append(self._interpolator.axis)
-
- result[interpolator_result_index] = r.transpose(axes)
-
- if len(lt) != 0:
- interpolator_result_index[-1] = lt
-
- grad = (self.y[..., 1:2] - self.y[..., 0:1]) / (self.x[1] - self.x[0])
- result[interpolator_result_index] = self.y[..., 0:1] + (requested_x[lt] - self.x[0]) * grad
-
- if len(gt) != 0:
- interpolator_result_index[-1] = gt
-
- grad = (self.y[..., -1:] - self.y[..., -2:-1]) / (self.x[-1] - self.x[-2])
- result[interpolator_result_index] = self.y[..., -1:] + (requested_x[gt] - self.x[-1]) * grad
-
- axes = list(range(len(interpolator_result_index)))
- axes.insert(self._interpolator.axis, axes.pop(axes[-1]))
- result = result.transpose(axes)
-
- if remember_that_i_was_0d:
- new_shape = list(result.shape)
- del new_shape[self._interpolator.axis]
- result = result.reshape(new_shape)
-
- return result
- else:
- return self._interpolator(requested_x)
diff --git a/lib/iris/analysis/calculus.py b/lib/iris/analysis/calculus.py
index a18a4a2aed..991edac072 100644
--- a/lib/iris/analysis/calculus.py
+++ b/lib/iris/analysis/calculus.py
@@ -325,9 +325,10 @@ def _curl_regrid(cube, prototype):
assert isinstance(prototype, iris.cube.Cube)
if cube is None:
- return None
- # #301 use of resample would be better here.
- return cube.regridded(prototype)
+ result = None
+ else:
+ result = cube.regrid(prototype, iris.analysis.Linear())
+ return result
def _copy_cube_transformed(src_cube, data, coord_func):
@@ -442,7 +443,7 @@ def _trig_method(coord, trig_function):
return trig_coord
-def curl(i_cube, j_cube, k_cube=None, ignore=None):
+def curl(i_cube, j_cube, k_cube=None):
r"""
Calculate the 2-dimensional or 3-dimensional spherical or cartesian
curl of the given vector of cubes.
@@ -462,11 +463,6 @@ def curl(i_cube, j_cube, k_cube=None, ignore=None):
* k_cube
The k cube of the vector to operate on
- * ignore
- This argument is not used.
-
- .. deprecated:: 0.8
- The coordinates to ignore are determined automatically.
Return (i_cmpt_curl_cube, j_cmpt_curl_cube, k_cmpt_curl_cube)
@@ -524,11 +520,6 @@ def curl(i_cube, j_cube, k_cube=None, ignore=None):
where phi is longitude, theta is latitude.
"""
- if ignore is not None:
- ignore = None
- warn_deprecated('The ignore keyword to iris.analysis.calculus.curl '
- 'is deprecated, ignoring is now done automatically.')
-
# Get the vector quantity names.
# (i.e. ['easterly', 'northerly', 'vertical'])
vector_quantity_names, phenomenon_name = \
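
With the ``ignore`` keyword gone, ``curl`` takes just the positional vector
components, and the internal regrid now goes through the public scheme API;
equivalent caller code (a sketch, with placeholder cubes)::

    import iris.analysis
    from iris.analysis.calculus import curl

    # u_cube, v_cube, w_cube: the i, j, k vector components (placeholders).
    i_curl, j_curl, k_curl = curl(u_cube, v_cube, w_cube)

    # The old cube.regridded(prototype) spelling becomes:
    regridded = some_cube.regrid(prototype_cube, iris.analysis.Linear())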
diff --git a/lib/iris/analysis/interpolate.py b/lib/iris/analysis/interpolate.py
deleted file mode 100644
index 92e6cabdb3..0000000000
--- a/lib/iris/analysis/interpolate.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# (C) British Crown Copyright 2010 - 2017, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""
-Interpolation and re-gridding routines.
-
-See also: :mod:`NumPy <numpy>`, and :ref:`SciPy <scipy>`.
-
-.. deprecated:: 1.10
-
- The module :mod:`iris.analysis.interpolate` is deprecated.
- Please use :meth:`iris.cube.Cube.regrid` or
- :meth:`iris.cube.Cube.interpolate` with the appropriate regridding and
- interpolation schemes from :mod:`iris.analysis` instead.
-
-"""
-# The actual content of this module is all taken from
-# 'iris.analysis._interpolate_backdoor'.
-# The only difference is that this module also emits a deprecation warning when
-# it is imported.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-from iris.analysis._interpolate_backdoor import *
-from iris.analysis._interpolate_backdoor import _warn_deprecated
-
-
-# List all the content exported from _interpolate_backdoor, to ensure we build
-# docs for them.
-__all__ = [
- 'nearest_neighbour_indices',
- 'extract_nearest_neighbour',
- 'nearest_neighbour_data_value',
- 'regrid',
- 'regrid_to_max_resolution',
- 'linear']
-
-# Issue a deprecation message when the module is loaded.
-_warn_deprecated()
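
The deprecation messages in the deleted modules already name each replacement;
gathered into one migration sketch (``cube``, ``src_cube``, ``grid_cube`` and
``sample_points`` are placeholders)::

    import iris.analysis

    # iris.analysis.interpolate.linear(cube, sample_points) becomes:
    result = cube.interpolate(sample_points, iris.analysis.Linear())

    # iris.analysis.interpolate.regrid(src, grid, mode='bilinear') becomes:
    result = src_cube.regrid(grid_cube, iris.analysis.Linear())

    # iris.analysis.interpolate.extract_nearest_neighbour(...) becomes:
    result = cube.interpolate(sample_points, iris.analysis.Nearest())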
diff --git a/lib/iris/analysis/maths.py b/lib/iris/analysis/maths.py
index f0c35e1235..abd6e3a633 100644
--- a/lib/iris/analysis/maths.py
+++ b/lib/iris/analysis/maths.py
@@ -31,7 +31,6 @@
import numpy as np
from numpy import ma
-from iris._deprecation import warn_deprecated
import iris.analysis
import iris.coords
import iris.cube
@@ -225,7 +224,7 @@ def _assert_matching_units(cube, other, operation_name):
raise iris.exceptions.NotYetImplementedError(msg)
-def add(cube, other, dim=None, ignore=True, in_place=False):
+def add(cube, other, dim=None, in_place=False):
"""
Calculate the sum of two cubes, or the sum of a cube and a
coordinate or scalar value.
@@ -265,10 +264,10 @@ def add(cube, other, dim=None, ignore=True, in_place=False):
else:
op = operator.add
return _add_subtract_common(op, 'add', cube, other, new_dtype, dim=dim,
- ignore=ignore, in_place=in_place)
+ in_place=in_place)
-def subtract(cube, other, dim=None, ignore=True, in_place=False):
+def subtract(cube, other, dim=None, in_place=False):
"""
Calculate the difference between two cubes, or the difference between
a cube and a coordinate or scalar value.
@@ -308,11 +307,11 @@ def subtract(cube, other, dim=None, ignore=True, in_place=False):
else:
op = operator.sub
return _add_subtract_common(op, 'subtract', cube, other, new_dtype,
- dim=dim, ignore=ignore, in_place=in_place)
+ dim=dim, in_place=in_place)
def _add_subtract_common(operation_function, operation_name, cube, other,
- new_dtype, dim=None, ignore=True, in_place=False):
+ new_dtype, dim=None, in_place=False):
"""
Function which shares common code between addition and subtraction
of cubes.
@@ -328,8 +327,6 @@ def _add_subtract_common(operation_function, operation_name, cube, other,
case of scalar masked arrays
dim - dimension along which to apply `other` if it's a
coordinate that is not found in `cube`
- ignore - The value of this argument is ignored.
- .. deprecated:: 0.8
in_place - whether or not to apply the operation in place to
`cube` and `cube.data`
@@ -342,14 +339,6 @@ def _add_subtract_common(operation_function, operation_name, cube, other,
# operation with
coord_comp = iris.analysis.coord_comparison(cube, other)
- # provide a deprecation warning if the ignore keyword has been set
- if ignore is not True:
- msg = ('The "ignore" keyword has been deprecated in '
- 'add/subtract. This functionality is now automatic. '
- 'The provided value to "ignore" has been ignored, '
- 'and has been automatically calculated.')
- warn_deprecated(msg)
-
bad_coord_grps = (coord_comp['ungroupable_and_dimensioned'] +
coord_comp['resamplable'])
if bad_coord_grps:
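
For ``add``/``subtract`` the ``ignore`` argument was already a no-op, so
callers simply drop it; the calling convention is otherwise unchanged
(sketch, placeholder cubes)::

    from iris.analysis.maths import add, subtract

    # Previously: add(cube, other, ignore=True)
    total = add(cube, other)
    difference = subtract(cube, other, in_place=False)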
diff --git a/lib/iris/analysis/trajectory.py b/lib/iris/analysis/trajectory.py
index c5433c91fc..54c3df1684 100644
--- a/lib/iris/analysis/trajectory.py
+++ b/lib/iris/analysis/trajectory.py
@@ -27,14 +27,13 @@
import math
import numpy as np
+from scipy.spatial import cKDTree
-from cf_units import Unit
import iris.analysis
import iris.coord_systems
import iris.coords
-from iris.analysis._interpolate_private import \
- _nearest_neighbour_indices_ndcoords, linear as linear_regrid
+from iris.analysis import Linear
from iris.analysis._interpolation import snapshot_grid
from iris.util import _meshgrid
@@ -256,7 +255,7 @@ def interpolate(cube, sample_points, method=None):
if method in ["linear", None]:
for i in range(trajectory_size):
point = [(coord, values[i]) for coord, values in sample_points]
- column = linear_regrid(cube, point)
+ column = cube.interpolate(point, Linear())
new_cube.data[..., i] = column.data
# Fill in the empty squashed (non derived) coords.
for column_coord in column.dim_coords + column.aux_coords:
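
The public entry point is unchanged; only the internals now defer to
``Cube.interpolate``. Typical usage (sketch, placeholder cube)::

    import numpy as np
    from iris.analysis.trajectory import interpolate

    sample_points = [('latitude', np.linspace(-45, 45, 10)),
                     ('longitude', np.linspace(0, 90, 10))]
    traj_cube = interpolate(cube, sample_points, method='linear')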
@@ -383,6 +382,229 @@ def interpolate(cube, sample_points, method=None):
return new_cube
+def _ll_to_cart(lon, lat):
+ # Based on cartopy.img_transform.ll_to_cart().
+ x = np.sin(np.deg2rad(90 - lat)) * np.cos(np.deg2rad(lon))
+ y = np.sin(np.deg2rad(90 - lat)) * np.sin(np.deg2rad(lon))
+ z = np.cos(np.deg2rad(90 - lat))
+ return (x, y, z)
+
+
+def _cartesian_sample_points(sample_points, sample_point_coord_names):
+ """
+ Replace geographic lat/lon with cartesian xyz.
+ Generates coords suitable for nearest point calculations with
+ `scipy.spatial.cKDTree`.
+
+ Args:
+
+ * sample_points[coord][datum]:
+ list of sample_positions for each datum, formatted for fast use of
+ :func:`_ll_to_cart()`.
+
+ * sample_point_coord_names[coord]:
+ list of n coord names
+
+ Returns:
+ list of [x,y,z,t,etc] positions, formatted for kdtree.
+
+ """
+ # Find lat and lon coord indices
+ i_lat = i_lon = None
+ i_non_latlon = list(range(len(sample_point_coord_names)))
+ for i, name in enumerate(sample_point_coord_names):
+ if "latitude" in name:
+ i_lat = i
+ i_non_latlon.remove(i_lat)
+ if "longitude" in name:
+ i_lon = i
+ i_non_latlon.remove(i_lon)
+
+ if i_lat is None or i_lon is None:
+ return sample_points.transpose()
+
+ num_points = len(sample_points[0])
+ cartesian_points = [None] * num_points
+
+ # Get the point coordinates without the latlon
+ for p in range(num_points):
+ cartesian_points[p] = [sample_points[c][p] for c in i_non_latlon]
+
+ # Add cartesian xyz coordinates from latlon
+ x, y, z = _ll_to_cart(sample_points[i_lon], sample_points[i_lat])
+ for p in range(num_points):
+ cartesian_point = cartesian_points[p]
+ cartesian_point.append(x[p])
+ cartesian_point.append(y[p])
+ cartesian_point.append(z[p])
+
+ return cartesian_points
+
+
+def _nearest_neighbour_indices_ndcoords(cube, sample_points, cache=None):
+ """
+ Returns the indices to select the data value(s) closest to the given
+ coordinate point values.
+
+ 'sample_points' is of the form [[coord-or-coord-name, point-value(s)]*].
+ The lengths of all the point-values sequences must be equal.
+
+ This function is adapted for points sampling a multi-dimensional coord,
+ and can currently only do nearest neighbour interpolation.
+
+ Because this function can be slow for multidimensional coordinates,
+ a 'cache' dictionary can be provided by the calling code.
+
+ .. Note::
+
+ If the points are longitudes/latitudes, these are handled correctly as
+ points on the sphere, but the values must be in 'degrees'.
+
+ Developer notes:
+ A "sample space cube" is made which only has the coords and dims we are
+ sampling on.
+ We get the nearest neighbour using this sample space cube.
+
+ """
+ if sample_points:
+ try:
+ coord, value = sample_points[0]
+ except (KeyError, ValueError):
+ emsg = ('Sample points must be a list of '
+ '(coordinate, value) pairs, got {!r}.')
+ raise TypeError(emsg.format(sample_points))
+
+ # Convert names to coords in sample_point and reformat sample point values
+ # for use in `_cartesian_sample_points()`.
+ coord_values = []
+ sample_point_coords = []
+ sample_point_coord_names = []
+ ok_coord_ids = set(map(id, cube.dim_coords + cube.aux_coords))
+ for coord, value in sample_points:
+ coord = cube.coord(coord)
+ if id(coord) not in ok_coord_ids:
+ msg = ('Invalid sample coordinate {!r}: derived coordinates are'
+ ' not allowed.'.format(coord.name()))
+ raise ValueError(msg)
+ sample_point_coords.append(coord)
+ sample_point_coord_names.append(coord.name())
+ value = np.array(value, ndmin=1)
+ coord_values.append(value)
+
+ coord_point_lens = np.array([len(value) for value in coord_values])
+ if not np.all(coord_point_lens == coord_point_lens[0]):
+ msg = 'All coordinates must have the same number of sample points.'
+ raise ValueError(msg)
+
+ coord_values = np.array(coord_values)
+
+ # Which dims are we sampling?
+ sample_dims = set()
+ for coord in sample_point_coords:
+ for dim in cube.coord_dims(coord):
+ sample_dims.add(dim)
+ sample_dims = sorted(list(sample_dims))
+
+ # Extract a sub cube that lives in just the sampling space.
+ sample_space_slice = [0] * cube.ndim
+ for sample_dim in sample_dims:
+ sample_space_slice[sample_dim] = slice(None, None)
+ sample_space_slice = tuple(sample_space_slice)
+ sample_space_cube = cube[sample_space_slice]
+
+ # Just the sampling coords.
+ for coord in sample_space_cube.coords():
+ if not coord.name() in sample_point_coord_names:
+ sample_space_cube.remove_coord(coord)
+
+ # Order the sample point coords according to the sample space cube coords.
+ sample_space_coord_names = \
+ [coord.name() for coord in sample_space_cube.coords()]
+ new_order = [sample_space_coord_names.index(name)
+ for name in sample_point_coord_names]
+ coord_values = np.array([coord_values[i] for i in new_order])
+ sample_point_coord_names = [sample_point_coord_names[i] for i in new_order]
+
+ sample_space_coords = \
+ sample_space_cube.dim_coords + sample_space_cube.aux_coords
+ sample_space_coords_and_dims = \
+ [(coord, sample_space_cube.coord_dims(coord))
+ for coord in sample_space_coords]
+
+ if cache is not None and cube in cache:
+ kdtree = cache[cube]
+ else:
+ # Create a "sample space position" for each datum:
+ # `sample_space_data_positions[coord_index][datum_index]`.
+ sample_space_data_positions = \
+ np.empty((len(sample_space_coords_and_dims),
+ sample_space_cube.data.size),
+ dtype=float)
+ for d, ndi in enumerate(np.ndindex(sample_space_cube.data.shape)):
+ for c, (coord, coord_dims) in \
+ enumerate(sample_space_coords_and_dims):
+ # Index of this datum along this coordinate (could be nD).
+ if coord_dims:
+ keys = tuple(ndi[ind] for ind in coord_dims)
+ else:
+ keys = slice(None, None)
+ # Position of this datum along this coordinate.
+ sample_space_data_positions[c][d] = coord.points[keys]
+
+ # Convert to cartesian coordinates. Flatten for kdtree compatibility.
+ cartesian_space_data_coords = \
+ _cartesian_sample_points(sample_space_data_positions,
+ sample_point_coord_names)
+
+ # Create a kdtree for the nearest-distance lookup to these 3d points.
+ kdtree = cKDTree(cartesian_space_data_coords)
+ # This can find the nearest datum point to any given target point,
+ # which is the goal of this function.
+
+ # Update cache.
+ if cache is not None:
+ cache[cube] = kdtree
+
+ # Convert the sample points to cartesian (3d) coords.
+ # If there is no latlon within the coordinate there will be no change.
+ # Otherwise, geographic latlon is replaced with cartesian xyz.
+ cartesian_sample_points = _cartesian_sample_points(
+ coord_values, sample_point_coord_names)
+
+ # Use kdtree to get the nearest sourcepoint index for each target point.
+ _, datum_index_lists = kdtree.query(cartesian_sample_points)
+
+ # Convert flat indices back into multidimensional sample-space indices.
+ sample_space_dimension_indices = np.unravel_index(
+ datum_index_lists, sample_space_cube.data.shape)
+ # Convert this from "pointwise list of index arrays for each dimension",
+ # to "list of cube indices for each point".
+ sample_space_ndis = np.array(sample_space_dimension_indices).transpose()
+
+ # For the returned result, we must convert these indices into the source
+ # (sample-space) cube, to equivalent indices into the target 'cube'.
+
+    # Make a result array: (cube.ndim * <index>), per sample point.
+ n_points = coord_values.shape[-1]
+ main_cube_slices = np.empty((n_points, cube.ndim), dtype=object)
+ # Initialise so all unused indices are ":".
+ main_cube_slices[:] = slice(None)
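+    # Each row will form the full index for one sample point: e.g. for a 3d
+    # cube sampled over dims (0, 2), a row might be (7, slice(None), 3).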
+
+ # Move result indices according to the source (sample) and target (cube)
+ # dimension mappings.
+ for sample_coord, sample_coord_dims in sample_space_coords_and_dims:
+ # Find the coord in the main cube
+ main_coord = cube.coord(sample_coord.name())
+ main_coord_dims = cube.coord_dims(main_coord)
+ # Fill nearest-point data indices for each coord dimension.
+ for sample_i, main_i in zip(sample_coord_dims, main_coord_dims):
+ main_cube_slices[:, main_i] = sample_space_ndis[:, sample_i]
+
+    # Return as a list of **tuples**: required for correct indexing usage.
+ result = [tuple(inds) for inds in main_cube_slices]
+ return result
+
+
class UnstructuredNearestNeigbourRegridder(object):
"""
Encapsulate the operation of :meth:`iris.analysis.trajectory.interpolate`
diff --git a/lib/iris/config.py b/lib/iris/config.py
index 2bb82924a2..a20b7bb5a0 100644
--- a/lib/iris/config.py
+++ b/lib/iris/config.py
@@ -23,15 +23,6 @@
----------
-.. py:data:: iris.config.SAMPLE_DATA_DIR
-
- Local directory where sample data exists. Defaults to "sample_data"
- sub-directory of the Iris package install directory. The sample data
- directory supports the Iris gallery. Directory contents accessed via
- :func:`iris.sample_data_path`.
-
- .. deprecated:: 1.10
-
.. py:data:: iris.config.TEST_DATA_DIR
Local directory where test data exists. Defaults to "test_data"
@@ -47,20 +38,6 @@
The [optional] name of the logger to notify when first imported.
-.. py:data:: iris.config.RULE_LOG_DIR
-
- The [optional] full path to the rule logging directory used by
- :func:`iris.fileformats.pp.load()` and
- :func:`iris.fileformats.pp.save()`.
-
- .. deprecated:: 1.10
-
-.. py:data:: iris.config.RULE_LOG_IGNORE
-
- The [optional] list of users to ignore when logging rules.
-
- .. deprecated:: 1.10
-
----------
"""
@@ -123,10 +100,6 @@ def get_dir_option(section, option, default=None):
_RESOURCE_SECTION = 'Resources'
-SAMPLE_DATA_DIR = get_dir_option(
- _RESOURCE_SECTION, 'sample_data_dir',
- default=os.path.join(os.path.dirname(__file__), 'sample_data'))
-
TEST_DATA_DIR = get_dir_option(_RESOURCE_SECTION, 'test_data_dir',
default=os.path.join(os.path.dirname(__file__),
'test_data'))
@@ -148,13 +121,6 @@ def get_dir_option(section, option, default=None):
# Logging options
_LOGGING_SECTION = 'Logging'
-
-RULE_LOG_DIR = get_dir_option(_LOGGING_SECTION, 'rule_dir')
-
-
-RULE_LOG_IGNORE = get_option(_LOGGING_SECTION, 'rule_ignore')
-
-
IMPORT_LOGGER = get_option(_LOGGING_SECTION, 'import_logger')
diff --git a/lib/iris/coords.py b/lib/iris/coords.py
index d651770c6e..e8e6ebe9b8 100644
--- a/lib/iris/coords.py
+++ b/lib/iris/coords.py
@@ -1664,16 +1664,17 @@ def _new_points_requirements(self, points):
"""
Confirm that a new set of coord points adheres to the requirements for
:class:`~iris.coords.DimCoord` points, being:
- * points are 1D,
+ * points are scalar or 1D,
* points are numeric, and
* points are monotonic.
"""
- if points.ndim != 1:
- raise ValueError('The points array must be 1-dimensional.')
+ if points.ndim not in (0, 1):
+ raise ValueError(
+ 'The points array must be scalar or 1-dimensional.')
if not np.issubdtype(points.dtype, np.number):
raise ValueError('The points array must be numeric.')
- if len(points) > 1 and not iris.util.monotonic(points, strict=True):
+ if points.size > 1 and not iris.util.monotonic(points, strict=True):
raise ValueError('The points array must be strictly monotonic.')
def _points_setter(self, points):
@@ -1684,6 +1685,9 @@ def _points_setter(self, points):
# so that we can make it read-only.
points = np.array(points, copy=copy)
+ # Check validity requirements for dimension-coordinate points.
+ self._new_points_requirements(points)
+
# Invoke the generic points setter.
super(DimCoord, self)._points_setter(points)
@@ -1692,9 +1696,6 @@ def _points_setter(self, points):
points = self._points_dm.core_data()
# N.B. always a *real* array, as we realised 'points' at the start.
- # Check validity requirements for dimension-coordinate points.
- self._new_points_requirements(points)
-
# Make the array read-only.
points.flags.writeable = False
@@ -1702,15 +1703,16 @@ def _points_setter(self, points):
def _new_bounds_requirements(self, bounds):
"""
- Confirm that a new set of coord points adheres to the requirements for
- :class:`~iris.coords.DimCoord` points, being:
- * points are 1D,
- * points are numeric, and
- * points are monotonic.
+ Confirm that a new set of coord bounds adheres to the requirements for
+ :class:`~iris.coords.DimCoord` bounds, being:
+ * bounds are compatible in shape with the points
+ * bounds are numeric, and
+ * bounds are monotonic in the first dimension.
"""
# Ensure the bounds are a compatible shape.
- if self.shape != bounds.shape[:-1]:
+ if self.shape != bounds.shape[:-1] and \
+ not (self.shape == (1,) and bounds.ndim == 1):
raise ValueError(
"The shape of the bounds array should be "
"points.shape + (n_bounds,)")
@@ -1718,22 +1720,23 @@ def _new_bounds_requirements(self, bounds):
if not np.issubdtype(bounds.dtype, np.number):
raise ValueError('The bounds array must be numeric.')
- n_bounds = bounds.shape[-1]
- n_points = bounds.shape[0]
- if n_points > 1:
+ if bounds.ndim > 1:
+ n_bounds = bounds.shape[-1]
+ n_points = bounds.shape[0]
+ if n_points > 1:
- directions = set()
- for b_index in range(n_bounds):
- monotonic, direction = iris.util.monotonic(
- bounds[:, b_index], strict=True, return_direction=True)
- if not monotonic:
- raise ValueError('The bounds array must be strictly '
- 'monotonic.')
- directions.add(direction)
+ directions = set()
+ for b_index in range(n_bounds):
+ monotonic, direction = iris.util.monotonic(
+ bounds[:, b_index], strict=True, return_direction=True)
+ if not monotonic:
+ raise ValueError('The bounds array must be strictly '
+ 'monotonic.')
+ directions.add(direction)
- if len(directions) != 1:
- raise ValueError('The direction of monotonicity must be '
- 'consistent across all bounds')
+ if len(directions) != 1:
+ raise ValueError('The direction of monotonicity must be '
+ 'consistent across all bounds')
def _bounds_setter(self, bounds):
if bounds is not None:
@@ -1742,6 +1745,9 @@ def _bounds_setter(self, bounds):
bounds = as_concrete_data(bounds)
bounds = np.array(bounds, copy=copy)
+ # Check validity requirements for dimension-coordinate bounds.
+ self._new_bounds_requirements(bounds)
+
# Invoke the generic bounds setter.
super(DimCoord, self)._bounds_setter(bounds)
@@ -1750,9 +1756,6 @@ def _bounds_setter(self, bounds):
bounds = self._bounds_dm.core_data()
# N.B. always a *real* array, as we realised 'bounds' at the start.
- # Check validity requirements for dimension-coordinate bounds.
- self._new_bounds_requirements(bounds)
-
# Ensure the array is read-only.
bounds.flags.writeable = False
diff --git a/lib/iris/cube.py b/lib/iris/cube.py
index 67f6ecfebf..5ec6ee463d 100644
--- a/lib/iris/cube.py
+++ b/lib/iris/cube.py
@@ -46,7 +46,6 @@
import iris.analysis
from iris.analysis.cartography import wrap_lons
import iris.analysis.maths
-import iris.analysis._interpolate_private
import iris.aux_factory
import iris.coord_systems
import iris.coords
@@ -3001,19 +3000,18 @@ def __hash__(self):
return hash(id(self))
def __add__(self, other):
- return iris.analysis.maths.add(self, other, ignore=True)
+ return iris.analysis.maths.add(self, other)
def __iadd__(self, other):
- return iris.analysis.maths.add(self, other, ignore=True, in_place=True)
+ return iris.analysis.maths.add(self, other, in_place=True)
__radd__ = __add__
def __sub__(self, other):
- return iris.analysis.maths.subtract(self, other, ignore=True)
+ return iris.analysis.maths.subtract(self, other)
def __isub__(self, other):
- return iris.analysis.maths.subtract(self, other,
- ignore=True, in_place=True)
+ return iris.analysis.maths.subtract(self, other, in_place=True)
__mul__ = iris.analysis.maths.multiply
__rmul__ = iris.analysis.maths.multiply
@@ -3034,23 +3032,6 @@ def __itruediv__(self, other):
__pow__ = iris.analysis.maths.exponentiate
# END OPERATOR OVERLOADS
- # START ANALYSIS ROUTINES
-
- regridded = iris.util._wrap_function_for_method(
- iris.analysis._interpolate_private.regrid,
- """
- Returns a new cube with values derived from this cube on the
- horizontal grid specified by the grid_cube.
-
- .. deprecated:: 1.10
- Please replace usage of :meth:`~Cube.regridded` with
- :meth:`~Cube.regrid`. See :meth:`iris.analysis.interpolate.regrid`
- for details of exact usage equivalents.
-
- """)
-
- # END ANALYSIS ROUTINES
-
def collapsed(self, coords, aggregator, **kwargs):
"""
Collapse one or more dimensions over the cube given the coordinate/s
diff --git a/lib/iris/etc/mosig_codes.txt b/lib/iris/etc/mosig_codes.txt
deleted file mode 100644
index acce018dfc..0000000000
--- a/lib/iris/etc/mosig_codes.txt
+++ /dev/null
@@ -1,1468 +0,0 @@
-# (C) British Crown Copyright 2010 - 2012, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris.  If not, see <http://www.gnu.org/licenses/>.
-
-# MOSIG rule #216
-IF
-f.lbuser[6] == 06
-f.lbuser[3] == 103
-THEN
-CMAttribute("standard_name", "upward_eastward_momentum_flux_in_air_due_to_nonorographic_westward_gravity_waves")
-CMAttribute("units", "Pa")
-
-# MOSIG rule #217
-IF
-f.lbuser[6] == 06
-f.lbuser[3] == 201
-THEN
-CMAttribute("standard_name", "upward_eastward_momentum_flux_in_air_due_to_orographic_gravity_waves")
-CMAttribute("units", "Pa")
-
-# MOSIG rule #214
-IF
-f.lbuser[6] == 03
-f.lbuser[3] == 394
-THEN
-CMAttribute("standard_name", "surface_downward_eastward_stress")
-CMAttribute("units", "Pa")
-
-# MOSIG rule #215
-IF
-f.lbuser[6] == 06
-f.lbuser[3] == 101
-THEN
-CMAttribute("standard_name", "upward_eastward_momentum_flux_in_air_due_to_nonorographic_eastward_gravity_waves")
-CMAttribute("units", "Pa")
-
-# MOSIG rule #212
-IF
-f.lbuser[6] == 03
-f.lbuser[3] == 337
-THEN
-CMAttribute("standard_name", "downward_heat_flux_in_soil")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #213
-IF
-f.lbuser[6] == 03
-f.lbuser[3] == 392
-THEN
-CMAttribute("standard_name", "surface_downward_eastward_stress")
-CMAttribute("units", "Pa")
-
-# MOSIG rule #210
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 1435
-THEN
-CMAttribute("standard_name", "surface_downwelling_shortwave_flux_in_air")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #211
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3334
-THEN
-CMAttribute("standard_name", "water_potential_evaporation_flux")
-CMAttribute("units", "kg m-2 s-1")
-
-# MOSIG rule #165
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 102
-THEN
-CMAttribute("standard_name", "sea_water_salinity")
-CMAttribute("units", "1")
-
-# MOSIG rule #218
-IF
-f.lbuser[6] == 06
-f.lbuser[3] == 207
-THEN
-CMAttribute("standard_name", "tendency_of_eastward_wind_due_to_orographic_gravity_wave_drag")
-CMAttribute("units", "m s-2")
-
-# MOSIG rule #219
-IF
-f.lbuser[6] == 15
-f.lbuser[3] == 243
-THEN
-CMAttribute("standard_name", "eastward_wind")
-CMAttribute("units", "m s-1")
-
-# MOSIG rule #133
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 15220
-THEN
-CMAttribute("standard_name", "square_of_eastward_wind")
-CMAttribute("units", "???")
-
-# MOSIG rule #132
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 15219
-THEN
-CMAttribute("standard_name", "square_of_air_temperature")
-CMAttribute("units", "???")
-
-# MOSIG rule #131
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 15214
-THEN
-CMAttribute("standard_name", "ertel_potential_vorticity")
-CMAttribute("units", "???")
-
-# MOSIG rule #137
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 15224
-THEN
-CMAttribute("standard_name", "product_of_eastward_wind_and_omega")
-CMAttribute("units", "???")
-
-# MOSIG rule #136
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 15223
-THEN
-CMAttribute("standard_name", "product_of_omega_and_air_temperature")
-CMAttribute("units", "???")
-
-# MOSIG rule #135
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 15222
-THEN
-CMAttribute("standard_name", "lagrangian_tendency_of_air_pressure")
-CMAttribute("units", "Pa s-1")
-
-# MOSIG rule #134
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 15221
-THEN
-CMAttribute("standard_name", "square_of_northward_wind")
-CMAttribute("units", "???")
-
-# MOSIG rule #139
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 15226
-THEN
-CMAttribute("standard_name", "specific_humidity")
-CMAttribute("units", "???")
-
-# MOSIG rule #138
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 15225
-THEN
-CMAttribute("standard_name", "product_of_northward_wind_and_omega")
-CMAttribute("units", "???")
-
-# MOSIG rule #27
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 1201
-THEN
-CMAttribute("standard_name", "surface_net_downward_shortwave_flux")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #20
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 205
-THEN
-CMAttribute("standard_name", "land_area_fraction")
-CMAttribute("units", "%")
-
-# MOSIG rule #160
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 30406
-THEN
-# TODO: Is there a CF standard name for this?
-CMAttribute("long_name", "atmosphere_cloud_ice_content")
-CMAttribute("units", "kg m-2")
-
-# MOSIG rule #29
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 1203
-THEN
-CMAttribute("standard_name", "surface_net_downward_shortwave_flux_where_open_sea")
-CMAttribute("units", "???")
-
-# MOSIG rule #161
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 30417
-THEN
-CMAttribute("standard_name", "surface_air_pressure")
-CMAttribute("units", "Pa")
-
-# MOSIG rule #4
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 4
-THEN
-CMAttribute("standard_name", "air_potential_temperature")
-CMAttribute("units", "K")
-
-# MOSIG rule #8
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 13
-THEN
-CMAttribute("standard_name", "convective_cloud_area_fraction")
-CMAttribute("units", "???")
-
-# MOSIG rule #119
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 8231
-THEN
-CMAttribute("standard_name", "surface_snow_melt_flux_where_land")
-CMAttribute("units", "kg m-2 s-1")
-
-# MOSIG rule #120
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 8234
-THEN
-CMAttribute("standard_name", "surface_runoff_flux")
-CMAttribute("units", "kg m-2 s-1")
-
-# MOSIG rule #122
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 8235
-THEN
-CMAttribute("standard_name", "subsurface_runoff_flux")
-CMAttribute("units", "kg m-2 s-1")
-
-# MOSIG rule #123
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 9201
-THEN
-CMAttribute("standard_name", "large_scale_cloud_area_fraction_of_atmosphere_layer")
-CMAttribute("units", "???")
-
-# MOSIG rule #126
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 12201
-THEN
-CMAttribute("standard_name", "lagrangian_tendency_of_air_pressure")
-CMAttribute("units", "Pa s-1")
-
-# MOSIG rule #127
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 15201
-THEN
-CMAttribute("standard_name", "eastward_wind")
-CMAttribute("units", "m s-1")
-
-# MOSIG rule #129
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 15202
-THEN
-CMAttribute("standard_name", "northward_wind")
-CMAttribute("units", "m s-1")
-
-# MOSIG rule #167
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 121
-THEN
-CMAttribute("standard_name", "baroclinic_eastward_sea_water_velocity")
-CMAttribute("units", "cm s-1")
-
-# MOSIG rule #59
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 2238
-THEN
-CMAttribute("standard_name", "tropopause_downwelling_longwave_flux")
-CMAttribute("units", "???")
-
-# MOSIG rule #55
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 2232
-THEN
-CMAttribute("standard_name", "tendency_of_air_temperature_due_to_longwave_heating")
-CMAttribute("units", "???")
-
-# MOSIG rule #54
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 2208
-THEN
-CMAttribute("standard_name", "surface_downwelling_longwave_flux_in_air_assuming_clear_sky")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #57
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 2237
-THEN
-CMAttribute("standard_name", "tropopause_net_downward_longwave_flux")
-CMAttribute("units", "???")
-
-# MOSIG rule #56
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 2233
-THEN
-CMAttribute("standard_name", "tendency_of_air_temperature_due_to_longwave_heating_assuming_clear_sky")
-CMAttribute("units", "???")
-
-# MOSIG rule #51
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 2205
-THEN
-CMAttribute("standard_name", "toa_outgoing_longwave_flux")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #53
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 2207
-THEN
-CMAttribute("standard_name", "surface_downwelling_longwave_flux_in_air")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #52
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 2206
-THEN
-CMAttribute("standard_name", "toa_outgoing_longwave_flux_assuming_clear_sky")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #259
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 146
-THEN
-CMAttribute("standard_name", "sea_ice_area_fraction")
-CMAttribute("units", "%")
-
-# MOSIG rule #201
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 32211
-THEN
-CMAttribute("standard_name", "tendency_of_sea_ice_area_fraction_due_to_thermodynamics")
-CMAttribute("units", "???")
-
-# MOSIG rule #199
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 32209
-THEN
-CMAttribute("standard_name", "eastward_sea_ice_velocity")
-CMAttribute("units", "m s-1")
-
-# MOSIG rule #198
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 32202
-THEN
-CMAttribute("standard_name", "tendency_of_sea_ice_thickness_due_to_dynamics")
-CMAttribute("units", "???")
-
-# MOSIG rule #200
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 32210
-THEN
-CMAttribute("standard_name", "northward_sea_ice_velocity")
-CMAttribute("units", "m s-1")
-
-# MOSIG rule #195
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 30321
-THEN
-CMAttribute("standard_name", "northward_sea_water_velocity")
-CMAttribute("units", "m s-1")
-
-# MOSIG rule #197
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 32201
-THEN
-CMAttribute("standard_name", "tendency_of_sea_ice_area_fraction_due_to_dynamics")
-CMAttribute("units", "???")
-
-# MOSIG rule #178
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 148
-THEN
-CMAttribute("standard_name", "eastward_sea_ice_velocity")
-CMAttribute("units", "m s-1")
-
-# MOSIG rule #190
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 30201
-THEN
-CMAttribute("standard_name", "upward_sea_water_velocity")
-CMAttribute("units", "m s-1")
-
-# MOSIG rule #193
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 30320
-THEN
-CMAttribute("standard_name", "eastward_sea_water_velocity")
-CMAttribute("units", "m s-1")
-
-# MOSIG rule #192
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 30211
-THEN
-CMAttribute("standard_name", "northward_ocean_heat_transport")
-CMAttribute("units", "PW")
-
-# MOSIG rule #115
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 8225
-THEN
-CMAttribute("standard_name", "soil_temperature")
-CMAttribute("units", "???")
-
-# MOSIG rule #252
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 8223
-THEN
-CMAttribute("standard_name", "soil_moisture_content")
-CMAttribute("units", "kg m-2")
-
-# MOSIG rule #89
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 4204
-THEN
-# TODO: Is there a CF standard name for this?
-CMAttribute("long_name", "large_scale_snowfall_flux")
-CMAttribute("units", "kg m-2 s-1")
-
-# MOSIG rule #111
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 8209
-THEN
-CMAttribute("standard_name", "canopy_water_amount")
-CMAttribute("units", "???")
-
-# MOSIG rule #110
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 8208
-THEN
-CMAttribute("standard_name", "soil_moisture_content")
-CMAttribute("units", "???")
-
-# MOSIG rule #82
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3261
-THEN
-CMAttribute("standard_name", "gross_primary_productivity_of_carbon")
-CMAttribute("units", "kg m-2 s-1")
-
-# MOSIG rule #83
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3262
-THEN
-CMAttribute("standard_name", "net_primary_productivity_of_carbon")
-CMAttribute("units", "kg m-2 s-1")
-
-# MOSIG rule #80
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3249
-THEN
-CMAttribute("standard_name", "wind_speed")
-CMAttribute("units", "m s-1")
-
-# MOSIG rule #81
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3258
-THEN
-CMAttribute("standard_name", "surface_snow_melt_heat_flux")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #86
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3332
-THEN
-CMAttribute("standard_name", "toa_outgoing_longwave_flux")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #87
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 4203
-THEN
-# TODO: Is there a CF standard name for this?
-CMAttribute("long_name", "large_scale_rainfall_rate")
-CMAttribute("units", "kg m-2 s-1")
-
-# MOSIG rule #84
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3295
-THEN
-CMAttribute("standard_name", "surface_snow_area_fraction_where_land")
-CMAttribute("units", "%")
-
-# MOSIG rule #85
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3298
-THEN
-CMAttribute("standard_name", "water_sublimation_flux")
-CMAttribute("units", "kg m-2 s-1")
-
-# MOSIG rule #207
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 408
-THEN
-CMAttribute("standard_name", "air_pressure")
-CMAttribute("units", "Pa")
-
-# MOSIG rule #206
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 31
-THEN
-CMAttribute("standard_name", "sea_ice_thickness")
-CMAttribute("units", "%")
-
-# MOSIG rule #226
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 33001
-THEN
-CMAttribute("standard_name", "mole_fraction_of_ozone_in_air")
-CMAttribute("units", "mole mole-1")
-
-# MOSIG rule #3
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3
-THEN
-CMAttribute("standard_name", "northward_wind")
-CMAttribute("units", "???")
-
-# MOSIG rule #225
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 30314
-THEN
-CMAttribute("standard_name", "tendency_of_eastward_wind_due_to_eliassen_palm_flux_divergence")
-CMAttribute("units", "m s-2")
-
-# MOSIG rule #245
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 33150
-THEN
-CMAttribute("standard_name", "age_of_stratospheric_air")
-CMAttribute("units", "Years")
-
-# MOSIG rule #244
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 33058
-THEN
-CMAttribute("standard_name", "mole_fraction_of_atomic_nitrogen_in_air")
-CMAttribute("units", "mole mole-1")
-
-# MOSIG rule #108
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 8023
-THEN
-CMAttribute("standard_name", "surface_snow_amount")
-CMAttribute("units", "kg m-2")
-
-# MOSIG rule #109
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 8202
-THEN
-CMAttribute("standard_name", "surface_snow_melt_flux_where_land")
-CMAttribute("units", "kg m-2 s-1")
-
-# MOSIG rule #241
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 33054
-THEN
-CMAttribute("standard_name", "mole_fraction_of_chlorine_nitrate_in_air")
-CMAttribute("units", "mole mole-1")
-
-# MOSIG rule #240
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 33051
-THEN
-CMAttribute("standard_name", "mole_fraction_of_hypochlorous_acid_in_air")
-CMAttribute("units", "mole mole-1")
-
-# MOSIG rule #243
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 33056
-THEN
-CMAttribute("standard_name", "mole_fraction_of_cfc12_in_air")
-CMAttribute("units", "mole mole-1")
-
-# MOSIG rule #242
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 33055
-THEN
-CMAttribute("standard_name", "mole_fraction_of_cfc11_in_air")
-CMAttribute("units", "mole mole-1")
-
-# MOSIG rule #103
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 5214
-THEN
-CMAttribute("standard_name", "rainfall_flux")
-CMAttribute("units", "kg m-2 s-1")
-
-# MOSIG rule #101
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 5213
-THEN
-CMAttribute("standard_name", "mass_fraction_of_convective_cloud_liquid_water_in_air")
-CMAttribute("units", "1")
-
-# MOSIG rule #106
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 5233
-THEN
-CMAttribute("standard_name", "mass_fraction_of_convective_cloud_liquid_water_in_air")
-CMAttribute("units", "1")
-
-# MOSIG rule #104
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 5215
-THEN
-CMAttribute("standard_name", "snowfall_flux")
-CMAttribute("units", "kg m-2 s-1")
-
-# MOSIG rule #105
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 5216
-THEN
-CMAttribute("standard_name", "precipitation_flux")
-CMAttribute("units", "kg m-2 s-1")
-
-# MOSIG rule #39
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 1233
-THEN
-CMAttribute("standard_name", "tendency_of_air_temperature_due_to_shortwave_heating_assuming_clear_sky")
-CMAttribute("units", "???")
-
-# MOSIG rule #38
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 1232
-THEN
-CMAttribute("standard_name", "tendency_of_air_temperature_due_to_shortwave_heating")
-CMAttribute("units", "???")
-
-# MOSIG rule #30
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 1207
-THEN
-CMAttribute("standard_name", "toa_incoming_shortwave_flux")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #37
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 1211
-THEN
-CMAttribute("standard_name", "surface_upwelling_shortwave_flux_in_air_assuming_clear_sky")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #36
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 1210
-THEN
-CMAttribute("standard_name", "surface_downwelling_shortwave_flux_in_air_assuming_clear_sky")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #35
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 1209
-THEN
-CMAttribute("standard_name", "toa_outgoing_shortwave_flux_assuming_clear_sky")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #34
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 1208
-THEN
-CMAttribute("standard_name", "toa_outgoing_shortwave_flux")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #246
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 33270
-THEN
-CMAttribute("standard_name", "age_of_stratospheric_air")
-CMAttribute("units", "Years")
-
-# MOSIG rule #205
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 43
-THEN
-CMAttribute("standard_name", "soil_porosity")
-CMAttribute("units", "1")
-
-# MOSIG rule #223
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 30312
-THEN
-CMAttribute("standard_name", "northward_eliassen_palm_flux_in_air")
-CMAttribute("units", "m3 s-2")
-
-# MOSIG rule #176
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 147
-THEN
-CMAttribute("standard_name", "sea_ice_thickness")
-CMAttribute("units", "m")
-
-# MOSIG rule #60
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3201
-THEN
-CMAttribute("standard_name", "downward_heat_flux_in_sea_ice")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #61
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3202
-THEN
-CMAttribute("standard_name", "downward_heat_flux_in_soil")
-CMAttribute("units", "???")
-
-# MOSIG rule #62
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3217
-THEN
-CMAttribute("standard_name", "surface_upward_sensible_heat_flux")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #63
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3219
-THEN
-CMAttribute("standard_name", "surface_downward_eastward_stress")
-CMAttribute("units", "Pa")
-
-# MOSIG rule #64
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3220
-THEN
-CMAttribute("standard_name", "surface_downward_northward_stress")
-CMAttribute("units", "Pa")
-
-# MOSIG rule #65
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3223
-THEN
-CMAttribute("standard_name", "water_evaporation_flux")
-CMAttribute("units", "kg m-2 s-1")
-
-# MOSIG rule #67
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3224
-THEN
-CMAttribute("standard_name", "wind_mixing_energy_flux_into_ocean")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #68
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3225
-THEN
-CMAttribute("standard_name", "eastward_wind")
-CMAttribute("units", "m s-1")
-
-# MOSIG rule #69
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3226
-THEN
-CMAttribute("standard_name", "northward_wind")
-CMAttribute("units", "m s-1")
-
-# MOSIG rule #173
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 143
-THEN
-CMAttribute("standard_name", "upward_sea_ice_basal_heat_flux")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #172
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 137
-THEN
-CMAttribute("standard_name", "ocean_mixed_layer_thickness")
-CMAttribute("units", "m")
-
-# MOSIG rule #170
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 132
-THEN
-CMAttribute("standard_name", "tendency_of_ocean_barotropic_streamfunction")
-CMAttribute("units", "???")
-
-# MOSIG rule #203
-IF
-f.lbuser[6] == 03
-f.lbuser[3] == 177
-THEN
-CMAttribute("standard_name", "prescribed_heat_flux_into_slab_ocean")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #222
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 30311
-THEN
-CMAttribute("standard_name", "northward_transformed_eulerian_mean_air_velocity")
-CMAttribute("units", "m s-1")
-
-# MOSIG rule #181
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 150
-THEN
-CMAttribute("standard_name", "surface_downward_eastward_stress")
-CMAttribute("units", "Pa")
-
-# MOSIG rule #182
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 151
-THEN
-CMAttribute("standard_name", "surface_downward_northward_stress")
-CMAttribute("units", "Pa")
-
-# MOSIG rule #183
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 152
-THEN
-CMAttribute("standard_name", "wind_mixing_energy_flux_into_ocean")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #180
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 149
-THEN
-CMAttribute("standard_name", "northward_sea_ice_velocity")
-CMAttribute("units", "m s-1")
-
-# MOSIG rule #2
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 2
-THEN
-CMAttribute("standard_name", "eastward_wind")
-CMAttribute("units", "???")
-
-# MOSIG rule #187
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 186
-THEN
-CMAttribute("standard_name", "water_flux_correction_where_ocean")
-CMAttribute("units", "kg m-2 s-1")
-
-# MOSIG rule #220
-IF
-f.lbuser[6] == 15
-f.lbuser[3] == 244
-THEN
-CMAttribute("standard_name", "northward_wind")
-CMAttribute("units", "m s-1")
-
-# MOSIG rule #186
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 171
-THEN
-CMAttribute("standard_name", "snowfall_flux")
-CMAttribute("units", "kg m-2 s-1")
-
-# MOSIG rule #188
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 190
-THEN
-CMAttribute("standard_name", "surface_melt_heat_flux")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #189
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 191
-THEN
-CMAttribute("standard_name", "downward_heat_flux_in_sea_ice")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #202
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 32212
-THEN
-CMAttribute("standard_name", "tendency_of_sea_ice_thickness_due_to_thermodynamics")
-CMAttribute("units", "???")
-
-# MOSIG rule #221
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 30310
-THEN
-CMAttribute("standard_name", "northward_transformed_eulerian_mean_air_velocity")
-CMAttribute("units", "m s-1")
-
-# MOSIG rule #185
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 166
-THEN
-CMAttribute("standard_name", "water_flux_into_ocean_from_rivers")
-CMAttribute("units", "kg m-2 s-1")
-
-# MOSIG rule #168
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 122
-THEN
-CMAttribute("standard_name", "baroclinic_northward_sea_water_velocity")
-CMAttribute("units", "cm s-1")
-
-# MOSIG rule #169
-IF
-f.lbuser[6] == 02
-f.lbuser[3] == 130
-THEN
-CMAttribute("standard_name", "ocean_barotropic_streamfunction")
-CMAttribute("units", "m3 s-1")
-
-# MOSIG rule #229
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 33006
-THEN
-CMAttribute("standard_name", "mole_fraction_of_peroxynitric_acid_in_air")
-CMAttribute("units", "mole mole-1")
-
-# MOSIG rule #228
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 33005
-THEN
-CMAttribute("standard_name", "mole_fraction_of_dinitrogen_pentoxide_in_air")
-CMAttribute("units", "mole mole-1")
-
-# MOSIG rule #90
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 5205
-THEN
-CMAttribute("standard_name", "convective_rainfall_rate")
-CMAttribute("units", "kg m-2 s-1")
-
-# MOSIG rule #93
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 5209
-THEN
-CMAttribute("standard_name", "air_temperature")
-CMAttribute("units", "???")
-
-# MOSIG rule #92
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 5206
-THEN
-CMAttribute("standard_name", "convective_snowfall_flux")
-CMAttribute("units", "kg m-2 s-1")
-
-# MOSIG rule #94
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 5212
-THEN
-CMAttribute("standard_name", "convective_cloud_area_fraction_of_atmosphere_layer")
-CMAttribute("units", "???")
-
-# MOSIG rule #11
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 25
-THEN
-CMAttribute("standard_name", "atmosphere_boundary_layer_thickness")
-CMAttribute("units", "???")
-
-# MOSIG rule #10
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 24
-THEN
-CMAttribute("standard_name", "surface_temperature")
-CMAttribute("units", "K")
-
-# MOSIG rule #13
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 29
-THEN
-CMAttribute("standard_name", "northward_sea_water_velocity")
-CMAttribute("units", "???")
-
-# MOSIG rule #12
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 28
-THEN
-CMAttribute("standard_name", "eastward_sea_water_velocity")
-CMAttribute("units", "???")
-
-# MOSIG rule #14
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 30
-THEN
-CMAttribute("standard_name", "land_area_fraction")
-CMAttribute("units", "%")
-
-# MOSIG rule #17
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 33
-THEN
-CMAttribute("standard_name", "surface_altitude")
-CMAttribute("units", "m")
-
-# MOSIG rule #16
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 32
-THEN
-CMAttribute("standard_name", "sea_ice_thickness")
-CMAttribute("units", "m")
-
-# MOSIG rule #19
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 60
-THEN
-CMAttribute("standard_name", "mole_fraction_of_o3_in_air")
-CMAttribute("units", "1e-9")
-
-# MOSIG rule #117
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 8230
-THEN
-CMAttribute("standard_name", "mass_fraction_of_frozen_water_in_soil_moisture")
-CMAttribute("units", "???")
-
-# MOSIG rule #116
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 8229
-THEN
-CMAttribute("standard_name", "mass_fraction_of_unfrozen_water_in_soil_moisture")
-CMAttribute("units", "???")
-
-# MOSIG rule #204
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 40
-THEN
-CMAttribute("standard_name", "volume_fraction_of_water_in_soil_at_wilting_point")
-CMAttribute("units", "1")
-
-# MOSIG rule #151
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 16224
-THEN
-CMAttribute("standard_name", "square_of_height")
-CMAttribute("units", "???")
-
-# MOSIG rule #150
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 16222
-THEN
-CMAttribute("standard_name", "air_pressure_at_sea_level")
-CMAttribute("units", "Pa")
-
-# MOSIG rule #238
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 33048
-THEN
-CMAttribute("standard_name", "mole_fraction_of_bromine_nitrate_in_air")
-CMAttribute("units", "mole mole-1")
-
-# MOSIG rule #239
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 33049
-THEN
-CMAttribute("standard_name", "mole_fraction_of_nitrous_oxide_in_air")
-CMAttribute("units", "mole mole-1")
-
-# MOSIG rule #234
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 33042
-THEN
-CMAttribute("standard_name", "mole_fraction_of_chlorine_monoxide_in_air")
-CMAttribute("units", "mole mole-1")
-
-# MOSIG rule #235
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 33043
-THEN
-CMAttribute("standard_name", "mole_fraction_of_dichlorine_peroxide_in_air")
-CMAttribute("units", "mole mole-1")
-
-# MOSIG rule #236
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 33044
-THEN
-CMAttribute("standard_name", "mole_fraction_of_chlorine_dioxide_in_air")
-CMAttribute("units", "mole mole-1")
-
-# MOSIG rule #237
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 33047
-THEN
-CMAttribute("standard_name", "mole_fraction_of_bromine_chloride_in_air")
-CMAttribute("units", "mole mole-1")
-
-# MOSIG rule #230
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 33007
-THEN
-CMAttribute("standard_name", "mole_fraction_of_chlorine_nitrate_in_air")
-CMAttribute("units", "mole mole-1")
-
-# MOSIG rule #231
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 33009
-THEN
-CMAttribute("standard_name", "mole_fraction_of_methane_in_air")
-CMAttribute("units", "mole mole-1")
-
-# MOSIG rule #233
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 33041
-THEN
-CMAttribute("standard_name", "mole_fraction_of_atomic_chlorine_in_air")
-CMAttribute("units", "mole mole-1")
-
-# MOSIG rule #224
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 30313
-THEN
-CMAttribute("standard_name", "upward_eliassen_palm_flux_in_air")
-CMAttribute("units", "m3 s-2")
-
-# MOSIG rule #48
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 2203
-THEN
-CMAttribute("standard_name", "surface_net_downward_longwave_flux_where_open_sea")
-CMAttribute("units", "???")
-
-# MOSIG rule #44
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 1242
-THEN
-CMAttribute("standard_name", "large_scale_cloud_liquid_water_content_of_atmosphere_layer")
-CMAttribute("units", "???")
-
-# MOSIG rule #45
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 2201
-THEN
-CMAttribute("standard_name", "surface_net_downward_longwave_flux")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #42
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 1237
-THEN
-CMAttribute("standard_name", "net_downward_shortwave_flux_in_air")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #43
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 1238
-THEN
-CMAttribute("standard_name", "tropopause_upwelling_shortwave_flux")
-CMAttribute("units", "???")
-
-# MOSIG rule #40
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 1235
-THEN
-CMAttribute("standard_name", "surface_downwelling_shortwave_flux_in_air")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #1
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 1
-THEN
-CMAttribute("standard_name", "surface_air_pressure")
-CMAttribute("units", "Pa")
-
-# MOSIG rule #5
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 10
-THEN
-CMAttribute("standard_name", "specific_humidity")
-CMAttribute("units", "1")
-
-# MOSIG rule #9
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 23
-THEN
-CMAttribute("standard_name", "surface_snow_amount")
-CMAttribute("units", "kg m-2")
-
-# MOSIG rule #146
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 16202
-THEN
-CMAttribute("standard_name", "geopotential_height")
-CMAttribute("units", "m")
-
-# MOSIG rule #147
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 16203
-THEN
-CMAttribute("standard_name", "air_temperature")
-CMAttribute("units", "K")
-
-# MOSIG rule #144
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 15239
-THEN
-CMAttribute("standard_name", "product_of_eastward_wind_and_geopotential_height")
-CMAttribute("units", "???")
-
-# MOSIG rule #145
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 15240
-THEN
-CMAttribute("standard_name", "product_of_northward_wind_and_geopotential_height")
-CMAttribute("units", "???")
-
-# MOSIG rule #142
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 15235
-THEN
-CMAttribute("standard_name", "product_of_omega_and_specific_humidity")
-CMAttribute("units", "???")
-
-# MOSIG rule #143
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 15238
-THEN
-CMAttribute("standard_name", "geopotential_height")
-CMAttribute("units", "???")
-
-# MOSIG rule #140
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 15227
-THEN
-CMAttribute("standard_name", "product_of_eastward_wind_and_specific_humidity")
-CMAttribute("units", "???")
-
-# MOSIG rule #141
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 15228
-THEN
-CMAttribute("standard_name", "product_of_northward_wind_and_specific_humidity")
-CMAttribute("units", "???")
-
-# MOSIG rule #209
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 1410
-THEN
-CMAttribute("standard_name", "surface_downwelling_shortwave_flux_in_air_assuming_clear_sky")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #208
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 409
-THEN
-CMAttribute("standard_name", "surface_air_pressure")
-CMAttribute("units", "Pa")
-
-# MOSIG rule #149
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 16204
-THEN
-CMAttribute("standard_name", "relative_humidity")
-CMAttribute("units", "%")
-
-# MOSIG rule #77
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3237
-THEN
-CMAttribute("standard_name", "specific_humidity")
-CMAttribute("units", "1")
-
-# MOSIG rule #76
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3236
-THEN
-CMAttribute("standard_name", "air_temperature")
-CMAttribute("units", "K")
-
-# MOSIG rule #72
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3234
-THEN
-CMAttribute("standard_name", "surface_upward_latent_heat_flux")
-CMAttribute("units", "W m-2")
-
-# MOSIG rule #71
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3227
-THEN
-CMAttribute("standard_name", "wind_speed")
-CMAttribute("units", "m s-1")
-
-# MOSIG rule #79
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3245
-THEN
-CMAttribute("standard_name", "relative_humidity")
-CMAttribute("units", "???")
-
-# MOSIG rule #78
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 3238
-THEN
-CMAttribute("standard_name", "soil_temperature")
-CMAttribute("units", "???")
-
-# MOSIG rule #227
-IF
-f.lbuser[6] == 1
-f.lbuser[3] == 33004
-THEN
-CMAttribute("standard_name", "mole_fraction_of_nitrogen_trioxide_in_air")
-CMAttribute("units", "mole mole-1")
-
diff --git a/lib/iris/etc/pp_save_rules.txt b/lib/iris/etc/pp_save_rules.txt
deleted file mode 100644
index b13bbb6ecf..0000000000
--- a/lib/iris/etc/pp_save_rules.txt
+++ /dev/null
@@ -1,772 +0,0 @@
-# (C) British Crown Copyright 2010 - 2017, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris.  If not, see <http://www.gnu.org/licenses/>.
-
-################################################################
-### stuff that's missing from the default pp, or always true ###
-################################################################
-
-IF
- True
-THEN
- pp.lbproc = 0 # Processing. Start at 0.
-
-IF
- cm.coord_system("GeogCS") is not None or cm.coord_system(None) is None
-THEN
- pp.bplat = 90
- pp.bplon = 0
-
-IF
- cm.coord_system("RotatedGeogCS") is not None
-THEN
- pp.bplat = cm.coord_system("RotatedGeogCS").grid_north_pole_latitude
- pp.bplon = cm.coord_system("RotatedGeogCS").grid_north_pole_longitude
-
-
-#UM - no version number
-IF
- not 'um_version' in cm.attributes
- 'source' in cm.attributes
- len(cm.attributes['source'].rsplit("Data from Met Office Unified Model", 1)) > 1
- len(cm.attributes['source'].rsplit("Data from Met Office Unified Model", 1)[1]) == 0
-THEN
- pp.lbsrce = 1111
-
-#UM - with version number
-IF
- not 'um_version' in cm.attributes
- 'source' in cm.attributes
- len(cm.attributes['source'].rsplit("Data from Met Office Unified Model", 1)) > 1
- len(cm.attributes['source'].rsplit("Data from Met Office Unified Model", 1)[1]) > 0
-THEN
- pp.lbsrce = int(float(cm.attributes['source'].rsplit("Data from Met Office Unified Model", 1)[1]) * 1000000) + 1111 # UM version
-
-#UM - from 'um_version' attribute
-IF
- 'um_version' in cm.attributes
-THEN
- pp.lbsrce = 1111 + 10000 * int(cm.attributes['um_version'].split('.')[1]) + 1000000 * int(cm.attributes['um_version'].split('.')[0])
-
-IF
- 'STASH' in cm.attributes
- isinstance(cm.attributes['STASH'], iris.fileformats.pp.STASH)
-THEN
- pp.lbuser[3] = 1000 * (cm.attributes['STASH'].section or 0) + (cm.attributes['STASH'].item or 0)
- pp.lbuser[6] = (cm.attributes['STASH'].model or 0)
-
-
-######################################################
-### time - lbtim, t1, t2 and lbft (but not lbproc) ###
-######################################################
-
-#no forecast
-IF
- scalar_coord(cm, 'time') is not None
- scalar_coord(cm, 'forecast_period') is None
- scalar_coord(cm, 'forecast_reference_time') is None
-THEN
- pp.lbtim.ia = 0
- pp.lbtim.ib = 0
- pp.t1 = scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').points[0])
- pp.t2 = netcdftime.datetime(0, 0, 0)
-
-
-#forecast
-IF
- scalar_coord(cm, 'time') is not None
- not scalar_coord(cm, 'time').has_bounds()
- scalar_coord(cm, 'forecast_period') is not None
-THEN
- pp.lbtim.ia = 0
- pp.lbtim.ib = 1
- pp.t1 = scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').points[0])
- pp.t2 = scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').points[0] - scalar_coord(cm, 'forecast_period').points[0])
- pp.lbft = scalar_coord(cm, 'forecast_period').points[0]
-
-
-#time mean (non-climatological)
-# XXX This only works when we have a single timestep
-IF
- # XXX How do we know *which* time to use if there are more than
- # one? *Can* there be more than one?
- scalar_coord(cm, 'time') is not None
- scalar_coord(cm, 'time').has_bounds()
- scalar_coord(cm, 'clim_season') is None
- scalar_coord(cm, 'forecast_period') is not None
- scalar_coord(cm, 'forecast_period').has_bounds()
-THEN
- pp.lbtim.ib = 2
- pp.t1 = scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0,0])
- pp.t2 = scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0,1])
- pp.lbft = scalar_coord(cm, 'forecast_period').units.convert(scalar_coord(cm, 'forecast_period').bounds[0, 1], 'hours')
-
-IF
- # Handle missing forecast period using time and forecast reference time.
- scalar_coord(cm, 'time') is not None
- scalar_coord(cm, 'time').has_bounds()
- scalar_coord(cm, 'clim_season') is None
- scalar_coord(cm, 'forecast_period') is None
- scalar_coord(cm, 'forecast_reference_time') is not None
-THEN
- pp.lbtim.ib = 2
- pp.t1 = scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0,0])
- pp.t2 = scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0,1])
- pp.lbft = scalar_coord(cm, 'time').units.convert(scalar_coord(cm, 'time').bounds[0, 1], 'hours since epoch') - scalar_coord(cm, 'forecast_reference_time').units.convert(scalar_coord(cm, 'forecast_reference_time').points[0], 'hours since epoch')
-
-IF
- # XXX Note the repetition of the previous rule's constraints
- # This can be addressed through REQUIRES/PROVIDES extensions
- scalar_coord(cm, 'time') is not None
- scalar_coord(cm, 'time').has_bounds()
- scalar_coord(cm, 'clim_season') is None
- scalar_coord(cm, 'forecast_period') is not None or scalar_coord(cm, 'forecast_reference_time') is not None
- scalar_cell_method(cm, 'mean', 'time') is not None
- scalar_cell_method(cm, 'mean', 'time').intervals != ()
- scalar_cell_method(cm, 'mean', 'time').intervals[0].endswith('hour')
-THEN
- pp.lbtim.ia = int(scalar_cell_method(cm, 'mean', 'time').intervals[0][:-5])
-
-IF
- # XXX Note the repetition of the previous rule's constraints
- scalar_coord(cm, 'time') is not None
- scalar_coord(cm, 'time').has_bounds()
- scalar_coord(cm, 'clim_season') is None
- scalar_coord(cm, 'forecast_period') is not None or scalar_coord(cm, 'forecast_reference_time') is not None
- scalar_cell_method(cm, 'mean', 'time') is None or scalar_cell_method(cm, 'mean', 'time').intervals == () or not scalar_cell_method(cm, 'mean', 'time').intervals[0].endswith('hour')
-THEN
- pp.lbtim.ia = 0
-
-IF
- # If the cell methods contain a minimum then overwrite lbtim.ia with this
- # interval
- scalar_coord(cm, 'time') is not None
- scalar_coord(cm, 'time').has_bounds()
- scalar_coord(cm, 'clim_season') is None
- scalar_coord(cm, 'forecast_period') is not None or scalar_coord(cm, 'forecast_reference_time') is not None
- scalar_cell_method(cm, 'minimum', 'time') is not None
- scalar_cell_method(cm, 'minimum', 'time').intervals != ()
- scalar_cell_method(cm, 'minimum', 'time').intervals[0].endswith('hour')
-THEN
- # set lbtim.ia with the integer part of the cell method's interval
- # e.g. if interval is '24 hour' then lbtim.ia becomes 24
- pp.lbtim.ia = int(scalar_cell_method(cm, 'minimum', 'time').intervals[0][:-5])
-
-IF
- # If the cell methods contain a maximum then overwrite lbtim.ia with this
- # interval
- scalar_coord(cm, 'time') is not None
- scalar_coord(cm, 'time').has_bounds()
- scalar_coord(cm, 'clim_season') is None
- scalar_coord(cm, 'forecast_period') is not None or scalar_coord(cm, 'forecast_reference_time') is not None
- scalar_cell_method(cm, 'maximum', 'time') is not None
- scalar_cell_method(cm, 'maximum', 'time').intervals != ()
- scalar_cell_method(cm, 'maximum', 'time').intervals[0].endswith('hour')
-THEN
- # set lbtim.ia with the integer part of the cell method's interval
- # e.g. if interval is '1 hour' then lbtim.ia becomes 1
- pp.lbtim.ia = int(scalar_cell_method(cm, 'maximum', 'time').intervals[0][:-5])
-
-#climatiological time mean - single year
-IF
- scalar_coord(cm, 'time') is not None
- scalar_coord(cm, 'time').has_bounds()
- scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0,0]).year == scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0,1]).year
- scalar_coord(cm, 'forecast_period') is not None
- scalar_coord(cm, 'forecast_period').has_bounds()
- scalar_coord(cm, 'clim_season') is not None
- 'clim_season' in cm.cell_methods[-1].coord_names
-THEN
- pp.lbtim.ia = 0
- pp.lbtim.ib = 2
- pp.t1 = scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0, 0])
- pp.t2 = scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0, 1])
- pp.lbft = scalar_coord(cm, 'forecast_period').units.convert(scalar_coord(cm, 'forecast_period').bounds[0, 1], 'hours')
-
-
-#climatiological time mean - spanning years - djf
-IF
- scalar_coord(cm, 'time') is not None
- scalar_coord(cm, 'time').has_bounds()
- scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0,0]).year != scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0,1]).year
- scalar_coord(cm, 'forecast_period') is not None
- scalar_coord(cm, 'forecast_period').has_bounds()
- scalar_coord(cm, 'clim_season') is not None
- 'clim_season' in cm.cell_methods[-1].coord_names
- scalar_coord(cm, 'clim_season').points[0] == 'djf'
-THEN
- pp.lbtim.ia = 0
- pp.lbtim.ib = 3
-
- pp.t1 = scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0,0])
- pp.t2 = scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0,1])
-
- pp.t1 = netcdftime.datetime( pp.t1.year if pp.t1.month==12 else pp.t1.year-1, 12, 1, 0, 0, 0 )
- pp.t2 = netcdftime.datetime( pp.t2.year, 3, 1, 0, 0, 0 )
-
- self.conditional_warning(scalar_coord(cm, 'time').bounds[0,0] != scalar_coord(cm, 'time').units.date2num(pp.t1), "modified t1 for climatological seasonal mean")
- self.conditional_warning(scalar_coord(cm, 'time').bounds[0,1] != scalar_coord(cm, 'time').units.date2num(pp.t2), "modified t2 for climatological seasonal mean")
-
- pp.lbft = scalar_coord(cm, 'forecast_period').units.convert(scalar_coord(cm, 'forecast_period').bounds[0, 1], 'hours')
-
-#climatiological time mean - spanning years - mam
-IF
- scalar_coord(cm, 'time') is not None
- scalar_coord(cm, 'time').has_bounds()
- scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0,0]).year != scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0,1]).year
- scalar_coord(cm, 'forecast_period') is not None
- scalar_coord(cm, 'forecast_period').has_bounds()
- scalar_coord(cm, 'clim_season') is not None
- 'clim_season' in cm.cell_methods[-1].coord_names
- scalar_coord(cm, 'clim_season').points[0] == 'mam'
-THEN
- pp.lbtim.ia = 0
- pp.lbtim.ib = 3
-
- pp.t1 = scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0,0])
- pp.t2 = scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0,1])
-
- pp.t1 = netcdftime.datetime( pp.t1.year, 3, 1, 0, 0, 0 )
- pp.t2 = netcdftime.datetime( pp.t2.year, 6, 1, 0, 0, 0 )
-
- self.conditional_warning(scalar_coord(cm, 'time').bounds[0,0] != scalar_coord(cm, 'time').units.date2num(pp.t1), "modified t1 for climatological seasonal mean")
- self.conditional_warning(scalar_coord(cm, 'time').bounds[0,1] != scalar_coord(cm, 'time').units.date2num(pp.t2), "modified t2 for climatological seasonal mean")
-
- pp.lbft = scalar_coord(cm, 'forecast_period').units.convert(scalar_coord(cm, 'forecast_period').bounds[0, 1], 'hours')
-
-#climatiological time mean - spanning years - jja
-IF
- scalar_coord(cm, 'time') is not None
- scalar_coord(cm, 'time').has_bounds()
- scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0,0]).year != scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0,1]).year
- scalar_coord(cm, 'forecast_period') is not None
- scalar_coord(cm, 'forecast_period').has_bounds()
- scalar_coord(cm, 'clim_season') is not None
- 'clim_season' in cm.cell_methods[-1].coord_names
- scalar_coord(cm, 'clim_season').points[0] == 'jja'
-THEN
- pp.lbtim.ia = 0
- pp.lbtim.ib = 3
-
- pp.t1 = scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0,0])
- pp.t2 = scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0,1])
-
- pp.t1 = netcdftime.datetime( pp.t1.year, 6, 1, 0, 0, 0 )
- pp.t2 = netcdftime.datetime( pp.t2.year, 9, 1, 0, 0, 0 )
-
- self.conditional_warning(scalar_coord(cm, 'time').bounds[0,0] != scalar_coord(cm, 'time').units.date2num(pp.t1), "modified t1 for climatological seasonal mean")
- self.conditional_warning(scalar_coord(cm, 'time').bounds[0,1] != scalar_coord(cm, 'time').units.date2num(pp.t2), "modified t2 for climatological seasonal mean")
-
- pp.lbft = scalar_coord(cm, 'forecast_period').units.convert(scalar_coord(cm, 'forecast_period').bounds[0, 1], 'hours')
-
-#climatiological time mean - spanning years - son
-IF
- scalar_coord(cm, 'time') is not None
- scalar_coord(cm, 'time').has_bounds()
- scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0,0]).year != scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0,1]).year
- scalar_coord(cm, 'forecast_period') is not None
- scalar_coord(cm, 'forecast_period').has_bounds()
- scalar_coord(cm, 'clim_season') is not None
- 'clim_season' in cm.cell_methods[-1].coord_names
- scalar_coord(cm, 'clim_season').points[0] == 'son'
-THEN
- pp.lbtim.ia = 0
- pp.lbtim.ib = 3
-
- pp.t1 = scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0,0])
- pp.t2 = scalar_coord(cm, 'time').units.num2date(scalar_coord(cm, 'time').bounds[0,1])
-
- pp.t1 = netcdftime.datetime( pp.t1.year, 9, 1, 0, 0, 0 )
- pp.t2 = netcdftime.datetime( pp.t2.year, 12, 1, 0, 0, 0 )
-
- self.conditional_warning(scalar_coord(cm, 'time').bounds[0,0] != scalar_coord(cm, 'time').units.date2num(pp.t1), "modified t1 for climatological seasonal mean")
- self.conditional_warning(scalar_coord(cm, 'time').bounds[0,1] != scalar_coord(cm, 'time').units.date2num(pp.t2), "modified t2 for climatological seasonal mean")
-
- pp.lbft = scalar_coord(cm, 'forecast_period').units.convert(scalar_coord(cm, 'forecast_period').bounds[0, 1], 'hours')
-
-#360 day calendar
-IF
- scalar_coord(cm, 'time') is not None
- scalar_coord(cm, 'time').units.calendar == '360_day'
-THEN
- pp.lbtim.ic = 2
-
-
-#gregorian calendar
-IF
- scalar_coord(cm, 'time') is not None
- scalar_coord(cm, 'time').units.calendar == 'gregorian'
-THEN
- pp.lbtim.ic = 1
-
-
-#365 day calendar
-IF
- scalar_coord(cm, 'time') is not None
- scalar_coord(cm, 'time').units.calendar == '365_day'
-THEN
- pp.lbtim.ic = 4
-
-#####################
-### grid and pole ###
-#####################
-
-IF
- vector_coord(cm, 'longitude') and not is_regular(vector_coord(cm, 'longitude'))
-THEN
- pp.bzx = 0
- pp.bdx = 0
- pp.lbnpt = vector_coord(cm, 'longitude').shape[0]
- pp.x = vector_coord(cm, 'longitude').points
-
-IF
- vector_coord(cm, 'grid_longitude') and not is_regular(vector_coord(cm, 'grid_longitude'))
-THEN
- pp.bzx = 0
- pp.bdx = 0
- pp.lbnpt = vector_coord(cm, 'grid_longitude').shape[0]
- pp.x = vector_coord(cm, 'grid_longitude').points
-
-IF
- vector_coord(cm, 'latitude') and not is_regular(vector_coord(cm, 'latitude'))
-THEN
- pp.bzy = 0
- pp.bdy = 0
- pp.lbrow = vector_coord(cm, 'latitude').shape[0]
- pp.y = vector_coord(cm, 'latitude').points
-
-IF
- vector_coord(cm, 'grid_latitude') and not is_regular(vector_coord(cm, 'grid_latitude'))
-THEN
- pp.bzy = 0
- pp.bdy = 0
- pp.lbrow = vector_coord(cm, 'grid_latitude').shape[0]
- pp.y = vector_coord(cm, 'grid_latitude').points
-
-IF
- vector_coord(cm, 'longitude') and is_regular(vector_coord(cm, 'longitude'))
-THEN
- pp.bzx = vector_coord(cm, 'longitude').points[0] - regular_step(vector_coord(cm, 'longitude'))
- pp.bdx = regular_step(vector_coord(cm, 'longitude'))
- pp.lbnpt = len(vector_coord(cm, 'longitude').points)
-
-IF
- vector_coord(cm, 'grid_longitude') and is_regular(vector_coord(cm, 'grid_longitude'))
-THEN
- pp.bzx = vector_coord(cm, 'grid_longitude').points[0] - regular_step(vector_coord(cm, 'grid_longitude'))
- pp.bdx = regular_step(vector_coord(cm, 'grid_longitude'))
- pp.lbnpt = len(vector_coord(cm, 'grid_longitude').points)
-
-IF
- vector_coord(cm, 'latitude') and is_regular(vector_coord(cm, 'latitude'))
-THEN
- pp.bzy = vector_coord(cm, 'latitude').points[0] - regular_step(vector_coord(cm, 'latitude'))
- pp.bdy = regular_step(vector_coord(cm, 'latitude'))
- pp.lbrow = len(vector_coord(cm, 'latitude').points)
-
-IF
- vector_coord(cm, 'grid_latitude') and is_regular(vector_coord(cm, 'grid_latitude'))
-THEN
- pp.bzy = vector_coord(cm, 'grid_latitude').points[0] - regular_step(vector_coord(cm, 'grid_latitude'))
- pp.bdy = regular_step(vector_coord(cm, 'grid_latitude'))
- pp.lbrow = len(vector_coord(cm, 'grid_latitude').points)
-
-
-#rotated?
-IF
-# iris.fileformats.pp.is_cross_section(cm) == False
- cm.coord_system("RotatedGeogCS") is not None
-THEN
- pp.lbcode = int(pp.lbcode) + 100
-
-
-#lon global
-IF
- vector_coord(cm, 'longitude') is not None
- vector_coord(cm, 'longitude').circular
-THEN
- pp.lbhem = 0
-
-IF
- vector_coord(cm, 'grid_longitude') is not None
- vector_coord(cm, 'grid_longitude').circular
-THEN
- pp.lbhem = 0
-
-#lon not global
-IF
- vector_coord(cm, 'longitude') is not None
- not vector_coord(cm, 'longitude').circular
-THEN
- pp.lbhem = 3
-
-IF
- vector_coord(cm, 'grid_longitude') is not None
- not vector_coord(cm, 'grid_longitude').circular
-THEN
- pp.lbhem = 3
-
-
-
-#####################################################
-############ non-standard cross-sections ############
-#####################################################
-
-# Ticket #1037, x=latitude, y=air_pressure - non-standard cross-section with bounds
-IF
- vector_coord(cm, 'air_pressure') is not None
- not vector_coord(cm, 'air_pressure').circular
- vector_coord(cm, 'air_pressure').has_bounds()
- vector_coord(cm, 'latitude') is not None
- not vector_coord(cm, 'latitude').circular
- vector_coord(cm, 'latitude').has_bounds()
-THEN
- pp.lbcode = 10000 + int(100*10) + 1
- pp.bgor = 0
- pp.y = vector_coord(cm, 'air_pressure').points
- pp.y_lower_bound = vector_coord(cm, 'air_pressure').bounds[:,0]
- pp.y_upper_bound = vector_coord(cm, 'air_pressure').bounds[:,1]
- pp.x = vector_coord(cm, 'latitude').points
- pp.x_lower_bound = vector_coord(cm, 'latitude').bounds[:,0]
- pp.x_upper_bound = vector_coord(cm, 'latitude').bounds[:,1]
- pp.lbrow = vector_coord(cm, 'air_pressure').shape[0]
- pp.lbnpt = vector_coord(cm, 'latitude').shape[0]
- pp.bzx = pp.bzy = pp.bdx = pp.bdy = 0
-
-# Ticket #1037, x=latitude, y=depth - non-standard cross-section with bounds
-IF
- vector_coord(cm, 'depth') is not None
- not vector_coord(cm, 'depth').circular
- vector_coord(cm, 'depth').has_bounds()
- vector_coord(cm, 'latitude') is not None
- not vector_coord(cm, 'latitude').circular
- vector_coord(cm, 'latitude').has_bounds()
-THEN
- pp.lbcode = 10000 + int(100*10) + 4
- pp.bgor = 0
- pp.y = vector_coord(cm, 'depth').points
- pp.y_lower_bound = vector_coord(cm, 'depth').bounds[:,0]
- pp.y_upper_bound = vector_coord(cm, 'depth').bounds[:,1]
- pp.x = vector_coord(cm, 'latitude').points
- pp.x_lower_bound = vector_coord(cm, 'latitude').bounds[:,0]
- pp.x_upper_bound = vector_coord(cm, 'latitude').bounds[:,1]
- pp.lbrow = vector_coord(cm, 'depth').shape[0]
- pp.lbnpt = vector_coord(cm, 'latitude').shape[0]
- pp.bzx = pp.bzy = pp.bdx = pp.bdy = 0
-
-# Ticket #1037, x=latitude, y=ETA - non-standard cross-section with bounds
-IF
- vector_coord(cm, 'eta') is not None
- not vector_coord(cm, 'eta').circular
- vector_coord(cm, 'eta').has_bounds()
- vector_coord(cm, 'latitude') is not None
- not vector_coord(cm, 'latitude').circular
- vector_coord(cm, 'latitude').has_bounds()
-THEN
- pp.lbcode = 10000 + int(100*10) + 3
- pp.bgor = 0
- pp.y = vector_coord(cm, 'eta').points
- pp.y_lower_bound = vector_coord(cm, 'eta').bounds[:,0]
- pp.y_upper_bound = vector_coord(cm, 'eta').bounds[:,1]
- pp.x = vector_coord(cm, 'latitude').points
- pp.x_lower_bound = vector_coord(cm, 'latitude').bounds[:,0]
- pp.x_upper_bound = vector_coord(cm, 'latitude').bounds[:,1]
- pp.lbrow = vector_coord(cm, 'eta').shape[0]
- pp.lbnpt = vector_coord(cm, 'latitude').shape[0]
- pp.bzx = pp.bzy = pp.bdx = pp.bdy = 0
-
-# Ticket #1037, x=days (360 calendar), y=depth - non-standard cross-section with bounds
-IF
- vector_coord(cm, 'depth') is not None
- not vector_coord(cm, 'depth').circular
- vector_coord(cm, 'depth').has_bounds()
- vector_coord(cm, 'time') is not None
- not vector_coord(cm, 'time').circular
- vector_coord(cm, 'time').has_bounds()
-THEN
- pp.lbcode = 10000 + int(100*23) + 4
- pp.bgor = 0
- pp.y = vector_coord(cm, 'depth').points
- pp.y_lower_bound = vector_coord(cm, 'depth').bounds[:,0]
- pp.y_upper_bound = vector_coord(cm, 'depth').bounds[:,1]
- pp.x = vector_coord(cm, 'time').points
- pp.x_lower_bound = vector_coord(cm, 'time').bounds[:,0]
- pp.x_upper_bound = vector_coord(cm, 'time').bounds[:,1]
- pp.lbrow = vector_coord(cm, 'depth').shape[0]
- pp.lbnpt = vector_coord(cm, 'time').shape[0]
- pp.bzx = pp.bzy = pp.bdx = pp.bdy = 0
-
-
-# Ticket #1037, x=days (360 calendar), y=air_pressure - non-standard cross-section with bounds
-IF
- vector_coord(cm, 'air_pressure') is not None
- not vector_coord(cm, 'air_pressure').circular
- vector_coord(cm, 'air_pressure').has_bounds()
- vector_coord(cm, 'time') is not None
- not vector_coord(cm, 'time').circular
- vector_coord(cm, 'time').has_bounds()
-THEN
- pp.lbcode = 10000 + int(100*23) + 1
- pp.bgor = 0
- pp.y = vector_coord(cm, 'air_pressure').points
- pp.y_lower_bound = vector_coord(cm, 'air_pressure').bounds[:,0]
- pp.y_upper_bound = vector_coord(cm, 'air_pressure').bounds[:,1]
- pp.x = vector_coord(cm, 'time').points
- pp.x_lower_bound = vector_coord(cm, 'time').bounds[:,0]
- pp.x_upper_bound = vector_coord(cm, 'time').bounds[:,1]
- pp.lbrow = vector_coord(cm, 'air_pressure').shape[0]
- pp.lbnpt = vector_coord(cm, 'time').shape[0]
- pp.bzx = pp.bzy = pp.bdx = pp.bdy = 0
-
-
-
-
-
-#####################################################
-### lbproc (must start at 0 before rules are run) ###
-#####################################################
-
-IF
- cm.attributes.get("ukmo__process_flags", None)
-THEN
- pp.lbproc += sum([iris.fileformats.pp.lbproc_map[name] for name in cm.attributes["ukmo__process_flags"]])
-
-#zonal-mean
-IF
- # Look for a CellMethod which is a "mean" over "longitude".
- scalar_cell_method(cm, 'mean', 'longitude') is not None
-THEN
- pp.lbproc += 64
-
-IF
- # Look for a CellMethod which is a "mean" over "grid longitude".
- scalar_cell_method(cm, 'mean', 'grid_longitude') is not None
-THEN
- pp.lbproc += 64
-
-#time-mean
-IF
- # Look for a CellMethod which is a "mean" over "time".
- scalar_cell_method(cm, 'mean', 'time') is not None
-THEN
- pp.lbproc += 128
-
-#time-minimum
-IF
- # Look for a CellMethod which is a "minimum" over "time".
- scalar_cell_method(cm, 'minimum', 'time') is not None
-THEN
- pp.lbproc += 4096
-
-#time-maximum
-IF
- # Look for a CellMethod which is a "maximum" over "time".
- scalar_cell_method(cm, 'maximum', 'time') is not None
-THEN
- pp.lbproc += 8192
-
-##########################
-### vertical - lbuser5 ###
-##########################
-
-IF
- scalar_coord(cm, 'pseudo_level') is not None
- not scalar_coord(cm, 'pseudo_level').bounds
-THEN
- pp.lbuser[4] = scalar_coord(cm, 'pseudo_level').points[0]
-
-
-################################
-### vertical - lbvc and blev ###
-################################
-
-#single height level
-IF
- scalar_coord(cm, 'height') is not None
- not scalar_coord(cm, 'height').bounds
- scalar_coord(cm, 'height').points[0] == 1.5
- cm.name() == 'air_temperature'
-THEN
- pp.lbvc = 129
- pp.blev = -1
-
-IF
- pp.lbvc == 0
- scalar_coord(cm, 'height') is not None
- not scalar_coord(cm, 'height').bounds
-THEN
- pp.lbvc = 1
- pp.blev = cm.coord('height').points[0]
-
-
-#single air_pressure level
-IF
- scalar_coord(cm, 'air_pressure') is not None
- not scalar_coord(cm, 'air_pressure').bounds
-THEN
- pp.lbvc = 8
- pp.blev = scalar_coord(cm, 'air_pressure').points[0]
-
-#single "pressure" level
-#TODO: "pressure" is in the PP load rules awaiting more info
-IF
- scalar_coord(cm, 'pressure') is not None
- not scalar_coord(cm, 'pressure').bounds
-THEN
- pp.lbvc = 8
- pp.blev = scalar_coord(cm, 'pressure').points[0]
-
-
-# single depth level (non cross section)
-IF
- scalar_coord(cm, 'model_level_number') is not None
- not scalar_coord(cm, 'model_level_number').bounds
- scalar_coord(cm, 'depth') is not None
- not scalar_coord(cm, 'depth').bounds
-THEN
- pp.lbvc = 2
- pp.lblev = scalar_coord(cm, 'model_level_number').points[0]
- pp.blev = scalar_coord(cm, 'depth').points[0]
-
-# single depth level (Non-dimensional soil model level)
-IF
- scalar_coord(cm, 'soil_model_level_number') is not None
- not scalar_coord(cm, 'soil_model_level_number').has_bounds()
- # The following `is None` checks ensure this rule does not get run
- # if any of the previous LBVC setting rules have run. It gives these
- # rules something of an IF-THEN-ELSE structure.
- scalar_coord(cm, 'air_pressure') is None
- scalar_coord(cm, 'depth') is None
- scalar_coord(cm, 'height') is None
- scalar_coord(cm, 'pressure') is None
- cm.standard_name is not None
- 'soil' in cm.standard_name
-THEN
- pp.lbvc = 6
- pp.lblev = scalar_coord(cm, 'soil_model_level_number').points[0]
- pp.blev = pp.lblev
- pp.brsvd[0] = 0
- pp.brlev = 0
-
-# single depth level (soil depth)
-IF
- scalar_coord(cm, 'depth') is not None
- scalar_coord(cm, 'depth').has_bounds()
- # The following `is None` checks ensure this rule does not get run
- # if any of the previous LBVC setting rules have run. It gives these
- # rules something of an IF-THEN-ELSE structure.
- scalar_coord(cm, 'air_pressure') is None
- scalar_coord(cm, 'soil_model_level_number') is None
- scalar_coord(cm, 'model_level_number') is None
- scalar_coord(cm, 'height') is None
- scalar_coord(cm, 'pressure') is None
- cm.standard_name is not None
- 'soil' in cm.standard_name
-THEN
- pp.lbvc = 6
- pp.blev = scalar_coord(cm, 'depth').points[0]
- pp.brsvd[0] = scalar_coord(cm, 'depth').bounds[0, 0]
- pp.brlev = scalar_coord(cm, 'depth').bounds[0, 1]
-
-# single potential-temperature level
-IF
- scalar_coord(cm, 'air_potential_temperature') is not None
- not scalar_coord(cm, 'air_potential_temperature').bounds
- # The following `is None` checks ensure this rule does not get run
- # if any of the previous LBVC setting rules have run. It gives these
- # rules something of an IF-THEN-ELSE structure.
- scalar_coord(cm, 'air_pressure') is None
- scalar_coord(cm, 'depth') is None
- scalar_coord(cm, 'height') is None
- scalar_coord(cm, 'pressure') is None
- scalar_coord(cm, 'model_level_number') is None
-THEN
- pp.lbvc = 19
- pp.lblev = scalar_coord(cm, 'air_potential_temperature').points[0]
- pp.blev = scalar_coord(cm, 'air_potential_temperature').points[0]
-
-# single hybrid_height level (without aux factory e.g. due to missing orography)
-IF
- not has_aux_factory(cm, iris.aux_factory.HybridHeightFactory)
- scalar_coord(cm, 'model_level_number') is not None
- scalar_coord(cm, 'model_level_number').bounds is None
- scalar_coord(cm, 'level_height') is not None
- scalar_coord(cm, 'level_height').bounds is not None
- scalar_coord(cm, 'sigma') is not None
- scalar_coord(cm, 'sigma').bounds is not None
-THEN
- pp.lbvc = 65
- pp.lblev = scalar_coord(cm, 'model_level_number').points[0]
- pp.blev = scalar_coord(cm, 'level_height').points[0]
- pp.brlev = scalar_coord(cm, 'level_height').bounds[0, 0]
- pp.brsvd[0] = scalar_coord(cm, 'level_height').bounds[0, 1]
- pp.bhlev = scalar_coord(cm, 'sigma').points[0]
- pp.bhrlev = scalar_coord(cm, 'sigma').bounds[0, 0]
- pp.brsvd[1] = scalar_coord(cm, 'sigma').bounds[0, 1]
-
-# single hybrid_height level (with aux factory)
-IF
- has_aux_factory(cm, iris.aux_factory.HybridHeightFactory)
- scalar_coord(cm, 'model_level_number') is not None
- scalar_coord(cm, 'model_level_number').bounds is None
- aux_factory(cm, iris.aux_factory.HybridHeightFactory).dependencies['delta'] is not None
- aux_factory(cm, iris.aux_factory.HybridHeightFactory).dependencies['delta'].bounds is not None
- aux_factory(cm, iris.aux_factory.HybridHeightFactory).dependencies['sigma'] is not None
- aux_factory(cm, iris.aux_factory.HybridHeightFactory).dependencies['sigma'].bounds is not None
-THEN
- pp.lbvc = 65
- pp.lblev = scalar_coord(cm, 'model_level_number').points[0]
- pp.blev = aux_factory(cm, iris.aux_factory.HybridHeightFactory).dependencies['delta'].points[0]
- pp.brlev = aux_factory(cm, iris.aux_factory.HybridHeightFactory).dependencies['delta'].bounds[0, 0]
- pp.brsvd[0] = aux_factory(cm, iris.aux_factory.HybridHeightFactory).dependencies['delta'].bounds[0, 1]
- pp.bhlev = aux_factory(cm, iris.aux_factory.HybridHeightFactory).dependencies['sigma'].points[0]
- pp.bhrlev = aux_factory(cm, iris.aux_factory.HybridHeightFactory).dependencies['sigma'].bounds[0, 0]
- pp.brsvd[1] = aux_factory(cm, iris.aux_factory.HybridHeightFactory).dependencies['sigma'].bounds[0, 1]
-
-# single hybrid pressure level
-IF
- has_aux_factory(cm, iris.aux_factory.HybridPressureFactory)
- scalar_coord(cm, 'model_level_number') is not None
- scalar_coord(cm, 'model_level_number').bounds is None
- aux_factory(cm, iris.aux_factory.HybridPressureFactory).dependencies['delta'] is not None
- aux_factory(cm, iris.aux_factory.HybridPressureFactory).dependencies['delta'].bounds is not None
- aux_factory(cm, iris.aux_factory.HybridPressureFactory).dependencies['sigma'] is not None
- aux_factory(cm, iris.aux_factory.HybridPressureFactory).dependencies['sigma'].bounds is not None
-THEN
- pp.lbvc = 9
- pp.lblev = scalar_coord(cm, 'model_level_number').points[0]
-
- # Note that sigma and delta are swapped around from the hybrid height rules above.
- pp.blev = aux_factory(cm, iris.aux_factory.HybridPressureFactory).dependencies['sigma'].points[0]
- pp.brlev = aux_factory(cm, iris.aux_factory.HybridPressureFactory).dependencies['sigma'].bounds[0, 0]
- pp.brsvd[0] = aux_factory(cm, iris.aux_factory.HybridPressureFactory).dependencies['sigma'].bounds[0, 1]
-
- pp.bhlev = aux_factory(cm, iris.aux_factory.HybridPressureFactory).dependencies['delta'].points[0]
- pp.bhrlev = aux_factory(cm, iris.aux_factory.HybridPressureFactory).dependencies['delta'].bounds[0, 0]
- pp.brsvd[1] = aux_factory(cm, iris.aux_factory.HybridPressureFactory).dependencies['delta'].bounds[0, 1]
-
-
-# CFname mega rule
-IF
- (cm.standard_name, cm.long_name, str(cm.units)) in iris.fileformats.um_cf_map.CF_TO_LBFC
-THEN
- pp.lbfc = iris.fileformats.um_cf_map.CF_TO_LBFC[(cm.standard_name, cm.long_name, str(cm.units))]
-
-IF
- 'STASH' in cm.attributes
- str(cm.attributes['STASH']) in iris.fileformats._ff_cross_references.STASH_TRANS
-THEN
- pp.lbfc = iris.fileformats._ff_cross_references.STASH_TRANS[str(cm.attributes['STASH'])].field_code
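The IF/THEN blocks removed above are the old text-based PP save rules; the new ``iris.fileformats.pp_save_rules`` module added later in this patch re-expresses them as plain Python. As a sketch of the translation only (the function name here is illustrative, not necessarily the one used in the new module), the three calendar rules above collapse into a single function built on the same ``scalar_coord`` helper::

    def _calendar_rules(cube, pp):
        # Set LBTIM.IC from the calendar of the scalar 'time' coordinate.
        time_coord = scalar_coord(cube, 'time')
        if time_coord is not None:
            calendar = time_coord.units.calendar
            if calendar == '360_day':
                pp.lbtim.ic = 2
            elif calendar == 'gregorian':
                pp.lbtim.ic = 1
            elif calendar == '365_day':
                pp.lbtim.ic = 4
        return pp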
diff --git a/lib/iris/experimental/animate.py b/lib/iris/experimental/animate.py
index 9b4c46b5f4..ac60400ba7 100644
--- a/lib/iris/experimental/animate.py
+++ b/lib/iris/experimental/animate.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2013 - 2015, Met Office
+# (C) British Crown Copyright 2013 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -40,8 +40,12 @@ def animate(cube_iterator, plot_func, fig=None, **kwargs):
Each animation frame corresponds to each :class:`iris.cube.Cube`
object. See :meth:`iris.cube.Cube.slices`.
- * plot_func (:mod:`~iris.plot` or :mod:`~iris.quickplot` plot):
- Plotting function used to animate.
+ * plot_func (:mod:`iris.plot` or :mod:`iris.quickplot` plotting function):
+ Plotting function used to animate. Must accept the signature
+ ``plot_func(cube, vmin=vmin, vmax=vmax, coords=coords)``.
+ :func:`~iris.plot.contourf`, :func:`~iris.plot.contour`,
+ :func:`~iris.plot.pcolor` and :func:`~iris.plot.pcolormesh`
+ all conform to this signature.
Kwargs:
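With the clarified ``plot_func`` requirement, a conforming call looks along these lines (a sketch; the filename is a placeholder, and a reference to the returned animation should be kept while it is displayed)::

    import iris
    import iris.quickplot as qplt
    import matplotlib.pyplot as plt
    from iris.experimental.animate import animate

    cube = iris.load_cube('air_temp.nc')  # placeholder filename
    # qplt.contourf conforms to the required
    # plot_func(cube, vmin=vmin, vmax=vmax, coords=coords) signature.
    ani = animate(cube.slices(['latitude', 'longitude']), qplt.contourf)
    plt.show()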
diff --git a/lib/iris/experimental/fieldsfile.py b/lib/iris/experimental/fieldsfile.py
deleted file mode 100644
index a8c7a195ea..0000000000
--- a/lib/iris/experimental/fieldsfile.py
+++ /dev/null
@@ -1,281 +0,0 @@
-# (C) British Crown Copyright 2014 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""
-High-speed loading of structured FieldsFiles.
-
-.. deprecated:: 1.10
-
- This module has now been *deprecated*.
- Please use :mod:`iris.fileformats.um.structured_um_loading` instead.
-
-"""
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-import os
-
-from iris._deprecation import warn_deprecated
-
-# Issue a deprecation message when the module is loaded.
-warn_deprecated("The module 'iris.experimental.fieldsfile' is deprecated. "
- "Please use iris.fileformats.um.structured_um_loading"
- "as a replacement.")
-
-from iris.coords import DimCoord
-from iris.cube import CubeList
-from iris.exceptions import TranslationError
-from iris.fileformats import FORMAT_AGENT
-from iris.fileformats.um import um_to_pp
-from iris.fileformats.pp import load as pp_load
-from iris.fileformats.pp_rules import (_convert_time_coords,
- _convert_vertical_coords,
- _convert_scalar_realization_coords,
- _convert_scalar_pseudo_level_coords,
- _all_other_rules)
-from iris.fileformats.rules import ConversionMetadata, Loader, load_cubes
-from iris.fileformats.um._fast_load_structured_fields import \
- group_structured_fields
-
-
-# Seed the preferred order of candidate dimension coordinates.
-_HINT_COORDS = ['time', 'forecast_reference_time', 'model_level_number']
-_HINTS = {name: i for i, name in zip(range(len(_HINT_COORDS)), _HINT_COORDS)}
-
-_FF_SPEC_NAME = 'UM Fieldsfile'
-_PP_SPEC_NAME = 'UM Post Processing file'
-
-
-def _structured_loader(fname):
- with open(fname, 'rb') as fh:
- spec = FORMAT_AGENT.get_spec(os.path.basename(fname), fh)
- if spec.name.startswith(_FF_SPEC_NAME):
- result = um_to_pp
- elif spec.name.startswith(_PP_SPEC_NAME):
- result = pp_load
- else:
- emsg = 'Require {!r} to be a structured FieldsFile or a PP file.'
- raise ValueError(emsg.format(fname))
- return result
-
-
-def _collations_from_filename(filename):
- loader = _structured_loader(filename)
- fields = iter(loader(filename))
- return group_structured_fields(fields)
-
-
-def load(filenames, callback=None):
- """
- Load structured FieldsFiles and PP files.
-
- Args:
-
- * filenames:
- One or more filenames.
-
-
- Kwargs:
-
- * callback:
- A modifier/filter function. Please see the module documentation
- for :mod:`iris`.
-
- .. note::
-
- Unlike the standard :func:`iris.load` operation, the callback is
- applied to the final result cubes, not individual input fields.
-
- Returns:
- An :class:`iris.cube.CubeList`.
-
-
- This is a streamlined load operation, to be used only on fieldsfiles or PP
- files whose fields repeat regularly over the same vertical levels and
- times. The results aim to be equivalent to those generated by
- :func:`iris.load`, but the operation is substantially faster for input that
- is structured.
-
- The structured input files should conform to the following requirements:
-
- * the file must contain fields for all possible combinations of the
- vertical levels and time points found in the file.
-
- * the fields must occur in a regular repeating order within the file.
-
- (For example: a sequence of fields for NV vertical levels, repeated
- for NP different forecast periods, repeated for NT different forecast
- times).
-
- * all other metadata must be identical across all fields of the same
- phenomenon.
-
- Each group of fields with the same values of LBUSER4, LBUSER7 and LBPROC
- is identified as a separate phenomenon: These groups are processed
- independently and returned as separate result cubes.
-
- .. note::
-
- Each input file is loaded independently. Thus a single result cube can
- not combine data from multiple input files.
-
- .. note::
-
- The resulting time-related coordinates ('time', 'forecast_time' and
- 'forecast_period') may be mapped to shared cube dimensions and in some
- cases can also be multidimensional. However, the vertical level
- information *must* have a simple one-dimensional structure, independent
- of the time points, otherwise an error will be raised.
-
- .. note::
-
- Where input data does *not* have a fully regular arrangement, the
- corresponding result cube will have a single anonymous extra dimension
- which indexes over all the input fields.
-
- This can happen if, for example, some fields are missing; or have
- slightly different metadata; or appear out of order in the file.
-
- .. warning::
-
- Any non-regular metadata variation in the input should be strictly
- avoided, as not all irregularities are detected, which can cause
- erroneous results.
-
-
- """
- warn_deprecated(
- "The module 'iris.experimental.fieldsfile' is deprecated. "
- "Please use the 'iris.fileformats.um.structured_um_loading' facility "
- "as a replacement."
- "\nA call to 'iris.experimental.fieldsfile.load' can be replaced with "
- "'iris.load_raw', within a 'structured_um_loading' context.")
- loader = Loader(_collations_from_filename, {}, _convert_collation, None)
- return CubeList(load_cubes(filenames, callback, loader, None))
-
-
-def _adjust_dims(coords_and_dims, n_dims):
- def adjust(dims):
- if dims is not None:
- dims += n_dims
- return dims
- return [(coord, adjust(dims)) for coord, dims in coords_and_dims]
-
-
-def _bind_coords(coords_and_dims, dim_coord_dims, dim_coords_and_dims,
- aux_coords_and_dims):
- def key_func(item):
- return _HINTS.get(item[0].name(), len(_HINTS))
- # Target the first DimCoord for a dimension at dim_coords,
- # and target everything else at aux_coords.
- for coord, dims in sorted(coords_and_dims, key=key_func):
- if (isinstance(coord, DimCoord) and dims is not None and
- len(dims) == 1 and dims[0] not in dim_coord_dims):
- dim_coords_and_dims.append((coord, dims))
- dim_coord_dims.add(dims[0])
- else:
- aux_coords_and_dims.append((coord, dims))
-
-
-def _convert_collation(collation):
- """
- Converts a FieldCollation into the corresponding items of Cube
- metadata.
-
- Args:
-
- * collation:
- A FieldCollation object.
-
- Returns:
- A :class:`iris.fileformats.rules.ConversionMetadata` object.
-
- """
- # For all the scalar conversions all fields in the collation will
- # give the same result, so the choice is arbitrary.
- field = collation.fields[0]
-
- # All the "other" rules.
- (references, standard_name, long_name, units, attributes, cell_methods,
- dim_coords_and_dims, aux_coords_and_dims) = _all_other_rules(field)
-
- # Adjust any dimension bindings to account for the extra leading
- # dimensions added by the collation.
- if collation.vector_dims_shape:
- n_collation_dims = len(collation.vector_dims_shape)
- dim_coords_and_dims = _adjust_dims(dim_coords_and_dims,
- n_collation_dims)
- aux_coords_and_dims = _adjust_dims(aux_coords_and_dims,
- n_collation_dims)
-
- # "Normal" (non-cross-sectional) time values
- vector_headers = collation.element_arrays_and_dims
- # If the collation doesn't define a vector of values for a
- # particular header then it must be constant over all fields in the
- # collation. In which case it's safe to get the value from any field.
- t1, t1_dims = vector_headers.get('t1', (field.t1, ()))
- t2, t2_dims = vector_headers.get('t2', (field.t2, ()))
- lbft, lbft_dims = vector_headers.get('lbft', (field.lbft, ()))
- coords_and_dims = _convert_time_coords(field.lbcode, field.lbtim,
- field.time_unit('hours'),
- t1, t2, lbft,
- t1_dims, t2_dims, lbft_dims)
- dim_coord_dims = set()
- _bind_coords(coords_and_dims, dim_coord_dims, dim_coords_and_dims,
- aux_coords_and_dims)
-
- # "Normal" (non-cross-sectional) vertical levels
- blev, blev_dims = vector_headers.get('blev', (field.blev, ()))
- lblev, lblev_dims = vector_headers.get('lblev', (field.lblev, ()))
- bhlev, bhlev_dims = vector_headers.get('bhlev', (field.bhlev, ()))
- bhrlev, bhrlev_dims = vector_headers.get('bhrlev', (field.bhrlev, ()))
- brsvd1, brsvd1_dims = vector_headers.get('brsvd1', (field.brsvd[0], ()))
- brsvd2, brsvd2_dims = vector_headers.get('brsvd2', (field.brsvd[1], ()))
- brlev, brlev_dims = vector_headers.get('brlev', (field.brlev, ()))
- # Find all the non-trivial dimension values
- dims = set(filter(None, [blev_dims, lblev_dims, bhlev_dims, bhrlev_dims,
- brsvd1_dims, brsvd2_dims, brlev_dims]))
- if len(dims) > 1:
- raise TranslationError('Unsupported multiple values for vertical '
- 'dimension.')
- if dims:
- v_dims = dims.pop()
- if len(v_dims) > 1:
- raise TranslationError('Unsupported multi-dimension vertical '
- 'headers.')
- else:
- v_dims = ()
- coords_and_dims, factories = _convert_vertical_coords(field.lbcode,
- field.lbvc,
- blev, lblev,
- field.stash,
- bhlev, bhrlev,
- brsvd1, brsvd2,
- brlev, v_dims)
- _bind_coords(coords_and_dims, dim_coord_dims, dim_coords_and_dims,
- aux_coords_and_dims)
-
- # Realization (aka ensemble) (--> scalar coordinates)
- aux_coords_and_dims.extend(_convert_scalar_realization_coords(
- lbrsvd4=field.lbrsvd[3]))
-
- # Pseudo-level coordinate (--> scalar coordinates)
- aux_coords_and_dims.extend(_convert_scalar_pseudo_level_coords(
- lbuser5=field.lbuser[4]))
-
- return ConversionMetadata(factories, references, standard_name, long_name,
- units, attributes, cell_methods,
- dim_coords_and_dims, aux_coords_and_dims)
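As the deprecation message in the removed module says, a call to ``iris.experimental.fieldsfile.load`` is replaced by ``iris.load_raw`` within a ``structured_um_loading`` context. A minimal sketch (the path is a placeholder)::

    import iris
    from iris.fileformats.um import structured_um_loading

    with structured_um_loading():
        cubes = iris.load_raw('my_fieldsfile.ff')  # placeholder path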
diff --git a/lib/iris/fileformats/_pp_lbproc_pairs.py b/lib/iris/fileformats/_pp_lbproc_pairs.py
new file mode 100644
index 0000000000..28e35a8e86
--- /dev/null
+++ b/lib/iris/fileformats/_pp_lbproc_pairs.py
@@ -0,0 +1,48 @@
+# (C) British Crown Copyright 2017, Met Office
+#
+# This file is part of Iris.
+#
+# Iris is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Iris is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Iris. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+from six.moves import (filter, input, map, range, zip) # noqa
+import six
+
+import itertools
+
+
+# LBPROC codes and their English equivalents
+LBPROC_PAIRS = ((1, "Difference from another experiment"),
+ (2, "Difference from zonal (or other spatial) mean"),
+ (4, "Difference from time mean"),
+ (8, "X-derivative (d/dx)"),
+ (16, "Y-derivative (d/dy)"),
+ (32, "Time derivative (d/dt)"),
+ (64, "Zonal mean field"),
+ (128, "Time mean field"),
+ (256, "Product of two fields"),
+ (512, "Square root of a field"),
+ (1024, "Difference between fields at levels BLEV and BRLEV"),
+ (2048, "Mean over layer between levels BLEV and BRLEV"),
+ (4096, "Minimum value of field during time period"),
+ (8192, "Maximum value of field during time period"),
+ (16384, "Magnitude of a vector, not specifically wind speed"),
+ (32768, "Log10 of a field"),
+ (65536, "Variance of a field"),
+ (131072, "Mean over an ensemble of parallel runs"))
+
+# lbproc_map is dict mapping lbproc->English and English->lbproc
+# essentially a one to one mapping
+LBPROC_MAP = {x: y for x, y in
+ itertools.chain(LBPROC_PAIRS, ((y, x) for x, y in LBPROC_PAIRS))}
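``LBPROC_MAP`` keeps the two-way lookup behaviour of the old ``lbproc_map``, and a combined LBPROC value can still be decomposed against the pairs table, as the load rules do. For example::

    from iris.fileformats._pp_lbproc_pairs import LBPROC_MAP, LBPROC_PAIRS

    LBPROC_MAP[128]                # 'Time mean field'
    LBPROC_MAP['Time mean field']  # 128

    # Decompose a combined value into its component flag names.
    lbproc = 128 | 4096
    names = [name for value, name in LBPROC_PAIRS if lbproc & value]
    # ['Time mean field', 'Minimum value of field during time period']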
diff --git a/lib/iris/fileformats/ff.py b/lib/iris/fileformats/ff.py
deleted file mode 100644
index 3b61dcb873..0000000000
--- a/lib/iris/fileformats/ff.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# (C) British Crown Copyright 2010 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""
-Provides UK Met Office Fields File (FF) format specific capabilities.
-
-.. deprecated:: 1.10
-
- This module has now been *deprecated*.
- Please use :mod:`iris.fileformats.um` instead :
- That contains equivalents for the key features of this module.
-
- The following replacements may be used.
-
- * for :class:`FF2PP`, use :meth:`iris.fileformats.um.um_to_pp`
- * for :meth:`load_cubes`, use :meth:`iris.fileformats.um.load_cubes`
- * for :meth:`load_cubes_32bit_ieee`, use
- :meth:`iris.fileformats.um.load_cubes_32bit_ieee`.
-
-"""
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-import six
-
-import warnings
-
-from iris._deprecation import warn_deprecated
-
-# Issue a deprecation message when the module is loaded.
-warn_deprecated("The module 'iris.fileformats.ff' is deprecated. "
- "Please use iris.fileformats.um as a replacement, which "
- "contains equivalents for all important features.")
-
-# Directly import various simple data items from the 'old' ff module.
-from iris.fileformats._ff import (
- IMDI,
- FF_HEADER_DEPTH,
- DEFAULT_FF_WORD_DEPTH,
- _FF_LOOKUP_TABLE_TERMINATE,
- UM_FIXED_LENGTH_HEADER,
- UM_TO_FF_HEADER_OFFSET,
- FF_HEADER,
- _FF_HEADER_POINTERS,
- _LBUSER_DTYPE_LOOKUP,
- X_COORD_U_GRID,
- Y_COORD_V_GRID,
- HANDLED_GRIDS,
- REAL_EW_SPACING,
- REAL_NS_SPACING,
- REAL_FIRST_LAT,
- REAL_FIRST_LON,
- REAL_POLE_LAT,
- REAL_POLE_LON,
- Grid,
- ArakawaC,
- NewDynamics,
- ENDGame,
- FFHeader,
- FF2PP,
- load_cubes,
- load_cubes_32bit_ieee
-)
-
-# Ensure we reproduce documentation as it appeared in v1.9,
-# but with a somewhat improved order of appearance.
-__all__ = (
- 'load_cubes',
- 'load_cubes_32bit_ieee',
- 'FF2PP',
- 'Grid',
- 'ArakawaC',
- 'NewDynamics',
- 'ENDGame',
- 'FFHeader',
-)
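Following the replacements listed in the removed docstring, code that iterated fields via ``iris.fileformats.ff.FF2PP`` now uses ``iris.fileformats.um.um_to_pp``, along these lines (a sketch; the path is a placeholder)::

    from iris.fileformats.um import um_to_pp

    for field in um_to_pp('my_fieldsfile.ff'):  # placeholder path
        print(field.lbuser[3])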
diff --git a/lib/iris/fileformats/pp.py b/lib/iris/fileformats/pp.py
index a046f709c2..2e318cba46 100644
--- a/lib/iris/fileformats/pp.py
+++ b/lib/iris/fileformats/pp.py
@@ -26,7 +26,6 @@
import abc
import collections
from copy import deepcopy
-import itertools
import operator
import os
import re
@@ -41,8 +40,14 @@
from iris._deprecation import warn_deprecated
from iris._lazy_data import as_concrete_data, as_lazy_data, is_lazy_data
import iris.config
+import iris.fileformats.pp_load_rules
+from iris.fileformats.pp_save_rules import verify
+
+# NOTE: this is for backwards-compatibility *ONLY*
+# We could simply remove it for v2.0?
+from iris.fileformats._pp_lbproc_pairs import (LBPROC_PAIRS,
+ LBPROC_MAP as lbproc_map)
import iris.fileformats.rules
-import iris.fileformats.pp_rules
import iris.coord_systems
@@ -51,26 +56,15 @@
except ImportError:
mo_pack = None
-try:
- from iris.fileformats import _old_pp_packing as pp_packing
-except ImportError:
- pp_packing = None
-
-__all__ = ['load', 'save', 'load_cubes', 'PPField',
- 'reset_load_rules', 'add_save_rules',
- 'as_fields', 'load_pairs_from_fields', 'as_pairs',
- 'save_pairs_from_cube', 'reset_save_rules',
- 'save_fields', 'STASH', 'EARTH_RADIUS']
+__all__ = ['load', 'save', 'load_cubes', 'PPField', 'as_fields',
+ 'load_pairs_from_fields', 'save_pairs_from_cube', 'save_fields',
+ 'STASH', 'EARTH_RADIUS']
EARTH_RADIUS = 6371229.0
-# Cube->PP rules are loaded on first use
-_save_rules = None
-
-
PP_HEADER_DEPTH = 256
PP_WORD_DEPTH = 4
NUM_LONG_HEADERS = 45
@@ -228,31 +222,6 @@
'default': np.dtype('>f4'),
}
-# LBPROC codes and their English equivalents
-LBPROC_PAIRS = ((1, "Difference from another experiment"),
- (2, "Difference from zonal (or other spatial) mean"),
- (4, "Difference from time mean"),
- (8, "X-derivative (d/dx)"),
- (16, "Y-derivative (d/dy)"),
- (32, "Time derivative (d/dt)"),
- (64, "Zonal mean field"),
- (128, "Time mean field"),
- (256, "Product of two fields"),
- (512, "Square root of a field"),
- (1024, "Difference between fields at levels BLEV and BRLEV"),
- (2048, "Mean over layer between levels BLEV and BRLEV"),
- (4096, "Minimum value of field during time period"),
- (8192, "Maximum value of field during time period"),
- (16384, "Magnitude of a vector, not specifically wind speed"),
- (32768, "Log10 of a field"),
- (65536, "Variance of a field"),
- (131072, "Mean over an ensemble of parallel runs"))
-
-# lbproc_map is dict mapping lbproc->English and English->lbproc
-# essentially a one to one mapping
-lbproc_map = {x: y for x, y in
- itertools.chain(LBPROC_PAIRS, ((y, x) for x, y in LBPROC_PAIRS))}
-
class STASH(collections.namedtuple('STASH', 'model section item')):
"""
@@ -304,7 +273,9 @@ def from_msi(msi):
if not isinstance(msi, six.string_types):
raise TypeError('Expected STASH code MSI string, got %r' % (msi,))
- msi_match = re.match('^\s*m(.*)s(.*)i(.*)\s*$', msi, re.IGNORECASE)
+ msi_match = re.match(
+ '^\s*m(\d+|\?+)s(\d+|\?+)i(\d+|\?+)\s*$', msi,
+ re.IGNORECASE)
if msi_match is None:
raise ValueError('Expected STASH code MSI string "mXXsXXiXXX", '
@@ -543,127 +514,6 @@ def __ge__(self, other):
return self._compare(other, operator.ge)
-class BitwiseInt(SplittableInt):
- """
- A class to hold an integer, of fixed bit-length, which can easily get/set
- each bit individually.
-
- .. deprecated:: 1.8
-
- Please use `int` instead.
-
- .. note::
-
- Uses a fixed number of bits.
- Will raise an Error when attempting to access an out-of-range flag.
-
- >>> a = BitwiseInt(511)
- >>> a.flag1
- 1
- >>> a.flag8
- 1
- >>> a.flag128
- 1
- >>> a.flag256
- 1
- >>> a.flag512
- AttributeError: 'BitwiseInt' object has no attribute 'flag512'
- >>> a.flag512 = 1
- AttributeError: Cannot set a flag that does not exist: flag512
-
- """
-
- def __init__(self, value, num_bits=None):
- # intentionally empty docstring as all covered in the class docstring.
- """ """
- warn_deprecated('BitwiseInt is deprecated - please use `int` instead.')
-
- SplittableInt.__init__(self, value)
- self.flags = ()
-
- # do we need to calculate the number of bits based on the given value?
- self._num_bits = num_bits
- if self._num_bits is None:
- self._num_bits = 0
- while((value >> self._num_bits) > 0):
- self._num_bits += 1
- else:
- # make sure the number of bits is enough to store the given value.
- if (value >> self._num_bits) > 0:
- raise ValueError("Not enough bits to store value")
-
- self._set_flags_from_value()
-
- def _set_flags_from_value(self):
- all_flags = []
-
- # Set attributes "flag[n]" to 0 or 1
- for i in range(self._num_bits):
- flag_name = 1 << i
- flag_value = ((self._value >> i) & 1)
- object.__setattr__(self, 'flag%d' % flag_name, flag_value)
-
- # Add to list off all flags
- if flag_value:
- all_flags.append(flag_name)
-
- self.flags = tuple(all_flags)
-
- def _set_value_from_flags(self):
- self._value = 0
- for i in range(self._num_bits):
- bit_value = pow(2, i)
- flag_name = "flag%i" % bit_value
- flag_value = object.__getattribute__(self, flag_name)
- self._value += flag_value * bit_value
-
- def __iand__(self, value):
- """Perform an &= operation."""
- self._value &= value
- self._set_flags_from_value()
- return self
-
- def __ior__(self, value):
- """Perform an |= operation."""
- self._value |= value
- self._set_flags_from_value()
- return self
-
- def __iadd__(self, value):
- """Perform an inplace add operation"""
- self._value += value
- self._set_flags_from_value()
- return self
-
- def __setattr__(self, name, value):
- # Allow setting of the attribute flags
- # Are we setting a flag?
- if name.startswith("flag") and name != "flags":
- # true and false become 1 and 0
- if not isinstance(value, bool):
- raise TypeError("Can only set bits to True or False")
-
- # Setting an existing flag?
- if hasattr(self, name):
- # which flag?
- flag_value = int(name[4:])
- # on or off?
- if value:
- self |= flag_value
- else:
- self &= ~flag_value
-
- # Fail if an attempt has been made to set a flag that does not
- # exist
- else:
- raise AttributeError("Cannot set a flag that does not"
- " exist: %s" % name)
-
- # If we're not setting a flag, then continue as normal
- else:
- SplittableInt.__setattr__(self, name, value)
-
-
def _make_flag_getter(value):
def getter(self):
warn_deprecated('The `flag` attributes are deprecated - please use '
@@ -698,7 +548,7 @@ def __new__(cls, classname, bases, class_dict):
return type.__new__(cls, classname, bases, class_dict)
-class _LBProc(six.with_metaclass(_FlagMetaclass, BitwiseInt)):
+class _LBProc(six.with_metaclass(_FlagMetaclass, SplittableInt)):
# Use a metaclass to define the `flag1`, `flag2`, `flag4, etc.
# properties.
def __init__(self, value):
@@ -715,87 +565,9 @@ def __init__(self, value):
'splittable integers object')
self._value = value
- def __len__(self):
- """
- Base ten length.
-
- .. deprecated:: 1.8
-
- The value of a BitwiseInt only makes sense in base-two.
-
- """
- warn_deprecated('Length is deprecated')
- return len(str(self._value))
-
def __setattr__(self, name, value):
object.__setattr__(self, name, value)
- def __getitem__(self, key):
- """
- Base ten indexing support.
-
- .. deprecated:: 1.8
-
- The value of an _LBProc only makes sense in base-two.
-
- """
- warn_deprecated('Indexing is deprecated')
- try:
- value = int('0' + str(self._value)[::-1][key][::-1])
- except IndexError:
- value = 0
- # If the key returns a list of values, then combine them
- # together to an integer.
- if isinstance(value, list):
- value = sum(10**i * val for i, val in enumerate(value))
- return value
-
- def __setitem__(self, key, value):
- """
- Base ten indexing support.
-
- .. deprecated:: 1.8
-
- The value of an _LBProc only makes sense in base-two.
-
- """
- warn_deprecated('Indexing is deprecated')
- if (not isinstance(value, int) or value < 0):
- msg = 'Can only set {} as a positive integer value.'.format(key)
- raise ValueError(msg)
-
- if isinstance(key, slice):
- if ((key.start is not None and key.start < 0) or
- (key.step is not None and key.step < 0) or
- (key.stop is not None and key.stop < 0)):
- raise ValueError('Cannot assign a value with slice '
- 'objects containing negative indices.')
-
- # calculate the current length of the value of this string
- current_length = len(range(*key.indices(len(self))))
-
- # Get indices for as many digits as have been requested.
- # Putting the upper limit on the number of digits at 100.
- indices = range(*key.indices(100))
- if len(indices) < len(str(value)):
- fmt = 'Cannot put {} into {} as it has too many digits.'
- raise ValueError(fmt.format(value, key))
-
- # Iterate over each of the indices in the slice, zipping
- # them together with the associated digit.
- filled_value = str(value).zfill(current_length)
- for index, digit in zip(indices, filled_value[::-1]):
- # assign each digit to the associated index
- self.__setitem__(index, int(digit))
- else:
- if value > 9:
- raise ValueError('Can only set a single digit')
- # Setting a single digit.
- factor = 10 ** key
- head, tail = divmod(self._value, factor)
- head = head // 10
- self._value = (head * 10 + value) * factor + tail
-
def __iadd__(self, value):
self._value += value
return self
@@ -820,13 +592,6 @@ def __repr__(self):
def __str__(self):
return str(self._value)
- @property
- def flags(self):
- warn_deprecated('The `flags` attribute is deprecated - please use '
- 'integer bitwise operators instead.')
- return tuple(2 ** i for i in range(self.NUM_BITS)
- if self._value & 2 ** i)
-
class PPDataProxy(object):
"""A reference to the data payload of a single PP field."""
@@ -932,13 +697,6 @@ def _data_bytes_to_shaped_array(data_bytes, lbpack, boundary_packing,
decompress_wgdos = mo_pack.decompress_wgdos
except AttributeError:
decompress_wgdos = mo_pack.unpack_wgdos
- elif pp_packing is not None:
- msg = 'iris.fileformats.pp_packing has been ' \
- 'deprecated and will be removed in a future release. ' \
- 'Install mo_pack to make use of the new unpacking ' \
- 'functionality.'
- warn_deprecated(msg)
- decompress_wgdos = pp_packing.wgdos_unpack
else:
msg = 'Unpacking PP fields with LBPACK of {} ' \
'requires mo_pack to be installed'.format(lbpack.n1)
@@ -947,13 +705,6 @@ def _data_bytes_to_shaped_array(data_bytes, lbpack, boundary_packing,
elif lbpack.n1 == 4:
if mo_pack is not None and hasattr(mo_pack, 'decompress_rle'):
decompress_rle = mo_pack.decompress_rle
- elif pp_packing is not None:
- msg = 'iris.fileformats.pp_packing has been ' \
- 'deprecated and will be removed in a future release. ' \
- 'Install/upgrade mo_pack to make use of the new unpacking ' \
- 'functionality.'
- warn_deprecated(msg)
- decompress_rle = pp_packing.rle_decode
else:
msg = 'Unpacking PP fields with LBPACK of {} ' \
'requires mo_pack to be installed'.format(lbpack.n1)
@@ -1998,78 +1749,6 @@ def _field_gen(filename, read_data_bytes, little_ended=False):
yield pp_field
-def reset_load_rules():
- """
- Resets the PP load process to use only the standard conversion rules.
-
- .. deprecated:: 1.7
-
- """
- warn_deprecated('reset_load_rules was deprecated in v1.7.')
-
-
-def _ensure_save_rules_loaded():
- """Makes sure the standard save rules are loaded."""
-
- # Uses these module-level variables
- global _save_rules
-
- if _save_rules is None:
- # Load the pp save rules
- rules_filename = os.path.join(iris.config.CONFIG_PATH,
- 'pp_save_rules.txt')
- with iris.fileformats.rules._disable_deprecation_warnings():
- _save_rules = iris.fileformats.rules.RulesContainer(
- rules_filename, iris.fileformats.rules.ProcedureRule)
-
-
-def add_save_rules(filename):
- """
- Registers a rules file for use during the PP save process.
-
- Registered files are processed after the standard conversion rules, and in
- the order they were registered.
-
- .. deprecated:: 1.10
-
- If you need to customise pp field saving, please refer to the functions
- :func:`as_fields`, :func:`save_pairs_from_cube` and :func:`save_fields`
- for an alternative solution.
-
- """
- warn_deprecated(
- 'custom pp save rules are deprecated from v1.10.\n'
- 'If you need to customise pp field saving, please refer to the '
- 'functions iris.fileformats.pp.as_fields, '
- 'iris.fileformats.pp.save_pairs_from_cube and '
- 'iris.fileformats.pp.save_fields for an alternative solution.')
- _ensure_save_rules_loaded()
- _save_rules.import_rules(filename)
-
-
-def reset_save_rules():
- """
- Resets the PP save process to use only the standard conversion rules.
-
- .. deprecated:: 1.10
-
- If you need to customise pp field saving, please refer to the functions
- :func:`as_fields`, :func:`save_pairs_from_cube` and :func:`save_fields`
- for an alternative solution.
-
- """
- warn_deprecated(
- 'custom pp save rules are deprecated from v1.10.\n'
- 'If you need to customise pp field saving, please refer to the '
- 'functions iris.fileformats.pp.as_fields, '
- 'iris.fileformats.pp.save_pairs_from_cube and '
- 'iris.fileformats.pp.save_fields for an alternative solution.')
- # Uses this module-level variable
- global _save_rules
-
- _save_rules = None
-
-
# Stash codes not to be filtered (reference altitude and pressure fields).
_STASH_ALLOW = [STASH(1, 0, 33), STASH(1, 0, 1)]
@@ -2238,7 +1917,8 @@ def load_pairs_from_fields(pp_fields):
"""
load_pairs_from_fields = iris.fileformats.rules.load_pairs_from_fields
- return load_pairs_from_fields(pp_fields, iris.fileformats.pp_rules.convert)
+ return load_pairs_from_fields(pp_fields,
+ iris.fileformats.pp_load_rules.convert)
def _load_cubes_variable_loader(filenames, callback, loading_function,
@@ -2265,7 +1945,7 @@ def _load_cubes_variable_loader(filenames, callback, loading_function,
else:
loader = iris.fileformats.rules.Loader(
loading_function, loading_function_kwargs or {},
- iris.fileformats.pp_rules.convert)
+ iris.fileformats.pp_load_rules.convert)
result = iris.fileformats.rules.load_cubes(filenames, callback, loader,
pp_filter)
@@ -2310,22 +1990,9 @@ def save(cube, target, append=False, field_coords=None):
save_fields(fields, target, append=append)
-def as_pairs(cube, field_coords=None, target=None):
- """
- .. deprecated:: 1.10
- Please use :func:`iris.fileformats.pp.save_pairs_from_cube` for the same
- functionality.
-
- """
- warn_deprecated('as_pairs is deprecated in v1.10; please use'
- ' save_pairs_from_cube instead.')
- return save_pairs_from_cube(cube, field_coords=field_coords,
- target=target)
-
-
def save_pairs_from_cube(cube, field_coords=None, target=None):
"""
- Use the PP saving rules (and any user rules) to convert a cube or
+ Use the PP saving rules to convert a cube or
iterable of cubes to an iterable of (2D cube, PP field) pairs.
Args:
@@ -2384,8 +2051,6 @@ def save_pairs_from_cube(cube, field_coords=None, target=None):
# On the flip side, record which Cube metadata has been "used" and flag up
# unused?
- _ensure_save_rules_loaded()
-
n_dims = len(cube.shape)
if n_dims < 2:
raise ValueError('Unable to save a cube of fewer than 2 dimensions.')
@@ -2435,18 +2100,7 @@ def save_pairs_from_cube(cube, field_coords=None, target=None):
# Run the PP save rules on the slice2D, to fill the PPField,
# recording the rules that were used
- rules_result = _save_rules.verify(slice2D, pp_field)
- verify_rules_ran = rules_result.matching_rules
-
- # Log the rules used
- if target is None:
- target = 'None'
- elif not isinstance(target, six.string_types):
- target = target.name
-
- with iris.fileformats.rules._disable_deprecation_warnings():
- iris.fileformats.rules.log('PP_SAVE', str(target),
- verify_rules_ran)
+ pp_field = verify(slice2D, pp_field)
yield (slice2D, pp_field)
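One behavioural change worth noting from the ``pp.py`` hunks above: the STASH MSI pattern is tightened so that each component must be a run of digits or of ``?`` wildcards. Sketching the effect implied by the new regex::

    from iris.fileformats.pp import STASH

    STASH.from_msi('m01s16i203')  # parses as before
    STASH.from_msi('m??s16i203')  # '?' runs still match the pattern
    STASH.from_msi('mXXsYYiZZZ')  # now raises ValueError (previously matched)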
diff --git a/lib/iris/fileformats/pp_rules.py b/lib/iris/fileformats/pp_load_rules.py
similarity index 99%
rename from lib/iris/fileformats/pp_rules.py
rename to lib/iris/fileformats/pp_load_rules.py
index a86c5f358e..f709ac30c9 100644
--- a/lib/iris/fileformats/pp_rules.py
+++ b/lib/iris/fileformats/pp_load_rules.py
@@ -30,9 +30,10 @@
from iris.coords import AuxCoord, CellMethod, DimCoord
from iris.fileformats.rules import (ConversionMetadata, Factory, Reference,
ReferenceTarget)
+import iris.fileformats.pp
+from iris.fileformats._pp_lbproc_pairs import LBPROC_MAP
from iris.fileformats.um_cf_map import (LBFC_TO_CF, STASH_TO_CF,
STASHCODE_IMPLIED_HEIGHTS)
-import iris.fileformats.pp
###############################################################################
@@ -1027,7 +1028,7 @@ def _all_other_rules(f):
if unhandled_lbproc:
attributes["ukmo__process_flags"] = tuple(sorted(
[name
- for value, name in six.iteritems(iris.fileformats.pp.lbproc_map)
+ for value, name in six.iteritems(LBPROC_MAP)
if isinstance(value, int) and f.lbproc & value]))
if (f.lbsrce % 10000) == 1111:
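Downstream code tracks the rename with a one-line import change; the ``convert`` entry point itself is unchanged::

    # before:
    from iris.fileformats.pp_rules import convert

    # after:
    from iris.fileformats.pp_load_rules import convert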
diff --git a/lib/iris/fileformats/pp_packing.py b/lib/iris/fileformats/pp_packing.py
deleted file mode 100644
index 2dd2d86e2f..0000000000
--- a/lib/iris/fileformats/pp_packing.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# (C) British Crown Copyright 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""
-This extension module provides access to the underlying libmo_unpack library
-functionality.
-
-.. deprecated:: 1.10
- :mod:`iris.fileformats.pp_packing` is deprecated.
- Please install mo_pack (https://github.com/SciTools/mo_pack) instead.
- This provides additional pack/unpacking functionality.
-
-"""
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-import six
-
-from iris._deprecation import warn_deprecated
-from iris.fileformats import _old_pp_packing as old_pp_packing
-
-
-_DEPRECATION_DOCSTRING_SUFFIX = """
-.. deprecated:: 1.10
- :mod:`iris.fileformats.pp_packing` is deprecated.
- Please install mo_pack (https://github.com/SciTools/mo_pack) instead.
- This provides additional pack/unpacking functionality.
-
-"""
-
-_DEPRECATION_WARNING = (
- 'Module "iris.fileformats.pp_packing" is deprecated. '
- 'Please install mo_pack (https://github.com/SciTools/mo_pack) instead. '
- 'This provides additional pack/unpacking functionality.')
-
-
-# Emit a deprecation warning when anyone tries to import this.
-# For quiet, can still use _old_pp_packing instead, as fileformats.pp does.
-warn_deprecated(_DEPRECATION_WARNING)
-
-
-# Define simple wrappers for functions in pp_packing.
-# N.B. signatures must match the originals !
-def wgdos_unpack(data, lbrow, lbnpt, bmdi):
- warn_deprecated(_DEPRECATION_WARNING)
- return old_pp_packing.wgdos_unpack(data, lbrow, lbnpt, bmdi)
-
-
-def rle_decode(data, lbrow, lbnpt, bmdi):
- warn_deprecated(_DEPRECATION_WARNING)
- return old_pp_packing.rle_decode(data, lbrow, lbnpt, bmdi)
-
-
-def _add_fixed_up_docstring(new_fn, original_fn):
- # Add docstring to a wrapper function, based on the original function.
- # This would be simpler if Sphinx were less fussy about formatting.
- docstring = original_fn.__doc__
- lines = [line for line in docstring.split('\n')]
- # Strip off last blank lines, and add deprecation notice.
- while len(lines[-1].strip()) == 0:
- lines = lines[:-1]
- docstring = '\n'.join(lines)
- docstring += _DEPRECATION_DOCSTRING_SUFFIX
- new_fn.__doc__ = docstring
-
-
-_add_fixed_up_docstring(wgdos_unpack, old_pp_packing.wgdos_unpack)
-_add_fixed_up_docstring(rle_decode, old_pp_packing.rle_decode)
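With the ``pp_packing`` fallback removed, WGDOS and RLE unpacking require ``mo_pack``, which ``pp.py`` now calls directly. A hedged sketch of the equivalent direct call, assuming the argument order mirrors the old ``wgdos_unpack(data, lbrow, lbnpt, bmdi)`` wrapper::

    import mo_pack

    # raw_bytes, lbrow, lbnpt and bmdi as read from the PP field header:
    data = mo_pack.decompress_wgdos(raw_bytes, lbrow, lbnpt, bmdi)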
diff --git a/lib/iris/fileformats/pp_save_rules.py b/lib/iris/fileformats/pp_save_rules.py
new file mode 100644
index 0000000000..7fcc4ab639
--- /dev/null
+++ b/lib/iris/fileformats/pp_save_rules.py
@@ -0,0 +1,807 @@
+# (C) British Crown Copyright 2017, Met Office
+#
+# This file is part of Iris.
+#
+# Iris is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Iris is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Iris. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+from six.moves import (filter, input, map, range, zip) # noqa
+import six
+
+import warnings
+
+import iris
+from iris.fileformats._ff_cross_references import STASH_TRANS
+from iris.aux_factory import HybridHeightFactory, HybridPressureFactory
+from iris.fileformats.um_cf_map import CF_TO_LBFC
+from iris.fileformats._pp_lbproc_pairs import LBPROC_MAP
+from iris.fileformats.rules import (aux_factory,
+ has_aux_factory,
+ scalar_cell_method,
+ scalar_coord,
+ vector_coord)
+from iris.util import is_regular, regular_step
+import netcdftime
+
+
+def _basic_coord_system_rules(cube, pp):
+ """
+ Rules for setting the coord system of the PP field.
+
+ Args:
+ cube: the cube being saved as a series of PP fields.
+ pp: the current PP field having save rules applied.
+
+ Returns:
+ The PP field with updated metadata.
+
+ """
+ if (cube.coord_system("GeogCS") is not None or
+ cube.coord_system(None) is None):
+ pp.bplat = 90
+ pp.bplon = 0
+ elif cube.coord_system("RotatedGeogCS") is not None:
+ pp.bplat = cube.coord_system("RotatedGeogCS").grid_north_pole_latitude
+ pp.bplon = cube.coord_system("RotatedGeogCS").grid_north_pole_longitude
+ return pp
+
+
+def _um_version_rules(cube, pp):
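+    """
+    Rules for setting the LBSRCE field of the PP field, from the cube's
+    'source' and 'um_version' attributes.
+
+    Args:
+        cube: the cube being saved as a series of PP fields.
+        pp: the current PP field having save rules applied.
+
+    Returns:
+        The PP field with updated metadata.
+
+    """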
+ from_um_str = "Data from Met Office Unified Model"
+ source_attr = cube.attributes.get('source')
+ if source_attr is not None:
+ um_version = source_attr.rsplit(from_um_str, 1)
+
+ if ('um_version' not in cube.attributes and
+ 'source' in cube.attributes and
+ len(um_version) > 1 and
+ len(um_version[1]) == 0):
+ # UM - no version number.
+ pp.lbsrce = 1111
+ elif ('um_version' not in cube.attributes and
+ 'source' in cube.attributes and
+ len(um_version) > 1 and
+ len(um_version[1]) > 0):
+ # UM - with version number.
+ pp.lbsrce = int(float(um_version[1]) * 1000000) + 1111
+ elif 'um_version' in cube.attributes:
+ # UM - from 'um_version' attribute.
+ um_ver_minor = int(cube.attributes['um_version'].split('.')[1])
+ um_ver_major = int(cube.attributes['um_version'].split('.')[0])
+ pp.lbsrce = 1111 + 10000 * um_ver_minor + 1000000 * um_ver_major
+ return pp
+
+
+def _stash_rules(cube, pp):
+ """
+ Attributes rules for setting the STASH attribute of the PP field.
+
+ Args:
+ cube: the cube being saved as a series of PP fields.
+ pp: the current PP field having save rules applied.
+
+ Returns:
+ The PP field with updated metadata.
+
+ """
+ if 'STASH' in cube.attributes:
+ stash = cube.attributes['STASH']
+ if isinstance(stash, iris.fileformats.pp.STASH):
+ pp.lbuser[3] = 1000 * (stash.section or 0) + (stash.item or 0)
+ pp.lbuser[6] = (stash.model or 0)
+ return pp
+
+
+def _general_time_rules(cube, pp):
+ """
+ Rules for setting time metadata of the PP field.
+
+ Args:
+ cube: the cube being saved as a series of PP fields.
+ pp: the current PP field having save rules applied.
+
+ Returns:
+ The PP field with updated metadata.
+
+ """
+ time_coord = scalar_coord(cube, 'time')
+ fp_coord = scalar_coord(cube, 'forecast_period')
+ frt_coord = scalar_coord(cube, 'forecast_reference_time')
+ clim_season_coord = scalar_coord(cube, 'clim_season')
+
+ cm_time_mean = scalar_cell_method(cube, 'mean', 'time')
+ cm_time_min = scalar_cell_method(cube, 'minimum', 'time')
+ cm_time_max = scalar_cell_method(cube, 'maximum', 'time')
+
+ # No forecast.
+ if time_coord is not None and fp_coord is None and frt_coord is None:
+ pp.lbtim.ia = 0
+ pp.lbtim.ib = 0
+ pp.t1 = time_coord.units.num2date(time_coord.points[0])
+ pp.t2 = netcdftime.datetime(0, 0, 0)
+
+ # Forecast.
+ if (time_coord is not None and
+ not time_coord.has_bounds() and
+ fp_coord is not None):
+ pp.lbtim.ia = 0
+ pp.lbtim.ib = 1
+ pp.t1 = time_coord.units.num2date(time_coord.points[0])
+ pp.t2 = time_coord.units.num2date(time_coord.points[0] -
+ fp_coord.points[0])
+ pp.lbft = fp_coord.points[0]
+
+ # Time mean (non-climatological).
+ # XXX This only works when we have a single timestep.
+ if (time_coord is not None and
+ time_coord.has_bounds() and
+ clim_season_coord is None and
+ fp_coord is not None and
+ fp_coord.has_bounds()):
+ # XXX How do we know *which* time to use if there are more than
+ # one? *Can* there be more than one?
+ pp.lbtim.ib = 2
+ pp.t1 = time_coord.units.num2date(time_coord.bounds[0, 0])
+ pp.t2 = time_coord.units.num2date(time_coord.bounds[0, 1])
+ pp.lbft = fp_coord.units.convert(fp_coord.bounds[0, 1], 'hours')
+
+ if (time_coord is not None and
+ time_coord.has_bounds() and
+ clim_season_coord is None and
+ fp_coord is None and
+ frt_coord is not None):
+ # Handle missing forecast period using time and forecast ref time.
+ pp.lbtim.ib = 2
+ pp.t1 = time_coord.units.num2date(time_coord.bounds[0, 0])
+ pp.t2 = time_coord.units.num2date(time_coord.bounds[0, 1])
+ stop = time_coord.units.convert(time_coord.bounds[0, 1],
+ 'hours since epoch')
+ start = frt_coord.units.convert(frt_coord.points[0],
+ 'hours since epoch')
+ pp.lbft = stop - start
+
+ if (time_coord is not None and
+ time_coord.has_bounds() and
+ clim_season_coord is None and
+ (fp_coord is not None or frt_coord is not None) and
+ cm_time_mean is not None and
+ cm_time_mean.intervals != () and
+ cm_time_mean.intervals[0].endswith('hour')):
+ pp.lbtim.ia = int(cm_time_mean.intervals[0][:-5])
+
+ if (time_coord is not None and
+ time_coord.has_bounds() and
+ clim_season_coord is None and
+ (fp_coord is not None or frt_coord is not None) and
+ (cm_time_mean is None or cm_time_mean.intervals == () or
+ not cm_time_mean.intervals[0].endswith('hour'))):
+ pp.lbtim.ia = 0
+
+ # If the cell methods contain a minimum then overwrite lbtim.ia with this
+ # interval.
+ if (time_coord is not None and
+ time_coord.has_bounds() and
+ clim_season_coord is None and
+ (fp_coord is not None or frt_coord is not None) and
+ cm_time_min is not None and
+ cm_time_min.intervals != () and
+ cm_time_min.intervals[0].endswith('hour')):
+ # Set lbtim.ia with the integer part of the cell method's interval
+ # e.g. if interval is '24 hour' then lbtim.ia becomes 24.
+ pp.lbtim.ia = int(cm_time_min.intervals[0][:-5])
+
+ # If the cell methods contain a maximum then overwrite lbtim.ia with this
+ # interval.
+ if (time_coord is not None and
+ time_coord.has_bounds() and
+ clim_season_coord is None and
+ (fp_coord is not None or frt_coord is not None) and
+ cm_time_max is not None and
+ cm_time_max.intervals != () and
+ cm_time_max.intervals[0].endswith('hour')):
+ # Set lbtim.ia with the integer part of the cell method's interval
+ # e.g. if interval is '1 hour' then lbtim.ia becomes 1.
+ pp.lbtim.ia = int(cm_time_max.intervals[0][:-5])
+
+ if time_coord is not None and time_coord.has_bounds():
+        lower_bound_yr = \
+            time_coord.units.num2date(time_coord.bounds[0, 0]).year
+        upper_bound_yr = \
+            time_coord.units.num2date(time_coord.bounds[0, 1]).year
+ else:
+ lower_bound_yr = None
+ upper_bound_yr = None
+
+ # Climatological time means.
+ if (time_coord is not None and
+ time_coord.has_bounds() and
+ lower_bound_yr == upper_bound_yr and
+ fp_coord is not None and
+ fp_coord.has_bounds() and
+ clim_season_coord is not None and
+ 'clim_season' in cube.cell_methods[-1].coord_names):
+ # Climatological time mean - single year.
+ pp.lbtim.ia = 0
+ pp.lbtim.ib = 2
+ pp.t1 = time_coord.units.num2date(time_coord.bounds[0, 0])
+ pp.t2 = time_coord.units.num2date(time_coord.bounds[0, 1])
+ pp.lbft = fp_coord.units.convert(fp_coord.bounds[0, 1], 'hours')
+
+ elif (time_coord is not None and
+ time_coord.has_bounds() and
+ lower_bound_yr != upper_bound_yr and
+ fp_coord is not None and
+ fp_coord.has_bounds() and
+ clim_season_coord is not None and
+ 'clim_season' in cube.cell_methods[-1].coord_names and
+ clim_season_coord.points[0] == 'djf'):
+ # Climatological time mean - spanning years - djf.
+ pp.lbtim.ia = 0
+ pp.lbtim.ib = 3
+ pp.t1 = time_coord.units.num2date(time_coord.bounds[0, 0])
+ pp.t2 = time_coord.units.num2date(time_coord.bounds[0, 1])
+ if pp.t1.month == 12:
+            pp.t1 = netcdftime.datetime(pp.t1.year, 12, 1, 0, 0, 0)
+        else:
+            pp.t1 = netcdftime.datetime(pp.t1.year - 1, 12, 1, 0, 0, 0)
+ pp.t2 = netcdftime.datetime(pp.t2.year, 3, 1, 0, 0, 0)
+ _conditional_warning(
+ time_coord.bounds[0, 0] != time_coord.units.date2num(pp.t1),
+ "modified t1 for climatological seasonal mean")
+ _conditional_warning(
+ time_coord.bounds[0, 1] != time_coord.units.date2num(pp.t2),
+ "modified t2 for climatological seasonal mean")
+ pp.lbft = fp_coord.units.convert(fp_coord.bounds[0, 1], 'hours')
+
+ elif (time_coord is not None and
+ time_coord.has_bounds() and
+ lower_bound_yr != upper_bound_yr and
+ fp_coord is not None and
+ fp_coord.has_bounds() and
+ clim_season_coord is not None and
+ 'clim_season' in cube.cell_methods[-1].coord_names and
+ clim_season_coord.points[0] == 'mam'):
+ # Climatological time mean - spanning years - mam.
+ pp.lbtim.ia = 0
+ pp.lbtim.ib = 3
+        # Take the years from the time bounds, then snap t1/t2 to the
+        # canonical season boundaries (1 Mar - 1 Jun for 'mam').
+ pp.t1 = time_coord.units.num2date(time_coord.bounds[0, 0])
+ pp.t2 = time_coord.units.num2date(time_coord.bounds[0, 1])
+ pp.t1 = netcdftime.datetime(pp.t1.year, 3, 1, 0, 0, 0)
+ pp.t2 = netcdftime.datetime(pp.t2.year, 6, 1, 0, 0, 0)
+ _conditional_warning(
+ time_coord.bounds[0, 0] != time_coord.units.date2num(pp.t1),
+ "modified t1 for climatological seasonal mean")
+ _conditional_warning(
+ time_coord.bounds[0, 1] != time_coord.units.date2num(pp.t2),
+ "modified t2 for climatological seasonal mean")
+ pp.lbft = fp_coord.units.convert(fp_coord.bounds[0, 1], 'hours')
+
+ elif (time_coord is not None and
+ time_coord.has_bounds() and
+ lower_bound_yr != upper_bound_yr and
+ fp_coord is not None and
+ fp_coord.has_bounds() and
+ clim_season_coord is not None and
+ 'clim_season' in cube.cell_methods[-1].coord_names and
+ clim_season_coord.points[0] == 'jja'):
+ # Climatological time mean - spanning years - jja.
+ pp.lbtim.ia = 0
+ pp.lbtim.ib = 3
+        # Take the years from the time bounds, then snap t1/t2 to the
+        # canonical season boundaries (1 Jun - 1 Sep for 'jja').
+ pp.t1 = time_coord.units.num2date(time_coord.bounds[0, 0])
+ pp.t2 = time_coord.units.num2date(time_coord.bounds[0, 1])
+ pp.t1 = netcdftime.datetime(pp.t1.year, 6, 1, 0, 0, 0)
+ pp.t2 = netcdftime.datetime(pp.t2.year, 9, 1, 0, 0, 0)
+ _conditional_warning(
+ time_coord.bounds[0, 0] != time_coord.units.date2num(pp.t1),
+ "modified t1 for climatological seasonal mean")
+ _conditional_warning(
+ time_coord.bounds[0, 1] != time_coord.units.date2num(pp.t2),
+ "modified t2 for climatological seasonal mean")
+ pp.lbft = fp_coord.units.convert(fp_coord.bounds[0, 1], 'hours')
+
+ elif (time_coord is not None and
+ time_coord.has_bounds() and
+ lower_bound_yr != upper_bound_yr and
+ fp_coord is not None and
+ fp_coord.has_bounds() and
+ clim_season_coord is not None and
+ 'clim_season' in cube.cell_methods[-1].coord_names and
+ clim_season_coord.points[0] == 'son'):
+ # Climatological time mean - spanning years - son.
+ pp.lbtim.ia = 0
+ pp.lbtim.ib = 3
+        # Take the years from the time bounds, then snap t1/t2 to the
+        # canonical season boundaries (1 Sep - 1 Dec for 'son').
+ pp.t1 = time_coord.units.num2date(time_coord.bounds[0, 0])
+ pp.t2 = time_coord.units.num2date(time_coord.bounds[0, 1])
+ pp.t1 = netcdftime.datetime(pp.t1.year, 9, 1, 0, 0, 0)
+ pp.t2 = netcdftime.datetime(pp.t2.year, 12, 1, 0, 0, 0)
+ _conditional_warning(
+ time_coord.bounds[0, 0] != time_coord.units.date2num(pp.t1),
+ "modified t1 for climatological seasonal mean")
+ _conditional_warning(
+ time_coord.bounds[0, 1] != time_coord.units.date2num(pp.t2),
+ "modified t2 for climatological seasonal mean")
+ pp.lbft = fp_coord.units.convert(fp_coord.bounds[0, 1], 'hours')
+
+ return pp
+
+
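For orientation: the `lbtim.ia`, `lbtim.ib` and `lbtim.ic` components set by `_general_time_rules` (and by `_calendar_rules` next) pack into the single LBTIM header word as 100*IA + 10*IB + IC. A minimal sketch, illustrative only and not part of this patch:

    def pack_lbtim(ia, ib, ic):
        # IA: averaging interval in hours, IB: time-processing code,
        # IC: calendar code (1=gregorian, 2=360-day, 4=365-day).
        return 100 * ia + 10 * ib + ic

    # e.g. a 24-hourly time mean (ib=2) on a 360-day calendar (ic=2):
    assert pack_lbtim(ia=24, ib=2, ic=2) == 2422
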
+def _calendar_rules(cube, pp):
+ """
+ Rules for setting the calendar of the PP field.
+
+ Args:
+ cube: the cube being saved as a series of PP fields.
+ pp: the current PP field having save rules applied.
+
+ Returns:
+ The PP field with updated metadata.
+
+ """
+ time_coord = scalar_coord(cube, 'time')
+ if time_coord is not None:
+ if time_coord.units.calendar == '360_day':
+ pp.lbtim.ic = 2
+ elif time_coord.units.calendar == 'gregorian':
+ pp.lbtim.ic = 1
+ elif time_coord.units.calendar == '365_day':
+ pp.lbtim.ic = 4
+ return pp
+
+
+def _grid_and_pole_rules(cube, pp):
+ """
+ Rules for setting the horizontal grid and pole location of the PP field.
+
+ Args:
+ cube: the cube being saved as a series of PP fields.
+ pp: the current PP field having save rules applied.
+
+ Returns:
+ The PP field with updated metadata.
+
+ """
+ lon_coord = vector_coord(cube, 'longitude')
+ grid_lon_coord = vector_coord(cube, 'grid_longitude')
+ lat_coord = vector_coord(cube, 'latitude')
+ grid_lat_coord = vector_coord(cube, 'grid_latitude')
+
+ if lon_coord and not is_regular(lon_coord):
+ pp.bzx = 0
+ pp.bdx = 0
+ pp.lbnpt = lon_coord.shape[0]
+ pp.x = lon_coord.points
+ elif grid_lon_coord and not is_regular(grid_lon_coord):
+ pp.bzx = 0
+ pp.bdx = 0
+ pp.lbnpt = grid_lon_coord.shape[0]
+ pp.x = grid_lon_coord.points
+ elif lon_coord and is_regular(lon_coord):
+ pp.bzx = lon_coord.points[0] - regular_step(lon_coord)
+ pp.bdx = regular_step(lon_coord)
+ pp.lbnpt = len(lon_coord.points)
+ elif grid_lon_coord and is_regular(grid_lon_coord):
+ pp.bzx = grid_lon_coord.points[0] - regular_step(grid_lon_coord)
+ pp.bdx = regular_step(grid_lon_coord)
+ pp.lbnpt = len(grid_lon_coord.points)
+
+ if lat_coord and not is_regular(lat_coord):
+ pp.bzy = 0
+ pp.bdy = 0
+ pp.lbrow = lat_coord.shape[0]
+ pp.y = lat_coord.points
+ elif grid_lat_coord and not is_regular(grid_lat_coord):
+ pp.bzy = 0
+ pp.bdy = 0
+ pp.lbrow = grid_lat_coord.shape[0]
+ pp.y = grid_lat_coord.points
+ elif lat_coord and is_regular(lat_coord):
+ pp.bzy = lat_coord.points[0] - regular_step(lat_coord)
+ pp.bdy = regular_step(lat_coord)
+ pp.lbrow = len(lat_coord.points)
+ elif grid_lat_coord and is_regular(grid_lat_coord):
+ pp.bzy = grid_lat_coord.points[0] - regular_step(grid_lat_coord)
+ pp.bdy = regular_step(grid_lat_coord)
+ pp.lbrow = len(grid_lat_coord.points)
+
+ # Check if we have a rotated coord system.
+ if cube.coord_system("RotatedGeogCS") is not None:
+ pp.lbcode = int(pp.lbcode) + 100
+
+ # Check if we have a circular x-coord.
+ for lon_coord in (lon_coord, grid_lon_coord):
+ if lon_coord is not None:
+ if lon_coord.circular:
+ pp.lbhem = 0
+ else:
+ pp.lbhem = 3
+
+ return pp
+
+
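In the regular branches above, BZX/BZY hold the coordinate origin one grid step *before* the first point, with BDX/BDY holding the step itself, so point i is recovered as bzx + (i + 1) * bdx. A small self-contained sketch (the axis values are assumptions for illustration):

    import numpy as np

    points = np.array([0.0, 90.0, 180.0, 270.0])  # a regular longitude axis
    step = points[1] - points[0]
    bzx, bdx, lbnpt = points[0] - step, step, len(points)

    # Reconstruct the axis from the PP header encoding.
    assert np.allclose(bzx + (np.arange(lbnpt) + 1) * bdx, points)
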
+def _non_std_cross_section_rules(cube, pp):
+ """
+ Rules for applying non-standard cross-sections to the PP field.
+
+ Args:
+ cube: the cube being saved as a series of PP fields.
+ pp: the current PP field having save rules applied.
+
+ Returns:
+ The PP field with updated metadata.
+
+ """
+ # Define commonly-used coords.
+ air_pres_coord = vector_coord(cube, 'air_pressure')
+ depth_coord = vector_coord(cube, 'depth')
+ eta_coord = vector_coord(cube, 'eta')
+ lat_coord = vector_coord(cube, 'latitude')
+ time_coord = vector_coord(cube, 'time')
+
+ # Non-standard cross-section with bounds - x=latitude, y=air_pressure.
+ if (air_pres_coord is not None and
+ not air_pres_coord.circular and
+ air_pres_coord.has_bounds() and
+ lat_coord is not None and
+ not lat_coord.circular and
+ lat_coord.has_bounds()):
+ pp.lbcode = 10000 + int(100*10) + 1
+ pp.bgor = 0
+ pp.y = air_pres_coord.points
+ pp.y_lower_bound = air_pres_coord.bounds[:, 0]
+ pp.y_upper_bound = air_pres_coord.bounds[:, 1]
+ pp.x = lat_coord.points
+ pp.x_lower_bound = lat_coord.bounds[:, 0]
+ pp.x_upper_bound = lat_coord.bounds[:, 1]
+ pp.lbrow = air_pres_coord.shape[0]
+ pp.lbnpt = lat_coord.shape[0]
+ pp.bzx = pp.bzy = pp.bdx = pp.bdy = 0
+
+ # Non-standard cross-section with bounds - x=latitude, y=depth.
+ if (depth_coord is not None and
+ not depth_coord.circular and
+ depth_coord.has_bounds() and
+ lat_coord is not None and
+ not lat_coord.circular and
+ lat_coord.has_bounds()):
+ pp.lbcode = 10000 + int(100*10) + 4
+ pp.bgor = 0
+ pp.y = depth_coord.points
+ pp.y_lower_bound = depth_coord.bounds[:, 0]
+ pp.y_upper_bound = depth_coord.bounds[:, 1]
+ pp.x = lat_coord.points
+ pp.x_lower_bound = lat_coord.bounds[:, 0]
+ pp.x_upper_bound = lat_coord.bounds[:, 1]
+ pp.lbrow = depth_coord.shape[0]
+ pp.lbnpt = lat_coord.shape[0]
+ pp.bzx = pp.bzy = pp.bdx = pp.bdy = 0
+
+ # Non-standard cross-section with bounds - x=latitude, y=eta.
+ if (eta_coord is not None and
+ not eta_coord.circular and
+ eta_coord.has_bounds() and
+ lat_coord is not None and
+ not lat_coord.circular and
+ lat_coord.has_bounds()):
+ pp.lbcode = 10000 + int(100*10) + 3
+ pp.bgor = 0
+ pp.y = eta_coord.points
+ pp.y_lower_bound = eta_coord.bounds[:, 0]
+ pp.y_upper_bound = eta_coord.bounds[:, 1]
+ pp.x = lat_coord.points
+ pp.x_lower_bound = lat_coord.bounds[:, 0]
+ pp.x_upper_bound = lat_coord.bounds[:, 1]
+ pp.lbrow = eta_coord.shape[0]
+ pp.lbnpt = lat_coord.shape[0]
+ pp.bzx = pp.bzy = pp.bdx = pp.bdy = 0
+
+ # Non-standard cross-section with bounds - x=days (360 calendar), y=depth.
+ if (depth_coord is not None and
+ not depth_coord.circular and
+ depth_coord.has_bounds() and
+ time_coord is not None and
+ not time_coord.circular and
+ time_coord.has_bounds()):
+ pp.lbcode = 10000 + int(100*23) + 4
+ pp.bgor = 0
+ pp.y = depth_coord.points
+ pp.y_lower_bound = depth_coord.bounds[:, 0]
+ pp.y_upper_bound = depth_coord.bounds[:, 1]
+ pp.x = time_coord.points
+ pp.x_lower_bound = time_coord.bounds[:, 0]
+ pp.x_upper_bound = time_coord.bounds[:, 1]
+ pp.lbrow = depth_coord.shape[0]
+ pp.lbnpt = time_coord.shape[0]
+ pp.bzx = pp.bzy = pp.bdx = pp.bdy = 0
+
+ # Non-standard cross-section with bounds -
+ # x=days (360 calendar), y=air_pressure.
+ if (air_pres_coord is not None and
+ not air_pres_coord.circular and
+ air_pres_coord.has_bounds() and
+ time_coord is not None and
+ not time_coord.circular and
+ time_coord.has_bounds()):
+ pp.lbcode = 10000 + int(100*23) + 1
+ pp.bgor = 0
+ pp.y = air_pres_coord.points
+ pp.y_lower_bound = air_pres_coord.bounds[:, 0]
+ pp.y_upper_bound = air_pres_coord.bounds[:, 1]
+ pp.x = time_coord.points
+ pp.x_lower_bound = time_coord.bounds[:, 0]
+ pp.x_upper_bound = time_coord.bounds[:, 1]
+ pp.lbrow = air_pres_coord.shape[0]
+ pp.lbnpt = time_coord.shape[0]
+ pp.bzx = pp.bzy = pp.bdx = pp.bdy = 0
+
+ return pp
+
+
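Each cross-section branch above builds LBCODE the same way: 10000 marks a cross-section, the hundreds carry the x-axis type and the low digits the y-axis type, using the PP axis codes that appear in this function (pressure=1, eta=3, depth=4, latitude=10, time on a 360-day calendar=23). A sketch, hedged on that reading of the axis codes:

    def cross_section_lbcode(x_axis_code, y_axis_code):
        # 10000 flags a cross-section; axis codes occupy the lower digits.
        return 10000 + 100 * x_axis_code + y_axis_code

    assert cross_section_lbcode(10, 1) == 11001  # x=latitude, y=air_pressure
    assert cross_section_lbcode(23, 4) == 12304  # x=time (360-day), y=depth
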
+def _lbproc_rules(cube, pp):
+ """
+    Rules for setting the processing code (LBPROC) of the PP field.
+
+    Note: these rules reset `pp.lbproc` to 0 and then accumulate the
+    individual processing flags onto it.
+
+ Args:
+ cube: the cube being saved as a series of PP fields.
+ pp: the current PP field having save rules applied.
+
+ Returns:
+ The PP field with updated metadata.
+
+ """
+ # Basic setting (this may be overridden by subsequent rules).
+ pp.lbproc = 0
+
+ if cube.attributes.get("ukmo__process_flags", None):
+ pp.lbproc += sum([LBPROC_MAP[name]
+ for name in cube.attributes["ukmo__process_flags"]])
+
+ # Zonal-mean: look for a CellMethod which is a "mean" over "longitude" or
+ # "grid_longitude".
+ if (scalar_cell_method(cube, 'mean', 'longitude') is not None or
+ scalar_cell_method(cube, 'mean', 'grid_longitude') is not None):
+ pp.lbproc += 64
+
+ # Time-mean: look for a CellMethod which is a "mean" over "time".
+ if scalar_cell_method(cube, 'mean', 'time') is not None:
+ pp.lbproc += 128
+
+ # Time-minimum: look for a CellMethod which is a "minimum" over "time".
+ if scalar_cell_method(cube, 'minimum', 'time') is not None:
+ pp.lbproc += 4096
+
+ # Time-maximum: look for a CellMethod which is a "maximum" over "time".
+ if scalar_cell_method(cube, 'maximum', 'time') is not None:
+ pp.lbproc += 8192
+
+ return pp
+
+
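LBPROC is a bit field, so the `+=` accumulation above is really setting independent flag bits (64 = zonal mean, 128 = time mean, 4096 = time minimum, 8192 = time maximum, plus whatever `ukmo__process_flags` contributes). The same accumulation written as explicit bitwise ORs, as a sketch only:

    FLAG_BITS = {'zonal mean': 64, 'time mean': 128,
                 'time minimum': 4096, 'time maximum': 8192}

    def build_lbproc(process_names):
        lbproc = 0
        for name in process_names:
            # Addition above == OR here, since the flag bits are disjoint.
            lbproc |= FLAG_BITS[name]
        return lbproc

    assert build_lbproc(['zonal mean', 'time mean']) == 192
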
+def _vertical_rules(cube, pp):
+ """
+ Rules for setting vertical levels for the PP field.
+
+ Args:
+ cube: the cube being saved as a series of PP fields.
+ pp: the current PP field having save rules applied.
+
+ Returns:
+ The PP field with updated metadata.
+
+ """
+ # Define commonly-used coords.
+ air_pres_coord = scalar_coord(cube, 'air_pressure')
+ apt_coord = scalar_coord(cube, 'air_potential_temperature')
+ depth_coord = scalar_coord(cube, 'depth')
+ height_coord = scalar_coord(cube, 'height')
+ level_height_coord = scalar_coord(cube, 'level_height')
+ mln_coord = scalar_coord(cube, 'model_level_number')
+ pressure_coord = scalar_coord(cube, 'pressure')
+ pseudo_level_coord = scalar_coord(cube, 'pseudo_level')
+ sigma_coord = scalar_coord(cube, 'sigma')
+ soil_mln_coord = scalar_coord(cube, 'soil_model_level_number')
+
+ # Define commonly-used aux factories.
+ try:
+ height_factory = aux_factory(cube, HybridHeightFactory)
+ except ValueError:
+ height_factory = None
+ try:
+ pressure_factory = aux_factory(cube, HybridPressureFactory)
+ except ValueError:
+ pressure_factory = None
+
+    # Set the pseudo level: lbuser[4], i.e. the UM's LBUSER(5).
+ if (pseudo_level_coord is not None and
+ not pseudo_level_coord.bounds):
+ pp.lbuser[4] = pseudo_level_coord.points[0]
+
+ # Single height level.
+ if (height_coord is not None and
+ not height_coord.bounds and
+ height_coord.points[0] == 1.5 and
+ cube.name() == 'air_temperature'):
+ pp.lbvc = 129
+ pp.blev = -1
+
+ if pp.lbvc == 0 and height_coord is not None and not height_coord.bounds:
+ pp.lbvc = 1
+ pp.blev = cube.coord('height').points[0]
+
+ # Single air_pressure level.
+ if air_pres_coord is not None and not air_pres_coord.bounds:
+ pp.lbvc = 8
+ pp.blev = air_pres_coord.points[0]
+
+ # Single pressure level.
+ if pressure_coord is not None and not pressure_coord.bounds:
+ pp.lbvc = 8
+ pp.blev = pressure_coord.points[0]
+
+ # Single depth level (non cross-section).
+ if (mln_coord is not None and
+ not mln_coord.bounds and
+ depth_coord is not None and
+ not depth_coord.bounds):
+ pp.lbvc = 2
+ pp.lblev = mln_coord.points[0]
+ pp.blev = depth_coord.points[0]
+
+ # Single depth level (Non-dimensional soil model level).
+ if (soil_mln_coord is not None and
+ not soil_mln_coord.has_bounds() and
+ air_pres_coord is None and
+ depth_coord is None and
+ height_coord is None and
+ pressure_coord is None and
+ cube.standard_name is not None and
+ 'soil' in cube.standard_name):
+ pp.lbvc = 6
+ pp.lblev = soil_mln_coord.points[0]
+ pp.blev = pp.lblev
+ pp.brsvd[0] = 0
+ pp.brlev = 0
+
+ # Single depth level (soil depth).
+ if (depth_coord is not None and
+ depth_coord.has_bounds() and
+ air_pres_coord is None and
+ soil_mln_coord is None and
+ mln_coord is None and
+ height_coord is None and
+ pressure_coord is None and
+ cube.standard_name is not None and
+ 'soil' in cube.standard_name):
+ pp.lbvc = 6
+ pp.blev = depth_coord.points[0]
+ pp.brsvd[0] = depth_coord.bounds[0, 0]
+ pp.brlev = depth_coord.bounds[0, 1]
+
+ # Single potential-temperature level.
+ if (apt_coord is not None and
+ not apt_coord.bounds and
+ air_pres_coord is None and
+ depth_coord is None and
+ height_coord is None and
+ pressure_coord is None and
+ mln_coord is None):
+ pp.lbvc = 19
+ pp.lblev = apt_coord.points[0]
+ pp.blev = apt_coord.points[0]
+
+ # Single hybrid_height level
+ # (without aux factory e.g. due to missing orography).
+ if (not has_aux_factory(cube, HybridHeightFactory) and
+ mln_coord is not None and
+ mln_coord.bounds is None and
+ level_height_coord is not None and
+ level_height_coord.bounds is not None and
+ sigma_coord is not None and
+ sigma_coord.bounds is not None):
+ pp.lbvc = 65
+ pp.lblev = mln_coord.points[0]
+ pp.blev = level_height_coord.points[0]
+ pp.brlev = level_height_coord.bounds[0, 0]
+ pp.brsvd[0] = level_height_coord.bounds[0, 1]
+ pp.bhlev = sigma_coord.points[0]
+ pp.bhrlev = sigma_coord.bounds[0, 0]
+ pp.brsvd[1] = sigma_coord.bounds[0, 1]
+
+ # Single hybrid_height level (with aux factory).
+ if (has_aux_factory(cube, HybridHeightFactory) and
+ mln_coord is not None and
+ mln_coord.bounds is None and
+ height_factory.dependencies['delta'] is not None and
+ height_factory.dependencies['delta'].bounds is not None and
+ height_factory.dependencies['sigma'] is not None and
+ height_factory.dependencies['sigma'].bounds is not None):
+ pp.lbvc = 65
+ pp.lblev = mln_coord.points[0]
+ pp.blev = height_factory.dependencies['delta'].points[0]
+ pp.brlev = height_factory.dependencies['delta'].bounds[0, 0]
+ pp.brsvd[0] = height_factory.dependencies['delta'].bounds[0, 1]
+ pp.bhlev = height_factory.dependencies['sigma'].points[0]
+ pp.bhrlev = height_factory.dependencies['sigma'].bounds[0, 0]
+ pp.brsvd[1] = height_factory.dependencies['sigma'].bounds[0, 1]
+
+ # Single hybrid pressure level.
+ if (has_aux_factory(cube, HybridPressureFactory) and
+ mln_coord is not None and
+ mln_coord.bounds is None and
+ pressure_factory.dependencies['delta'] is not None and
+ pressure_factory.dependencies['delta'].bounds is not None and
+ pressure_factory.dependencies['sigma'] is not None and
+ pressure_factory.dependencies['sigma'].bounds is not None):
+ pp.lbvc = 9
+ pp.lblev = mln_coord.points[0]
+ pp.blev = pressure_factory.dependencies['sigma'].points[0]
+ pp.brlev = pressure_factory.dependencies['sigma'].bounds[0, 0]
+ pp.brsvd[0] = pressure_factory.dependencies['sigma'].bounds[0, 1]
+ pp.bhlev = pressure_factory.dependencies['delta'].points[0]
+ pp.bhrlev = pressure_factory.dependencies['delta'].bounds[0, 0]
+ pp.brsvd[1] = pressure_factory.dependencies['delta'].bounds[0, 1]
+
+ return pp
+
+
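For the hybrid-level branches, the level's 'delta' and 'sigma' dependency coordinates supply blev/bhlev, and their bounds fan out across brlev, bhrlev and the two brsvd slots. A sketch of that lookup, assuming a cube that already carries a hybrid height factory with bounded dependencies (the helper name is invented for illustration):

    from iris.aux_factory import HybridHeightFactory

    def hybrid_height_header_values(cube):
        factory, = [f for f in cube.aux_factories
                    if isinstance(f, HybridHeightFactory)]
        delta = factory.dependencies['delta']
        sigma = factory.dependencies['sigma']
        return {'blev': delta.points[0],
                'brlev': delta.bounds[0, 0], 'brsvd0': delta.bounds[0, 1],
                'bhlev': sigma.points[0],
                'bhrlev': sigma.bounds[0, 0], 'brsvd1': sigma.bounds[0, 1]}
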
+def _all_other_rules(cube, pp):
+ """
+    Fallback rules for the remaining PP field metadata, currently just
+    the field code (LBFC).
+
+ Args:
+ cube: the cube being saved as a series of PP fields.
+ pp: the current PP field having save rules applied.
+
+ Returns:
+ The PP field with updated metadata.
+
+ """
+ # "CFNAME mega-rule."
+ check_items = (cube.standard_name, cube.long_name, str(cube.units))
+ if check_items in CF_TO_LBFC:
+ pp.lbfc = CF_TO_LBFC[check_items]
+
+ # Set STASH code.
+ if ('STASH' in cube.attributes and
+ str(cube.attributes['STASH']) in STASH_TRANS):
+ pp.lbfc = STASH_TRANS[str(cube.attributes['STASH'])].field_code
+
+ return pp
+
+
+def verify(cube, field):
+ # Rules functions.
+ field = _basic_coord_system_rules(cube, field)
+ field = _um_version_rules(cube, field)
+ field = _stash_rules(cube, field)
+ field = _general_time_rules(cube, field)
+ field = _calendar_rules(cube, field)
+ field = _grid_and_pole_rules(cube, field)
+ field = _non_std_cross_section_rules(cube, field)
+ field = _lbproc_rules(cube, field)
+ field = _vertical_rules(cube, field)
+ field = _all_other_rules(cube, field)
+
+ return field
+
+
+# Helper functions used when running the rules.
+
+def _conditional_warning(condition, warning):
+ if condition:
+ warnings.warn(warning)
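`verify` is now the module's whole public surface: it threads a freshly created field through each rule function in turn. A minimal driving sketch, mirroring how the updated integration tests below call it (a bare cube like this means most rules simply no-op):

    import numpy as np
    import iris.fileformats.pp
    from iris.cube import Cube
    from iris.fileformats.pp_save_rules import verify

    cube = Cube(np.zeros((3, 4)), standard_name='air_temperature', units='K')
    field = iris.fileformats.pp.PPField3()
    field.lbfc = 0
    field.lbvc = 0
    field.lbtim = 0
    field.lbuser = [None] * 7
    field = verify(cube, field)
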
diff --git a/lib/iris/fileformats/rules.py b/lib/iris/fileformats/rules.py
index b101604b74..86e134d51f 100644
--- a/lib/iris/fileformats/rules.py
+++ b/lib/iris/fileformats/rules.py
@@ -23,29 +23,16 @@
from six.moves import (filter, input, map, range, zip) # noqa
import six
-import abc
import collections
-from contextlib import contextmanager
-import getpass
-import logging
-import logging.handlers as handlers
-import os
-import os.path
-import platform
-import sys
import warnings
import cf_units
-from iris._deprecation import warn_deprecated
-from iris.analysis._interpolate_private import linear as regrid_linear
-import iris.config as config
+from iris.analysis import Linear
import iris.cube
import iris.exceptions
import iris.fileformats.um_cf_map
-from iris.util import is_regular, regular_step
-RuleResult = collections.namedtuple('RuleResult', ['cube', 'matching_rules', 'factories'])
Factory = collections.namedtuple('Factory', ['factory_class', 'args'])
ReferenceTarget = collections.namedtuple('ReferenceTarget',
('name', 'transform'))
@@ -89,224 +76,6 @@ def as_cube(self):
return self._final_cube
-# Controls the deferred import of all the symbols from iris.coords.
-# This "import all" is used as the rules file does not use fully qualified class names.
-_rules_globals = None
-_import_pending = True
-def _rules_execution_environment():
- """
- Return an environment with the globals needed for rules code execution.
-
- This is needed as the rules file does not use fully qualified class names.
- If something is needed for rules execution, it can be added here.
-
- A master environment is built only when needed (the first call).
- This allows the import of various modules to be deferred, so we don't load
- all of those when we merely import this module.
-
- """
- global _import_pending, _rules_globals
- if _import_pending:
- # Get all module globals, and add other deferred imports.
- import iris.aux_factory
- import iris.coords
- import iris.coord_systems
- import iris.fileformats.um_cf_map
- # Take a copy of all this module's globals.
- _rules_globals = globals().copy()
- # Add various other stuff.
- # NOTE: these are equivalent to "from xx import *": not tidy !
- _rules_globals.update(iris.aux_factory.__dict__)
- _rules_globals.update(iris.coords.__dict__)
- _rules_globals.update(iris.coord_systems.__dict__)
- _rules_globals.update(iris.fileformats.um_cf_map.__dict__)
- _rules_globals.update(cf_units.__dict__)
- _import_pending = False
-
- return _rules_globals.copy()
-
-
-# Dummy logging routine for when we don't want to do any logging.
-def _dummy_log(format, filename, rules):
- pass
-
-
-# Genuine logging routine
-def _real_log(format, filename, rules):
- # Replace "\" with "\\", and "," with "\,"
- filename = filename.replace('\\', '\\\\').replace(',', '\\,')
- _rule_logger.info("%s,%s,%s" % (format, filename, ','.join([rule.id for rule in rules])))
-
-
-# Debug logging routine (more informative that just object ids)
-def _verbose_log(format, filename, rules):
- # Replace "\" with "\\", and "," with "\,"
- filename = filename.replace('\\', '\\\\').replace(',', '\\,')
- _rule_logger.info("\n\n-----\n\n%s,%s,%s" % (format, filename, '\n\n'.join([str(rule) for rule in rules])))
-
-
-# Prepares a logger for file-based logging of rule usage
-def _prepare_rule_logger(verbose=False, log_dir=None):
- # Default to the dummy logger that does nothing
- logger = _dummy_log
-
- # read the log_dir from the config file unless the log_dir argument is set
- if log_dir is None:
- log_dir = config.RULE_LOG_DIR
- # Only do real logging if we've been told the directory to use ...
- if log_dir is not None:
- user = getpass.getuser()
-
- # .. and if we haven't been told to ignore the current invocation.
- ignore = False
- ignore_users = config.RULE_LOG_IGNORE
- if ignore_users is not None:
- ignore_users = ignore_users.split(',')
- ignore = user in ignore_users
-
- if not ignore:
- try:
- hostname = platform.node() or 'UNKNOWN'
- log_path = os.path.join(log_dir, '_'.join([hostname, user]))
- file_handler = handlers.RotatingFileHandler(log_path, maxBytes=1e7, backupCount=5)
- format = '%%(asctime)s,%s,%%(message)s' % getpass.getuser()
- file_handler.setFormatter(logging.Formatter(format, '%Y-%m-%d %H:%M:%S'))
-
- global _rule_logger
- _rule_logger = logging.getLogger('iris.fileformats.rules')
- _rule_logger.setLevel(logging.INFO)
- _rule_logger.addHandler(file_handler)
- _rule_logger.propagate = False
-
- if verbose:
- logger = _verbose_log
- else:
- logger = _real_log
-
- except IOError:
- # If we can't create the log file for some reason then it's fine to just silently
- # ignore the error and fallback to using the dummy logging routine.
- pass
-
- return logger
-
-
-# A flag to control all the text-rules and rules-logging deprecation warnings.
-_enable_rules_deprecations = True
-
-# A context manager to avoid the deprecation warnings for internal calls.
-@contextmanager
-def _disable_deprecation_warnings():
- global _enable_rules_deprecations
- old_flag_value = _enable_rules_deprecations
- try:
- _enable_rules_deprecations = False
- yield
- finally:
- _enable_rules_deprecations = old_flag_value
-
-
-# Defines the "log" function for this module
-# This is a 'private' version: The public one is now deprecated (see on).
-_log_rules = _prepare_rule_logger()
-
-
-# Provide a public 'log' function, which issues a deprecation warning.
-def log(*args, **kwargs):
- if _enable_rules_deprecations:
- warn_deprecated(
- "The `iris.fileformats.rules.log()` method is deprecated.")
- return _log_rules(*args, **kwargs)
-
-
-class DebugString(str):
- """
- Used by the rules for debug purposes
-
- .. deprecated:: 1.10
-
- """
- def __init__(self, *args, **kwargs):
- warn_deprecated(
- "the `iris.fileformats.rules.DebugString class is deprecated.")
- super(DebugString, self).__init__(*args, **kwargs)
-
-
-class CMAttribute(object):
- """
- Used by the rules for defining attributes on the Cube in a consistent manner.
-
- .. deprecated:: 1.10
-
- """
- __slots__ = ('name', 'value')
- def __init__(self, name, value):
- warn_deprecated(
- "the `iris.fileformats.rules.CmAttribute class is deprecated.")
- self.name = name
- self.value = value
-
-
-class CMCustomAttribute(object):
- """
- Used by the rules for defining custom attributes on the Cube in a consistent manner.
-
- .. deprecated:: 1.10
-
- """
- __slots__ = ('name', 'value')
- def __init__(self, name, value):
- warn_deprecated(
- "the `iris.fileformats.rules.CmCustomAttribute class is "
- "deprecated.")
- self.name = name
- self.value = value
-
-
-class CoordAndDims(object):
- """
- Used within rules to represent a mapping of coordinate to data dimensions.
-
- .. deprecated:: 1.10
-
- """
- def __init__(self, coord, dims=None):
- warn_deprecated(
- "the `iris.fileformats.rules.CoordAndDims class is deprecated.")
- self.coord = coord
- if dims is None:
- dims = []
- if not isinstance(dims, list):
- dims = [dims]
- self.dims = dims
-
- def add_coord(self, cube):
- added = False
-
- # Try to add to dim_coords?
- if isinstance(self.coord, iris.coords.DimCoord) and self.dims:
- if len(self.dims) > 1:
- raise Exception("Only 1 dim allowed for a DimCoord")
-
- # Does the cube already have a coord for this dim?
- already_taken = False
- for coord, coord_dim in cube._dim_coords_and_dims:
- if coord_dim == self.dims[0]:
- already_taken = True
- break
-
- if not already_taken:
- cube.add_dim_coord(self.coord, self.dims[0])
- added = True
-
- # If we didn't add it to dim_coords, add it to aux_coords.
- if not added:
- cube.add_aux_coord(self.coord, self.dims)
-
- def __repr__(self):
- return "" % (self.coord.name, self.dims)
-
-
class Reference(iris.util._OrderedHashable):
_names = ('name',)
"""
@@ -315,367 +84,6 @@ class Reference(iris.util._OrderedHashable):
"""
-def calculate_forecast_period(time, forecast_reference_time):
- """
- Return the forecast period in hours derived from time and
- forecast_reference_time scalar coordinates.
-
- .. deprecated:: 1.10
-
- """
- warn_deprecated("the `iris.fileformats.rules.calculate_forecast_period "
- "method is deprecated.")
-
- if time.points.size != 1:
- raise ValueError('Expected a time coordinate with a single '
- 'point. {!r} has {} points.'.format(time.name(),
- time.points.size))
-
- if not time.has_bounds():
- raise ValueError('Expected a time coordinate with bounds.')
-
- if forecast_reference_time.points.size != 1:
- raise ValueError('Expected a forecast_reference_time coordinate '
- 'with a single point. {!r} has {} '
- 'points.'.format(forecast_reference_time.name(),
- forecast_reference_time.points.size))
-
- origin = time.units.origin.replace(time.units.origin.split()[0], 'hours')
- units = cf_units.Unit(origin, calendar=time.units.calendar)
-
- # Determine start and eof of period in hours since a common epoch.
- end = time.units.convert(time.bounds[0, 1], units)
- start = forecast_reference_time.units.convert(
- forecast_reference_time.points[0], units)
- forecast_period = end - start
-
- return forecast_period
-
-
-class Rule(object):
- """
- A collection of condition expressions and their associated action expressions.
-
- Example rule::
-
- IF
- f.lbuser[6] == 2
- f.lbuser[3] == 101
- THEN
- CMAttribute('standard_name', 'sea_water_potential_temperature')
- CMAttribute('units', 'Celsius')
-
- .. deprecated:: 1.10
-
- """
- def __init__(self, conditions, actions):
- """Create instance methods from our conditions and actions."""
- if _enable_rules_deprecations:
- warn_deprecated(
- "the `iris.fileformats.rules.Rule class is deprecated.")
- if not hasattr(conditions, '__iter__'):
- raise TypeError('Variable conditions should be iterable, got: '+ type(conditions))
- if not hasattr(actions, '__iter__'):
- raise TypeError('Variable actions should be iterable, got: '+ type(actions))
-
- self._conditions = conditions
- self._actions = actions
- self._exec_actions = []
-
- self.id = str(hash((tuple(self._conditions), tuple(self._actions))))
-
- for i, condition in enumerate(conditions):
- self._conditions[i] = condition
-
- # Create the conditions method.
- self._create_conditions_method()
-
- # Create the action methods.
- for i, action in enumerate(self._actions):
- if not action:
- action = 'None'
- self._create_action_method(i, action)
-
- def _create_conditions_method(self):
- # Bundle all the conditions into one big string.
- conditions = '(%s)' % ') and ('.join(self._conditions)
- if not conditions:
- conditions = 'None'
- # Create a method to evaluate the conditions.
- # NB. This creates the name '_f' in the 'compile_locals' namespace,
- # which is then used below.
- code = 'def _f(self, field, f, pp, grib, cm): return %s' % conditions
- rules_globals = _rules_execution_environment()
- compile_locals = {}
- exec(compile(code, '', 'exec'), rules_globals, compile_locals)
- # Make it a method of ours.
- _f = compile_locals['_f']
- self._exec_conditions = six.create_bound_method(_f, self)
-
- @abc.abstractmethod
- def _create_action_method(self, i, action):
- pass
-
- @abc.abstractmethod
- def _process_action_result(self, obj, cube):
- pass
-
- def __repr__(self):
- string = "IF\n"
- string += '\n'.join(self._conditions)
- string += "\nTHEN\n"
- string += '\n'.join(self._actions)
- return string
-
- def evaluates_true(self, cube, field):
- """Returns True if and only if all the conditions evaluate to True for the given field."""
- field = field
- f = field
- pp = field
- grib = field
- cm = cube
-
- try:
- result = self._exec_conditions(field, f, pp, grib, cm)
- except Exception as err:
- print('Condition failed to run conditions: %s : %s' % (self._conditions, err), file=sys.stderr)
- raise err
-
- return result
-
- def _matches_field(self, field):
- """Simple wrapper onto evaluates_true in the case where cube is None."""
- return self.evaluates_true(None, field)
-
- def run_actions(self, cube, field):
- """
- Adds to the given cube based on the return values of all the actions.
-
- """
- # Define the variables which the eval command should be able to see
- f = field
- pp = field
- grib = field
- cm = cube
-
- factories = []
- for i, action in enumerate(self._actions):
- try:
- # Run this action.
- obj = self._exec_actions[i](field, f, pp, grib, cm)
- # Process the return value (if any), e.g a CM object or None.
- action_factory = self._process_action_result(obj, cube)
- if action_factory:
- factories.append(action_factory)
-
- except iris.exceptions.CoordinateNotFoundError as err:
- print('Failed (msg:%(error)s) to find coordinate, perhaps consider running last: %(command)s' % {'command':action, 'error': err}, file=sys.stderr)
- except AttributeError as err:
- print('Failed to get value (%(error)s) to execute: %(command)s' % {'command':action, 'error': err}, file=sys.stderr)
- except Exception as err:
- print('Failed (msg:%(error)s) to run:\n %(command)s\nFrom the rule:\n%(me)r' % {'me':self, 'command':action, 'error': err}, file=sys.stderr)
- raise err
-
- return factories
-
-
-class FunctionRule(Rule):
- """
- A Rule with values returned by its actions.
-
- .. deprecated:: 1.10
-
- """
- def _create_action_method(self, i, action):
- # CM loading style action. Returns an object, such as a coord.
- # Compile a new method for the operation.
- rules_globals = _rules_execution_environment()
- compile_locals = {}
- exec(
- compile(
- 'def _f(self, field, f, pp, grib, cm): return %s' % (action, ),
- '',
- 'exec'),
- rules_globals, compile_locals)
- # Make it a method of ours.
- _f = compile_locals['_f']
- method = six.create_bound_method(_f, self)
- setattr(self, '_exec_action_%d' % (i, ), method)
- # Add to our list of actions.
- self._exec_actions.append(method)
-
- def _process_action_result(self, obj, cube):
- """Process the result of an action."""
-
- factory = None
-
- # NB. The names such as 'CoordAndDims' and 'CellMethod' are defined by
- # the "deferred import" performed by Rule.run_actions() above.
- if isinstance(obj, CoordAndDims):
- obj.add_coord(cube)
-
- #cell methods - not yet implemented
- elif isinstance(obj, CellMethod):
- cube.add_cell_method(obj)
-
- elif isinstance(obj, CMAttribute):
- # Temporary code to deal with invalid standard names from the translation table.
- # TODO: when name is "standard_name" force the value to be a real standard name
- if obj.name == 'standard_name' and obj.value is not None:
- cube.rename(obj.value)
- elif obj.name == 'units':
- # Graceful loading of units.
- try:
- setattr(cube, obj.name, obj.value)
- except ValueError:
- msg = 'Ignoring PP invalid units {!r}'.format(obj.value)
- warnings.warn(msg)
- cube.attributes['invalid_units'] = obj.value
- cube.units = cf_units._UNKNOWN_UNIT_STRING
- else:
- setattr(cube, obj.name, obj.value)
-
- elif isinstance(obj, CMCustomAttribute):
- cube.attributes[obj.name] = obj.value
-
- elif isinstance(obj, Factory):
- factory = obj
-
- elif isinstance(obj, DebugString):
- print(obj)
-
- # The function returned nothing, like the pp save actions, "lbft = 3"
- elif obj is None:
- pass
-
- else:
- raise Exception("Object could not be added to cube. Unknown type: " + obj.__class__.__name__)
-
- return factory
-
-
-class ProcedureRule(Rule):
- """
- A Rule with nothing returned by its actions.
-
- .. deprecated:: 1.10
-
- """
- def _create_action_method(self, i, action):
- # PP saving style action. No return value, e.g. "pp.lbft = 3".
- rules_globals = _rules_execution_environment()
- compile_locals = {}
- exec(compile('def _f(self, field, f, pp, grib, cm): %s' % (action, ),
- '',
- 'exec'),
- rules_globals, compile_locals)
- # Make it a method of ours.
- _f = compile_locals['_f']
- method = six.create_bound_method(_f, self)
- setattr(self, '_exec_action_%d' % (i, ), method)
- # Add to our list of actions.
- self._exec_actions.append(method)
-
- def _process_action_result(self, obj, cube):
- # This should always be None, as our rules won't create anything.
- pass
-
- def conditional_warning(self, condition, warning):
- pass # without this pass statement it alsp print, " Args:" on a new line.
- if condition:
- warnings.warn(warning)
-
-
-class RulesContainer(object):
- """
- A collection of :class:`Rule` instances, with the ability to read rule
- definitions from files and run the rules against given fields.
-
- .. deprecated:: 1.10
-
- """
- def __init__(self, filepath=None, rule_type=FunctionRule):
- """Create a new rule set, optionally adding rules from the specified file.
-
- The rule_type defaults to :class:`FunctionRule`,
- e.g for CM loading actions that return objects, such as *AuxCoord(...)*
-
- rule_type can also be set to :class:`ProcedureRule`
- e.g for PP saving actions that do not return anything, such as *pp.lbuser[3] = 16203*
- """
- if _enable_rules_deprecations:
- warn_deprecated(
- "the `iris.fileformats.rules.RulesContainer class is deprecated.")
- self._rules = []
- self.rule_type = rule_type
- if filepath is not None:
- self.import_rules(filepath)
-
- def import_rules(self, filepath):
- """Extend the rule collection with the rules defined in the specified file."""
- # Define state constants
- IN_CONDITION = 1
- IN_ACTION = 2
-
- rule_file = os.path.expanduser(filepath)
- conditions = []
- actions = []
- state = None
-
- with open(rule_file, 'r') as file:
- for line in file:
- line = line.rstrip()
- if line == "IF":
- if conditions and actions:
- self._rules.append(self.rule_type(conditions, actions))
- conditions = []
- actions = []
- state = IN_CONDITION
- elif line == "THEN":
- state = IN_ACTION
- elif len(line) == 0:
- pass
- elif line.strip().startswith('#'):
- pass
- elif state == IN_CONDITION:
- conditions.append(line)
- elif state == IN_ACTION:
- actions.append(line)
- else:
- raise Exception('Rule file not read correctly at line: ' +
- line)
- if conditions and actions:
- self._rules.append(self.rule_type(conditions, actions))
-
- def verify(self, cube, field):
- """
- Add to the given :class:`iris.cube.Cube` by running this set of
- rules with the given field.
-
- Args:
-
- * cube:
- An instance of :class:`iris.cube.Cube`.
- * field:
- A field object relevant to the rule set.
-
- Returns: (cube, matching_rules)
-
- * cube - the resultant cube
- * matching_rules - a list of rules which matched
-
- """
- matching_rules = []
- factories = []
- for rule in self._rules:
- if rule.evaluates_true(cube, field):
- matching_rules.append(rule)
- rule_factories = rule.run_actions(cube, field)
- if rule_factories:
- factories.extend(rule_factories)
- return RuleResult(cube, matching_rules, factories)
-
-
def scalar_coord(cube, coord_name):
"""Try to find a single-valued coord with the given name."""
found_coord = None
@@ -780,7 +188,7 @@ def _dereference_args(factory, reference_targets, regrid_cache, cube):
def _regrid_to_target(src_cube, target_coords, target_cube):
# Interpolate onto the target grid.
sample_points = [(coord, coord.points) for coord in target_coords]
- result_cube = regrid_linear(src_cube, sample_points)
+ result_cube = src_cube.interpolate(sample_points, Linear())
# Any scalar coords on the target_cube will have become vector
# coords on the resample src_cube (i.e. result_cube).
@@ -848,10 +256,9 @@ def _ensure_aligned(regrid_cache, src_cube, target_cube):
_loader_attrs = ('field_generator', 'field_generator_kwargs',
- 'converter', 'legacy_custom_rules')
+ 'converter')
class Loader(collections.namedtuple('Loader', _loader_attrs)):
- def __new__(cls, field_generator, field_generator_kwargs, converter,
- legacy_custom_rules=None):
+ def __new__(cls, field_generator, field_generator_kwargs, converter):
"""
Create a definition of a field-based Cube loader.
@@ -867,21 +274,9 @@ def __new__(cls, field_generator, field_generator_kwargs, converter,
* converter
A callable that converts a field object into a Cube.
- Kwargs:
-
- * legacy_custom_rules
- An object with a callable `verify` attribute with two
- parameters: (cube, field). Legacy method for modifying
- Cubes during the load process. Default is None.
-
- .. deprecated:: 1.9
-
"""
- if legacy_custom_rules is not None:
- warn_deprecated('The `legacy_custom_rules` attribute is '
- 'deprecated.')
return tuple.__new__(cls, (field_generator, field_generator_kwargs,
- converter, legacy_custom_rules))
+ converter))
ConversionMetadata = collections.namedtuple('ConversionMetadata',
@@ -1021,13 +416,7 @@ def _generate_all_fields_and_filenames():
yield (field, filename)
def loadcubes_user_callback_wrapper(cube, field, filename):
- # First run any custom user-provided rules.
- if loader.legacy_custom_rules:
- warn_deprecated('The `legacy_custom_rules` attribute of '
- 'the `loader` is deprecated.')
- loader.legacy_custom_rules.verify(cube, field)
-
- # Then also run user-provided original callback function.
+ # Run user-provided original callback function.
result = cube
if user_callback is not None:
result = user_callback(cube, field, filename)
@@ -1039,4 +428,3 @@ def loadcubes_user_callback_wrapper(cube, field, filename):
converter=loader.converter,
user_callback_wrapper=loadcubes_user_callback_wrapper):
yield cube
-
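With the private `regrid_linear` helper gone, `_regrid_to_target` routes through the public interpolation API instead; the (coord, points) sample pairs carry over unchanged. A self-contained sketch of the equivalent public call, on a toy cube:

    import numpy as np
    from iris.analysis import Linear
    from iris.coords import DimCoord
    from iris.cube import Cube

    src_cube = Cube(np.arange(4.0), long_name='speed')
    src_cube.add_dim_coord(
        DimCoord([0.0, 90.0, 180.0, 270.0], standard_name='longitude',
                 units='degrees'), 0)

    # Public replacement for the removed regrid_linear(src_cube, ...) call:
    sample_points = [('longitude', [45.0, 135.0])]
    result = src_cube.interpolate(sample_points, Linear())
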
diff --git a/lib/iris/fileformats/um/_fast_load.py b/lib/iris/fileformats/um/_fast_load.py
index 9f377af548..94255e8f18 100644
--- a/lib/iris/fileformats/um/_fast_load.py
+++ b/lib/iris/fileformats/um/_fast_load.py
@@ -129,11 +129,12 @@ def _convert_collation(collation):
"""
from iris.fileformats.rules import ConversionMetadata
- from iris.fileformats.pp_rules import (_convert_time_coords,
- _convert_vertical_coords,
- _convert_scalar_realization_coords,
- _convert_scalar_pseudo_level_coords,
- _all_other_rules)
+ from iris.fileformats.pp_load_rules import \
+ (_convert_time_coords,
+ _convert_vertical_coords,
+ _convert_scalar_realization_coords,
+ _convert_scalar_pseudo_level_coords,
+ _all_other_rules)
# For all the scalar conversions, all fields in the collation will
# give the same result, so the choice is arbitrary.
diff --git a/lib/iris/fileformats/um/_fast_load_structured_fields.py b/lib/iris/fileformats/um/_fast_load_structured_fields.py
index 1192394134..b9f596fd0e 100644
--- a/lib/iris/fileformats/um/_fast_load_structured_fields.py
+++ b/lib/iris/fileformats/um/_fast_load_structured_fields.py
@@ -283,7 +283,7 @@ def group_structured_fields(field_iterator):
* the same for all fields,
* completely irrelevant, or
* used by a vectorised rule function (such as
- :func:`iris.fileformats.pp_rules._convert_vector_time_coords`).
+ :func:`iris.fileformats.pp_load_rules._convert_time_coords`).
Returns:
A generator of FieldCollation objects, each of which contains a single
diff --git a/lib/iris/pandas.py b/lib/iris/pandas.py
index 64334f4818..164fdd7859 100644
--- a/lib/iris/pandas.py
+++ b/lib/iris/pandas.py
@@ -143,15 +143,18 @@ def _assert_shared(np_obj, pandas_obj):
if base is None:
base = pandas_obj.values
- # Chase the stack of NumPy `base` references back to see if any of
- # them are our original array.
- while base is not None:
- if base is np_obj:
- return
- # Take the next step up the stack of `base` references.
- base = base.base
- msg = 'Pandas {} does not share memory'.format(type(pandas_obj).__name__)
- raise AssertionError(msg)
+ def _get_base(array):
+ # Chase the stack of NumPy `base` references back to the original array
+ while array.base is not None:
+ array = array.base
+ return array
+
+ base = _get_base(base)
+ np_base = _get_base(np_obj)
+ if base is not np_base:
+ msg = ('Pandas {} does not share memory'
+ .format(type(pandas_obj).__name__))
+ raise AssertionError(msg)
def as_series(cube, copy=True):
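The rewritten check walks both arrays back to their ultimate `base` before comparing, so two different views of one buffer now compare as shared even when neither is the other's direct base. The idea in plain NumPy, as a sketch:

    import numpy as np

    def ultimate_base(array):
        # Chase the stack of `base` references back to the memory owner.
        while array.base is not None:
            array = array.base
        return array

    original = np.arange(10)
    view_a = original[2:8]
    view_b = original.reshape(2, 5)
    assert view_a is not view_b
    assert ultimate_base(view_a) is ultimate_base(view_b) is original
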
diff --git a/lib/iris/proxy.py b/lib/iris/proxy.py
deleted file mode 100644
index 506995369c..0000000000
--- a/lib/iris/proxy.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# (C) British Crown Copyright 2010 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""
-.. deprecated:: 1.9
- This module has been deprecated. Please use lazy imports instead.
-
-Provision of a service to handle missing packages at runtime.
-Current just a very thin layer but gives the option to extend
-handling as much as needed
-
-"""
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-import sys
-
-from iris._deprecation import warn_deprecated
-
-
-warn_deprecated('iris.proxy is deprecated in Iris v1.9. Please use lazy '
- 'imports instead.')
-
-
-class FakeModule(object):
- __slots__ = ('_name',)
-
- def __init__(self, name):
- self._name = name
-
- def __setattr__(self, name, value):
- object.__setattr__(self, name, value)
-
- def __getattr__(self, name):
- raise AttributeError(
- 'Module "{}" not available or not installed'.format(self._name))
-
-
-def apply_proxy(module_name, dic):
- """
- Attempt the import else use the proxy module.
- It is important to note that '__import__()' must be used
- instead of the higher-level 'import' as we need to
- ensure the scope of the import can be propagated out of this package.
- Also, note the splitting of name - this is because '__import__()'
- requires full package path, unlike 'import' (this issue is
- explicitly seen in lib/iris/fileformats/pp.py importing pp_packing)
-
- """
- name = module_name.split('.')[-1]
- try:
- __import__(module_name)
- dic[name] = sys.modules[module_name]
- except ImportError:
- dic[name] = sys.modules[name] = FakeModule(name)
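With `iris.proxy` deleted, optional dependencies follow the plain lazy-import idiom its deprecation notice recommended: import at the point of use and degrade gracefully. The pattern the test modules below are switched to, sketched:

    import unittest

    try:
        import pyugrid  # optional dependency; may be absent
    except ImportError:
        pyugrid = None

    skip_pyugrid = unittest.skipIf(
        pyugrid is None, "Test(s) require 'pyugrid', which is not available.")

    @skip_pyugrid
    class TestUgridAvailable(unittest.TestCase):
        def test_available(self):
            self.assertIsNotNone(pyugrid)
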
diff --git a/lib/iris/tests/analysis/test_interpolate.py b/lib/iris/tests/analysis/test_interpolate.py
deleted file mode 100644
index ece73d16d1..0000000000
--- a/lib/iris/tests/analysis/test_interpolate.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# (C) British Crown Copyright 2013 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""
-Test the iris.analysis.interpolate module.
-
-"""
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import iris tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-import numpy as np
-
-import iris.analysis._interpolate_private as interpolate
-from iris.coords import DimCoord
-from iris.cube import Cube
-from iris.tests.test_interpolation import normalise_order
-
-
-class Test_linear__circular_wrapping(tests.IrisTest):
- def _create_cube(self, longitudes):
- # Return a Cube with circular longitude with the given values.
- data = np.arange(12).reshape((3, 4)) * 0.1
- cube = Cube(data)
- lon = DimCoord(longitudes, standard_name='longitude',
- units='degrees', circular=True)
- cube.add_dim_coord(lon, 1)
- return cube
-
- def test_symmetric(self):
- # Check we can interpolate from a Cube defined over [-180, 180).
- cube = self._create_cube([-180, -90, 0, 90])
- samples = [('longitude', np.arange(-360, 720, 45))]
- result = interpolate.linear(cube, samples, extrapolation_mode='nan')
- normalise_order(result)
- self.assertCMLApproxData(result, ('analysis', 'interpolation',
- 'linear', 'circular_wrapping',
- 'symmetric'))
-
- def test_positive(self):
- # Check we can interpolate from a Cube defined over [0, 360).
- cube = self._create_cube([0, 90, 180, 270])
- samples = [('longitude', np.arange(-360, 720, 45))]
- result = interpolate.linear(cube, samples, extrapolation_mode='nan')
- normalise_order(result)
- self.assertCMLApproxData(result, ('analysis', 'interpolation',
- 'linear', 'circular_wrapping',
- 'positive'))
-
-
-if __name__ == "__main__":
- tests.main()
diff --git a/lib/iris/tests/experimental/regrid/test_regrid_conservative_via_esmpy.py b/lib/iris/tests/experimental/regrid/test_regrid_conservative_via_esmpy.py
index 4f4d8e5b6e..1e1de2ba9f 100644
--- a/lib/iris/tests/experimental/regrid/test_regrid_conservative_via_esmpy.py
+++ b/lib/iris/tests/experimental/regrid/test_regrid_conservative_via_esmpy.py
@@ -36,8 +36,6 @@
# Import ESMF if installed, else fail quietly + disable all the tests.
try:
import ESMF
- # Check it *is* the real module, and not an iris.proxy FakeModule.
- ESMF.Manager
except (ImportError, AttributeError):
ESMF = None
skip_esmf = unittest.skipIf(
diff --git a/lib/iris/tests/experimental/ugrid/test_ugrid.py b/lib/iris/tests/experimental/ugrid/test_ugrid.py
index 30356a501d..617f826bd2 100644
--- a/lib/iris/tests/experimental/ugrid/test_ugrid.py
+++ b/lib/iris/tests/experimental/ugrid/test_ugrid.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2014 - 2015, Met Office
+# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -29,8 +29,6 @@
# Import pyugrid if installed, else fail quietly + disable all the tests.
try:
import pyugrid
- # Check it *is* the real module, and not an iris.proxy FakeModule.
- pyugrid.ugrid
except (ImportError, AttributeError):
pyugrid = None
skip_pyugrid = unittest.skipIf(
diff --git a/lib/iris/tests/integration/test_pp.py b/lib/iris/tests/integration/test_pp.py
index 235205d591..0cc0868b1d 100644
--- a/lib/iris/tests/integration/test_pp.py
+++ b/lib/iris/tests/integration/test_pp.py
@@ -31,7 +31,8 @@
from iris.coords import AuxCoord, CellMethod, DimCoord
from iris.cube import Cube
import iris.fileformats.pp
-import iris.fileformats.pp_rules
+import iris.fileformats.pp_load_rules
+from iris.fileformats.pp_save_rules import verify
from iris.exceptions import IgnoreCubeException
from iris.tests import mock
from iris.fileformats.pp import load_pairs_from_fields
@@ -73,8 +74,7 @@ def test_soil_level_round_trip(self):
field.lbvc = 0
field.brsvd = [None] * 4
field.brlev = None
- iris.fileformats.pp._ensure_save_rules_loaded()
- iris.fileformats.pp._save_rules.verify(cube, field)
+ field = verify(cube, field)
# Check the vertical coordinate is as originally specified.
self.assertEqual(field.lbvc, 6)
@@ -111,8 +111,7 @@ def test_soil_depth_round_trip(self):
field.lbvc = 0
field.brlev = None
field.brsvd = [None] * 4
- iris.fileformats.pp._ensure_save_rules_loaded()
- iris.fileformats.pp._save_rules.verify(cube, field)
+ field = verify(cube, field)
# Check the vertical coordinate is as originally specified.
self.assertEqual(field.lbvc, 6)
@@ -144,8 +143,7 @@ def test_potential_temperature_level_round_trip(self):
field = iris.fileformats.pp.PPField3()
field.lbfc = 0
field.lbvc = 0
- iris.fileformats.pp._ensure_save_rules_loaded()
- iris.fileformats.pp._save_rules.verify(cube, field)
+ field = verify(cube, field)
# Check the vertical coordinate is as originally specified.
self.assertEqual(field.lbvc, 19)
@@ -214,15 +212,14 @@ def field_with_data(scale=1):
pressure_field.lbvc = 0
pressure_field.brsvd = [None, None]
pressure_field.lbuser = [None] * 7
- iris.fileformats.pp._ensure_save_rules_loaded()
- iris.fileformats.pp._save_rules.verify(pressure_cube, pressure_field)
+ pressure_field = verify(pressure_cube, pressure_field)
data_field = iris.fileformats.pp.PPField3()
data_field.lbfc = 0
data_field.lbvc = 0
data_field.brsvd = [None, None]
data_field.lbuser = [None] * 7
- iris.fileformats.pp._save_rules.verify(data_cube, data_field)
+ data_field = verify(data_cube, data_field)
# The reference surface field should have STASH=409
self.assertArrayEqual(pressure_field.lbuser,
@@ -306,8 +303,7 @@ def test_hybrid_height_with_non_standard_coords(self):
field.lbvc = 0
field.brsvd = [None, None]
field.lbuser = [None] * 7
- iris.fileformats.pp._ensure_save_rules_loaded()
- iris.fileformats.pp._save_rules.verify(cube, field)
+ field = verify(cube, field)
self.assertEqual(field.blev, delta)
self.assertEqual(field.brlev, delta_lower)
@@ -343,8 +339,7 @@ def test_hybrid_pressure_with_non_standard_coords(self):
field.lbvc = 0
field.brsvd = [None, None]
field.lbuser = [None] * 7
- iris.fileformats.pp._ensure_save_rules_loaded()
- iris.fileformats.pp._save_rules.verify(cube, field)
+ field = verify(cube, field)
self.assertEqual(field.bhlev, delta)
self.assertEqual(field.bhrlev, delta_lower)
@@ -409,8 +404,7 @@ def field_with_data(scale=1):
data_field.lbvc = 0
data_field.brsvd = [None, None]
data_field.lbuser = [None] * 7
- iris.fileformats.pp._ensure_save_rules_loaded()
- iris.fileformats.pp._save_rules.verify(data_cube, data_field)
+ data_field = verify(data_cube, data_field)
# Check the data field has the vertical coordinate as originally
# specified.
@@ -446,8 +440,7 @@ def convert_cube_to_field(self, cube):
field.lbfc = 0
field.lbvc = 0
field.lbtim = 0
- iris.fileformats.pp._ensure_save_rules_loaded()
- iris.fileformats.pp._save_rules.verify(cube, field)
+ field = verify(cube, field)
return field
def test_time_mean_from_forecast_period(self):
@@ -633,9 +626,7 @@ def create_cube(self, longitude_coord='longitude'):
def convert_cube_to_field(self, cube):
field = iris.fileformats.pp.PPField3()
field.lbvc = 0
- iris.fileformats.pp._ensure_save_rules_loaded()
- iris.fileformats.pp._save_rules.verify(cube, field)
- return field
+ return verify(cube, field)
def test_time_mean_only(self):
cube = self.create_cube()
diff --git a/lib/iris/tests/integration/test_pp_constrained_load_cubes.py b/lib/iris/tests/integration/test_pp_constrained_load_cubes.py
index 995506d5a3..ac3d02ea51 100644
--- a/lib/iris/tests/integration/test_pp_constrained_load_cubes.py
+++ b/lib/iris/tests/integration/test_pp_constrained_load_cubes.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2014 - 2015, Met Office
+# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -25,7 +25,7 @@
import iris
from iris.fileformats import pp
-from iris.fileformats.pp_rules import convert
+from iris.fileformats.pp_load_rules import convert
from iris.fileformats.rules import load_cubes
diff --git a/lib/iris/tests/integration/test_regrid_equivalence.py b/lib/iris/tests/integration/test_regrid_equivalence.py
index 9b65ea761c..a87c3684cd 100644
--- a/lib/iris/tests/integration/test_regrid_equivalence.py
+++ b/lib/iris/tests/integration/test_regrid_equivalence.py
@@ -30,7 +30,6 @@
import numpy as np
import iris
-from iris.analysis._interpolate_private import regrid
from iris.analysis import Nearest
from iris.cube import Cube
from iris.coords import AuxCoord, DimCoord
@@ -157,19 +156,8 @@ def test_wrapping_non_circular(self):
[3., 4., 5.]]
src_cube = grid_cube(src_x, src_y, data)
dst_cube = grid_cube(dst_x, dst_y)
- # Account for a behavioural difference in this case :
- # The Nearest scheme does wrapping of modular coordinate values.
- # Thus target of 352.0 --> -8.0, which is nearest to -10.
- # This looks just like "circular" handling, but only because it happens
- # to produce the same results *for nearest-neighbour in particular*.
- if isinstance(self, TestInterpolateRegridNearest):
- # interpolate.regrid --> Wrapping-free results (non-circular).
- expected_result = [[3., 3., 4., 4., 5., 5., 5., 5.],
- [3., 3., 4., 4., 5., 5., 5., 5.]]
- else:
- # cube regrid --> Wrapped results.
- expected_result = [[4., 3., 4., 4., 5., 5., 3., 4.],
- [4., 3., 4., 4., 5., 5., 3., 4.]]
+ expected_result = [[4., 3., 4., 4., 5., 5., 3., 4.],
+ [4., 3., 4., 4., 5., 5., 3., 4.]]
_debug_data(src_cube, "noncircular SOURCE")
result_cube = self.regrid(src_cube, dst_cube)
_debug_data(result_cube, "noncircular RESULT")
@@ -240,18 +228,6 @@ def test_source_nan(self):
self.assertArrayEqual(result_cube.data, expected_result)
-# perform identical tests on the old + new approaches
-class TestInterpolateRegridNearest(MixinCheckingCode, tests.IrisTest):
- def regrid(self, src_cube, dst_cube,
- translate_nans_to_mask=False, **kwargs):
- result = regrid(src_cube, dst_cube, mode='nearest')
- data = result.data
- if translate_nans_to_mask and np.any(np.isnan(data)):
- data = np.ma.masked_array(data, mask=np.isnan(data))
- result.data = data
- return result
-
-
class TestCubeRegridNearest(MixinCheckingCode, tests.IrisTest):
scheme = Nearest(extrapolation_mode='extrapolate')
diff --git a/lib/iris/tests/unit/analysis/interpolate_private/__init__.py b/lib/iris/tests/integration/um/__init__.py
similarity index 85%
rename from lib/iris/tests/unit/analysis/interpolate_private/__init__.py
rename to lib/iris/tests/integration/um/__init__.py
index bb8f1ebc4a..e012485f2c 100644
--- a/lib/iris/tests/unit/analysis/interpolate_private/__init__.py
+++ b/lib/iris/tests/integration/um/__init__.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2016, Met Office
+# (C) British Crown Copyright 2016 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -14,7 +14,7 @@
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""Unit tests for the :mod:`iris.analysis._interpolate_private` module."""
+"""Integration tests for :mod:`iris.fileformats.um` fast load functions."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
diff --git a/lib/iris/tests/experimental/test_fieldsfile.py b/lib/iris/tests/integration/um/test_fieldsfile.py
similarity index 77%
rename from lib/iris/tests/experimental/test_fieldsfile.py
rename to lib/iris/tests/integration/um/test_fieldsfile.py
index c97eb2ef94..5f9a0c91c6 100644
--- a/lib/iris/tests/experimental/test_fieldsfile.py
+++ b/lib/iris/tests/integration/um/test_fieldsfile.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2014 - 2016, Met Office
+# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -25,8 +25,9 @@
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
+from iris.cube import CubeList
-from iris.experimental.fieldsfile import load
+from iris.fileformats.um import load_cubes as load
@tests.skip_data
@@ -34,14 +35,20 @@ class TestStructuredLoadFF(tests.IrisTest):
def setUp(self):
self.fname = tests.get_data_path(('FF', 'structured', 'small'))
+ def _merge_cubes(self, cubes):
+ # Merge the 2D cubes returned by `iris.fileformats.um.load_cubes`.
+ return CubeList(cubes).merge_cube()
+
def test_simple(self):
- cube, = load(self.fname)
+ list_of_cubes = list(load(self.fname, None))
+ cube = self._merge_cubes(list_of_cubes)
self.assertCML(cube)
def test_simple_callback(self):
def callback(cube, field, filename):
cube.attributes['processing'] = 'fast-ff'
- cube, = load(self.fname, callback=callback)
+ list_of_cubes = list(load(self.fname, callback=callback))
+ cube = self._merge_cubes(list_of_cubes)
self.assertCML(cube)
@@ -51,7 +58,7 @@ def setUp(self):
self.fname = tests.get_data_path(('PP', 'structured', 'small.pp'))
def test_simple(self):
- [cube] = load(self.fname)
+ [cube] = load(self.fname, None)
self.assertCML(cube)
def test_simple_callback(self):
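Aside: unlike the old `iris.experimental.fieldsfile.load`, `iris.fileformats.um.load_cubes` returns an iterable of raw 2D field cubes (its second positional argument is the callback), so callers now merge the results themselves. A minimal sketch of the new call pattern, assuming a structured FieldsFile at the hypothetical path 'my_file.ff'::

    from iris.cube import CubeList
    from iris.fileformats.um import load_cubes

    # load_cubes yields one 2D cube per field; merge them back together.
    raw_cubes = list(load_cubes('my_file.ff', None))  # None = no callback
    cube = CubeList(raw_cubes).merge_cube()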
diff --git a/lib/iris/tests/results/analysis/first_quartile_foo_1d_fast_percentile.cml b/lib/iris/tests/results/analysis/first_quartile_foo_1d_fast_percentile.cml
new file mode 100644
index 0000000000..13dff8ffe7
--- /dev/null
+++ b/lib/iris/tests/results/analysis/first_quartile_foo_1d_fast_percentile.cml
@@ -0,0 +1,15 @@
+ [15 lines of CML result XML elided]
diff --git a/lib/iris/tests/results/analysis/first_quartile_foo_2d_fast_percentile.cml b/lib/iris/tests/results/analysis/first_quartile_foo_2d_fast_percentile.cml
new file mode 100644
index 0000000000..8d04577937
--- /dev/null
+++ b/lib/iris/tests/results/analysis/first_quartile_foo_2d_fast_percentile.cml
@@ -0,0 +1,20 @@
+ [20 lines of CML result XML elided]
diff --git a/lib/iris/tests/results/analysis/first_quartile_foo_bar_2d_fast_percentile.cml b/lib/iris/tests/results/analysis/first_quartile_foo_bar_2d_fast_percentile.cml
new file mode 100644
index 0000000000..c9e86317bd
--- /dev/null
+++ b/lib/iris/tests/results/analysis/first_quartile_foo_bar_2d_fast_percentile.cml
@@ -0,0 +1,18 @@
+ [18 lines of CML result XML elided]
diff --git a/lib/iris/tests/results/analysis/last_quartile_foo_3d_notmasked_fast_percentile.cml b/lib/iris/tests/results/analysis/last_quartile_foo_3d_notmasked_fast_percentile.cml
new file mode 100644
index 0000000000..9eaf7d59b7
--- /dev/null
+++ b/lib/iris/tests/results/analysis/last_quartile_foo_3d_notmasked_fast_percentile.cml
@@ -0,0 +1,21 @@
+ [21 lines of CML result XML elided]
diff --git a/lib/iris/tests/results/analysis/third_quartile_foo_1d_fast_percentile.cml b/lib/iris/tests/results/analysis/third_quartile_foo_1d_fast_percentile.cml
new file mode 100644
index 0000000000..8a0f1e479d
--- /dev/null
+++ b/lib/iris/tests/results/analysis/third_quartile_foo_1d_fast_percentile.cml
@@ -0,0 +1,15 @@
+ [15 lines of CML result XML elided]
diff --git a/lib/iris/tests/results/experimental/fieldsfile/TestStructuredLoadFF/simple.cml b/lib/iris/tests/results/integration/um/fieldsfile/TestStructuredLoadFF/simple.cml
similarity index 98%
rename from lib/iris/tests/results/experimental/fieldsfile/TestStructuredLoadFF/simple.cml
rename to lib/iris/tests/results/integration/um/fieldsfile/TestStructuredLoadFF/simple.cml
index 9d985a7ae8..18d641f1cc 100644
--- a/lib/iris/tests/results/experimental/fieldsfile/TestStructuredLoadFF/simple.cml
+++ b/lib/iris/tests/results/integration/um/fieldsfile/TestStructuredLoadFF/simple.cml
@@ -54,7 +54,7 @@
- [changed CML line elided]
diff --git a/lib/iris/tests/results/experimental/fieldsfile/TestStructuredLoadFF/simple_callback.cml b/lib/iris/tests/results/integration/um/fieldsfile/TestStructuredLoadFF/simple_callback.cml
similarity index 98%
rename from lib/iris/tests/results/experimental/fieldsfile/TestStructuredLoadFF/simple_callback.cml
rename to lib/iris/tests/results/integration/um/fieldsfile/TestStructuredLoadFF/simple_callback.cml
index daf98f9dbd..1595e66821 100644
--- a/lib/iris/tests/results/experimental/fieldsfile/TestStructuredLoadFF/simple_callback.cml
+++ b/lib/iris/tests/results/integration/um/fieldsfile/TestStructuredLoadFF/simple_callback.cml
@@ -55,7 +55,7 @@
- [changed CML line elided]
diff --git a/lib/iris/tests/results/experimental/fieldsfile/TestStructuredLoadPP/simple.cml b/lib/iris/tests/results/integration/um/fieldsfile/TestStructuredLoadPP/simple.cml
similarity index 100%
rename from lib/iris/tests/results/experimental/fieldsfile/TestStructuredLoadPP/simple.cml
rename to lib/iris/tests/results/integration/um/fieldsfile/TestStructuredLoadPP/simple.cml
diff --git a/lib/iris/tests/results/experimental/fieldsfile/TestStructuredLoadPP/simple_callback.cml b/lib/iris/tests/results/integration/um/fieldsfile/TestStructuredLoadPP/simple_callback.cml
similarity index 100%
rename from lib/iris/tests/results/experimental/fieldsfile/TestStructuredLoadPP/simple_callback.cml
rename to lib/iris/tests/results/integration/um/fieldsfile/TestStructuredLoadPP/simple_callback.cml
diff --git a/lib/iris/tests/results/pp_rules/global.cml b/lib/iris/tests/results/pp_load_rules/global.cml
similarity index 100%
rename from lib/iris/tests/results/pp_rules/global.cml
rename to lib/iris/tests/results/pp_load_rules/global.cml
diff --git a/lib/iris/tests/results/pp_rules/lbproc_mean_max_min.cml b/lib/iris/tests/results/pp_load_rules/lbproc_mean_max_min.cml
similarity index 100%
rename from lib/iris/tests/results/pp_rules/lbproc_mean_max_min.cml
rename to lib/iris/tests/results/pp_load_rules/lbproc_mean_max_min.cml
diff --git a/lib/iris/tests/results/pp_rules/lbtim_2.cml b/lib/iris/tests/results/pp_load_rules/lbtim_2.cml
similarity index 100%
rename from lib/iris/tests/results/pp_rules/lbtim_2.cml
rename to lib/iris/tests/results/pp_load_rules/lbtim_2.cml
diff --git a/lib/iris/tests/results/pp_rules/ocean_depth.cml b/lib/iris/tests/results/pp_load_rules/ocean_depth.cml
similarity index 100%
rename from lib/iris/tests/results/pp_rules/ocean_depth.cml
rename to lib/iris/tests/results/pp_load_rules/ocean_depth.cml
diff --git a/lib/iris/tests/results/pp_rules/ocean_depth_bounded.cml b/lib/iris/tests/results/pp_load_rules/ocean_depth_bounded.cml
similarity index 100%
rename from lib/iris/tests/results/pp_rules/ocean_depth_bounded.cml
rename to lib/iris/tests/results/pp_load_rules/ocean_depth_bounded.cml
diff --git a/lib/iris/tests/results/pp_rules/rotated_uk.cml b/lib/iris/tests/results/pp_load_rules/rotated_uk.cml
similarity index 100%
rename from lib/iris/tests/results/pp_rules/rotated_uk.cml
rename to lib/iris/tests/results/pp_load_rules/rotated_uk.cml
diff --git a/lib/iris/tests/results/regrid/airpress_on_theta_0d.cml b/lib/iris/tests/results/regrid/airpress_on_theta_0d.cml
deleted file mode 100644
index 8b52a0eb80..0000000000
--- a/lib/iris/tests/results/regrid/airpress_on_theta_0d.cml
+++ /dev/null
@@ -1,50 +0,0 @@
- [50 lines of CML result XML elided]
diff --git a/lib/iris/tests/results/regrid/airpress_on_theta_1d.cml b/lib/iris/tests/results/regrid/airpress_on_theta_1d.cml
deleted file mode 100644
index 4f91acd589..0000000000
--- a/lib/iris/tests/results/regrid/airpress_on_theta_1d.cml
+++ /dev/null
@@ -1,52 +0,0 @@
- [52 lines of CML result XML elided]
diff --git a/lib/iris/tests/results/regrid/airpress_on_theta_2d.cml b/lib/iris/tests/results/regrid/airpress_on_theta_2d.cml
deleted file mode 100644
index d73d0c4430..0000000000
--- a/lib/iris/tests/results/regrid/airpress_on_theta_2d.cml
+++ /dev/null
@@ -1,53 +0,0 @@
- [53 lines of CML result XML elided]
diff --git a/lib/iris/tests/results/regrid/airpress_on_theta_3d.cml b/lib/iris/tests/results/regrid/airpress_on_theta_3d.cml
deleted file mode 100644
index d8dc722cec..0000000000
--- a/lib/iris/tests/results/regrid/airpress_on_theta_3d.cml
+++ /dev/null
@@ -1,61 +0,0 @@
- [61 lines of CML result XML elided]
diff --git a/lib/iris/tests/results/regrid/bilinear_larger.cml b/lib/iris/tests/results/regrid/bilinear_larger.cml
deleted file mode 100644
index ceb4b1b91e..0000000000
--- a/lib/iris/tests/results/regrid/bilinear_larger.cml
+++ /dev/null
@@ -1,19 +0,0 @@
- [19 lines of CML result XML elided]
diff --git a/lib/iris/tests/results/regrid/bilinear_larger_lon_extrapolate_left.cml b/lib/iris/tests/results/regrid/bilinear_larger_lon_extrapolate_left.cml
deleted file mode 100644
index d77ba3604b..0000000000
--- a/lib/iris/tests/results/regrid/bilinear_larger_lon_extrapolate_left.cml
+++ /dev/null
@@ -1,19 +0,0 @@
- [19 lines of CML result XML elided]
diff --git a/lib/iris/tests/results/regrid/bilinear_larger_lon_extrapolate_right.cml b/lib/iris/tests/results/regrid/bilinear_larger_lon_extrapolate_right.cml
deleted file mode 100644
index b2da5439f8..0000000000
--- a/lib/iris/tests/results/regrid/bilinear_larger_lon_extrapolate_right.cml
+++ /dev/null
@@ -1,19 +0,0 @@
- [19 lines of CML result XML elided]
diff --git a/lib/iris/tests/results/regrid/bilinear_smaller.cml b/lib/iris/tests/results/regrid/bilinear_smaller.cml
deleted file mode 100644
index 68be3270dd..0000000000
--- a/lib/iris/tests/results/regrid/bilinear_smaller.cml
+++ /dev/null
@@ -1,19 +0,0 @@
- [19 lines of CML result XML elided]
diff --git a/lib/iris/tests/results/regrid/bilinear_smaller_lon_align_left.cml b/lib/iris/tests/results/regrid/bilinear_smaller_lon_align_left.cml
deleted file mode 100644
index c6fe8baa6e..0000000000
--- a/lib/iris/tests/results/regrid/bilinear_smaller_lon_align_left.cml
+++ /dev/null
@@ -1,19 +0,0 @@
- [19 lines of CML result XML elided]
diff --git a/lib/iris/tests/results/regrid/bilinear_smaller_lon_align_right.cml b/lib/iris/tests/results/regrid/bilinear_smaller_lon_align_right.cml
deleted file mode 100644
index bece6ca380..0000000000
--- a/lib/iris/tests/results/regrid/bilinear_smaller_lon_align_right.cml
+++ /dev/null
@@ -1,19 +0,0 @@
- [19 lines of CML result XML elided]
diff --git a/lib/iris/tests/results/regrid/theta_on_airpress_0d.cml b/lib/iris/tests/results/regrid/theta_on_airpress_0d.cml
deleted file mode 100644
index 380dc48af7..0000000000
--- a/lib/iris/tests/results/regrid/theta_on_airpress_0d.cml
+++ /dev/null
@@ -1,50 +0,0 @@
- [50 lines of CML result XML elided]
diff --git a/lib/iris/tests/results/regrid/theta_on_airpress_1d.cml b/lib/iris/tests/results/regrid/theta_on_airpress_1d.cml
deleted file mode 100644
index c1357d7b0a..0000000000
--- a/lib/iris/tests/results/regrid/theta_on_airpress_1d.cml
+++ /dev/null
@@ -1,53 +0,0 @@
- [53 lines of CML result XML elided]
diff --git a/lib/iris/tests/results/regrid/theta_on_airpress_2d.cml b/lib/iris/tests/results/regrid/theta_on_airpress_2d.cml
deleted file mode 100644
index cf04c386e7..0000000000
--- a/lib/iris/tests/results/regrid/theta_on_airpress_2d.cml
+++ /dev/null
@@ -1,56 +0,0 @@
- [56 lines of CML result XML elided]
diff --git a/lib/iris/tests/results/regrid/theta_on_airpress_3d.cml b/lib/iris/tests/results/regrid/theta_on_airpress_3d.cml
deleted file mode 100644
index 65d19cf14e..0000000000
--- a/lib/iris/tests/results/regrid/theta_on_airpress_3d.cml
+++ /dev/null
@@ -1,64 +0,0 @@
- [64 lines of CML result XML elided]
diff --git a/lib/iris/tests/test_analysis.py b/lib/iris/tests/test_analysis.py
index 091670972b..8ad41e5270 100644
--- a/lib/iris/tests/test_analysis.py
+++ b/lib/iris/tests/test_analysis.py
@@ -348,102 +348,157 @@ def test_multi_coord_mdtol(self):
class TestAggregators(tests.IrisTest):
- def test_percentile_1d(self):
+
+ def _check_collapsed_percentile(self, cube, percents, collapse_coord,
+ expected_result, CML_filename=None,
+ **kwargs):
+ expected_result = np.array(expected_result, dtype=np.float32)
+ result = cube.collapsed(collapse_coord, iris.analysis.PERCENTILE,
+ percent=percents, **kwargs)
+ np.testing.assert_array_almost_equal(result.data, expected_result)
+ if CML_filename is not None:
+ self.assertCML(result, ('analysis', CML_filename), checksum=False)
+
+ def _check_percentile(self, data, axis, percents, expected_result,
+ **kwargs):
+ result = iris.analysis._percentile(data, axis, percents, **kwargs)
+ np.testing.assert_array_almost_equal(result, expected_result)
+
+ def test_percentile_1d_25_percent(self):
cube = tests.stock.simple_1d()
+ self._check_collapsed_percentile(
+ cube, 25, 'foo', 2.5, CML_filename='first_quartile_foo_1d.cml')
- first_quartile = cube.collapsed('foo', iris.analysis.PERCENTILE,
- percent=25)
- np.testing.assert_array_almost_equal(first_quartile.data,
- np.array([2.5], dtype=np.float32))
- self.assertCML(first_quartile, ('analysis',
- 'first_quartile_foo_1d.cml'),
- checksum=False)
+ def test_percentile_1d_75_percent(self):
+ cube = tests.stock.simple_1d()
+ self._check_collapsed_percentile(
+ cube, 75, 'foo', 7.5, CML_filename='third_quartile_foo_1d.cml')
- third_quartile = cube.collapsed('foo', iris.analysis.PERCENTILE,
- percent=75)
- np.testing.assert_array_almost_equal(third_quartile.data,
- np.array([7.5],
- dtype=np.float32))
- self.assertCML(third_quartile,
- ('analysis', 'third_quartile_foo_1d.cml'),
- checksum=False)
+ def test_fast_percentile_1d_25_percent(self):
+ cube = tests.stock.simple_1d()
+ self._check_collapsed_percentile(
+ cube, 25, 'foo', 2.5, fast_percentile_method=True,
+ CML_filename='first_quartile_foo_1d_fast_percentile.cml')
+
+ def test_fast_percentile_1d_75_percent(self):
+ cube = tests.stock.simple_1d()
+ self._check_collapsed_percentile(
+ cube, 75, 'foo', 7.5, fast_percentile_method=True,
+ CML_filename='third_quartile_foo_1d_fast_percentile.cml')
- def test_percentile_2d(self):
+ def test_percentile_2d_single_coord(self):
cube = tests.stock.simple_2d()
+ self._check_collapsed_percentile(
+ cube, 25, 'foo', [0.75, 4.75, 8.75],
+ CML_filename='first_quartile_foo_2d.cml')
- first_quartile = cube.collapsed('foo', iris.analysis.PERCENTILE,
- percent=25)
- np.testing.assert_array_almost_equal(first_quartile.data,
- np.array([0.75, 4.75, 8.75],
- dtype=np.float32))
- self.assertCML(first_quartile, ('analysis',
- 'first_quartile_foo_2d.cml'),
- checksum=False)
+ def test_percentile_2d_two_coords(self):
+ cube = tests.stock.simple_2d()
+ self._check_collapsed_percentile(
+ cube, 25, ['foo', 'bar'], [2.75],
+ CML_filename='first_quartile_foo_bar_2d.cml')
- first_quartile = cube.collapsed(('foo', 'bar'),
- iris.analysis.PERCENTILE, percent=25)
- np.testing.assert_array_almost_equal(first_quartile.data,
- np.array([2.75],
- dtype=np.float32))
- self.assertCML(first_quartile, ('analysis',
- 'first_quartile_foo_bar_2d.cml'),
- checksum=False)
+ def test_fast_percentile_2d_single_coord(self):
+ cube = tests.stock.simple_2d()
+ self._check_collapsed_percentile(
+ cube, 25, 'foo', [0.75, 4.75, 8.75], fast_percentile_method=True,
+ CML_filename='first_quartile_foo_2d_fast_percentile.cml')
+
+ def test_fast_percentile_2d_two_coords(self):
+ cube = tests.stock.simple_2d()
+ self._check_collapsed_percentile(
+ cube, 25, ['foo', 'bar'], [2.75], fast_percentile_method=True,
+ CML_filename='first_quartile_foo_bar_2d_fast_percentile.cml')
def test_percentile_3d(self):
array_3d = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
+ expected_result = np.array([[6., 7., 8., 9.],
+ [10., 11., 12., 13.],
+ [14., 15., 16., 17.]],
+ dtype=np.float32)
+ self._check_percentile(array_3d, 0, 50, expected_result)
- last_quartile = iris.analysis._percentile(array_3d, 0, 50)
- np.testing.assert_array_almost_equal(last_quartile,
- np.array([[6., 7., 8., 9.],
- [10., 11., 12., 13.],
- [14., 15., 16., 17.]],
- dtype=np.float32))
+ def test_fast_percentile_3d(self):
+ array_3d = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
+ expected_result = np.array([[6., 7., 8., 9.],
+ [10., 11., 12., 13.],
+ [14., 15., 16., 17.]],
+ dtype=np.float32)
+ self._check_percentile(array_3d, 0, 50, expected_result,
+ fast_percentile_method=True)
def test_percentile_3d_axis_one(self):
array_3d = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
+ expected_result = np.array([[4., 5., 6., 7.],
+ [16., 17., 18., 19.]],
+ dtype=np.float32)
- last_quartile = iris.analysis._percentile(array_3d, 1, 50)
- np.testing.assert_array_almost_equal(last_quartile,
- np.array([[4., 5., 6., 7.],
- [16., 17., 18., 19.]],
- dtype=np.float32))
+ self._check_percentile(array_3d, 1, 50, expected_result)
+
+ def test_fast_percentile_3d_axis_one(self):
+ array_3d = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
+ expected_result = np.array([[4., 5., 6., 7.],
+ [16., 17., 18., 19.]],
+ dtype=np.float32)
+
+ self._check_percentile(array_3d, 1, 50, expected_result,
+ fast_percentile_method=True)
def test_percentile_3d_axis_two(self):
array_3d = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
+ expected_result = np.array([[1.5, 5.5, 9.5],
+ [13.5, 17.5, 21.5]],
+ dtype=np.float32)
- last_quartile = iris.analysis._percentile(array_3d, 2, 50)
- np.testing.assert_array_almost_equal(last_quartile,
- np.array([[1.5, 5.5, 9.5],
- [13.5, 17.5, 21.5]],
- dtype=np.float32))
+ self._check_percentile(array_3d, 2, 50, expected_result)
+
+ def test_fast_percentile_3d_axis_two(self):
+ array_3d = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
+ expected_result = np.array([[1.5, 5.5, 9.5],
+ [13.5, 17.5, 21.5]],
+ dtype=np.float32)
+
+ self._check_percentile(array_3d, 2, 50, expected_result,
+ fast_percentile_method=True)
def test_percentile_3d_masked(self):
cube = tests.stock.simple_3d_mask()
+ expected_result = [[12., 13., 14., 15.],
+ [16., 17., 18., 19.],
+ [20., 18., 19., 20.]]
- last_quartile = cube.collapsed('wibble',
- iris.analysis.PERCENTILE, percent=75)
- np.testing.assert_array_almost_equal(last_quartile.data,
- np.array([[12., 13., 14., 15.],
- [16., 17., 18., 19.],
- [20., 18., 19., 20.]],
- dtype=np.float32))
- self.assertCML(last_quartile, ('analysis',
- 'last_quartile_foo_3d_masked.cml'),
- checksum=False)
+ self._check_collapsed_percentile(
+ cube, 75, 'wibble', expected_result,
+ CML_filename='last_quartile_foo_3d_masked.cml')
+
+ def test_fast_percentile_3d_masked(self):
+ cube = tests.stock.simple_3d_mask()
+ msg = 'Cannot use fast np.percentile method with masked array.'
+
+ with self.assertRaisesRegexp(TypeError, msg):
+ cube.collapsed('wibble',
+ iris.analysis.PERCENTILE, percent=75,
+ fast_percentile_method=True)
def test_percentile_3d_notmasked(self):
cube = tests.stock.simple_3d()
+ expected_result = [[9., 10., 11., 12.],
+ [13., 14., 15., 16.],
+ [17., 18., 19., 20.]]
- last_quartile = cube.collapsed('wibble',
- iris.analysis.PERCENTILE, percent=75)
- np.testing.assert_array_almost_equal(last_quartile.data,
- np.array([[9., 10., 11., 12.],
- [13., 14., 15., 16.],
- [17., 18., 19., 20.]],
- dtype=np.float32))
- self.assertCML(last_quartile, ('analysis',
- 'last_quartile_foo_3d_notmasked.cml'),
- checksum=False)
+ self._check_collapsed_percentile(
+ cube, 75, 'wibble', expected_result,
+ CML_filename='last_quartile_foo_3d_notmasked.cml')
+
+ def test_fast_percentile_3d_notmasked(self):
+ cube = tests.stock.simple_3d()
+ expected_result = [[9., 10., 11., 12.],
+ [13., 14., 15., 16.],
+ [17., 18., 19., 20.]]
+
+ self._check_collapsed_percentile(
+ cube, 75, 'wibble', expected_result, fast_percentile_method=True,
+ CML_filename='last_quartile_foo_3d_notmasked_fast_percentile.cml')
def test_proportion(self):
cube = tests.stock.simple_1d()
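The `fast_percentile_method` keyword exercised by these tests can be passed straight through a cube collapse. A minimal self-contained sketch (the cube and coordinate here are made up for illustration)::

    import numpy as np
    import iris.analysis
    from iris.cube import Cube
    from iris.coords import DimCoord

    cube = Cube(np.arange(10.0), long_name='speed')
    cube.add_dim_coord(DimCoord(np.arange(10.0), long_name='time'), 0)
    median = cube.collapsed('time', iris.analysis.PERCENTILE,
                            percent=50, fast_percentile_method=True)
    print(median.data)  # 4.5

As the masked-array test above shows, the fast path delegates to `np.percentile` and raises `TypeError` for masked data.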
diff --git a/lib/iris/tests/test_analysis_calculus.py b/lib/iris/tests/test_analysis_calculus.py
index 79e64a02fa..cac7855e8d 100644
--- a/lib/iris/tests/test_analysis_calculus.py
+++ b/lib/iris/tests/test_analysis_calculus.py
@@ -33,7 +33,6 @@
import iris.tests.stock
from iris.coords import DimCoord
-from iris.tests.test_interpolation import normalise_order
class TestCubeDelta(tests.IrisTest):
@@ -484,7 +483,6 @@ def test_contrived_non_spherical_curl2(self):
result.data = result.data * 0 + 1
np.testing.assert_array_almost_equal(result.data, r[2].data, decimal=4)
- normalise_order(r[1])
self.assertCML(r, ('analysis', 'calculus', 'curl_contrived_cartesian2.cml'), checksum=False)
def test_contrived_spherical_curl1(self):
diff --git a/lib/iris/tests/test_coding_standards.py b/lib/iris/tests/test_coding_standards.py
index 82eb384756..e2712b58ab 100644
--- a/lib/iris/tests/test_coding_standards.py
+++ b/lib/iris/tests/test_coding_standards.py
@@ -82,12 +82,11 @@
class StandardReportWithExclusions(pep8.StandardReport):
expected_bad_files = [
'*/iris/std_names.py',
- '*/iris/analysis/_interpolate_private.py',
'*/iris/fileformats/cf.py',
'*/iris/fileformats/dot.py',
'*/iris/fileformats/grib/_grib_cf_map.py',
'*/iris/fileformats/grib/_grib1_load_rules.py',
- '*/iris/fileformats/pp_rules.py',
+ '*/iris/fileformats/pp_load_rules.py',
'*/iris/fileformats/rules.py',
'*/iris/fileformats/um_cf_map.py',
'*/iris/fileformats/_pyke_rules/compiled_krb/compiled_pyke_files.py',
@@ -115,7 +114,6 @@ class StandardReportWithExclusions(pep8.StandardReport):
'*/iris/tests/test_grib_save.py',
'*/iris/tests/test_grib_save_rules.py',
'*/iris/tests/test_hybrid.py',
- '*/iris/tests/test_interpolation.py',
'*/iris/tests/test_intersect.py',
'*/iris/tests/test_io_init.py',
'*/iris/tests/test_iterate.py',
@@ -126,7 +124,6 @@ class StandardReportWithExclusions(pep8.StandardReport):
'*/iris/tests/test_pp_stash.py',
'*/iris/tests/test_pp_to_cube.py',
'*/iris/tests/test_quickplot.py',
- '*/iris/tests/test_regrid.py',
'*/iris/tests/test_std_names.py',
'*/iris/tests/test_unit.py',
'*/iris/tests/test_uri_callback.py',
diff --git a/lib/iris/tests/test_coord_api.py b/lib/iris/tests/test_coord_api.py
index d6dbe6de13..d7c0f25d99 100644
--- a/lib/iris/tests/test_coord_api.py
+++ b/lib/iris/tests/test_coord_api.py
@@ -316,7 +316,7 @@ def test_bounded(self):
def test_dim_coord_restrictions(self):
# 1d
- with self.assertRaisesRegexp(ValueError, 'must be 1-dim'):
+ with self.assertRaisesRegexp(ValueError, 'must be scalar or 1-dim'):
iris.coords.DimCoord([[1, 2, 3], [4, 5, 6]])
# monotonic
with self.assertRaisesRegexp(ValueError, 'must be strictly monotonic'):
@@ -326,7 +326,7 @@ def test_dim_coord_restrictions(self):
'monotonicity.*consistent.*all bounds'):
iris.coords.DimCoord([1, 2, 3], bounds=[[1, 12], [2, 9], [3, 6]])
# shapes of points and bounds
- msg = 'Bounds shape must be compatible with points shape'
+ msg = 'The shape of the bounds array should be'
with self.assertRaisesRegexp(ValueError, msg):
iris.coords.DimCoord([1, 2, 3], bounds=[0.5, 1.5, 2.5, 3.5])
# another example of shapes of points and bounds
diff --git a/lib/iris/tests/test_cube_to_pp.py b/lib/iris/tests/test_cube_to_pp.py
index 7041d4216a..fa78f95166 100644
--- a/lib/iris/tests/test_cube_to_pp.py
+++ b/lib/iris/tests/test_cube_to_pp.py
@@ -88,30 +88,6 @@ def test_pp_save_rules(self):
with self.cube_save_test(reference_txt_path, reference_cubes=cubes) as temp_pp_path:
iris.save(cubes, temp_pp_path)
- def test_user_pp_save_rules(self):
- # Test pp save rules with user rules.
-
- #create a user rules file
- user_rules_filename = iris.util.create_temp_filename(suffix='.txt')
- try:
- with open(user_rules_filename, "wt") as user_rules_file:
- user_rules_file.write("IF\ncm.standard_name == 'air_temperature'\nTHEN\npp.lbuser[3] = 9222")
- with iris.fileformats.rules._disable_deprecation_warnings():
- iris.fileformats.pp.add_save_rules(user_rules_filename)
- try:
- #read pp
- in_filename = tests.get_data_path(('PP', 'simple_pp', 'global.pp'))
- cubes = iris.load(in_filename, callback=itab_callback)
-
- reference_txt_path = tests.get_result_path(('cube_to_pp', 'user_rules.txt'))
- with self.cube_save_test(reference_txt_path, reference_cubes=cubes) as temp_pp_path:
- iris.save(cubes, temp_pp_path)
-
- finally:
- iris.fileformats.pp.reset_save_rules()
- finally:
- os.remove(user_rules_filename)
-
def test_pp_append_singles(self):
# Test pp append saving - single cubes.
@@ -208,8 +184,7 @@ def test_365_calendar_export(self):
# Add an extra "fill_value" property, as used by the save rules.
cube.fill_value = None
pp_field = mock.MagicMock(spec=PPField3)
- iris.fileformats.pp._ensure_save_rules_loaded()
- iris.fileformats.pp._save_rules.verify(cube, pp_field)
+ iris.fileformats.pp_save_rules.verify(cube, pp_field)
self.assertEqual(pp_field.lbtim.ic, 4)
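The old two-step private pattern (`_ensure_save_rules_loaded()` followed by `_save_rules.verify(...)`) is replaced throughout by the module-level `iris.fileformats.pp_save_rules.verify`, which populates the field and returns it. A minimal sketch, assuming `cube` has already been prepared for PP save (real cubes may need extra properties, e.g. `fill_value` as set in the test above)::

    import iris.fileformats.pp
    from iris.fileformats.pp_save_rules import verify

    field = verify(cube, iris.fileformats.pp.PPField3())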
diff --git a/lib/iris/tests/test_interpolation.py b/lib/iris/tests/test_interpolation.py
deleted file mode 100644
index d840d3852e..0000000000
--- a/lib/iris/tests/test_interpolation.py
+++ /dev/null
@@ -1,763 +0,0 @@
-# (C) British Crown Copyright 2010 - 2017, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""
-Test the interpolation of Iris cubes.
-
-"""
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# import iris tests first so that some things can be initialised before importing anything else
-import iris.tests as tests
-
-import numpy as np
-from scipy.interpolate import interp1d
-
-import iris
-import iris.coord_systems
-import iris.cube
-import iris.analysis.interpolate
-import iris.tests.stock
-import iris.analysis._interpolate_private as iintrp
-
-
-def normalise_order(cube):
- # Avoid the crazy array ordering which results from using:
- # * np.append() in NumPy 1.6, which is triggered in the `linear()`
- # function when the circular flag is true.
- # * scipy.interpolate.interp1d in 0.11.0 which is used in
- # `Linear1dExtrapolator`.
- data = np.ascontiguousarray(cube.data)
- cube.data = data
-
-
-class TestLinearExtrapolator(tests.IrisTest):
- def test_simple_axis0(self):
- a = np.arange(12.).reshape(3, 4)
- r = iintrp.Linear1dExtrapolator(interp1d(np.arange(3), a, axis=0))
-
- np.testing.assert_array_equal(r(0), np.array([ 0., 1., 2., 3.]))
- np.testing.assert_array_equal(r(-1), np.array([-4., -3., -2., -1.]))
- np.testing.assert_array_equal(r(3), np.array([ 12., 13., 14., 15.]))
- np.testing.assert_array_equal(r(2.5), np.array([ 10., 11., 12., 13.]))
-
- # 2 Non-extrapolation point
- np.testing.assert_array_equal(r(np.array([1.5, 2])), np.array([[ 6., 7., 8., 9.],
- [ 8., 9., 10., 11.]]))
-
- # 1 Non-extrapolation point & 1 upper value extrapolation
- np.testing.assert_array_equal(r(np.array([1.5, 3])), np.array([[ 6., 7., 8., 9.],
- [ 12., 13., 14., 15.]]))
-
- # 2 upper value extrapolation
- np.testing.assert_array_equal(r(np.array([2.5, 3])), np.array([[ 10., 11., 12., 13.],
- [ 12., 13., 14., 15.]]))
-
- # 1 lower value extrapolation & 1 Non-extrapolation point
- np.testing.assert_array_equal(r(np.array([-1, 1.5])), np.array([[-4., -3., -2., -1.],
- [ 6., 7., 8., 9.]]))
-
- # 2 lower value extrapolation
- np.testing.assert_array_equal(r(np.array([-1.5, -1])), np.array([[-6., -5., -4., -3.],
- [-4., -3., -2., -1.]]))
-
- # 2 lower value extrapolation, 2 Non-extrapolation point & 2 upper value extrapolation
- np.testing.assert_array_equal(r(np.array([-1.5, -1, 1, 1.5, 2.5, 3])),
- np.array([[ -6., -5., -4., -3.],
- [ -4., -3., -2., -1.],
- [ 4., 5., 6., 7.],
- [ 6., 7., 8., 9.],
- [ 10., 11., 12., 13.],
- [ 12., 13., 14., 15.]]))
-
- def test_simple_axis1(self):
- a = np.arange(12).reshape(3, 4)
- r = iintrp.Linear1dExtrapolator(interp1d(np.arange(4), a, axis=1))
-
- # check non-extrapolation given the Extrapolator object
- np.testing.assert_array_equal(r(0), np.array([ 0., 4., 8.]))
-
- # check the result's shape in a 1d array (of len 0 & 1)
- np.testing.assert_array_equal(r(np.array(0)), np.array([ 0., 4., 8.]))
- np.testing.assert_array_equal(r(np.array([0])), np.array([ [0.], [4.], [8.]]))
-
- # check extrapolation below the minimum value (and check the equivalent 0d & 1d arrays)
- np.testing.assert_array_equal(r(-1), np.array([-1., 3., 7.]))
- np.testing.assert_array_equal(r(np.array(-1)), np.array([-1., 3., 7.]))
- np.testing.assert_array_equal(r(np.array([-1])), np.array([[-1.], [ 3.], [ 7.]]))
-
- # check extrapolation above the maximum value
- np.testing.assert_array_equal(r(3), np.array([ 3., 7., 11.]))
- np.testing.assert_array_equal(r(2.5), np.array([ 2.5, 6.5, 10.5]))
-
- # 2 Non-extrapolation point
- np.testing.assert_array_equal(r(np.array([1.5, 2])), np.array([[ 1.5, 2. ],
- [ 5.5, 6. ],
- [ 9.5, 10. ]]))
-
- # 1 Non-extrapolation point & 1 upper value extrapolation
- np.testing.assert_array_equal(r(np.array([1.5, 5])), np.array([[ 1.5, 5. ],
- [ 5.5, 9. ],
- [ 9.5, 13. ]]))
-
- # 2 upper value extrapolation
- np.testing.assert_array_equal(r(np.array([4.5, 5])), np.array([[ 4.5, 5. ],
- [ 8.5, 9. ],
- [ 12.5, 13. ]]))
-
- # 1 lower value extrapolation & 1 Non-extrapolation point
- np.testing.assert_array_equal(r(np.array([-0.5, 1.5])), np.array([[-0.5, 1.5],
- [ 3.5, 5.5],
- [ 7.5, 9.5]]))
-
- # 2 lower value extrapolation
- np.testing.assert_array_equal(r(np.array([-1.5, -1])), np.array([[-1.5, -1. ],
- [ 2.5, 3. ],
- [ 6.5, 7. ]]))
-
- # 2 lower value extrapolation, 2 Non-extrapolation point & 2 upper value extrapolation
- np.testing.assert_array_equal(r(np.array([-1.5, -1, 1.5, 2, 4.5, 5])),
- np.array([[ -1.5, -1., 1.5, 2., 4.5, 5. ],
- [ 2.5, 3., 5.5, 6., 8.5, 9. ],
- [ 6.5, 7., 9.5, 10., 12.5, 13. ]]))
-
-
- def test_simple_3d_axis1(self):
- a = np.arange(24.).reshape(3, 4, 2)
- r = iintrp.Linear1dExtrapolator(interp1d(np.arange(4.), a, axis=1))
-
-# a:
-# [[[ 0 1]
-# [ 2 3]
-# [ 4 5]
-# [ 6 7]]
-#
-# [[ 8 9]
-# [10 11]
-# [12 13]
-# [14 15]]
-#
-# [[16 17]
-# [18 19]
-# [20 21]
-# [22 23]]
-# ]
-
- np.testing.assert_array_equal(r(0), np.array([[ 0., 1.],
- [ 8., 9.],
- [ 16., 17.]]))
-
- np.testing.assert_array_equal(r(1), np.array([[ 2., 3.],
- [ 10., 11.],
- [ 18., 19.]]))
-
- np.testing.assert_array_equal(r(-1), np.array([[ -2., -1.],
- [ 6., 7.],
- [ 14., 15.]]))
-
- np.testing.assert_array_equal(r(4), np.array([[ 8., 9.],
- [ 16., 17.],
- [ 24., 25.]]))
-
- np.testing.assert_array_equal(r(0.25), np.array([[ 0.5, 1.5],
- [ 8.5, 9.5],
- [ 16.5, 17.5]]))
-
- np.testing.assert_array_equal(r(-0.25), np.array([[ -0.5, 0.5],
- [ 7.5, 8.5],
- [ 15.5, 16.5]]))
-
- np.testing.assert_array_equal(r(4.25), np.array([[ 8.5, 9.5],
- [ 16.5, 17.5],
- [ 24.5, 25.5]]))
-
- np.testing.assert_array_equal(r(np.array([0.5, 1])), np.array([[[ 1., 2.], [ 2., 3.]],
- [[ 9., 10.], [ 10., 11.]],
- [[ 17., 18.], [ 18., 19.]]]))
-
- np.testing.assert_array_equal(r(np.array([0.5, 4])), np.array([[[ 1., 2.], [ 8., 9.]],
- [[ 9., 10.], [ 16., 17.]],
- [[ 17., 18.], [ 24., 25.]]]))
-
- np.testing.assert_array_equal(r(np.array([-0.5, 0.5])), np.array([[[ -1., 0.], [ 1., 2.]],
- [[ 7., 8.], [ 9., 10.]],
- [[ 15., 16.], [ 17., 18.]]]))
-
- np.testing.assert_array_equal(r(np.array([-1.5, -1, 0.5, 1, 4.5, 5])),
- np.array([[[ -3., -2.], [ -2., -1.], [ 1., 2.], [ 2., 3.], [ 9., 10.], [ 10., 11.]],
- [[ 5., 6.], [ 6., 7.], [ 9., 10.], [ 10., 11.], [ 17., 18.], [ 18., 19.]],
- [[ 13., 14.], [ 14., 15.], [ 17., 18.], [ 18., 19.], [ 25., 26.], [ 26., 27.]]]))
-
- def test_variable_gradient(self):
- a = np.array([[2, 4, 8], [0, 5, 11]])
- r = iintrp.Linear1dExtrapolator(interp1d(np.arange(2), a, axis=0))
-
- np.testing.assert_array_equal(r(0), np.array([ 2., 4., 8.]))
- np.testing.assert_array_equal(r(-1), np.array([ 4., 3., 5.]))
- np.testing.assert_array_equal(r(3), np.array([ -4., 7., 17.]))
- np.testing.assert_array_equal(r(2.5), np.array([ -3., 6.5, 15.5]))
-
- np.testing.assert_array_equal(r(np.array([1.5, 2])), np.array([[ -1., 5.5, 12.5],
- [ -2., 6., 14. ]]))
-
- np.testing.assert_array_equal(r(np.array([-1.5, 3.5])), np.array([[ 5., 2.5, 3.5],
- [ -5., 7.5, 18.5]]))
-
-
-class TestLinearLengthOneCoord(tests.IrisTest):
- def setUp(self):
- self.cube = iris.tests.stock.lat_lon_cube()
- self.cube.data = self.cube.data.astype(float)
-
- def test_single_point(self):
- # Slice to form (3, 1) shaped cube.
- cube = self.cube[:, 2:3]
- r = iintrp.linear(cube, [('longitude', [1.])])
- self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
- 'single_pt_to_single_pt_0'))
-
- # Slice to form (1, 4) shaped cube.
- cube = self.cube[1:2, :]
- r = iintrp.linear(cube, [('latitude', [1.])])
- self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
- 'single_pt_to_single_pt_1'))
-
- def test_multiple_points(self):
- # Slice to form (3, 1) shaped cube.
- cube = self.cube[:, 2:3]
- r = iintrp.linear(cube, [('longitude',
- [1., 2., 3., 4.])])
- self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
- 'single_pt_to_many_0'))
-
- # Slice to form (1, 4) shaped cube.
- cube = self.cube[1:2, :]
- r = iintrp.linear(cube, [('latitude',
- [1., 2., 3., 4.])])
- self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
- 'single_pt_to_many_1'))
-
- def test_single_point_to_scalar(self):
- # Slice to form (3, 1) shaped cube.
- cube = self.cube[:, 2:3]
- r = iintrp.linear(cube, [('longitude', 1.)])
- self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
- 'single_pt_to_scalar_0'))
-
- # Slice to form (1, 4) shaped cube.
- cube = self.cube[1:2, :]
- r = iintrp.linear(cube, [('latitude', 1.)])
- self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
- 'single_pt_to_scalar_1'))
-
- def test_extrapolation_mode_same_pt(self):
- # Slice to form (3, 1) shaped cube.
- cube = self.cube[:, 2:3]
- src_points = cube.coord('longitude').points
- r = iintrp.linear(cube, [('longitude', src_points)],
- extrapolation_mode='linear')
- self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
- 'single_pt_to_same_pt'))
- r = iintrp.linear(cube, [('longitude', src_points)],
- extrapolation_mode='nan')
- self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
- 'single_pt_to_same_pt'))
- r = iintrp.linear(cube, [('longitude', src_points)],
- extrapolation_mode='error')
- self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
- 'single_pt_to_same_pt'))
-
- def test_extrapolation_mode_multiple_same_pts(self):
- # Slice to form (3, 1) shaped cube.
- cube = self.cube[:, 2:3]
- src_points = cube.coord('longitude').points
- new_points = [src_points[0]] * 3
- r = iintrp.linear(cube, [('longitude', new_points)],
- extrapolation_mode='linear')
- self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
- 'single_pt_to_many_same'))
- r = iintrp.linear(cube, [('longitude', new_points)],
- extrapolation_mode='nan')
- self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
- 'single_pt_to_many_same'))
- r = iintrp.linear(cube, [('longitude', new_points)],
- extrapolation_mode='error')
- self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
- 'single_pt_to_many_same'))
-
- def test_extrapolation_mode_different_pts(self):
- # Slice to form (3, 1) shaped cube.
- cube = self.cube[:, 2:3]
- src_points = cube.coord('longitude').points
- new_points_single = src_points + 0.2
- new_points_multiple = [src_points[0],
- src_points[0] + 0.2,
- src_points[0] + 0.4]
- new_points_scalar = src_points[0] + 0.2
-
- # 'nan' mode
- r = iintrp.linear(cube, [('longitude',
- new_points_single)],
- extrapolation_mode='nan')
- self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
- 'single_pt_to_single_pt_nan'))
- r = iintrp.linear(cube, [('longitude',
- new_points_multiple)],
- extrapolation_mode='nan')
- self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
- 'single_pt_to_many_nan'))
- r = iintrp.linear(cube, [('longitude',
- new_points_scalar)],
- extrapolation_mode='nan')
- self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
- 'single_pt_to_scalar_nan'))
-
- # 'error' mode
- with self.assertRaises(ValueError):
- r = iintrp.linear(cube, [('longitude',
- new_points_single)],
- extrapolation_mode='error')
- with self.assertRaises(ValueError):
- r = iintrp.linear(cube, [('longitude',
- new_points_multiple)],
- extrapolation_mode='error')
- with self.assertRaises(ValueError):
- r = iintrp.linear(cube, [('longitude',
- new_points_scalar)],
- extrapolation_mode='error')
-
-
-class TestLinear1dInterpolation(tests.IrisTest):
- def setUp(self):
- data = np.arange(12., dtype=np.float32).reshape((4, 3))
- c2 = iris.cube.Cube(data)
-
- c2.long_name = 'test 2d dimensional cube'
- c2.units = 'kelvin'
-
- pts = 3 + np.arange(4, dtype=np.float32) * 2
- b = iris.coords.DimCoord(pts, long_name='dim1', units=1)
- d = iris.coords.AuxCoord([3, 3.5, 6], long_name='dim2', units=1)
- e = iris.coords.AuxCoord(3.0, long_name='an_other', units=1)
-
- c2.add_dim_coord(b, 0)
- c2.add_aux_coord(d, 1)
- c2.add_aux_coord(e)
-
- self.simple2d_cube = c2
-
- d = iris.coords.AuxCoord([5, 9, 20], long_name='shared_x_coord', units=1)
- c3 = c2.copy()
- c3.add_aux_coord(d, 1)
- self.simple2d_cube_extended = c3
-
- pts = 0.1 + np.arange(5, dtype=np.float32) * 0.1
- f = iris.coords.DimCoord(pts, long_name='r', units=1)
- g = iris.coords.DimCoord([0.0, 90.0, 180.0, 270.0], long_name='theta', units='degrees', circular=True)
- data = np.arange(20., dtype=np.float32).reshape((5, 4))
- c4 = iris.cube.Cube(data)
- c4.add_dim_coord(f, 0)
- c4.add_dim_coord(g, 1)
- self.simple2d_cube_circular = c4
-
- def test_dim_to_aux(self):
- cube = self.simple2d_cube
- other = iris.coords.DimCoord([1, 2, 3, 4], long_name='was_dim')
- cube.add_aux_coord(other, 0)
- r = iintrp.linear(cube, [('dim1', [7, 3, 5])])
- normalise_order(r)
- self.assertCML(r, ('analysis', 'interpolation', 'linear', 'dim_to_aux.cml'))
-
- def test_bad_sample_point_format(self):
- self.assertRaises(TypeError, iintrp.linear, self.simple2d_cube, ('dim1', 4))
-
- def test_simple_single_point(self):
- r = iintrp.linear(self.simple2d_cube, [('dim1', 4)])
- self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_single_point.cml'), checksum=False)
- np.testing.assert_array_equal(r.data, np.array([1.5, 2.5, 3.5], dtype=self.simple2d_cube.data.dtype))
-
- def test_monotonic_decreasing_coord(self):
- c = self.simple2d_cube[::-1]
- r = iintrp.linear(c, [('dim1', 4)])
- self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_single_point.cml'), checksum=False)
- np.testing.assert_array_equal(r.data, np.array([1.5, 2.5, 3.5], dtype=self.simple2d_cube.data.dtype))
-
- def test_overspecified(self):
- self.assertRaises(ValueError, iintrp.linear, self.simple2d_cube[0, :], [('dim1', 4)])
-
- def test_bounded_coordinate(self):
- # The results should be exactly the same as for the
- # non-bounded case.
- cube = self.simple2d_cube
- cube.coord('dim1').guess_bounds()
- r = iintrp.linear(cube, [('dim1', [4, 5])])
- np.testing.assert_array_equal(r.data, np.array([[ 1.5, 2.5, 3.5], [ 3., 4., 5. ]]))
- normalise_order(r)
- self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_multiple_points.cml'))
-
- def test_simple_multiple_point(self):
- r = iintrp.linear(self.simple2d_cube, [('dim1', [4, 5])])
- np.testing.assert_array_equal(r.data, np.array([[ 1.5, 2.5, 3.5], [ 3., 4., 5. ]]))
- normalise_order(r)
- self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_multiple_points.cml'))
-
- # Check that numpy arrays specifications work
- r = iintrp.linear(self.simple2d_cube, [('dim1', np.array([4, 5]))])
- normalise_order(r)
- self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_multiple_points.cml'))
-
- def test_circular_vs_non_circular_coord(self):
- cube = self.simple2d_cube_circular
- other = iris.coords.AuxCoord([10, 6, 7, 4], long_name='other')
- cube.add_aux_coord(other, 1)
- samples = [0, 60, 300]
- r = iintrp.linear(cube, [('theta', samples)])
- normalise_order(r)
- self.assertCML(r, ('analysis', 'interpolation', 'linear', 'circular_vs_non_circular.cml'))
-
- def test_simple_multiple_points_circular(self):
- r = iintrp.linear(self.simple2d_cube_circular, [('theta', [0., 60., 120., 180.])])
- normalise_order(r)
- self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_multiple_points_circular.cml'))
-
- # check that the values returned by theta 0 & 360 are the same...
- r1 = iintrp.linear(self.simple2d_cube_circular, [('theta', 360)])
- r2 = iintrp.linear(self.simple2d_cube_circular, [('theta', 0)])
- np.testing.assert_array_almost_equal(r1.data, r2.data)
-
- def test_simple_multiple_coords(self):
- expected_result = np.array(2.5)
- r = iintrp.linear(self.simple2d_cube, [('dim1', 4), ('dim2', 3.5), ])
- np.testing.assert_array_equal(r.data, expected_result)
- self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_multiple_coords.cml'), checksum=False)
-
- # Check that it doesn't matter if you do the interpolation in separate steps...
- r = iintrp.linear(self.simple2d_cube, [('dim2', 3.5)])
- r = iintrp.linear(r, [('dim1', 4)])
- np.testing.assert_array_equal(r.data, expected_result)
- self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_multiple_coords.cml'), checksum=False)
-
- r = iintrp.linear(self.simple2d_cube, [('dim1', 4)])
- r = iintrp.linear(r, [('dim2', 3.5)])
- np.testing.assert_array_equal(r.data, expected_result)
- self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_multiple_coords.cml'), checksum=False)
-
- def test_coord_not_found(self):
- self.assertRaises(KeyError, iintrp.linear, self.simple2d_cube,
- [('non_existant_coord', [3.5, 3.25])])
-
- def test_simple_coord_error_extrapolation(self):
- self.assertRaises(ValueError, iintrp.linear, self.simple2d_cube, [('dim2', 2.5)], extrapolation_mode='error')
-
- def test_simple_coord_linear_extrapolation(self):
- r = iintrp.linear( self.simple2d_cube, [('dim2', 2.5)], extrapolation_mode='linear')
- self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_coord_linear_extrapolation.cml'))
-
- np.testing.assert_array_equal(r.data, np.array([-1., 2., 5., 8.], dtype=self.simple2d_cube.data.dtype))
-
- r = iintrp.linear(self.simple2d_cube, [('dim1', 1)])
- np.testing.assert_array_equal(r.data, np.array([-3., -2., -1.], dtype=self.simple2d_cube.data.dtype))
-
- def test_simple_coord_linear_extrapolation_multipoint1(self):
- r = iintrp.linear( self.simple2d_cube, [('dim1', [-1, 1, 10, 11])], extrapolation_mode='linear')
- self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_coord_linear_extrapolation_multipoint1.cml'))
-
- def test_simple_coord_linear_extrapolation_multipoint2(self):
- r = iintrp.linear( self.simple2d_cube, [('dim1', [1, 10])], extrapolation_mode='linear')
- self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_coord_linear_extrapolation_multipoint2.cml'))
-
- def test_simple_coord_nan_extrapolation(self):
- r = iintrp.linear( self.simple2d_cube, [('dim2', 2.5)], extrapolation_mode='nan')
- self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_coord_nan_extrapolation.cml'))
-
- def test_multiple_coord_extrapolation(self):
- self.assertRaises(ValueError, iintrp.linear, self.simple2d_cube, [('dim2', 2.5), ('dim1', 12.5)], extrapolation_mode='error')
-
- def test_multiple_coord_linear_extrapolation(self):
- r = iintrp.linear(self.simple2d_cube, [('dim2', 9), ('dim1', 1.5)])
- self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_multiple_coords_extrapolation.cml'))
-
- def test_lots_of_points(self):
- r = iintrp.linear(self.simple2d_cube, [('dim1', np.linspace(3, 9, 20))])
- # XXX Implement a test!?!
-
- def test_shared_axis(self):
- c = self.simple2d_cube_extended
- r = iintrp.linear(c, [('dim2', [3.5, 3.25])])
- normalise_order(r)
-
- self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_shared_axis.cml'))
-
- self.assertRaises(ValueError, iintrp.linear, c, [('dim2', [3.5, 3.25]), ('shared_x_coord', [9, 7])])
-
- def test_points_datatype_casting(self):
- # this test tries to extract a float from an array of type integer. the result should be of type float.
- r = iintrp.linear(self.simple2d_cube_extended, [('shared_x_coord', 7.5)])
- self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_casting_datatype.cml'))
-
-
-@tests.skip_data
-class TestNearestLinearInterpolRealData(tests.IrisTest):
- def setUp(self):
- file = tests.get_data_path(('PP', 'globClim1', 'theta.pp'))
- self.cube = iris.load_cube(file)
-
- def test_slice(self):
- r = iintrp.linear(self.cube, [('latitude', 0)])
- self.assertCML(r, ('analysis', 'interpolation', 'linear', 'real_2dslice.cml'))
-
- def test_2slices(self):
- r = iintrp.linear(self.cube, [('latitude', 0.0), ('longitude', 0.0)])
- self.assertCML(r, ('analysis', 'interpolation', 'linear', 'real_2slices.cml'))
-
- def test_circular(self):
- res = iintrp.linear(self.cube,
- [('longitude', 359.8)])
- normalise_order(res)
- lon_coord = self.cube.coord('longitude').points
- expected = self.cube.data[..., 0] + \
- ((self.cube.data[..., -1] - self.cube.data[..., 0]) *
- (((360 - 359.8) - lon_coord[0]) /
- ((360 - lon_coord[-1]) - lon_coord[0])))
- self.assertArrayAllClose(res.data, expected, rtol=1.0e-6)
-
- # check that the values returned by lon 0 & 360 are the same...
- r1 = iintrp.linear(self.cube, [('longitude', 360)])
- r2 = iintrp.linear(self.cube, [('longitude', 0)])
- np.testing.assert_array_equal(r1.data, r2.data)
-
- self.assertCML(res, ('analysis', 'interpolation', 'linear',
- 'real_circular_2dslice.cml'), checksum=False)
-
-
-class MixinNearestNeighbour(object):
- # Define standard tests for the three 'nearest_neighbour' routines.
- # Cast as a 'mixin' as it used to test both (a) the original routines and
- # (b) replacement operations to justify deprecation.
-
- def _common_setUp(self):
- self.cube = iris.tests.stock.global_pp()
- points = np.arange(self.cube.coord('latitude').shape[0], dtype=np.float32)
- coord_to_add = iris.coords.DimCoord(points, long_name='i', units='meters')
- self.cube.add_aux_coord(coord_to_add, 0)
-
- def test_nearest_neighbour(self):
- point_spec = [('latitude', 40), ('longitude', 39)]
-
- indices = iintrp.nearest_neighbour_indices(self.cube, point_spec)
- self.assertEqual(indices, (20, 10))
-
- b = iintrp.extract_nearest_neighbour(self.cube, point_spec)
-
- # Check that the data has not been loaded on either the original cube,
- # nor the interpolated one.
- self.assertTrue(b.has_lazy_data())
- self.assertTrue(self.cube.has_lazy_data())
- self.assertCML(b, ('analysis', 'interpolation', 'nearest_neighbour_extract_latitude_longitude.cml'))
-
- value = iintrp.nearest_neighbour_data_value(self.cube, point_spec)
- self.assertEqual(value, np.array(285.98785, dtype=np.float32))
-
- # Check that the value back is that which was returned by the extract method
- self.assertEqual(value, b.data)
-
- def test_nearest_neighbour_slice(self):
- point_spec = [('latitude', 40)]
- indices = iintrp.nearest_neighbour_indices(self.cube, point_spec)
- self.assertEqual(indices, (20, slice(None, None)))
-
- b = iintrp.extract_nearest_neighbour(self.cube, point_spec)
- self.assertCML(b, ('analysis', 'interpolation', 'nearest_neighbour_extract_latitude.cml'))
-
- # cannot get a specific point from these point specifications
- self.assertRaises(ValueError, iintrp.nearest_neighbour_data_value, self.cube, point_spec)
-
- def test_nearest_neighbour_over_specification_which_is_consistent(self):
- # latitude 40 is the 20th point
- point_spec = [('latitude', 40), ('i', 20), ('longitude', 38)]
-
- indices = iintrp.nearest_neighbour_indices(self.cube, point_spec)
- self.assertEqual(indices, (20, 10))
-
- b = iintrp.extract_nearest_neighbour(self.cube, point_spec)
- self.assertCML(b, ('analysis', 'interpolation', 'nearest_neighbour_extract_latitude_longitude.cml'))
-
- value = iintrp.nearest_neighbour_data_value(self.cube, point_spec)
- # Check that the value back is that which was returned by the extract method
- self.assertEqual(value, b.data)
-
- def test_nearest_neighbour_over_specification_mis_aligned(self):
- # latitude 40 is the 20th point
- point_spec = [('latitude', 40), ('i', 10), ('longitude', 38)]
-
- # assert that we get a ValueError for over specifying our interpolation
- self.assertRaises(ValueError, iintrp.nearest_neighbour_data_value, self.cube, point_spec)
-
- def test_nearest_neighbour_bounded_simple(self):
- point_spec = [('latitude', 37), ('longitude', 38)]
-
- coord = self.cube.coord('latitude')
- coord.guess_bounds(0.5)
-
- b = iintrp.extract_nearest_neighbour(self.cube, point_spec)
- self.assertCML(b, ('analysis', 'interpolation', 'nearest_neighbour_extract_bounded.cml'))
-
- def test_nearest_neighbour_bounded_requested_midpoint(self):
- # This test checks the "point inside cell" logic
- point_spec = [('latitude', 38), ('longitude', 38)]
-
- coord = self.cube.coord('latitude')
- coord.guess_bounds(0.5)
-
- b = iintrp.extract_nearest_neighbour(self.cube, point_spec)
- self.assertCML(b, ('analysis', 'interpolation', 'nearest_neighbour_extract_bounded_mid_point.cml'))
-
- def test_nearest_neighbour_locator_style_coord(self):
- point_spec = [('latitude', 39)]
-
- b = iintrp.extract_nearest_neighbour(self.cube, point_spec)
- self.assertCML(b, ('analysis', 'interpolation', 'nearest_neighbour_extract_latitude.cml'))
-
- def test_nearest_neighbour_circular(self):
- # test on non-circular coordinate (latitude)
- lat_vals = np.array([
- [-150.0, -90], [-97, -90], [-92, -90], [-91, -90], [-90.1, -90],
- [-90.0, -90], [-89.9, -90],
- [-89, -90], [-88, -87.5], [-87, -87.5],
- [-86, -85], [-85.5, -85],
- [81, 80], [84, 85], [84.8, 85], [85, 85], [86, 85],
- [87, 87.5], [88, 87.5], [89, 90],
- [89.9, 90], [90.0, 90], [90.1, 90],
- [95, 90], [100, 90], [150, 90]])
- lat_test_vals = lat_vals[:, 0]
- lat_expect_vals = lat_vals[:, 1]
- lat_coord_vals = self.cube.coord('latitude').points
-
- def near_value(val, vals):
- # return the *exact* value from vals that is closest to val.
- # - and raise an exception if there isn't a close match.
- best_val = vals[np.argmin(np.abs(vals - val))]
- if val == 0.0:
- # absolute tolerance to 0.0 (ok for magnitudes >= 1.0 or so)
- error_level = best_val
- else:
- # calculate relative-tolerance
- error_level = abs(0.5 * (val - best_val) / (val + best_val))
- self.assertTrue(error_level < 1.0e-6,
- 'error_level {}% match of {} to one of {}'.format(
- 100.0 * error_level, val, vals))
- return best_val
-
- lat_expect_vals = [near_value(v, lat_coord_vals)
- for v in lat_expect_vals]
- lat_nearest_inds = [
- iintrp.nearest_neighbour_indices(
- self.cube, [('latitude', point_val)])
- for point_val in lat_test_vals]
- lat_nearest_vals = [lat_coord_vals[i[0]] for i in lat_nearest_inds]
- self.assertArrayAlmostEqual(lat_nearest_vals, lat_expect_vals)
-
- # repeat with *circular* coordinate (longitude)
- lon_vals = np.array([
- [0.0, 0.0],
- [-3.75, 356.25],
- [-1.0, 0], [-0.01, 0], [0.5, 0],
- [2, 3.75], [3, 3.75], [4, 3.75], [5, 3.75], [6, 7.5],
- [350.5, 348.75], [351, 352.5], [354, 352.5],
- [355, 356.25], [358, 356.25],
- [358.7, 0], [359, 0], [360, 0], [361, 0],
- [362, 3.75], [363, 3.75], [364, 3.75], [365, 3.75], [366, 7.5],
- [-725.0, 356.25], [-722, 356.25], [-721, 0], [-719, 0.0],
- [-718, 3.75],
- [1234.56, 153.75], [-1234.56, 206.25]])
- lon_test_vals = lon_vals[:, 0]
- lon_expect_vals = lon_vals[:, 1]
- lon_coord_vals = self.cube.coord('longitude').points
- lon_expect_vals = [near_value(v, lon_coord_vals)
- for v in lon_expect_vals]
- lon_nearest_inds = [
- iintrp.nearest_neighbour_indices(self.cube,
- [('longitude', point_val)])
- for point_val in lon_test_vals]
- lon_nearest_vals = [lon_coord_vals[i[1]] for i in lon_nearest_inds]
- self.assertArrayAlmostEqual(lon_nearest_vals, lon_expect_vals)
-
-
-@tests.skip_data
-class TestNearestNeighbour(tests.IrisTest, MixinNearestNeighbour):
- def setUp(self):
- self._common_setUp()
-
-
-@tests.skip_data
-class TestNearestNeighbour__Equivalent(tests.IrisTest, MixinNearestNeighbour):
- # Class that repeats the tests of "TestNearestNeighbour", to check that the
- # behaviour of the three 'nearest_neighbour' routines in
- # iris.analysis.interpolation can be completely replicated with alternative
- # (newer) functionality.
-
- def setUp(self):
- self.patch(
- 'iris.analysis._interpolate_private.nearest_neighbour_indices',
- self._equivalent_nn_indices)
- self.patch(
- 'iris.analysis._interpolate_private.nearest_neighbour_data_value',
- self._equivalent_nn_data_value)
- self.patch(
- 'iris.analysis._interpolate_private.extract_nearest_neighbour',
- self._equivalent_extract_nn)
- self._common_setUp()
-
- @staticmethod
- def _equivalent_nn_indices(cube, sample_points,
- require_single_point=False):
- indices = [slice(None) for _ in cube.shape]
- for coord_spec, point in sample_points:
- coord = cube.coord(coord_spec)
- dim, = cube.coord_dims(coord) # expect only 1d --> single dim !
- dim_index = coord.nearest_neighbour_index(point)
- if require_single_point:
- # Mimic error behaviour of the original "data-value" function:
- # Any dim already addressed must get the same index.
- if indices[dim] != slice(None) and indices[dim] != dim_index:
- raise ValueError('indices over-specified')
- indices[dim] = dim_index
- if require_single_point:
- # Mimic error behaviour of the original "data-value" function:
- # All dims must have an index.
- if any(index == slice(None) for index in indices):
- raise ValueError('result expected to be a single point')
- return tuple(indices)
-
- @staticmethod
- def _equivalent_extract_nn(cube, sample_points):
- indices = TestNearestNeighbour__Equivalent._equivalent_nn_indices(
- cube, sample_points)
- new_cube = cube[indices]
- return new_cube
-
- @staticmethod
- def _equivalent_nn_data_value(cube, sample_points):
- indices = TestNearestNeighbour__Equivalent._equivalent_nn_indices(
- # for this routine only, enable extra index checks.
- cube, sample_points, require_single_point=True)
- return cube.data[indices]
-
-
-if __name__ == "__main__":
- tests.main()
diff --git a/lib/iris/tests/test_pandas.py b/lib/iris/tests/test_pandas.py
index abe680b318..f259c7a147 100644
--- a/lib/iris/tests/test_pandas.py
+++ b/lib/iris/tests/test_pandas.py
@@ -299,6 +299,13 @@ def test_copy_masked_false(self):
with self.assertRaises(ValueError):
data_frame = iris.pandas.as_data_frame(cube, copy=False)
+ def test_copy_false_with_cube_view(self):
+ data = np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
+ cube = Cube(data[:], long_name="foo")
+ data_frame = iris.pandas.as_data_frame(cube, copy=False)
+ data_frame[0][0] = 99
+ self.assertEqual(cube.data[0, 0], 99)
+
@skip_pandas
class TestSeriesAsCube(tests.IrisTest):
diff --git a/lib/iris/tests/test_pp_module.py b/lib/iris/tests/test_pp_module.py
index fee4523de4..e3106f95e4 100644
--- a/lib/iris/tests/test_pp_module.py
+++ b/lib/iris/tests/test_pp_module.py
@@ -122,12 +122,10 @@ def test_lbtim_setter(self):
def test_lbproc_access(self):
# lbproc == 65539
with mock.patch('warnings.warn') as warn:
- self.assertEqual(self.pp.lbproc[0], 9)
- self.assertEqual(self.pp.lbproc[19], 0)
self.assertEqual(self.pp.lbproc.flag1, 1)
self.assertEqual(self.pp.lbproc.flag65536, 1)
self.assertEqual(self.pp.lbproc.flag131072, 0)
- self.assertEqual(warn.call_count, 5)
+ self.assertEqual(warn.call_count, 3)
def test_set_lbuser(self):
self.pp.stash = 'm02s12i003'
@@ -332,116 +330,7 @@ def test_save_single(self):
self.assertEqual(self.file_checksum(temp_filename), self.file_checksum(filepath))
os.remove(temp_filename)
-
-
-class TestBitwiseInt(tests.IrisTest):
-
- def test_3(self):
- with mock.patch('warnings.warn') as warn:
- t = pp.BitwiseInt(3)
- self.assertEqual(warn.call_count, 1)
- self.assertEqual(t[0], 3)
- self.assertTrue(t.flag1)
- self.assertTrue(t.flag2)
- self.assertRaises(AttributeError, getattr, t, "flag1024")
-
- def test_setting_flags(self):
- with mock.patch('warnings.warn') as warn:
- t = pp.BitwiseInt(3)
- self.assertEqual(warn.call_count, 1)
- self.assertEqual(t._value, 3)
-
- t.flag1 = False
- self.assertEqual(t._value, 2)
- t.flag2 = False
- self.assertEqual(t._value, 0)
-
- t.flag1 = True
- self.assertEqual(t._value, 1)
- t.flag2 = True
- self.assertEqual(t._value, 3)
-
- self.assertRaises(AttributeError, setattr, t, "flag1024", True)
- self.assertRaises(TypeError, setattr, t, "flag2", 1)
-
- with mock.patch('warnings.warn') as warn:
- t = pp.BitwiseInt(3, num_bits=11)
- self.assertEqual(warn.call_count, 1)
- t.flag1024 = True
- self.assertEqual(t._value, 1027)
-
- def test_standard_operators(self):
- with mock.patch('warnings.warn') as warn:
- t = pp.BitwiseInt(323)
- self.assertEqual(warn.call_count, 1)
-
- self.assertTrue(t == 323)
- self.assertFalse(t == 324)
-
- self.assertFalse(t != 323)
- self.assertTrue(t != 324)
-
- self.assertTrue(t >= 323)
- self.assertFalse(t >= 324)
-
- self.assertFalse(t > 323)
- self.assertTrue(t > 322)
-
- self.assertTrue(t <= 323)
- self.assertFalse(t <= 322)
-
- self.assertFalse(t < 323)
- self.assertTrue(t < 324)
-
- self.assertTrue(t in [323])
- self.assertFalse(t in [324])
-
- def test_323(self):
- with mock.patch('warnings.warn') as warn:
- t = pp.BitwiseInt(323)
- self.assertEqual(warn.call_count, 1)
- self.assertRaises(AttributeError, getattr, t, 'flag0')
-
- self.assertEqual(t.flag1, 1)
- self.assertEqual(t.flag2, 1)
- self.assertEqual(t.flag4, 0)
- self.assertEqual(t.flag8, 0)
- self.assertEqual(t.flag16, 0)
- self.assertEqual(t.flag32, 0)
- self.assertEqual(t.flag64, 1)
- self.assertEqual(t.flag128, 0)
- self.assertEqual(t.flag256, 1)
-
- def test_33214(self):
- with mock.patch('warnings.warn') as warn:
- t = pp.BitwiseInt(33214)
- self.assertEqual(warn.call_count, 1)
- self.assertEqual(t[0], 4)
- self.assertEqual(t.flag1, 0)
- self.assertEqual(t.flag2, 1)
-
- def test_negative_number(self):
- with mock.patch('warnings.warn') as warn:
- try:
- _ = pp.BitwiseInt(-5)
- except ValueError as err:
- self.assertEqual(str(err), 'Negative numbers not supported with splittable integers object')
- self.assertEqual(warn.call_count, 1)
-
- def test_128(self):
- with mock.patch('warnings.warn') as warn:
- t = pp.BitwiseInt(128)
- self.assertEqual(warn.call_count, 1)
- self.assertEqual(t.flag1, 0)
- self.assertEqual(t.flag2, 0)
- self.assertEqual(t.flag4, 0)
- self.assertEqual(t.flag8, 0)
- self.assertEqual(t.flag16, 0)
- self.assertEqual(t.flag32, 0)
- self.assertEqual(t.flag64, 0)
- self.assertEqual(t.flag128, 1)
-
class TestSplittableInt(tests.IrisTest):
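
With the deprecated BitwiseInt gone, decimal-digit access to PP header
integers is covered by SplittableInt, which the retained tests below
exercise. A minimal sketch; the LBTIM-style name mapping is illustrative:

    from iris.fileformats.pp import SplittableInt

    lbtim = SplittableInt(21, {'ia': slice(2, None), 'ib': 1, 'ic': 0})
    assert lbtim[0] == 1  # decimal digits are indexed from the right
    assert (lbtim.ic, lbtim.ib, lbtim.ia) == (1, 2, 0)
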
diff --git a/lib/iris/tests/test_pp_stash.py b/lib/iris/tests/test_pp_stash.py
index 6a9d87e076..303dc4cc7e 100644
--- a/lib/iris/tests/test_pp_stash.py
+++ b/lib/iris/tests/test_pp_stash.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2010 - 2015, Met Office
+# (C) British Crown Copyright 2010 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -83,8 +83,10 @@ def test_irregular_stash_str(self):
self.assertEqual('m01s02i003', iris.fileformats.pp.STASH.from_msi('M01s02i003'))
def test_illegal_stash_str_range(self):
+
self.assertEqual(iris.fileformats.pp.STASH(0, 2, 3), 'm??s02i003')
self.assertNotEqual(iris.fileformats.pp.STASH(0, 2, 3), 'm01s02i003')
+
self.assertEqual('m??s02i003', iris.fileformats.pp.STASH(0, 2, 3))
self.assertNotEqual('m01s02i003', iris.fileformats.pp.STASH(0, 2, 3))
@@ -104,28 +106,30 @@ def test_illegal_stash_stash_range(self):
self.assertEqual(iris.fileformats.pp.STASH(100, 2, 3), iris.fileformats.pp.STASH(999, 2, 3))
def test_illegal_stash_format(self):
- with self.assertRaises(ValueError):
- self.assertEqual(iris.fileformats.pp.STASH(1, 2, 3), 'abc')
- with self.assertRaises(ValueError):
- self.assertEqual('abc', iris.fileformats.pp.STASH(1, 2, 3))
-
- with self.assertRaises(ValueError):
- self.assertEqual(iris.fileformats.pp.STASH(1, 2, 3), 'm01s02003')
- with self.assertRaises(ValueError):
- self.assertEqual('m01s02003', iris.fileformats.pp.STASH(1, 2, 3))
+ test_values = (
+ ('abc', (1, 2, 3)),
+ ('mlotstmin', (1, 2, 3)),
+ ('m01s02003', (1, 2, 3)))
+
+ for (test_value, reference) in test_values:
+ msg = 'Expected STASH code .* {!r}'.format(test_value)
+ with self.assertRaisesRegexp(ValueError, msg):
+ test_value == iris.fileformats.pp.STASH(*reference)
+ with self.assertRaisesRegexp(ValueError, msg):
+ iris.fileformats.pp.STASH(*reference) == test_value
def test_illegal_stash_type(self):
- with self.assertRaises(TypeError):
- self.assertEqual(iris.fileformats.pp.STASH.from_msi(102003), 'm01s02i003')
-
- with self.assertRaises(TypeError):
- self.assertEqual('m01s02i003', iris.fileformats.pp.STASH.from_msi(102003))
-
- with self.assertRaises(TypeError):
- self.assertEqual(iris.fileformats.pp.STASH.from_msi(['m01s02i003']), 'm01s02i003')
-
- with self.assertRaises(TypeError):
- self.assertEqual('m01s02i003', iris.fileformats.pp.STASH.from_msi(['m01s02i003']))
+ test_values = (
+ (102003, 'm01s02i003'),
+ (['m01s02i003'], 'm01s02i003'),
+ )
+
+ for (test_value, reference) in test_values:
+ msg = 'Expected STASH code .* {!r}'.format(test_value)
+ with self.assertRaisesRegexp(TypeError, msg):
+ iris.fileformats.pp.STASH.from_msi(test_value) == reference
+ with self.assertRaisesRegexp(TypeError, msg):
+ reference == iris.fileformats.pp.STASH.from_msi(test_value)
def test_stash_lbuser(self):
stash = iris.fileformats.pp.STASH(2, 32, 456)
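
The rewritten tests above tighten STASH comparison semantics: a malformed
MSI string now raises rather than silently comparing unequal. A short
sketch of the behaviour being asserted:

    from iris.fileformats.pp import STASH

    stash = STASH.from_msi('m01s02i003')
    assert stash == 'm01s02i003'
    # Out-of-range components render, and compare, as wildcards.
    assert STASH(0, 2, 3) == 'm??s02i003'
    # Malformed strings raise instead of comparing False.
    try:
        STASH(1, 2, 3) == 'm01s02003'
    except ValueError:
        pass
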
diff --git a/lib/iris/tests/test_pp_to_cube.py b/lib/iris/tests/test_pp_to_cube.py
index 9c6b5ec9de..e8f8bdc766 100644
--- a/lib/iris/tests/test_pp_to_cube.py
+++ b/lib/iris/tests/test_pp_to_cube.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2010 - 2015, Met Office
+# (C) British Crown Copyright 2010 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -25,7 +25,7 @@
import iris
import iris.fileformats.pp
-import iris.fileformats.pp_rules
+import iris.fileformats.pp_load_rules
import iris.fileformats.rules
import iris.io
import iris.util
@@ -41,7 +41,7 @@ def setUp(self):
def _field_to_cube(self, field):
cube, _, _ = iris.fileformats.rules._make_cube(
- field, iris.fileformats.pp_rules.convert)
+ field, iris.fileformats.pp_load_rules.convert)
return cube
def test_lbtim_2(self):
@@ -52,7 +52,7 @@ def test_lbtim_2(self):
cube = self._field_to_cube(field)
self.subcubes.append(cube)
cube = self.subcubes.merge()[0]
- self.assertCML(cube, ('pp_rules', 'lbtim_2.cml'))
+ self.assertCML(cube, ('pp_load_rules', 'lbtim_2.cml'))
def _ocean_depth(self, bounded=False):
lbuser = list(self.template.lbuser)
@@ -76,12 +76,12 @@ def _ocean_depth(self, bounded=False):
def test_ocean_depth(self):
self._ocean_depth()
cube = self.subcubes.merge()[0]
- self.assertCML(cube, ('pp_rules', 'ocean_depth.cml'))
+ self.assertCML(cube, ('pp_load_rules', 'ocean_depth.cml'))
def test_ocean_depth_bounded(self):
self._ocean_depth(bounded=True)
cube = self.subcubes.merge()[0]
- self.assertCML(cube, ('pp_rules', 'ocean_depth_bounded.cml'))
+ self.assertCML(cube, ('pp_load_rules', 'ocean_depth_bounded.cml'))
class TestReferences(tests.IrisTest):
@@ -133,11 +133,11 @@ def test_pp_load_rules(self):
# Test PP loading and rule evaluation.
cube = iris.tests.stock.simple_pp()
- self.assertCML(cube, ('pp_rules', 'global.cml'))
+ self.assertCML(cube, ('pp_load_rules', 'global.cml'))
data_path = tests.get_data_path(('PP', 'rotated_uk', 'rotated_uk.pp'))
cube = iris.load(data_path)[0]
- self.assertCML(cube, ('pp_rules', 'rotated_uk.cml'))
+ self.assertCML(cube, ('pp_load_rules', 'rotated_uk.cml'))
def test_lbproc(self):
data_path = tests.get_data_path(('PP', 'meanMaxMin', '200806081200__qwpb.T24.pp'))
@@ -145,7 +145,7 @@ def test_lbproc(self):
constraint = iris.Constraint('air_temperature', forecast_period=24)
cubes = iris.load(data_path, constraint)
cubes = iris.cube.CubeList([cubes[0], cubes[3], cubes[1], cubes[2], cubes[4]])
- self.assertCML(cubes, ('pp_rules', 'lbproc_mean_max_min.cml'))
+ self.assertCML(cubes, ('pp_load_rules', 'lbproc_mean_max_min.cml'))
def test_cell_methods(self):
# Test cell methods are created for correct values of lbproc
diff --git a/lib/iris/tests/test_regrid.py b/lib/iris/tests/test_regrid.py
deleted file mode 100644
index 67e82fae80..0000000000
--- a/lib/iris/tests/test_regrid.py
+++ /dev/null
@@ -1,215 +0,0 @@
-# (C) British Crown Copyright 2010 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# import iris tests first so that some things can be initialised before importing anything else
-import iris.tests as tests
-
-import numpy as np
-
-import iris
-from iris import load_cube
-from iris.analysis._interpolate_private import regrid_to_max_resolution
-from iris.cube import Cube
-from iris.coords import DimCoord
-from iris.coord_systems import GeogCS
-
-
-@tests.skip_data
-class TestRegrid(tests.IrisTest):
- @staticmethod
- def patch_data(cube):
- # Workaround until regrid can handle factories
- for factory in cube.aux_factories:
- cube.remove_aux_factory(factory)
-
- # Remove coords that share lat/lon dimensions
- dim = cube.coord_dims(cube.coord('grid_longitude'))[0]
- for coord in cube.coords(contains_dimension=dim, dim_coords=False):
- cube.remove_coord(coord)
- dim = cube.coord_dims(cube.coord('grid_latitude'))[0]
- for coord in cube.coords(contains_dimension=dim, dim_coords=False):
- cube.remove_coord(coord)
-
- def setUp(self):
- self.theta_p_alt_path = tests.get_data_path(
- ('PP', 'COLPEX', 'small_colpex_theta_p_alt.pp'))
- self.theta_constraint = iris.Constraint('air_potential_temperature')
- self.airpress_constraint = iris.Constraint('air_pressure')
- self.level_constraint = iris.Constraint(model_level_number=1)
- self.multi_level_constraint = iris.Constraint(
- model_level_number=lambda c: 1 <= c < 6)
- self.forecast_constraint = iris.Constraint(
- forecast_period=lambda dt: 0.49 < dt < 0.51)
-
- def test_regrid_low_dimensional(self):
- theta = load_cube(
- self.theta_p_alt_path,
- (self.theta_constraint
- & self.level_constraint
- & self.forecast_constraint)
- )
- airpress = load_cube(
- self.theta_p_alt_path,
- (self.airpress_constraint
- & self.level_constraint
- & self.forecast_constraint)
- )
- TestRegrid.patch_data(theta)
- TestRegrid.patch_data(airpress)
-
- # 0-dimensional
- theta_0 = theta[0, 0]
- airpress_0 = airpress[0, 0]
- theta0_regridded = theta_0.regridded(airpress_0, mode='nearest')
- airpress0_regridded = airpress_0.regridded(theta_0, mode='nearest')
- self.assertEqual(theta_0, theta0_regridded)
- self.assertEqual(airpress_0, airpress0_regridded)
- self.assertCMLApproxData(
- theta0_regridded,
- ('regrid', 'theta_on_airpress_0d.cml'))
- self.assertCMLApproxData(
- airpress0_regridded,
- ('regrid', 'airpress_on_theta_0d.cml'))
-
- # 1-dimensional
- theta_1 = theta[0, 1:4]
- airpress_1 = airpress[0, 0:4]
- self.assertCMLApproxData(
- theta_1.regridded(airpress_1, mode='nearest'),
- ('regrid', 'theta_on_airpress_1d.cml'))
- self.assertCMLApproxData(
- airpress_1.regridded(theta_1, mode='nearest'),
- ('regrid', 'airpress_on_theta_1d.cml'))
-
- # 2-dimensional
- theta_2 = theta[1:3, 1:4]
- airpress_2 = airpress[0:4, 0:4]
- self.assertCMLApproxData(
- theta_2.regridded(airpress_2, mode='nearest'),
- ('regrid', 'theta_on_airpress_2d.cml'))
- self.assertCMLApproxData(
- airpress_2.regridded(theta_2, mode='nearest'),
- ('regrid', 'airpress_on_theta_2d.cml'))
-
- def test_regrid_3d(self):
- theta = load_cube(
- self.theta_p_alt_path,
- (self.theta_constraint
- & self.multi_level_constraint
- & self.forecast_constraint)
- )
- airpress = load_cube(
- self.theta_p_alt_path,
- (self.airpress_constraint
- & self.multi_level_constraint
- & self.forecast_constraint)
- )
- TestRegrid.patch_data(theta)
- TestRegrid.patch_data(airpress)
-
- theta_part = theta[:, 1:3, 1:4]
- airpress_part = airpress[:, 0:4, 0:4]
- self.assertCMLApproxData(
- theta_part.regridded(airpress_part, mode='nearest'),
- ('regrid', 'theta_on_airpress_3d.cml'))
- self.assertCMLApproxData(
- airpress_part.regridded(theta_part, mode='nearest'),
- ('regrid', 'airpress_on_theta_3d.cml'))
-
- def test_regrid_max_resolution(self):
- low = Cube(np.arange(12).reshape((3, 4)))
- cs = GeogCS(6371229)
- low.add_dim_coord(DimCoord(np.array([-1, 0, 1], dtype=np.int32), 'latitude', units='degrees', coord_system=cs), 0)
- low.add_dim_coord(DimCoord(np.array([-1, 0, 1, 2], dtype=np.int32), 'longitude', units='degrees', coord_system=cs), 1)
-
- med = Cube(np.arange(20).reshape((4, 5)))
- cs = GeogCS(6371229)
- med.add_dim_coord(DimCoord(np.array([-1, 0, 1, 2], dtype=np.int32), 'latitude', units='degrees', coord_system=cs), 0)
- med.add_dim_coord(DimCoord(np.array([-2, -1, 0, 1, 2], dtype=np.int32), 'longitude', units='degrees', coord_system=cs), 1)
-
- high = Cube(np.arange(30).reshape((5, 6)))
- cs = GeogCS(6371229)
- high.add_dim_coord(DimCoord(np.array([-2, -1, 0, 1, 2], dtype=np.int32), 'latitude', units='degrees', coord_system=cs), 0)
- high.add_dim_coord(DimCoord(np.array([-2, -1, 0, 1, 2, 3], dtype=np.int32), 'longitude', units='degrees', coord_system=cs), 1)
-
- cubes = regrid_to_max_resolution([low, med, high], mode='nearest')
- self.assertCMLApproxData(cubes, ('regrid', 'low_med_high.cml'))
-
-
-class TestRegridBilinear(tests.IrisTest):
- def setUp(self):
- self.cs = GeogCS(6371229)
-
- # Source cube candidate for regridding.
- cube = Cube(np.arange(12, dtype=np.float32).reshape(3, 4), long_name='unknown')
- cube.units = '1'
- cube.add_dim_coord(DimCoord(np.array([1, 2, 3]), 'latitude', units='degrees', coord_system=self.cs), 0)
- cube.add_dim_coord(DimCoord(np.array([1, 2, 3, 4]), 'longitude', units='degrees', coord_system=self.cs), 1)
- self.source = cube
-
- # Cube with a smaller grid in latitude and longitude than the source grid by taking the coordinate mid-points.
- cube = Cube(np.arange(6, dtype=np.float).reshape(2, 3))
- cube.units = '1'
- cube.add_dim_coord(DimCoord(np.array([1.5, 2.5]), 'latitude', units='degrees', coord_system=self.cs), 0)
- cube.add_dim_coord(DimCoord(np.array([1.5, 2.5, 3.5]), 'longitude', units='degrees', coord_system=self.cs), 1)
- self.smaller = cube
-
- # Cube with a larger grid in latitude and longitude than the source grid by taking the coordinate mid-points and extrapolating at extremes.
- cube = Cube(np.arange(20, dtype=np.float).reshape(4, 5))
- cube.units = '1'
- cube.add_dim_coord(DimCoord(np.array([0.5, 1.5, 2.5, 3.5]), 'latitude', units='degrees', coord_system=self.cs), 0)
- cube.add_dim_coord(DimCoord(np.array([0.5, 1.5, 2.5, 3.5, 4.5]), 'longitude', units='degrees', coord_system=self.cs), 1)
- self.larger = cube
-
- def test_bilinear_smaller_lon_left(self):
- # Anchor smaller grid from the first point in longitude and perform mid-point linear interpolation in latitude.
- self.smaller.coord('longitude').points = self.smaller.coord('longitude').points - 0.5
- self.assertCMLApproxData(self.source.regridded(self.smaller), ('regrid', 'bilinear_smaller_lon_align_left.cml'))
-
- def test_bilinear_smaller(self):
- # Perform mid-point bilinear interpolation over both latitude and longitude.
- self.assertCMLApproxData(self.source.regridded(self.smaller), ('regrid', 'bilinear_smaller.cml'))
-
- def test_bilinear_smaller_lon_right(self):
- # Anchor smaller grid from the last point in longitude and perform mid-point linear interpolation in latitude.
- self.smaller.coord('longitude').points = self.smaller.coord('longitude').points + 0.5
- self.assertCMLApproxData(self.source.regridded(self.smaller), ('regrid', 'bilinear_smaller_lon_align_right.cml'))
-
- def test_bilinear_larger_lon_left(self):
- # Extrapolate first point of longitude with others aligned to source grid, and perform linear interpolation with extrapolation over latitude.
- coord = iris.coords.DimCoord(np.array([0.5, 1, 2, 3, 4]), 'longitude', units='degrees', coord_system=self.cs)
- self.larger.remove_coord('longitude')
- self.larger.add_dim_coord(coord, 1)
- self.assertCMLApproxData(self.source.regridded(self.larger), ('regrid', 'bilinear_larger_lon_extrapolate_left.cml'))
-
- def test_bilinear_larger(self):
- # Perform mid-point bi-linear interpolation with extrapolation over latitude and longitude.
- self.assertCMLApproxData(self.source.regridded(self.larger), ('regrid', 'bilinear_larger.cml'))
-
- def test_bilinear_larger_lon_right(self):
- # Extrapolate last point of longitude with others aligned to source grid, and perform linear interpolation with extrapolation over latitude.
- coord = iris.coords.DimCoord(np.array([1, 2, 3, 4, 4.5]), 'longitude', units='degrees', coord_system=self.cs)
- self.larger.remove_coord('longitude')
- self.larger.add_dim_coord(coord, 1)
- self.assertCMLApproxData(self.source.regridded(self.larger), ('regrid', 'bilinear_larger_lon_extrapolate_right.cml'))
-
-
-if __name__ == "__main__":
- tests.main()
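
The deleted test_regrid.py targeted the removed `Cube.regridded` and
`regrid_to_max_resolution` API. A minimal sketch of the same nearest-mode
regrid with the current interface; the grids mirror the deleted fixtures
but are otherwise illustrative:

    import numpy as np
    from iris.analysis import Nearest
    from iris.coord_systems import GeogCS
    from iris.coords import DimCoord
    from iris.cube import Cube

    cs = GeogCS(6371229)

    def grid_cube(data, lats, lons):
        # Helper (hypothetical) building a lat/lon cube on a common sphere.
        cube = Cube(data)
        cube.add_dim_coord(DimCoord(np.array(lats, dtype=np.float64),
                                    'latitude', units='degrees',
                                    coord_system=cs), 0)
        cube.add_dim_coord(DimCoord(np.array(lons, dtype=np.float64),
                                    'longitude', units='degrees',
                                    coord_system=cs), 1)
        return cube

    low = grid_cube(np.arange(12.).reshape(3, 4), [-1, 0, 1], [-1, 0, 1, 2])
    high = grid_cube(np.zeros((5, 6)), [-2, -1, 0, 1, 2], [-2, -1, 0, 1, 2, 3])

    # Formerly: low.regridded(high, mode='nearest')
    result = low.regrid(high, Nearest(extrapolation_mode='extrapolate'))
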
diff --git a/lib/iris/tests/test_verbose_fileformat_rules_logging.py b/lib/iris/tests/test_verbose_fileformat_rules_logging.py
deleted file mode 100644
index 484115bb49..0000000000
--- a/lib/iris/tests/test_verbose_fileformat_rules_logging.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# (C) British Crown Copyright 2010 - 2015, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-
-"""
-Test of the verbose logging functionality for rules processing from
-:mod:`iris.fileformats.rules`
-"""
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# import iris tests first so that some things can be initialised before
-# importing anything else
-import iris.tests as tests
-
-import os
-
-import iris
-import iris.fileformats.pp
-import iris.config as config
-import iris.fileformats.rules as rules
-
-
-@tests.skip_data
-class TestVerboseLogging(tests.IrisTest):
- def test_verbose_logging(self):
- # check that verbose logging no longer breaks in pp.save()
- # load some data, enable logging, and save a cube to PP.
- data_path = tests.get_data_path(('PP', 'simple_pp', 'global.pp'))
- cube = iris.load_cube(data_path)
- rules.log = rules._prepare_rule_logger(verbose=True,
- log_dir='/var/tmp')
-
- # Test writing to a file handle to test that the logger uses the
- # handle name
- with self.temp_filename(suffix='.pp') as mysavefile:
- iris.save(cube, mysavefile)
-
-if __name__ == "__main__":
- tests.main()
diff --git a/lib/iris/tests/unit/analysis/geometry/test_geometry_area_weights.py b/lib/iris/tests/unit/analysis/geometry/test_geometry_area_weights.py
index ff34d9566d..c96ce47c50 100644
--- a/lib/iris/tests/unit/analysis/geometry/test_geometry_area_weights.py
+++ b/lib/iris/tests/unit/analysis/geometry/test_geometry_area_weights.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2014 - 2015, Met Office
+# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -75,7 +75,7 @@ def test_distinct_xy(self):
lat = cube.coord('latitude')
lon.guess_bounds()
lat.guess_bounds()
- from iris.fileformats.rules import regular_step
+ from iris.util import regular_step
quarter = abs(regular_step(lon) * regular_step(lat) * 0.25)
half = abs(regular_step(lon) * regular_step(lat) * 0.5)
minx = 3.7499990463256836
@@ -100,7 +100,7 @@ def test_distinct_xy_bounds(self):
lat = cube.coord('latitude')
lon.guess_bounds()
lat.guess_bounds()
- from iris.fileformats.rules import regular_step
+ from iris.util import regular_step
quarter = abs(regular_step(lon) * regular_step(lat) * 0.25)
half = abs(regular_step(lon) * regular_step(lat) * 0.5)
full = abs(regular_step(lon) * regular_step(lat))
@@ -131,7 +131,7 @@ def test_distinct_xy_bounds_pole(self):
lat = cube.coord('latitude')
lon.guess_bounds()
lat.guess_bounds()
- from iris.fileformats.rules import regular_step
+ from iris.util import regular_step
quarter = abs(regular_step(lon) * regular_step(lat) * 0.25)
half = abs(regular_step(lon) * regular_step(lat) * 0.5)
minx = 3.7499990463256836
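
`regular_step` has simply moved from `iris.fileformats.rules` to
`iris.util`; it still returns the constant spacing of an evenly spaced 1-d
coordinate. A one-line sketch with illustrative points:

    import numpy as np
    from iris.coords import DimCoord
    from iris.util import regular_step

    lon = DimCoord(np.array([0., 10., 20., 30.]), 'longitude',
                   units='degrees')
    assert regular_step(lon) == 10.0
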
diff --git a/lib/iris/tests/unit/analysis/interpolate/test_linear.py b/lib/iris/tests/unit/analysis/interpolate/test_linear.py
deleted file mode 100644
index 6aaeb5aa5b..0000000000
--- a/lib/iris/tests/unit/analysis/interpolate/test_linear.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# (C) British Crown Copyright 2014 - 2017, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""Unit tests for the :func:`iris.analysis.interpolate.linear` function."""
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import iris.tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-from collections import OrderedDict
-
-import numpy as np
-
-import iris
-from iris.analysis._interpolate_private import linear
-from iris.tests import mock
-import iris.tests.stock as stock
-
-
-class Test(tests.IrisTest):
- def setUp(self):
- self.cube = stock.simple_2d()
- self.extrapolation = 'extrapolation_mode'
- self.scheme = mock.Mock(name='linear scheme')
-
- @mock.patch('iris.analysis._interpolate_private.Linear',
- name='linear_patch')
- @mock.patch('iris.cube.Cube.interpolate', name='cube_interp_patch')
- def _assert_expected_call(self, sample_points, sample_points_call,
- cinterp_patch, linear_patch):
- linear_patch.return_value = self.scheme
- linear(self.cube, sample_points, self.extrapolation)
-
- linear_patch.assert_called_once_with(self.extrapolation)
-
- cinterp_patch.assert_called_once_with(sample_points_call, self.scheme)
-
- def test_sample_point_dict(self):
- # Passing sample_points in the form of a dictionary.
- sample_points_call = [('foo', 0.5), ('bar', 0.5)]
- sample_points = OrderedDict(sample_points_call)
- self._assert_expected_call(sample_points, sample_points_call)
-
- def test_sample_point_iterable(self):
- # Passing an iterable sample_points object.
- sample_points = (('foo', 0.5), ('bar', 0.5))
- sample_points_call = sample_points
- self._assert_expected_call(sample_points, sample_points_call)
-
-
-@tests.skip_data
-class Test_masks(tests.IrisTest):
- def test_mask_retention(self):
- cube = stock.realistic_4d_w_missing_data()
- interp_cube = linear(cube, [('pressure', [850, 950])])
- self.assertIsInstance(interp_cube.data, np.ma.MaskedArray)
-
- # this value is masked in the input
- self.assertTrue(cube.data.mask[0, 2, 2, 0])
- # and is still masked in the output
- self.assertTrue(interp_cube.data.mask[0, 1, 2, 0])
-
-
-class TestNDCoords(tests.IrisTest):
- def setUp(self):
- cube = stock.simple_3d_w_multidim_coords()
- cube.add_dim_coord(iris.coords.DimCoord(np.arange(3), 'longitude'), 1)
- cube.add_dim_coord(iris.coords.DimCoord(np.arange(4), 'latitude'), 2)
- cube.data = cube.data.astype(np.float32)
- self.cube = cube
-
- def test_multi(self):
- # Testing interpolation on specified points on cube with
- # multidimensional coordinates.
- interp_cube = linear(self.cube, {'latitude': 1.5, 'longitude': 1.5})
- self.assertCMLApproxData(interp_cube, ('experimental', 'analysis',
- 'interpolate',
- 'linear_nd_2_coords.cml'))
-
- def test_single_extrapolation(self):
- # Interpolation on the 1d coordinate with extrapolation.
- interp_cube = linear(self.cube, {'wibble': np.float32(1.5)})
- expected = ('experimental', 'analysis', 'interpolate',
- 'linear_nd_with_extrapolation.cml')
- self.assertCMLApproxData(interp_cube, expected)
-
- def test_single(self):
- # Interpolation on the 1d coordinate.
- interp_cube = linear(self.cube, {'wibble': 20})
- self.assertArrayEqual(np.mean(self.cube.data, axis=0),
- interp_cube.data)
- self.assertCMLApproxData(interp_cube, ('experimental', 'analysis',
- 'interpolate', 'linear_nd.cml'))
-
-
-if __name__ == "__main__":
- tests.main()
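
The deleted unit tests covered the private `linear()` shim, which (as the
patched call above shows) just delegated to the public interpolation API.
The equivalent direct call, reusing the same stock cube and sample points
as those tests:

    from iris.analysis import Linear
    import iris.tests.stock as stock

    cube = stock.simple_2d()
    # Formerly: linear(cube, sample_points, extrapolation_mode)
    result = cube.interpolate([('foo', 0.5), ('bar', 0.5)], Linear())
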
diff --git a/lib/iris/tests/unit/analysis/interpolate_private/test__nearest_neighbour_indices_ndcoords.py b/lib/iris/tests/unit/analysis/trajectory/test__nearest_neighbour_indices_ndcoords.py
similarity index 94%
rename from lib/iris/tests/unit/analysis/interpolate_private/test__nearest_neighbour_indices_ndcoords.py
rename to lib/iris/tests/unit/analysis/trajectory/test__nearest_neighbour_indices_ndcoords.py
index ae9f691a7f..45220b398a 100644
--- a/lib/iris/tests/unit/analysis/interpolate_private/test__nearest_neighbour_indices_ndcoords.py
+++ b/lib/iris/tests/unit/analysis/trajectory/test__nearest_neighbour_indices_ndcoords.py
@@ -16,7 +16,7 @@
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for
-:meth:`iris.analysis._interpolate_private._nearest_neighbour_indices_ndcoords`.
+:meth:`iris.analysis.trajectory._nearest_neighbour_indices_ndcoords`.
"""
@@ -27,13 +27,12 @@
# importing anything else.
import iris.tests as tests
-import mock
import numpy as np
from iris.cube import Cube
from iris.coords import DimCoord, AuxCoord
-from iris.analysis._interpolate_private import \
+from iris.analysis.trajectory import \
_nearest_neighbour_indices_ndcoords as nn_ndinds
@@ -141,13 +140,9 @@ def test_sample_dictionary(self):
cube.add_aux_coord(co_y, 0)
cube.add_aux_coord(co_x, 1)
sample_point = {'x': 2.8, 'y': 18.5}
- warn_call = self.patch(
- 'iris.analysis._interpolate_private.warn_deprecated')
- result = nn_ndinds(cube, sample_point)
- self.assertEqual(result, [(1, 2)])
- self.assertEqual(warn_call.call_count, 1)
- self.assertIn('dictionary to specify points is deprecated',
- warn_call.call_args[0][0])
+ exp_emsg = 'must be a list of \(coordinate, value\) pairs'
+ with self.assertRaisesRegexp(TypeError, exp_emsg):
+ nn_ndinds(cube, sample_point)
class TestLatlon(tests.IrisTest):
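
As the updated test asserts, the dictionary form of sample points is no
longer accepted here; points must be supplied as (coordinate, value)
pairs:

    sample_points = [('x', 2.8), ('y', 18.5)]   # accepted
    # {'x': 2.8, 'y': 18.5}                     # now raises TypeError
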
diff --git a/lib/iris/tests/unit/coords/test_DimCoord.py b/lib/iris/tests/unit/coords/test_DimCoord.py
index a201f83110..010bb001e9 100644
--- a/lib/iris/tests/unit/coords/test_DimCoord.py
+++ b/lib/iris/tests/unit/coords/test_DimCoord.py
@@ -84,7 +84,7 @@ def test_fail_bounds_shape_mismatch(self):
bds_shape = list(self.bds_real.shape)
bds_shape[0] += 1
bds_wrong = np.zeros(bds_shape)
- msg = 'Bounds shape must be compatible with points shape'
+ msg = 'The shape of the bounds array should be'
with self.assertRaisesRegexp(ValueError, msg):
DimCoord(self.pts_real, bounds=bds_wrong)
@@ -397,10 +397,20 @@ def test_set_real(self):
def test_fail_bad_shape(self):
# Setting real points requires matching shape.
- coord = DimCoord([1.0, 2.0])
+ points = [1.0, 2.0]
+ coord = DimCoord(points)
msg = 'Require data with shape \(2,\), got \(3,\)'
with self.assertRaisesRegexp(ValueError, msg):
coord.points = np.array([1.0, 2.0, 3.0])
+ self.assertArrayEqual(coord.points, points)
+
+ def test_fail_not_monotonic(self):
+ # Setting real points requires that they are monotonic.
+ coord = DimCoord(self.pts_real, bounds=self.bds_real)
+ msg = 'strictly monotonic'
+ with self.assertRaisesRegexp(ValueError, msg):
+ coord.points = np.array([3.0, 1.0, 2.0])
+ self.assertArrayEqual(coord.points, self.pts_real)
def test_set_lazy(self):
# Setting new lazy points realises them.
@@ -441,9 +451,18 @@ def test_set_real(self):
def test_fail_bad_shape(self):
# Setting real points requires matching shape.
coord = DimCoord(self.pts_real, bounds=self.bds_real)
- msg = 'Bounds shape must be compatible with points shape'
+ msg = 'The shape of the bounds array should be'
with self.assertRaisesRegexp(ValueError, msg):
coord.bounds = np.array([1.0, 2.0, 3.0])
+ self.assertArrayEqual(coord.bounds, self.bds_real)
+
+ def test_fail_not_monotonic(self):
+ # Setting real bounds requires that they are monotonic.
+ coord = DimCoord(self.pts_real, bounds=self.bds_real)
+ msg = 'strictly monotonic'
+ with self.assertRaisesRegexp(ValueError, msg):
+ coord.bounds = np.array([[3.0, 2.0], [1.0, 0.0], [2.0, 1.0]])
+ self.assertArrayEqual(coord.bounds, self.bds_real)
def test_set_lazy(self):
# Setting new lazy bounds realises them.
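
The new DimCoord tests pin down two properties of the points/bounds
setters: assigned arrays must be strictly monotonic, and a rejected
assignment leaves the coordinate unchanged. A minimal sketch:

    import numpy as np
    from iris.coords import DimCoord

    coord = DimCoord(np.array([1.0, 2.0, 3.0]))
    try:
        coord.points = np.array([3.0, 1.0, 2.0])  # not strictly monotonic
    except ValueError:
        pass
    # The failed assignment did not corrupt the coordinate.
    assert list(coord.points) == [1.0, 2.0, 3.0]
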
diff --git a/lib/iris/tests/unit/experimental/fieldsfile/__init__.py b/lib/iris/tests/unit/experimental/fieldsfile/__init__.py
deleted file mode 100644
index 8513b08fdb..0000000000
--- a/lib/iris/tests/unit/experimental/fieldsfile/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# (C) British Crown Copyright 2014 - 2015, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""Unit tests for :mod:`iris.experimental.fieldsfile`."""
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
diff --git a/lib/iris/tests/unit/fileformats/ff/test__ff_equivalents.py b/lib/iris/tests/unit/fileformats/ff/test__ff_equivalents.py
deleted file mode 100644
index 27288ded7f..0000000000
--- a/lib/iris/tests/unit/fileformats/ff/test__ff_equivalents.py
+++ /dev/null
@@ -1,264 +0,0 @@
-# (C) British Crown Copyright 2013 - 2016, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""
-Unit tests for :class:`iris.fileformats.ff`.
-
-The real functional tests now all target :class:`iris.fileformats._ff`.
-This is just here to check that the former is a clean interface to the latter.
-
-"""
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# Import iris.tests first so that some things can be initialised before
-# importing anything else.
-import iris.tests as tests
-
-import mock
-import warnings
-
-with warnings.catch_warnings():
- # Also suppress invalid units warnings from the Iris loader code.
- warnings.filterwarnings("ignore")
- import iris.fileformats.ff as ff
-
-
-# A global used to catch call information in our mock constructor calls.
-# Used because, as patches, those calls don't receive the test context
-# (i.e. the call gets a different 'self').
-constructor_calls_data = []
-
-
-class Mixin_ConstructorTest(object):
- # A test mixin for the 'ff' wrapper subclasses creation calls.
- # For each one, patch out the __init__ of the target class in '_ff', and
- # check that the ff class constructor correctly calls this with both
- # unnamed args and keywords.
-
- # Name of the target class (in 'ff'): **inheritor defines**.
- target_class_name = 'name_of_a_class_in_ff'
-
- # Constructor function replacement: **inheritor defines**.
- # @staticmethod
- # def dummy_constructor_call(self):
- # pass
- # N.B. do *not* actually define one here, as oddly this affects the call
- # properties of the overriding methods?
-
- def constructor_setup(self):
- """
- A 'setUp' helper method.
-
- Only defined as a separate method because tests can't inherit an
- *actual* 'setUp' method, for some reason.
-
- """
- # Reset the constructor calls data log.
- global constructor_calls_data
- constructor_calls_data = []
-
- # Define an import string for the __init__ of the corresponding class
- # in '_ff', where the real implementation is.
- tgt_fmt = 'iris.fileformats._ff.{}.__init__'
- patch_target_name = tgt_fmt.format(self.target_class_name)
-
- # Patch the implementation class with the replacement 'dummy'
- # constructor call, to record usage.
- self.patch(patch_target_name, self.dummy_constructor_call)
-
- def check_call(self, target_args, target_keys, expected_result):
- """
- Test instantiation of the target class.
-
- Call with given args and kwargs, and check that the parent class
- (in _ff) got the expected call.
-
- """
- # Invoke the main target while blocking warnings.
- # The parent class is already patched, by the setUp operation.
- with warnings.catch_warnings():
- warnings.filterwarnings("ignore")
- test_class = getattr(ff, self.target_class_name)
- result = test_class(*target_args, **target_keys)
-
- # Check we returned the right type of thing.
- self.assertIsInstance(result, test_class)
-
- # Check the call args that the parent constructor call received.
- # Note: as it is a list, this also ensures we only got *one* call.
- global constructor_calls_data
- self.assertEqual(constructor_calls_data, [expected_result])
-
-
-class Mixin_Grid_Tests(object):
- # A mixin with tests for the 'Grid' derived classes.
-
- @staticmethod
- def dummy_constructor_call(self, column_dependent_constants,
- row_dependent_constants,
- real_constants, horiz_grid_type):
- # A replacement for the 'real' constructor call in the parent class.
- # Used to check for correct args and kwargs in the call.
- # Just record the call arguments in a global, for testing.
- global constructor_calls_data
- # It's global because in use, we do not have the right 'self' here.
- # Note: as we append, it contains a full call history.
- constructor_calls_data += [(column_dependent_constants,
- row_dependent_constants,
- real_constants,
- horiz_grid_type)]
-
- def test__basic(self):
- # Call with four unnamed args.
- self.check_call(['cdc', 'rdc', 'rc', 'ht'], {},
- expected_result=('cdc', 'rdc', 'rc', 'ht'))
-
- def test__all_named_keys(self):
- # Call with four named keys.
- kwargs = {'column_dependent_constants': 1,
- 'row_dependent_constants': 2,
- 'real_constants': 3,
- 'horiz_grid_type': 4}
- self.check_call([], kwargs, expected_result=(1, 2, 3, 4))
-
- def test__badargs(self):
- # Make sure we can catch a bad number of arguments.
- with self.assertRaises(TypeError):
- self.check_call([], {})
-
- def test__badkey(self):
- # Make sure we can catch an unrecognised keyword.
- with self.assertRaises(TypeError):
- self.check_call([], {'_bad_key': 1})
-
-
-class Test_Grid(Mixin_ConstructorTest, Mixin_Grid_Tests, tests.IrisTest):
- target_class_name = 'Grid'
-
- def setUp(self):
- self.constructor_setup()
-
-
-class Test_ArakawaC(Mixin_ConstructorTest, Mixin_Grid_Tests, tests.IrisTest):
- target_class_name = 'ArakawaC'
-
- def setUp(self):
- self.constructor_setup()
-
-
-class Test_ENDGame(Mixin_ConstructorTest, Mixin_Grid_Tests, tests.IrisTest):
- target_class_name = 'ENDGame'
-
- def setUp(self):
- self.constructor_setup()
-
-
-class Test_NewDynamics(Mixin_ConstructorTest, Mixin_Grid_Tests,
- tests.IrisTest):
- target_class_name = 'NewDynamics'
-
- def setUp(self):
- self.constructor_setup()
-
-
-class Test_FFHeader(Mixin_ConstructorTest, tests.IrisTest):
- target_class_name = 'FFHeader'
-
- @staticmethod
- def dummy_constructor_call(self, filename, word_depth=12345):
- # A replacement for the 'real' constructor call in the parent class.
- # Used to check for correct args and kwargs in the call.
- # Just record the call arguments in a global, for testing.
- global constructor_calls_data
- # It's global because in use, we do not have the right 'self' here.
- # Note: as we append, it contains a full call history.
- constructor_calls_data += [(filename, word_depth)]
-
- def setUp(self):
- self.constructor_setup()
-
- def test__basic(self):
- # Call with just a filename.
- # NOTE: this ignores "our" constructor word_depth default, as the
- # default is now re-implemented in the wrapper class definition.
- self.check_call(['filename'], {},
- expected_result=('filename', 12345))
-
- def test__word_depth(self):
- # Call with a word-depth.
- self.check_call(['filename'], {'word_depth': 4},
- expected_result=('filename', 4))
-
- def test__badargs(self):
- # Make sure we can catch a bad number of arguments.
- with self.assertRaises(TypeError):
- self.check_call([], {})
-
- def test__badkey(self):
- # Make sure we can catch an unrecognised keyword.
- with self.assertRaises(TypeError):
- self.check_call([], {'_bad_key': 1})
-
-
-class Test_FF2PP(Mixin_ConstructorTest, tests.IrisTest):
- target_class_name = 'FF2PP'
-
- @staticmethod
- def dummy_constructor_call(self, filename, read_data=False,
- word_depth=12345):
- # A replacement for the 'real' constructor call in the parent class.
- # Used to check for correct args and kwargs in the call.
- # Just record the call arguments in a global, for testing.
- global constructor_calls_data
- # It's global because in use, we do not have the right 'self' here.
- # Note: as we append, it contains a full call history.
- constructor_calls_data += [(filename, read_data, word_depth)]
-
- def setUp(self):
- self.constructor_setup()
-
- def test__basic(self):
- # Call with just a filename.
- # NOTE: this ignores "our" constructor word_depth default, as the
- # default is now re-implemented in the wrapper class definition.
- self.check_call(['filename'], {},
- expected_result=('filename', False, 12345))
-
- def test__read_data(self):
- # Call with read_data given as a positional argument.
- self.check_call(['filename', True], {},
- expected_result=('filename', True, 12345))
-
- def test__word_depth(self):
- # Call with a word-depth keyword.
- self.check_call(['filename'], {'word_depth': 4},
- expected_result=('filename', False, 4))
-
- def test__badargs(self):
- # Make sure we can catch a bad number of arguments.
- with self.assertRaises(TypeError):
- self.check_call([], {})
-
- def test__badkey(self):
- # Make sure we can catch an unrecognised keyword.
- with self.assertRaises(TypeError):
- self.check_call([], {'_bad_key': 1})
-
-
-if __name__ == "__main__":
- tests.main()
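
The deleted mixin's pattern (patching a parent `__init__` and logging its
arguments through a module-level global, because the replacement receives
the instance under construction rather than the test case) is reusable
beyond this file. A self-contained sketch with a toy class (all names
hypothetical):

    from unittest import mock

    calls = []

    class Target(object):
        def __init__(self, a, b=2):
            self.a, self.b = a, b

    def record_init(self, a, b=2):
        # 'self' is the new Target instance, not the test case, so the
        # call is recorded via the module-level list instead.
        calls.append((a, b))

    with mock.patch.object(Target, '__init__', record_init):
        Target(1, b=3)

    assert calls == [(1, 3)]
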
diff --git a/lib/iris/tests/unit/fileformats/grib/test_load_cubes.py b/lib/iris/tests/unit/fileformats/grib/test_load_cubes.py
index da5748ab44..eb39d79dbf 100644
--- a/lib/iris/tests/unit/fileformats/grib/test_load_cubes.py
+++ b/lib/iris/tests/unit/fileformats/grib/test_load_cubes.py
@@ -44,7 +44,7 @@ def test(self):
rules_load.return_value = expected_result
result = load_cubes(files, callback)
kwargs = {}
- loader = Loader(generator, kwargs, converter, None)
+ loader = Loader(generator, kwargs, converter)
rules_load.assert_called_once_with(files, callback, loader)
self.assertIs(result, expected_result)
diff --git a/lib/iris/tests/unit/fileformats/pp/test__LBProc.py b/lib/iris/tests/unit/fileformats/pp/test__LBProc.py
index 64da14ce23..18670874d5 100644
--- a/lib/iris/tests/unit/fileformats/pp/test__LBProc.py
+++ b/lib/iris/tests/unit/fileformats/pp/test__LBProc.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2014 - 2015, Met Office
+# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -210,15 +210,6 @@ def test(self):
self.assertEqual(int(lbproc), 13)
-class Test_flags(tests.IrisTest):
- def test(self):
- lbproc = _LBProc(26)
- with mock.patch('warnings.warn') as warn:
- flags = lbproc.flags
- self.assertEqual(warn.call_count, 1)
- self.assertEqual(flags, (2, 8, 16))
-
-
class Test___repr__(tests.IrisTest):
def test(self):
lbproc = _LBProc(8641)
@@ -231,102 +222,5 @@ def test(self):
self.assertEqual(str(lbproc), '8641')
-class Test___len__(tests.IrisTest):
- def test_zero(self):
- lbproc = _LBProc(0)
- with mock.patch('warnings.warn') as warn:
- length = len(lbproc)
- self.assertEqual(warn.call_count, 1)
- self.assertEqual(length, 1)
-
- def test_positive(self):
- lbproc = _LBProc(24)
- with mock.patch('warnings.warn') as warn:
- length = len(lbproc)
- self.assertEqual(warn.call_count, 1)
- self.assertEqual(length, 2)
-
-
-class Test___getitem__(tests.IrisTest):
- def test_normal_scalar(self):
- lbproc = _LBProc(1234)
- with mock.patch('warnings.warn') as warn:
- digit = lbproc[1]
- self.assertEqual(warn.call_count, 1)
- self.assertEqual(digit, 3)
-
- def test_out_of_bounds_scalar(self):
- lbproc = _LBProc(1234)
- with mock.patch('warnings.warn') as warn:
- digit = lbproc[45]
- self.assertEqual(warn.call_count, 1)
- self.assertEqual(digit, 0)
-
- def test_single_digit_slice(self):
- lbproc = _LBProc(1234)
- with mock.patch('warnings.warn') as warn:
- digit = lbproc[1:2]
- self.assertEqual(warn.call_count, 1)
- self.assertEqual(digit, 3)
-
- def test_double_digit_slice(self):
- lbproc = _LBProc(1234)
- with mock.patch('warnings.warn') as warn:
- digit = lbproc[1:3]
- self.assertEqual(warn.call_count, 1)
- self.assertEqual(digit, 23)
-
- def test_out_of_bounds_slice(self):
- lbproc = _LBProc(1234)
- with mock.patch('warnings.warn') as warn:
- digit = lbproc[10:]
- self.assertEqual(warn.call_count, 1)
- self.assertEqual(digit, 0)
-
-
-class Test___setitem__(tests.IrisTest):
- def test_ok(self):
- lbproc = _LBProc(1234)
- with mock.patch('warnings.warn') as warn:
- lbproc[1] = 9
- self.assertEqual(warn.call_count, 1)
- self.assertEqual(int(lbproc), 1294)
-
- def test_invalid(self):
- lbproc = _LBProc(1234)
- with mock.patch('warnings.warn') as warn:
- with self.assertRaises(ValueError):
- lbproc[1] = 81
- self.assertEqual(warn.call_count, 1)
-
- def test_out_of_bounds_scalar(self):
- lbproc = _LBProc(1234)
- with mock.patch('warnings.warn') as warn:
- lbproc[9] = 4
- self.assertEqual(warn.call_count, 1)
- self.assertEqual(lbproc, 4000001234)
-
- def test_single_digit_slice(self):
- lbproc = _LBProc(1234)
- with mock.patch('warnings.warn') as warn:
- lbproc[1:2] = 6
- self.assertEqual(warn.call_count, 3)
- self.assertEqual(lbproc, 1264)
-
- def test_double_digit_slice(self):
- lbproc = _LBProc(1234)
- with mock.patch('warnings.warn') as warn:
- lbproc[1:3] = 65
- self.assertEqual(warn.call_count, 4)
- self.assertEqual(lbproc, 1654)
-
- def test_out_of_bounds_slice(self):
- lbproc = _LBProc(1234)
- with mock.patch('warnings.warn') as warn:
- lbproc[6:] = 49
- self.assertEqual(warn.call_count, 4)
- self.assertEqual(lbproc, 49001234)
-
-
if __name__ == '__main__':
tests.main()
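
With the deprecated digit and flag accessors removed, `_LBProc` behaves as
a plain integer, so processing flags are best tested bitwise. A sketch;
the flag values (mean=128, minimum=4096, maximum=8192) come from the
lbproc save tests below, and `_LBProc` itself is private, shown for
illustration only:

    from iris.fileformats.pp import _LBProc

    lbproc = _LBProc(128 + 4096)
    flags = int(lbproc)
    assert flags & 128         # time mean
    assert flags & 4096        # minimum
    assert not (flags & 8192)  # maximum
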
diff --git a/lib/iris/tests/unit/fileformats/pp/test_save.py b/lib/iris/tests/unit/fileformats/pp/test_save.py
index 4246834d9d..0784132915 100644
--- a/lib/iris/tests/unit/fileformats/pp/test_save.py
+++ b/lib/iris/tests/unit/fileformats/pp/test_save.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2014 - 2016, Met Office
+# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -26,6 +26,7 @@
from iris.coords import DimCoord, CellMethod
from iris.fileformats._ff_cross_references import STASH_TRANS
import iris.fileformats.pp as pp
+from iris.fileformats.pp_save_rules import _lbproc_rules
from iris.tests import mock
import iris.tests.stock as stock
@@ -162,26 +163,38 @@ def test_um_version(self):
class Test_Save__LbprocProduction(tests.IrisTest):
+ # This test class is a little different to the others.
+ # If it called `pp.save` via `_pp_save_ppfield_values` it would run
+ # `pp_save_rules.verify` and run all the save rules. As this class uses
+ # a 3D cube with a time coord it would run the time rules, which would fail
+ # because the mock object does not set up the `pp.lbtim` attribute
+ # correctly (i.e. as a `SplittableInt` object).
+ # To work around this we call the lbproc rules directly here.
+
def setUp(self):
self.cube = stock.realistic_3d()
+ self.pp_field = mock.MagicMock(spec=pp.PPField3)
+ self.pp_field.HEADER_DEFN = pp.PPField3.HEADER_DEFN
+ self.patch('iris.fileformats.pp.PPField3',
+ return_value=self.pp_field)
def test_no_cell_methods(self):
- lbproc = _pp_save_ppfield_values(self.cube).lbproc
+ lbproc = _lbproc_rules(self.cube, self.pp_field).lbproc
self.assertEqual(lbproc, 0)
def test_mean(self):
self.cube.cell_methods = (CellMethod('mean', 'time', '1 hour'),)
- lbproc = _pp_save_ppfield_values(self.cube).lbproc
+ lbproc = _lbproc_rules(self.cube, self.pp_field).lbproc
self.assertEqual(lbproc, 128)
def test_minimum(self):
self.cube.cell_methods = (CellMethod('minimum', 'time', '1 hour'),)
- lbproc = _pp_save_ppfield_values(self.cube).lbproc
+ lbproc = _lbproc_rules(self.cube, self.pp_field).lbproc
self.assertEqual(lbproc, 4096)
def test_maximum(self):
self.cube.cell_methods = (CellMethod('maximum', 'time', '1 hour'),)
- lbproc = _pp_save_ppfield_values(self.cube).lbproc
+ lbproc = _lbproc_rules(self.cube, self.pp_field).lbproc
self.assertEqual(lbproc, 8192)
diff --git a/lib/iris/tests/analysis/__init__.py b/lib/iris/tests/unit/fileformats/pp_load_rules/__init__.py
similarity index 92%
rename from lib/iris/tests/analysis/__init__.py
rename to lib/iris/tests/unit/fileformats/pp_load_rules/__init__.py
index 8b52364e6a..b102ddcd0f 100644
--- a/lib/iris/tests/analysis/__init__.py
+++ b/lib/iris/tests/unit/fileformats/pp_load_rules/__init__.py
@@ -14,10 +14,7 @@
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""
-Package for testing the iris.analysis package.
-
-"""
+"""Unit tests for the :mod:`iris.fileformats.pp_load_rules` module."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
diff --git a/lib/iris/tests/unit/fileformats/pp_rules/test__all_other_rules.py b/lib/iris/tests/unit/fileformats/pp_load_rules/test__all_other_rules.py
similarity index 97%
rename from lib/iris/tests/unit/fileformats/pp_rules/test__all_other_rules.py
rename to lib/iris/tests/unit/fileformats/pp_load_rules/test__all_other_rules.py
index 5737f2580c..78b9dfa816 100644
--- a/lib/iris/tests/unit/fileformats/pp_rules/test__all_other_rules.py
+++ b/lib/iris/tests/unit/fileformats/pp_load_rules/test__all_other_rules.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2014 - 2016, Met Office
+# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -14,7 +14,10 @@
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""Unit tests for the `iris.fileformats.pp_rules._all_other_rules` function."""
+"""
+Unit tests for the `iris.fileformats.pp_load_rules._all_other_rules` function.
+
+"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
@@ -29,14 +32,14 @@
import cartopy.crs as ccrs
import iris
-from iris.fileformats.pp_rules import _all_other_rules
+from iris.fileformats.pp_load_rules import _all_other_rules
from iris.fileformats.pp import SplittableInt
from iris.coords import CellMethod, DimCoord, AuxCoord
from iris.tests import mock
from iris.tests.unit.fileformats import TestField
-# iris.fileformats.pp_rules._all_other_rules() returns a tuple of
+# iris.fileformats.pp_load_rules._all_other_rules() returns a tuple of
# of various metadata. This constant is the index into this
# tuple to obtain the cell methods.
CELL_METHODS_INDEX = 5
diff --git a/lib/iris/tests/unit/fileformats/pp_rules/test__collapse_degenerate_points_and_bounds.py b/lib/iris/tests/unit/fileformats/pp_load_rules/test__collapse_degenerate_points_and_bounds.py
similarity index 94%
rename from lib/iris/tests/unit/fileformats/pp_rules/test__collapse_degenerate_points_and_bounds.py
rename to lib/iris/tests/unit/fileformats/pp_load_rules/test__collapse_degenerate_points_and_bounds.py
index f032b2b8cf..4417182dec 100644
--- a/lib/iris/tests/unit/fileformats/pp_rules/test__collapse_degenerate_points_and_bounds.py
+++ b/lib/iris/tests/unit/fileformats/pp_load_rules/test__collapse_degenerate_points_and_bounds.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2014 - 2015, Met Office
+# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -16,7 +16,7 @@
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for
-:func:`iris.fileformats.pp_rules._collapse_degenerate_points_and_bounds`.
+:func:`iris.fileformats.pp_load_rules._collapse_degenerate_points_and_bounds`.
"""
@@ -29,7 +29,8 @@
import numpy as np
-from iris.fileformats.pp_rules import _collapse_degenerate_points_and_bounds
+from iris.fileformats.pp_load_rules import \
+ _collapse_degenerate_points_and_bounds
class Test(tests.IrisTest):
diff --git a/lib/iris/tests/unit/fileformats/pp_rules/test__convert_scalar_pseudo_level_coords.py b/lib/iris/tests/unit/fileformats/pp_load_rules/test__convert_scalar_pseudo_level_coords.py
similarity index 87%
rename from lib/iris/tests/unit/fileformats/pp_rules/test__convert_scalar_pseudo_level_coords.py
rename to lib/iris/tests/unit/fileformats/pp_load_rules/test__convert_scalar_pseudo_level_coords.py
index b876574667..83140476a3 100644
--- a/lib/iris/tests/unit/fileformats/pp_rules/test__convert_scalar_pseudo_level_coords.py
+++ b/lib/iris/tests/unit/fileformats/pp_load_rules/test__convert_scalar_pseudo_level_coords.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2014 - 2015, Met Office
+# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -16,7 +16,7 @@
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for
-:func:`iris.fileformats.pp_rules._convert_pseudo_level_coords`.
+:func:`iris.fileformats.pp_load_rules._convert_pseudo_level_coords`.
"""
@@ -30,7 +30,7 @@
from iris.coords import DimCoord
from iris.tests.unit.fileformats import TestField
-from iris.fileformats.pp_rules import _convert_scalar_pseudo_level_coords
+from iris.fileformats.pp_load_rules import _convert_scalar_pseudo_level_coords
class Test(TestField):
diff --git a/lib/iris/tests/unit/fileformats/pp_rules/test__convert_scalar_realization_coords.py b/lib/iris/tests/unit/fileformats/pp_load_rules/test__convert_scalar_realization_coords.py
similarity index 87%
rename from lib/iris/tests/unit/fileformats/pp_rules/test__convert_scalar_realization_coords.py
rename to lib/iris/tests/unit/fileformats/pp_load_rules/test__convert_scalar_realization_coords.py
index acf8038b3c..5f6f4a4324 100644
--- a/lib/iris/tests/unit/fileformats/pp_rules/test__convert_scalar_realization_coords.py
+++ b/lib/iris/tests/unit/fileformats/pp_load_rules/test__convert_scalar_realization_coords.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2014 - 2015, Met Office
+# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -16,7 +16,7 @@
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for
-:func:`iris.fileformats.pp_rules._convert_scalar_realization_coords`.
+:func:`iris.fileformats.pp_load_rules._convert_scalar_realization_coords`.
"""
@@ -30,7 +30,7 @@
from iris.coords import DimCoord
from iris.tests.unit.fileformats import TestField
-from iris.fileformats.pp_rules import _convert_scalar_realization_coords
+from iris.fileformats.pp_load_rules import _convert_scalar_realization_coords
class Test(TestField):
diff --git a/lib/iris/tests/unit/fileformats/pp_rules/test__convert_time_coords.py b/lib/iris/tests/unit/fileformats/pp_load_rules/test__convert_time_coords.py
similarity index 99%
rename from lib/iris/tests/unit/fileformats/pp_rules/test__convert_time_coords.py
rename to lib/iris/tests/unit/fileformats/pp_load_rules/test__convert_time_coords.py
index 0944064530..f531a7a593 100644
--- a/lib/iris/tests/unit/fileformats/pp_rules/test__convert_time_coords.py
+++ b/lib/iris/tests/unit/fileformats/pp_load_rules/test__convert_time_coords.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2014 - 2016, Met Office
+# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -16,7 +16,7 @@
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for
-:func:`iris.fileformats.pp_rules._convert_time_coords`.
+:func:`iris.fileformats.pp_load_rules._convert_time_coords`.
"""
@@ -33,7 +33,7 @@
from iris.coords import DimCoord, AuxCoord
from iris.fileformats.pp import SplittableInt
-from iris.fileformats.pp_rules import _convert_time_coords
+from iris.fileformats.pp_load_rules import _convert_time_coords
from iris.tests import mock
from iris.tests.unit.fileformats import TestField
diff --git a/lib/iris/tests/unit/fileformats/pp_rules/test__convert_vertical_coords.py b/lib/iris/tests/unit/fileformats/pp_load_rules/test__convert_vertical_coords.py
similarity index 99%
rename from lib/iris/tests/unit/fileformats/pp_rules/test__convert_vertical_coords.py
rename to lib/iris/tests/unit/fileformats/pp_load_rules/test__convert_vertical_coords.py
index e1612c4bbc..03fb30f8ba 100644
--- a/lib/iris/tests/unit/fileformats/pp_rules/test__convert_vertical_coords.py
+++ b/lib/iris/tests/unit/fileformats/pp_load_rules/test__convert_vertical_coords.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2014 - 2015, Met Office
+# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -16,7 +16,7 @@
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for
-:func:`iris.fileformats.pp_rules._convert_vertical_coords`.
+:func:`iris.fileformats.pp_load_rules._convert_vertical_coords`.
"""
@@ -32,7 +32,7 @@
from iris.coords import DimCoord, AuxCoord
from iris.aux_factory import HybridPressureFactory, HybridHeightFactory
from iris.fileformats.pp import SplittableInt, STASH
-from iris.fileformats.pp_rules import Reference, _convert_vertical_coords
+from iris.fileformats.pp_load_rules import Reference, _convert_vertical_coords
from iris.tests.unit.fileformats import TestField
diff --git a/lib/iris/tests/unit/fileformats/pp_rules/test__dim_or_aux.py b/lib/iris/tests/unit/fileformats/pp_load_rules/test__dim_or_aux.py
similarity index 91%
rename from lib/iris/tests/unit/fileformats/pp_rules/test__dim_or_aux.py
rename to lib/iris/tests/unit/fileformats/pp_load_rules/test__dim_or_aux.py
index 7cbe05d620..3eaa77026e 100644
--- a/lib/iris/tests/unit/fileformats/pp_rules/test__dim_or_aux.py
+++ b/lib/iris/tests/unit/fileformats/pp_load_rules/test__dim_or_aux.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2014 - 2015, Met Office
+# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -14,7 +14,7 @@
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""Unit tests for :func:`iris.fileformats.pp_rules._dim_or_aux`."""
+"""Unit tests for :func:`iris.fileformats.pp_load_rules._dim_or_aux`."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
@@ -24,7 +24,7 @@
import iris.tests as tests
from iris.coords import DimCoord, AuxCoord
-from iris.fileformats.pp_rules import _dim_or_aux
+from iris.fileformats.pp_load_rules import _dim_or_aux
class Test(tests.IrisTest):
diff --git a/lib/iris/tests/unit/fileformats/pp_rules/test__model_level_number.py b/lib/iris/tests/unit/fileformats/pp_load_rules/test__model_level_number.py
similarity index 85%
rename from lib/iris/tests/unit/fileformats/pp_rules/test__model_level_number.py
rename to lib/iris/tests/unit/fileformats/pp_load_rules/test__model_level_number.py
index 37de979c94..3970eff1df 100644
--- a/lib/iris/tests/unit/fileformats/pp_rules/test__model_level_number.py
+++ b/lib/iris/tests/unit/fileformats/pp_load_rules/test__model_level_number.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2014 - 2015, Met Office
+# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -14,7 +14,10 @@
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""Unit tests for :func:`iris.fileformats.pp_rules._model_level_number`."""
+"""
+Unit tests for :func:`iris.fileformats.pp_load_rules._model_level_number`.
+
+"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
@@ -23,7 +26,7 @@
# importing anything else.
import iris.tests as tests
-from iris.fileformats.pp_rules import _model_level_number
+from iris.fileformats.pp_load_rules import _model_level_number
class Test_9999(tests.IrisTest):
diff --git a/lib/iris/tests/unit/fileformats/pp_rules/test__reduced_points_and_bounds.py b/lib/iris/tests/unit/fileformats/pp_load_rules/test__reduced_points_and_bounds.py
similarity index 95%
rename from lib/iris/tests/unit/fileformats/pp_rules/test__reduced_points_and_bounds.py
rename to lib/iris/tests/unit/fileformats/pp_load_rules/test__reduced_points_and_bounds.py
index a2c598afce..85ce2f7217 100644
--- a/lib/iris/tests/unit/fileformats/pp_rules/test__reduced_points_and_bounds.py
+++ b/lib/iris/tests/unit/fileformats/pp_load_rules/test__reduced_points_and_bounds.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2014 - 2015, Met Office
+# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -16,7 +16,7 @@
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for
-:func:`iris.fileformats.pp_rules._reduce_points_and_bounds`.
+:func:`iris.fileformats.pp_load_rules._reduce_points_and_bounds`.
"""
@@ -29,7 +29,7 @@
import numpy as np
-from iris.fileformats.pp_rules import _reduce_points_and_bounds
+from iris.fileformats.pp_load_rules import _reduce_points_and_bounds
class Test(tests.IrisTest):
diff --git a/lib/iris/tests/unit/fileformats/pp_rules/test__reshape_vector_args.py b/lib/iris/tests/unit/fileformats/pp_load_rules/test__reshape_vector_args.py
similarity index 96%
rename from lib/iris/tests/unit/fileformats/pp_rules/test__reshape_vector_args.py
rename to lib/iris/tests/unit/fileformats/pp_load_rules/test__reshape_vector_args.py
index 8f81135490..35bbce376b 100644
--- a/lib/iris/tests/unit/fileformats/pp_rules/test__reshape_vector_args.py
+++ b/lib/iris/tests/unit/fileformats/pp_load_rules/test__reshape_vector_args.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2014 - 2015, Met Office
+# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -16,7 +16,7 @@
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for
-:func:`iris.fileformats.pp_rules._reshape_vector_args`.
+:func:`iris.fileformats.pp_load_rules._reshape_vector_args`.
"""
@@ -29,7 +29,7 @@
import numpy as np
-from iris.fileformats.pp_rules import _reshape_vector_args
+from iris.fileformats.pp_load_rules import _reshape_vector_args
class TestEmpty(tests.IrisTest):
diff --git a/lib/iris/tests/unit/fileformats/pp_rules/test_convert.py b/lib/iris/tests/unit/fileformats/pp_load_rules/test_convert.py
similarity index 98%
rename from lib/iris/tests/unit/fileformats/pp_rules/test_convert.py
rename to lib/iris/tests/unit/fileformats/pp_load_rules/test_convert.py
index 8374d0be76..aa406c8c4b 100644
--- a/lib/iris/tests/unit/fileformats/pp_rules/test_convert.py
+++ b/lib/iris/tests/unit/fileformats/pp_load_rules/test_convert.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2013 - 2016, Met Office
+# (C) British Crown Copyright 2013 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -14,7 +14,7 @@
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""Unit tests for :func:`iris.fileformats.pp_rules.convert`."""
+"""Unit tests for :func:`iris.fileformats.pp_load_rules.convert`."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
@@ -30,7 +30,7 @@
import cf_units
import numpy as np
-from iris.fileformats.pp_rules import convert
+from iris.fileformats.pp_load_rules import convert
from iris.util import guess_coord_axis
from iris.fileformats.pp import SplittableInt
from iris.fileformats.pp import STASH
diff --git a/lib/iris/tests/unit/fileformats/pp_rules/__init__.py b/lib/iris/tests/unit/fileformats/pp_rules/__init__.py
deleted file mode 100644
index 8a4714b9e6..0000000000
--- a/lib/iris/tests/unit/fileformats/pp_rules/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# (C) British Crown Copyright 2013 - 2015, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""Unit tests for the :mod:`iris.fileformats.pp_rules` module."""
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
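The renames above all track a single module move: helpers that previously lived in `iris.fileformats.pp_rules` are now imported from `iris.fileformats.pp_load_rules`. A minimal sketch of the corresponding import update (assuming an Iris build with this change applied; `convert` is one of the functions exercised by the renamed tests):

    # Old location, removed by this change:
    #   from iris.fileformats.pp_rules import convert
    # New location:
    from iris.fileformats.pp_load_rules import convert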
diff --git a/lib/iris/tests/unit/fileformats/rules/test_Loader.py b/lib/iris/tests/unit/fileformats/rules/test_Loader.py
index 63a69b31e9..a8c3d6b854 100644
--- a/lib/iris/tests/unit/fileformats/rules/test_Loader.py
+++ b/lib/iris/tests/unit/fileformats/rules/test_Loader.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2015, Met Office
+# (C) British Crown Copyright 2015 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -39,34 +39,17 @@ def test_normal(self):
self.assertIs(loader.field_generator_kwargs,
mock.sentinel.GEN_FUNC_KWARGS)
self.assertIs(loader.converter, mock.sentinel.CONVERTER)
- self.assertIs(loader.legacy_custom_rules, None)
def test_normal_with_explicit_none(self):
with mock.patch('warnings.warn') as warn:
loader = Loader(mock.sentinel.GEN_FUNC,
mock.sentinel.GEN_FUNC_KWARGS,
- mock.sentinel.CONVERTER, None)
+ mock.sentinel.CONVERTER)
self.assertEqual(warn.call_count, 0)
self.assertIs(loader.field_generator, mock.sentinel.GEN_FUNC)
self.assertIs(loader.field_generator_kwargs,
mock.sentinel.GEN_FUNC_KWARGS)
self.assertIs(loader.converter, mock.sentinel.CONVERTER)
- self.assertIs(loader.legacy_custom_rules, None)
-
- def test_deprecated_custom_rules(self):
- with mock.patch('warnings.warn') as warn:
- loader = Loader(mock.sentinel.GEN_FUNC,
- mock.sentinel.GEN_FUNC_KWARGS,
- mock.sentinel.CONVERTER,
- mock.sentinel.CUSTOM_RULES)
- self.assertEqual(warn.call_count, 1)
- self.assertEqual(warn.call_args[0][0],
- 'The `legacy_custom_rules` attribute is deprecated.')
- self.assertIs(loader.field_generator, mock.sentinel.GEN_FUNC)
- self.assertIs(loader.field_generator_kwargs,
- mock.sentinel.GEN_FUNC_KWARGS)
- self.assertIs(loader.converter, mock.sentinel.CONVERTER)
- self.assertIs(loader.legacy_custom_rules, mock.sentinel.CUSTOM_RULES)
if __name__ == '__main__':
diff --git a/lib/iris/tests/unit/fileformats/test_rules.py b/lib/iris/tests/unit/fileformats/test_rules.py
index 0831ff959f..39f6ab35c6 100644
--- a/lib/iris/tests/unit/fileformats/test_rules.py
+++ b/lib/iris/tests/unit/fileformats/test_rules.py
@@ -129,7 +129,7 @@ def converter(field):
return ConversionMetadata([factory], [], '', '', '', {}, [], [],
[])
# Finish by making a fake Loader
- fake_loader = Loader(field_generator, {}, converter, None)
+ fake_loader = Loader(field_generator, {}, converter)
cubes = load_cubes(['fake_filename'], None, fake_loader)
# Check the result is a generator with a single entry.
@@ -210,7 +210,7 @@ def converter(field):
src.cell_methods, dim_coords_and_dims,
aux_coords_and_dims)
# Finish by making a fake Loader
- fake_loader = Loader(field_generator, {}, converter, None)
+ fake_loader = Loader(field_generator, {}, converter)
cubes = load_cubes(['fake_filename'], None, fake_loader)
# Check the result is a generator containing two Cubes.
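With the deprecated `legacy_custom_rules` argument gone, `iris.fileformats.rules.Loader` is built from exactly three things, as the updated tests show: a field generator, its keyword arguments, and a converter. A minimal sketch (the generator and converter below are hypothetical placeholders, not real Iris helpers):

    from iris.fileformats.rules import Loader, load_cubes

    def field_generator(filename):
        # Hypothetical: return an iterable of raw fields from 'filename'.
        return []

    def converter(field):
        # Hypothetical: return ConversionMetadata describing 'field'.
        raise NotImplementedError

    loader = Loader(field_generator, {}, converter)  # no fourth argument
    cubes = load_cubes(['some_filename'], None, loader)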
diff --git a/lib/iris/tests/unit/analysis/interpolate/__init__.py b/lib/iris/tests/unit/fileformats/um/fast_load/__init__.py
similarity index 86%
rename from lib/iris/tests/unit/analysis/interpolate/__init__.py
rename to lib/iris/tests/unit/fileformats/um/fast_load/__init__.py
index a78d155387..451e19c8d8 100644
--- a/lib/iris/tests/unit/analysis/interpolate/__init__.py
+++ b/lib/iris/tests/unit/fileformats/um/fast_load/__init__.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2014 - 2015, Met Office
+# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -14,7 +14,10 @@
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""Unit tests for the :mod:`iris.analysis.interpolate` package."""
+"""
+Unit tests for the module :mod:`iris.fileformats.um._fast_load`.
+
+"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
diff --git a/lib/iris/tests/unit/experimental/fieldsfile/test__convert_collation.py b/lib/iris/tests/unit/fileformats/um/fast_load/test__convert_collation.py
similarity index 98%
rename from lib/iris/tests/unit/experimental/fieldsfile/test__convert_collation.py
rename to lib/iris/tests/unit/fileformats/um/fast_load/test__convert_collation.py
index d8edbc3df7..78a9c5b8b5 100644
--- a/lib/iris/tests/unit/experimental/fieldsfile/test__convert_collation.py
+++ b/lib/iris/tests/unit/fileformats/um/fast_load/test__convert_collation.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2014 - 2015, Met Office
+# (C) British Crown Copyright 2014 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -14,7 +14,7 @@
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""Unit tests for :func:`iris.experimental.fieldsfile._convert_collation`."""
+"""Unit tests for :func:`iris.fileformats.um._fast_load._convert_collation`."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
@@ -27,7 +27,7 @@
import netcdftime
import numpy as np
-from iris.experimental.fieldsfile \
+from iris.fileformats.um._fast_load \
import _convert_collation as convert_collation
import iris.aux_factory
import iris.coord_systems
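As this rename records, the experimental fieldsfile collation code now lives in the private module `iris.fileformats.um._fast_load`. A sketch of the updated import (shown only to illustrate the new location; being private, it is not part of the supported API):

    # Old location, removed:
    #   from iris.experimental.fieldsfile import _convert_collation
    # New location:
    from iris.fileformats.um._fast_load import _convert_collation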
diff --git a/lib/iris/tests/unit/test_sample_data_path.py b/lib/iris/tests/unit/test_sample_data_path.py
index 2c7d45d71d..a941543151 100644
--- a/lib/iris/tests/unit/test_sample_data_path.py
+++ b/lib/iris/tests/unit/test_sample_data_path.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2016, Met Office
+# (C) British Crown Copyright 2016 - 2017, Met Office
#
# This file is part of Iris.
#
@@ -31,7 +31,6 @@
import mock
from iris import sample_data_path
-from iris._deprecation import IrisDeprecation
def _temp_file(sample_dir):
@@ -61,57 +60,42 @@ def test_call(self):
result = sample_data_path(os.path.basename(sample_file))
self.assertEqual(result, sample_file)
-
-class TestConfig(tests.IrisTest):
- def setUp(self):
- # Force iris_sample_data to be unavailable.
- self.patch('iris.iris_sample_data', None)
- # All of our tests are going to run with SAMPLE_DATA_DIR
- # redirected to a temporary directory.
- self.sample_dir = tempfile.mkdtemp()
- patcher = mock.patch('iris.config.SAMPLE_DATA_DIR', self.sample_dir)
- patcher.start()
- self.addCleanup(patcher.stop)
-
- def tearDown(self):
- shutil.rmtree(self.sample_dir)
-
- def test_file_ok(self):
- sample_path = _temp_file(self.sample_dir)
- result = sample_data_path(os.path.basename(sample_path))
- self.assertEqual(result, sample_path)
-
def test_file_not_found(self):
- with self.assertRaisesRegexp(ValueError, 'Sample data .* not found'):
- sample_data_path('foo')
+ with mock.patch('iris_sample_data.path', self.sample_dir):
+ with self.assertRaisesRegexp(ValueError,
+ 'Sample data .* not found'):
+ sample_data_path('foo')
def test_file_absolute(self):
- with self.assertRaisesRegexp(ValueError, 'Absolute path'):
- sample_data_path(os.path.abspath('foo'))
+ with mock.patch('iris_sample_data.path', self.sample_dir):
+ with self.assertRaisesRegexp(ValueError, 'Absolute path'):
+ sample_data_path(os.path.abspath('foo'))
def test_glob_ok(self):
sample_path = _temp_file(self.sample_dir)
sample_glob = '?' + os.path.basename(sample_path)[1:]
- result = sample_data_path(sample_glob)
- self.assertEqual(result, os.path.join(self.sample_dir, sample_glob))
+ with mock.patch('iris_sample_data.path', self.sample_dir):
+ result = sample_data_path(sample_glob)
+ self.assertEqual(result, os.path.join(self.sample_dir,
+ sample_glob))
def test_glob_not_found(self):
- with self.assertRaisesRegexp(ValueError, 'Sample data .* not found'):
- sample_data_path('foo.*')
+ with mock.patch('iris_sample_data.path', self.sample_dir):
+ with self.assertRaisesRegexp(ValueError,
+ 'Sample data .* not found'):
+ sample_data_path('foo.*')
def test_glob_absolute(self):
- with self.assertRaisesRegexp(ValueError, 'Absolute path'):
- sample_data_path(os.path.abspath('foo.*'))
+ with mock.patch('iris_sample_data.path', self.sample_dir):
+ with self.assertRaisesRegexp(ValueError, 'Absolute path'):
+ sample_data_path(os.path.abspath('foo.*'))
- def test_warn_deprecated(self):
- sample_path = _temp_file(self.sample_dir)
- with mock.patch('warnings.warn') as warn:
- sample_data_path(os.path.basename(sample_path))
- self.assertEqual(warn.call_count, 1)
- (warn_msg, warn_exception), _ = warn.call_args
- msg = 'iris.config.SAMPLE_DATA_DIR was deprecated'
- self.assertTrue(warn_msg.startswith(msg))
- self.assertEqual(warn_exception, IrisDeprecation)
+
+class TestIrisSampleDataMissing(tests.IrisTest):
+ def test_no_iris_sample_data(self):
+ self.patch('iris.iris_sample_data', None)
+ with self.assertRaisesRegexp(ImportError, 'Please install'):
+ sample_data_path('')
if __name__ == '__main__':
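The new `TestIrisSampleDataMissing` case pins down the changed behaviour: with the `iris_sample_data` package absent, `iris.sample_data_path` now raises `ImportError` instead of falling back with a deprecation warning. A minimal sketch of tolerating a missing sample-data package ('air_temp.pp' is used purely as an illustrative file name):

    import iris

    try:
        path = iris.sample_data_path('air_temp.pp')
    except ImportError:
        # The 'iris_sample_data' package is not installed.
        path = None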
diff --git a/lib/iris/tests/unit/unit/__init__.py b/lib/iris/tests/unit/unit/__init__.py
deleted file mode 100644
index 8e119a957f..0000000000
--- a/lib/iris/tests/unit/unit/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# (C) British Crown Copyright 2014 - 2015, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""Unit tests for the :mod:`cf_units` module."""
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
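The deleted `lib/iris/tests/unit/unit` package tested the long-deprecated `iris.unit` wrapper; equivalent behaviour is now covered by the standalone `cf_units` project. A minimal sketch using `cf_units` directly (assumes a cf_units release contemporary with this change is installed):

    import cf_units

    # The same calendar normalisation the removed tests exercised:
    u = cf_units.Unit('hours since 1970-01-01 00:00:00', calendar='GrEgoRian')
    print(u.calendar)  # -> 'gregorian'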
diff --git a/lib/iris/tests/unit/unit/test_Unit.py b/lib/iris/tests/unit/unit/test_Unit.py
deleted file mode 100644
index dfc7b6ffda..0000000000
--- a/lib/iris/tests/unit/unit/test_Unit.py
+++ /dev/null
@@ -1,223 +0,0 @@
-# (C) British Crown Copyright 2014 - 2015, Met Office
-#
-# This file is part of Iris.
-#
-# Iris is free software: you can redistribute it and/or modify it under
-# the terms of the GNU Lesser General Public License as published by the
-# Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Iris is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with Iris. If not, see <http://www.gnu.org/licenses/>.
-"""Unit tests for the `cf_units.Unit` class."""
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-
-# import iris tests first so that some things can be initialised before
-# importing anything else
-import iris.tests as tests
-
-import numpy as np
-
-import cf_units
-from cf_units import Unit
-
-
-class Test___init__(tests.IrisTest):
-
- def test_capitalised_calendar(self):
- calendar = 'GrEgoRian'
- expected = cf_units.CALENDAR_GREGORIAN
- u = Unit('hours since 1970-01-01 00:00:00', calendar=calendar)
- self.assertEqual(u.calendar, expected)
-
- def test_not_basestring_calendar(self):
- with self.assertRaises(TypeError):
- u = Unit('hours since 1970-01-01 00:00:00', calendar=5)
-
-
-class Test_convert__calendar(tests.IrisTest):
-
- class MyStr(str):
- pass
-
- def test_gregorian_calendar_conversion_setup(self):
- # Reproduces a situation where a unit's gregorian calendar would not
- # match (using the `is` operator) to the literal string 'gregorian',
- # causing an `is not` test to return a false negative.
- cal_str = cf_units.CALENDAR_GREGORIAN
- calendar = self.MyStr(cal_str)
- self.assertIsNot(calendar, cal_str)
- u1 = Unit('hours since 1970-01-01 00:00:00', calendar=calendar)
- u2 = Unit('hours since 1969-11-30 00:00:00', calendar=calendar)
- u1point = np.array([8.], dtype=np.float32)
- expected = np.array([776.], dtype=np.float32)
- result = u1.convert(u1point, u2)
- return expected, result
-
- def test_gregorian_calendar_conversion_array(self):
- expected, result = self.test_gregorian_calendar_conversion_setup()
- self.assertArrayEqual(expected, result)
-
- def test_gregorian_calendar_conversion_dtype(self):
- expected, result = self.test_gregorian_calendar_conversion_setup()
- self.assertEqual(expected.dtype, result.dtype)
-
- def test_gregorian_calendar_conversion_shape(self):
- expected, result = self.test_gregorian_calendar_conversion_setup()
- self.assertEqual(expected.shape, result.shape)
-
- def test_non_gregorian_calendar_conversion_dtype(self):
- data = np.arange(4, dtype=np.float32)
- u1 = Unit('hours since 2000-01-01 00:00:00', calendar='360_day')
- u2 = Unit('hours since 2000-01-02 00:00:00', calendar='360_day')
- result = u1.convert(data, u2)
- self.assertEqual(result.dtype, np.float32)
-
-
-class Test_convert__endianness_time(tests.IrisTest):
- # Test the behaviour of converting time units of differing
- # dtype endianness.
-
- def setUp(self):
- self.time1_array = np.array([31.5, 32.5, 33.5])
- self.time2_array = np.array([0.5, 1.5, 2.5])
- self.time1_unit = cf_units.Unit('days since 1970-01-01 00:00:00',
- calendar=cf_units.CALENDAR_STANDARD)
- self.time2_unit = cf_units.Unit('days since 1970-02-01 00:00:00',
- calendar=cf_units.CALENDAR_STANDARD)
-
- def test_no_endian(self):
- dtype = 'f8'
- result = self.time1_unit.convert(self.time1_array.astype(dtype),
- self.time2_unit)
- self.assertArrayAlmostEqual(result, self.time2_array)
-
- def test_little_endian(self):
- dtype = '<f8'
diff --git a/lib/iris/unit.py b/lib/iris/unit.py
deleted file mode 100644
--- a/lib/iris/unit.py
+++ /dev/null
-"""
-.. deprecated:: 1.9
- This module has been deprecated. Please use `cf_units
- <https://github.com/SciTools/cf_units>`_ instead.
-
-Units of measure.
-
-Provision of a wrapper class to support Unidata/UCAR UDUNITS-2, and the
-netcdftime calendar functionality.
-
-See also: `UDUNITS-2
-<http://www.unidata.ucar.edu/software/udunits/udunits-2/udunits2.html>`_.
-
-"""
-
-from __future__ import (absolute_import, division, print_function)
-from six.moves import (filter, input, map, range, zip) # noqa
-import six
-
-from contextlib import contextmanager
-import copy
-import ctypes
-import ctypes.util
-import os.path
-import sys
-import warnings
-
-import netcdftime
-import numpy as np
-
-from iris._deprecation import warn_deprecated
-import iris.config
-import iris.util
-
-
-warn_deprecated('iris.unit is deprecated in Iris v1.9. Please use cf_units '
- '(https://github.com/SciTools/cf_units) instead.')
-
-
-__all__ = ['Unit', 'date2num', 'decode_time', 'encode_clock', 'encode_date',
- 'encode_time', 'num2date']
-
-
-########################################################################
-#
-# module level constants
-#
-########################################################################
-
-#
-# default constants
-#
-IRIS_EPOCH = '1970-01-01 00:00:00'
-_STRING_BUFFER_DEPTH = 128
-_UNKNOWN_UNIT_STRING = 'unknown'
-_UNKNOWN_UNIT_SYMBOL = '?'
-_UNKNOWN_UNIT = [_UNKNOWN_UNIT_STRING, _UNKNOWN_UNIT_SYMBOL, '???', '']
-_NO_UNIT_STRING = 'no_unit'
-_NO_UNIT_SYMBOL = '-'
-_NO_UNIT = [_NO_UNIT_STRING, _NO_UNIT_SYMBOL, 'no unit', 'no-unit', 'nounit']
-_UNIT_DIMENSIONLESS = '1'
-_OP_SINCE = ' since '
-_CATEGORY_UNKNOWN, _CATEGORY_NO_UNIT, _CATEGORY_UDUNIT = range(3)
-
-
-#
-# libudunits2 constants
-#
-# ut_status enumerations
-_UT_STATUS = ['UT_SUCCESS', 'UT_BAD_ARG', 'UT_EXISTS', 'UT_NO_UNIT',
- 'UT_OS', 'UT_NOT_SAME_NAME', 'UT_MEANINGLESS', 'UT_NO_SECOND',
- 'UT_VISIT_ERROR', 'UT_CANT_FORMAT', 'UT_SYNTAX', 'UT_UNKNOWN',
- 'UT_OPEN_ARG', 'UT_OPEN_ENV', 'UT_OPEN_DEFAULT', 'UT_PARSE']
-
-# explicit function names
-_UT_HANDLER = 'ut_set_error_message_handler'
-_UT_IGNORE = 'ut_ignore'
-
-# ut_encoding enumerations
-UT_ASCII = 0
-UT_ISO_8859_1 = 1
-UT_LATIN1 = 1
-UT_UTF8 = 2
-UT_NAMES = 4
-UT_DEFINITION = 8
-
-UT_FORMATS = [UT_ASCII, UT_ISO_8859_1, UT_LATIN1, UT_UTF8, UT_NAMES,
- UT_DEFINITION]
-
-#
-# netcdftime constants
-#
-CALENDAR_STANDARD = 'standard'
-CALENDAR_GREGORIAN = 'gregorian'
-CALENDAR_PROLEPTIC_GREGORIAN = 'proleptic_gregorian'
-CALENDAR_NO_LEAP = 'noleap'
-CALENDAR_JULIAN = 'julian'
-CALENDAR_ALL_LEAP = 'all_leap'
-CALENDAR_365_DAY = '365_day'
-CALENDAR_366_DAY = '366_day'
-CALENDAR_360_DAY = '360_day'
-
-CALENDARS = [CALENDAR_STANDARD, CALENDAR_GREGORIAN,
- CALENDAR_PROLEPTIC_GREGORIAN, CALENDAR_NO_LEAP, CALENDAR_JULIAN,
- CALENDAR_ALL_LEAP, CALENDAR_365_DAY, CALENDAR_366_DAY,
- CALENDAR_360_DAY]
-
-#
-# ctypes types
-#
-FLOAT32 = ctypes.c_float
-FLOAT64 = ctypes.c_double
-
-########################################################################
-#
-# module level variables
-#
-########################################################################
-
-# cache for ctypes foreign shared library handles
-_lib_c = None
-_lib_ud = None
-_ud_system = None
-
-# cache for libc shared library functions
-_strerror = None
-
-# class cache for libudunits2 shared library functions
-_cv_convert_float = None
-_cv_convert_floats = None
-_cv_convert_double = None
-_cv_convert_doubles = None
-_cv_free = None
-_ut_are_convertible = None
-_ut_clone = None
-_ut_compare = None
-_ut_decode_time = None
-_ut_divide = None
-_ut_encode_clock = None
-_ut_encode_date = None
-_ut_encode_time = None
-_ut_format = None
-_ut_free = None
-_ut_get_converter = None
-_ut_get_status = None
-_ut_get_unit_by_name = None
-_ut_ignore = None
-_ut_invert = None
-_ut_is_dimensionless = None
-_ut_log = None
-_ut_multiply = None
-_ut_offset = None
-_ut_offset_by_time = None
-_ut_parse = None
-_ut_raise = None
-_ut_read_xml = None
-_ut_root = None
-_ut_scale = None
-_ut_set_error_message_handler = None
-
-########################################################################
-#
-# module level statements
-#
-########################################################################
-
-#
-# load the libc shared library
-#
-if _lib_c is None:
- if sys.platform == 'win32':
- _lib_c = ctypes.cdll.msvcrt
- else:
- _lib_c = ctypes.CDLL(ctypes.util.find_library('libc'))
-
- #
- # cache common shared library functions
- #
- _strerror = _lib_c.strerror
- _strerror.restype = ctypes.c_char_p
-
-#
-# load the libudunits2 shared library
-#
-if _lib_ud is None:
- _lib_ud = iris.config.get_option(
- 'System', 'udunits2_path',
- default=ctypes.util.find_library('udunits2'))
- _lib_ud = ctypes.CDLL(_lib_ud, use_errno=True)
-
- #
- # cache common shared library functions
- #
- _cv_convert_float = _lib_ud.cv_convert_float
- _cv_convert_float.argtypes = [ctypes.c_void_p, ctypes.c_float]
- _cv_convert_float.restype = ctypes.c_float
-
- _cv_convert_floats = _lib_ud.cv_convert_floats
- _cv_convert_floats.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
- ctypes.c_ulong, ctypes.c_void_p]
- _cv_convert_floats.restype = ctypes.c_void_p
-
- _cv_convert_double = _lib_ud.cv_convert_double
- _cv_convert_double.argtypes = [ctypes.c_void_p, ctypes.c_double]
- _cv_convert_double.restype = ctypes.c_double
-
- _cv_convert_doubles = _lib_ud.cv_convert_doubles
- _cv_convert_doubles.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
- ctypes.c_ulong, ctypes.c_void_p]
- _cv_convert_doubles.restype = ctypes.c_void_p
-
- _cv_free = _lib_ud.cv_free
- _cv_free.argtypes = [ctypes.c_void_p]
-
- _ut_are_convertible = _lib_ud.ut_are_convertible
- _ut_are_convertible.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
-
- _ut_clone = _lib_ud.ut_clone
- _ut_clone.argtypes = [ctypes.c_void_p]
- _ut_clone.restype = ctypes.c_void_p
-
- _ut_compare = _lib_ud.ut_compare
- _ut_compare.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
- _ut_compare.restype = ctypes.c_int
-
- _ut_decode_time = _lib_ud.ut_decode_time
- _ut_decode_time.restype = None
-
- _ut_divide = _lib_ud.ut_divide
- _ut_divide.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
- _ut_divide.restype = ctypes.c_void_p
-
- _ut_encode_clock = _lib_ud.ut_encode_clock
- _ut_encode_clock.restype = ctypes.c_double
-
- _ut_encode_date = _lib_ud.ut_encode_date
- _ut_encode_date.restype = ctypes.c_double
-
- _ut_encode_time = _lib_ud.ut_encode_time
- _ut_encode_time.restype = ctypes.c_double
-
- _ut_format = _lib_ud.ut_format
- _ut_format.argtypes = [ctypes.c_void_p, ctypes.c_char_p,
- ctypes.c_ulong, ctypes.c_uint]
-
- _ut_free = _lib_ud.ut_free
- _ut_free.argtypes = [ctypes.c_void_p]
- _ut_free.restype = None
-
- _ut_get_converter = _lib_ud.ut_get_converter
- _ut_get_converter.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
- _ut_get_converter.restype = ctypes.c_void_p
-
- _ut_get_status = _lib_ud.ut_get_status
-
- _ut_get_unit_by_name = _lib_ud.ut_get_unit_by_name
- _ut_get_unit_by_name.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
- _ut_get_unit_by_name.restype = ctypes.c_void_p
-
- _ut_invert = _lib_ud.ut_invert
- _ut_invert.argtypes = [ctypes.c_void_p]
- _ut_invert.restype = ctypes.c_void_p
-
- _ut_is_dimensionless = _lib_ud.ut_is_dimensionless
- _ut_is_dimensionless.argtypes = [ctypes.c_void_p]
-
- _ut_log = _lib_ud.ut_log
- _ut_log.argtypes = [ctypes.c_double, ctypes.c_void_p]
- _ut_log.restype = ctypes.c_void_p
-
- _ut_multiply = _lib_ud.ut_multiply
- _ut_multiply.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
- _ut_multiply.restype = ctypes.c_void_p
-
- _ut_offset = _lib_ud.ut_offset
- _ut_offset.argtypes = [ctypes.c_void_p, ctypes.c_double]
- _ut_offset.restype = ctypes.c_void_p
-
- _ut_offset_by_time = _lib_ud.ut_offset_by_time
- _ut_offset_by_time.argtypes = [ctypes.c_void_p, ctypes.c_double]
- _ut_offset_by_time.restype = ctypes.c_void_p
-
- _ut_parse = _lib_ud.ut_parse
- _ut_parse.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int]
- _ut_parse.restype = ctypes.c_void_p
-
- _ut_raise = _lib_ud.ut_raise
- _ut_raise.argtypes = [ctypes.c_void_p, ctypes.c_int]
- _ut_raise.restype = ctypes.c_void_p
-
- _ut_read_xml = _lib_ud.ut_read_xml
- _ut_read_xml.argtypes = [ctypes.c_char_p]
- _ut_read_xml.restype = ctypes.c_void_p
-
- _ut_root = _lib_ud.ut_root
- _ut_root.argtypes = [ctypes.c_void_p, ctypes.c_int]
- _ut_root.restype = ctypes.c_void_p
-
- _ut_scale = _lib_ud.ut_scale
- _ut_scale.argtypes = [ctypes.c_double, ctypes.c_void_p]
- _ut_scale.restype = ctypes.c_void_p
-
- # convenience dictionary for the Unit convert method
- _cv_convert_scalar = {FLOAT32: _cv_convert_float,
- FLOAT64: _cv_convert_double}
- _cv_convert_array = {FLOAT32: _cv_convert_floats,
- FLOAT64: _cv_convert_doubles}
- _numpy2ctypes = {np.float32: FLOAT32, np.float64: FLOAT64}
- _ctypes2numpy = {v: k for k, v in _numpy2ctypes.items()}
-#
-# load the UDUNITS-2 xml-formatted unit-database
-#
-if not _ud_system:
- _func_type = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_char_p,
- use_errno=True)
- _set_handler_type = ctypes.CFUNCTYPE(_func_type, _func_type)
- _ut_set_error_message_handler = _set_handler_type((_UT_HANDLER, _lib_ud))
- _ut_ignore = _func_type((_UT_IGNORE, _lib_ud))
- # ignore standard UDUNITS-2 start-up preamble redirected to stderr stream
- _default_handler = _ut_set_error_message_handler(_ut_ignore)
- # Load the unit-database from the default location (modified via
- # the UDUNITS2_XML_PATH environment variable) and if that fails look
- # relative to sys.prefix to support environments such as conda.
- _ud_system = _ut_read_xml(None)
- if _ud_system is None:
- _alt_xml_path = os.path.join(sys.prefix, 'share',
- 'udunits', 'udunits2.xml')
- _ud_system = _ut_read_xml(_alt_xml_path.encode())
- # reinstate old error handler
- _ut_set_error_message_handler(_default_handler)
- del _func_type
- if not _ud_system:
- _status_msg = 'UNKNOWN'
- _error_msg = ''
- _status = _ut_get_status()
- try:
- _status_msg = _UT_STATUS[_status]
- except IndexError:
- pass
- _errno = ctypes.get_errno()
- if _errno != 0:
- _error_msg = ': "%s"' % _strerror(_errno)
- ctypes.set_errno(0)
- raise OSError('[%s] Failed to open UDUNITS-2 XML unit database %s' % (
- _status_msg, _error_msg))
-
-
-########################################################################
-#
-# module level function definitions
-#
-########################################################################
-
-def encode_time(year, month, day, hour, minute, second):
- """
- Return date/clock time encoded as a double precision value.
-
- Encoding performed using UDUNITS-2 hybrid Gregorian/Julian calendar.
- Dates on or after 1582-10-15 are assumed to be Gregorian dates;
- dates before that are assumed to be Julian dates. In particular, the
- year 1 BCE is immediately followed by the year 1 CE.
-
- Args:
-
- * year (int):
- Year value to be encoded.
- * month (int):
- Month value to be encoded.
- * day (int):
- Day value to be encoded.
- * hour (int):
- Hour value to be encoded.
- * minute (int):
- Minute value to be encoded.
- * second (int):
- Second value to be encoded.
-
- Returns:
- float.
-
- For example:
-
- >>> import cf_units as unit
- >>> unit.encode_time(1970, 1, 1, 0, 0, 0)
- -978307200.0
-
- """
-
- return _ut_encode_time(ctypes.c_int(year), ctypes.c_int(month),
- ctypes.c_int(day), ctypes.c_int(hour),
- ctypes.c_int(minute), ctypes.c_double(second))
-
-
-def encode_date(year, month, day):
- """
- Return date encoded as a double precision value.
-
- Encoding performed using UDUNITS-2 hybrid Gregorian/Julian calendar.
- Dates on or after 1582-10-15 are assumed to be Gregorian dates;
- dates before that are assumed to be Julian dates. In particular, the
- year 1 BCE is immediately followed by the year 1 CE.
-
- Args:
-
- * year (int):
- Year value to be encoded.
- * month (int):
- Month value to be encoded.
- * day (int):
- Day value to be encoded.
-
- Returns:
- float.
-
- For example:
-
- >>> import cf_units as unit
- >>> unit.encode_date(1970, 1, 1)
- -978307200.0
-
- """
-
- return _ut_encode_date(ctypes.c_int(year), ctypes.c_int(month),
- ctypes.c_int(day))
-
-
-def encode_clock(hour, minute, second):
- """
- Return clock time encoded as a double precision value.
-
- Args:
-
- * hour (int):
- Hour value to be encoded.
- * minute (int):
- Minute value to be encoded.
- * second (int):
- Second value to be encoded.
-
- Returns:
- float.
-
- For example:
-
- >>> import cf_units as unit
- >>> unit.encode_clock(0, 0, 0)
- 0.0
-
- """
-
- return _ut_encode_clock(ctypes.c_int(hour), ctypes.c_int(minute),
- ctypes.c_double(second))
-
-
-def decode_time(time):
- """
- Decode a double precision date/clock time value into its component
- parts and return as tuple.
-
- Decode time into its year, month, day, hour, minute, second, and
- resolution component parts, where resolution is the uncertainty of
- the time in seconds.
-
- Args:
-
- * time (float): Date/clock time encoded as a double precision value.
-
- Returns:
- tuple of (year, month, day, hour, minute, second, resolution).
-
- For example:
-
- >>> import cf_units as unit
- >>> unit.decode_time(unit.encode_time(1970, 1, 1, 0, 0, 0))
- (1970, 1, 1, 0, 0, 0.0, 1.086139178596568e-07)
-
- """
-
- year = ctypes.c_int()
- month = ctypes.c_int()
- day = ctypes.c_int()
- hour = ctypes.c_int()
- minute = ctypes.c_int()
- second = ctypes.c_double()
- resolution = ctypes.c_double()
- _ut_decode_time(ctypes.c_double(time), ctypes.pointer(year),
- ctypes.pointer(month), ctypes.pointer(day),
- ctypes.pointer(hour), ctypes.pointer(minute),
- ctypes.pointer(second), ctypes.pointer(resolution))
- return (year.value, month.value, day.value, hour.value, minute.value,
- second.value, resolution.value)
-
-
-def julian_day2date(julian_day, calendar):
- """
- Return a netcdftime datetime-like object representing the Julian day.
-
- If calendar is 'standard' or 'gregorian', Julian day follows
- Julian calendar on and before 1582-10-5, Gregorian calendar after
- 1582-10-15.
- If calendar is 'proleptic_gregorian', Julian Day follows Gregorian
- calendar.
- If calendar is 'julian', Julian Day follows Julian calendar.
-
- The datetime object is a 'real' datetime object if the date falls in
- the Gregorian calendar (i.e. calendar is 'proleptic_gregorian', or
- calendar is 'standard'/'gregorian' and the date is after 1582-10-15).
- Otherwise, it's a 'phony' datetime object which is actually an instance
- of netcdftime.datetime.
-
- Algorithm:
- Meeus, Jean (1998) Astronomical Algorithms (2nd Edition).
- Willmann-Bell, Virginia. p. 63.
-
- Args:
-
- * julian_day (float):
- Julian day with a resolution of 1 second.
- * calendar (string):
- Name of the calendar, see cf_units.CALENDARS.
-
- Returns:
- datetime or netcdftime.datetime.
-
- For example:
-
- >>> import cf_units as unit
- >>> import datetime
- >>> unit.julian_day2date(
- ... unit.date2julian_day(datetime.datetime(1970, 1, 1, 0, 0, 0),
- ... unit.CALENDAR_STANDARD),
- ... unit.CALENDAR_STANDARD)
- datetime.datetime(1970, 1, 1, 0, 0)
-
- """
-
- return netcdftime.DateFromJulianDay(julian_day, calendar)
-
-
-def date2julian_day(date, calendar):
- """
- Return the Julian day (resolution of 1 second) from a netcdftime
- datetime-like object.
-
- If calendar is 'standard' or 'gregorian', Julian day follows Julian
- calendar on and before 1582-10-5, Gregorian calendar after 1582-10-15.
- If calendar is 'proleptic_gregorian', Julian day follows Gregorian
- calendar.
- If calendar is 'julian', Julian day follows Julian calendar.
-
- Algorithm:
- Meeus, Jean (1998) Astronomical Algorithms (2nd Edition).
- Willmann-Bell, Virginia. p. 63.
-
- Args:
-
- * date (netcdftime.date):
- Date and time representation.
- * calendar (string):
- Name of the calendar, see cf_units.CALENDARS.
-
- Returns:
- float.
-
- For example:
-
- >>> import cf_units as unit
- >>> import datetime
- >>> unit.date2julian_day(datetime.datetime(1970, 1, 1, 0, 0, 0),
- ... unit.CALENDAR_STANDARD)
- 2440587.5
-
- """
-
- return netcdftime.JulianDayFromDate(date, calendar)
-
-
-def date2num(date, unit, calendar):
- """
- Return numeric time value (resolution of 1 second) encoding of
- datetime object.
-
- The units of the numeric time values are described by the unit and
- calendar arguments. The datetime objects must be in UTC with no
- time-zone offset. If there is a time-zone offset in unit, it will be
- applied to the returned numeric values.
-
- Like the :func:`matplotlib.dates.date2num` function, except that it allows
- for different units and calendars. Behaves the same as if
- unit = 'days since 0001-01-01 00:00:00' and
- calendar = 'proleptic_gregorian'.
-
- Args:
-
- * date (datetime):
- A datetime object or a sequence of datetime objects.
- The datetime objects should not include a time-zone offset.
- * unit (string):
- A string of the form '<time-unit> since <time-origin>' describing
- the time units. The <time-unit> can be days, hours, minutes or
- seconds. The <time-origin> is a date/time reference point. A valid choice
- would be unit='hours since 1800-01-01 00:00:00 -6:00'.
- * calendar (string):
- Name of the calendar, see cf_units.CALENDARS.
-
- Returns:
- float, or numpy.ndarray of float.
-
- For example:
-
- >>> import cf_units as unit
- >>> import datetime
- >>> dt1 = datetime.datetime(1970, 1, 1, 6, 0, 0)
- >>> dt2 = datetime.datetime(1970, 1, 1, 7, 0, 0)
- >>> unit.date2num(dt1, 'hours since 1970-01-01 00:00:00',
- ... unit.CALENDAR_STANDARD)
- 6.0
- >>> unit.date2num([dt1, dt2], 'hours since 1970-01-01 00:00:00',
- ... unit.CALENDAR_STANDARD)
- array([ 6., 7.])
-
- """
-
- #
- # ensure to strip out any 'UTC' postfix which is generated by
- # UDUNITS-2 formatted output and causes the netcdftime parser
- # to choke
- #
- unit_string = unit.rstrip(" UTC")
- if unit_string.endswith(" since epoch"):
- unit_string = unit_string.replace("epoch", IRIS_EPOCH)
- cdftime = netcdftime.utime(unit_string, calendar=calendar)
- return cdftime.date2num(date)
-
-
-def num2date(time_value, unit, calendar):
- """
- Return datetime encoding of numeric time value (resolution of 1 second).
-
- The units of the numeric time value are described by the unit and
- calendar arguments. The returned datetime object represent UTC with
- no time-zone offset, even if the specified unit contain a time-zone
- offset.
-
- Like the :func:`matplotlib.dates.num2date` function, except that it allows
- for different units and calendars. Behaves the same if
- unit = 'days since 0001-01-01 00:00:00' and
- calendar = 'proleptic_gregorian'.
-
- The datetime instances returned are 'real' python datetime
- objects if the date falls in the Gregorian calendar (i.e.
- calendar='proleptic_gregorian', or calendar = 'standard' or 'gregorian'
- and the date is after 1582-10-15). Otherwise, they are 'phony' datetime
- objects which support some but not all the methods of 'real' python
- datetime objects. This is because the python datetime module cannot
- use the 'proleptic_gregorian' calendar, even before the switch
- occurred from the Julian calendar in 1582. The datetime instances
- do not contain a time-zone offset, even if the specified unit
- contains one.
-
- Args:
-
- * time_value (float):
- Numeric time value/s. Maximum resolution is 1 second.
- * unit (string):
- A string of the form '<time-unit> since <time-origin>'
- describing the time units. The <time-unit> can be days, hours,
- minutes or seconds. The <time-origin> is the date/time reference
- point. A valid choice would be
- unit='hours since 1800-01-01 00:00:00 -6:00'.
- * calendar (string):
- Name of the calendar, see cf_units.CALENDARS.
-
- Returns:
- datetime, or numpy.ndarray of datetime object.
-
- For example:
-
- >>> import cf_units as unit
- >>> import datetime
- >>> unit.num2date(6, 'hours since 1970-01-01 00:00:00',
- ... unit.CALENDAR_STANDARD)
- datetime.datetime(1970, 1, 1, 6, 0)
- >>> unit.num2date([6, 7], 'hours since 1970-01-01 00:00:00',
- ... unit.CALENDAR_STANDARD)
- array([datetime.datetime(1970, 1, 1, 6, 0),
- datetime.datetime(1970, 1, 1, 7, 0)], dtype=object)
-
- """
-
- #
- # ensure to strip out any 'UTC' postfix which is generated by
- # UDUNITS-2 formatted output and causes the netcdftime parser
- # to choke
- #
- unit_string = unit.rstrip(" UTC")
- if unit_string.endswith(" since epoch"):
- unit_string = unit_string.replace("epoch", IRIS_EPOCH)
- cdftime = netcdftime.utime(unit_string, calendar=calendar)
- return cdftime.num2date(time_value)
-
-
-def _handler(func):
- """Set the error message handler."""
-
- _ut_set_error_message_handler(func)
-
-
-@contextmanager
-def suppress_unit_warnings():
- """
- Suppresses all warnings raised because of invalid units in loaded data.
-
- """
- # Suppress any warning messages raised by UDUNITS2.
- _func_type = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_char_p,
- use_errno=True)
- _set_handler_type = ctypes.CFUNCTYPE(_func_type, _func_type)
- _ut_set_error_message_handler = _set_handler_type((_UT_HANDLER, _lib_ud))
- _ut_ignore = _func_type((_UT_IGNORE, _lib_ud))
- _default_handler = _ut_set_error_message_handler(_ut_ignore)
- with warnings.catch_warnings():
- # Also suppress invalid units warnings from the Iris loader code.
- warnings.filterwarnings("ignore", message=".*invalid units")
- yield
- _ut_set_error_message_handler(_default_handler)
-
-
-########################################################################
-#
-# unit wrapper class for unidata/ucar UDUNITS-2
-#
-########################################################################
-
-def _Unit(category, ut_unit, calendar=None, origin=None):
- unit = iris.util._OrderedHashable.__new__(Unit)
- unit._init(category, ut_unit, calendar, origin)
- return unit
-
-
-_CACHE = {}
-
-
-def as_unit(unit):
- """
- Returns a Unit corresponding to the given unit.
-
- .. note::
-
- If the given unit is already a Unit it will be returned unchanged.
-
- """
- if isinstance(unit, Unit):
- result = unit
- else:
- result = None
- use_cache = isinstance(unit, six.string_types) or unit is None
- if use_cache:
- result = _CACHE.get(unit)
- if result is None:
- result = Unit(unit)
- if use_cache:
- _CACHE[unit] = result
- return result
-
-
-def is_time(unit):
- """
- Determine whether the unit is a related SI Unit of time.
-
- Args:
-
- * unit (string/Unit): Unit to be compared.
-
- Returns:
- Boolean.
-
- For example:
-
- >>> import cf_units as unit
- >>> unit.is_time('hours')
- True
- >>> unit.is_time('meters')
- False
-
- """
- return as_unit(unit).is_time()
-
-
-def is_vertical(unit):
- """
- Determine whether the unit is a related SI Unit of pressure or distance.
-
- Args:
-
- * unit (string/Unit): Unit to be compared.
-
- Returns:
- Boolean.
-
- For example:
-
- >>> import cf_units as unit
- >>> unit.is_vertical('millibar')
- True
- >>> unit.is_vertical('km')
- True
-
- """
- return as_unit(unit).is_vertical()
-
-
-class Unit(iris.util._OrderedHashable):
- """
- A class to represent S.I. units and support common operations to
- manipulate such units in a consistent manner as per UDUNITS-2.
-
- These operations include scaling the unit, offsetting the unit by a
- constant or time, inverting the unit, raising the unit by a power,
- taking a root of the unit, taking a log of the unit, multiplying the
- unit by a constant or another unit, dividing the unit by a constant
- or another unit, comparing units, copying units and converting unit
- data to single precision or double precision floating point numbers.
-
- This class also supports time and calendar definition and manipulation.
-
- """
- # Declare the attribute names relevant to the _OrderedHashable behaviour.
- _names = ('category', 'ut_unit', 'calendar', 'origin')
-
- category = None
- 'Is this an unknown unit, a no-unit, or a UDUNITS-2 unit.'
-
- ut_unit = None
- 'Reference to the ctypes quantity defining the UDUNITS-2 unit.'
-
- calendar = None
- 'Represents the unit calendar name, see cf_units.CALENDARS'
-
- origin = None
- 'The original string used to create this unit.'
-
- __slots__ = ()
-
- def __init__(self, unit, calendar=None):
- """
- Create a wrapper instance for UDUNITS-2.
-
- An optional calendar may be provided for a unit which defines a
- time reference of the form '<time-unit> since <time-origin>'
- i.e. unit='days since 1970-01-01 00:00:00'. For a unit that is a
- time reference, the default calendar is 'standard'.
-
- Accepted calendars are as follows,
-
- * 'standard' or 'gregorian' - Mixed Gregorian/Julian calendar as
- defined by udunits.
- * 'proleptic_gregorian' - A Gregorian calendar extended to dates
- before 1582-10-15. A year is a leap year if either,
-
- 1. It is divisible by 4 but not by 100, or
- 2. It is divisible by 400.
-
- * 'noleap' or '365_day' - A Gregorian calendar without leap
- years i.e. all years are 365 days long.
- * 'all_leap' or '366_day' - A Gregorian calendar with every year
- being a leap year i.e. all years are 366 days long.
- * '360_day' - All years are 360 days divided into 30 day months.
- * 'julian' - Proleptic Julian calendar, extended to dates after
- 1582-10-5. A year is a leap year if it is divisible by 4.
-
- Args:
-
- * unit:
- Specify the unit as defined by UDUNITS-2.
- * calendar (string):
- Describes the calendar used in time calculations. The
- default is 'standard' or 'gregorian' for a time reference
- unit.
-
- Returns:
-
- Unit object.
-
- Units should be set to "no_unit" for values which are strings.
- Units can also be set to "unknown" (or None).
- For example:
-
- >>> from cf_units import Unit
- >>> volts = Unit('volts')
- >>> no_unit = Unit('no_unit')
- >>> unknown = Unit('unknown')
- >>> unknown = Unit(None)
-
- """
- ut_unit = None
- calendar_ = None
-
- if unit is None:
- unit = ''
- else:
- unit = str(unit).strip()
-
- if unit.lower().endswith(' utc'):
- unit = unit[:unit.lower().rfind(' utc')]
-
- if unit.endswith(" since epoch"):
- unit = unit.replace("epoch", IRIS_EPOCH)
-
- if unit.lower() in _UNKNOWN_UNIT:
- # TODO - removing the option of an unknown unit. Currently
- # the auto generated MOSIG rules are missing units on a
- # number of phenomena which would lead to errors.
- # Will be addressed by work on metadata translation.
- category = _CATEGORY_UNKNOWN
- unit = _UNKNOWN_UNIT_STRING
- elif unit.lower() in _NO_UNIT:
- category = _CATEGORY_NO_UNIT
- unit = _NO_UNIT_STRING
- else:
- category = _CATEGORY_UDUNIT
- ut_unit = _ut_parse(_ud_system, unit.encode('ascii'), UT_ASCII)
- # _ut_parse returns 0 on failure
- if ut_unit is None:
- self._raise_error('Failed to parse unit "%s"' % unit)
- if _OP_SINCE in unit.lower():
- if calendar is None:
- calendar_ = CALENDAR_GREGORIAN
- elif isinstance(calendar, six.string_types):
- if calendar.lower() in CALENDARS:
- calendar_ = calendar.lower()
- else:
- msg = '{!r} is an unsupported calendar.'
- raise ValueError(msg.format(calendar))
- else:
- msg = 'Expected string-like calendar argument, got {!r}.'
- raise TypeError(msg.format(type(calendar)))
-
- self._init(category, ut_unit, calendar_, unit)
-
- def _raise_error(self, msg):
- """
- Retrieve the UDUNITS-2 ut_status, the implementation-defined string
- corresponding to UDUNITS-2 errno and raise generic exception.
-
- """
- status_msg = 'UNKNOWN'
- error_msg = ''
- if _lib_ud:
- status = _ut_get_status()
- try:
- status_msg = _UT_STATUS[status]
- except IndexError:
- pass
- errno = ctypes.get_errno()
- if errno != 0:
- error_msg = ': "%s"' % _strerror(errno)
- ctypes.set_errno(0)
-
- raise ValueError('[%s] %s %s' % (status_msg, msg, error_msg))
-
- # NOTE:
- # "__getstate__" and "__setstate__" functions are defined here to
- # provide a custom interface for Pickle
- # : Pickle "normal" behaviour is just to save/reinstate the object
- # dictionary
- # : that won't work here, because the "ut_unit" attribute is an
- # object handle
- # - the corresponding udunits object only exists in the original
- # invocation
- def __getstate__(self):
- # state capture method for Pickle.dump()
- # - return the instance data needed to reconstruct a Unit value
- return {'unit_text': self.origin, 'calendar': self.calendar}
-
- def __setstate__(self, state):
- # object reconstruction method for Pickle.load()
- # intercept the Pickle.load() operation and call own __init__ again
- # - this is to ensure a valid ut_unit attribute (as these
- # handles aren't persistent)
- self.__init__(state['unit_text'], calendar=state['calendar'])
-
- def __del__(self):
- # NB. If Python is terminating then the module global "_ut_free"
- # may have already been deleted ... so we check before using it.
- if _ut_free:
- _ut_free(self.ut_unit)
-
- def __copy__(self):
- return self
-
- def __deepcopy__(self, memo):
- return self
-
- def is_time(self):
- """
- Determine whether this unit is a related SI Unit of time.
-
- Returns:
- Boolean.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('hours')
- >>> u.is_time()
- True
- >>> v = unit.Unit('meter')
- >>> v.is_time()
- False
-
- """
- if self.is_unknown() or self.is_no_unit():
- result = False
- else:
- day = _ut_get_unit_by_name(_ud_system, b'day')
- result = _ut_are_convertible(self.ut_unit, day) != 0
- return result
-
- def is_vertical(self):
- """
- Determine whether the unit is a related SI Unit of pressure or
- distance.
-
- Returns:
- Boolean.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('millibar')
- >>> u.is_vertical()
- True
- >>> v = unit.Unit('km')
- >>> v.is_vertical()
- True
-
- """
- if self.is_unknown() or self.is_no_unit():
- result = False
- else:
- bar = _ut_get_unit_by_name(_ud_system, b'bar')
- result = _ut_are_convertible(self.ut_unit, bar) != 0
- if not result:
- meter = _ut_get_unit_by_name(_ud_system, b'meter')
- result = _ut_are_convertible(self.ut_unit, meter) != 0
- return result
-
- def is_udunits(self):
- """Return whether the unit is a vaild unit of UDUNITS."""
- return self.ut_unit is not None
-
- def is_time_reference(self):
- """
- Return whether the unit is a time reference unit of the form
- '<time-unit> since <time-origin>'
- i.e. unit='days since 1970-01-01 00:00:00'
-
- Returns:
- Boolean.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('days since epoch')
- >>> u.is_time_reference()
- True
-
- """
- return self.calendar is not None
-
- def title(self, value):
- """
- Return the unit value as a title string.
-
- Args:
-
- * value (float): Unit value to be incorporated into title string.
-
- Returns:
- string.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('hours since epoch',
- ... calendar=unit.CALENDAR_STANDARD)
- >>> u.title(10)
- '1970-01-01 10:00:00'
-
- """
- if self.is_time_reference():
- dt = self.num2date(value)
- result = dt.strftime('%Y-%m-%d %H:%M:%S')
- else:
- result = '%s %s' % (str(value), self)
- return result
-
- @property
- def modulus(self):
- """
- *(read-only)* Return the modulus value of the unit.
-
- Convenience method that returns the unit modulus value as follows,
- * 'radians' - pi*2
- * 'degrees' - 360.0
- * Otherwise None.
-
- Returns:
- float.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('degrees')
- >>> u.modulus
- 360.0
-
- """
-
- if self == 'radians':
- result = np.pi * 2
- elif self == 'degrees':
- result = 360.0
- else:
- result = None
- return result
-
- def is_convertible(self, other):
- """
- Return whether two units are convertible.
-
- Args:
-
- * other (Unit): Unit to be compared.
-
- Returns:
- Boolean.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('meters')
- >>> v = unit.Unit('kilometers')
- >>> u.is_convertible(v)
- True
-
- """
- other = as_unit(other)
- if self.is_unknown() or self.is_no_unit() or other.is_unknown() or \
- other.is_no_unit():
- result = False
- else:
- result = (self.calendar == other.calendar and
- _ut_are_convertible(self.ut_unit, other.ut_unit) != 0)
- return result
-
- def is_dimensionless(self):
- """
- Return whether the unit is dimensionless.
-
- Returns:
- Boolean.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('meters')
- >>> u.is_dimensionless()
- False
- >>> u = unit.Unit('1')
- >>> u.is_dimensionless()
- True
-
- """
- return (self.category == _CATEGORY_UDUNIT and
- bool(_ut_is_dimensionless(self.ut_unit)))
-
- def is_unknown(self):
- """
- Return whether the unit is defined to be an *unknown* unit.
-
- Returns:
- Boolean.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('unknown')
- >>> u.is_unknown()
- True
- >>> u = unit.Unit('meters')
- >>> u.is_unknown()
- False
-
- """
- return self.category == _CATEGORY_UNKNOWN
-
- def is_no_unit(self):
- """
- Return whether the unit is defined to be a *no_unit* unit.
-
- Typically, a quantity such as a string will have no associated
- unit to describe it. Such a class of quantity may be defined
- using the *no_unit* unit.
-
- Returns:
- Boolean.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('no unit')
- >>> u.is_no_unit()
- True
- >>> u = unit.Unit('meters')
- >>> u.is_no_unit()
- False
-
- """
- return self.category == _CATEGORY_NO_UNIT
-
- def format(self, option=None):
- """
- Return a formatted string representation of the binary unit.
-
- Args:
-
- * option (cf_units.UT_FORMATS):
- Set the encoding option of the formatted string representation.
- Valid encoding options may be one of the following enumerations:
-
- * Unit.UT_ASCII
- * Unit.UT_ISO_8859_1
- * Unit.UT_LATIN1
- * Unit.UT_UTF8
- * Unit.UT_NAMES
- * Unit.UT_DEFINITION
-
- Multiple options may be combined within a list. The default
- option is cf_units.UT_ASCII.
-
- Returns:
- string.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('meters')
- >>> u.format()
- 'm'
- >>> u.format(unit.UT_NAMES)
- 'meter'
- >>> u.format(unit.UT_DEFINITION)
- 'm'
-
- """
- if self.is_unknown():
- return _UNKNOWN_UNIT_STRING
- elif self.is_no_unit():
- return _NO_UNIT_STRING
- else:
- bitmask = UT_ASCII
- if option is not None:
- if not isinstance(option, list):
- option = [option]
- for i in option:
- bitmask |= i
- string_buffer = ctypes.create_string_buffer(_STRING_BUFFER_DEPTH)
- depth = _ut_format(self.ut_unit, string_buffer,
- ctypes.sizeof(string_buffer), bitmask)
- if depth < 0:
- self._raise_error('Failed to format %r' % self)
- return str(string_buffer.value.decode('ascii'))
-
- @property
- def name(self):
- """
- *(read-only)* The full name of the unit.
-
- Formats the binary unit into a string representation using
- method :func:`cf_units.Unit.format` with keyword argument
- option=cf_units.UT_NAMES.
-
- Returns:
- string.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('watts')
- >>> u.name
- 'watt'
-
- """
- return self.format(UT_NAMES)
-
- @property
- def symbol(self):
- """
- *(read-only)* The symbolic representation of the unit.
-
- Formats the binary unit into a string representation using
- method :func:`cf_units.Unit.format`.
-
- Returns:
- string.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('watts')
- >>> u.symbol
- 'W'
-
- """
- if self.is_unknown():
- result = _UNKNOWN_UNIT_SYMBOL
- elif self.is_no_unit():
- result = _NO_UNIT_SYMBOL
- else:
- result = self.format()
- return result
-
- @property
- def definition(self):
- """
- *(read-only)* The symbolic decomposition of the unit.
-
- Formats the binary unit into a string representation using
- method :func:`cf_units.Unit.format` with keyword argument
- option=cf_units.UT_DEFINITION.
-
- Returns:
- string.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('watts')
- >>> u.definition
- 'm2.kg.s-3'
-
- """
- if self.is_unknown():
- result = _UNKNOWN_UNIT_SYMBOL
- elif self.is_no_unit():
- result = _NO_UNIT_SYMBOL
- else:
- result = self.format(UT_DEFINITION)
- return result
-
- def offset_by_time(self, origin):
- """
- Returns the time unit offset with respect to the time origin.
-
- Args:
-
- * origin (float): Time origin as returned by the
- :func:`cf_units.encode_time` method.
-
- Returns:
- Unit.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('hours')
- >>> u.offset_by_time(unit.encode_time(1970, 1, 1, 0, 0, 0))
- Unit('hour since 1970-01-01 00:00:00.0000000 UTC')
-
- """
-
- if not isinstance(origin, (float, six.integer_types)):
- raise TypeError('a numeric type for the origin argument is'
- ' required')
- ut_unit = _ut_offset_by_time(self.ut_unit, ctypes.c_double(origin))
- if not ut_unit:
- self._raise_error('Failed to offset %r' % self)
- calendar = None
- return _Unit(_CATEGORY_UDUNIT, ut_unit, calendar)
-
- def invert(self):
- """
- Invert the unit i.e. find the reciprocal of the unit, and return
- the Unit result.
-
- Returns:
- Unit.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('meters')
- >>> u.invert()
- Unit('meter^-1')
-
- """
- if self.is_unknown():
- result = self
- elif self.is_no_unit():
- raise ValueError("Cannot invert a 'no-unit'.")
- else:
- ut_unit = _ut_invert(self.ut_unit)
- if not ut_unit:
- self._raise_error('Failed to invert %r' % self)
- calendar = None
- result = _Unit(_CATEGORY_UDUNIT, ut_unit, calendar)
- return result
-
- def root(self, root):
- """
- Returns the given root of the unit.
-
- Args:
-
- * root (int): Value by which the unit root is taken.
-
- Returns:
- Unit.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('meters^2')
- >>> u.root(2)
- Unit('meter')
-
- .. note::
-
- Taking a fractional root of a unit is not supported.
-
- """
- try:
- root = ctypes.c_int(root)
- except TypeError:
- raise TypeError('An int type for the root argument'
- ' is required')
-
- if self.is_unknown():
- result = self
- elif self.is_no_unit():
- raise ValueError("Cannot take the logarithm of a 'no-unit'.")
- else:
- # only update the unit if it is not scalar
- if self == Unit('1'):
- result = self
- else:
- ut_unit = _ut_root(self.ut_unit, root)
- if not ut_unit:
- self._raise_error('Failed to take the root of %r' % self)
- calendar = None
- result = _Unit(_CATEGORY_UDUNIT, ut_unit, calendar)
- return result
-
- def log(self, base):
- """
- Returns the logarithmic unit corresponding to the given
- logarithmic base.
-
- Args:
-
- * base (int/float): Value of the logarithmic base.
-
- Returns:
- Unit.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('meters')
- >>> u.log(2)
- Unit('lb(re 1 meter)')
-
- """
- try:
- base = ctypes.c_double(base)
- except TypeError:
- raise TypeError('A numeric type for the base argument is required')
-
- if self.is_unknown():
- result = self
- elif self.is_no_unit():
- raise ValueError("Cannot take the logarithm of a 'no-unit'.")
- else:
- ut_unit = _ut_log(base, self.ut_unit)
- if not ut_unit:
- msg = 'Failed to calculate logarithmic base of %r' % self
- self._raise_error(msg)
- calendar = None
- result = _Unit(_CATEGORY_UDUNIT, ut_unit, calendar)
- return result
-
- def __str__(self):
- """
- Returns a simple string representation of the unit.
-
- Returns:
- string.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('meters')
- >>> str(u)
- 'meters'
-
- """
- return self.origin or self.name
-
- def __repr__(self):
- """
- Returns a string representation of the unit object.
-
- Returns:
- string.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('meters')
- >>> repr(u)
- "Unit('meters')"
-
- """
-
- if self.calendar is None:
- result = "%s('%s')" % (self.__class__.__name__, self)
- else:
- result = "%s('%s', calendar='%s')" % (self.__class__.__name__,
- self, self.calendar)
- return result
-
- def _offset_common(self, offset):
- try:
- offset = ctypes.c_double(offset)
- except TypeError:
- result = NotImplemented
- else:
- if self.is_unknown():
- result = self
- elif self.is_no_unit():
- raise ValueError("Cannot offset a 'no-unit'.")
- else:
- ut_unit = _ut_offset(self.ut_unit, offset)
- if not ut_unit:
- self._raise_error('Failed to offset %r' % self)
- calendar = None
- result = _Unit(_CATEGORY_UDUNIT, ut_unit, calendar)
- return result
-
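- # A sketch of the offset behaviour that __add__/__sub__ provide via
- # _offset_common; the exact formatting of the resulting unit is
- # udunits-dependent, hence the skip:
- #     >>> Unit('kelvin') + 273.15    # doctest: +SKIP
- #     Unit('kelvin @ 273.15')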
- def __add__(self, other):
- return self._offset_common(other)
-
- def __sub__(self, other):
- try:
- other = -other
- except TypeError:
- result = NotImplemented
- else:
- # 'other' has already been negated above.
- result = self._offset_common(other)
- return result
-
- def _op_common(self, other, op_func):
- # Convenience method to create a new unit from an operation between
- # the units 'self' and 'other'.
-
- op_label = op_func.__name__.split('_')[1]
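- # e.g. op_func.__name__ == 'ut_multiply' gives op_label 'multiply'.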
-
- other = as_unit(other)
-
- if self.is_no_unit() or other.is_no_unit():
- raise ValueError("Cannot %s a 'no-unit'." % op_label)
-
- if self.is_unknown() or other.is_unknown():
- result = _Unit(_CATEGORY_UNKNOWN, None)
- else:
- ut_unit = op_func(self.ut_unit, other.ut_unit)
- if not ut_unit:
- msg = 'Failed to %s %r by %r' % (op_label, self, other)
- self._raise_error(msg)
- calendar = None
- result = _Unit(_CATEGORY_UDUNIT, ut_unit, calendar)
- return result
-
- def __rmul__(self, other):
- # NB. Because we've subclassed a tuple, we need to define this to
- # prevent the default tuple-repetition behaviour.
- # ie. 2 * ('a', 'b') -> ('a', 'b', 'a', 'b')
- return self * other
-
- def __mul__(self, other):
- """
- Multiply the self unit by the other scale factor or unit and
- return the Unit result.
-
- Note that multiplication involving an 'unknown' unit will always
- result in an 'unknown' unit.
-
- Args:
-
- * other (int/float/string/Unit): Multiplication scale
- factor or unit.
-
- Returns:
- Unit.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('meters')
- >>> v = unit.Unit('hertz')
- >>> u*v
- Unit('meter-second^-1')
-
- """
- return self._op_common(other, _ut_multiply)
-
- def __div__(self, other):
- """
- Divide the self unit by the other scale factor or unit and
- return the Unit result.
-
- Note that division involving an 'unknown' unit will always
- result in an 'unknown' unit.
-
- Args:
-
- * other (int/float/string/Unit): Division scale factor or unit.
-
- Returns:
- Unit.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('m.s-1')
- >>> v = unit.Unit('hertz')
- >>> u/v
- Unit('meter')
-
- """
- return self._op_common(other, _ut_divide)
-
- def __truediv__(self, other):
- """
- Divide the self unit by the other scale factor or unit and
- return the Unit result.
-
- Note that division involving an 'unknown' unit will always
- result in an 'unknown' unit.
-
- Args:
-
- * other (int/float/string/Unit): Division scale factor or unit.
-
- Returns:
- Unit.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('m.s-1')
- >>> v = unit.Unit('hertz')
- >>> u/v
- Unit('meter')
-
- """
- return self.__div__(other)
-
- def __pow__(self, power):
- """
- Raise the unit by the given power and return the Unit result.
-
- Note that UDUNITS-2 does not support raising a
- non-dimensionless unit by a fractional power.
- Approximate floating point power behaviour has been implemented
- specifically for Iris.
-
- Args:
-
- * power (int/float): Value by which the unit power is raised.
-
- Returns:
- Unit.
-
- For example:
-
- >>> import cf_units as unit
- >>> u = unit.Unit('meters')
- >>> u**2
- Unit('meter^2')
-
- """
- try:
- power = float(power)
- except ValueError:
- raise TypeError('A numeric value is required for the power'
- ' argument.')
-
- if self.is_unknown():
- result = self
- elif self.is_no_unit():
- raise ValueError("Cannot raise the power of a 'no-unit'.")
- elif self == Unit('1'):
- # 1 ** N -> 1
- result = self
- else:
- # UDUNITS-2 does not support floating point raise/root.
- # But if the power is of the form 1/N, where N is an integer
- # (within a certain acceptable accuracy) then we can find the Nth
- # root.
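- # e.g. power=0.5 gives root=2, so Unit('m2') ** 0.5 -> Unit('meter').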
- if not iris.util.approx_equal(power, 0.0) and abs(power) < 1:
- if not iris.util.approx_equal(1 / power, round(1 / power)):
- raise ValueError('Cannot raise a unit by a decimal.')
- root = int(round(1 / power))
- result = self.root(root)
- else:
- # Failing that, check for powers which are (very nearly) simple
- # integer values.
- if not iris.util.approx_equal(power, round(power)):
- msg = 'Cannot raise a unit by a decimal (got %s).' % power
- raise ValueError(msg)
- power = int(round(power))
-
- ut_unit = _ut_raise(self.ut_unit, ctypes.c_int(power))
- if not ut_unit:
- self._raise_error('Failed to raise the power of %r' % self)
- result = _Unit(_CATEGORY_UDUNIT, ut_unit)
- return result
-
- def _identity(self):
- # Redefine the comparison/hash/ordering identity as used by
- # iris.util._OrderedHashable.
- return (self.name, self.calendar)
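- # e.g. Unit('m') and Unit('meter') share the identity ('meter', None),
- # so they compare and hash equal.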
-
- __hash__ = iris.util._OrderedHashable.__hash__
-
- def __eq__(self, other):
- """
- Compare the two units for equality and return the boolean result.
-
- Args:
-
- * other (string/Unit): Unit to be compared.
-
- Returns:
- Boolean.
-
- For example:
-
- >>> from cf_units import Unit
- >>> Unit('meters') == Unit('millimeters')
- False
- >>> Unit('meters') == 'm'
- True
-
- """
- other = as_unit(other)
-
- # Compare category (i.e. unknown, no_unit, etc.).
- if self.category != other.category:
- return False
-
- # Compare calendar as UDUNITS cannot handle calendars.
- if self.calendar != other.calendar:
- return False
-
- # Compare UDUNITS.
- res = _ut_compare(self.ut_unit, other.ut_unit)
- return res == 0
-
- def __ne__(self, other):
- """
- Compare the two units for inequality and return the boolean result.
-
- Args:
-
- * other (string/Unit): Unit to be compared.
-
- Returns:
- Boolean.
-
- For example:
-
- >>> from cf_units import Unit
- >>> Unit('meters') != Unit('millimeters')
- True
- >>> Unit('meters') != 'm'
- False
-
- """
- return not self == other
-
- def convert(self, value, other, ctype=FLOAT64):
- """
- Converts a single value or numpy array of values from the current unit
- to the other target unit.
-
- If the units are not convertible, a ValueError is raised.
-
- Args:
-
- * value (int/float/numpy.ndarray):
- Value/s to be converted.
- * other (string/Unit):
- Target unit to convert to.
- * ctype (ctypes.c_float/ctypes.c_double):
- Floating point 32-bit single-precision (cf_units.FLOAT32) or
- 64-bit double-precision (cf_units.FLOAT64) used for conversion
- when `value` is not a NumPy array or is a NumPy array composed of
- NumPy integers. The default is 64-bit double-precision conversion.
-
- Returns:
- float or numpy.ndarray of appropriate float type.
-
- For example:
-
- >>> import cf_units as unit
- >>> import numpy as np
- >>> c = unit.Unit('deg_c')
- >>> f = unit.Unit('deg_f')
- >>> c.convert(0, f)
- 31.999999999999886
- >>> c.convert(0, f, unit.FLOAT32)
- 32.0
- >>> a64 = np.arange(10, dtype=np.float64)
- >>> c.convert(a64, f)
- array([ 32. , 33.8, 35.6, 37.4, 39.2, 41. , 42.8, 44.6, \
- 46.4, 48.2])
- >>> a32 = np.arange(10, dtype=np.float32)
- >>> c.convert(a32, f)
- array([ 32. , 33.79999924, 35.59999847, 37.40000153,
- 39.20000076, 41. , 42.79999924, 44.59999847,
- 46.40000153, 48.20000076], dtype=float32)
-
- .. note::
-
- Conversion between unit calendars is not permitted.
-
- """
- result = None
- other = as_unit(other)
- value_copy = copy.deepcopy(value)
-
- if self == other:
- return value
-
- if self.is_convertible(other):
- # Use utime for converting reference times that are not using a
- # gregorian calendar as it handles these and udunits does not.
- if self.is_time_reference() \
- and self.calendar != CALENDAR_GREGORIAN:
- ut1 = self.utime()
- ut2 = other.utime()
- result = ut2.date2num(ut1.num2date(value_copy))
- # Preserve the datatype of the input array if it was float32.
- if isinstance(value, np.ndarray) and value.dtype == np.float32:
- result = result.astype(np.float32)
- else:
- ut_converter = _ut_get_converter(self.ut_unit, other.ut_unit)
- if ut_converter:
- if isinstance(value_copy, np.ndarray):
- # Can only handle array of np.float32 or np.float64 so
- # cast array of ints to array of floats of requested
- # precision.
- if issubclass(value_copy.dtype.type, np.integer):
- value_copy = value_copy.astype(
- _ctypes2numpy[ctype])
- # Convert arrays with explicit endianness to native
- # endianness: udunits seems to be tripped up by arrays
- # with endianness other than native.
- if value_copy.dtype.byteorder != '=':
- value_copy = value_copy.astype(
- value_copy.dtype.type)
- # strict type check of numpy array
- if value_copy.dtype.type not in _numpy2ctypes:
- raise TypeError(
- "Expect a numpy array of '%s' or '%s'" %
- tuple(sorted(_numpy2ctypes.keys())))
- ctype = _numpy2ctypes[value_copy.dtype.type]
- pointer = value_copy.ctypes.data_as(
- ctypes.POINTER(ctype))
- # Utilise global convenience dictionary
- # _cv_convert_array
- _cv_convert_array[ctype](ut_converter, pointer,
- value_copy.size, pointer)
- result = value_copy
- else:
- if ctype not in _cv_convert_scalar:
- raise ValueError('Invalid target type. Can only '
- 'convert to float or double.')
- # Utilise global convenience dictionary
- # _cv_convert_scalar
- result = _cv_convert_scalar[ctype](ut_converter,
- ctype(value_copy))
- _cv_free(ut_converter)
- else:
- self._raise_error('Failed to convert %r to %r' %
- (self, other))
- else:
- raise ValueError("Unable to convert from '%r' to '%r'." %
- (self, other))
- return result
-
- def utime(self):
- """
- Returns a netcdftime.utime object which performs conversions of
- numeric time values to/from datetime objects given the current
- calendar and unit time reference.
-
- The current unit time reference must be of the form:
- '<time-unit> since <time-origin>'
- i.e. 'hours since 1970-01-01 00:00:00'
-
- Returns:
- netcdftime.utime.
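-
- For example (illustrative; the exact repr depends on the
- netcdftime version):
-
- >>> import cf_units as unit
- >>> u = unit.Unit('hours since 1970-01-01 00:00:00',
- ...               calendar=unit.CALENDAR_STANDARD)
- >>> ut = u.utime()
- >>> ut.num2date(2)
- datetime.datetime(1970, 1, 1, 2, 0)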
- """
-
- #
- # Ensure we strip out the non-parsable 'UTC' suffix, which is
- # generated by UDUNITS-2 formatted output.
- #
- if self.calendar is None:
- raise ValueError('Unit has undefined calendar')
- return netcdftime.utime(str(self).rstrip(" UTC"), self.calendar)
-
- def date2num(self, date):
- """
- Returns the numeric time value calculated from the datetime
- object using the current calendar and unit time reference.
-
- The current unit time reference must be of the form:
- '<time-unit> since <time-origin>'
- i.e. 'hours since 1970-01-01 00:00:00'
-
- Works for scalars, sequences and numpy arrays. Returns a scalar
- if input is a scalar, else returns a numpy array.
-
- Args:
-
- * date (datetime):
- A datetime object or a sequence of datetime objects.
- The datetime objects should not include a time-zone offset.
-
- Returns:
- float or numpy.ndarray of float.
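-
- For example (illustrative; printed precision may vary):
-
- >>> import cf_units as unit
- >>> import datetime
- >>> u = unit.Unit('hours since 1970-01-01 00:00:00',
- ...               calendar=unit.CALENDAR_STANDARD)
- >>> u.date2num(datetime.datetime(1970, 1, 1, 5))
- 5.0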
- """
-
- cdf_utime = self.utime()
- return cdf_utime.date2num(date)
-
- def num2date(self, time_value):
- """
- Returns a datetime-like object calculated from the numeric time
- value using the current calendar and the unit time reference.
-
- The current unit time reference must be of the form:
- '<time-unit> since <time-origin>'
- i.e. 'hours since 1970-01-01 00:00:00'
-
- The datetime objects returned are 'real' Python datetime objects
- if the date falls in the Gregorian calendar (i.e. the calendar
- is 'standard', 'gregorian', or 'proleptic_gregorian' and the
- date is after 1582-10-15). Otherwise a 'phoney' datetime-like
- object (netcdftime.datetime) is returned which can handle dates
- that don't exist in the Proleptic Gregorian calendar.
-
- Works for scalars, sequences and numpy arrays. Returns a scalar
- if input is a scalar, else returns a numpy array.
-
- Args:
-
- * time_value (float): Numeric time value/s. Maximum resolution
- is 1 second.
-
- Returns:
- datetime, or numpy.ndarray of datetime object.
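-
- For example (illustrative):
-
- >>> import cf_units as unit
- >>> u = unit.Unit('hours since 1970-01-01 00:00:00',
- ...               calendar=unit.CALENDAR_STANDARD)
- >>> u.num2date(6)
- datetime.datetime(1970, 1, 1, 6, 0)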
- """
- cdf_utime = self.utime()
- return cdf_utime.num2date(time_value)
diff --git a/setup.py b/setup.py
index d69ff83517..6cd19e5cb1 100644
--- a/setup.py
+++ b/setup.py
@@ -210,20 +210,6 @@ def extract_version():
},
data_files=[('iris', ['CHANGES', 'COPYING', 'COPYING.LESSER'])],
tests_require=['nose'],
- features={
- 'unpack': setuptools.Feature(
- "use of UKMO unpack library",
- standard=False,
- ext_modules=[
- setuptools.Extension(
- 'iris.fileformats._old_pp_packing',
- ['src/iris/fileformats/pp_packing/pp_packing.c'],
- libraries=['mo_unpack'],
- include_dirs=[np.get_include()]
- )
- ]
- )
- },
cmdclass={'test': SetupTestRunner, 'build_py': BuildPyWithExtras,
'std_names': MakeStdNames, 'pyke_rules': MakePykeRules,
'clean_source': CleanSource},
diff --git a/src/iris/fileformats/pp_packing/pp_packing.c b/src/iris/fileformats/pp_packing/pp_packing.c
deleted file mode 100644
index 044ce1295d..0000000000
--- a/src/iris/fileformats/pp_packing/pp_packing.c
+++ /dev/null
@@ -1,254 +0,0 @@
-// (C) British Crown Copyright 2010 - 2015, Met Office
-//
-// This file is part of Iris.
-//
-// Iris is free software: you can redistribute it and/or modify it under
-// the terms of the GNU Lesser General Public License as published by the
-// Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// Iris is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
- // along with Iris. If not, see <http://www.gnu.org/licenses/>.
-#include <Python.h>
-
-#include <numpy/arrayobject.h>
-
-#include <wgdosstuff.h>
-#include <rlencode.h>
-
-static PyObject *wgdos_unpack_py(PyObject *self, PyObject *args);
-static PyObject *rle_decode_py(PyObject *self, PyObject *args);
-
-#define BYTES_PER_INT_UNPACK_PPFIELD 4
-#define LBPACK_WGDOS_PACKED 1
-#define LBPACK_RLE_PACKED 4
-
-
-
-#if PY_MAJOR_VERSION >= 3
-PyMODINIT_FUNC PyInit__old_pp_packing(void)
-#else
-PyMODINIT_FUNC init_old_pp_packing(void)
-#endif
-{
-
- /* The module doc string */
- PyDoc_STRVAR(pp_packing__doc__,
- "This extension module provides access to the underlying libmo_unpack library functionality.\n"
- ""
- );
-
- PyDoc_STRVAR(wgdos_unpack__doc__,
- "Unpack PP field data that has been packed using WGDOS archive method.\n"
- "\n"
- "Provides access to the libmo_unpack library function Wgdos_Unpack.\n"
- "\n"
- "Args:\n\n"
- "* data (numpy.ndarray):\n"
- " The raw field byte array to be unpacked.\n"
- "* lbrow (int):\n"
- " The number of rows in the grid.\n"
- "* lbnpt (int):\n"
- " The number of points (columns) per row in the grid.\n"
- "* bmdi (float):\n"
- " The value used in the field to indicate missing data points.\n"
- "\n"
- "Returns:\n"
- " numpy.ndarray, 2d array containing normal unpacked field data.\n"
- ""
- );
-
-
- PyDoc_STRVAR(rle_decode__doc__,
- "Uncompress PP field data that has been compressed using Run Length Encoding.\n"
- "\n"
- "Provides access to the libmo_unpack library function runlenDecode.\n"
- "Decodes the field by expanding out the missing data points represented\n"
- "by a single missing data value followed by a value indicating the length\n"
- "of the run of missing data values.\n"
- "\n"
- "Args:\n\n"
- "* data (numpy.ndarray):\n"
- " The raw field byte array to be uncompressed.\n"
- "* lbrow (int):\n"
- " The number of rows in the grid.\n"
- "* lbnpt (int):\n"
- " The number of points (columns) per row in the grid.\n"
- "* bmdi (float):\n"
- " The value used in the field to indicate missing data points.\n"
- "\n"
- "Returns:\n"
- " numpy.ndarray, 2d array containing normal uncompressed field data.\n"
- ""
- );
-
- /* ==== Set up the module's methods table ====================== */
- static PyMethodDef pp_packingMethods[] = {
- {"wgdos_unpack", wgdos_unpack_py, METH_VARARGS, wgdos_unpack__doc__},
- {"rle_decode", rle_decode_py, METH_VARARGS, rle_decode__doc__},
- {NULL, NULL, 0, NULL} /* marks the end of this structure */
- };
-
-#if PY_MAJOR_VERSION >= 3
- static struct PyModuleDef moduledef = {
- PyModuleDef_HEAD_INIT,
- "_old_pp_packing",
- pp_packing__doc__,
- -1,
- pp_packingMethods,
- NULL,
- NULL,
- NULL,
- NULL,
- };
-
- PyObject *m = PyModule_Create(&moduledef);
- import_array(); // Must be present for NumPy.
-
- return m;
-#else
- Py_InitModule3("_old_pp_packing", pp_packingMethods, pp_packing__doc__);
- import_array(); // Must be present for NumPy.
-#endif
-}
-
-
-/* wgdos_unpack(byte_array, lbrow, lbnpt, mdi) */
-static PyObject *wgdos_unpack_py(PyObject *self, PyObject *args)
-{
- char *bytes_in=NULL;
- PyArrayObject *npy_array_out=NULL;
- int bytes_in_len;
- npy_intp dims[2];
- int lbrow, lbnpt, npts;
- float mdi;
-
- if (!PyArg_ParseTuple(args, "s#iif", &bytes_in, &bytes_in_len, &lbrow, &lbnpt, &mdi)) return NULL;
-
- // Unpacking algorithm accepts an int - so assert that lbrow*lbnpt does not overflow
- if (lbrow > 0 && lbnpt >= INT_MAX / (lbrow+1)) {
- PyErr_SetString(PyExc_ValueError, "Resulting unpacked PP field is larger than PP supports.");
- return NULL;
- } else{
- npts = lbnpt*lbrow;
- }
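- // e.g. with lbrow == 1000 the check above rejects lbnpt >= INT_MAX/1001,
- // so npts = lbnpt*lbrow always fits in an int.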
-
- // We can't use the macros Py_BEGIN_ALLOW_THREADS / Py_END_ALLOW_THREADS
- // because they declare a new scope block, but we want multiple exits.
- PyThreadState *_save;
- _save = PyEval_SaveThread();
-
- /* Do the unpack of the given byte array */
- float *dataout = (float*)calloc(npts, sizeof(float));
-
- if (dataout == NULL) {
- PyEval_RestoreThread(_save);
- PyErr_SetString(PyExc_ValueError, "Unable to allocate memory for wgdos_unpacking.");
- return NULL;
- }
-
- function func; // function is defined by wgdosstuff.
- set_function_name(__func__, &func, 0);
- int status = unpack_ppfield(mdi, 0, bytes_in, LBPACK_WGDOS_PACKED, npts, dataout, &func);
-
- /* Raise an exception if there was a problem with the WGDOS algorithm */
- if (status != 0) {
- free(dataout);
- PyEval_RestoreThread(_save);
- PyErr_SetString(PyExc_ValueError, "WGDOS unpack encountered an error.");
- return NULL;
- }
- else {
- /* The data came back fine, so make a Numpy array and return it */
- dims[0]=lbrow;
- dims[1]=lbnpt;
- PyEval_RestoreThread(_save);
- npy_array_out=(PyArrayObject *) PyArray_SimpleNewFromData(2, dims, NPY_FLOAT, dataout);
-
- if (npy_array_out == NULL) {
- PyErr_SetString(PyExc_ValueError, "Failed to make the numpy array for the packed data.");
- return NULL;
- }
-
- // give ownership of dataout to the Numpy array - Numpy will then deal with memory cleanup.
- npy_array_out->flags = npy_array_out->flags | NPY_OWNDATA;
-
- return (PyObject *)npy_array_out;
- }
-}
-
-
-/* A no-op logging callback required by the libmo_unpack library */
-void MO_syslog(int value, char* message, const function* const caller)
-{
- /* printf("MESSAGE %d %s: %s\n", value, caller, message); */
- return;
-}
-
-
-/* rle_decode(byte_array, lbrow, lbnpt, mdi) */
-static PyObject *rle_decode_py(PyObject *self, PyObject *args)
-{
- char *bytes_in=NULL;
- PyArrayObject *npy_array_out=NULL;
- int bytes_in_len;
- npy_intp dims[2];
- int lbrow, lbnpt, npts;
- float mdi;
-
- if (!PyArg_ParseTuple(args, "s#iif", &bytes_in, &bytes_in_len, &lbrow, &lbnpt, &mdi)) return NULL;
-
- // Unpacking algorithm accepts an int - so assert that lbrow*lbnpt does not overflow
- if (lbrow > 0 && lbnpt >= INT_MAX / (lbrow+1)) {
- PyErr_SetString(PyExc_ValueError, "Resulting unpacked PP field is larger than PP supports.");
- return NULL;
- } else{
- npts = lbnpt*lbrow;
- }
-
- // We can't use the macros Py_BEGIN_ALLOW_THREADS / Py_END_ALLOW_THREADS
- // because they declare a new scope block, but we want multiple exits.
- PyThreadState *_save;
- _save = PyEval_SaveThread();
-
- float *dataout = (float*)calloc(npts, sizeof(float));
-
- if (dataout == NULL) {
- PyEval_RestoreThread(_save);
- PyErr_SetString(PyExc_ValueError, "Unable to allocate memory for wgdos_unpacking.");
- return NULL;
- }
-
- function func; // function is defined by wgdosstuff.
- set_function_name(__func__, &func, 0);
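- // For RLE data the second argument gives the packed length in 4-byte
- // words (hence the division by BYTES_PER_INT_UNPACK_PPFIELD); the WGDOS
- // call above passes 0 instead.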
- int status = unpack_ppfield(mdi, (bytes_in_len/BYTES_PER_INT_UNPACK_PPFIELD), bytes_in, LBPACK_RLE_PACKED, npts, dataout, &func);
-
- /* Raise an exception if there was a problem with the RLE algorithm */
- if (status != 0) {
- free(dataout);
- PyEval_RestoreThread(_save);
- PyErr_SetString(PyExc_ValueError, "RLE decode encountered an error.");
- return NULL;
- }
- else {
- /* The data came back fine, so make a Numpy array and return it */
- dims[0]=lbrow;
- dims[1]=lbnpt;
- PyEval_RestoreThread(_save);
- npy_array_out=(PyArrayObject *) PyArray_SimpleNewFromData(2, dims, NPY_FLOAT, dataout);
-
- if (npy_array_out == NULL) {
- PyErr_SetString(PyExc_ValueError, "Failed to make the numpy array for the packed data.");
- return NULL;
- }
-
- // give ownership of dataout to the Numpy array - Numpy will then deal with memory cleanup.
- npy_array_out->flags = npy_array_out->flags | NPY_OWNDATA;
- return (PyObject *)npy_array_out;
- }
-}