diff --git a/docs/iris/src/whatsnew/contributions_1.10/deprecate_2016-Apr-21_analysis_interpolate_module.txt b/docs/iris/src/whatsnew/contributions_1.10/deprecate_2016-Apr-21_analysis_interpolate_module.txt
new file mode 100644
index 0000000000..722c63cd98
--- /dev/null
+++ b/docs/iris/src/whatsnew/contributions_1.10/deprecate_2016-Apr-21_analysis_interpolate_module.txt
@@ -0,0 +1,12 @@
+* deprecated the module :mod:`iris.analysis.interpolate`.
+ This contains the following public items, all of which are now deprecated and
+ will be removed in a future release:
+ * :func:`~iris.analysis.interpolate.linear`
+ * :func:`~iris.analysis.interpolate.regrid`
+ * :func:`~iris.analysis.interpolate.regrid_to_max_resolution`
+ * :func:`~iris.analysis.interpolate.nearest_neighbour_indices`
+ * :func:`~iris.analysis.interpolate.nearest_neighbour_data_value`
+ * :func:`~iris.analysis.interpolate.extract_nearest_neighbour`
+ * class :class:`~iris.analysis.interpolate.Linear1dExtrapolator`.
+ Please use the replacement facilities individually noted in the module
+ documentation for :mod:`iris.analysis.interpolate`.
diff --git a/docs/iris/src/whatsnew/contributions_1.10/deprecate_2016-Apr-21_cube_regridded.txt b/docs/iris/src/whatsnew/contributions_1.10/deprecate_2016-Apr-21_cube_regridded.txt
new file mode 100644
index 0000000000..751c45c0d2
--- /dev/null
+++ b/docs/iris/src/whatsnew/contributions_1.10/deprecate_2016-Apr-21_cube_regridded.txt
@@ -0,0 +1,3 @@
+* the method :meth:`iris.cube.Cube.regridded` has been deprecated.
+ Please use :meth:`iris.cube.Cube.regrid` instead (see
+ :meth:`~iris.cube.Cube.regridded` for details).
diff --git a/lib/iris/_deprecation_helpers.py b/lib/iris/_deprecation_helpers.py
new file mode 100644
index 0000000000..6aeea4d158
--- /dev/null
+++ b/lib/iris/_deprecation_helpers.py
@@ -0,0 +1,44 @@
+# (C) British Crown Copyright 2010 - 2016, Met Office
+#
+# This file is part of Iris.
+#
+# Iris is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Iris is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Iris. If not, see <http://www.gnu.org/licenses/>.
+"""
+Utilities for producing runtime deprecation messages.
+
+"""
+from __future__ import (absolute_import, division, print_function)
+from six.moves import (filter, input, map, range, zip) # noqa
+import six
+
+import warnings
+
+
+# A Mixin for a wrapper class that copies the docstring of the wrapped class
+# into the wrapper.
+# This is useful in producing wrapper classes that need to mimic the original
+# but emit deprecation warnings when used.
+class ClassWrapperSameDocstring(type):
+ def __new__(metacls, classname, bases, class_dict):
+        # Patch the subclass to duplicate the class docstring from the
+        # wrapped class.  The wrapper class itself is expected to supply a
+        # '__new__' that issues a deprecation warning on instantiation.
+ parent_class = bases[0]
+
+ # Copy the original class docstring.
+ class_dict['__doc__'] = parent_class.__doc__
+
+ # Return the result.
+ return super(ClassWrapperSameDocstring, metacls).__new__(
+ metacls, classname, bases, class_dict)
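+
+
+# A minimal usage sketch (illustrative only; 'OriginalThing' is a
+# hypothetical stand-in for a real wrapped class):
+#
+#     class DeprecatedThing(six.with_metaclass(ClassWrapperSameDocstring,
+#                                              OriginalThing)):
+#         def __new__(cls, *args, **kwargs):
+#             warnings.warn("'OriginalThing' is deprecated.")
+#             return super(DeprecatedThing, cls).__new__(cls)
+#
+# The metaclass copies OriginalThing's docstring onto DeprecatedThing, while
+# the wrapper's own '__new__' emits the warning when an instance is created.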
diff --git a/lib/iris/analysis/_interpolate_private.py b/lib/iris/analysis/_interpolate_private.py
new file mode 100644
index 0000000000..0488af8c15
--- /dev/null
+++ b/lib/iris/analysis/_interpolate_private.py
@@ -0,0 +1,836 @@
+# (C) British Crown Copyright 2010 - 2016, Met Office
+#
+# This file is part of Iris.
+#
+# Iris is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Iris is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Iris. If not, see <http://www.gnu.org/licenses/>.
+"""
+This is the 'original' content of :mod:`iris.analysis.interpolate`, which has
+now been deprecated.
+
+A rename was essential to provide a deprecation warning on import of the
+original name, while still providing this code for internal usage (for now)
+without triggering the deprecation notice.
+
+"""
+
+from __future__ import (absolute_import, division, print_function)
+from six.moves import (filter, input, map, range, zip) # noqa
+import six
+
+import collections
+import warnings
+
+import numpy as np
+import scipy
+import scipy.spatial
+from scipy.interpolate.interpolate import interp1d
+
+from iris.analysis import Linear
+import iris.cube
+import iris.coord_systems
+import iris.coords
+import iris.exceptions
+
+
+def _ll_to_cart(lon, lat):
+ # Based on cartopy.img_transform.ll_to_cart()
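+    # Maps (lon, lat) in degrees onto the unit sphere: e.g. (illustrative)
+    # _ll_to_cart(0.0, 90.0) == (0.0, 0.0, 1.0), i.e. the north pole maps to
+    # the +z axis, so chord distances between points become comparable.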
+ x = np.sin(np.deg2rad(90 - lat)) * np.cos(np.deg2rad(lon))
+ y = np.sin(np.deg2rad(90 - lat)) * np.sin(np.deg2rad(lon))
+ z = np.cos(np.deg2rad(90 - lat))
+ return (x, y, z)
+
+def _cartesian_sample_points(sample_points, sample_point_coord_names):
+ # Replace geographic latlon with cartesian xyz.
+ # Generates coords suitable for nearest point calculations with scipy.spatial.cKDTree.
+ #
+ # Input:
+ # sample_points[coord][datum] : list of sample_positions for each datum, formatted for fast use of _ll_to_cart()
+ # sample_point_coord_names[coord] : list of n coord names
+ #
+ # Output:
+ # list of [x,y,z,t,etc] positions, formatted for kdtree
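+    #
+    # e.g. (illustrative) for names ['latitude', 'longitude', 'pressure'],
+    # a single sample [[0.], [10.], [850.]] yields one position
+    # [850., x, y, z], where (x, y, z) = _ll_to_cart(10., 0.).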
+
+ # Find lat and lon coord indices
+ i_lat = i_lon = None
+ i_non_latlon = list(range(len(sample_point_coord_names)))
+ for i, name in enumerate(sample_point_coord_names):
+ if "latitude" in name:
+ i_lat = i
+ i_non_latlon.remove(i_lat)
+ if "longitude" in name:
+ i_lon = i
+ i_non_latlon.remove(i_lon)
+
+ if i_lat is None or i_lon is None:
+ return sample_points.transpose()
+
+ num_points = len(sample_points[0])
+ cartesian_points = [None] * num_points
+
+ # Get the point coordinates without the latlon
+ for p in range(num_points):
+ cartesian_points[p] = [sample_points[c][p] for c in i_non_latlon]
+
+ # Add cartesian xyz coordinates from latlon
+ x, y, z = _ll_to_cart(sample_points[i_lon], sample_points[i_lat])
+ for p in range(num_points):
+ cartesian_point = cartesian_points[p]
+ cartesian_point.append(x[p])
+ cartesian_point.append(y[p])
+ cartesian_point.append(z[p])
+
+ return cartesian_points
+
+
+def nearest_neighbour_indices(cube, sample_points):
+ """
+ Returns the indices to select the data value(s) closest to the given coordinate point values.
+
+ The sample_points mapping does not have to include coordinate values corresponding to all data
+ dimensions. Any dimensions unspecified will default to a full slice.
+
+ For example:
+
+ >>> cube = iris.load_cube(iris.sample_data_path('ostia_monthly.nc'))
+ >>> iris.analysis.interpolate.nearest_neighbour_indices(cube, [('latitude', 0), ('longitude', 10)])
+ (slice(None, None, None), 9, 12)
+ >>> iris.analysis.interpolate.nearest_neighbour_indices(cube, [('latitude', 0)])
+ (slice(None, None, None), 9, slice(None, None, None))
+
+ Args:
+
+ * cube:
+ An :class:`iris.cube.Cube`.
+ * sample_points
+ A list of tuple pairs mapping coordinate instances or unique coordinate names in the cube to point values.
+
+ Returns:
+ The tuple of indices which will select the point in the cube closest to the supplied coordinate values.
+
+ .. note::
+
+ Nearest neighbour interpolation of multidimensional coordinates is not
+ yet supported.
+
+ .. deprecated:: 1.10
+
+ The module :mod:`iris.analysis.interpolate` is deprecated.
+ Please replace usage of
+ :func:`iris.analysis.interpolate.nearest_neighbour_indices`
+ with :meth:`iris.coords.Coord.nearest_neighbour_index`.
+
+ """
+ if isinstance(sample_points, dict):
+ warnings.warn('Providing a dictionary to specify points is deprecated. Please provide a list of (coordinate, values) pairs.')
+ sample_points = list(sample_points.items())
+
+ if sample_points:
+ try:
+ coord, values = sample_points[0]
+ except ValueError:
+ raise ValueError('Sample points must be a list of (coordinate, value) pairs. Got %r.' % sample_points)
+
+ points = []
+ for coord, values in sample_points:
+ if isinstance(coord, six.string_types):
+ coord = cube.coord(coord)
+ else:
+ coord = cube.coord(coord)
+ points.append((coord, values))
+ sample_points = points
+
+ # Build up a list of indices to span the cube.
+ indices = [slice(None, None)] * cube.ndim
+
+ # Build up a dictionary which maps the cube's data dimensions to a list (which will later
+ # be populated by coordinates in the sample points list)
+ dim_to_coord_map = {}
+ for i in range(cube.ndim):
+ dim_to_coord_map[i] = []
+
+ # Iterate over all of the specifications provided by sample_points
+ for coord, point in sample_points:
+ data_dim = cube.coord_dims(coord)
+
+ # If no data dimension then we don't need to make any modifications to indices.
+ if not data_dim:
+ continue
+ elif len(data_dim) > 1:
+ raise iris.exceptions.CoordinateMultiDimError("Nearest neighbour interpolation of multidimensional "
+ "coordinates is not supported.")
+ data_dim = data_dim[0]
+
+ dim_to_coord_map[data_dim].append(coord)
+
+        # Calculate the nearest neighbour.
+ min_index = coord.nearest_neighbour_index(point)
+
+ if getattr(coord, 'circular', False):
+ warnings.warn("Nearest neighbour on a circular coordinate may not be picking the nearest point.", DeprecationWarning)
+
+ # If the dimension has already been interpolated then assert that the index from this coordinate
+ # agrees with the index already calculated, otherwise we have a contradicting specification
+ if indices[data_dim] != slice(None, None) and min_index != indices[data_dim]:
+ raise ValueError('The coordinates provided (%s) over specify dimension %s.' %
+ (', '.join([coord.name() for coord in dim_to_coord_map[data_dim]]), data_dim))
+
+ indices[data_dim] = min_index
+
+ return tuple(indices)
+
+
+def _nearest_neighbour_indices_ndcoords(cube, sample_point, cache=None):
+ """
+ See documentation for :func:`iris.analysis.interpolate.nearest_neighbour_indices`.
+
+ This function is adapted for points sampling a multi-dimensional coord,
+ and can currently only do nearest neighbour interpolation.
+
+ Because this function can be slow for multidimensional coordinates,
+ a 'cache' dictionary can be provided by the calling code.
+
+ """
+
+ # Developer notes:
+ # A "sample space cube" is made which only has the coords and dims we are sampling on.
+ # We get the nearest neighbour using this sample space cube.
+
+ if isinstance(sample_point, dict):
+ warnings.warn('Providing a dictionary to specify points is deprecated. Please provide a list of (coordinate, values) pairs.')
+ sample_point = list(sample_point.items())
+
+ if sample_point:
+ try:
+ coord, value = sample_point[0]
+ except ValueError:
+ raise ValueError('Sample points must be a list of (coordinate, value) pairs. Got %r.' % sample_point)
+
+ # Convert names to coords in sample_point
+ point = []
+ ok_coord_ids = set(map(id, cube.dim_coords + cube.aux_coords))
+ for coord, value in sample_point:
+ if isinstance(coord, six.string_types):
+ coord = cube.coord(coord)
+ else:
+ coord = cube.coord(coord)
+ if id(coord) not in ok_coord_ids:
+ msg = ('Invalid sample coordinate {!r}: derived coordinates are'
+ ' not allowed.'.format(coord.name()))
+ raise ValueError(msg)
+ point.append((coord, value))
+
+ # Reformat sample_point for use in _cartesian_sample_points(), below.
+ sample_point = np.array([[value] for coord, value in point])
+ sample_point_coords = [coord for coord, value in point]
+ sample_point_coord_names = [coord.name() for coord, value in point]
+
+ # Which dims are we sampling?
+ sample_dims = set()
+ for coord in sample_point_coords:
+ for dim in cube.coord_dims(coord):
+ sample_dims.add(dim)
+ sample_dims = sorted(list(sample_dims))
+
+ # Extract a sub cube that lives in just the sampling space.
+ sample_space_slice = [0] * cube.ndim
+ for sample_dim in sample_dims:
+ sample_space_slice[sample_dim] = slice(None, None)
+ sample_space_slice = tuple(sample_space_slice)
+ sample_space_cube = cube[sample_space_slice]
+
+ #...with just the sampling coords
+ for coord in sample_space_cube.coords():
+ if not coord.name() in sample_point_coord_names:
+ sample_space_cube.remove_coord(coord)
+
+ # Order the sample point coords according to the sample space cube coords
+ sample_space_coord_names = [coord.name() for coord in sample_space_cube.coords()]
+ new_order = [sample_space_coord_names.index(name) for name in sample_point_coord_names]
+ sample_point = np.array([sample_point[i] for i in new_order])
+ sample_point_coord_names = [sample_point_coord_names[i] for i in new_order]
+
+ # Convert the sample point to cartesian coords.
+ # If there is no latlon within the coordinate there will be no change.
+ # Otherwise, geographic latlon is replaced with cartesian xyz.
+ cartesian_sample_point = _cartesian_sample_points(sample_point, sample_point_coord_names)[0]
+
+ sample_space_coords = sample_space_cube.dim_coords + sample_space_cube.aux_coords
+ sample_space_coords_and_dims = [(coord, sample_space_cube.coord_dims(coord)) for coord in sample_space_coords]
+
+ if cache is not None and cube in cache:
+ kdtree = cache[cube]
+ else:
+ # Create a "sample space position" for each datum: sample_space_data_positions[coord_index][datum_index]
+ sample_space_data_positions = np.empty((len(sample_space_coords_and_dims), sample_space_cube.data.size), dtype=float)
+ for d, ndi in enumerate(np.ndindex(sample_space_cube.data.shape)):
+ for c, (coord, coord_dims) in enumerate(sample_space_coords_and_dims):
+ # Index of this datum along this coordinate (could be nD).
+ keys = tuple(ndi[ind] for ind in coord_dims) if coord_dims else slice(None, None)
+ # Position of this datum along this coordinate.
+ sample_space_data_positions[c][d] = coord.points[keys]
+
+ # Convert to cartesian coordinates. Flatten for kdtree compatibility.
+ cartesian_space_data_coords = _cartesian_sample_points(sample_space_data_positions, sample_point_coord_names)
+
+ # Get the nearest datum index to the sample point. This is the goal of the function.
+ kdtree = scipy.spatial.cKDTree(cartesian_space_data_coords)
+
+ cartesian_distance, datum_index = kdtree.query(cartesian_sample_point)
+ sample_space_ndi = np.unravel_index(datum_index, sample_space_cube.data.shape)
+
+ # Turn sample_space_ndi into a main cube slice.
+ # Map sample cube to main cube dims and leave the rest as a full slice.
+ main_cube_slice = [slice(None, None)] * cube.ndim
+ for sample_coord, sample_coord_dims in sample_space_coords_and_dims:
+ # Find the coord in the main cube
+ main_coord = cube.coord(sample_coord.name())
+ main_coord_dims = cube.coord_dims(main_coord)
+ # Mark the nearest data index/indices with respect to this coord
+ for sample_i, main_i in zip(sample_coord_dims, main_coord_dims):
+ main_cube_slice[main_i] = sample_space_ndi[sample_i]
+
+
+ # Update cache
+ if cache is not None:
+ cache[cube] = kdtree
+
+ return tuple(main_cube_slice)
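+
+# A caching sketch for the function above (illustrative; 'sample_points_list'
+# is a hypothetical iterable of sample points): repeated lookups against the
+# same cube can share one kdtree by passing a common dict:
+#
+#     cache = {}
+#     for sample_point in sample_points_list:
+#         indices = _nearest_neighbour_indices_ndcoords(cube, sample_point,
+#                                                       cache=cache)
+#
+# The kdtree is built on the first call and re-used on subsequent calls.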
+
+
+def extract_nearest_neighbour(cube, sample_points):
+ """
+ Returns a new cube using data value(s) closest to the given coordinate point values.
+
+ The sample_points mapping does not have to include coordinate values corresponding to all data
+ dimensions. Any dimensions unspecified will default to a full slice.
+
+ For example:
+
+ >>> cube = iris.load_cube(iris.sample_data_path('ostia_monthly.nc'))
+ >>> iris.analysis.interpolate.extract_nearest_neighbour(cube, [('latitude', 0), ('longitude', 10)])
+    <iris 'Cube' of surface_temperature / (K) (time: 54)>
+ >>> iris.analysis.interpolate.extract_nearest_neighbour(cube, [('latitude', 0)])
+    <iris 'Cube' of surface_temperature / (K) (time: 54; longitude: 432)>
+
+ Args:
+
+ * cube:
+ An :class:`iris.cube.Cube`.
+ * sample_points
+ A list of tuple pairs mapping coordinate instances or unique coordinate names in the cube to point values.
+
+ Returns:
+ A cube that represents uninterpolated data as near to the given points as possible.
+
+ .. deprecated:: 1.10
+
+ The module :mod:`iris.analysis.interpolate` is deprecated.
+ Please replace usage of
+ :func:`iris.analysis.interpolate.extract_nearest_neighbour`
+ with :meth:`iris.cube.Cube.interpolate` using the scheme
+ :class:`iris.analysis.Nearest`.
+
+ """
+ return cube[nearest_neighbour_indices(cube, sample_points)]
+
+
+def nearest_neighbour_data_value(cube, sample_points):
+ """
+ Returns the data value closest to the given coordinate point values.
+
+ The sample_points mapping must include coordinate values corresponding to all data
+ dimensions.
+
+ For example:
+
+ >>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
+ >>> iris.analysis.interpolate.nearest_neighbour_data_value(cube, [('latitude', 0), ('longitude', 10)])
+ 299.21564
+ >>> iris.analysis.interpolate.nearest_neighbour_data_value(cube, [('latitude', 0)])
+ Traceback (most recent call last):
+ ...
+    ValueError: The sample points given ([('latitude', 0)]) were not specific enough to return a single value from the cube.
+
+
+ Args:
+
+ * cube:
+ An :class:`iris.cube.Cube`.
+ * sample_points
+ A list of tuple pairs mapping coordinate instances or unique coordinate names in the cube to point values.
+
+ Returns:
+ The data value at the point in the cube closest to the supplied coordinate values.
+
+ .. deprecated:: 1.10
+
+ The module :mod:`iris.analysis.interpolate` is deprecated.
+ Please replace usage of
+ :func:`iris.analysis.interpolate.nearest_neighbour_data_value`
+ with :meth:`iris.cube.Cube.interpolate` using the scheme
+ :class:`iris.analysis.Nearest`.
+
+ """
+ indices = nearest_neighbour_indices(cube, sample_points)
+ for ind in indices:
+ if isinstance(ind, slice):
+ raise ValueError('The sample points given (%s) were not specific enough to return a '
+ 'single value from the cube.' % sample_points)
+
+ return cube.data[indices]
+
+
+def regrid(source_cube, grid_cube, mode='bilinear', **kwargs):
+ """
+ Returns a new cube with values derived from the source_cube on the horizontal grid specified
+ by the grid_cube.
+
+ Fundamental input requirements:
+ 1) Both cubes must have a CoordSystem.
+ 2) The source 'x' and 'y' coordinates must not share data dimensions with any other coordinates.
+
+ In addition, the algorithm currently used requires:
+ 3) Both CS instances must be compatible:
+ i.e. of the same type, with the same attribute values, and with compatible coordinates.
+ 4) No new data dimensions can be created.
+    5) Source cube coordinates must map to a single dimension.
+
+ Args:
+
+ * source_cube:
+ An instance of :class:`iris.cube.Cube` which supplies the source data and metadata.
+ * grid_cube:
+ An instance of :class:`iris.cube.Cube` which supplies the horizontal grid definition.
+
+ Kwargs:
+
+ * mode (string):
+ Regridding interpolation algorithm to be applied, which may be one of the following:
+
+ * 'bilinear' for bi-linear interpolation (default), see :func:`iris.analysis.interpolate.linear`.
+ * 'nearest' for nearest neighbour interpolation.
+
+ Returns:
+ A new :class:`iris.cube.Cube` instance.
+
+ .. note::
+
+        The masked status of values is currently ignored. See :func:\
+`~iris.experimental.regrid.regrid_bilinear_rectilinear_src_and_grid`
+ for regrid support with mask awareness.
+
+ .. deprecated:: 1.10
+
+ Please use :meth:`iris.cube.Cube.regrid` instead, with an appropriate
+ regridding scheme:
+
+ * For mode='bilinear', simply use the :class:`~iris.analysis.Linear`
+ scheme.
+
+ * For mode='nearest', use the :class:`~iris.analysis.Nearest` scheme,
+ with extrapolation_mode='extrapolate', but be aware of the
+ following possible differences:
+
+ * Any missing result points, i.e. those which match source points
+            which are masked or NaN, are returned as NaN values by this
+ routine. The 'Nearest' scheme, however, represents missing
+ results as masked points in a masked array.
+ *Which* points are missing is unchanged.
+
+ * Longitude wrapping for this routine is controlled by the
+ 'circular' property of the x coordinate.
+ The 'Nearest' scheme, however, *always* wraps any coords with
+ modular units, such as (correctly formed) longitudes.
+ Thus, behaviour can be different if "x_coord.circular" is
+            False: in that case, if the original non-longitude-wrapped
+ operation is required, it can be replicated by converting all
+ X and Y coordinates' units to '1' and removing their coordinate
+ systems.
+
+ """
+ if mode == 'bilinear':
+ scheme = iris.analysis.Linear(**kwargs)
+ return source_cube.regrid(grid_cube, scheme)
+
+ # Condition 1
+ source_cs = source_cube.coord_system(iris.coord_systems.CoordSystem)
+ grid_cs = grid_cube.coord_system(iris.coord_systems.CoordSystem)
+ if (source_cs is None) != (grid_cs is None):
+ raise ValueError("The source and grid cubes must both have a CoordSystem or both have None.")
+
+ # Condition 2: We can only have one x coordinate and one y coordinate with the source CoordSystem, and those coordinates
+ # must be the only ones occupying their respective dimension
+ source_x = source_cube.coord(axis='x', coord_system=source_cs)
+ source_y = source_cube.coord(axis='y', coord_system=source_cs)
+
+ source_x_dims = source_cube.coord_dims(source_x)
+ source_y_dims = source_cube.coord_dims(source_y)
+
+ source_x_dim = None
+ if source_x_dims:
+ if len(source_x_dims) > 1:
+ raise ValueError('The source x coordinate may not describe more than one data dimension.')
+ source_x_dim = source_x_dims[0]
+ dim_sharers = ', '.join([coord.name() for coord in source_cube.coords(contains_dimension=source_x_dim) if coord is not source_x])
+ if dim_sharers:
+ raise ValueError('No coordinates may share a dimension (dimension %s) with the x '
+ 'coordinate, but (%s) do.' % (source_x_dim, dim_sharers))
+
+ source_y_dim = None
+ if source_y_dims:
+ if len(source_y_dims) > 1:
+ raise ValueError('The source y coordinate may not describe more than one data dimension.')
+ source_y_dim = source_y_dims[0]
+ dim_sharers = ', '.join([coord.name() for coord in source_cube.coords(contains_dimension=source_y_dim) if coord is not source_y])
+ if dim_sharers:
+ raise ValueError('No coordinates may share a dimension (dimension %s) with the y '
+ 'coordinate, but (%s) do.' % (source_y_dim, dim_sharers))
+
+ if source_x_dim is not None and source_y_dim == source_x_dim:
+ raise ValueError('The source x and y coords may not describe the same data dimension.')
+
+
+ # Condition 3
+ # Check for compatible horizontal CSs. Currently that means they're exactly the same except for the coordinate
+ # values.
+ # The same kind of CS ...
+ compatible = (source_cs == grid_cs)
+ if compatible:
+ grid_x = grid_cube.coord(axis='x', coord_system=grid_cs)
+ grid_y = grid_cube.coord(axis='y', coord_system=grid_cs)
+ compatible = source_x.is_compatible(grid_x) and \
+ source_y.is_compatible(grid_y)
+ if not compatible:
+ raise ValueError("The new grid must be defined on the same coordinate system, and have the same coordinate "
+ "metadata, as the source.")
+
+ # Condition 4
+ if grid_cube.coord_dims(grid_x) and not source_x_dims or \
+ grid_cube.coord_dims(grid_y) and not source_y_dims:
+ raise ValueError("The new grid must not require additional data dimensions.")
+
+ x_coord = grid_x.copy()
+ y_coord = grid_y.copy()
+
+
+ #
+ # Adjust the data array to match the new grid.
+ #
+
+ # get the new shape of the data
+ new_shape = list(source_cube.shape)
+ if source_x_dims:
+ new_shape[source_x_dims[0]] = grid_x.shape[0]
+ if source_y_dims:
+ new_shape[source_y_dims[0]] = grid_y.shape[0]
+
+ new_data = np.empty(new_shape, dtype=source_cube.data.dtype)
+
+ # Prepare the index pattern which will be used to insert a single "column" of data.
+ # NB. A "column" is a slice constrained to a single XY point, which therefore extends over *all* the other axes.
+ # For an XYZ cube this means a column only extends over Z and corresponds to the normal definition of "column".
+ indices = [slice(None, None)] * new_data.ndim
+
+ if mode == 'bilinear':
+ # Perform bilinear interpolation, passing through any keywords.
+ points_dict = [(source_x, list(x_coord.points)), (source_y, list(y_coord.points))]
+ new_data = linear(source_cube, points_dict, **kwargs).data
+ else:
+ # Perform nearest neighbour interpolation on each column in turn.
+ for iy, y in enumerate(y_coord.points):
+ for ix, x in enumerate(x_coord.points):
+ column_pos = [(source_x, x), (source_y, y)]
+ column_data = extract_nearest_neighbour(source_cube, column_pos).data
+ if source_y_dim is not None:
+ indices[source_y_dim] = iy
+ if source_x_dim is not None:
+ indices[source_x_dim] = ix
+ new_data[tuple(indices)] = column_data
+
+ # Special case to make 0-dimensional results take the same form as NumPy
+ if new_data.shape == ():
+ new_data = new_data.flat[0]
+
+ # Start with just the metadata and the re-sampled data...
+ new_cube = iris.cube.Cube(new_data)
+ new_cube.metadata = source_cube.metadata
+
+ # ... and then copy across all the unaffected coordinates.
+
+ # Record a mapping from old coordinate IDs to new coordinates,
+ # for subsequent use in creating updated aux_factories.
+ coord_mapping = {}
+
+ def copy_coords(source_coords, add_method):
+ for coord in source_coords:
+ if coord is source_x or coord is source_y:
+ continue
+ dims = source_cube.coord_dims(coord)
+ new_coord = coord.copy()
+ add_method(new_coord, dims)
+ coord_mapping[id(coord)] = new_coord
+
+ copy_coords(source_cube.dim_coords, new_cube.add_dim_coord)
+ copy_coords(source_cube.aux_coords, new_cube.add_aux_coord)
+
+ for factory in source_cube.aux_factories:
+ new_cube.add_aux_factory(factory.updated(coord_mapping))
+
+ # Add the new coords
+ if source_x in source_cube.dim_coords:
+ new_cube.add_dim_coord(x_coord, source_x_dim)
+ else:
+ new_cube.add_aux_coord(x_coord, source_x_dims)
+
+ if source_y in source_cube.dim_coords:
+ new_cube.add_dim_coord(y_coord, source_y_dim)
+ else:
+ new_cube.add_aux_coord(y_coord, source_y_dims)
+
+ return new_cube
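+
+# Replacement sketch for the deprecated 'regrid' above (illustrative):
+#
+#     # mode='bilinear' becomes:
+#     result = source_cube.regrid(grid_cube, iris.analysis.Linear())
+#     # mode='nearest' becomes (note the masking and longitude-wrapping
+#     # caveats in the docstring above):
+#     result = source_cube.regrid(
+#         grid_cube, iris.analysis.Nearest(extrapolation_mode='extrapolate'))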
+
+
+def regrid_to_max_resolution(cubes, **kwargs):
+ """
+ Returns all the cubes re-gridded to the highest horizontal resolution.
+
+ Horizontal resolution is defined by the number of grid points/cells covering the horizontal plane.
+    See :func:`iris.analysis.interpolate.regrid` regarding mode of interpolation.
+
+ Args:
+
+ * cubes:
+ An iterable of :class:`iris.cube.Cube` instances.
+
+ Returns:
+ A list of new :class:`iris.cube.Cube` instances.
+
+ .. deprecated:: 1.10
+
+ The module :mod:`iris.analysis.interpolate` is deprecated.
+ Please replace usage of :func:`regrid_to_max_resolution` with
+ :meth:`iris.cube.Cube.regrid`.
+
+ """
+ # TODO: This could be significantly improved for readability and functionality.
+ resolution = lambda cube_: (cube_.shape[cube_.coord_dims(cube_.coord(axis="x"))[0]]) * (cube_.shape[cube_.coord_dims(cube_.coord(axis="y"))[0]])
+ grid_cube = max(cubes, key=resolution)
+ return [cube.regridded(grid_cube, **kwargs) for cube in cubes]
+
+
+def linear(cube, sample_points, extrapolation_mode='linear'):
+ """
+ Return a cube of the linearly interpolated points given the desired
+ sample points.
+
+ Given a list of tuple pairs mapping coordinates (or coordinate names)
+ to their desired values, return a cube with linearly interpolated values.
+ If more than one coordinate is specified, the linear interpolation will be
+ carried out in sequence, thus providing n-linear interpolation
+ (bi-linear, tri-linear, etc.).
+
+ If the input cube's data is masked, the result cube will have a data
+    mask interpolated to the new sample points.
+
+ .. testsetup::
+
+ import numpy as np
+
+ For example:
+
+ >>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
+ >>> sample_points = [('latitude', np.linspace(-90, 90, 10)),
+ ... ('longitude', np.linspace(-180, 180, 20))]
+ >>> iris.analysis.interpolate.linear(cube, sample_points)
+    <iris 'Cube' of air_temperature / (K) (latitude: 10; longitude: 20)>
+
+ .. note::
+
+ By definition, linear interpolation requires all coordinates to
+ be 1-dimensional.
+
+ .. note::
+
+ If a specified coordinate is single valued its value will be
+ extrapolated to the desired sample points by assuming a gradient of
+ zero.
+
+ Args:
+
+ * cube
+ The cube to be interpolated.
+
+ * sample_points
+ List of one or more tuple pairs mapping coordinate to desired
+ points to interpolate. Points may be a scalar or a numpy array
+ of values. Multi-dimensional coordinates are not supported.
+
+ Kwargs:
+
+ * extrapolation_mode - string - one of 'linear', 'nan' or 'error'
+
+ * If 'linear' the point will be calculated by extending the
+ gradient of closest two points.
+ * If 'nan' the extrapolation point will be put as a NaN.
+ * If 'error' a value error will be raised notifying of the
+ attempted extrapolation.
+
+ .. note::
+
+ If the source cube's data, or any of its resampled coordinates,
+ have an integer data type they will be promoted to a floating
+ point data type in the result.
+
+ .. deprecated:: 1.10
+
+ The module :mod:`iris.analysis.interpolate` is deprecated.
+ Please replace usage of
+ :func:`iris.analysis.interpolate.linear`
+ with :meth:`iris.cube.Cube.interpolate` using the scheme
+ :class:`iris.analysis.Linear`.
+
+ """
+ if isinstance(sample_points, dict):
+ sample_points = list(sample_points.items())
+
+ # catch the case where a user passes a single (coord/name, value) pair rather than a list of pairs
+ if sample_points and not (isinstance(sample_points[0], collections.Container) and not isinstance(sample_points[0], six.string_types)):
+ raise TypeError('Expecting the sample points to be a list of tuple pairs representing (coord, points), got a list of %s.' % type(sample_points[0]))
+
+ scheme = Linear(extrapolation_mode)
+ return cube.interpolate(sample_points, scheme)
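+
+# Replacement sketch for the deprecated 'linear' above (illustrative):
+#
+#     scheme = iris.analysis.Linear(extrapolation_mode='linear')
+#     result = cube.interpolate(sample_points, scheme)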
+
+
+def _interp1d_rolls_y():
+ """
+ Determines if :class:`scipy.interpolate.interp1d` rolls its array `y` by
+ comparing the shape of y passed into interp1d to the shape of its internal
+ representation of y.
+
+ SciPy v0.13.x+ no longer rolls the axis of its internal representation
+ of y so we test for this occurring to prevent us subsequently
+ extrapolating along the wrong axis.
+
+ For further information on this change see, for example:
+ * https://github.com/scipy/scipy/commit/0d906d0fc54388464603c63119b9e35c9a9c4601
+ (the commit that introduced the change in behaviour).
+ * https://github.com/scipy/scipy/issues/2621
+ (a discussion on the change - note the issue is not resolved
+ at time of writing).
+
+ """
+ y = np.arange(12).reshape(3, 4)
+ f = interp1d(np.arange(3), y, axis=0)
+ # If the initial shape of y and the shape internal to interp1d are *not*
+ # the same then scipy.interp1d rolls y.
+ return y.shape != f.y.shape
+
+
+class Linear1dExtrapolator(object):
+ """
+ Extension class to :class:`scipy.interpolate.interp1d` to provide linear extrapolation.
+
+ See also: :mod:`scipy.interpolate`.
+
+    .. deprecated:: 1.10
+
+ """
+ roll_y = _interp1d_rolls_y()
+
+ def __init__(self, interpolator):
+ """
+ Given an already created :class:`scipy.interpolate.interp1d` instance, return a callable object
+ which supports linear extrapolation.
+
+        .. deprecated:: 1.10
+
+ """
+ self._interpolator = interpolator
+ self.x = interpolator.x
+ # Store the y values given to the interpolator.
+ self.y = interpolator.y
+ """
+ The y values given to the interpolator object.
+
+ .. note:: These are stored with the interpolator.axis last.
+
+ """
+ # Roll interpolator.axis to the end if scipy no longer does it for us.
+ if not self.roll_y:
+ self.y = np.rollaxis(self.y, self._interpolator.axis, self.y.ndim)
+
+ def all_points_in_range(self, requested_x):
+ """Given the x points, do all of the points sit inside the interpolation range."""
+ test = (requested_x >= self.x[0]) & (requested_x <= self.x[-1])
+ if isinstance(test, np.ndarray):
+ test = test.all()
+ return test
+
+ def __call__(self, requested_x):
+ if not self.all_points_in_range(requested_x):
+ # cast requested_x to a numpy array if it is not already.
+ if not isinstance(requested_x, np.ndarray):
+ requested_x = np.array(requested_x)
+
+ # we need to catch the special case of providing a single value...
+ remember_that_i_was_0d = requested_x.ndim == 0
+
+ requested_x = requested_x.flatten()
+
+ gt = np.where(requested_x > self.x[-1])[0]
+ lt = np.where(requested_x < self.x[0])[0]
+ ok = np.where( (requested_x >= self.x[0]) & (requested_x <= self.x[-1]) )[0]
+
+ data_shape = list(self.y.shape)
+ data_shape[-1] = len(requested_x)
+ result = np.empty(data_shape, dtype=self._interpolator(self.x[0]).dtype)
+
+ # Make a variable to represent the slice into the resultant data. (This will be updated in each of gt, lt & ok)
+ interpolator_result_index = [slice(None, None)] * self.y.ndim
+
+ if len(ok) != 0:
+ interpolator_result_index[-1] = ok
+
+ r = self._interpolator(requested_x[ok])
+ # Reshape the properly formed array to put the interpolator.axis last i.e. dims 0, 1, 2 -> 0, 2, 1 if axis = 1
+ axes = list(range(r.ndim))
+ del axes[self._interpolator.axis]
+ axes.append(self._interpolator.axis)
+
+ result[interpolator_result_index] = r.transpose(axes)
+
+ if len(lt) != 0:
+ interpolator_result_index[-1] = lt
+
+ grad = (self.y[..., 1:2] - self.y[..., 0:1]) / (self.x[1] - self.x[0])
+ result[interpolator_result_index] = self.y[..., 0:1] + (requested_x[lt] - self.x[0]) * grad
+
+ if len(gt) != 0:
+ interpolator_result_index[-1] = gt
+
+ grad = (self.y[..., -1:] - self.y[..., -2:-1]) / (self.x[-1] - self.x[-2])
+ result[interpolator_result_index] = self.y[..., -1:] + (requested_x[gt] - self.x[-1]) * grad
+
+ axes = list(range(len(interpolator_result_index)))
+ axes.insert(self._interpolator.axis, axes.pop(axes[-1]))
+ result = result.transpose(axes)
+
+ if remember_that_i_was_0d:
+ new_shape = list(result.shape)
+ del new_shape[self._interpolator.axis]
+ result = result.reshape(new_shape)
+
+ return result
+ else:
+ return self._interpolator(requested_x)
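+
+
+# Usage sketch (illustrative): wrap an interp1d to gain linear extrapolation.
+#
+#     x = np.array([1.0, 2.0, 3.0])
+#     y = np.array([2.0, 4.0, 6.0])
+#     extrap = Linear1dExtrapolator(interp1d(x, y))
+#     extrap(np.array([0.0, 2.5, 4.0]))  # -> array([ 0.,  5.,  8.])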
diff --git a/lib/iris/analysis/interpolate.py b/lib/iris/analysis/interpolate.py
index 990b4e9bf0..14551dea18 100644
--- a/lib/iris/analysis/interpolate.py
+++ b/lib/iris/analysis/interpolate.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2010 - 2015, Met Office
+# (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
@@ -19,6 +19,13 @@
 See also: :mod:`NumPy <numpy>`, and :ref:`SciPy <scipy>`.
+.. deprecated:: 1.10
+
+ The module :mod:`iris.analysis.interpolate` is deprecated.
+ Please use :meth:`iris.cube.Cube.regrid` or :meth:`iris.cube.Cube.interpolate`
+ with the appropriate regridding and interpolation schemes from
+ :mod:`iris.analysis` instead.
+
"""
from __future__ import (absolute_import, division, print_function)
@@ -26,6 +33,7 @@
import six
import collections
+from functools import wraps
import warnings
import numpy as np
@@ -38,727 +46,97 @@
import iris.coord_systems
import iris.coords
import iris.exceptions
+import iris.analysis._interpolate_private as oldinterp
+# Import deprecation support from a private helper module.
+# It lives there so that we can use it from elsewhere without triggering
+# this module's deprecation warning (!)
+from iris._deprecation_helpers import ClassWrapperSameDocstring
-def _ll_to_cart(lon, lat):
- # Based on cartopy.img_transform.ll_to_cart()
- x = np.sin(np.deg2rad(90 - lat)) * np.cos(np.deg2rad(lon))
- y = np.sin(np.deg2rad(90 - lat)) * np.sin(np.deg2rad(lon))
- z = np.cos(np.deg2rad(90 - lat))
- return (x, y, z)
-
-def _cartesian_sample_points(sample_points, sample_point_coord_names):
- # Replace geographic latlon with cartesian xyz.
- # Generates coords suitable for nearest point calculations with scipy.spatial.cKDTree.
- #
- # Input:
- # sample_points[coord][datum] : list of sample_positions for each datum, formatted for fast use of _ll_to_cart()
- # sample_point_coord_names[coord] : list of n coord names
- #
- # Output:
- # list of [x,y,z,t,etc] positions, formatted for kdtree
-
- # Find lat and lon coord indices
- i_lat = i_lon = None
- i_non_latlon = list(range(len(sample_point_coord_names)))
- for i, name in enumerate(sample_point_coord_names):
- if "latitude" in name:
- i_lat = i
- i_non_latlon.remove(i_lat)
- if "longitude" in name:
- i_lon = i
- i_non_latlon.remove(i_lon)
-
- if i_lat is None or i_lon is None:
- return sample_points.transpose()
-
- num_points = len(sample_points[0])
- cartesian_points = [None] * num_points
-
- # Get the point coordinates without the latlon
- for p in range(num_points):
- cartesian_points[p] = [sample_points[c][p] for c in i_non_latlon]
-
- # Add cartesian xyz coordinates from latlon
- x, y, z = _ll_to_cart(sample_points[i_lon], sample_points[i_lat])
- for p in range(num_points):
- cartesian_point = cartesian_points[p]
- cartesian_point.append(x[p])
- cartesian_point.append(y[p])
- cartesian_point.append(z[p])
-
- return cartesian_points
+_INTERPOLATE_DEPRECATION_WARNING = \
+ "The module 'iris.analysis.interpolate' is deprecated."
-def nearest_neighbour_indices(cube, sample_points):
- """
- Returns the indices to select the data value(s) closest to the given coordinate point values.
- The sample_points mapping does not have to include coordinate values corresponding to all data
- dimensions. Any dimensions unspecified will default to a full slice.
+# Define a common callpoint for deprecation warnings.
+def _warn_deprecated(msg=None):
+ if msg is None:
+ msg = _INTERPOLATE_DEPRECATION_WARNING
+ warnings.warn(msg)
- For example:
+# Issue a deprecation message when the module is loaded.
+_warn_deprecated()
- >>> cube = iris.load_cube(iris.sample_data_path('ostia_monthly.nc'))
- >>> iris.analysis.interpolate.nearest_neighbour_indices(cube, [('latitude', 0), ('longitude', 10)])
- (slice(None, None, None), 9, 12)
- >>> iris.analysis.interpolate.nearest_neighbour_indices(cube, [('latitude', 0)])
- (slice(None, None, None), 9, slice(None, None, None))
- Args:
+def nearest_neighbour_indices(cube, sample_points):
+ msg = (_INTERPOLATE_DEPRECATION_WARNING + '\n' +
+ 'Please replace usage of '
+ 'iris.analysis.interpolate.nearest_neighbour_indices() '
+           'with iris.coords.Coord.nearest_neighbour_index().')
+ _warn_deprecated(msg)
+ return oldinterp.nearest_neighbour_indices(cube, sample_points)
- * cube:
- An :class:`iris.cube.Cube`.
- * sample_points
- A list of tuple pairs mapping coordinate instances or unique coordinate names in the cube to point values.
-
- Returns:
- The tuple of indices which will select the point in the cube closest to the supplied coordinate values.
-
- .. note::
-
- Nearest neighbour interpolation of multidimensional coordinates is not
- yet supported.
-
- """
- if isinstance(sample_points, dict):
- warnings.warn('Providing a dictionary to specify points is deprecated. Please provide a list of (coordinate, values) pairs.')
- sample_points = list(sample_points.items())
-
- if sample_points:
- try:
- coord, values = sample_points[0]
- except ValueError:
- raise ValueError('Sample points must be a list of (coordinate, value) pairs. Got %r.' % sample_points)
-
- points = []
- for coord, values in sample_points:
- if isinstance(coord, six.string_types):
- coord = cube.coord(coord)
- else:
- coord = cube.coord(coord)
- points.append((coord, values))
- sample_points = points
-
- # Build up a list of indices to span the cube.
- indices = [slice(None, None)] * cube.ndim
-
- # Build up a dictionary which maps the cube's data dimensions to a list (which will later
- # be populated by coordinates in the sample points list)
- dim_to_coord_map = {}
- for i in range(cube.ndim):
- dim_to_coord_map[i] = []
-
- # Iterate over all of the specifications provided by sample_points
- for coord, point in sample_points:
- data_dim = cube.coord_dims(coord)
-
- # If no data dimension then we don't need to make any modifications to indices.
- if not data_dim:
- continue
- elif len(data_dim) > 1:
- raise iris.exceptions.CoordinateMultiDimError("Nearest neighbour interpolation of multidimensional "
- "coordinates is not supported.")
- data_dim = data_dim[0]
-
- dim_to_coord_map[data_dim].append(coord)
-
- #calculate the nearest neighbour
- min_index = coord.nearest_neighbour_index(point)
-
- if getattr(coord, 'circular', False):
- warnings.warn("Nearest neighbour on a circular coordinate may not be picking the nearest point.", DeprecationWarning)
-
- # If the dimension has already been interpolated then assert that the index from this coordinate
- # agrees with the index already calculated, otherwise we have a contradicting specification
- if indices[data_dim] != slice(None, None) and min_index != indices[data_dim]:
- raise ValueError('The coordinates provided (%s) over specify dimension %s.' %
- (', '.join([coord.name() for coord in dim_to_coord_map[data_dim]]), data_dim))
-
- indices[data_dim] = min_index
-
- return tuple(indices)
-
-
-def _nearest_neighbour_indices_ndcoords(cube, sample_point, cache=None):
- """
- See documentation for :func:`iris.analysis.interpolate.nearest_neighbour_indices`.
-
- This function is adapted for points sampling a multi-dimensional coord,
- and can currently only do nearest neighbour interpolation.
-
- Because this function can be slow for multidimensional coordinates,
- a 'cache' dictionary can be provided by the calling code.
-
- """
-
- # Developer notes:
- # A "sample space cube" is made which only has the coords and dims we are sampling on.
- # We get the nearest neighbour using this sample space cube.
-
- if isinstance(sample_point, dict):
- warnings.warn('Providing a dictionary to specify points is deprecated. Please provide a list of (coordinate, values) pairs.')
- sample_point = list(sample_point.items())
-
- if sample_point:
- try:
- coord, value = sample_point[0]
- except ValueError:
- raise ValueError('Sample points must be a list of (coordinate, value) pairs. Got %r.' % sample_point)
-
- # Convert names to coords in sample_point
- point = []
- ok_coord_ids = set(map(id, cube.dim_coords + cube.aux_coords))
- for coord, value in sample_point:
- if isinstance(coord, six.string_types):
- coord = cube.coord(coord)
- else:
- coord = cube.coord(coord)
- if id(coord) not in ok_coord_ids:
- msg = ('Invalid sample coordinate {!r}: derived coordinates are'
- ' not allowed.'.format(coord.name()))
- raise ValueError(msg)
- point.append((coord, value))
-
- # Reformat sample_point for use in _cartesian_sample_points(), below.
- sample_point = np.array([[value] for coord, value in point])
- sample_point_coords = [coord for coord, value in point]
- sample_point_coord_names = [coord.name() for coord, value in point]
-
- # Which dims are we sampling?
- sample_dims = set()
- for coord in sample_point_coords:
- for dim in cube.coord_dims(coord):
- sample_dims.add(dim)
- sample_dims = sorted(list(sample_dims))
-
- # Extract a sub cube that lives in just the sampling space.
- sample_space_slice = [0] * cube.ndim
- for sample_dim in sample_dims:
- sample_space_slice[sample_dim] = slice(None, None)
- sample_space_slice = tuple(sample_space_slice)
- sample_space_cube = cube[sample_space_slice]
-
- #...with just the sampling coords
- for coord in sample_space_cube.coords():
- if not coord.name() in sample_point_coord_names:
- sample_space_cube.remove_coord(coord)
-
- # Order the sample point coords according to the sample space cube coords
- sample_space_coord_names = [coord.name() for coord in sample_space_cube.coords()]
- new_order = [sample_space_coord_names.index(name) for name in sample_point_coord_names]
- sample_point = np.array([sample_point[i] for i in new_order])
- sample_point_coord_names = [sample_point_coord_names[i] for i in new_order]
-
- # Convert the sample point to cartesian coords.
- # If there is no latlon within the coordinate there will be no change.
- # Otherwise, geographic latlon is replaced with cartesian xyz.
- cartesian_sample_point = _cartesian_sample_points(sample_point, sample_point_coord_names)[0]
-
- sample_space_coords = sample_space_cube.dim_coords + sample_space_cube.aux_coords
- sample_space_coords_and_dims = [(coord, sample_space_cube.coord_dims(coord)) for coord in sample_space_coords]
-
- if cache is not None and cube in cache:
- kdtree = cache[cube]
- else:
- # Create a "sample space position" for each datum: sample_space_data_positions[coord_index][datum_index]
- sample_space_data_positions = np.empty((len(sample_space_coords_and_dims), sample_space_cube.data.size), dtype=float)
- for d, ndi in enumerate(np.ndindex(sample_space_cube.data.shape)):
- for c, (coord, coord_dims) in enumerate(sample_space_coords_and_dims):
- # Index of this datum along this coordinate (could be nD).
- keys = tuple(ndi[ind] for ind in coord_dims) if coord_dims else slice(None, None)
- # Position of this datum along this coordinate.
- sample_space_data_positions[c][d] = coord.points[keys]
-
- # Convert to cartesian coordinates. Flatten for kdtree compatibility.
- cartesian_space_data_coords = _cartesian_sample_points(sample_space_data_positions, sample_point_coord_names)
-
- # Get the nearest datum index to the sample point. This is the goal of the function.
- kdtree = scipy.spatial.cKDTree(cartesian_space_data_coords)
-
- cartesian_distance, datum_index = kdtree.query(cartesian_sample_point)
- sample_space_ndi = np.unravel_index(datum_index, sample_space_cube.data.shape)
-
- # Turn sample_space_ndi into a main cube slice.
- # Map sample cube to main cube dims and leave the rest as a full slice.
- main_cube_slice = [slice(None, None)] * cube.ndim
- for sample_coord, sample_coord_dims in sample_space_coords_and_dims:
- # Find the coord in the main cube
- main_coord = cube.coord(sample_coord.name())
- main_coord_dims = cube.coord_dims(main_coord)
- # Mark the nearest data index/indices with respect to this coord
- for sample_i, main_i in zip(sample_coord_dims, main_coord_dims):
- main_cube_slice[main_i] = sample_space_ndi[sample_i]
-
-
- # Update cache
- if cache is not None:
- cache[cube] = kdtree
-
- return tuple(main_cube_slice)
+nearest_neighbour_indices.__doc__ = oldinterp.nearest_neighbour_indices.__doc__
def extract_nearest_neighbour(cube, sample_points):
- """
- Returns a new cube using data value(s) closest to the given coordinate point values.
-
- The sample_points mapping does not have to include coordinate values corresponding to all data
- dimensions. Any dimensions unspecified will default to a full slice.
-
- For example:
-
- >>> cube = iris.load_cube(iris.sample_data_path('ostia_monthly.nc'))
- >>> iris.analysis.interpolate.extract_nearest_neighbour(cube, [('latitude', 0), ('longitude', 10)])
-
- >>> iris.analysis.interpolate.extract_nearest_neighbour(cube, [('latitude', 0)])
-
-
- Args:
-
- * cube:
- An :class:`iris.cube.Cube`.
- * sample_points
- A list of tuple pairs mapping coordinate instances or unique coordinate names in the cube to point values.
+ msg = (_INTERPOLATE_DEPRECATION_WARNING + '\n' +
+ 'Please replace usage of '
+ 'iris.analysis.interpolate.extract_nearest_neighbour() with '
+ 'iris.cube.Cube.interpolate(..., scheme=iris.analysis.Nearest()).')
+ _warn_deprecated(msg)
+ return oldinterp.extract_nearest_neighbour(cube, sample_points)
- Returns:
- A cube that represents uninterpolated data as near to the given points as possible.
-
- """
- return cube[nearest_neighbour_indices(cube, sample_points)]
+extract_nearest_neighbour.__doc__ = oldinterp.extract_nearest_neighbour.__doc__
def nearest_neighbour_data_value(cube, sample_points):
- """
- Returns the data value closest to the given coordinate point values.
-
- The sample_points mapping must include coordinate values corresponding to all data
- dimensions.
-
- For example:
-
- >>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
- >>> iris.analysis.interpolate.nearest_neighbour_data_value(cube, [('latitude', 0), ('longitude', 10)])
- 299.21564
- >>> iris.analysis.interpolate.nearest_neighbour_data_value(cube, [('latitude', 0)])
- Traceback (most recent call last):
- ...
- ValueError: The sample points [('latitude', 0)] was not specific enough to return a single value from the cube.
-
-
- Args:
-
- * cube:
- An :class:`iris.cube.Cube`.
- * sample_points
- A list of tuple pairs mapping coordinate instances or unique coordinate names in the cube to point values.
-
- Returns:
- The data value at the point in the cube closest to the supplied coordinate values.
-
- """
- indices = nearest_neighbour_indices(cube, sample_points)
- for ind in indices:
- if isinstance(ind, slice):
- raise ValueError('The sample points given (%s) were not specific enough to return a '
- 'single value from the cube.' % sample_points)
+ msg = (_INTERPOLATE_DEPRECATION_WARNING + '\n' +
+ 'Please replace usage of '
+ 'iris.analysis.interpolate.nearest_neighbour_data_value() with '
+ 'iris.cube.Cube.interpolate(..., scheme=iris.analysis.Nearest()).')
+ _warn_deprecated(msg)
+ return oldinterp.nearest_neighbour_data_value(cube, sample_points)
- return cube.data[indices]
+nearest_neighbour_data_value.__doc__ = \
+ oldinterp.nearest_neighbour_data_value.__doc__
def regrid(source_cube, grid_cube, mode='bilinear', **kwargs):
- """
- Returns a new cube with values derived from the source_cube on the horizontal grid specified
- by the grid_cube.
-
- Fundamental input requirements:
- 1) Both cubes must have a CoordSystem.
- 2) The source 'x' and 'y' coordinates must not share data dimensions with any other coordinates.
-
- In addition, the algorithm currently used requires:
- 3) Both CS instances must be compatible:
- i.e. of the same type, with the same attribute values, and with compatible coordinates.
- 4) No new data dimensions can be created.
- 5) Source cube coordinates to map to a single dimension.
-
- Args:
-
- * source_cube:
- An instance of :class:`iris.cube.Cube` which supplies the source data and metadata.
- * grid_cube:
- An instance of :class:`iris.cube.Cube` which supplies the horizontal grid definition.
-
- Kwargs:
-
- * mode (string):
- Regridding interpolation algorithm to be applied, which may be one of the following:
-
- * 'bilinear' for bi-linear interpolation (default), see :func:`iris.analysis.interpolate.linear`.
- * 'nearest' for nearest neighbour interpolation.
-
- Returns:
- A new :class:`iris.cube.Cube` instance.
-
- .. note::
-
- The masked status of values are currently ignored. See :func:\
-`~iris.experimental.regrid.regrid_bilinear_rectilinear_src_and_grid`
- for regrid support with mask awareness.
-
- """
- if mode == 'bilinear':
- scheme = iris.analysis.Linear(**kwargs)
- return source_cube.regrid(grid_cube, scheme)
-
- # Condition 1
- source_cs = source_cube.coord_system(iris.coord_systems.CoordSystem)
- grid_cs = grid_cube.coord_system(iris.coord_systems.CoordSystem)
- if (source_cs is None) != (grid_cs is None):
- raise ValueError("The source and grid cubes must both have a CoordSystem or both have None.")
-
- # Condition 2: We can only have one x coordinate and one y coordinate with the source CoordSystem, and those coordinates
- # must be the only ones occupying their respective dimension
- source_x = source_cube.coord(axis='x', coord_system=source_cs)
- source_y = source_cube.coord(axis='y', coord_system=source_cs)
-
- source_x_dims = source_cube.coord_dims(source_x)
- source_y_dims = source_cube.coord_dims(source_y)
-
- source_x_dim = None
- if source_x_dims:
- if len(source_x_dims) > 1:
- raise ValueError('The source x coordinate may not describe more than one data dimension.')
- source_x_dim = source_x_dims[0]
- dim_sharers = ', '.join([coord.name() for coord in source_cube.coords(contains_dimension=source_x_dim) if coord is not source_x])
- if dim_sharers:
- raise ValueError('No coordinates may share a dimension (dimension %s) with the x '
- 'coordinate, but (%s) do.' % (source_x_dim, dim_sharers))
-
- source_y_dim = None
- if source_y_dims:
- if len(source_y_dims) > 1:
- raise ValueError('The source y coordinate may not describe more than one data dimension.')
- source_y_dim = source_y_dims[0]
- dim_sharers = ', '.join([coord.name() for coord in source_cube.coords(contains_dimension=source_y_dim) if coord is not source_y])
- if dim_sharers:
- raise ValueError('No coordinates may share a dimension (dimension %s) with the y '
- 'coordinate, but (%s) do.' % (source_y_dim, dim_sharers))
-
- if source_x_dim is not None and source_y_dim == source_x_dim:
- raise ValueError('The source x and y coords may not describe the same data dimension.')
-
-
- # Condition 3
- # Check for compatible horizontal CSs. Currently that means they're exactly the same except for the coordinate
- # values.
- # The same kind of CS ...
- compatible = (source_cs == grid_cs)
- if compatible:
- grid_x = grid_cube.coord(axis='x', coord_system=grid_cs)
- grid_y = grid_cube.coord(axis='y', coord_system=grid_cs)
- compatible = source_x.is_compatible(grid_x) and \
- source_y.is_compatible(grid_y)
- if not compatible:
- raise ValueError("The new grid must be defined on the same coordinate system, and have the same coordinate "
- "metadata, as the source.")
-
- # Condition 4
- if grid_cube.coord_dims(grid_x) and not source_x_dims or \
- grid_cube.coord_dims(grid_y) and not source_y_dims:
- raise ValueError("The new grid must not require additional data dimensions.")
-
- x_coord = grid_x.copy()
- y_coord = grid_y.copy()
-
-
- #
- # Adjust the data array to match the new grid.
- #
-
- # get the new shape of the data
- new_shape = list(source_cube.shape)
- if source_x_dims:
- new_shape[source_x_dims[0]] = grid_x.shape[0]
- if source_y_dims:
- new_shape[source_y_dims[0]] = grid_y.shape[0]
-
- new_data = np.empty(new_shape, dtype=source_cube.data.dtype)
-
- # Prepare the index pattern which will be used to insert a single "column" of data.
- # NB. A "column" is a slice constrained to a single XY point, which therefore extends over *all* the other axes.
- # For an XYZ cube this means a column only extends over Z and corresponds to the normal definition of "column".
- indices = [slice(None, None)] * new_data.ndim
-
- if mode == 'bilinear':
- # Perform bilinear interpolation, passing through any keywords.
- points_dict = [(source_x, list(x_coord.points)), (source_y, list(y_coord.points))]
- new_data = linear(source_cube, points_dict, **kwargs).data
- else:
- # Perform nearest neighbour interpolation on each column in turn.
- for iy, y in enumerate(y_coord.points):
- for ix, x in enumerate(x_coord.points):
- column_pos = [(source_x, x), (source_y, y)]
- column_data = extract_nearest_neighbour(source_cube, column_pos).data
- if source_y_dim is not None:
- indices[source_y_dim] = iy
- if source_x_dim is not None:
- indices[source_x_dim] = ix
- new_data[tuple(indices)] = column_data
-
- # Special case to make 0-dimensional results take the same form as NumPy
- if new_data.shape == ():
- new_data = new_data.flat[0]
-
- # Start with just the metadata and the re-sampled data...
- new_cube = iris.cube.Cube(new_data)
- new_cube.metadata = source_cube.metadata
-
- # ... and then copy across all the unaffected coordinates.
-
- # Record a mapping from old coordinate IDs to new coordinates,
- # for subsequent use in creating updated aux_factories.
- coord_mapping = {}
-
- def copy_coords(source_coords, add_method):
- for coord in source_coords:
- if coord is source_x or coord is source_y:
- continue
- dims = source_cube.coord_dims(coord)
- new_coord = coord.copy()
- add_method(new_coord, dims)
- coord_mapping[id(coord)] = new_coord
-
- copy_coords(source_cube.dim_coords, new_cube.add_dim_coord)
- copy_coords(source_cube.aux_coords, new_cube.add_aux_coord)
-
- for factory in source_cube.aux_factories:
- new_cube.add_aux_factory(factory.updated(coord_mapping))
-
- # Add the new coords
- if source_x in source_cube.dim_coords:
- new_cube.add_dim_coord(x_coord, source_x_dim)
- else:
- new_cube.add_aux_coord(x_coord, source_x_dims)
-
- if source_y in source_cube.dim_coords:
- new_cube.add_dim_coord(y_coord, source_y_dim)
- else:
- new_cube.add_aux_coord(y_coord, source_y_dims)
-
- return new_cube
+ msg = (_INTERPOLATE_DEPRECATION_WARNING + '\n' +
+ 'Please replace usage of iris.analysis.interpolate.regrid() '
+ 'with iris.cube.Cube.regrid().')
+ _warn_deprecated(msg)
+ return oldinterp.regrid(source_cube, grid_cube, mode=mode, **kwargs)
+regrid.__doc__ = oldinterp.regrid.__doc__
-def regrid_to_max_resolution(cubes, **kwargs):
- """
- Returns all the cubes re-gridded to the highest horizontal resolution.
-
- Horizontal resolution is defined by the number of grid points/cells covering the horizontal plane.
- See :func:`iris.analysis.interpolation.regrid` regarding mode of interpolation.
-
- Args:
- * cubes:
- An iterable of :class:`iris.cube.Cube` instances.
-
- Returns:
- A list of new :class:`iris.cube.Cube` instances.
+def regrid_to_max_resolution(cubes, **kwargs):
+ msg = (_INTERPOLATE_DEPRECATION_WARNING + '\n' +
+ 'Please replace usage of '
+ 'iris.analysis.interpolate.regrid_to_max_resolution() '
+ 'with iris.cube.Cube.regrid().')
+ _warn_deprecated(msg)
+ return oldinterp.regrid_to_max_resolution(cubes, **kwargs)
- """
- # TODO: This could be significantly improved for readability and functionality.
- resolution = lambda cube_: (cube_.shape[cube_.coord_dims(cube_.coord(axis="x"))[0]]) * (cube_.shape[cube_.coord_dims(cube_.coord(axis="y"))[0]])
- grid_cube = max(cubes, key=resolution)
- return [cube.regridded(grid_cube, **kwargs) for cube in cubes]
+regrid_to_max_resolution.__doc__ = oldinterp.regrid_to_max_resolution.__doc__
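+# Replacement sketch for the deprecated helper: pick the highest-resolution
+# cube as the target grid (as the removed implementation did with its
+# 'resolution' lambda), then regrid the others onto it via the new API:
+#   grid_cube = max(cubes, key=resolution)
+#   results = [c.regrid(grid_cube, iris.analysis.Linear()) for c in cubes]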
def linear(cube, sample_points, extrapolation_mode='linear'):
- """
- Return a cube of the linearly interpolated points given the desired
- sample points.
-
- Given a list of tuple pairs mapping coordinates (or coordinate names)
- to their desired values, return a cube with linearly interpolated values.
- If more than one coordinate is specified, the linear interpolation will be
- carried out in sequence, thus providing n-linear interpolation
- (bi-linear, tri-linear, etc.).
-
- If the input cube's data is masked, the result cube will have a data
- mask interpolated to the new sample points
-
- .. testsetup::
-
- import numpy as np
-
- For example:
-
- >>> cube = iris.load_cube(iris.sample_data_path('air_temp.pp'))
- >>> sample_points = [('latitude', np.linspace(-90, 90, 10)),
- ... ('longitude', np.linspace(-180, 180, 20))]
- >>> iris.analysis.interpolate.linear(cube, sample_points)
-
-
- .. note::
-
- By definition, linear interpolation requires all coordinates to
- be 1-dimensional.
-
- .. note::
-
- If a specified coordinate is single valued its value will be
- extrapolated to the desired sample points by assuming a gradient of
- zero.
-
- Args:
-
- * cube
- The cube to be interpolated.
-
- * sample_points
- List of one or more tuple pairs mapping coordinate to desired
- points to interpolate. Points may be a scalar or a numpy array
- of values. Multi-dimensional coordinates are not supported.
+ msg = (_INTERPOLATE_DEPRECATION_WARNING + '\n' +
+ 'Please replace usage of iris.analysis.interpolate.linear() with '
+ 'iris.cube.Cube.interpolate(..., scheme=iris.analysis.Linear()).')
+ _warn_deprecated(msg)
+ return oldinterp.linear(cube, sample_points,
+ extrapolation_mode=extrapolation_mode)
- Kwargs:
+linear.__doc__ = oldinterp.linear.__doc__
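+# For example (a sketch): the deprecated call
+#   linear(cube, [('latitude', [0., 10.])])
+# is equivalent to
+#   cube.interpolate([('latitude', [0., 10.])],
+#                    iris.analysis.Linear(extrapolation_mode='linear'))
+# which is what the removed implementation did internally.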
- * extrapolation_mode - string - one of 'linear', 'nan' or 'error'
-
- * If 'linear' the point will be calculated by extending the
- gradient of closest two points.
- * If 'nan' the extrapolation point will be put as a NaN.
- * If 'error' a value error will be raised notifying of the
- attempted extrapolation.
-
- .. note::
-
- If the source cube's data, or any of its resampled coordinates,
- have an integer data type they will be promoted to a floating
- point data type in the result.
-
- """
- if isinstance(sample_points, dict):
- sample_points = list(sample_points.items())
-
- # catch the case where a user passes a single (coord/name, value) pair rather than a list of pairs
- if sample_points and not (isinstance(sample_points[0], collections.Container) and not isinstance(sample_points[0], six.string_types)):
- raise TypeError('Expecting the sample points to be a list of tuple pairs representing (coord, points), got a list of %s.' % type(sample_points[0]))
-
- scheme = Linear(extrapolation_mode)
- return cube.interpolate(sample_points, scheme)
-
-
-def _interp1d_rolls_y():
- """
- Determines if :class:`scipy.interpolate.interp1d` rolls its array `y` by
- comparing the shape of y passed into interp1d to the shape of its internal
- representation of y.
-
- SciPy v0.13.x+ no longer rolls the axis of its internal representation
- of y so we test for this occurring to prevent us subsequently
- extrapolating along the wrong axis.
-
- For further information on this change see, for example:
- * https://github.com/scipy/scipy/commit/0d906d0fc54388464603c63119b9e35c9a9c4601
- (the commit that introduced the change in behaviour).
- * https://github.com/scipy/scipy/issues/2621
- (a discussion on the change - note the issue is not resolved
- at time of writing).
-
- """
- y = np.arange(12).reshape(3, 4)
- f = interp1d(np.arange(3), y, axis=0)
- # If the initial shape of y and the shape internal to interp1d are *not*
- # the same then scipy.interp1d rolls y.
- return y.shape != f.y.shape
-
-
-class Linear1dExtrapolator(object):
- """
- Extension class to :class:`scipy.interpolate.interp1d` to provide linear extrapolation.
-
- See also: :mod:`scipy.interpolate`.
-
- """
- roll_y = _interp1d_rolls_y()
+class Linear1dExtrapolator(six.with_metaclass(ClassWrapperSameDocstring,
+ oldinterp.Linear1dExtrapolator)):
+ @wraps(oldinterp.Linear1dExtrapolator.__init__)
def __init__(self, interpolator):
- """
- Given an already created :class:`scipy.interpolate.interp1d` instance, return a callable object
- which supports linear extrapolation.
-
- """
- self._interpolator = interpolator
- self.x = interpolator.x
- # Store the y values given to the interpolator.
- self.y = interpolator.y
- """
- The y values given to the interpolator object.
-
- .. note:: These are stored with the interpolator.axis last.
-
- """
- # Roll interpolator.axis to the end if scipy no longer does it for us.
- if not self.roll_y:
- self.y = np.rollaxis(self.y, self._interpolator.axis, self.y.ndim)
-
- def all_points_in_range(self, requested_x):
- """Given the x points, do all of the points sit inside the interpolation range."""
- test = (requested_x >= self.x[0]) & (requested_x <= self.x[-1])
- if isinstance(test, np.ndarray):
- test = test.all()
- return test
-
- def __call__(self, requested_x):
- if not self.all_points_in_range(requested_x):
- # cast requested_x to a numpy array if it is not already.
- if not isinstance(requested_x, np.ndarray):
- requested_x = np.array(requested_x)
-
- # we need to catch the special case of providing a single value...
- remember_that_i_was_0d = requested_x.ndim == 0
-
- requested_x = requested_x.flatten()
-
- gt = np.where(requested_x > self.x[-1])[0]
- lt = np.where(requested_x < self.x[0])[0]
- ok = np.where( (requested_x >= self.x[0]) & (requested_x <= self.x[-1]) )[0]
-
- data_shape = list(self.y.shape)
- data_shape[-1] = len(requested_x)
- result = np.empty(data_shape, dtype=self._interpolator(self.x[0]).dtype)
-
- # Make a variable to represent the slice into the resultant data. (This will be updated in each of gt, lt & ok)
- interpolator_result_index = [slice(None, None)] * self.y.ndim
-
- if len(ok) != 0:
- interpolator_result_index[-1] = ok
-
- r = self._interpolator(requested_x[ok])
- # Reshape the properly formed array to put the interpolator.axis last i.e. dims 0, 1, 2 -> 0, 2, 1 if axis = 1
- axes = list(range(r.ndim))
- del axes[self._interpolator.axis]
- axes.append(self._interpolator.axis)
-
- result[interpolator_result_index] = r.transpose(axes)
-
- if len(lt) != 0:
- interpolator_result_index[-1] = lt
-
- grad = (self.y[..., 1:2] - self.y[..., 0:1]) / (self.x[1] - self.x[0])
- result[interpolator_result_index] = self.y[..., 0:1] + (requested_x[lt] - self.x[0]) * grad
-
- if len(gt) != 0:
- interpolator_result_index[-1] = gt
-
- grad = (self.y[..., -1:] - self.y[..., -2:-1]) / (self.x[-1] - self.x[-2])
- result[interpolator_result_index] = self.y[..., -1:] + (requested_x[gt] - self.x[-1]) * grad
-
- axes = list(range(len(interpolator_result_index)))
- axes.insert(self._interpolator.axis, axes.pop(axes[-1]))
- result = result.transpose(axes)
-
- if remember_that_i_was_0d:
- new_shape = list(result.shape)
- del new_shape[self._interpolator.axis]
- result = result.reshape(new_shape)
-
- return result
- else:
- return self._interpolator(requested_x)
+ _warn_deprecated()
+ super(Linear1dExtrapolator, self).__init__(interpolator)
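+# Usage sketch, mirroring test_simple_axis0 in
+# lib/iris/tests/test_interpolation.py: constructing the wrapper emits the
+# deprecation warning, after which it behaves exactly like the original:
+#   from scipy.interpolate import interp1d
+#   a = np.arange(12.).reshape(3, 4)
+#   r = Linear1dExtrapolator(interp1d(np.arange(3), a, axis=0))  # warns
+#   r(-1)  # -> array([-4., -3., -2., -1.]), linear extrapolation below x[0]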
diff --git a/lib/iris/analysis/trajectory.py b/lib/iris/analysis/trajectory.py
index 8d7fcf7b3c..38519b816a 100644
--- a/lib/iris/analysis/trajectory.py
+++ b/lib/iris/analysis/trajectory.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2010 - 2015, Met Office
+# (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
@@ -30,6 +30,8 @@
import iris.coord_systems
import iris.coords
import iris.analysis
+from iris.analysis._interpolate_private import \
+ _nearest_neighbour_indices_ndcoords, linear as linear_regrid
class _Segment(object):
@@ -239,10 +241,10 @@ def interpolate(cube, sample_points, method=None):
point = [(coord, values[i]) for coord, values in sample_points]
if method in ["linear", None]:
- column = iris.analysis.interpolate.linear(cube, point)
+ column = linear_regrid(cube, point)
new_cube.data[..., i] = column.data
elif method == "nearest":
- column_index = iris.analysis.interpolate._nearest_neighbour_indices_ndcoords(cube, point, cache=cache)
+ column_index = _nearest_neighbour_indices_ndcoords(cube, point, cache=cache)
column = cube[column_index]
new_cube.data[..., i] = column.data
diff --git a/lib/iris/cube.py b/lib/iris/cube.py
index 9fb94f8101..0fa492353e 100644
--- a/lib/iris/cube.py
+++ b/lib/iris/cube.py
@@ -39,7 +39,7 @@
import iris.analysis
from iris.analysis.cartography import wrap_lons
import iris.analysis.maths
-import iris.analysis.interpolate
+import iris.analysis._interpolate_private
import iris.aux_factory
import iris.coord_systems
import iris.coords
@@ -3117,11 +3117,16 @@ def add_history(self, string):
# START ANALYSIS ROUTINES
regridded = iris.util._wrap_function_for_method(
- iris.analysis.interpolate.regrid,
+ iris.analysis._interpolate_private.regrid,
"""
Returns a new cube with values derived from this cube on the
horizontal grid specified by the grid_cube.
+ .. deprecated:: 1.10
+ Please replace usage of :meth:`~Cube.regridded` with
+ :meth:`~Cube.regrid`. See :func:`iris.analysis.interpolate.regrid`
+ for details of exact usage equivalents.
+
""")
# END ANALYSIS ROUTINES
diff --git a/lib/iris/fileformats/ff.py b/lib/iris/fileformats/ff.py
index e6f9623348..4ea7600115 100644
--- a/lib/iris/fileformats/ff.py
+++ b/lib/iris/fileformats/ff.py
@@ -36,21 +36,24 @@
from six.moves import (filter, input, map, range, zip) # noqa
import six
+from functools import wraps
import warnings
from iris.fileformats import _ff
from iris.fileformats import pp
+from iris._deprecation_helpers import ClassWrapperSameDocstring
-_DEPRECATION_WARNSTRING = "The module 'iris.fileformats.ff' is deprecated."
+
+_FF_DEPRECATION_WARNING = "The module 'iris.fileformats.ff' is deprecated."
# Define a standard mechanism for deprecation messages.
def _warn_deprecated(message=None):
if message is None:
- message = _DEPRECATION_WARNSTRING
+ message = _FF_DEPRECATION_WARNING
warnings.warn(message)
-# Issue a deprecation message when the module is loaded, if enabled.
+# Issue a deprecation message when the module is loaded.
_warn_deprecated()
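+# For example, merely importing the module now produces (a sketch of the
+# expected output):
+#   >>> import iris.fileformats.ff  # doctest: +SKIP
+#   UserWarning: The module 'iris.fileformats.ff' is deprecated.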
# Directly import various simple data items from the 'old' ff module.
@@ -76,75 +79,74 @@ def _warn_deprecated(message=None):
)
-# Make new wrappers for classes and functions, with the original public names,
-# but which emit deprecation warnings when used.
-class _DeprecationWrapperMetaclass(type):
- def __new__(metacls, classname, bases, class_dict):
- # Patch the subclass to duplicate docstrings from the parent class, and
- # provide an __init__ that issues a deprecation warning and then calls
- # the parent constructor.
- parent_class = bases[0]
- # Copy the original class docstring.
- class_dict['__doc__'] = parent_class.__doc__
-
- # Copy the init docstring too.
- init_docstring = parent_class.__init__.__doc__
-
- # Define a warning message.
- depr_warnstring = _DEPRECATION_WARNSTRING
- # Use a special class variable for an augmented warning message.
- alternative_note = class_dict.get('_DEPRECATION_ALTERNATIVE_NOTE')
- if alternative_note:
- depr_warnstring = '{}\n{}\n'.format(
- depr_warnstring, alternative_note)
-
- # Save the parent class *on* the wrapper class, so we can chain to its
- # __init__ call.
- class_dict['_target_parent_class'] = parent_class
-
- # Create a wrapper init function which issues the deprecation.
- def initfn(self, *args, **kwargs):
- _warn_deprecated(depr_warnstring)
- self._target_parent_class.__init__(self, *args, **kwargs)
-
- # Set this as the init for the wrapper class.
- initfn.func_name = '__init__'
- initfn.__doc__ = init_docstring
- class_dict['__init__'] = initfn
-
- # Return the result.
- return super(_DeprecationWrapperMetaclass, metacls).__new__(
- metacls, classname, bases, class_dict)
-
+# Define wrappers for all the public classes, which emit deprecation warnings.
+# Note: we seem to be obliged to provide an __init__ with a matching signature
+# for each one, as Sphinx takes its constructor signature from any overriding
+# definition of __init__ or __new__; without this, the docs don't look right.
-class Grid(six.with_metaclass(_DeprecationWrapperMetaclass, _ff.Grid)):
- pass
+class Grid(six.with_metaclass(ClassWrapperSameDocstring, _ff.Grid)):
+ @wraps(_ff.Grid.__init__)
+ def __init__(self, column_dependent_constants, row_dependent_constants,
+ real_constants, horiz_grid_type):
+ _warn_deprecated()
+ super(Grid, self).__init__(
+ column_dependent_constants, row_dependent_constants,
+ real_constants, horiz_grid_type)
-class ArakawaC(six.with_metaclass(_DeprecationWrapperMetaclass, _ff.ArakawaC)):
- pass
+class ArakawaC(six.with_metaclass(ClassWrapperSameDocstring, _ff.ArakawaC)):
+ @wraps(_ff.ArakawaC.__init__)
+ def __init__(self, column_dependent_constants, row_dependent_constants,
+ real_constants, horiz_grid_type):
+ _warn_deprecated()
+ super(ArakawaC, self).__init__(
+ column_dependent_constants, row_dependent_constants,
+ real_constants, horiz_grid_type)
-class NewDynamics(six.with_metaclass(_DeprecationWrapperMetaclass,
+class NewDynamics(six.with_metaclass(ClassWrapperSameDocstring,
_ff.NewDynamics)):
- pass
-
-
-class ENDGame(six.with_metaclass(_DeprecationWrapperMetaclass, _ff.ENDGame)):
- pass
-
-
-class FFHeader(six.with_metaclass(_DeprecationWrapperMetaclass, _ff.FFHeader)):
- pass
-
-
-class FF2PP(six.with_metaclass(_DeprecationWrapperMetaclass, _ff.FF2PP)):
- _DEPRECATION_ALTERNATIVE_NOTE = (
- "Please use 'iris.fileformats.um.um_to_pp' in place of "
- "'iris.fileformats.ff.FF2PP.")
-
-
-# Make wrappers to the loader functions which also issue a warning,
+ @wraps(_ff.NewDynamics.__init__)
+ def __init__(self, column_dependent_constants, row_dependent_constants,
+ real_constants, horiz_grid_type):
+ _warn_deprecated()
+ super(NewDynamics, self).__init__(
+ column_dependent_constants, row_dependent_constants,
+ real_constants, horiz_grid_type)
+
+
+class ENDGame(six.with_metaclass(ClassWrapperSameDocstring, _ff.ENDGame)):
+ @wraps(_ff.ENDGame.__init__)
+ def __init__(self, column_dependent_constants, row_dependent_constants,
+ real_constants, horiz_grid_type):
+ _warn_deprecated()
+ super(ENDGame, self).__init__(
+ column_dependent_constants, row_dependent_constants,
+ real_constants, horiz_grid_type)
+
+
+class FFHeader(six.with_metaclass(ClassWrapperSameDocstring, _ff.FFHeader)):
+ @wraps(_ff.FFHeader.__init__)
+ def __init__(self, filename, word_depth=DEFAULT_FF_WORD_DEPTH):
+ _warn_deprecated()
+ super(FFHeader, self).__init__(filename, word_depth=word_depth)
+
+
+class FF2PP(six.with_metaclass(ClassWrapperSameDocstring, _ff.FF2PP)):
+ @wraps(_ff.FF2PP.__init__)
+ def __init__(self, filename, read_data=False,
+ word_depth=DEFAULT_FF_WORD_DEPTH):
+ # Provide an enhanced deprecation message for this one.
+ msg = (_FF_DEPRECATION_WARNING + '\n' +
+ "Please use 'iris.fileformats.um.um_to_pp' in place of "
+ "'iris.fileformats.ff.FF2PP'.")
+ _warn_deprecated(msg)
+ super(FF2PP, self).__init__(filename, read_data=read_data,
+ word_depth=word_depth)
+
+
+# Provide alternative loader functions which issue a deprecation warning,
# using the original docstrings with an appended deprecation warning.
def load_cubes(filenames, callback, constraints=None):
diff --git a/lib/iris/fileformats/grib/__init__.py b/lib/iris/fileformats/grib/__init__.py
index 839e822754..4707e87394 100644
--- a/lib/iris/fileformats/grib/__init__.py
+++ b/lib/iris/fileformats/grib/__init__.py
@@ -37,7 +37,7 @@
import numpy.ma as ma
import scipy.interpolate
-from iris.analysis.interpolate import Linear1dExtrapolator
+from iris.analysis._interpolate_private import Linear1dExtrapolator
import iris.coord_systems as coord_systems
from iris.exceptions import TranslationError
# NOTE: careful here, to avoid circular imports (as iris imports grib)
diff --git a/lib/iris/fileformats/rules.py b/lib/iris/fileformats/rules.py
index 3ed7a25ee4..0656e35d16 100644
--- a/lib/iris/fileformats/rules.py
+++ b/lib/iris/fileformats/rules.py
@@ -40,6 +40,7 @@
import numpy as np
import numpy.ma as ma
+from iris.analysis._interpolate_private import linear as regrid_linear
import iris.config as config
import iris.cube
import iris.exceptions
@@ -787,7 +788,7 @@ def _dereference_args(factory, reference_targets, regrid_cache, cube):
def _regrid_to_target(src_cube, target_coords, target_cube):
# Interpolate onto the target grid.
sample_points = [(coord, coord.points) for coord in target_coords]
- result_cube = iris.analysis.interpolate.linear(src_cube, sample_points)
+ result_cube = regrid_linear(src_cube, sample_points)
# Any scalar coords on the target_cube will have become vector
# coords on the resample src_cube (i.e. result_cube).
diff --git a/lib/iris/tests/analysis/test_interpolate.py b/lib/iris/tests/analysis/test_interpolate.py
index 6f0be96c56..ece73d16d1 100644
--- a/lib/iris/tests/analysis/test_interpolate.py
+++ b/lib/iris/tests/analysis/test_interpolate.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2013 - 2015, Met Office
+# (C) British Crown Copyright 2013 - 2016, Met Office
#
# This file is part of Iris.
#
@@ -28,7 +28,7 @@
import numpy as np
-import iris.analysis.interpolate as interpolate
+import iris.analysis._interpolate_private as interpolate
from iris.coords import DimCoord
from iris.cube import Cube
from iris.tests.test_interpolation import normalise_order
diff --git a/lib/iris/tests/integration/test_regrid_equivalence.py b/lib/iris/tests/integration/test_regrid_equivalence.py
new file mode 100644
index 0000000000..fc5cacf6e6
--- /dev/null
+++ b/lib/iris/tests/integration/test_regrid_equivalence.py
@@ -0,0 +1,263 @@
+# (C) British Crown Copyright 2016, Met Office
+#
+# This file is part of Iris.
+#
+# Iris is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Iris is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Iris. If not, see .
+"""
+Tests to check the validity of replacing
+"iris.analysis._interpolate_private.regrid(mode='nearest')" with
+"iris.cube.Cube.regrid(scheme=iris.analysis.Nearest())".
+
+"""
+from __future__ import (absolute_import, division, print_function)
+from six.moves import (filter, input, map, range, zip) # noqa
+
+# Import iris.tests first so that some things can be initialised before
+# importing anything else.
+import iris.tests as tests
+
+import numpy as np
+
+import iris
+from iris.analysis._interpolate_private import regrid
+from iris.analysis import Nearest
+from iris.cube import Cube
+from iris.coords import AuxCoord, DimCoord
+
+
+def grid_cube(xx, yy, data=None):
+ nx, ny = len(xx), len(yy)
+ if data is not None:
+ data = np.array(data).reshape((ny, nx))
+ else:
+ data = np.zeros((ny, nx))
+ cube = Cube(data)
+ y_coord = DimCoord(yy, standard_name='latitude', units='degrees')
+ x_coord = DimCoord(xx, standard_name='longitude', units='degrees')
+ cube.add_dim_coord(y_coord, 0)
+ cube.add_dim_coord(x_coord, 1)
+ return cube
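+
+# For example, grid_cube([0., 10.], [45., 55.]) gives a 2x2 cube of zeros
+# with 'longitude' and 'latitude' dim coords -- the tests below use this
+# helper to build both source and target grids.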
+
+
+ENABLE_DEBUG_OUTPUT = False
+
+
+def _debug_data(cube, test_id):
+ if ENABLE_DEBUG_OUTPUT:
+ print()
+ data = cube.data
+ print('CUBE: {}'.format(test_id))
+ print(' x={!r}'.format(cube.coord('longitude').points))
+ print(' y={!r}'.format(cube.coord('latitude').points))
+ print('data[{}]:'.format(type(data)))
+ print(repr(data))
+
+
+class MixinCheckingCode(object):
+ def test_basic(self):
+ src_x = [30., 40., 50.]
+ dst_x = [32., 42.]
+ src_y = [-10., 0., 10.]
+ dst_y = [-8., 2.]
+ data = [[3., 4., 5.],
+ [23., 24., 25.],
+ [43., 44., 45.]]
+ expected_result = [[3., 4.],
+ [23., 24.]]
+ src_cube = grid_cube(src_x, src_y, data)
+ _debug_data(src_cube, "basic SOURCE")
+ dst_cube = grid_cube(dst_x, dst_y)
+ result_cube = self.regrid(src_cube, dst_cube)
+ _debug_data(result_cube, "basic RESULT")
+ self.assertArrayAllClose(result_cube.data, expected_result)
+
+ def test_src_extrapolation(self):
+ src_x = [30., 40., 50.]
+ dst_x = [0., 29.0, 39.0]
+ src_y = [-10., 0., 10.]
+ dst_y = [-50., -9., -1.]
+ data = [[3., 4., 5.],
+ [23., 24., 25.],
+ [43., 44., 45.]]
+ expected_result = [[3., 3., 4.],
+ [3., 3., 4.],
+ [23., 23., 24.]]
+ src_cube = grid_cube(src_x, src_y, data)
+ _debug_data(src_cube, "extrapolate SOURCE")
+ dst_cube = grid_cube(dst_x, dst_y)
+ result_cube = self.regrid(src_cube, dst_cube)
+ _debug_data(result_cube, "extrapolate RESULT")
+ self.assertArrayAllClose(result_cube.data, expected_result)
+
+ def test_exact_matching_points(self):
+ src_x = [10.0, 20.0, 30.0]
+ src_y = [10.0, 20.0, 30.0]
+ dst_x = [14.9, 15.1, 20.0, 24.9, 25.1]
+ dst_y = [14.9, 15.1, 20.0, 24.9, 25.1]
+ data = [[3., 4., 5.],
+ [23., 24., 25.],
+ [43., 44., 45.]]
+ expected_result = [[3., 4., 4., 4., 5.],
+ [23., 24., 24., 24., 25.],
+ [23., 24., 24., 24., 25.],
+ [23., 24., 24., 24., 25.],
+ [43., 44., 44., 44., 45.]]
+ src_cube = grid_cube(src_x, src_y, data)
+ _debug_data(src_cube, "matching SOURCE")
+ dst_cube = grid_cube(dst_x, dst_y)
+ result_cube = self.regrid(src_cube, dst_cube)
+ _debug_data(result_cube, "matching RESULT")
+ self.assertArrayAllClose(result_cube.data, expected_result)
+
+ def test_source_mask(self):
+ src_x = [40.0, 50.0, 60.0]
+ src_y = [40.0, 50.0, 60.0]
+ dst_x = [44.99, 45.01, 48.0, 50.0, 52.0, 54.99, 55.01]
+ dst_y = [44.99, 45.01, 48.0, 50.0, 52.0, 54.99, 55.01]
+ data = np.ma.masked_equal([[3., 4., 5.],
+ [23., 999, 25.],
+ [43., 44., 45.]],
+ 999)
+ expected_result = np.ma.masked_equal(
+ [[3., 4., 4., 4., 4., 4., 5.],
+ [23., 999, 999, 999, 999, 999, 25.],
+ [23., 999, 999, 999, 999, 999, 25.],
+ [23., 999, 999, 999, 999, 999, 25.],
+ [23., 999, 999, 999, 999, 999, 25.],
+ [23., 999, 999, 999, 999, 999, 25.],
+ [43., 44., 44., 44., 44., 44., 45.]],
+ 999)
+ src_cube = grid_cube(src_x, src_y, data)
+ src_cube.data = np.ma.masked_array(src_cube.data)
+ src_cube.data[1, 1] = np.ma.masked
+ _debug_data(src_cube, "masked SOURCE")
+ dst_cube = grid_cube(dst_x, dst_y)
+ result_cube = self.regrid(src_cube, dst_cube,
+ translate_nans_to_mask=True)
+ _debug_data(result_cube, "masked RESULT")
+ self.assertMaskedArrayEqual(result_cube.data, expected_result)
+
+ def test_wrapping_non_circular(self):
+ src_x = [-10., 0., 10.]
+ dst_x = [-360.0, -170., -1.0, 1.0, 50.0, 170.0, 352.0, 720.0]
+ src_y = [0., 10.]
+ dst_y = [0., 10.]
+ data = [[3., 4., 5.],
+ [3., 4., 5.]]
+ src_cube = grid_cube(src_x, src_y, data)
+ dst_cube = grid_cube(dst_x, dst_y)
+ # Account for a behavioural difference in this case:
+ # The Nearest scheme does wrapping of modular coordinate values.
+ # Thus target of 352.0 --> -8.0, which is nearest to -10.
+ # This looks just like "circular" handling, but only because it happens
+ # to produce the same results *for nearest-neighbour in particular*.
+ if isinstance(self, TestInterpolateRegridNearest):
+ # interpolate.regrid --> Wrapping-free results (non-circular).
+ expected_result = [[3., 3., 4., 4., 5., 5., 5., 5.],
+ [3., 3., 4., 4., 5., 5., 5., 5.]]
+ else:
+ # cube regrid --> Wrapped results.
+ expected_result = [[4., 3., 4., 4., 5., 5., 3., 4.],
+ [4., 3., 4., 4., 5., 5., 3., 4.]]
+ _debug_data(src_cube, "noncircular SOURCE")
+ result_cube = self.regrid(src_cube, dst_cube)
+ _debug_data(result_cube, "noncircular RESULT")
+ self.assertArrayAllClose(result_cube.data, expected_result)
+
+ def test_wrapping_circular(self):
+ # When x-coord is "circular", the above distinction does not apply:
+ # results are the same for both calculations.
+ src_x = [-10., 0., 10.]
+ dst_x = [-360.0, -170., -1.0, 1.0, 50.0, 170.0, 352.0, 720.0]
+ src_y = [0., 10.]
+ dst_y = [0., 10.]
+ data = [[3., 4., 5.],
+ [3., 4., 5.]]
+ src_cube = grid_cube(src_x, src_y, data)
+ dst_cube = grid_cube(dst_x, dst_y)
+ src_cube.coord('longitude').circular = True
+ expected_result = [[4., 3., 4., 4., 5., 5., 3., 4.],
+ [4., 3., 4., 4., 5., 5., 3., 4.]]
+ _debug_data(src_cube, "circular SOURCE")
+ result_cube = self.regrid(src_cube, dst_cube)
+ _debug_data(result_cube, "circular RESULT")
+ self.assertArrayAllClose(result_cube.data, expected_result)
+
+ def test_wrapping_non_angular(self):
+ src_x = [-10., 0., 10.]
+ dst_x = [-360.0, -170., -1.0, 1.0, 50.0, 170.0, 352.0, 720.0]
+ src_y = [0., 10.]
+ dst_y = [0., 10.]
+ data = [[3., 4., 5.],
+ [3., 4., 5.]]
+ src_cube = grid_cube(src_x, src_y, data)
+ dst_cube = grid_cube(dst_x, dst_y)
+ for co_name in ('longitude', 'latitude'):
+ for cube in (src_cube, dst_cube):
+ coord = cube.coord(co_name)
+ coord.coord_system = None
+ coord.convert_units('1')
+ # interpolate.regrid --> Wrapping-free results (non-circular).
+ expected_result = [[3., 3., 4., 4., 5., 5., 5., 5.],
+ [3., 3., 4., 4., 5., 5., 5., 5.]]
+ _debug_data(src_cube, "non-angle-lons SOURCE")
+ result_cube = self.regrid(src_cube, dst_cube)
+ _debug_data(result_cube, "non-angle-lons RESULT")
+ self.assertArrayAllClose(result_cube.data, expected_result)
+
+ def test_source_nan(self):
+ src_x = [40.0, 50.0, 60.0]
+ src_y = [40.0, 50.0, 60.0]
+ dst_x = [44.99, 45.01, 48.0, 50.0, 52.0, 54.99, 55.01]
+ dst_y = [44.99, 45.01, 48.0, 50.0, 52.0, 54.99, 55.01]
+ nan = np.nan
+ data = [[3., 4., 5.],
+ [23., nan, 25.],
+ [43., 44., 45.]]
+ expected_result = [[3., 4., 4., 4., 4., 4., 5.],
+ [23., nan, nan, nan, nan, nan, 25.],
+ [23., nan, nan, nan, nan, nan, 25.],
+ [23., nan, nan, nan, nan, nan, 25.],
+ [23., nan, nan, nan, nan, nan, 25.],
+ [23., nan, nan, nan, nan, nan, 25.],
+ [43., 44., 44., 44., 44., 44., 45.]]
+ src_cube = grid_cube(src_x, src_y, data)
+ _debug_data(src_cube, "nan SOURCE")
+ dst_cube = grid_cube(dst_x, dst_y)
+ result_cube = self.regrid(src_cube, dst_cube)
+ _debug_data(result_cube, "nan RESULT")
+ self.assertArrayEqual(result_cube.data, expected_result)
+
+
+# Perform identical tests on both the old and the new approaches.
+class TestInterpolateRegridNearest(MixinCheckingCode, tests.IrisTest):
+ def regrid(self, src_cube, dst_cube,
+ translate_nans_to_mask=False, **kwargs):
+ result = regrid(src_cube, dst_cube, mode='nearest')
+ data = result.data
+ if translate_nans_to_mask and np.any(np.isnan(data)):
+ data = np.ma.masked_array(data, mask=np.isnan(data))
+ result.data = data
+ return result
+
+
+class TestCubeRegridNearest(MixinCheckingCode, tests.IrisTest):
+ scheme = Nearest(extrapolation_mode='extrapolate')
+
+ def regrid(self, src_cube, dst_cube, **kwargs):
+ return src_cube.regrid(dst_cube, scheme=self.scheme)
+
+
+if __name__ == '__main__':
+ tests.main()
diff --git a/lib/iris/tests/test_coding_standards.py b/lib/iris/tests/test_coding_standards.py
index 230b01c4bf..ec0c19aba5 100644
--- a/lib/iris/tests/test_coding_standards.py
+++ b/lib/iris/tests/test_coding_standards.py
@@ -72,7 +72,7 @@
class StandardReportWithExclusions(pep8.StandardReport):
expected_bad_files = [
'*/iris/std_names.py',
- '*/iris/analysis/interpolate.py',
+ '*/iris/analysis/_interpolate_private.py',
'*/iris/analysis/trajectory.py',
'*/iris/fileformats/cf.py',
'*/iris/fileformats/dot.py',
diff --git a/lib/iris/tests/test_interpolation.py b/lib/iris/tests/test_interpolation.py
index 9640d27001..f3648e3297 100644
--- a/lib/iris/tests/test_interpolation.py
+++ b/lib/iris/tests/test_interpolation.py
@@ -34,8 +34,7 @@
import iris.cube
import iris.analysis.interpolate
import iris.tests.stock
-from iris.analysis.interpolate import Linear1dExtrapolator
-import iris.analysis.interpolate as iintrp
+import iris.analysis._interpolate_private as iintrp
def normalise_order(cube):
@@ -50,7 +49,7 @@ def normalise_order(cube):
class TestLinearExtrapolator(tests.IrisTest):
def test_simple_axis0(self):
a = np.arange(12.).reshape(3, 4)
- r = Linear1dExtrapolator(interp1d(np.arange(3), a, axis=0))
+ r = iintrp.Linear1dExtrapolator(interp1d(np.arange(3), a, axis=0))
np.testing.assert_array_equal(r(0), np.array([ 0., 1., 2., 3.]))
np.testing.assert_array_equal(r(-1), np.array([-4., -3., -2., -1.]))
@@ -88,7 +87,7 @@ def test_simple_axis0(self):
def test_simple_axis1(self):
a = np.arange(12).reshape(3, 4)
- r = Linear1dExtrapolator(interp1d(np.arange(4), a, axis=1))
+ r = iintrp.Linear1dExtrapolator(interp1d(np.arange(4), a, axis=1))
# check non-extrapolation given the Extrapolator object
np.testing.assert_array_equal(r(0), np.array([ 0., 4., 8.]))
@@ -140,7 +139,7 @@ def test_simple_axis1(self):
def test_simple_3d_axis1(self):
a = np.arange(24.).reshape(3, 4, 2)
- r = Linear1dExtrapolator(interp1d(np.arange(4.), a, axis=1))
+ r = iintrp.Linear1dExtrapolator(interp1d(np.arange(4.), a, axis=1))
# a:
# [[[ 0 1]
@@ -206,7 +205,7 @@ def test_simple_3d_axis1(self):
def test_variable_gradient(self):
a = np.array([[2, 4, 8], [0, 5, 11]])
- r = Linear1dExtrapolator(interp1d(np.arange(2), a, axis=0))
+ r = iintrp.Linear1dExtrapolator(interp1d(np.arange(2), a, axis=0))
np.testing.assert_array_equal(r(0), np.array([ 2., 4., 8.]))
np.testing.assert_array_equal(r(-1), np.array([ 4., 3., 5.]))
@@ -228,27 +227,27 @@ def setUp(self):
def test_single_point(self):
# Slice to form (3, 1) shaped cube.
cube = self.cube[:, 2:3]
- r = iris.analysis.interpolate.linear(cube, [('longitude', [1.])])
+ r = iintrp.linear(cube, [('longitude', [1.])])
self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
'single_pt_to_single_pt_0'))
# Slice to form (1, 4) shaped cube.
cube = self.cube[1:2, :]
- r = iris.analysis.interpolate.linear(cube, [('latitude', [1.])])
+ r = iintrp.linear(cube, [('latitude', [1.])])
self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
'single_pt_to_single_pt_1'))
def test_multiple_points(self):
# Slice to form (3, 1) shaped cube.
cube = self.cube[:, 2:3]
- r = iris.analysis.interpolate.linear(cube, [('longitude',
+ r = iintrp.linear(cube, [('longitude',
[1., 2., 3., 4.])])
self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
'single_pt_to_many_0'))
# Slice to form (1, 4) shaped cube.
cube = self.cube[1:2, :]
- r = iris.analysis.interpolate.linear(cube, [('latitude',
+ r = iintrp.linear(cube, [('latitude',
[1., 2., 3., 4.])])
self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
'single_pt_to_many_1'))
@@ -256,13 +255,13 @@ def test_multiple_points(self):
def test_single_point_to_scalar(self):
# Slice to form (3, 1) shaped cube.
cube = self.cube[:, 2:3]
- r = iris.analysis.interpolate.linear(cube, [('longitude', 1.)])
+ r = iintrp.linear(cube, [('longitude', 1.)])
self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
'single_pt_to_scalar_0'))
# Slice to form (1, 4) shaped cube.
cube = self.cube[1:2, :]
- r = iris.analysis.interpolate.linear(cube, [('latitude', 1.)])
+ r = iintrp.linear(cube, [('latitude', 1.)])
self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
'single_pt_to_scalar_1'))
@@ -270,15 +269,15 @@ def test_extrapolation_mode_same_pt(self):
# Slice to form (3, 1) shaped cube.
cube = self.cube[:, 2:3]
src_points = cube.coord('longitude').points
- r = iris.analysis.interpolate.linear(cube, [('longitude', src_points)],
+ r = iintrp.linear(cube, [('longitude', src_points)],
extrapolation_mode='linear')
self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
'single_pt_to_same_pt'))
- r = iris.analysis.interpolate.linear(cube, [('longitude', src_points)],
+ r = iintrp.linear(cube, [('longitude', src_points)],
extrapolation_mode='nan')
self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
'single_pt_to_same_pt'))
- r = iris.analysis.interpolate.linear(cube, [('longitude', src_points)],
+ r = iintrp.linear(cube, [('longitude', src_points)],
extrapolation_mode='error')
self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
'single_pt_to_same_pt'))
@@ -288,15 +287,15 @@ def test_extrapolation_mode_multiple_same_pts(self):
cube = self.cube[:, 2:3]
src_points = cube.coord('longitude').points
new_points = [src_points[0]] * 3
- r = iris.analysis.interpolate.linear(cube, [('longitude', new_points)],
+ r = iintrp.linear(cube, [('longitude', new_points)],
extrapolation_mode='linear')
self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
'single_pt_to_many_same'))
- r = iris.analysis.interpolate.linear(cube, [('longitude', new_points)],
+ r = iintrp.linear(cube, [('longitude', new_points)],
extrapolation_mode='nan')
self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
'single_pt_to_many_same'))
- r = iris.analysis.interpolate.linear(cube, [('longitude', new_points)],
+ r = iintrp.linear(cube, [('longitude', new_points)],
extrapolation_mode='error')
self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
'single_pt_to_many_same'))
@@ -312,17 +311,17 @@ def test_extrapolation_mode_different_pts(self):
new_points_scalar = src_points[0] + 0.2
# 'nan' mode
- r = iris.analysis.interpolate.linear(cube, [('longitude',
+ r = iintrp.linear(cube, [('longitude',
new_points_single)],
extrapolation_mode='nan')
self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
'single_pt_to_single_pt_nan'))
- r = iris.analysis.interpolate.linear(cube, [('longitude',
+ r = iintrp.linear(cube, [('longitude',
new_points_multiple)],
extrapolation_mode='nan')
self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
'single_pt_to_many_nan'))
- r = iris.analysis.interpolate.linear(cube, [('longitude',
+ r = iintrp.linear(cube, [('longitude',
new_points_scalar)],
extrapolation_mode='nan')
self.assertCMLApproxData(r, ('analysis', 'interpolation', 'linear',
@@ -330,15 +329,15 @@ def test_extrapolation_mode_different_pts(self):
# 'error' mode
with self.assertRaises(ValueError):
- r = iris.analysis.interpolate.linear(cube, [('longitude',
+ r = iintrp.linear(cube, [('longitude',
new_points_single)],
extrapolation_mode='error')
with self.assertRaises(ValueError):
- r = iris.analysis.interpolate.linear(cube, [('longitude',
+ r = iintrp.linear(cube, [('longitude',
new_points_multiple)],
extrapolation_mode='error')
with self.assertRaises(ValueError):
- r = iris.analysis.interpolate.linear(cube, [('longitude',
+ r = iintrp.linear(cube, [('longitude',
new_points_scalar)],
extrapolation_mode='error')
@@ -380,45 +379,45 @@ def test_dim_to_aux(self):
cube = self.simple2d_cube
other = iris.coords.DimCoord([1, 2, 3, 4], long_name='was_dim')
cube.add_aux_coord(other, 0)
- r = iris.analysis.interpolate.linear(cube, [('dim1', [7, 3, 5])])
+ r = iintrp.linear(cube, [('dim1', [7, 3, 5])])
normalise_order(r)
self.assertCML(r, ('analysis', 'interpolation', 'linear', 'dim_to_aux.cml'))
def test_bad_sample_point_format(self):
- self.assertRaises(TypeError, iris.analysis.interpolate.linear, self.simple2d_cube, ('dim1', 4))
+ self.assertRaises(TypeError, iintrp.linear, self.simple2d_cube, ('dim1', 4))
def test_simple_single_point(self):
- r = iris.analysis.interpolate.linear(self.simple2d_cube, [('dim1', 4)])
+ r = iintrp.linear(self.simple2d_cube, [('dim1', 4)])
self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_single_point.cml'), checksum=False)
np.testing.assert_array_equal(r.data, np.array([1.5, 2.5, 3.5], dtype=self.simple2d_cube.data.dtype))
def test_monotonic_decreasing_coord(self):
c = self.simple2d_cube[::-1]
- r = iris.analysis.interpolate.linear(c, [('dim1', 4)])
+ r = iintrp.linear(c, [('dim1', 4)])
self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_single_point.cml'), checksum=False)
np.testing.assert_array_equal(r.data, np.array([1.5, 2.5, 3.5], dtype=self.simple2d_cube.data.dtype))
def test_overspecified(self):
- self.assertRaises(ValueError, iris.analysis.interpolate.linear, self.simple2d_cube[0, :], [('dim1', 4)])
+ self.assertRaises(ValueError, iintrp.linear, self.simple2d_cube[0, :], [('dim1', 4)])
def test_bounded_coordinate(self):
# The results should be exactly the same as for the
# non-bounded case.
cube = self.simple2d_cube
cube.coord('dim1').guess_bounds()
- r = iris.analysis.interpolate.linear(cube, [('dim1', [4, 5])])
+ r = iintrp.linear(cube, [('dim1', [4, 5])])
np.testing.assert_array_equal(r.data, np.array([[ 1.5, 2.5, 3.5], [ 3., 4., 5. ]]))
normalise_order(r)
self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_multiple_points.cml'))
def test_simple_multiple_point(self):
- r = iris.analysis.interpolate.linear(self.simple2d_cube, [('dim1', [4, 5])])
+ r = iintrp.linear(self.simple2d_cube, [('dim1', [4, 5])])
np.testing.assert_array_equal(r.data, np.array([[ 1.5, 2.5, 3.5], [ 3., 4., 5. ]]))
normalise_order(r)
self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_multiple_points.cml'))
# Check that numpy arrays specifications work
- r = iris.analysis.interpolate.linear(self.simple2d_cube, [('dim1', np.array([4, 5]))])
+ r = iintrp.linear(self.simple2d_cube, [('dim1', np.array([4, 5]))])
normalise_order(r)
self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_multiple_points.cml'))
@@ -427,88 +426,88 @@ def test_circular_vs_non_circular_coord(self):
other = iris.coords.AuxCoord([10, 6, 7, 4], long_name='other')
cube.add_aux_coord(other, 1)
samples = [0, 60, 300]
- r = iris.analysis.interpolate.linear(cube, [('theta', samples)])
+ r = iintrp.linear(cube, [('theta', samples)])
normalise_order(r)
self.assertCML(r, ('analysis', 'interpolation', 'linear', 'circular_vs_non_circular.cml'))
def test_simple_multiple_points_circular(self):
- r = iris.analysis.interpolate.linear(self.simple2d_cube_circular, [('theta', [0., 60., 120., 180.])])
+ r = iintrp.linear(self.simple2d_cube_circular, [('theta', [0., 60., 120., 180.])])
normalise_order(r)
self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_multiple_points_circular.cml'))
# check that the values returned by theta 0 & 360 are the same...
- r1 = iris.analysis.interpolate.linear(self.simple2d_cube_circular, [('theta', 360)])
- r2 = iris.analysis.interpolate.linear(self.simple2d_cube_circular, [('theta', 0)])
+ r1 = iintrp.linear(self.simple2d_cube_circular, [('theta', 360)])
+ r2 = iintrp.linear(self.simple2d_cube_circular, [('theta', 0)])
np.testing.assert_array_almost_equal(r1.data, r2.data)
def test_simple_multiple_coords(self):
expected_result = np.array(2.5)
- r = iris.analysis.interpolate.linear(self.simple2d_cube, [('dim1', 4), ('dim2', 3.5), ])
+ r = iintrp.linear(self.simple2d_cube, [('dim1', 4), ('dim2', 3.5), ])
np.testing.assert_array_equal(r.data, expected_result)
self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_multiple_coords.cml'), checksum=False)
# Check that it doesn't matter if you do the interpolation in separate steps...
- r = iris.analysis.interpolate.linear(self.simple2d_cube, [('dim2', 3.5)])
- r = iris.analysis.interpolate.linear(r, [('dim1', 4)])
+ r = iintrp.linear(self.simple2d_cube, [('dim2', 3.5)])
+ r = iintrp.linear(r, [('dim1', 4)])
np.testing.assert_array_equal(r.data, expected_result)
self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_multiple_coords.cml'), checksum=False)
- r = iris.analysis.interpolate.linear(self.simple2d_cube, [('dim1', 4)])
- r = iris.analysis.interpolate.linear(r, [('dim2', 3.5)])
+ r = iintrp.linear(self.simple2d_cube, [('dim1', 4)])
+ r = iintrp.linear(r, [('dim2', 3.5)])
np.testing.assert_array_equal(r.data, expected_result)
self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_multiple_coords.cml'), checksum=False)
def test_coord_not_found(self):
- self.assertRaises(KeyError, iris.analysis.interpolate.linear, self.simple2d_cube,
+ self.assertRaises(KeyError, iintrp.linear, self.simple2d_cube,
[('non_existant_coord', [3.5, 3.25])])
def test_simple_coord_error_extrapolation(self):
- self.assertRaises(ValueError, iris.analysis.interpolate.linear, self.simple2d_cube, [('dim2', 2.5)], extrapolation_mode='error')
+ self.assertRaises(ValueError, iintrp.linear, self.simple2d_cube, [('dim2', 2.5)], extrapolation_mode='error')
def test_simple_coord_linear_extrapolation(self):
- r = iris.analysis.interpolate.linear( self.simple2d_cube, [('dim2', 2.5)], extrapolation_mode='linear')
+ r = iintrp.linear( self.simple2d_cube, [('dim2', 2.5)], extrapolation_mode='linear')
self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_coord_linear_extrapolation.cml'))
np.testing.assert_array_equal(r.data, np.array([-1., 2., 5., 8.], dtype=self.simple2d_cube.data.dtype))
- r = iris.analysis.interpolate.linear(self.simple2d_cube, [('dim1', 1)])
+ r = iintrp.linear(self.simple2d_cube, [('dim1', 1)])
np.testing.assert_array_equal(r.data, np.array([-3., -2., -1.], dtype=self.simple2d_cube.data.dtype))
def test_simple_coord_linear_extrapolation_multipoint1(self):
- r = iris.analysis.interpolate.linear( self.simple2d_cube, [('dim1', [-1, 1, 10, 11])], extrapolation_mode='linear')
+ r = iintrp.linear( self.simple2d_cube, [('dim1', [-1, 1, 10, 11])], extrapolation_mode='linear')
self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_coord_linear_extrapolation_multipoint1.cml'))
def test_simple_coord_linear_extrapolation_multipoint2(self):
- r = iris.analysis.interpolate.linear( self.simple2d_cube, [('dim1', [1, 10])], extrapolation_mode='linear')
+ r = iintrp.linear( self.simple2d_cube, [('dim1', [1, 10])], extrapolation_mode='linear')
self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_coord_linear_extrapolation_multipoint2.cml'))
def test_simple_coord_nan_extrapolation(self):
- r = iris.analysis.interpolate.linear( self.simple2d_cube, [('dim2', 2.5)], extrapolation_mode='nan')
+ r = iintrp.linear( self.simple2d_cube, [('dim2', 2.5)], extrapolation_mode='nan')
self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_coord_nan_extrapolation.cml'))
def test_multiple_coord_extrapolation(self):
- self.assertRaises(ValueError, iris.analysis.interpolate.linear, self.simple2d_cube, [('dim2', 2.5), ('dim1', 12.5)], extrapolation_mode='error')
+ self.assertRaises(ValueError, iintrp.linear, self.simple2d_cube, [('dim2', 2.5), ('dim1', 12.5)], extrapolation_mode='error')
def test_multiple_coord_linear_extrapolation(self):
- r = iris.analysis.interpolate.linear(self.simple2d_cube, [('dim2', 9), ('dim1', 1.5)])
+ r = iintrp.linear(self.simple2d_cube, [('dim2', 9), ('dim1', 1.5)])
self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_multiple_coords_extrapolation.cml'))
def test_lots_of_points(self):
- r = iris.analysis.interpolate.linear(self.simple2d_cube, [('dim1', np.linspace(3, 9, 20))])
+ r = iintrp.linear(self.simple2d_cube, [('dim1', np.linspace(3, 9, 20))])
# XXX Implement a test!?!
def test_shared_axis(self):
c = self.simple2d_cube_extended
- r = iris.analysis.interpolate.linear(c, [('dim2', [3.5, 3.25])])
+ r = iintrp.linear(c, [('dim2', [3.5, 3.25])])
normalise_order(r)
self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_shared_axis.cml'))
- self.assertRaises(ValueError, iris.analysis.interpolate.linear, c, [('dim2', [3.5, 3.25]), ('shared_x_coord', [9, 7])])
+ self.assertRaises(ValueError, iintrp.linear, c, [('dim2', [3.5, 3.25]), ('shared_x_coord', [9, 7])])
def test_points_datatype_casting(self):
# this test tries to extract a float from an array of type integer. the result should be of type float.
- r = iris.analysis.interpolate.linear(self.simple2d_cube_extended, [('shared_x_coord', 7.5)])
+ r = iintrp.linear(self.simple2d_cube_extended, [('shared_x_coord', 7.5)])
self.assertCML(r, ('analysis', 'interpolation', 'linear', 'simple_casting_datatype.cml'))
@@ -519,15 +518,15 @@ def setUp(self):
self.cube = iris.load_cube(file)
def test_slice(self):
- r = iris.analysis.interpolate.linear(self.cube, [('latitude', 0)])
+ r = iintrp.linear(self.cube, [('latitude', 0)])
self.assertCML(r, ('analysis', 'interpolation', 'linear', 'real_2dslice.cml'))
def test_2slices(self):
- r = iris.analysis.interpolate.linear(self.cube, [('latitude', 0.0), ('longitude', 0.0)])
+ r = iintrp.linear(self.cube, [('latitude', 0.0), ('longitude', 0.0)])
self.assertCML(r, ('analysis', 'interpolation', 'linear', 'real_2slices.cml'))
def test_circular(self):
- res = iris.analysis.interpolate.linear(self.cube,
+ res = iintrp.linear(self.cube,
[('longitude', 359.8)])
normalise_order(res)
lon_coord = self.cube.coord('longitude').points
@@ -538,17 +537,20 @@ def test_circular(self):
self.assertArrayAllClose(res.data, expected, rtol=1.0e-6)
# check that the values returned by lon 0 & 360 are the same...
- r1 = iris.analysis.interpolate.linear(self.cube, [('longitude', 360)])
- r2 = iris.analysis.interpolate.linear(self.cube, [('longitude', 0)])
+ r1 = iintrp.linear(self.cube, [('longitude', 360)])
+ r2 = iintrp.linear(self.cube, [('longitude', 0)])
np.testing.assert_array_equal(r1.data, r2.data)
self.assertCML(res, ('analysis', 'interpolation', 'linear',
'real_circular_2dslice.cml'), checksum=False)
-@tests.skip_data
-class TestNearestNeighbour(tests.IrisTest):
- def setUp(self):
+class MixinNearestNeighbour(object):
+ # Define standard tests for the three 'nearest_neighbour' routines.
+ # Cast as a 'mixin', as it is used to test both (a) the original routines
+ # and (b) the replacement operations that justify their deprecation.
+
+ def _common_setUp(self):
self.cube = iris.tests.stock.global_pp()
points = np.arange(self.cube.coord('latitude').shape[0], dtype=np.float32)
coord_to_add = iris.coords.DimCoord(points, long_name='i', units='meters')
@@ -557,10 +559,10 @@ def setUp(self):
def test_nearest_neighbour(self):
point_spec = [('latitude', 40), ('longitude', 39)]
- indices = iris.analysis.interpolate.nearest_neighbour_indices(self.cube, point_spec)
+ indices = iintrp.nearest_neighbour_indices(self.cube, point_spec)
self.assertEqual(indices, (20, 10))
- b = iris.analysis.interpolate.extract_nearest_neighbour(self.cube, point_spec)
+ b = iintrp.extract_nearest_neighbour(self.cube, point_spec)
# Check that the data has not been loaded on either the original cube,
# nor the interpolated one.
@@ -568,7 +570,7 @@ def test_nearest_neighbour(self):
self.assertTrue(self.cube.has_lazy_data())
self.assertCML(b, ('analysis', 'interpolation', 'nearest_neighbour_extract_latitude_longitude.cml'))
- value = iris.analysis.interpolate.nearest_neighbour_data_value(self.cube, point_spec)
+ value = iintrp.nearest_neighbour_data_value(self.cube, point_spec)
self.assertEqual(value, np.array(285.98785, dtype=np.float32))
# Check that the value back is that which was returned by the extract method
@@ -576,26 +578,26 @@ def test_nearest_neighbour(self):
def test_nearest_neighbour_slice(self):
point_spec = [('latitude', 40)]
- indices = iris.analysis.interpolate.nearest_neighbour_indices(self.cube, point_spec)
+ indices = iintrp.nearest_neighbour_indices(self.cube, point_spec)
self.assertEqual(indices, (20, slice(None, None)))
- b = iris.analysis.interpolate.extract_nearest_neighbour(self.cube, point_spec)
+ b = iintrp.extract_nearest_neighbour(self.cube, point_spec)
self.assertCML(b, ('analysis', 'interpolation', 'nearest_neighbour_extract_latitude.cml'))
# cannot get a specific point from these point specifications
- self.assertRaises(ValueError, iris.analysis.interpolate.nearest_neighbour_data_value, self.cube, point_spec)
+ self.assertRaises(ValueError, iintrp.nearest_neighbour_data_value, self.cube, point_spec)
def test_nearest_neighbour_over_specification_which_is_consistent(self):
# latitude 40 is the 20th point
point_spec = [('latitude', 40), ('i', 20), ('longitude', 38)]
- indices = iris.analysis.interpolate.nearest_neighbour_indices(self.cube, point_spec)
+ indices = iintrp.nearest_neighbour_indices(self.cube, point_spec)
self.assertEqual(indices, (20, 10))
- b = iris.analysis.interpolate.extract_nearest_neighbour(self.cube, point_spec)
+ b = iintrp.extract_nearest_neighbour(self.cube, point_spec)
self.assertCML(b, ('analysis', 'interpolation', 'nearest_neighbour_extract_latitude_longitude.cml'))
- value = iris.analysis.interpolate.nearest_neighbour_data_value(self.cube, point_spec)
+ value = iintrp.nearest_neighbour_data_value(self.cube, point_spec)
# Check that the value back is that which was returned by the extract method
self.assertEqual(value, b.data)
@@ -604,7 +606,7 @@ def test_nearest_neighbour_over_specification_mis_aligned(self):
point_spec = [('latitude', 40), ('i', 10), ('longitude', 38)]
# assert that we get a ValueError for over specifying our interpolation
- self.assertRaises(ValueError, iris.analysis.interpolate.nearest_neighbour_data_value, self.cube, point_spec)
+ self.assertRaises(ValueError, iintrp.nearest_neighbour_data_value, self.cube, point_spec)
def test_nearest_neighbour_bounded_simple(self):
point_spec = [('latitude', 37), ('longitude', 38)]
@@ -612,7 +614,7 @@ def test_nearest_neighbour_bounded_simple(self):
coord = self.cube.coord('latitude')
coord.guess_bounds(0.5)
- b = iris.analysis.interpolate.extract_nearest_neighbour(self.cube, point_spec)
+ b = iintrp.extract_nearest_neighbour(self.cube, point_spec)
self.assertCML(b, ('analysis', 'interpolation', 'nearest_neighbour_extract_bounded.cml'))
def test_nearest_neighbour_bounded_requested_midpoint(self):
@@ -622,13 +624,13 @@ def test_nearest_neighbour_bounded_requested_midpoint(self):
coord = self.cube.coord('latitude')
coord.guess_bounds(0.5)
- b = iris.analysis.interpolate.extract_nearest_neighbour(self.cube, point_spec)
+ b = iintrp.extract_nearest_neighbour(self.cube, point_spec)
self.assertCML(b, ('analysis', 'interpolation', 'nearest_neighbour_extract_bounded_mid_point.cml'))
def test_nearest_neighbour_locator_style_coord(self):
point_spec = [('latitude', 39)]
- b = iris.analysis.interpolate.extract_nearest_neighbour(self.cube, point_spec)
+ b = iintrp.extract_nearest_neighbour(self.cube, point_spec)
self.assertCML(b, ('analysis', 'interpolation', 'nearest_neighbour_extract_latitude.cml'))
def test_nearest_neighbour_circular(self):
@@ -696,5 +698,66 @@ def near_value(val, vals):
self.assertArrayAlmostEqual(lon_nearest_vals, lon_expect_vals)
+@tests.skip_data
+class TestNearestNeighbour(tests.IrisTest, MixinNearestNeighbour):
+ def setUp(self):
+ self._common_setUp()
+
+
+@tests.skip_data
+class TestNearestNeighbour__Equivalent(tests.IrisTest, MixinNearestNeighbour):
+ # Class that repeats the tests of "TestNearestNeighbour", to check that the
+ # behaviour of the three 'nearest_neighbour' routines in
+ # iris.analysis.interpolate can be completely replicated with alternative
+ # (newer) functionality.
+
+ def setUp(self):
+ self.patch(
+ 'iris.analysis._interpolate_private.nearest_neighbour_indices',
+ self._equivalent_nn_indices)
+ self.patch(
+ 'iris.analysis._interpolate_private.nearest_neighbour_data_value',
+ self._equivalent_nn_data_value)
+ self.patch(
+ 'iris.analysis._interpolate_private.extract_nearest_neighbour',
+ self._equivalent_extract_nn)
+ self._common_setUp()
+
+ @staticmethod
+ def _equivalent_nn_indices(cube, sample_points,
+ require_single_point=False):
+ indices = [slice(None) for _ in cube.shape]
+ for coord_spec, point in sample_points:
+ coord = cube.coord(coord_spec)
+ dim, = cube.coord_dims(coord)  # expect only 1d --> a single dim!
+ dim_index = coord.nearest_neighbour_index(point)
+ if require_single_point:
+ # Mimic error behaviour of the original "data-value" function:
+ # Any dim already addressed must get the same index.
+ if indices[dim] != slice(None) and indices[dim] != dim_index:
+ raise ValueError('indices over-specified')
+ indices[dim] = dim_index
+ if require_single_point:
+ # Mimic error behaviour of the original "data-value" function:
+ # All dims must have an index.
+ if any(index == slice(None) for index in indices):
+ raise ValueError('result expected to be a single point')
+ return tuple(indices)
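+ # For example (cf. test_nearest_neighbour_slice above): for a (lat, lon)
+ # cube, sample_points=[('latitude', 40)] yields (20, slice(None)),
+ # matching the behaviour of the original routine.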
+
+ @staticmethod
+ def _equivalent_extract_nn(cube, sample_points):
+ indices = TestNearestNeighbour__Equivalent._equivalent_nn_indices(
+ cube, sample_points)
+ new_cube = cube[indices]
+ return new_cube
+
+ @staticmethod
+ def _equivalent_nn_data_value(cube, sample_points):
+ indices = TestNearestNeighbour__Equivalent._equivalent_nn_indices(
+ # for this routine only, enable extra index checks.
+ cube, sample_points, require_single_point=True)
+ return cube.data[indices]
+
+
if __name__ == "__main__":
tests.main()
diff --git a/lib/iris/tests/test_regrid.py b/lib/iris/tests/test_regrid.py
index 463bd8a7cb..67e82fae80 100644
--- a/lib/iris/tests/test_regrid.py
+++ b/lib/iris/tests/test_regrid.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2010 - 2015, Met Office
+# (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
@@ -25,7 +25,7 @@
import iris
from iris import load_cube
-from iris.analysis.interpolate import regrid_to_max_resolution
+from iris.analysis._interpolate_private import regrid_to_max_resolution
from iris.cube import Cube
from iris.coords import DimCoord
from iris.coord_systems import GeogCS
diff --git a/lib/iris/tests/unit/analysis/interpolate/test_linear.py b/lib/iris/tests/unit/analysis/interpolate/test_linear.py
index c9b7f8e1dd..22c9523de5 100644
--- a/lib/iris/tests/unit/analysis/interpolate/test_linear.py
+++ b/lib/iris/tests/unit/analysis/interpolate/test_linear.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2014 - 2015, Met Office
+# (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
@@ -28,7 +28,7 @@
import numpy as np
import iris
-from iris.analysis.interpolate import linear
+from iris.analysis._interpolate_private import linear
from iris.tests import mock
import iris.tests.stock as stock
@@ -39,7 +39,8 @@ def setUp(self):
self.extrapolation = 'extrapolation_mode'
self.scheme = mock.Mock(name='linear scheme')
- @mock.patch('iris.analysis.interpolate.Linear', name='linear_patch')
+ @mock.patch('iris.analysis._interpolate_private.Linear',
+ name='linear_patch')
@mock.patch('iris.cube.Cube.interpolate', name='cube_interp_patch')
def _assert_expected_call(self, sample_points, sample_points_call,
cinterp_patch, linear_patch):
diff --git a/lib/iris/tests/unit/fileformats/ff/test__ff_equivalents.py b/lib/iris/tests/unit/fileformats/ff/test__ff_equivalents.py
index e0203dd8c4..e141650a4c 100644
--- a/lib/iris/tests/unit/fileformats/ff/test__ff_equivalents.py
+++ b/lib/iris/tests/unit/fileformats/ff/test__ff_equivalents.py
@@ -181,7 +181,7 @@ class Test_FFHeader(Mixin_ConstructorTest, tests.IrisTest):
target_class_name = 'FFHeader'
@staticmethod
- def dummy_constructor_call(self, filename, word_depth=16):
+ def dummy_constructor_call(self, filename, word_depth=12345):
# A replacement for the 'real' constructor call in the parent class.
# Used to check for correct args and kwargs in the call.
# Just record the call arguments in a global, for testing.
@@ -196,8 +196,10 @@ def setUp(self):
def test__basic(self):
# Call with just a filename.
+ # NOTE: this ignores "our" constructor word_depth default, as the
+ # default is now re-implemented in the wrapper class definition.
self.check_call(['filename'], {},
- expected_result=('filename', 16))
+ expected_result=('filename', 8))
def test__word_depth(self):
# Call with a word-depth.
@@ -220,7 +222,7 @@ class Test_FF2PP(Mixin_ConstructorTest, tests.IrisTest):
@staticmethod
def dummy_constructor_call(self, filename, read_data=False,
- word_depth=16):
+ word_depth=12345):
# A replacement for the 'real' constructor call in the parent class.
# Used to check for correct args and kwargs in the call.
# Just record the call arguments in a global, for testing.
@@ -235,13 +237,15 @@ def setUp(self):
def test__basic(self):
# Call with just a filename.
+ # NOTE: this ignores "our" constructor word_depth default, as the
+ # default is now re-implemented in the wrapper class definition.
self.check_call(['filename'], {},
- expected_result=('filename', False, 16))
+ expected_result=('filename', False, 8))
def test__read_data(self):
# Call with read_data set.
self.check_call(['filename', True], {},
- expected_result=('filename', True, 16))
+ expected_result=('filename', True, 8))
def test__word_depth(self):
# Call with a word-depth keyword.