4 changes: 3 additions & 1 deletion lib/iris/analysis/_regrid.py
@@ -288,13 +288,15 @@ def _regrid(src_data, x_dim, y_dim,

interp_coords = np.dstack(interp_coords)

weights = interpolator.compute_interp_weights(interp_coords)

def interpolate(data):
# Update the interpolator for this data slice.
data = data.astype(interpolator.values.dtype)
if y_dim < x_dim:
data = data.T
interpolator.values = data
data = interpolator(interp_coords)
data = interpolator.interp_using_pre_computed_weights(weights)
if y_dim > x_dim:
data = data.T
return data
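The effect of this change is that the interpolation weights are now computed once, outside the per-slice `interpolate` closure, and reused for every slice of the source data. A minimal sketch of that pattern, assuming an interpolator that exposes the two new methods and a sequence of 2-D data slices (the names `regrid_slices`, `sample_points` and `slices` are illustrative, not part of the diff, and the transpose handling from `_regrid` is omitted):

    def regrid_slices(interpolator, sample_points, slices):
        # Compute the interpolation weights once for the target points.
        weights = interpolator.compute_interp_weights(sample_points)
        results = []
        for data in slices:
            # Point the interpolator at this slice's values, then reuse the
            # precomputed weights instead of recomputing them per slice.
            interpolator.values = data.astype(interpolator.values.dtype)
            results.append(
                interpolator.interp_using_pre_computed_weights(weights))
        return results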
138 changes: 113 additions & 25 deletions lib/iris/analysis/_scipy_interpolate.py
@@ -1,7 +1,9 @@

from __future__ import (absolute_import, division, print_function)
from six.moves import range, zip

import itertools

from scipy.sparse import csr_matrix
import numpy as np


@@ -138,20 +140,52 @@ def __call__(self, xi, method=None):
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
The coordinates to sample the gridded data at.

method : str
The method of interpolation to perform. Supported are "linear" and
"nearest".

"""
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
# Note: No functionality should live in this method. It should all be
# decomposed into the two interfaces (compute weights + use weights).
weights = self.compute_interp_weights(xi, method)
return self.interp_using_pre_computed_weights(weights)

def compute_interp_weights(self, xi, method=None):
"""
Prepare the interpolator for interpolation to the given sample points.

.. note::
This interface provides the ability to reuse weights on multiple
data sources, such as in the case of regridding. For normal
interpolation, simply call the interpolator with the sample points.

Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at.

Returns
-------
A tuple of the items necessary for passing to
:meth:`interp_using_pre_computed_weights`. The contents of this return
value are not guaranteed to be consistent across Iris versions, and
should only be used for passing to
:meth:`interp_using_pre_computed_weights`.

Example
-------
>>> coords = np.array([[[50.7, -3.5],
...                     [50.6, -3.5]],
...                    [[50.7, -3.1],
...                     [50.6, -3.1]]])
>>> compute_interp_weights(coords)

"""
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
if xi.shape[-1] != ndim:
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], ndim))
@@ -166,10 +200,71 @@ def __call__(self, xi, method=None):
raise ValueError("One of the requested xi is out of "
"bounds in dimension %d" % i)

indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
method = self.method if method is None else method
prepared = (xi_shape, method) + self._find_indices(xi.T)

if method == 'linear':

xi_shape, method, indices, norm_distances, out_of_bounds = prepared

# Allocate arrays for describing the sparse matrix.
n_src_values_per_result_value = 2 ** ndim
n_result_values = len(indices[0])
n_non_zero = n_result_values * n_src_values_per_result_value
weights = np.ones(n_non_zero, dtype=norm_distances[0].dtype)
col_indices = np.empty(n_non_zero)
row_ptrs = np.arange(0, n_non_zero + n_src_values_per_result_value,
n_src_values_per_result_value)

corners = itertools.product(*[[(i, 1 - n), (i + 1, n)]
for i, n in zip(indices,
norm_distances)])
shape = self.values.shape[:ndim]

for i, corner in enumerate(corners):
corner_indices = [ci for ci, cw in corner]
n_indices = np.ravel_multi_index(corner_indices, shape,
mode='wrap')
col_indices[i::n_src_values_per_result_value] = n_indices
for ci, cw in corner:
weights[i::n_src_values_per_result_value] *= cw

n_src_values = np.prod([len(g) for g in self.grid])
sparse_matrix = csr_matrix((weights, col_indices, row_ptrs),
shape=(n_result_values, n_src_values))

prepared = (xi_shape, method, sparse_matrix, None, out_of_bounds)

return prepared
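To make the weight construction concrete, here is a small, self-contained sketch of the same idea for a single sample point on a 1-D grid: each result value gets 2 ** ndim non-zero entries (two, in 1-D) whose weights are products of the normalised distances, stored as one row of a CSR matrix. The grid and values below are invented purely for illustration.

    import numpy as np
    from scipy.sparse import csr_matrix

    grid = np.array([0.0, 1.0, 2.0, 3.0])        # 1-D source grid
    values = np.array([10.0, 20.0, 30.0, 40.0])  # data on that grid

    x = 1.25                                     # sample point
    i = np.searchsorted(grid, x) - 1             # lower bounding index -> 1
    t = (x - grid[i]) / (grid[i + 1] - grid[i])  # normalised distance -> 0.25

    # One result value, 2 ** ndim == 2 non-zero weights: (1 - t) and t.
    weights = np.array([1.0 - t, t])
    col_indices = np.array([i, i + 1])
    row_ptrs = np.array([0, 2])
    sparse_matrix = csr_matrix((weights, col_indices, row_ptrs),
                               shape=(1, len(grid)))

    print(sparse_matrix * values)                # [22.5] == 0.75*20 + 0.25*30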

def interp_using_pre_computed_weights(self, computed_weights):
"""
Perform the interpolation using pre-computed interpolation weights.

.. note::
This interface provides the ability to reuse weights on multiple
data sources, such as in the case of regridding. For normal
interpolation, simply call the interpolator with the sample points,
rather than using this decomposed interface.

Parameters
----------
computed_weights : *intentionally undefined interface*
The pre-computed interpolation weights which come from calling
:meth:`compute_interp_weights`.

"""
[xi_shape, method, indices, norm_distances,
out_of_bounds] = computed_weights

method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)

ndim = len(self.grid)

if method == "linear":
result = self._evaluate_linear(
indices, norm_distances, out_of_bounds)
result = self._evaluate_linear_sparse(indices)
elif method == "nearest":
result = self._evaluate_nearest(
indices, norm_distances, out_of_bounds)
@@ -178,22 +273,15 @@ def __call__(self, xi, method=None):

return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
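With this decomposition, calling the interpolator directly remains equivalent to the two-step form; the weights only need to be kept when they are to be reused. A hedged sketch, assuming an already constructed interpolator `interp` and target points `xi`:

    # One-shot form: unchanged public behaviour.
    result = interp(xi)

    # Decomposed form: the weights can be cached and applied to any number
    # of value arrays defined on the same source grid.
    weights = interp.compute_interp_weights(xi)
    result = interp.interp_using_pre_computed_weights(weights)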

def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (np.newaxis,) * \
(self.values.ndim - len(indices))

# find relevant values
# each i and i+1 represents a edge
import itertools
edges = itertools.product(*[[i, i + 1] for i in indices])
values = 0.
for edge_indices in edges:
weight = 1.
for ei, i, yi in zip(edge_indices, indices, norm_distances):
weight *= np.where(ei == i, 1 - yi, yi)
values += np.asarray(self.values[edge_indices]) * weight[vslice]
return values
def _evaluate_linear_sparse(self, sparse_matrix):
ndim = len(self.grid)
if ndim == self.values.ndim:
Review comment (Member): A comment on this case might help future readers.

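# No trailing (non-interpolated) dimensions: flatten the source values to
# a vector so the sparse matrix product maps them straight onto the
# result values.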
result = sparse_matrix * self.values.reshape(-1)
else:
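# Trailing dimensions present: keep them as columns so every one is
# interpolated in a single sparse matrix product.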
shape = (sparse_matrix.shape[1], -1)
result = sparse_matrix * self.values.reshape(shape)

return result

def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = []
New file: 97 additions
@@ -0,0 +1,97 @@
# (C) British Crown Copyright 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the
:func:`iris.analysis._scipy_interpolate._RegularGridInterpolator` class."""

from __future__ import (absolute_import, division, print_function)

# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests

import mock

import numpy as np
import iris

from iris.analysis._scipy_interpolate import _RegularGridInterpolator
from scipy.sparse.csr import csr_matrix
import iris.tests.stock as stock


class Test(tests.IrisTest):
def setUp(self):
# Load a source cube, then generate an interpolator instance, calculate
# the interpolation weights and set up a target grid.
self.cube = stock.simple_2d()
x_points = self.cube.coord('bar').points
y_points = self.cube.coord('foo').points
self.interpolator = _RegularGridInterpolator([x_points, y_points],
self.cube.data,
method='linear',
bounds_error=False,
fill_value=None)
newx = x_points + 0.7
newy = y_points + 0.7

d_0 = self.cube.data[0, 0]
d_1 = self.cube.data[0, 1]
d_2 = self.cube.data[1, 0]
d_3 = self.cube.data[1, 1]
px_0, px_1 = x_points[0], x_points[1]
py_0, py_1 = y_points[0], y_points[1]
px_t = px_0 + 0.7
py_t = py_0 + 0.7
dyt_0 = self._interpolate_point(py_t, py_0, py_1, d_0, d_1)
dyt_1 = self._interpolate_point(py_t, py_0, py_1, d_2, d_3)
self.test_increment = self._interpolate_point(px_t, px_0, px_1,
dyt_0, dyt_1)

xv, yv = np.meshgrid(newy, newx)
self.tgrid = np.dstack((yv, xv))
self.weights = self.interpolator.compute_interp_weights(self.tgrid)

@staticmethod
def _interpolate_point(p_t, p_0, p_1, d_0, d_1):
return d_0 + (d_1 - d_0)*((p_t - p_0)/(p_1 - p_0))

def test_compute_interp_weights(self):
weights = self.weights
self.assertIsInstance(weights, tuple)
self.assertEqual(len(weights), 5)
self.assertEqual(weights[0], self.tgrid.shape)
self.assertEqual(weights[1], 'linear')
self.assertIsInstance(weights[2], csr_matrix)

def test__evaluate_linear_sparse(self):
interpolator = self.interpolator
weights = self.weights
output_data = interpolator._evaluate_linear_sparse(weights[2])
test_data = self.cube.data.reshape(-1) + self.test_increment
self.assertArrayAlmostEqual(output_data, test_data)

def test_interp_using_pre_computed_weights(self):
interpolator = self.interpolator
weights = self.weights
output_data = interpolator.interp_using_pre_computed_weights(weights)
test_data = self.cube.data + self.test_increment
self.assertEqual(output_data.shape, self.cube.data.shape)
self.assertArrayAlmostEqual(output_data, test_data)


if __name__ == "__main__":
tests.main()