2 changes: 1 addition & 1 deletion INSTALL
@@ -132,7 +132,7 @@ mock 1.0.1 (http://pypi.python.org/pypi/mock/)
nose 1.1.2 or later (https://nose.readthedocs.io/en/latest/)
Python package for software testing. Iris is not compatible with nose2.

-pep8 1.4.6* (https://pypi.python.org/pypi/pep8)
+pep8 1.4.6 (https://pypi.python.org/pypi/pep8)
Python package for software testing.

pandas 0.11.0 or later (http://pandas.pydata.org)
2 changes: 1 addition & 1 deletion conda-requirements.txt
@@ -17,7 +17,7 @@ setuptools
# Iris testing/documentation dependencies
mock
nose
-pep8=1.5.7
+pep8
sphinx
iris_sample_data
filelock
12 changes: 6 additions & 6 deletions lib/iris/_concatenate.py
@@ -341,12 +341,13 @@ def __init__(self, cube):
# Collate the auxiliary coordinate metadata and scalar coordinates.
#
axes = dict(T=0, Z=1, Y=2, X=3)

# Coordinate sort function - by guessed coordinate axis, then
# by coordinate definition, then by dimensions, in ascending order.
-key_func = lambda coord: (axes.get(guess_coord_axis(coord),
-                                   len(axes) + 1),
-                          coord._as_defn(),
-                          cube.coord_dims(coord))
+def key_func(coord):
+    return (axes.get(guess_coord_axis(coord), len(axes) + 1),
+            coord._as_defn(),
+            cube.coord_dims(coord))

for coord in sorted(cube.aux_coords, key=key_func):
dims = cube.coord_dims(coord)
@@ -647,8 +648,7 @@ def concatenate(self):

# Sequence the skeleton segments into the correct order
# pending concatenation.
-key_func = lambda skeleton: skeleton.signature.dim_extents
-skeletons.sort(key=key_func,
+skeletons.sort(key=lambda skeleton: skeleton.signature.dim_extents,
               reverse=(order == _DECREASING))

# Concatenate the new dimension coordinate.
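The pattern above in _concatenate.py recurs throughout this changeset: a lambda assigned to a name is replaced by an equivalent def statement. As a standalone sketch of the motivation (not part of the diff, using throwaway names, and assuming a pep8 release that includes the check): pep8 reports E731 ("do not assign a lambda expression, use a def") for the lambda form, and a def statement also gives the function a real __name__, which makes tracebacks clearer.

    # Flagged: newer pep8 releases report E731 for a lambda bound to a name.
    square = lambda x: x * x
    print(square.__name__)     # '<lambda>' - anonymous in tracebacks

    # Preferred: the equivalent def statement.
    def square_fn(x):
        return x * x

    print(square_fn.__name__)  # 'square_fn' - named in tracebacks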
10 changes: 7 additions & 3 deletions lib/iris/_constraints.py
@@ -264,11 +264,15 @@ def extract(self, cube):
    desired_values = list(self._coord_thing)
    # A dramatic speedup can be had if we don't have bounds.
    if coord.has_bounds():
-        call_func = lambda cell: cell in desired_values
+        def call_func(cell):
+            return cell in desired_values

Member: This could be a one-liner like the original:

    def call_func(cell): return cell in desired_values

Any strong preferences?

Member: I find reading over multiple lines clearer than having the def and the function body on one line. I prefer @QuLogic's current version, I think.

    else:
-        call_func = lambda cell: cell.point in desired_values
+        def call_func(cell):
+            return cell.point in desired_values
else:
-    call_func = lambda c: c == self._coord_thing
+    def call_func(c):
+        return c == self._coord_thing

try_quick = (isinstance(coord, iris.coords.DimCoord) and
not isinstance(self._coord_thing, iris.coords.Cell))

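On the review exchange above: both spellings define the same function, so the choice is stylistic. A standalone sketch (not from the diff; note that some pep8 versions report E704, "statement on same line as def", for the one-line form, though that check is often ignored by default):

    desired_values = [1, 2, 3]

    # One-liner form suggested in the review (may trigger E704):
    def call_func_oneline(cell): return cell in desired_values

    # Multi-line form kept in the merged change:
    def call_func(cell):
        return cell in desired_values

    assert call_func_oneline(2) and call_func(2)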
25 changes: 18 additions & 7 deletions lib/iris/analysis/__init__.py
@@ -283,39 +283,50 @@ def coord_comparison(*cubes):

# Get all coordinate groups which aren't complete (i.e. there is a
# None in the group)
-coord_is_None_fn = lambda cube, coord: coord is None
+def coord_is_None_fn(cube, coord):
+    return coord is None
+
if coord_group.matches_any(coord_is_None_fn):
ungroupable.add(coord_group)

# Get all coordinate groups which don't all equal one another
# (None -> group not all equal)
-not_equal_fn = lambda cube, coord: coord != first_coord
+def not_equal_fn(cube, coord):
+    return coord != first_coord
+
if coord_group.matches_any(not_equal_fn):
not_equal.add(coord_group)

# Get all coordinate groups which don't all share the same shape
# (None -> group has different shapes)
-diff_shape_fn = lambda cube, coord: coord.shape != first_coord.shape
+def diff_shape_fn(cube, coord):
+    return coord.shape != first_coord.shape
+
if coord_group.matches_any(diff_shape_fn):
different_shaped_coords.add(coord_group)

# Get all coordinate groups which don't all share the same data
# dimension on their respective cubes
# (None -> group describes a different dimension)
-diff_data_dim_fn = lambda cube, coord: \
-    cube.coord_dims(coord) != first_cube.coord_dims(first_coord)
+def diff_data_dim_fn(cube, coord):
+    return cube.coord_dims(coord) != first_cube.coord_dims(first_coord)
+
if coord_group.matches_any(diff_data_dim_fn):
different_data_dimension.add(coord_group)

# get all coordinate groups which don't describe a dimension
# (None -> doesn't describe a dimension)
-no_data_dim_fn = lambda cube, coord: cube.coord_dims(coord) == ()
+def no_data_dim_fn(cube, coord):
+    return cube.coord_dims(coord) == ()
+
if coord_group.matches_all(no_data_dim_fn):
no_data_dimension.add(coord_group)

# get all coordinate groups which don't describe a dimension
# (None -> not a scalar coordinate)
-no_data_dim_fn = lambda cube, coord: coord.shape == (1, )
+def no_data_dim_fn(cube, coord):
+    return coord.shape == (1, )
+
if coord_group.matches_all(no_data_dim_fn):
scalar_coords.add(coord_group)

8 changes: 4 additions & 4 deletions lib/iris/analysis/_interpolation.py
@@ -405,8 +405,8 @@ def _setup(self):
# Only DimCoords can be circular.
if circular:
coord_points = extend_circular_coord(coord, coord_points)
-offset = ((coord_points.max() + coord_points.min() - modulus)
-          * 0.5)
+offset = 0.5 * (coord_points.max() + coord_points.min() -
+                modulus)
self._circulars.append((circular, modulus,
index, coord_dims[0],
offset))
@@ -634,8 +634,8 @@ def construct_new_coord(coord):
return new_coord, dims

def gen_new_cube():
-if (isinstance(new_coord, DimCoord) and len(dims) > 0
-        and dims[0] not in dims_with_dim_coords):
+if (isinstance(new_coord, DimCoord) and len(dims) > 0 and
+        dims[0] not in dims_with_dim_coords):
new_cube._add_unique_dim_coord(new_coord, dims)
dims_with_dim_coords.append(dims[0])
else:
6 changes: 4 additions & 2 deletions lib/iris/coord_categorisation.py
@@ -81,8 +81,10 @@ def add_categorised_coord(cube, name, from_coord, category_function,
str_vectorised_fn = np.vectorize(category_function, otypes=[object])
# Use a common type for string arrays (N.B. limited to 64 chars)
all_cases_string_type = '|S64' if six.PY2 else '|U64'
-vectorised_fn = lambda *args: str_vectorised_fn(*args).astype(
-    all_cases_string_type)
+
+def vectorised_fn(*args):
+    return str_vectorised_fn(*args).astype(all_cases_string_type)
+
else:
vectorised_fn = np.vectorize(category_function)
new_coord = iris.coords.AuxCoord(vectorised_fn(from_coord,
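The wrapper added above works around a quirk of np.vectorize with string outputs: without otypes, NumPy infers the output dtype from the first result, which can silently truncate longer strings, so the category function is vectorised with otypes=[object] and the wrapper casts the result to a common fixed-width string type (limited to 64 characters, '|S64' on Python 2 and '|U64' on Python 3). A standalone sketch (not from the diff, with a made-up month_name function):

    import numpy as np

    def month_name(point):
        # Variable-length string results.
        return ['jan', 'february'][point % 2]

    str_vectorised_fn = np.vectorize(month_name, otypes=[object])

    def vectorised_fn(*args):
        # Cast the object array to a common fixed-width unicode type,
        # mirroring the wrapper in coord_categorisation.py.
        return str_vectorised_fn(*args).astype('|U64')

    print(vectorised_fn(np.array([0, 1, 2])))  # ['jan' 'february' 'jan']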
6 changes: 3 additions & 3 deletions lib/iris/coord_systems.py
@@ -911,9 +911,9 @@ def __init__(self, latitude_of_projection_origin=0.0,
self.ellipsoid = ellipsoid

def __repr__(self):
return "LambertAzimuthalEqualArea(latitude_of_projection_origin={!r}, "\
"longitude_of_projection_origin={!r}, false_easting={!r}, "\
"false_northing={!r}, ellipsoid={!r})".format(
return ("LambertAzimuthalEqualArea(latitude_of_projection_origin={!r},"
" longitude_of_projection_origin={!r}, false_easting={!r},"
" false_northing={!r}, ellipsoid={!r})").format(
self.latitude_of_projection_origin,
self.longitude_of_projection_origin,
self.false_easting,
3 changes: 2 additions & 1 deletion lib/iris/coords.py
@@ -964,7 +964,8 @@ def collapsed(self, dims_to_collapse=None):
if np.issubdtype(self.dtype, np.str):
# Collapse the coordinate by serializing the points and
# bounds as strings.
-serialize = lambda x: '|'.join([str(i) for i in x.flatten()])
+def serialize(x):
+    return '|'.join([str(i) for i in x.flatten()])
bounds = None
string_type_fmt = 'S{}' if six.PY2 else 'U{}'
if self.bounds is not None:
40 changes: 23 additions & 17 deletions lib/iris/cube.py
@@ -600,8 +600,8 @@ def _is_single_item(testee):
We count string types as 'single', also.

"""
-return (isinstance(testee, six.string_types)
-        or not isinstance(testee, collections.Iterable))
+return (isinstance(testee, six.string_types) or
+        not isinstance(testee, collections.Iterable))


class Cube(CFVariableMixin):
@@ -1132,7 +1132,8 @@ def coord_dims(self, coord):
# Search derived aux coords
target_defn = coord._as_defn()
if not matches:
-match = lambda factory: factory._as_defn() == target_defn
+def match(factory):
+    return factory._as_defn() == target_defn
factories = filter(match, self._aux_factories)
matches = [factory.derived_dims(self.coord_dims) for factory in
factories]
@@ -1341,9 +1342,11 @@ def coords(self, name_or_coord=None, standard_name=None,
msg = 'The attributes keyword was expecting a dictionary ' \
'type, but got a %s instead.' % type(attributes)
raise ValueError(msg)
-attr_filter = lambda coord_: all(k in coord_.attributes and
-                                 coord_.attributes[k] == v for
-                                 k, v in six.iteritems(attributes))
+
+def attr_filter(coord_):
+    return all(k in coord_.attributes and coord_.attributes[k] == v
+               for k, v in six.iteritems(attributes))
+
coords_and_factories = [coord_ for coord_ in coords_and_factories
if attr_filter(coord_)]

@@ -2157,13 +2160,16 @@ def __getitem__(self, keys):
# multiple times)
dimension_mapping, slice_gen = iris.util.column_slices_generator(
full_slice, len(self.shape))
-new_coord_dims = lambda coord_: [dimension_mapping[d] for d in
-                                 self.coord_dims(coord_) if
-                                 dimension_mapping[d] is not None]

-new_cell_measure_dims = lambda cm_: [dimension_mapping[d] for d in
-                                     self.cell_measure_dims(cm_) if
-                                     dimension_mapping[d] is not None]

+def new_coord_dims(coord_):
+    return [dimension_mapping[d]
+            for d in self.coord_dims(coord_)
+            if dimension_mapping[d] is not None]

+def new_cell_measure_dims(cm_):
+    return [dimension_mapping[d]
+            for d in self.cell_measure_dims(cm_)
+            if dimension_mapping[d] is not None]

try:
first_slice = next(slice_gen)
@@ -2648,9 +2654,9 @@ def _as_list_of_coords(self, names_or_coords):
coords.append(self.coord(name_or_coord))
else:
# Don't know how to handle this type
msg = "Don't know how to handle coordinate of type %s. " \
"Ensure all coordinates are of type six.string_types or " \
"iris.coords.Coord." % type(name_or_coord)
msg = ("Don't know how to handle coordinate of type %s. "
"Ensure all coordinates are of type six.string_types "
"or iris.coords.Coord.") % (type(name_or_coord), )
raise TypeError(msg)
return coords

@@ -3316,8 +3322,8 @@ def collapsed(self, coords, aggregator, **kwargs):
data_result = unrolled_data

# Perform the aggregation in lazy form if possible.
-elif (aggregator.lazy_func is not None
-      and len(dims_to_collapse) == 1 and self.has_lazy_data()):
+elif (aggregator.lazy_func is not None and
+      len(dims_to_collapse) == 1 and self.has_lazy_data()):
# Use a lazy operation separately defined by the aggregator, based
# on the cube lazy array.
# NOTE: do not reform the data in this case, as 'lazy_aggregate'
6 changes: 3 additions & 3 deletions lib/iris/experimental/equalise_cubes.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2013 - 2015, Met Office
+# (C) British Crown Copyright 2013 - 2016, Met Office
#
# This file is part of Iris.
#
@@ -45,8 +45,8 @@ def equalise_attributes(cubes):
cube_keys = list(cube.attributes.keys())
common_keys = [
key for key in common_keys
-    if key in cube_keys
-    and np.all(cube.attributes[key] == cubes[0].attributes[key])]
+    if (key in cube_keys and
+        np.all(cube.attributes[key] == cubes[0].attributes[key]))]

# Remove all the other attributes.
for cube in cubes:
3 changes: 2 additions & 1 deletion lib/iris/experimental/fieldsfile.py
@@ -159,7 +159,8 @@ def adjust(dims):

def _bind_coords(coords_and_dims, dim_coord_dims, dim_coords_and_dims,
aux_coords_and_dims):
-key_func = lambda item: _HINTS.get(item[0].name(), len(_HINTS))
+def key_func(item):
+    return _HINTS.get(item[0].name(), len(_HINTS))
# Target the first DimCoord for a dimension at dim_coords,
# and target everything else at aux_coords.
for coord, dims in sorted(coords_and_dims, key=key_func):
12 changes: 7 additions & 5 deletions lib/iris/fileformats/grib/_load_convert.py
@@ -152,7 +152,9 @@ def unscale(value, factor):
is returned.

"""
-_unscale = lambda v, f: v / 10.0 ** f
+def _unscale(v, f):
+    return v / 10.0 ** f
+
if isinstance(value, Iterable) or isinstance(factor, Iterable):
def _masker(item):
result = ma.masked_equal(item, _MDI)
@@ -1072,15 +1074,15 @@ def grid_definition_template_90(section, metadata):
raise TranslationError('Unsupported space-view orientation.')

# Determine the coordinate system.
-sub_satellite_lat = (section['latitudeOfSubSatellitePoint']
-                     * _GRID_ACCURACY_IN_DEGREES)
+sub_satellite_lat = (section['latitudeOfSubSatellitePoint'] *
+                     _GRID_ACCURACY_IN_DEGREES)
# The subsequent calculations to determine the apparent Earth
# diameters rely on the satellite being over the equator.
if sub_satellite_lat != 0:
raise TranslationError('Unsupported non-zero latitude for '
'space-view perspective.')
-sub_satellite_lon = (section['longitudeOfSubSatellitePoint']
-                     * _GRID_ACCURACY_IN_DEGREES)
+sub_satellite_lon = (section['longitudeOfSubSatellitePoint'] *
+                     _GRID_ACCURACY_IN_DEGREES)
major, minor, radius = ellipsoid_geometry(section)
geog_cs = ellipsoid(section['shapeOfTheEarth'], major, minor, radius)
height_above_centre = geog_cs.semi_major_axis * section['Nr'] / 1e6
10 changes: 6 additions & 4 deletions lib/iris/fileformats/netcdf.py
@@ -525,7 +525,9 @@ def _load_cube(engine, cf, cf_var, filename):
# Populate coordinate attributes with the untouched attributes from the
# associated CF-netCDF variable.
coordinates = engine.provides.get('coordinates', [])
-attribute_predicate = lambda item: item[0] not in _CF_ATTRS
+
+def attribute_predicate(item):
+    return item[0] not in _CF_ATTRS

for coord, cf_var_name in coordinates:
tmpvar = filter(attribute_predicate,
@@ -1836,9 +1838,9 @@ def _create_cf_data_variable(self, cube, dimension_names, local_keys=None,
cf_name = self._increment_name(cf_name)

# if netcdf3 avoid streaming due to dtype handling
-if (not cube.has_lazy_data()
-        or self._dataset.file_format in ('NETCDF3_CLASSIC',
-                                         'NETCDF3_64BIT')):
+if (not cube.has_lazy_data() or
+        self._dataset.file_format in ('NETCDF3_CLASSIC',
+                                      'NETCDF3_64BIT')):
# Determine whether there is a cube MDI value.
fill_value = None
if isinstance(cube.data, ma.core.MaskedArray):
10 changes: 5 additions & 5 deletions lib/iris/fileformats/nimrod_load_rules.py
@@ -1,4 +1,4 @@
-# (C) British Crown Copyright 2010 - 2015, Met Office
+# (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
@@ -156,10 +156,10 @@ def british_national_grid_y(cube, field):

"""
if field.origin_corner == 0: # top left
-y_coord = DimCoord(np.arange(field.num_rows)[::-1] *
-                   -field.row_step + field.y_origin,
-                   standard_name="projection_y_coordinate", units="m",
-                   coord_system=iris.coord_systems.OSGB())
+y_coord = DimCoord(
+    np.arange(field.num_rows)[::-1] * -field.row_step + field.y_origin,
+    standard_name="projection_y_coordinate", units="m",
+    coord_system=iris.coord_systems.OSGB())
cube.add_dim_coord(y_coord, 0)
else:
raise TranslationError("Corner {0} not yet implemented".
4 changes: 2 additions & 2 deletions lib/iris/fileformats/pp.py
@@ -1411,8 +1411,8 @@ def save(self, file_handle):
header_elem = int(header_elem)
lb[index] = header_elem
else:
-index = slice(pos[0] - NUM_LONG_HEADERS, pos[-1]
-              - NUM_LONG_HEADERS + 1)
+index = slice(pos[0] - NUM_LONG_HEADERS,
+              pos[-1] - NUM_LONG_HEADERS + 1)
b[index] = header_elem

# Although all of the elements are now populated, we still need to
11 changes: 7 additions & 4 deletions lib/iris/fileformats/um/_fast_load_structured_fields.py
@@ -120,10 +120,13 @@ def _field_vector_element_arrays(self):
"""Define the field components used in the structure analysis."""
# Define functions to make t1 and t2 values as date-time tuples.
# These depend on header version (PPField2 has no seconds values).
-t1_fn = lambda fld: (fld.lbyr, fld.lbmon, fld.lbdat,
-                     fld.lbhr, fld.lbmin, getattr(fld, 'lbsec', 0))
-t2_fn = lambda fld: (fld.lbyrd, fld.lbmond, fld.lbdatd,
-                     fld.lbhrd, fld.lbmind, getattr(fld, 'lbsecd', 0))
+def t1_fn(fld):
+    return (fld.lbyr, fld.lbmon, fld.lbdat, fld.lbhr, fld.lbmin,
+            getattr(fld, 'lbsec', 0))
+
+def t2_fn(fld):
+    return (fld.lbyrd, fld.lbmond, fld.lbdatd, fld.lbhrd, fld.lbmind,
+            getattr(fld, 'lbsecd', 0))

# Return a list of (name, array) for the vectorizable elements.
component_arrays = [