diff --git a/lib/iris/fileformats/_ff.py b/lib/iris/fileformats/_ff.py index 301a45736e..d2ce1bcefb 100644 --- a/lib/iris/fileformats/_ff.py +++ b/lib/iris/fileformats/_ff.py @@ -184,7 +184,7 @@ def vectors(self, subgrid): Returns ------- - A 2-tuple of X-vector, Y-vector. + A 2-tuple of X-vector, Y-vector. """ x_p, x_u = self._x_vectors() @@ -223,7 +223,7 @@ def regular_x(self, subgrid): Returns ------- - A 2-tuple of BZX, BDX. + A 2-tuple of BZX, BDX. """ bdx = self.ew_spacing @@ -245,7 +245,7 @@ def regular_y(self, subgrid): Returns ------- - A 2-tuple of BZY, BDY. + A 2-tuple of BZY, BDY. """ bdy = self.ns_spacing @@ -307,6 +307,7 @@ def __init__(self, filename, word_depth=DEFAULT_FF_WORD_DEPTH): ---------- filename : str Specify the name of the FieldsFile. + word_depth : int, default=DEFAULT_FF_WORD_DEPTH Returns ------- @@ -445,9 +446,10 @@ def __init__(self, filename, read_data=False, word_depth=DEFAULT_FF_WORD_DEPTH): ---------- filename : str Specify the name of the FieldsFile. - read_data : bool, optional + read_data : bool, default=False Specify whether to read the associated PPField data within the FieldsFile. Default value is False. + word_depth : int, default=DEFAULT_FF_WORD_DEPTH Returns ------- diff --git a/lib/iris/fileformats/_nc_load_rules/actions.py b/lib/iris/fileformats/_nc_load_rules/actions.py index 8f408716e6..fefa58ad10 100644 --- a/lib/iris/fileformats/_nc_load_rules/actions.py +++ b/lib/iris/fileformats/_nc_load_rules/actions.py @@ -7,6 +7,7 @@ For now, we are still emulating various aspects of how our original Pyke-based code used the Pyke 'engine' to hold translation data, both Pyke-specific and not : + 1) basic details from the iris.fileformats.cf analysis of the file are recorded before translating each output cube, using "engine.assert_case_specific_fact(name, args)". diff --git a/lib/iris/fileformats/_nc_load_rules/helpers.py b/lib/iris/fileformats/_nc_load_rules/helpers.py index f656667e63..e4a9c60a96 100644 --- a/lib/iris/fileformats/_nc_load_rules/helpers.py +++ b/lib/iris/fileformats/_nc_load_rules/helpers.py @@ -268,7 +268,7 @@ def _split_cell_methods(nc_cell_methods: str) -> List[re.Match]: Parameters ---------- - nc_cell_methods : + nc_cell_methods : str The value of the cell methods attribute to be split. Returns @@ -342,6 +342,7 @@ def parse_cell_methods(nc_cell_methods, cf_name=None): ---------- nc_cell_methods : str The value of the cell methods attribute to be parsed. + cf_name : optional Returns ------- @@ -1331,8 +1332,10 @@ def build_ancil_var(engine, cf_av_var): def _is_lat_lon(cf_var, ud_units, std_name, std_name_grid, axis_name, prefixes): """Determine whether the CF coordinate variable is a latitude/longitude variable. - Ref: [CF] Section 4.1 Latitude Coordinate. - [CF] Section 4.2 Longitude Coordinate. + Ref: + + * [CF] Section 4.1 Latitude Coordinate. + * [CF] Section 4.2 Longitude Coordinate. """ is_valid = False diff --git a/lib/iris/fileformats/abf.py b/lib/iris/fileformats/abf.py index 76f3573882..6dd8dfd14f 100644 --- a/lib/iris/fileformats/abf.py +++ b/lib/iris/fileformats/abf.py @@ -65,11 +65,14 @@ class ABFField: def __init__(self, filename): """Create an ABFField object from the given filename. - Args: + Parameters + ---------- + filename : str + An ABF filename. - * filename - An ABF filename. - - Example:: + Examples + -------- + :: field = ABFField("AVHRRBUVI01.1985feba.abl") @@ -194,14 +197,15 @@ def to_cube(self): def load_cubes(filespecs, callback=None): """Load cubes from a list of ABF filenames. 
- Args: - - * filenames - list of ABF filenames to load - - Kwargs: - - * callback - a function that can be passed to :func:`iris.io.run_callback` + Parameters + ---------- + filespecs : + List of ABF filenames to load. + callback : optional + A function that can be passed to :func:`iris.io.run_callback` + Notes + ----- .. note:: The resultant cubes may not be in the same order as in the file. diff --git a/lib/iris/fileformats/cf.py b/lib/iris/fileformats/cf.py index d6dab22305..5a0230d5eb 100644 --- a/lib/iris/fileformats/cf.py +++ b/lib/iris/fileformats/cf.py @@ -119,11 +119,11 @@ def identify(self, variables, ignore=None, target=None, warn=True): ---------- variables : Dictionary of netCDF4.Variable instances by variable name. - ignore : optional, default=None + ignore : optional List of variable names to ignore. - target : optional, default=None + target : optional Name of a single variable to check. - warn : optional, default=None + warn : bool, default=True Issue a warning if a missing variable is referenced. Returns @@ -300,8 +300,10 @@ class CFAuxiliaryCoordinateVariable(CFVariable): Identified by the CF-netCDF variable attribute 'coordinates'. Also see :class:`iris.fileformats.cf.CFLabelVariable`. - Ref: [CF] Chapter 5. Coordinate Systems. - [CF] Section 6.2. Alternative Coordinates. + Ref: + + * [CF] Chapter 5. Coordinate Systems. + * [CF] Section 6.2. Alternative Coordinates. """ @@ -554,8 +556,10 @@ class _CFFormulaTermsVariable(CFVariable): Identified by the CF-netCDF variable attribute 'formula_terms'. - Ref: [CF] Section 4.3.2. Dimensional Vertical Coordinate. - [CF] Appendix D. Dimensionless Vertical Coordinates. + Ref: + + * [CF] Section 4.3.2. Dimensional Vertical Coordinate. + * [CF] Appendix D. Dimensionless Vertical Coordinates. """ @@ -628,8 +632,10 @@ class CFGridMappingVariable(CFVariable): Identified by the CF-netCDF variable attribute 'grid_mapping'. - Ref: [CF] Section 5.6. Horizontal Coordinate Reference Systems, Grid Mappings, and Projections. - [CF] Appendix F. Grid Mappings. + Ref: + + * [CF] Section 5.6. Horizontal Coordinate Reference Systems, Grid Mappings, and Projections. + * [CF] Appendix F. Grid Mappings. """ diff --git a/lib/iris/fileformats/dot.py b/lib/iris/fileformats/dot.py index daa3f2fd61..53f85794c6 100644 --- a/lib/iris/fileformats/dot.py +++ b/lib/iris/fileformats/dot.py @@ -58,7 +58,7 @@ def save(cube, target): Parameters ---------- cube : :class:`iris.cube.Cube` - target + target : A filename or open file handle. See Also -------- @@ -94,9 +94,8 @@ def save_png(source, target, launch=False): target : A filename or open file handle. If passing a file handle, take care to open it for binary output. - **kwargs : - * launch - Display the image. Default is False. + launch : bool, default=False + Display the image. Default is False. See Also -------- @@ -342,6 +341,7 @@ def _dot_node(indent, id, name, attributes): The visual name of the node. attributes : An iterable of (name, value) attribute pairs. + """ # noqa: D410, D411 attributes = r"\n".join("%s: %s" % item for item in attributes) template = """%(indent)s"%(id)s" [ diff --git a/lib/iris/fileformats/netcdf/loader.py b/lib/iris/fileformats/netcdf/loader.py index 6f69b6e64d..1dcf026d84 100644 --- a/lib/iris/fileformats/netcdf/loader.py +++ b/lib/iris/fileformats/netcdf/loader.py @@ -562,10 +562,8 @@ def load_cubes(file_sources, callback=None, constraints=None): file_sources : str or list One or more NetCDF filenames/OPeNDAP URLs to load from. OR open datasets.
- callback : function, optional Function which can be passed on to :func:`iris.io.run_callback`. - constraints : optional Returns @@ -801,6 +799,7 @@ def as_dask(self) -> None: Notes ----- This function acts as a context manager, for use in a ``with`` block. + """ old_mode = self.mode old_var_dim_chunksizes = deepcopy(self.var_dim_chunksizes) diff --git a/lib/iris/fileformats/netcdf/saver.py b/lib/iris/fileformats/netcdf/saver.py index b35b85bbae..a607d7de97 100644 --- a/lib/iris/fileformats/netcdf/saver.py +++ b/lib/iris/fileformats/netcdf/saver.py @@ -182,9 +182,9 @@ def append(self, name, coord): Parameters ---------- - name: + name : CF name of the associated coordinate. - coord: + coord : The coordinate of the associated CF name. Returns @@ -209,7 +209,7 @@ def name(self, coord): Parameters ---------- - coord: + coord : The coordinate of the associated CF name. Returns @@ -229,12 +229,13 @@ def coord(self, name): Parameters ---------- - name: + name : CF name of the associated coordinate. Returns ------- The coordinate of the associated CF name, or None if not recognised. + """ result = None for pair in self._map: @@ -338,7 +339,7 @@ def _fillvalue_report(fill_info, is_masked, contains_fill_value, warn=False): Whether the data array was masked. contains_fill_value : bool Whether the data array contained the fill-value. - warn : bool, optional + warn : bool, default=False If True, also issue any resulting warning immediately. Returns @@ -390,11 +391,9 @@ def __init__(self, filename, netcdf_format, compute=True): filename : str or netCDF4.Dataset Name of the netCDF file to save the cube. OR a writeable object supporting the :class:`netCDF4.Dataset` api. - netcdf_format : str Underlying netCDF file format, one of 'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_CLASSIC' or 'NETCDF3_64BIT'. Default is 'NETCDF4' format. - compute : bool, default=True If ``True``, delayed variable saves will be completed on exit from the Saver context (after first closing the target file), equivalent to @@ -404,7 +403,7 @@ def __init__(self, filename, netcdf_format, compute=True): variables for which the source data was lazy. These writes can be completed later, see :meth:`delayed_completion`. - .. Note:: + .. note:: If ``filename`` is an open dataset, rather than a filepath, then the caller must specify ``compute=False``, **close the dataset**, and complete delayed saving afterwards. @@ -543,39 +542,38 @@ def write( ---------- cube : :class:`iris.cube.Cube` A :class:`iris.cube.Cube` to be saved to a netCDF file. - local_keys : iterable of str + local_keys : iterable of str, optional An iterable of cube attribute keys. Any cube attributes with matching keys will become attributes on the data variable rather than global attributes. - .. Note:: + .. note:: Has no effect if :attr:`iris.FUTURE.save_split_attrs` is ``True``. - - unlimited_dimensions : iterable of str and/or :class:`iris.coords.Coord` + unlimited_dimensions : iterable of str and/or :class:`iris.coords.Coord`, optional List of coordinate names (or coordinate objects) corresponding to coordinate dimensions of `cube` to save with the NetCDF dimension variable length 'UNLIMITED'. By default, no unlimited dimensions are saved. Only the 'NETCDF4' format supports multiple 'UNLIMITED' dimensions. - zlib : bool + zlib : bool, default=False If `True`, the data will be compressed in the netCDF file using gzip compression (default `False`). - complevel : int + complevel : int, default=4 An integer between 1 and 9 describing the level of compression desired (default 4). Ignored if `zlib=False`.
- shuffle : bool + shuffle : bool, default=True If `True`, the HDF5 shuffle filter will be applied before compressing the data (default `True`). This significantly improves compression. Ignored if `zlib=False`. - fletcher32 : bool + fletcher32 : bool, default=False If `True`, the Fletcher32 HDF5 checksum algorithm is activated to detect errors. Default `False`. - contiguous : bool + contiguous : bool, default=False If `True`, the variable data is stored contiguously on disk. Default `False`. Setting to `True` for a variable with an unlimited dimension will trigger an error. - chunksizes : tuple of int + chunksizes : tuple of int, optional Used to manually specify the HDF5 chunksizes for each dimension of the variable. A detailed discussion of HDF chunking and I/O performance is available @@ -583,7 +581,7 @@ def write( Basically, you want the chunk size for each dimension to match as closely as possible the size of the data block that users will read from the file. `chunksizes` cannot be set if `contiguous=True`. - endian : str + endian : str, default="native" Used to control whether the data is stored in little or big endian format on disk. Possible values are 'little', 'big' or 'native' (default). The library will automatically handle endian conversions @@ -591,7 +589,7 @@ def write( on a computer with the opposite format as the one used to create the file, there may be some performance advantage to be gained by setting the endian-ness. - least_significant_digit : int + least_significant_digit : int, optional If `least_significant_digit` is specified, variable data will be truncated (quantized). In conjunction with `zlib=True` this produces 'lossy', but significantly more efficient compression. For @@ -603,7 +601,7 @@ def write( "least_significant_digit -- power of ten of the smallest decimal place in unpacked data that is a reliable value". Default is `None`, or no quantization, or 'lossless' compression. - packing : type or str or dict or list + packing : type or str or dict or list, optional A numpy integer datatype (signed or unsigned) or a string that describes a numpy integer dtype (i.e. 'i2', 'short', 'u4') or a dict of packing parameters as described below. This provides @@ -618,7 +616,7 @@ def write( manually using a dict to avoid this. The default is `None`, in which case the datatype is determined from the cube and no packing will occur. - fill_value: + fill_value : optional The value to use for the `_FillValue` attribute on the netCDF variable. If `packing` is specified the value of `fill_value` should be in the domain of the packed data. @@ -783,7 +781,7 @@ def update_global_attributes(self, attributes=None, **kwargs): Parameters ---------- - attributes : dict or iterable of key, value pairs + attributes : dict or iterable of key, value pairs, optional CF global attributes to be updated. """ # TODO: when iris.FUTURE.save_split_attrs is removed, this routine will @@ -807,12 +805,14 @@ def _create_cf_dimensions(self, cube, dimension_names, unlimited_dimensions=None ---------- cube : :class:`iris.cube.Cube` A :class:`iris.cube.Cube` in which to lookup coordinates. + dimension_names : unlimited_dimensions : iterable of str and/or :class:`iris.coords.Coord` objects List of coordinates to make unlimited (None by default). Returns ------- None. + """ unlimited_dim_names = [] if unlimited_dimensions is not None: @@ -1497,7 +1497,7 @@ def _create_cf_bounds(self, coord, cf_var, cf_name): ---------- coord : :class:`iris.coords.Coord` A coordinate of a cube.
- cf_var: + cf_var : CF-netCDF variable. cf_name : str Name of the CF-NetCDF variable. @@ -1763,12 +1763,12 @@ def _create_generic_cf_array_var( An Iris :class:`iris.coords._DimensionalMetadata`, belonging to the cube. Provides data, units and standard/long/var names. Not used if 'element_dims' is not None. - element_dims : list of str, or None + element_dims : list of str, optional If set, contains the variable dimension (names), otherwise these are taken from `element.cube_dims[cube]`. For Mesh components (element coordinates and connectivities), this *must* be passed in, as "element.cube_dims" does not function. - fill_value : number or None + fill_value : number, optional If set, create the variable with this fill-value, and fill any masked data points with this value. If not set, standard netcdf4-python behaviour : the variable has no @@ -2207,6 +2207,8 @@ def _create_cf_data_variable( fill_value : optional See :func:`iris.fileformats.netcdf.Saver.write` + Notes + ----- All other keywords are passed through to the dataset's `createVariable` method. @@ -2619,7 +2621,7 @@ def save( When saving to a dataset, ``compute`` **must** be ``False`` : See the ``compute`` parameter. - netcdf_format : str + netcdf_format : str, default="NETCDF4" Underlying netCDF file format, one of 'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_CLASSIC' or 'NETCDF3_64BIT'. Default is 'NETCDF4' format. local_keys : iterable of str, optional @@ -2637,20 +2639,20 @@ def save( variable length 'UNLIMITED'. By default, no unlimited dimensions are saved. Only the 'NETCDF4' format supports multiple 'UNLIMITED' dimensions. - zlib : bool, optional + zlib : bool, default=False If `True`, the data will be compressed in the netCDF file using gzip compression (default `False`). - complevel : int + complevel : int, default=4 An integer between 1 and 9 describing the level of compression desired (default 4). Ignored if `zlib=False`. - shuffle : bool, optional + shuffle : bool, default=True If `True`, the HDF5 shuffle filter will be applied before compressing the data (default `True`). This significantly improves compression. Ignored if `zlib=False`. - fletcher32 : bool, optional + fletcher32 : bool, default=False If `True`, the Fletcher32 HDF5 checksum algorithm is activated to detect errors. Default `False`. - contiguous : bool, optional + contiguous : bool, default=False If `True`, the variable data is stored contiguously on disk. Default `False`. Setting to `True` for a variable with an unlimited dimension will trigger an error. @@ -2662,7 +2664,7 @@ def save( Basically, you want the chunk size for each dimension to match as closely as possible the size of the data block that users will read from the file. `chunksizes` cannot be set if `contiguous=True`. - endian : str + endian : str, default="native" Used to control whether the data is stored in little or big endian format on disk. Possible values are 'little', 'big' or 'native' (default). The library will automatically handle endian conversions @@ -2707,7 +2709,7 @@ def save( same number of elements as `cube` if `cube` is a :class:`iris.cube.CubeList`, or a single element, and each element of this argument will be applied to each cube separately. - compute : bool, optional + compute : bool, default=True Default is ``True``, meaning complete the file immediately, and return ``None``.
When ``False``, create the output file but don't write any lazy array content to @@ -2717,12 +2719,12 @@ def save( Several such data saves can be performed in parallel, by passing a list of them into a :func:`dask.compute` call. - .. Note:: + .. note:: when computed, the returned :class:`dask.delayed.Delayed` object returns a list of :class:`Warning` : These are any warnings which *would* have been issued in the save call, if ``compute`` had been ``True``. - .. Note:: + .. note:: If saving to an open dataset instead of a filepath, then the caller **must** specify ``compute=False``, and complete delayed saves **after closing the dataset**. @@ -2732,7 +2734,7 @@ def save( Returns ------- - result: None or dask.delayed.Delayed + result : None or dask.delayed.Delayed If `compute=True`, returns `None`. Otherwise returns a :class:`dask.delayed.Delayed`, which implements delayed writing to fill in the variables data. diff --git a/lib/iris/fileformats/nimrod.py b/lib/iris/fileformats/nimrod.py index 009535dc8f..55927df3ef 100644 --- a/lib/iris/fileformats/nimrod.py +++ b/lib/iris/fileformats/nimrod.py @@ -171,9 +171,9 @@ class NimrodField: References ---------- - Met Office (2003): Met Office Rain Radar Data from the NIMROD System. - NCAS British Atmospheric Data Centre, date of citation. - https://catalogue.ceda.ac.uk/uuid/82adec1f896af6169112d09cc1174499 + Met Office (2003): Met Office Rain Radar Data from the NIMROD System. + NCAS British Atmospheric Data Centre, date of citation. + https://catalogue.ceda.ac.uk/uuid/82adec1f896af6169112d09cc1174499 """ @@ -296,7 +296,7 @@ def load_cubes(filenames, callback=None): ---------- filenames : List of NIMROD filenames to load - callback : + callback : optional A function which can be passed on to :func:`iris.io.run_callback` Notes diff --git a/lib/iris/fileformats/nimrod_load_rules.py b/lib/iris/fileformats/nimrod_load_rules.py index 3c4c56bc3b..5ca9ef4be3 100644 --- a/lib/iris/fileformats/nimrod_load_rules.py +++ b/lib/iris/fileformats/nimrod_load_rules.py @@ -890,11 +890,10 @@ def time_averaging(cube, field): def run(field, handle_metadata_errors=True): """Convert a NIMROD field to an Iris cube. - Args - ---- - field: :class:`~iris.fileformats.nimrod.NimrodField` - - handle_metadata_errors + Parameters + ---------- + field : :class:`~iris.fileformats.nimrod.NimrodField` + handle_metadata_errors : bool, default=True Set to False to omit handling of known meta-data deficiencies in Nimrod-format data @@ -902,6 +901,7 @@ def run(field, handle_metadata_errors=True): ------- :class:`~iris.cube.Cube` A new :class:`~iris.cube.Cube`, created from the NimrodField. + """ cube = iris.cube.Cube(field.data) diff --git a/lib/iris/fileformats/pp.py b/lib/iris/fileformats/pp.py index 9a205fb1be..2780c52625 100644 --- a/lib/iris/fileformats/pp.py +++ b/lib/iris/fileformats/pp.py @@ -275,8 +275,8 @@ class STASH(collections.namedtuple("STASH", "model section item")): def __new__(cls, model, section, item): """Create namedtuple STASH instance. - Args - ---- + Parameters + ---------- model : A positive integer less than 100, or None. section : @@ -370,28 +370,27 @@ class SplittableInt: Notes ----- - No support for negative numbers + No support for negative numbers. """ def __init__(self, value, name_mapping_dict=None): """Build a SplittableInt given the positive integer value provided. 
- Args - ---- - **kwargs - * name_mapping_dict - (dict) - A special mapping to provide name based access to specific - integer positions: + Parameters + ---------- + name_mapping_dict : dict, optional + A special mapping to provide name based access to specific + integer positions: - >>> a = SplittableInt(1234, {'hundreds': 2}) - >>> print(a.hundreds) - 2 - >>> a.hundreds = 9 - >>> print(a.hundreds) - 9 - >>> print(a) - 1934 + >>> a = SplittableInt(1234, {'hundreds': 2}) + >>> print(a.hundreds) + 2 + >>> a.hundreds = 9 + >>> print(a.hundreds) + 9 + >>> print(a) + 1934 """ @@ -1617,17 +1616,16 @@ def make_pp_field(header): def load(filename, read_data=False, little_ended=False): """Return an iterator of PPFields given a filename. - Args - ---- - filename + Parameters + ---------- + filename : str String of the filename to load. - **kwargs - * read_data - boolean - Flag whether or not the data should be read, if False an empty - data manager will be provided which can subsequently load the data - on demand. Default False. - * little_ended - boolean - If True, file contains all little-ended words (header and data). + read_data : bool, default=False + Flag whether or not the data should be read, if False an empty + data manager will be provided which can subsequently load the data + on demand. Default False. + little_ended : bool, default=False + If True, file contains all little-ended words (header and data). Notes ----- @@ -2007,11 +2005,10 @@ def load_cubes(filenames, callback=None, constraints=None): ---------- filenames : List of PP filenames to load. - **kwargs : - * constraints - a list of Iris constraints - * callback - a function which can be passed on to :func:`iris.io.run_callback` + constraints : optional + A list of Iris constraints + callback : optional + A function which can be passed on to :func:`iris.io.run_callback` Notes ----- @@ -2028,21 +2025,20 @@ def load_cubes_little_endian(filenames, callback=None, constraints=None): """Load cubes from a list of PP filenames containing little-endian data. - Args - ---- - filenames + Parameters + ---------- + filenames : List of PP filenames to load. - **kwargs - * constraints - a list of Iris constraints - * callback - a function which can be passed on to :func:`iris.io.run_callback` + constraints : optional + A list of Iris constraints + callback : optional + A function which can be passed on to :func:`iris.io.run_callback` Notes ----- The resultant cubes may not be in the order that they are in the file (order is not preserved when there is a field with orography - references) + references). """ return _load_cubes_variable_loader( @@ -2057,9 +2053,9 @@ def load_cubes_little_endian(filenames, callback=None, constraints=None): def load_pairs_from_fields(pp_fields): r"""Convert an iterable of PP fields into an iterable of tuples of (Cubes, PPField). - Args - ---- - pp_fields: + Parameters + ---------- + pp_fields : An iterable of :class:`iris.fileformats.pp.PPField`. Returns @@ -2152,27 +2148,24 @@ def _load_cubes_variable_loader( def save(cube, target, append=False, field_coords=None): """Use the PP saving rules (and any user rules) to save a cube to a PP file. - Args - ---- - cube: :class:`iris.cube.Cube` - + Parameters + ---------- + cube : :class:`iris.cube.Cube` target : A filename or open file handle. - - **kwargs - * append - Whether to start a new file afresh or add the cube(s) - to the end of the file. - Only applicable when target is a filename, not a file - handle.
- Default is False. - * field_coords - list of 2 coords or coord names which are to be used - for reducing the given cube into 2d slices, - which will ultimately determine the x and y - coordinates of the resulting fields. - If None, the final two dimensions are chosen - for slicing. + append : bool, default=False + Whether to start a new file afresh or add the cube(s) + to the end of the file. + Only applicable when target is a filename, not a file + handle. + Default is False. + field_coords : optional + List of 2 coords or coord names which are to be used + for reducing the given cube into 2d slices, + which will ultimately determine the x and y + coordinates of the resulting fields. + If None, the final two dimensions are chosen + for slicing. Notes ----- @@ -2191,18 +2184,17 @@ def save_pairs_from_cube(cube, field_coords=None, target=None): Use the PP saving rules to convert a cube or iterable of cubes to an iterable of (2D cube, PP field) pairs. - Args - ---- - cube: + Parameters + ---------- + cube : A :class:`iris.cube.Cube` - **kwargs - * field_coords: - List of 2 coords or coord names which are to be used for - reducing the given cube into 2d slices, which will ultimately - determine the x and y coordinates of the resulting fields. - If None, the final two dimensions are chosen for slicing. - * target: - A filename or open file handle. + field_coords : optional + List of 2 coords or coord names which are to be used for + reducing the given cube into 2d slices, which will ultimately + determine the x and y coordinates of the resulting fields. + If None, the final two dimensions are chosen for slicing. + target : optional + A filename or open file handle. """ # Open issues @@ -2309,18 +2301,16 @@ def as_fields(cube, field_coords=None, target=None): Use the PP saving rules (and any user rules) to convert a cube to an iterable of PP fields. - Args - ---- - cube - A :class:`iris.cube.Cube` - **kwargs : - * field_coords: - List of 2 coords or coord names which are to be used for - reducing the given cube into 2d slices, which will ultimately - determine the x and y coordinates of the resulting fields. - If None, the final two dimensions are chosen for slicing. - * target: - A filename or open file handle. + Parameters + ---------- + cube : :class:`iris.cube.Cube` + field_coords : optional + List of 2 coords or coord names which are to be used for + reducing the given cube into 2d slices, which will ultimately + determine the x and y coordinates of the resulting fields. + If None, the final two dimensions are chosen for slicing. + target : optional + A filename or open file handle. """ return ( @@ -2331,21 +2321,20 @@ def as_fields(cube, field_coords=None, target=None): -def save_fields(fields, target, append=False): +def save_fields(fields, target, append: bool = False): """Save an iterable of PP fields to a PP file. - Args - ---- - fields: + Parameters + ---------- + fields : An iterable of PP fields. - target: + target : A filename or open file handle. - **kwargs : - * append: - Whether to start a new file afresh or add the cube(s) to the end - of the file. - Only applicable when target is a filename, not a file handle. - Default is False. + append : bool, default=False + Whether to start a new file afresh or add the cube(s) to the end + of the file. + Only applicable when target is a filename, not a file handle. + Default is False.
See Also -------- diff --git a/lib/iris/fileformats/pp_load_rules.py b/lib/iris/fileformats/pp_load_rules.py index f93f177ba8..8343afab40 100644 --- a/lib/iris/fileformats/pp_load_rules.py +++ b/lib/iris/fileformats/pp_load_rules.py @@ -354,7 +354,7 @@ def _collapse_degenerate_points_and_bounds(points, bounds=None, rtol=1.0e-7): ---------- points : :class:`numpy.ndarray` Array of points values. - bounds : :class:`numpy.ndarray`, optional, default=None + bounds : :class:`numpy.ndarray`, optional Array of bounds values. This array should have an additional vertex dimension (typically of length 2) when compared to the points array i.e. bounds.shape = points.shape + (nvertex,) @@ -463,12 +463,12 @@ def _new_coord_and_dims( Standard name of coordinate. units : str or cf_unit.Unit Units of coordinate. - lower_and_upper_bounds : pair of array-like or None, optional, default=None + lower_and_upper_bounds : pair of array-like or None, optional Corresponding bounds values (lower, upper), if any. Returns ------- - a new (coordinate, dims) pair. + A new (coordinate, dims) pair. """ bounds = lower_and_upper_bounds @@ -813,7 +813,7 @@ def _model_level_number(lblev): Returns ------- - Model level number (int). + Model level number (int). """ # See Word no. 33 (LBLEV) in section 4 of UM Model Docs (F3). diff --git a/lib/iris/fileformats/pp_save_rules.py b/lib/iris/fileformats/pp_save_rules.py index 9bcfd8d92f..20ed0bd618 100644 --- a/lib/iris/fileformats/pp_save_rules.py +++ b/lib/iris/fileformats/pp_save_rules.py @@ -489,9 +489,9 @@ def _non_std_cross_section_rules(cube, pp): Parameters ---------- - cube: + cube : The cube being saved as a series of PP fields. - pp: + pp : The current PP field having save rules applied. Returns @@ -622,9 +622,9 @@ def _lbproc_rules(cube, pp): Parameters ---------- - cube: + cube : The cube being saved as a series of PP fields. - pp: + pp : The current PP field having save rules applied. Returns diff --git a/lib/iris/fileformats/um/_fast_load_structured_fields.py b/lib/iris/fileformats/um/_fast_load_structured_fields.py index 6b09227491..976819ffd5 100644 --- a/lib/iris/fileformats/um/_fast_load_structured_fields.py +++ b/lib/iris/fileformats/um/_fast_load_structured_fields.py @@ -280,14 +280,6 @@ def group_structured_fields( **collation_kwargs : dict Additional constructor keywords for collation creation. - Implicitly, within each result group, *all* other metadata components - should be either: - - * the same for all fields, - * completely irrelevant, or - * used by a vectorised rule function (such as - :func:`iris.fileformats.pp_load_rules._convert_time_coords`). - Returns ------- Generator of 'collation_class' objects @@ -296,6 +288,14 @@ def group_structured_fields( Notes ----- + Implicitly, within each result group, *all* other metadata components + should be either: + + * the same for all fields, + * completely irrelevant, or + * used by a vectorised rule function (such as + :func:`iris.fileformats.pp_load_rules._convert_time_coords`). + The function sorts and collates on phenomenon-relevant metadata only, defined as the field components: 'lbuser[3]' (stash), 'lbproc' (statistic), 'lbuser[6]' (model). 
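Reviewer note: as a quick sanity check on the converted PP save docstrings above, a minimal usage sketch of the documented signatures; the file paths here are illustrative only, not part of the patch::

    import iris
    import iris.fileformats.pp as pp

    # Any 2d-sliceable cube will do; the path is a stand-in.
    cube = iris.load_cube("air_temperature.pp")

    # Plain save, then append to the same file -- 'append' only
    # applies when 'target' is a filename, not an open file handle.
    pp.save(cube, "out.pp")
    pp.save(cube, "out.pp", append=True)

    # Choose the two coords used to slice the cube into PP fields;
    # if field_coords is omitted, the final two dimensions are used.
    pp.save(cube, "out.pp", field_coords=["latitude", "longitude"])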
diff --git a/lib/iris/fileformats/um/_ff_replacement.py b/lib/iris/fileformats/um/_ff_replacement.py index ee40bacafa..52afe343c3 100644 --- a/lib/iris/fileformats/um/_ff_replacement.py +++ b/lib/iris/fileformats/um/_ff_replacement.py @@ -23,10 +23,10 @@ def um_to_pp(filename, read_data=False, word_depth=None): ---------- filename : str Specify the name of the FieldsFile. - read_data : bool, optional, default=read_data + read_data : bool, default=False Specify whether to read the associated PPField data within the FieldsFile. Default value is False. - word_depth : optional, default=None + word_depth : optional Returns ------- @@ -57,10 +57,10 @@ def load_cubes(filenames, callback, constraints=None, _loader_kwargs=None): ---------- filenames : List of filenames to load. callback : A function which can be passed on to :func:`iris.io.run_callback` - constraints : optional, default=None - _loader_kwargs : optional, default=None + constraints : optional + _loader_kwargs : optional Notes ----- diff --git a/lib/iris/fileformats/um/_optimal_array_structuring.py b/lib/iris/fileformats/um/_optimal_array_structuring.py index 64f4e8fad2..7d006ebeff 100644 --- a/lib/iris/fileformats/um/_optimal_array_structuring.py +++ b/lib/iris/fileformats/um/_optimal_array_structuring.py @@ -65,8 +65,7 @@ def optimal_array_structure(ordering_elements, actual_values_elements=None): structure. The order of this is significant, in that earlier elements get priority when associating dimensions with specific elements. - - actual_values_elements : iterable of (name, 1-d array), optional, default=None + actual_values_elements : iterable of (name, 1-d array), optional The 'real' values used to construct the result arrays, if different from 'ordering_elements'. Must contain all the same names (but not necessarily in the same order).
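Reviewer note: to ground the `Saver.write` / `save` keyword documentation above, a short sketch of how the compression and packing keywords combine in practice. Values and paths are illustrative, and the packing dict keys follow the "packing parameters as described below" text that the hunks elide::

    import iris

    cube = iris.load_cube("air_temperature.nc")  # stand-in path

    # Lossless compressed save: zlib enables gzip compression,
    # complevel sets its strength, and shuffle (default True)
    # improves the achievable compression ratio.
    iris.save(
        cube,
        "compressed.nc",
        netcdf_format="NETCDF4",
        zlib=True,
        complevel=4,
        shuffle=True,
    )

    # Lossy packed save: store as 16-bit integers with explicit
    # scale_factor/add_offset packing parameters.
    iris.save(
        cube,
        "packed.nc",
        packing={"dtype": "i2", "scale_factor": 0.01, "add_offset": 273.15},
    )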