22 changes: 16 additions & 6 deletions doc/whats-new.rst
@@ -21,17 +21,20 @@ v0.14.1 (unreleased)
Breaking changes
~~~~~~~~~~~~~~~~

- Minimum cftime version is now 1.0.3. By `Deepak Cherian <https://github.com/dcherian>`_.
- Broken compatibility with cftime < 1.0.3.
By `Deepak Cherian <https://github.com/dcherian>`_.

.. note::

cftime version 1.0.4 is broken (`cftime/126 <https://github.com/Unidata/cftime/issues/126>`_), use version 1.0.4.2 instead.
cftime version 1.0.4 is broken
(`cftime/126 <https://github.com/Unidata/cftime/issues/126>`_);
please use version 1.0.4.2 instead.

- All leftover support for dates from non-standard calendars through netcdftime, the
module included in versions of netCDF4 prior to 1.4 that eventually became the
cftime package, has been removed in favor of relying solely on the standalone
cftime package (:pull:`3450`). By `Spencer Clark
<https://github.com/spencerkclark>`_.
cftime package (:pull:`3450`).
By `Spencer Clark <https://github.com/spencerkclark>`_.

New Features
~~~~~~~~~~~~
@@ -52,6 +55,14 @@ New Features
for now. Enable it with :py:meth:`xarray.set_options(display_style="html")`.
(:pull:`3425`) by `Benoit Bovy <https://github.com/benbovy>`_ and
`Julia Signell <https://github.com/jsignell>`_.
- Implement `dask deterministic hashing
<https://docs.dask.org/en/latest/custom-collections.html#deterministic-hashing>`_
for xarray objects. Note that xarray objects with a dask.array backend already used
deterministic hashing in previous releases; this change implements it when whole
xarray objects are embedded in a dask graph, e.g. when :meth:`DataArray.map` is
invoked. (:issue:`3378`, :pull:`3446`)
By `Deepak Cherian <https://github.com/dcherian>`_ and
`Guido Imperiale <https://github.com/crusaderky>`_.
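
  A minimal sketch of the behaviour this entry describes (illustrative only; it
  assumes dask and numpy are installed, and the variable names are made up):

  .. code-block:: python

      import dask.base
      import numpy as np
      import xarray as xr

      da = xr.DataArray(np.arange(4), dims="x", name="a")

      # Structurally identical objects hash to the same deterministic token,
      # so dask can recognise them as the same input inside a graph.
      assert dask.base.tokenize(da) == dask.base.tokenize(da.copy(deep=True))

      # Changing the underlying data changes the token.
      assert dask.base.tokenize(da) != dask.base.tokenize(da + 1)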

Bug fixes
~~~~~~~~~
@@ -95,8 +106,7 @@ Internal Changes

- Use Python 3.6 idioms throughout the codebase. (:pull:`3419`)
By `Maximilian Roos <https://github.com/max-sixty>`_.
- Implement :py:func:`__dask_tokenize__` for xarray objects.
By `Deepak Cherian <https://github.com/dcherian>`_ and `Guido Imperiale <https://github.com/crusaderky>`_.


.. _whats-new.0.14.0:

2 changes: 1 addition & 1 deletion xarray/core/dataarray.py
@@ -755,7 +755,7 @@ def reset_coords(
return dataset

def __dask_tokenize__(self):
return (DataArray, self._variable, self._coords, self._name)
return (type(self), self._variable, self._coords, self._name)

def __dask_graph__(self):
return self._to_temp_dataset().__dask_graph__()
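A minimal sketch of the ``__dask_tokenize__`` protocol this change relies on, using a
hypothetical ``Point`` class; returning ``type(self)`` instead of a hard-coded class
presumably keeps subclasses from colliding with the parent class's token:

.. code-block:: python

    import dask.base

    class Point:
        def __init__(self, x, y):
            self.x, self.y = x, y

        def __dask_tokenize__(self):
            # dask.base.tokenize() calls this method and hashes the returned
            # tuple; including type(self) keeps subclass tokens distinct.
            return (type(self), self.x, self.y)

    class MaskedPoint(Point):
        pass

    assert dask.base.tokenize(Point(1, 2)) == dask.base.tokenize(Point(1, 2))
    assert dask.base.tokenize(Point(1, 2)) != dask.base.tokenize(MaskedPoint(1, 2))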
2 changes: 1 addition & 1 deletion xarray/core/dataset.py
@@ -650,7 +650,7 @@ def load(self, **kwargs) -> "Dataset":
return self

def __dask_tokenize__(self):
return (Dataset, self._variables, self._coord_names, self._attrs)
return (type(self), self._variables, self._coord_names, self._attrs)

def __dask_graph__(self):
graphs = {k: v.__dask_graph__() for k, v in self.variables.items()}
7 changes: 5 additions & 2 deletions xarray/core/variable.py
@@ -391,7 +391,9 @@ def compute(self, **kwargs):
return new.load(**kwargs)

def __dask_tokenize__(self):
return Variable, self._dims, self.data, self._attrs
# Use v.data, instead of v._data, in order to cope with the wrappers
# around NetCDF and the like
return type(self), self._dims, self.data, self._attrs

def __dask_graph__(self):
if isinstance(self._data, dask_array_type):
@@ -1967,7 +1969,8 @@ def __init__(self, dims, data, attrs=None, encoding=None, fastpath=False):
self._data = PandasIndexAdapter(self._data)

def __dask_tokenize__(self):
return (IndexVariable, self._dims, self._data.array, self._attrs)
# Don't waste time converting pd.Index to np.ndarray
return (type(self), self._dims, self._data.array, self._attrs)

def load(self):
# data is already loaded into memory for IndexVariable
72 changes: 50 additions & 22 deletions xarray/tests/test_dask.py
@@ -22,6 +22,7 @@
assert_frame_equal,
assert_identical,
raises_regex,
requires_scipy_or_netCDF4,
)
from .test_backends import create_tmp_file

Expand Down Expand Up @@ -1141,7 +1142,9 @@ def test_make_meta(map_ds):
assert meta.data_vars[variable].shape == (0,) * meta.data_vars[variable].ndim


@pytest.mark.parametrize("obj", [make_da(), make_ds()])
@pytest.mark.parametrize(
"obj", [make_da(), make_da().compute(), make_ds(), make_ds().compute()]
)
@pytest.mark.parametrize(
"transform",
[
@@ -1154,41 +1157,66 @@ def test_make_meta(map_ds):
lambda x: x.rename({"cxy": "cxynew"}),
],
)
def test_normalize_token_not_identical(obj, transform):
def test_token_changes_on_transform(obj, transform):
with raise_if_dask_computes():
assert not dask.base.tokenize(obj) == dask.base.tokenize(transform(obj))
assert not dask.base.tokenize(obj.compute()) == dask.base.tokenize(
transform(obj.compute())
)
assert dask.base.tokenize(obj) != dask.base.tokenize(transform(obj))


@pytest.mark.parametrize(
"obj", [make_da(), make_da().compute(), make_ds(), make_ds().compute()]
)
def test_token_changes_when_data_changes(obj):
with raise_if_dask_computes():
t1 = dask.base.tokenize(obj)

@pytest.mark.parametrize("transform", [lambda x: x, lambda x: x.compute()])
def test_normalize_differently_when_data_changes(transform):
obj = transform(make_ds())
new = obj.copy(deep=True)
new["a"] *= 2
if isinstance(obj, DataArray):
obj *= 2
else:
obj["a"] *= 2
with raise_if_dask_computes():
assert not dask.base.tokenize(obj) == dask.base.tokenize(new)
t2 = dask.base.tokenize(obj)
assert t2 != t1

obj = transform(make_da())
new = obj.copy(deep=True)
new *= 2
obj.coords["ndcoord"] *= 2
with raise_if_dask_computes():
assert not dask.base.tokenize(obj) == dask.base.tokenize(new)
t3 = dask.base.tokenize(obj)
assert t3 != t2


@pytest.mark.parametrize("obj", [make_da().compute(), make_ds().compute()])
def test_token_changes_when_buffer_changes(obj):
with raise_if_dask_computes():
t1 = dask.base.tokenize(obj)

if isinstance(obj, DataArray):
obj[0, 0] = 123
else:
obj["a"][0, 0] = 123
with raise_if_dask_computes():
t2 = dask.base.tokenize(obj)
assert t2 != t1

obj.coords["ndcoord"][0] = 123
with raise_if_dask_computes():
t3 = dask.base.tokenize(obj)
assert t3 != t2


@pytest.mark.parametrize(
"transform", [lambda x: x, lambda x: x.copy(), lambda x: x.copy(deep=True)]
)
@pytest.mark.parametrize(
"obj", [make_da(), make_ds(), make_da().indexes["x"], make_ds().variables["a"]]
"transform",
[lambda x: x, lambda x: x.copy(deep=False), lambda x: x.copy(deep=True)],
)
def test_normalize_token_identical(obj, transform):
@pytest.mark.parametrize("obj", [make_da(), make_ds(), make_ds().variables["a"]])
def test_token_identical(obj, transform):
with raise_if_dask_computes():
assert dask.base.tokenize(obj) == dask.base.tokenize(transform(obj))
assert dask.base.tokenize(obj.compute()) == dask.base.tokenize(
transform(obj.compute())
)


def test_normalize_token_netcdf_backend(map_ds):
@requires_scipy_or_netCDF4
def test_normalize_token_with_backend(map_ds):
with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as tmp_file:
map_ds.to_netcdf(tmp_file)
read = xr.open_dataset(tmp_file)
18 changes: 13 additions & 5 deletions xarray/tests/test_sparse.py
@@ -11,7 +11,7 @@
from xarray.core.npcompat import IS_NEP18_ACTIVE
from xarray.core.pycompat import sparse_array_type

from . import assert_equal, assert_identical
from . import assert_equal, assert_identical, requires_dask

param = pytest.param
xfail = pytest.mark.xfail
@@ -22,7 +22,6 @@
)

sparse = pytest.importorskip("sparse")
dask = pytest.importorskip("dask")


def assert_sparse_equal(a, b):
@@ -852,12 +851,21 @@ def test_chunk():
assert_identical(dsc, ds)


def test_normalize_token():
@requires_dask
def test_dask_token():
import dask

s = sparse.COO.from_numpy(np.array([0, 0, 1, 2]))
a = DataArray(s)
dask.base.tokenize(a)
t1 = dask.base.tokenize(a)
t2 = dask.base.tokenize(a)
t3 = dask.base.tokenize(a + 1)
assert t1 == t2
assert t3 != t2
assert isinstance(a.data, sparse.COO)

ac = a.chunk(2)
dask.base.tokenize(ac)
t4 = dask.base.tokenize(ac)
t5 = dask.base.tokenize(ac + 1)
assert t4 != t5
assert isinstance(ac.data._meta, sparse.COO)