diff --git a/devito/finite_differences/differentiable.py b/devito/finite_differences/differentiable.py
index a10c3932d8..3ed317048e 100644
--- a/devito/finite_differences/differentiable.py
+++ b/devito/finite_differences/differentiable.py
@@ -269,21 +269,32 @@ def laplace(self):
         Generates a symbolic expression for the Laplacian, the second
         derivative w.r.t all spatial Dimensions.
         """
+        return self.laplacian()
+
+    def laplacian(self, shift=None, order=None):
+        order = order or self.space_order
         space_dims = [d for d in self.dimensions if d.is_Space]
+        shift_x0 = make_shift_x0(shift, (len(space_dims),))
         derivs = tuple('d%s2' % d.name for d in space_dims)
-        return Add(*[getattr(self, d) for d in derivs])
+        return Add(*[getattr(self, d)(x0=shift_x0(shift, space_dims[i], None, i),
+                                      fd_order=order)
+                     for i, d in enumerate(derivs)])
 
-    def div(self, shift=None):
+    def div(self, shift=None, order=None):
         space_dims = [d for d in self.dimensions if d.is_Space]
         shift_x0 = make_shift_x0(shift, (len(space_dims),))
-        return Add(*[getattr(self, 'd%s' % d.name)(x0=shift_x0(shift, d, None, i))
+        order = order or self.space_order
+        return Add(*[getattr(self, 'd%s' % d.name)(x0=shift_x0(shift, d, None, i),
+                                                   fd_order=order)
                      for i, d in enumerate(space_dims)])
 
-    def grad(self, shift=None):
+    def grad(self, shift=None, order=None):
         from devito.types.tensor import VectorFunction, VectorTimeFunction
         space_dims = [d for d in self.dimensions if d.is_Space]
         shift_x0 = make_shift_x0(shift, (len(space_dims),))
-        comps = [getattr(self, 'd%s' % d.name)(x0=shift_x0(shift, d, None, i))
+        order = order or self.space_order
+        comps = [getattr(self, 'd%s' % d.name)(x0=shift_x0(shift, d, None, i),
+                                               fd_order=order)
                  for i, d in enumerate(space_dims)]
         vec_func = VectorTimeFunction if self.is_TimeDependent else VectorFunction
         return vec_func(name='grad_%s' % self.name, time_order=self.time_order,
diff --git a/devito/finite_differences/operators.py b/devito/finite_differences/operators.py
index 1d0a520d1a..1bf1ad5a42 100644
--- a/devito/finite_differences/operators.py
+++ b/devito/finite_differences/operators.py
@@ -1,4 +1,4 @@
-def div(func, shift=None):
+def div(func, shift=None, order=None):
     """
     Divergence of the input Function.
 
@@ -7,12 +7,12 @@ def div(func, shift=None):
     func : Function or TensorFunction
     """
     try:
-        return func.div(shift=shift)
+        return func.div(shift=shift, order=order)
     except AttributeError:
         return 0
 
 
-def grad(func, shift=None):
+def grad(func, shift=None, order=None):
     """
     Gradient of the input Function.
 
@@ -21,12 +21,12 @@ def grad(func, shift=None):
     func : Function or VectorFunction
     """
     try:
-        return func.grad(shift=shift)
+        return func.grad(shift=shift, order=order)
     except AttributeError:
         raise AttributeError("Gradient not supported for class %s" % func.__class__)
 
 
-def curl(func):
+def curl(func, shift=None, order=None):
     """
     Curl of the input Function.
 
@@ -35,12 +35,12 @@ def curl(func):
     func : VectorFunction
     """
     try:
-        return func.curl
+        return func.curl(shift=shift, order=order)
     except AttributeError:
         raise AttributeError("Curl only supported for 3D VectorFunction")
 
 
-def laplace(func):
+def laplace(func, shift=None, order=None):
     """
     Laplacian of the input Function.
@@ -49,7 +49,7 @@ def laplace(func):
     func : Function or TensorFunction
     """
     try:
-        return func.laplace
+        return func.laplacian(shift=shift, order=order)
     except AttributeError:
         return 0
diff --git a/devito/types/tensor.py b/devito/types/tensor.py
index 62a555ef78..3f0c3c8e4c 100644
--- a/devito/types/tensor.py
+++ b/devito/types/tensor.py
@@ -129,6 +129,8 @@ def __getattr__(self, name):
         -----
         This method acts as a fallback for __getattribute__
         """
+        if name in ['_sympystr', '_pretty', '_latex']:
+            return super().__getattr__(self, name)
         try:
             return self.applyfunc(lambda x: x if x == 0 else getattr(x, name))
         except:
@@ -190,7 +192,7 @@ def values(self):
         else:
             return super(TensorFunction, self).values()
 
-    def div(self, shift=None):
+    def div(self, shift=None, order=None):
         """
         Divergence of the TensorFunction (is a VectorFunction).
         """
@@ -198,25 +200,36 @@ def div(self, shift=None):
         func = vec_func(self)
         ndim = len(self.space_dimensions)
         shift_x0 = make_shift_x0(shift, (ndim, ndim))
+        order = order or self.space_order
         for i in range(len(self.space_dimensions)):
             comps.append(sum([getattr(self[j, i], 'd%s' % d.name)
-                              (x0=shift_x0(shift, d, i, j))
+                              (x0=shift_x0(shift, d, i, j), fd_order=order)
                               for j, d in enumerate(self.space_dimensions)]))
         return func._new(comps)
 
     @property
     def laplace(self):
+        """
+        Laplacian of the TensorFunction.
+        """
+        return self.laplacian()
+
+    def laplacian(self, shift=None, order=None):
         """
         Laplacian of the TensorFunction.
         """
         comps = []
         func = vec_func(self)
-        for j, d in enumerate(self.space_dimensions):
+        order = order or self.space_order
+        ndim = len(self.space_dimensions)
+        shift_x0 = make_shift_x0(shift, (ndim, ndim))
+        for j in range(ndim):
             comps.append(sum([getattr(self[j, i], 'd%s2' % d.name)
+                              (x0=shift_x0(shift, d, j, i), fd_order=order)
                               for i, d in enumerate(self.space_dimensions)]))
         return func._new(comps)
 
-    def grad(self, shift=None):
+    def grad(self, shift=None, order=None):
         raise AttributeError("Gradient of a second order tensor not supported")
 
     def new_from_mat(self, mat):
@@ -282,49 +295,72 @@ def __str__(self):
 
     __repr__ = __str__
 
-    def div(self, shift=None):
+    def div(self, shift=None, order=None):
         """
         Divergence of the VectorFunction, creates the divergence Function.
         """
         shift_x0 = make_shift_x0(shift, (len(self.space_dimensions),))
-        return sum([getattr(self[i], 'd%s' % d.name)(x0=shift_x0(shift, d, None, i))
+        order = order or self.space_order
+        return sum([getattr(self[i], 'd%s' % d.name)(x0=shift_x0(shift, d, None, i),
+                                                     fd_order=order)
                     for i, d in enumerate(self.space_dimensions)])
 
     @property
     def laplace(self):
+        """
+        Laplacian of the VectorFunction, creates the Laplacian VectorFunction.
+        """
+        return self.laplacian()
+
+    def laplacian(self, shift=None, order=None):
         """
         Laplacian of the VectorFunction, creates the Laplacian VectorFunction.
         """
         func = vec_func(self)
-        comps = [sum([getattr(s, 'd%s2' % d.name) for d in self.space_dimensions])
+        shift_x0 = make_shift_x0(shift, (len(self.space_dimensions),))
+        order = order or self.space_order
+        comps = [sum([getattr(s, 'd%s2' % d.name)(x0=shift_x0(shift, d, None, i),
+                                                  fd_order=order)
+                      for i, d in enumerate(self.space_dimensions)])
                  for s in self]
         return func._new(comps)
 
-    @property
-    def curl(self):
+    def curl(self, shift=None, order=None):
         """
         Gradient of the (3D) VectorFunction, creates the curl VectorFunction.
""" - if len(self.space_dimensions) != 3: raise AttributeError("Curl only supported for 3D VectorFunction") # The curl of a VectorFunction is a VectorFunction - derivs = ['d%s' % d.name for d in self.space_dimensions] - comp1 = getattr(self[2], derivs[1]) - getattr(self[1], derivs[2]) - comp2 = getattr(self[0], derivs[2]) - getattr(self[2], derivs[0]) - comp3 = getattr(self[1], derivs[0]) - getattr(self[0], derivs[1]) - + dims = self.space_dimensions + derivs = ['d%s' % d.name for d in dims] + shift_x0 = make_shift_x0(shift, (len(dims), len(dims))) + order = order or self.space_order + comp1 = (getattr(self[2], derivs[1])(x0=shift_x0(shift, dims[1], 2, 1), + fd_order=order) - + getattr(self[1], derivs[2])(x0=shift_x0(shift, dims[2], 1, 2), + fd_order=order)) + comp2 = (getattr(self[0], derivs[2])(x0=shift_x0(shift, dims[2], 0, 2), + fd_order=order) - + getattr(self[2], derivs[0])(x0=shift_x0(shift, dims[0], 2, 0), + fd_order=order)) + comp3 = (getattr(self[1], derivs[0])(x0=shift_x0(shift, dims[0], 1, 0), + fd_order=order) - + getattr(self[0], derivs[1])(x0=shift_x0(shift, dims[1], 0, 1), + fd_order=order)) func = vec_func(self) return func._new(3, 1, [comp1, comp2, comp3]) - def grad(self, shift=None): + def grad(self, shift=None, order=None): """ Gradient of the VectorFunction, creates the gradient TensorFunction. """ func = tens_func(self) ndim = len(self.space_dimensions) shift_x0 = make_shift_x0(shift, (ndim, ndim)) - comps = [[getattr(f, 'd%s' % d.name)(x0=shift_x0(shift, d, i, j)) + order = order or self.space_order + comps = [[getattr(f, 'd%s' % d.name)(x0=shift_x0(shift, d, i, j), + fd_order=order) for j, d in enumerate(self.space_dimensions)] for i, f in enumerate(self)] return func._new(comps) diff --git a/tests/test_derivatives.py b/tests/test_derivatives.py index f1c1951c97..52c84956be 100644 --- a/tests/test_derivatives.py +++ b/tests/test_derivatives.py @@ -523,24 +523,27 @@ def test_fd_adjoint(self, so, ndim, derivative, adjoint_name): def test_shifted_div(self, shift, ndim): grid = Grid(tuple([11]*ndim)) f = Function(name="f", grid=grid, space_order=4) - df = div(f, shift=shift).evaluate - ref = 0 - for i, d in enumerate(grid.dimensions): - x0 = (None if shift is None else d + shift[i] * d.spacing if - type(shift) is tuple else d + shift * d.spacing) - ref += getattr(f, 'd%s' % d.name)(x0=x0) - assert df == ref.evaluate + for order in [None, 2]: + df = div(f, shift=shift, order=order).evaluate + ref = 0 + for i, d in enumerate(grid.dimensions): + x0 = (None if shift is None else d + shift[i] * d.spacing if + type(shift) is tuple else d + shift * d.spacing) + ref += getattr(f, 'd%s' % d.name)(x0=x0, fd_order=order) + assert df == ref.evaluate @pytest.mark.parametrize('shift, ndim', [(None, 2), (.5, 2), (.5, 3), ((.5, .5, .5), 3)]) def test_shifted_grad(self, shift, ndim): grid = Grid(tuple([11]*ndim)) f = Function(name="f", grid=grid, space_order=4) - g = grad(f, shift=shift).evaluate - for i, (d, gi) in enumerate(zip(grid.dimensions, g)): - x0 = (None if shift is None else d + shift[i] * d.spacing if - type(shift) is tuple else d + shift * d.spacing) - assert gi == getattr(f, 'd%s' % d.name)(x0=x0).evaluate + for order in [None, 2]: + g = grad(f, shift=shift, order=order).evaluate + for i, (d, gi) in enumerate(zip(grid.dimensions, g)): + x0 = (None if shift is None else d + shift[i] * d.spacing if + type(shift) is tuple else d + shift * d.spacing) + gk = getattr(f, 'd%s' % d.name)(x0=x0, fd_order=order).evaluate + assert gi == gk def test_substitution(self): grid = 
@@ -791,6 +794,18 @@ def test_tensor_algebra(self):
         assert all(isinstance(i, IndexDerivative) for i in v)
         assert all(zip([Add(*i.args) for i in grad(f).evaluate], v.evaluate))
 
+    def test_laplacian_opt(self):
+        grid = Grid(shape=(4, 4))
+        f = Function(name='f', grid=grid, space_order=4)
+
+        assert f.laplacian() == f.laplace
+        df = f.laplacian(order=2, shift=.5)
+        for (v, d) in zip(df.args, grid.dimensions):
+            assert v.dims[0] == d
+            assert v.fd_order == 2
+            assert v.deriv_order == 2
+            assert d in v.x0
+
 
 def bypass_uneval(expr):
     unevals = expr.find(EvalDerivative)
diff --git a/tests/test_tensors.py b/tests/test_tensors.py
index 690432b4cd..866a41739e 100644
--- a/tests/test_tensors.py
+++ b/tests/test_tensors.py
@@ -4,7 +4,8 @@
 import pytest
 
 from devito import VectorFunction, TensorFunction, VectorTimeFunction, TensorTimeFunction
-from devito import Grid, Function, TimeFunction, Dimension, Eq, div, grad
+from devito import Grid, Function, TimeFunction, Dimension, Eq, div, grad, curl
+from devito.symbolics import retrieve_derivatives
 from devito.types import NODE
 
 
@@ -286,33 +287,34 @@ def test_partial_devito_tens(func1):
 def test_shifted_grad_of_vector(shift, ndim):
     grid = Grid(tuple([11]*ndim))
     f = VectorFunction(name="f", grid=grid, space_order=4)
-    gf = grad(f, shift=shift).evaluate
+    for order in [None, 2]:
+        gf = grad(f, shift=shift, order=order).evaluate
 
-    ref = []
-    for i in range(len(grid.dimensions)):
-        for j, d in enumerate(grid.dimensions):
-            x0 = (None if shift is None else d + shift[i][j] * d.spacing if
-                  type(shift) is tuple else d + shift * d.spacing)
-            ge = getattr(f[i], 'd%s' % d.name)(x0=x0)
-            ref.append(ge.evaluate)
+        ref = []
+        for i in range(len(grid.dimensions)):
+            for j, d in enumerate(grid.dimensions):
+                x0 = (None if shift is None else d + shift[i][j] * d.spacing if
+                      type(shift) is tuple else d + shift * d.spacing)
+                ge = getattr(f[i], 'd%s' % d.name)(x0=x0, fd_order=order)
+                ref.append(ge.evaluate)
 
-    for i, d in enumerate(gf):
-        assert d == ref[i]
+        for i, d in enumerate(gf):
+            assert d == ref[i]
 
 
 @pytest.mark.parametrize('shift, ndim', [(None, 2), (.5, 2), (.5, 3), ((.5, .5, .5), 3)])
 def test_shifted_div_of_vector(shift, ndim):
     grid = Grid(tuple([11]*ndim))
     v = VectorFunction(name="f", grid=grid, space_order=4)
-    df = div(v, shift=shift).evaluate
-    ref = 0
-
-    for i, d in enumerate(grid.dimensions):
-        x0 = (None if shift is None else d + shift[i] * d.spacing if
-              type(shift) is tuple else d + shift * d.spacing)
-        ref += getattr(v[i], 'd%s' % d.name)(x0=x0)
+    for order in [None, 2]:
+        df = div(v, shift=shift, order=order).evaluate
+        ref = 0
+        for i, d in enumerate(grid.dimensions):
+            x0 = (None if shift is None else d + shift[i] * d.spacing if
+                  type(shift) is tuple else d + shift * d.spacing)
+            ref += getattr(v[i], 'd%s' % d.name)(x0=x0, fd_order=order)
 
-    assert df == ref.evaluate
+        assert df == ref.evaluate
 
 
 @pytest.mark.parametrize('shift, ndim', [(None, 2), (.5, 2), (.5, 3),
@@ -320,17 +322,65 @@ def test_shifted_div_of_vector(shift, ndim):
 def test_shifted_div_of_tensor(shift, ndim):
     grid = Grid(tuple([11]*ndim))
     f = TensorFunction(name="f", grid=grid, space_order=4)
-    df = div(f, shift=shift).evaluate
+    for order in [None, 2]:
+        df = div(f, shift=shift, order=order).evaluate
 
-    ref = []
-    for i, a in enumerate(grid.dimensions):
-        elems = []
-        for j, d in reversed(list(enumerate(grid.dimensions))):
-            x0 = (None if shift is None else d + shift[i][j] * d.spacing if
-                  type(shift) is tuple else d + shift * d.spacing)
-            ge = getattr(f[i, j], 'd%s' % d.name)(x0=x0)
-            elems.append(ge.evaluate)
-        ref.append(sum(elems))
+        ref = []
+        for i, a in enumerate(grid.dimensions):
+            elems = []
+            for j, d in reversed(list(enumerate(grid.dimensions))):
+                x0 = (None if shift is None else d + shift[i][j] * d.spacing if
+                      type(shift) is tuple else d + shift * d.spacing)
+                ge = getattr(f[i, j], 'd%s' % d.name)(x0=x0, fd_order=order)
+                elems.append(ge.evaluate)
+            ref.append(sum(elems))
+
+        for i, d in enumerate(df):
+            assert d == ref[i]
+
+
+@pytest.mark.parametrize('shift, ndim', [(None, 3), (.5, 3),
+                                         (tuple([tuple([.5]*3)]*3), 3)])
+def test_shifted_curl_of_vector(shift, ndim):
+    grid = Grid(tuple([11]*ndim))
+    f = VectorFunction(name="f", grid=grid, space_order=4)
+    for order in [None, 2]:
+        df = curl(f, shift=shift, order=order)
+        drvs = retrieve_derivatives(df)
+        dorder = order or 4
+        for drv in drvs:
+            assert drv.expr in f
+            assert drv.fd_order == dorder
+            if shift is None:
+                assert drv.x0 == {}
+            else:
+                assert drv.dims[0] in drv.x0
 
-    for i, d in enumerate(df):
-        assert d == ref[i]
+
+@pytest.mark.parametrize('shift, ndim', [(None, 2), (.5, 2), (.5, 3), ((.5, .5, .5), 3)])
+def test_shifted_lap_of_vector(shift, ndim):
+    grid = Grid(tuple([11]*ndim))
+    v = VectorFunction(name="f", grid=grid, space_order=4)
+    assert v.laplacian() == v.laplace
+    for order in [None, 2]:
+        df = v.laplacian(shift=shift, order=order)
+        for (vi, dfvi) in zip(v, df):
+            ref = vi.laplacian(shift=shift, order=order)
+            assert dfvi == ref
+
+
+@pytest.mark.parametrize('shift, ndim', [(None, 2), (.5, 2), (.5, 3),
+                                         (tuple([tuple([.5]*3)]*3), 3)])
+def test_shifted_lap_of_tensor(shift, ndim):
+    grid = Grid(tuple([11]*ndim))
+    v = TensorFunction(name="f", grid=grid, space_order=4)
+    assert v.laplacian() == v.laplace
+    for order in [None, 2]:
+        df = v.laplacian(shift=shift, order=order)
+        for j in range(ndim):
+            ref = 0
+            for i, d in enumerate(v.space_dimensions):
+                x0 = (None if shift is None else d + shift[i][j] * d.spacing if
+                      type(shift) is tuple else d + shift * d.spacing)
+                ref += getattr(v[j, i], 'd%s2' % d.name)(x0=x0, fd_order=order)
+            assert df[j] == ref
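
For reference, a minimal usage sketch of the API extended by this patch (not part of the diff itself). The calls mirror the tests above; the grid shape and the names `f`, `v`, `df`, etc. are illustrative only:

```python
from devito import Grid, Function, VectorFunction, div, grad, curl

# 3D grid, so that curl is defined
grid = Grid(shape=(11, 11, 11))
f = Function(name='f', grid=grid, space_order=4)
v = VectorFunction(name='v', grid=grid, space_order=4)

# Defaults are unchanged: laplacian() reproduces the laplace property,
# discretised at the function's own space_order with no shift
assert f.laplacian() == f.laplace

# New keywords: evaluate at shifted positions (x0 = x + 0.5*h_x, ...)
# with a reduced, second-order stencil
df = div(f, shift=.5, order=2)
gf = grad(f, shift=.5, order=2)
cv = curl(v, shift=.5, order=2)   # curl now accepts shift/order too
lf = f.laplacian(shift=.5, order=2)
```

Passing `order=None` (the default) falls back to `self.space_order`, so existing user code is unaffected; `shift` may be a scalar applied to every dimension or a tuple (of tuples, for tensor-valued operands) with one entry per dimension, as exercised by the parametrized tests.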