api: add shift and fd order option to all FD operators
mloubout committed Nov 13, 2023
1 parent 39a8486 commit afeb337
Showing 5 changed files with 185 additions and 73 deletions.
21 changes: 16 additions & 5 deletions devito/finite_differences/differentiable.py
@@ -269,21 +269,32 @@ def laplace(self):
         Generates a symbolic expression for the Laplacian, the second
         derivative w.r.t all spatial Dimensions.
         """
+        return self.laplacian()
+
+    def laplacian(self, shift=None, order=None):
+        order = order or self.space_order
         space_dims = [d for d in self.dimensions if d.is_Space]
+        shift_x0 = make_shift_x0(shift, (len(space_dims),))
         derivs = tuple('d%s2' % d.name for d in space_dims)
-        return Add(*[getattr(self, d) for d in derivs])
+        return Add(*[getattr(self, d)(x0=shift_x0(shift, space_dims[i], None, i),
+                                      fd_order=order)
+                     for i, d in enumerate(derivs)])
 
-    def div(self, shift=None):
+    def div(self, shift=None, order=None):
         space_dims = [d for d in self.dimensions if d.is_Space]
         shift_x0 = make_shift_x0(shift, (len(space_dims),))
-        return Add(*[getattr(self, 'd%s' % d.name)(x0=shift_x0(shift, d, None, i))
+        order = order or self.space_order
+        return Add(*[getattr(self, 'd%s' % d.name)(x0=shift_x0(shift, d, None, i),
+                                                   fd_order=order)
                      for i, d in enumerate(space_dims)])
 
-    def grad(self, shift=None):
+    def grad(self, shift=None, order=None):
         from devito.types.tensor import VectorFunction, VectorTimeFunction
         space_dims = [d for d in self.dimensions if d.is_Space]
         shift_x0 = make_shift_x0(shift, (len(space_dims),))
-        comps = [getattr(self, 'd%s' % d.name)(x0=shift_x0(shift, d, None, i))
+        order = order or self.space_order
+        comps = [getattr(self, 'd%s' % d.name)(x0=shift_x0(shift, d, None, i),
+                                               fd_order=order)
                  for i, d in enumerate(space_dims)]
         vec_func = VectorTimeFunction if self.is_TimeDependent else VectorFunction
         return vec_func(name='grad_%s' % self.name, time_order=self.time_order,
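
A short usage sketch (not part of the commit) of how the new `shift` and `order` keywords on these `Differentiable` methods might be called; the `Grid`/`Function` setup and names are illustrative assumptions:

    # Hypothetical example; only the keyword arguments come from the diff above.
    from devito import Grid, Function

    grid = Grid(shape=(11, 11))
    f = Function(name='f', grid=grid, space_order=8)

    f.laplacian()                   # same expression as the f.laplace property
    f.laplacian(shift=.5, order=2)  # x0 shifted by h/2, 2nd-order stencils
    f.div(shift=.5, order=2)        # each d/dx_i evaluated at x_i + h_i/2
    f.grad(order=4)                 # gradient built from 4th-order stencils
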
16 changes: 8 additions & 8 deletions devito/finite_differences/operators.py
@@ -1,4 +1,4 @@
-def div(func, shift=None):
+def div(func, shift=None, order=None):
     """
     Divergence of the input Function.
 
@@ -7,12 +7,12 @@ def div(func, shift=None):
     func : Function or TensorFunction
     """
     try:
-        return func.div(shift=shift)
+        return func.div(shift=shift, order=order)
     except AttributeError:
         return 0
 
 
-def grad(func, shift=None):
+def grad(func, shift=None, order=None):
     """
     Gradient of the input Function.
 
@@ -21,12 +21,12 @@ def grad(func, shift=None):
     func : Function or VectorFunction
     """
     try:
-        return func.grad(shift=shift)
+        return func.grad(shift=shift, order=order)
     except AttributeError:
         raise AttributeError("Gradient not supported for class %s" % func.__class__)
 
 
-def curl(func):
+def curl(func, shift=None, order=None):
     """
     Curl of the input Function.
 
@@ -35,12 +35,12 @@ def curl(func):
     func : VectorFunction
     """
     try:
-        return func.curl
+        return func.curl(shift=shift, order=order)
    except AttributeError:
         raise AttributeError("Curl only supported for 3D VectorFunction")
 
 
-def laplace(func):
+def laplace(func, shift=None, order=None):
     """
     Laplacian of the input Function.
 
@@ -49,7 +49,7 @@ def laplace(func):
     func : Function or TensorFunction
     """
     try:
-        return func.laplace
+        return func.laplacian(shift=shift, order=order)
     except AttributeError:
         return 0
 
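
For reference, a sketch of the updated free-function API (field names are hypothetical; note that `laplace(func)` now dispatches to `func.laplacian(...)` rather than reading the `func.laplace` property, and `curl` now accepts arguments):

    # Hypothetical example built on the signatures in the diff above.
    from devito import Grid, Function, VectorFunction, div, grad, curl, laplace

    grid = Grid(shape=(11, 11, 11))
    u = Function(name='u', grid=grid, space_order=4)
    v = VectorFunction(name='v', grid=grid, space_order=4)

    div(u, shift=.5, order=2)   # forwards to u.div(shift=.5, order=2)
    grad(u, order=2)            # forwards to u.grad(shift=None, order=2)
    curl(v, shift=.5)           # curl is now called, not read as a property
    laplace(u, order=2)         # now u.laplacian(shift=None, order=2)
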
70 changes: 53 additions & 17 deletions devito/types/tensor.py
@@ -129,6 +129,8 @@ def __getattr__(self, name):
         -----
         This method acts as a fallback for __getattribute__
         """
+        if name in ['_sympystr', '_pretty', '_latex']:
+            return super().__getattr__(self, name)
         try:
             return self.applyfunc(lambda x: x if x == 0 else getattr(x, name))
         except:
@@ -190,33 +192,44 @@ def values(self):
         else:
             return super(TensorFunction, self).values()
 
-    def div(self, shift=None):
+    def div(self, shift=None, order=None):
         """
         Divergence of the TensorFunction (is a VectorFunction).
         """
         comps = []
         func = vec_func(self)
         ndim = len(self.space_dimensions)
         shift_x0 = make_shift_x0(shift, (ndim, ndim))
+        order = order or self.space_order
         for i in range(len(self.space_dimensions)):
             comps.append(sum([getattr(self[j, i], 'd%s' % d.name)
-                              (x0=shift_x0(shift, d, i, j))
+                              (x0=shift_x0(shift, d, i, j), fd_order=order)
                               for j, d in enumerate(self.space_dimensions)]))
         return func._new(comps)
 
     @property
     def laplace(self):
         """
         Laplacian of the TensorFunction.
         """
+        return self.laplacian()
+
+    def laplacian(self, shift=None, order=None):
+        """
+        Laplacian of the TensorFunction.
+        """
         comps = []
         func = vec_func(self)
-        for j, d in enumerate(self.space_dimensions):
+        order = order or self.space_order
+        ndim = len(self.space_dimensions)
+        shift_x0 = make_shift_x0(shift, (ndim, ndim))
+        for j in range(ndim):
             comps.append(sum([getattr(self[j, i], 'd%s2' % d.name)
+                              (x0=shift_x0(shift, d, j, i), fd_order=order)
                               for i, d in enumerate(self.space_dimensions)]))
         return func._new(comps)
 
-    def grad(self, shift=None):
+    def grad(self, shift=None, order=None):
         raise AttributeError("Gradient of a second order tensor not supported")
 
     def new_from_mat(self, mat):
@@ -282,49 +295,72 @@ def __str__(self):
 
     __repr__ = __str__
 
-    def div(self, shift=None):
+    def div(self, shift=None, order=None):
         """
         Divergence of the VectorFunction, creates the divergence Function.
         """
         shift_x0 = make_shift_x0(shift, (len(self.space_dimensions),))
-        return sum([getattr(self[i], 'd%s' % d.name)(x0=shift_x0(shift, d, None, i))
+        order = order or self.space_order
+        return sum([getattr(self[i], 'd%s' % d.name)(x0=shift_x0(shift, d, None, i),
+                                                     fd_order=order)
                     for i, d in enumerate(self.space_dimensions)])
 
     @property
     def laplace(self):
         """
         Laplacian of the VectorFunction, creates the Laplacian VectorFunction.
         """
+        return self.laplacian()
+
+    def laplacian(self, shift=None, order=None):
+        """
+        Laplacian of the VectorFunction, creates the Laplacian VectorFunction.
+        """
         func = vec_func(self)
-        comps = [sum([getattr(s, 'd%s2' % d.name) for d in self.space_dimensions])
+        shift_x0 = make_shift_x0(shift, (len(self.space_dimensions),))
+        order = order or self.space_order
+        comps = [sum([getattr(s, 'd%s2' % d.name)(x0=shift_x0(shift, d, None, i),
+                                                  fd_order=order)
+                      for i, d in enumerate(self.space_dimensions)])
                  for s in self]
         return func._new(comps)
 
-    @property
-    def curl(self):
+    def curl(self, shift=None, order=None):
         """
         Gradient of the (3D) VectorFunction, creates the curl VectorFunction.
         """
-
         if len(self.space_dimensions) != 3:
             raise AttributeError("Curl only supported for 3D VectorFunction")
         # The curl of a VectorFunction is a VectorFunction
-        derivs = ['d%s' % d.name for d in self.space_dimensions]
-        comp1 = getattr(self[2], derivs[1]) - getattr(self[1], derivs[2])
-        comp2 = getattr(self[0], derivs[2]) - getattr(self[2], derivs[0])
-        comp3 = getattr(self[1], derivs[0]) - getattr(self[0], derivs[1])
-
+        dims = self.space_dimensions
+        derivs = ['d%s' % d.name for d in dims]
+        shift_x0 = make_shift_x0(shift, (len(dims), len(dims)))
+        order = order or self.space_order
+        comp1 = (getattr(self[2], derivs[1])(x0=shift_x0(shift, dims[1], 2, 1),
+                                             fd_order=order) -
+                 getattr(self[1], derivs[2])(x0=shift_x0(shift, dims[2], 1, 2),
+                                             fd_order=order))
+        comp2 = (getattr(self[0], derivs[2])(x0=shift_x0(shift, dims[2], 0, 2),
+                                             fd_order=order) -
+                 getattr(self[2], derivs[0])(x0=shift_x0(shift, dims[0], 2, 0),
+                                             fd_order=order))
+        comp3 = (getattr(self[1], derivs[0])(x0=shift_x0(shift, dims[0], 1, 0),
+                                             fd_order=order) -
+                 getattr(self[0], derivs[1])(x0=shift_x0(shift, dims[1], 0, 1),
+                                             fd_order=order))
         func = vec_func(self)
         return func._new(3, 1, [comp1, comp2, comp3])
 
-    def grad(self, shift=None):
+    def grad(self, shift=None, order=None):
         """
         Gradient of the VectorFunction, creates the gradient TensorFunction.
         """
         func = tens_func(self)
         ndim = len(self.space_dimensions)
         shift_x0 = make_shift_x0(shift, (ndim, ndim))
-        comps = [[getattr(f, 'd%s' % d.name)(x0=shift_x0(shift, d, i, j))
+        order = order or self.space_order
+        comps = [[getattr(f, 'd%s' % d.name)(x0=shift_x0(shift, d, i, j),
+                                             fd_order=order)
                   for j, d in enumerate(self.space_dimensions)]
                  for i, f in enumerate(self)]
         return func._new(comps)
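
A sketch of the corresponding tensor/vector usage (illustrative names; the one interface change visible above is that `curl` moves from a property to a method, so it is now called with parentheses):

    # Hypothetical example; API per the tensor.py diff above.
    from devito import Grid, TensorFunction, VectorFunction

    grid = Grid(shape=(11, 11, 11))
    tau = TensorFunction(name='tau', grid=grid, space_order=4)
    v = VectorFunction(name='v', grid=grid, space_order=4)

    tau.div(shift=.5, order=2)   # divergence of a TensorFunction -> VectorFunction
    v.laplacian(order=2)         # the v.laplace property remains, with defaults
    v.curl(shift=.5, order=2)    # previously `v.curl`, with no options
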
39 changes: 27 additions & 12 deletions tests/test_derivatives.py
@@ -523,24 +523,27 @@ def test_fd_adjoint(self, so, ndim, derivative, adjoint_name):
     def test_shifted_div(self, shift, ndim):
         grid = Grid(tuple([11]*ndim))
         f = Function(name="f", grid=grid, space_order=4)
-        df = div(f, shift=shift).evaluate
-        ref = 0
-        for i, d in enumerate(grid.dimensions):
-            x0 = (None if shift is None else d + shift[i] * d.spacing if
-                  type(shift) is tuple else d + shift * d.spacing)
-            ref += getattr(f, 'd%s' % d.name)(x0=x0)
-        assert df == ref.evaluate
+        for order in [None, 2]:
+            df = div(f, shift=shift, order=order).evaluate
+            ref = 0
+            for i, d in enumerate(grid.dimensions):
+                x0 = (None if shift is None else d + shift[i] * d.spacing if
+                      type(shift) is tuple else d + shift * d.spacing)
+                ref += getattr(f, 'd%s' % d.name)(x0=x0, fd_order=order)
+            assert df == ref.evaluate
 
     @pytest.mark.parametrize('shift, ndim', [(None, 2), (.5, 2), (.5, 3),
                                              ((.5, .5, .5), 3)])
     def test_shifted_grad(self, shift, ndim):
         grid = Grid(tuple([11]*ndim))
         f = Function(name="f", grid=grid, space_order=4)
-        g = grad(f, shift=shift).evaluate
-        for i, (d, gi) in enumerate(zip(grid.dimensions, g)):
-            x0 = (None if shift is None else d + shift[i] * d.spacing if
-                  type(shift) is tuple else d + shift * d.spacing)
-            assert gi == getattr(f, 'd%s' % d.name)(x0=x0).evaluate
+        for order in [None, 2]:
+            g = grad(f, shift=shift, order=order).evaluate
+            for i, (d, gi) in enumerate(zip(grid.dimensions, g)):
+                x0 = (None if shift is None else d + shift[i] * d.spacing if
+                      type(shift) is tuple else d + shift * d.spacing)
+                gk = getattr(f, 'd%s' % d.name)(x0=x0, fd_order=order).evaluate
+                assert gi == gk
 
     def test_substitution(self):
         grid = Grid((11, 11))
@@ -791,6 +794,18 @@ def test_tensor_algebra(self):
         assert all(isinstance(i, IndexDerivative) for i in v)
         assert all(zip([Add(*i.args) for i in grad(f).evaluate], v.evaluate))
 
+    def test_laplacian_opt(self):
+        grid = Grid(shape=(4, 4))
+        f = Function(name='f', grid=grid, space_order=4)
+
+        assert f.laplacian() == f.laplace
+        df = f.laplacian(order=2, shift=.5)
+        for (v, d) in zip(df.args, grid.dimensions):
+            assert v.dims[0] == d
+            assert v.fd_order == 2
+            assert v.deriv_order == 2
+            assert d in v.x0
+
 
 def bypass_uneval(expr):
     unevals = expr.find(EvalDerivative)
(diff of the fifth changed file did not load)