CI: add large radius precomputed interp test
mloubout committed Jul 27, 2023
1 parent cc3008e commit 3d51c78
Showing 10 changed files with 56 additions and 40 deletions.
2 changes: 1 addition & 1 deletion devito/ir/clusters/analysis.py
@@ -100,7 +100,7 @@ def _callback(self, clusters, d, prefix):
# False alarm, the dependence is over a locally-defined symbol
continue

if dep.is_reduction and not (d.is_Custom and d.is_Derived):
if dep.is_reduction:
is_parallel_atomic = True
continue

6 changes: 3 additions & 3 deletions devito/ir/support/basic.py
@@ -660,9 +660,9 @@ def is_const(self, dim):
"""
True if a constant dependence, that is no Dimensions involved, False otherwise.
"""
return (self.source.aindices[dim] is None and
self.sink.aindices[dim] is None and
self.distance_mapper[dim] == 0)
return (self.source.aindices.get(dim, None) is None and
self.sink.aindices.get(dim, None) is None and
self.distance_mapper.get(dim, 0) == 0)

@memoized_meth
def is_carried(self, dim=None):
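A toy illustration of the behavioural change in is_const (plain dicts stand in for the access-index maps; the values are hypothetical): with plain indexing, a Dimension the accesses never reference raises KeyError, whereas .get lets it count as "no Dimensions involved", i.e. a constant dependence.

# Hypothetical stand-ins for the source/sink access-index maps.
aindices = {'x': None}
distance_mapper = {'x': 0}

dim = 'y'  # a dimension the accesses never reference
try:
    aindices[dim]                                   # old lookup style: KeyError
except KeyError:
    pass
is_const = (aindices.get(dim, None) is None and
            distance_mapper.get(dim, 0) == 0)       # new style: treated as constant
assert is_const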
13 changes: 6 additions & 7 deletions devito/operations/interpolators.py
@@ -3,9 +3,10 @@
import sympy
from cached_property import cached_property

from devito.finite_differences.differentiable import Mul
from devito.finite_differences.elementary import floor
from devito.symbolics import retrieve_function_carriers, INT
from devito.tools import as_tuple, flatten, prod
from devito.tools import as_tuple, flatten
from devito.types import (ConditionalDimension, Eq, Inc, Evaluable, Symbol,
CustomDimension)
from devito.types.utils import DimensionTuple
@@ -328,11 +329,9 @@ class LinearInterpolator(WeightedInterpolator):
"""
@property
def _weights(self):
# (1 - p) * (1 - rd) + rd * p
# simplified for better arithmetic
c = [1 - p + rd * (2*p - 1)
for (p, d, rd) in zip(self._point_symbols, self._gdim, self._rdim)]
return prod(c)
c = [(1 - p) * (1 - r) + p * r
for (p, d, r) in zip(self._point_symbols, self._gdim, self._rdim)]
return Mul(*c)

@cached_property
def _point_symbols(self):
@@ -375,5 +374,5 @@ def interpolation_coeffs(self):
@property
def _weights(self):
ddim, cdim = self.interpolation_coeffs.dimensions[1:]
return prod([self.interpolation_coeffs.subs({ddim: ri, cdim: rd-rd.symbolic_min})
return Mul(*[self.interpolation_coeffs.subs({ddim: ri, cdim: rd-rd.symbolic_min})
for (ri, rd) in enumerate(self._rdim)])
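As a plain-NumPy sketch (not the Devito symbolic objects above), the restored per-dimension factor (1 - p)*(1 - r) + p*r in LinearInterpolator._weights evaluates to 1 - p at the left neighbour (r = 0) and p at the right one (r = 1), so the product taken by Mul(*c) reproduces the usual bilinear weights:

import numpy as np

def bilinear_weights(px, py):
    # px, py: fractional position of the point inside its grid cell
    w = np.empty((2, 2))
    for rx in (0, 1):
        for ry in (0, 1):
            wx = (1 - px) * (1 - rx) + px * rx   # 1 - px at rx=0, px at rx=1
            wy = (1 - py) * (1 - ry) + py * ry
            w[rx, ry] = wx * wy
    return w

w = bilinear_weights(0.25, 0.75)
assert np.isclose(w.sum(), 1.0)            # weights form a partition of unity
assert np.isclose(w[0, 0], 0.75 * 0.25)    # (1 - px) * (1 - py)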
6 changes: 3 additions & 3 deletions devito/passes/iet/mpi.py
@@ -47,9 +47,9 @@ def _drop_halospots(iet):

# If a HaloSpot is outside any iteration it is not needed
for iters, halo_spots in MapNodes(Iteration, HaloSpot, 'groupby').visit(iet).items():
if not iters and halo_spots:
for hs in halo_spots:
for f in hs.fmapper:
for hs in halo_spots:
for f, v in hs.fmapper.items():
if not iters and v.loc_indices:
mapper[hs].add(f)

# Transform the IET introducing the "reduced" HaloSpots
6 changes: 5 additions & 1 deletion devito/passes/iet/parpragma.py
@@ -285,6 +285,10 @@ def _select_candidates(self, candidates):
if i.is_Vectorized:
break

# Also, we do not want to collapse small atomic reductions
if i.is_ParallelAtomic and i.dim.is_Custom:
break

# Would there be enough work per parallel iteration?
nested = candidates[n+1:]
if nested:
@@ -422,7 +426,7 @@ def _make_guard(self, parregion):

def _make_nested_partree(self, partree):
# Apply heuristic
if self.nhyperthreads <= self.nested:
if self.nhyperthreads <= self.nested or partree.root.is_ParallelAtomic:
return partree

# Note: there might be multiple sub-trees amenable to nested parallelism,
2 changes: 1 addition & 1 deletion tests/test_buffering.py
@@ -272,7 +272,7 @@ def test_over_injection():

# Check generated code
assert len(retrieve_iteration_tree(op1)) == \
7 + bool(configuration['language'] != 'C')
8 + bool(configuration['language'] != 'C')
buffers = [i for i in FindSymbols().visit(op1) if i.is_Array]
assert len(buffers) == 1

5 changes: 2 additions & 3 deletions tests/test_dimension.py
@@ -10,7 +10,6 @@
Dimension, DefaultDimension, SubDimension, switchconfig,
SubDomain, Lt, Le, Gt, Ge, Ne, Buffer, sin, SpaceDimension,
CustomDimension, dimensions, configuration)
from devito.arch.compiler import IntelCompiler, OneapiCompiler
from devito.ir.iet import (Conditional, Expression, Iteration, FindNodes,
FindSymbols, retrieve_iteration_tree)
from devito.symbolics import indexify, retrieve_functions, IntDiv, INT
@@ -1417,8 +1416,7 @@ def test_affiness(self):
iterations = [i for i in FindNodes(Iteration).visit(op) if i.dim is not time]
assert all(i.is_Affine for i in iterations)

@switchconfig(condition=isinstance(configuration['compiler'],
(IntelCompiler, OneapiCompiler)), safe_math=True)
@switchconfig(safe_math=True)
def test_sparse_time_function(self):
nt = 20

@@ -1452,6 +1450,7 @@ def test_sparse_time_function(self):
# Note the endpoint of the range is 12 because we inject at p.forward
for i in range(1, 12):
assert p.data[i].sum() == i - 1
print(p.data[i, 10, 10, 10])
assert p.data[i, 10, 10, 10] == i - 1
for i in range(12, 20):
assert np.all(p.data[i] == 0)
8 changes: 4 additions & 4 deletions tests/test_dse.py
@@ -2629,7 +2629,7 @@ def test_issue_2163(self):

def test_dtype_aliases(self):
a = np.arange(64).reshape((8, 8))
grid = Grid(shape=a.shape, extent=(8, 8))
grid = Grid(shape=a.shape, extent=(7, 7))

so = 2
f = Function(name='f', grid=grid, space_order=so, dtype=np.int32)
@@ -2640,7 +2640,7 @@ def test_dtype_aliases(self):
op.apply()

assert FindNodes(Expression).visit(op)[0].dtype == np.float32
assert np.all(fo.data[:-1, :-1] == 6)
assert np.all(fo.data[:-1, :-1] == 8)


class TestIsoAcoustic(object):
@@ -2685,13 +2685,13 @@ def test_fullopt(self):
bns, _ = assert_blocking(op1, {'x0_blk0'}) # due to loop blocking

assert summary0[('section0', None)].ops == 50
assert summary0[('section1', None)].ops == 50
assert summary0[('section1', None)].ops == 44
assert np.isclose(summary0[('section0', None)].oi, 2.851, atol=0.001)

assert summary1[('section0', None)].ops == 9
assert summary1[('section1', None)].ops == 9
assert summary1[('section2', None)].ops == 31
assert summary1[('section3', None)].ops == 32
assert summary1[('section3', None)].ops == 26
assert np.isclose(summary1[('section2', None)].oi, 1.767, atol=0.001)

assert np.allclose(u0.data, u1.data, atol=10e-5)
1 change: 1 addition & 0 deletions tests/test_gradient.py
@@ -15,6 +15,7 @@
class TestGradient(object):

@skipif(['chkpnt', 'cpu64-icc'])
@switchconfig(safe_math=True)
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
@pytest.mark.parametrize('opt', [('advanced', {'openmp': True}),
('noop', {'openmp': True})])
47 changes: 30 additions & 17 deletions tests/test_interpolation.py
@@ -84,25 +84,31 @@ def custom_points(grid, ranges, npoints, name='points'):
return points


def precompute_linear_interpolation(points, grid, origin):
""" Sample precompute function that, given point and grid information
precomputes gridpoints and interpolation coefficients according to a linear
scheme to be used in PrecomputedSparseFunction.
def precompute_linear_interpolation(points, grid, origin, r=2):
"""
Sample precompute function that, given point and grid information
precomputes gridpoints and interpolation coefficients according to a linear
scheme to be used in PrecomputedSparseFunction.
Allow larger radius with zero weights for testing.
"""
gridpoints = [tuple(floor((point[i]-origin[i])/grid.spacing[i])
for i in range(len(point))) for point in points]

interpolation_coeffs = np.zeros((len(points), 2, 2))
interpolation_coeffs = np.zeros((len(points), grid.dim, r))
rs = r // 2 - 1
for i, point in enumerate(points):
for d in range(grid.dim):
interpolation_coeffs[i, d, 0] = ((gridpoints[i][d] + 1)*grid.spacing[d] -
point[d])/grid.spacing[d]
interpolation_coeffs[i, d, 1] = (point[d]-gridpoints[i][d]*grid.spacing[d])\
gd = gridpoints[i][d]
interpolation_coeffs[i, d, rs] = ((gd + 1)*grid.spacing[d] -
point[d])/grid.spacing[d]
interpolation_coeffs[i, d, rs+1] = (point[d]-gd*grid.spacing[d])\
/ grid.spacing[d]
return gridpoints, interpolation_coeffs
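A usage sketch of the helper above (grid shape, extent and point coordinates are made-up values for illustration; assumes a 2D Devito Grid): with r=4 the two linear weights per dimension land in slots rs = r//2 - 1 = 1 and rs + 1 = 2, while the padding slots stay zero, which is what the large-radius tests below rely on.

import numpy as np
from devito import Grid

grid = Grid(shape=(11, 11), extent=(1., 1.))   # spacing of 0.1 along both dimensions
origin = (0., 0.)
points = np.array([[.05, .95], [.23, .45]])

gridpoints, coeffs = precompute_linear_interpolation(points, grid, origin, r=4)

assert coeffs.shape == (len(points), grid.dim, 4)
assert np.allclose(coeffs[:, :, [0, 3]], 0.)             # padding slots carry no weight
assert np.allclose(coeffs[:, :, 1:3].sum(axis=-1), 1.)   # linear weights sum to 1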


def test_precomputed_interpolation():
@pytest.mark.parametrize('r', [2, 4, 6])
def test_precomputed_interpolation(r):
""" Test interpolation with PrecomputedSparseFunction which accepts
precomputed values for interpolation coefficients
"""
@@ -123,7 +129,8 @@ def init(data):
m = Function(name='m', grid=grid, initializer=init, space_order=0)

gridpoints, interpolation_coeffs = precompute_linear_interpolation(points,
grid, origin)
grid, origin,
r=r)

sf = PrecomputedSparseFunction(name='s', grid=grid, r=r, npoint=len(points),
gridpoints=gridpoints,
@@ -136,7 +143,8 @@ def init(data):
assert(all(np.isclose(sf.data, expected_values, rtol=1e-6)))


def test_precomputed_interpolation_time():
@pytest.mark.parametrize('r', [2, 4, 6])
def test_precomputed_interpolation_time(r):
""" Test interpolation with PrecomputedSparseFunction which accepts
precomputed values for interpolation coefficients, but this time
with a TimeFunction
@@ -154,7 +162,8 @@ def test_precomputed_interpolation_time():
u.data[it, :] = it

gridpoints, interpolation_coeffs = precompute_linear_interpolation(points,
grid, origin)
grid, origin,
r=r)

sf = PrecomputedSparseTimeFunction(name='s', grid=grid, r=r, npoint=len(points),
nt=5, gridpoints=gridpoints,
@@ -171,7 +180,8 @@ def test_precomputed_interpolation_time():
assert np.allclose(sf.data[it, :], it)


def test_precomputed_injection():
@pytest.mark.parametrize('r', [2, 4, 6])
def test_precomputed_injection(r):
"""Test injection with PrecomputedSparseFunction which accepts
precomputed values for interpolation coefficients
"""
@@ -188,7 +198,8 @@ def test_precomputed_injection():
m.data[:] = 0.

gridpoints, interpolation_coeffs = precompute_linear_interpolation(coords,
m.grid, origin)
m.grid, origin,
r=r)

sf = PrecomputedSparseFunction(name='s', grid=m.grid, r=r, npoint=len(coords),
gridpoints=gridpoints,
@@ -206,7 +217,8 @@ def test_precomputed_injection():
assert np.allclose(m.data[indices], result, rtol=1.e-5)


def test_precomputed_injection_time():
@pytest.mark.parametrize('r', [2, 4, 6])
def test_precomputed_injection_time(r):
"""Test injection with PrecomputedSparseFunction which accepts
precomputed values for interpolation coefficients
"""
@@ -224,7 +236,8 @@ def test_precomputed_injection_time():
m.data[:] = 0.

gridpoints, interpolation_coeffs = precompute_linear_interpolation(coords,
m.grid, origin)
m.grid, origin,
r=r)

sf = PrecomputedSparseTimeFunction(name='s', grid=m.grid, r=r, npoint=len(coords),
gridpoints=gridpoints, nt=nt,
@@ -718,7 +731,7 @@ class SparseFirst(SparseFunction):
# No time dependence so need the implicit dim
rec = s.interpolate(expr=s+fs, implicit_dims=grid.stepping_dim)
op = Operator(eqs + rec)
print(op)

op(time_M=10)
expected = 10*11/2 # n (n+1)/2
assert np.allclose(s.data, expected)
