Update boundary condition
utkinis committed Oct 18, 2023
1 parent 237ce07 commit fba369b
Showing 8 changed files with 119 additions and 64 deletions.
23 changes: 18 additions & 5 deletions src/BoundaryConditions/BoundaryConditions.jl
@@ -1,6 +1,6 @@
module BoundaryConditions

export FieldBoundaryConditions
export FieldBoundaryCondition, BoundaryConditionsBatch
export apply_boundary_conditions!, apply_all_boundary_conditions!

export DirichletBC, HalfCell, FullCell
@@ -17,14 +17,27 @@ using FastIce.Architectures
using KernelAbstractions
using Adapt

abstract type FieldBoundaryCondition end

"Overload this method for a custom boundary condition type."
apply_boundary_conditions!(::Val{S}, ::Val{D}, arch::Architecture, grid::CartesianGrid, bc::Nothing) where {S,D} = nothing

include("field_boundary_conditions.jl")
include("utils.jl")
include("boundary_function.jl")
include("dirichlet_bc.jl")
include("field_boundary_conditions.jl")
include("hide_boundaries.jl")

struct BoundaryConditionsBatch{F,BC}
fields::F
conditions::BC
end

@inline function apply_boundary_conditions!(::Val{S}, ::Val{D},
arch::Architecture,
grid::CartesianGrid,
batch::BoundaryConditionsBatch) where {S,D}
apply_boundary_conditions!(Val(S), Val(D), arch, grid, batch.fields, batch.conditions)
end

apply_boundary_conditions!(side, val, arch, grid, ::Nothing) = nothing
apply_boundary_conditions!(side, val, arch, grid, fields, ::Nothing) = nothing

end
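For reference, a minimal usage sketch of the new batch-based API, mirroring the calls in the updated tests below; the arch, grid, and field setup is assumed to exist as in the test suite:

    west_bc = BoundaryConditionsBatch((field,), (DirichletBC{HalfCell}(1.0),))
    # apply on side 1 of dimension 1 (the west x-boundary) and wait for completion
    apply_boundary_conditions!(Val(1), Val(1), arch, grid, west_bc; async=false)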
2 changes: 1 addition & 1 deletion src/BoundaryConditions/dirichlet_bc.jl
@@ -5,7 +5,7 @@ struct HalfCell end
struct FullCell end

# First-kind Dirichlet boundary condition parametrised by the gradient reconstruction kind (can be HalfCell or FullCell)
struct DirichletBC{Gradient,T}
struct DirichletBC{Gradient,T} <: FieldBoundaryCondition
condition::T
DirichletBC{Gradient}(condition::T) where {Gradient,T} = new{Gradient,T}(condition)
end
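With DirichletBC now subtyping FieldBoundaryCondition, user-defined conditions can plug into the same hierarchy. A hypothetical sketch (the NeumannBC name and its field are illustrative only and not part of this commit):

    struct NeumannBC{T} <: FieldBoundaryCondition
        flux::T  # prescribed boundary flux; its application would follow the DirichletBC pattern
    end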
24 changes: 10 additions & 14 deletions src/BoundaryConditions/field_boundary_conditions.jl
@@ -1,14 +1,12 @@
struct FieldBoundaryConditions{F<:Tuple,B<:Tuple}
fields::F
conditions::B
end

function apply_boundary_conditions!(::Val{S}, ::Val{D}, arch::Architecture, grid::CartesianGrid,
bc::FieldBoundaryConditions; async=true) where {S,D}
_validate_boundary_conditions(bc, D, S)
sizes = ntuple(ifield -> remove_dim(Val(D), size(bc.fields[ifield])), Val(length(bc.fields)))
function apply_boundary_conditions!(::Val{S}, ::Val{D},
arch::Architecture,
grid::CartesianGrid,
fields::NTuple{N,Field},
conditions::NTuple{N,FieldBoundaryCondition}; async=true) where {S,D,N}
_validate_fields(fields, D, S)
sizes = ntuple(ifield -> remove_dim(Val(D), size(fields[ifield])), Val(length(fields)))
worksize = remove_dim(Val(D), size(grid, Vertex()))
_apply_boundary_conditions!(backend(arch), 256, worksize)(Val(S), Val(D), grid, sizes, bc.fields, bc.conditions)
_apply_boundary_conditions!(backend(arch), 256, worksize)(Val(S), Val(D), grid, sizes, fields, conditions)
async || KernelAbstractions.synchronize(backend(arch))
return
end
@@ -26,10 +24,8 @@ end
end
end

@inline _apply_field_boundary_condition!(side, dim, grid, f, loc, Ibc, ::Nothing) = nothing

function _validate_boundary_conditions(bc::FieldBoundaryConditions, dim, side)
for f in bc.fields
function _validate_fields(fields::NTuple{N,Field}, dim, side) where {N}
for f in fields
if (location(f, Val(dim)) == Center()) && (halo(f, dim, side) < 1)
error("to apply boundary conditions, halo width must be at least 1")
end
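The entry point now takes matching tuples of fields and conditions instead of a FieldBoundaryConditions wrapper. A minimal sketch of a direct call, again assuming arch, grid, and field as in the tests:

    apply_boundary_conditions!(Val(1), Val(1), arch, grid,
                               (field,), (DirichletBC{HalfCell}(1.0),); async=false)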
4 changes: 2 additions & 2 deletions src/BoundaryConditions/hide_boundaries.jl
@@ -18,11 +18,11 @@ function hide(fun::F, hb::HideBoundaries{N}, arch::Architecture, grid::Cartesian
ntuple(Val(2)) do side
pipe = hb.pipelines[dim][side]
range = outer_ranges[dim][side]
bc = boundary_conditions[dim][side]
batch = boundary_conditions[dim][side]
# execute outer range and boundary conditions asynchronously
put!(pipe) do
fun(range)
apply_boundary_conditions!(Val(side), Val(dim), arch, grid, bc)
apply_boundary_conditions!(Val(side), Val(dim), arch, grid, batch)
Architectures.synchronize(arch)
end
end
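hide now looks up batches as boundary_conditions[dim][side]. A sketch of how the nested layout for one dimension could be assembled (the condition values are illustrative, and y_batches and z_batches would be built analogously):

    x_batches = (BoundaryConditionsBatch((field,), (DirichletBC{HalfCell}(1.0),)),  # side 1
                 BoundaryConditionsBatch((field,), (DirichletBC{FullCell}(0.5),)))  # side 2
    boundary_conditions = (x_batches, y_batches, z_batches)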
4 changes: 4 additions & 0 deletions src/Distributed/Distributed.jl
@@ -3,6 +3,7 @@ module Distributed
using FastIce.Grids
using FastIce.Fields
using FastIce.Architectures
using FastIce.BoundaryConditions
import FastIce.BoundaryConditions: apply_boundary_conditions!

using MPI
@@ -13,6 +14,8 @@ export global_rank, shared_rank, node_name, cartesian_communicator, shared_commu
export dimensions, global_size, node_size
export global_grid_size, local_grid
export neighbors, neighbor
export override_boundary_conditions
export gather!

export DistributedBoundaryConditions

@@ -26,5 +29,6 @@ end

include("topology.jl")
include("boundary_conditions.jl")
include("gather.jl")

end
71 changes: 41 additions & 30 deletions src/Distributed/boundary_conditions.jl
@@ -1,58 +1,52 @@
struct DistributedBoundaryConditions{F,B}
fields::F
exchange_infos::B

function DistributedBoundaryConditions(::Val{S}, ::Val{D}, fields::NTuple{N}) where {S,D,N}
exchange_infos = ntuple(Val(N)) do idx
send_view = get_send_view(Val(S), Val(D), fields[idx])
recv_view = get_recv_view(Val(S), Val(D), fields[idx])
send_buffer = similar(parent(send_view), eltype(send_view), size(send_view))
recv_buffer = similar(parent(recv_view), eltype(recv_view), size(recv_view))
ExchangeInfo(send_buffer, recv_buffer)
end
return new{typeof(fields),typeof(exchange_infos)}(fields, exchange_infos)
end
end

mutable struct ExchangeInfo{SB,RB}
send_buffer::SB
recv_buffer::RB
send_request::MPI.Request
recv_request::MPI.Request
ExchangeInfo(send_buf, recv_buf) = new{typeof(send_buf),typeof(recv_buf)}(send_buf, recv_buf, MPI.REQUEST_NULL, MPI.REQUEST_NULL)
end

ExchangeInfo(send_buf, recv_buf) = ExchangeInfo(send_buf, recv_buf, MPI.REQUEST_NULL, MPI.REQUEST_NULL)
function ExchangeInfo(::Val{S}, ::Val{D}, field::Field) where {S,D}
send_view = get_send_view(Val(S), Val(D), field)
recv_view = get_recv_view(Val(S), Val(D), field)
send_buffer = similar(parent(send_view), eltype(send_view), size(send_view))
recv_buffer = similar(parent(recv_view), eltype(recv_view), size(recv_view))
return ExchangeInfo(send_buffer, recv_buffer)
end

function apply_boundary_conditions!(::Val{S}, ::Val{D}, arch::Architecture, grid::CartesianGrid,
bc::DistributedBoundaryConditions; async=true) where {S,D}
function apply_boundary_conditions!(::Val{S}, ::Val{D},
arch::Architecture,
grid::CartesianGrid,
fields::NTuple{N,Field},
exchange_infos::NTuple{N,ExchangeInfo}; async=true) where {S,D,N}
comm = cartesian_communicator(details(arch))
nbrank = neighbor(details(arch), D, S)

# initiate non-blocking MPI receive and device-to-device copy to the send buffer
for idx in eachindex(bc.fields)
info = bc.exchange_infos[idx]
for idx in eachindex(fields)
info = exchange_infos[idx]
info.recv_request = MPI.Irecv!(info.recv_buffer, comm; source=nbrank)
send_view = get_send_view(Val(S), Val(D), bc.fields[idx])
send_view = get_send_view(Val(S), Val(D), fields[idx])
copyto!(info.send_buffer, send_view)
end
Architectures.synchronize(arch)

# initiate non-blocking MPI send
for idx in eachindex(bc.fields)
info = bc.exchange_infos[idx]
for idx in eachindex(fields)
info = exchange_infos[idx]
info.send_request = MPI.Isend(info.send_buffer, comm; dest=nbrank)
end

recv_ready = BitVector(false for _ in eachindex(bc.exchange_infos))
send_ready = BitVector(false for _ in eachindex(bc.exchange_infos))
recv_ready = BitVector(false for _ in eachindex(exchange_infos))
send_ready = BitVector(false for _ in eachindex(exchange_infos))

# test send and receive requests, initiating device-to-device copy
# to the receive buffer if the receive is complete
while !(all(recv_ready) && all(send_ready))
for idx in eachindex(bc.fields)
info = bc.exchange_infos[idx]
for idx in eachindex(fields)
info = exchange_infos[idx]
if MPI.Test(info.recv_request) && !recv_ready[idx]
recv_view = get_recv_view(Val(S), Val(D), bc.fields[idx])
recv_view = get_recv_view(Val(S), Val(D), fields[idx])
copyto!(recv_view, info.recv_buffer)
recv_ready[idx] = true
end
@@ -69,7 +63,10 @@ _overlap(::Vertex) = 1
_overlap(::Center) = 0

get_recv_view(side::Val{S}, dim::Val{D}, f::Field) where {S,D} = get_recv_view(side, dim, parent(f), halo(f, D, S))
get_send_view(side::Val{S}, dim::Val{D}, f::Field) where {S,D} = get_send_view(side, dim, parent(f), halo(f, D, S), _overlap(location(f, dim)))

function get_send_view(side::Val{S}, dim::Val{D}, f::Field) where {S,D}
get_send_view(side, dim, parent(f), halo(f, D, S), _overlap(location(f, dim)))
end

function get_recv_view(::Val{1}, ::Val{D}, array::AbstractArray, halo_width::Integer) where {D}
recv_range = Base.OneTo(halo_width)
@@ -94,3 +91,17 @@ function get_send_view(::Val{2}, ::Val{D}, array::AbstractArray, halo_width::Int
indices = ntuple(I -> I == D ? send_range : Colon(), Val(ndims(array)))
return view(array, indices...)
end

function override_boundary_conditions(arch::Architecture{DistributedMPI},
batches::NTuple{N, NTuple{2, BoundaryConditionsBatch}}; exchange=false) where {N}
return ntuple(Val(N)) do D
ntuple(Val(2)) do S
batch = batches[D][S]
if neighbor(details(arch), D, S) != MPI.PROC_NULL
exchange ? BoundaryConditionsBatch(batch.fields, ExchangeInfo.(Val(S), Val(D), batch.fields)) : nothing
else
batch
end
end
end
end
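A hedged sketch of how override_boundary_conditions could be used: on sides that have an MPI neighbour the physical batch is replaced, either by an ExchangeInfo-backed batch (exchange=true) or by nothing, while outer boundaries keep their original batches. The boundary_conditions layout from the hide_boundaries.jl sketch above is assumed:

    # internal boundaries now carry halo-exchange batches instead of Dirichlet conditions
    batches = override_boundary_conditions(arch, boundary_conditions; exchange=true)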
31 changes: 31 additions & 0 deletions src/Distributed/gather.jl
@@ -0,0 +1,31 @@
function gather!(dst::Union{AbstractArray,Nothing}, src::AbstractArray, comm::MPI.Comm; root=0)
dims, _, _ = MPI.Cart_get(comm)
dims = Tuple(dims)
if MPI.Comm_rank(comm) == root
# make subtype for gather
subtype = MPI.Types.create_subarray(size(dst), size(src), (0, 0), MPI.Datatype(eltype(dst)))
subtype = MPI.Types.create_resized(subtype, 0, size(src, 1) * Base.elsize(dst))
MPI.Types.commit!(subtype)
# make VBuffer for collective communication
counts = fill(Cint(1), dims)
displs = zeros(Cint, dims)
d = zero(Cint)
for j in 1:dims[2]
for i in 1:dims[1]
displs[i, j] = d
d += 1
end
d += (size(src, 2) - 1) * dims[2]
end
# transpose displs as cartesian communicator is row-major
recvbuf = MPI.VBuffer(dst, vec(counts), vec(displs'), subtype)
MPI.Gatherv!(src, recvbuf, comm; root)
else
MPI.Gatherv!(src, nothing, comm; root)
end
return
end

function gather!(arch::Architecture{DistributedMPI}, dst, src::Field; kwargs...)
gather!(dst, interior(src), cartesian_communicator(details(arch)); kwargs...)
end
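A hedged usage sketch for gather!: collect the interior of a distributed 2D field on the root rank. The global sizes nx_g and ny_g are placeholders for illustration, not defined in this commit:

    comm = cartesian_communicator(details(arch))
    dst  = MPI.Comm_rank(comm) == 0 ? zeros(nx_g, ny_g) : nothing
    gather!(arch, dst, src_field)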
24 changes: 12 additions & 12 deletions test/test_bc.jl
@@ -14,26 +14,26 @@ using FastIce.BoundaryConditions
@testset "value" begin
@testset "x-dim" begin
data(field) .= 0.0
west_bc = FieldBoundaryConditions((field,), (DirichletBC{HalfCell}(1.0),))
east_bc = FieldBoundaryConditions((field,), (DirichletBC{FullCell}(0.5),))
west_bc = BoundaryConditionsBatch((field,), (DirichletBC{HalfCell}(1.0),))
east_bc = BoundaryConditionsBatch((field,), (DirichletBC{FullCell}(0.5),))
apply_boundary_conditions!(Val(1), Val(1), arch, grid, west_bc; async=false)
apply_boundary_conditions!(Val(2), Val(1), arch, grid, east_bc; async=false)
@test all((parent(field)[1, 2:ny+1, 2:nz+1] .+ parent(field)[2, 2:ny+1, 2:nz+1]) ./ 2 .≈ west_bc.conditions[1].condition)
@test all(parent(field)[nx+2, 2:ny+1, 2:nz+1] .≈ east_bc.conditions[1].condition)
end
@testset "y-dim" begin
data(field) .= 0.0
south_bc = FieldBoundaryConditions((field,), (DirichletBC{HalfCell}(1.0),))
north_bc = FieldBoundaryConditions((field,), (DirichletBC{FullCell}(0.5),))
south_bc = BoundaryConditionsBatch((field,), (DirichletBC{HalfCell}(1.0),))
north_bc = BoundaryConditionsBatch((field,), (DirichletBC{FullCell}(0.5),))
apply_boundary_conditions!(Val(1), Val(2), arch, grid, south_bc; async=false)
apply_boundary_conditions!(Val(2), Val(2), arch, grid, north_bc; async=false)
@test all((parent(field)[2:nx+1, 1, 2:nz+1] .+ parent(field)[2:nx+1, 2, 2:nz+1]) ./ 2 .≈ south_bc.conditions[1].condition)
@test all(parent(field)[2:nx+1, ny+2, 2:nz+1] .≈ north_bc.conditions[1].condition)
end
@testset "z-dim" begin
data(field) .= 0.0
bottom_bc = FieldBoundaryConditions((field,), (DirichletBC{HalfCell}(1.0),))
top_bc = FieldBoundaryConditions((field,), (DirichletBC{FullCell}(0.5),))
bottom_bc = BoundaryConditionsBatch((field,), (DirichletBC{HalfCell}(1.0),))
top_bc = BoundaryConditionsBatch((field,), (DirichletBC{FullCell}(0.5),))
apply_boundary_conditions!(Val(1), Val(3), arch, grid, bottom_bc; async=false)
apply_boundary_conditions!(Val(2), Val(3), arch, grid, top_bc; async=false)
@test all((parent(field)[2:nx+1, 2:ny+1, 1] .+ parent(field)[2:nx+1, 2:ny+1, 2]) ./ 2 .≈ bottom_bc.conditions[1].condition)
@@ -47,8 +47,8 @@ using FastIce.BoundaryConditions
bc_array_east = KernelAbstractions.allocate(backend, Float64, (size(grid, 2), size(grid, 3)))
bc_array_west .= 1.0
bc_array_east .= 0.5
west_bc = FieldBoundaryConditions((field,), (DirichletBC{HalfCell}(bc_array_west),))
east_bc = FieldBoundaryConditions((field,), (DirichletBC{FullCell}(bc_array_east),))
west_bc = BoundaryConditionsBatch((field,), (DirichletBC{HalfCell}(bc_array_west),))
east_bc = BoundaryConditionsBatch((field,), (DirichletBC{FullCell}(bc_array_east),))
apply_boundary_conditions!(Val(1), Val(1), arch, grid, west_bc; async=false)
apply_boundary_conditions!(Val(2), Val(1), arch, grid, east_bc; async=false)
@test all((parent(field)[1, 2:ny+1, 2:nz+1] .+ parent(field)[2, 2:ny+1, 2:nz+1]) ./ 2 .≈ west_bc.conditions[1].condition)
@@ -60,8 +60,8 @@ using FastIce.BoundaryConditions
bc_array_north = KernelAbstractions.allocate(backend, Float64, (size(grid, 2), size(grid, 3)))
bc_array_south .= 1.0
bc_array_north .= 0.5
south_bc = FieldBoundaryConditions((field,), (DirichletBC{HalfCell}(bc_array_south),))
north_bc = FieldBoundaryConditions((field,), (DirichletBC{FullCell}(bc_array_north),))
south_bc = BoundaryConditionsBatch((field,), (DirichletBC{HalfCell}(bc_array_south),))
north_bc = BoundaryConditionsBatch((field,), (DirichletBC{FullCell}(bc_array_north),))
apply_boundary_conditions!(Val(1), Val(2), arch, grid, south_bc; async=false)
apply_boundary_conditions!(Val(2), Val(2), arch, grid, north_bc; async=false)
@test all((parent(field)[2:nx+1, 1, 2:nz+1] .+ parent(field)[2:nx+1, 2, 2:nz+1]) ./ 2 .≈ south_bc.conditions[1].condition)
@@ -73,8 +73,8 @@ using FastIce.BoundaryConditions
bc_array_top = KernelAbstractions.allocate(backend, Float64, (size(grid, 2), size(grid, 3)))
bc_array_bot .= 1.0
bc_array_top .= 0.5
bottom_bc = FieldBoundaryConditions((field,), (DirichletBC{HalfCell}(bc_array_bot),))
top_bc = FieldBoundaryConditions((field,), (DirichletBC{FullCell}(bc_array_top),))
bottom_bc = BoundaryConditionsBatch((field,), (DirichletBC{HalfCell}(bc_array_bot),))
top_bc = BoundaryConditionsBatch((field,), (DirichletBC{FullCell}(bc_array_top),))
apply_boundary_conditions!(Val(1), Val(3), arch, grid, bottom_bc; async=false)
apply_boundary_conditions!(Val(2), Val(3), arch, grid, top_bc; async=false)
@test all((parent(field)[2:nx+1, 2:ny+1, 1] .+ parent(field)[2:nx+1, 2:ny+1, 2]) ./ 2 .≈ bottom_bc.conditions[1].condition)
