diff --git a/.gitignore b/.gitignore
index 838b6d86..91a3e882 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@
 *.jl.mem
 .DS_Store
 /Manifest.toml
+/LocalPreferences.toml
 /dev/
 /docs/build/
 /docs/site/
diff --git a/Project.toml b/Project.toml
index 9a3154cb..23fa0499 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,8 +1,17 @@
+authors = ["S. Badia ", "A. F. Martin ", "F. Verdugo "]
 name = "GridapDistributed"
 uuid = "f9701e48-63b3-45aa-9a63-9bc6c271f355"
-authors = ["S. Badia ", "A. F. Martin ", "F. Verdugo "]
 version = "0.3.0"
 
+[compat]
+FillArrays = "0.8.4, 0.9, 0.10, 0.11, 0.12, 0.13"
+Gridap = "0.17.17"
+MPI = "0.16, 0.17, 0.18, 0.19, 0.20"
+PartitionedArrays = "0.3.2"
+SparseMatricesCSR = "0.6.6"
+WriteVTK = "1.12.0"
+julia = "1.3"
+
 [deps]
 FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b"
 Gridap = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e"
@@ -13,16 +22,8 @@
 SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
 SparseMatricesCSR = "a0a7dd2c-ebf4-11e9-1f05-cf50bc540ca1"
 WriteVTK = "64499a7a-5c06-52f2-abe2-ccb03c286192"
 
-[compat]
-FillArrays = "0.8.4, 0.9, 0.10, 0.11, 0.12, 0.13"
-Gridap = "0.17.17"
-MPI = "0.16, 0.17, 0.18, 0.19, 0.20"
-PartitionedArrays = "0.3.2"
-SparseMatricesCSR = "0.6.6"
-WriteVTK = "1.12.0"
-julia = "1.3"
-
 [extras]
+MPIPreferences = "3da0fdf6-3ccc-4f1b-acd9-58baa6c99267"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 
 [targets]
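The two build-system changes above go together: `MPIPreferences` becomes a test extra, and the `LocalPreferences.toml` file it generates is ignored because it is machine-specific. For context, this is the standard MPIPreferences workflow for pointing MPI.jl at a concrete MPI binary (a one-off sketch, run once per machine or CI image):

```julia
# Writes LocalPreferences.toml next to Project.toml; that file is now
# .gitignore'd since its contents depend on the local MPI installation.
using MPIPreferences

MPIPreferences.use_system_binary()  # or MPIPreferences.use_jll_binary("MPICH_jll")
```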
diff --git a/src/Adaptivity.jl b/src/Adaptivity.jl
new file mode 100644
index 00000000..2df02533
--- /dev/null
+++ b/src/Adaptivity.jl
@@ -0,0 +1,326 @@
+
+# DistributedAdaptedDiscreteModels
+
+const DistributedAdaptedDiscreteModel{Dc,Dp} = GenericDistributedDiscreteModel{Dc,Dp,<:AbstractArray{<:AdaptedDiscreteModel{Dc,Dp}}}
+
+function DistributedAdaptedDiscreteModel(model::DistributedDiscreteModel,
+                                         parent::DistributedDiscreteModel,
+                                         glue::AbstractArray{<:AdaptivityGlue})
+  models = map(local_views(model),local_views(parent),glue) do model, parent, glue
+    AdaptedDiscreteModel(model,parent,glue)
+  end
+  return GenericDistributedDiscreteModel(models,get_cell_gids(model))
+end
+
+function Adaptivity.get_adaptivity_glue(model::DistributedAdaptedDiscreteModel)
+  return map(Adaptivity.get_adaptivity_glue,local_views(model))
+end
+
+# RedistributeGlue : Redistributing discrete models
+
+"""
+  RedistributeGlue
+
+  Glue linking two distributions of the same mesh.
+  - `new_parts`: Array with the new part IDs (and comms)
+  - `old_parts`: Array with the old part IDs (and comms)
+  - `parts_rcv`: Array with the part IDs from which each part receives
+  - `parts_snd`: Array with the part IDs to which each part sends
+  - `lids_rcv` : Local IDs of the entries that are received from each part
+  - `lids_snd` : Local IDs of the entries that are sent to each part
+  - `old2new`  : Mapping of local IDs from the old to the new mesh
+  - `new2old`  : Mapping of local IDs from the new to the old mesh
+"""
+struct RedistributeGlue
+  new_parts :: AbstractArray{<:Integer}
+  old_parts :: AbstractArray{<:Integer}
+  parts_rcv :: AbstractArray{<:AbstractVector{<:Integer}}
+  parts_snd :: AbstractArray{<:AbstractVector{<:Integer}}
+  lids_rcv  :: AbstractArray{<:JaggedArray{<:Integer}}
+  lids_snd  :: AbstractArray{<:JaggedArray{<:Integer}}
+  old2new   :: AbstractArray{<:AbstractVector{<:Integer}}
+  new2old   :: AbstractArray{<:AbstractVector{<:Integer}}
+end
+
+get_parts(g::RedistributeGlue) = get_parts(g.parts_rcv)
+
+function get_old_and_new_parts(g::RedistributeGlue,reverse::Val{false})
+  return g.old_parts, g.new_parts
+end
+
+function get_old_and_new_parts(g::RedistributeGlue,reverse::Val{true})
+  return g.new_parts, g.old_parts
+end
+
+function get_glue_components(glue::RedistributeGlue,reverse::Val{false})
+  return glue.lids_rcv, glue.lids_snd, glue.parts_rcv, glue.parts_snd, glue.new2old
+end
+
+function get_glue_components(glue::RedistributeGlue,reverse::Val{true})
+  return glue.lids_snd, glue.lids_rcv, glue.parts_snd, glue.parts_rcv, glue.old2new
+end
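The `reverse` flag used throughout this file is lifted to the type domain: a `Val(reverse)` argument lets each direction of the exchange resolve to its own method. A minimal standalone sketch of the pattern (the names `ToyGlue` and `old_and_new` are illustrative only, not part of the package):

```julia
# Toy mirror of get_old_and_new_parts: dispatch on Val(reverse)
# instead of branching on a runtime Bool.
struct ToyGlue
  old_parts::Vector{Int}
  new_parts::Vector{Int}
end

old_and_new(g::ToyGlue, ::Val{false}) = (g.old_parts, g.new_parts)
old_and_new(g::ToyGlue, ::Val{true})  = (g.new_parts, g.old_parts)  # reversed exchange

g = ToyGlue([1,2], [1,2,3,4])
@assert old_and_new(g, Val(false)) == ([1,2], [1,2,3,4])
@assert old_and_new(g, Val(true))  == ([1,2,3,4], [1,2])
```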
+
+function allocate_rcv_buffer(t::Type{T},g::RedistributeGlue) where T
+  ptrs = g.lids_rcv.ptrs
+  data = zeros(T,ptrs[end]-1)
+  JaggedArray(data,ptrs)
+end
+
+function allocate_snd_buffer(t::Type{T},g::RedistributeGlue) where T
+  ptrs = g.lids_snd.ptrs
+  data = zeros(T,ptrs[end]-1)
+  JaggedArray(data,ptrs)
+end
+
+"""
+  Redistributes a DistributedDiscreteModel to optimally
+  rebalance the load among the processors.
+  Returns the rebalanced model and a RedistributeGlue instance.
+"""
+function redistribute(::DistributedDiscreteModel,args...;kwargs...)
+  @abstractmethod
+end
+
+# Redistribution of cell-wise dofs, free values and FEFunctions
+
+function _allocate_cell_wise_dofs(cell_to_ldofs)
+  map(cell_to_ldofs) do cell_to_ldofs
+    cache  = array_cache(cell_to_ldofs)
+    ncells = length(cell_to_ldofs)
+    ptrs   = Vector{Int32}(undef,ncells+1)
+    for cell in 1:ncells
+      ldofs = getindex!(cache,cell_to_ldofs,cell)
+      ptrs[cell+1] = length(ldofs)
+    end
+    PartitionedArrays.length_to_ptrs!(ptrs)
+    ndata = ptrs[end]-1
+    data  = Vector{Float64}(undef,ndata)
+    PartitionedArrays.JaggedArray(data,ptrs)
+  end
+end
+
+function _update_cell_dof_values_with_local_info!(cell_dof_values_new,
+                                                  cell_dof_values_old,
+                                                  new2old)
+  map(cell_dof_values_new,
+      cell_dof_values_old,
+      new2old) do cell_dof_values_new,cell_dof_values_old,new2old
+    ocache = array_cache(cell_dof_values_old)
+    for (ncell,ocell) in enumerate(new2old)
+      if ocell != 0
+        # Copy ocell to ncell
+        oentry = getindex!(ocache,cell_dof_values_old,ocell)
+        range  = cell_dof_values_new.ptrs[ncell]:cell_dof_values_new.ptrs[ncell+1]-1
+        cell_dof_values_new.data[range] .= oentry
+      end
+    end
+  end
+end
+
+function _allocate_comm_data(num_dofs_x_cell,lids)
+  map(num_dofs_x_cell,lids) do num_dofs_x_cell,lids
+    n = length(lids)
+    ptrs = Vector{Int32}(undef,n+1)
+    ptrs .= 0
+    for i = 1:n
+      for j = lids.ptrs[i]:lids.ptrs[i+1]-1
+        ptrs[i+1] = ptrs[i+1] + num_dofs_x_cell.data[j]
+      end
+    end
+    PartitionedArrays.length_to_ptrs!(ptrs)
+    ndata = ptrs[end]-1
+    data  = Vector{Float64}(undef,ndata)
+    PartitionedArrays.JaggedArray(data,ptrs)
+  end
+end
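All of the helpers above build `JaggedArray`s: variable-length rows stored as one flat `data` vector plus a `ptrs` offset table, so that row `i` lives in `data[ptrs[i]:ptrs[i+1]-1]`. A small self-contained sketch of that layout and of the `length_to_ptrs!` idiom used above:

```julia
using PartitionedArrays

# Three cells with 2, 3 and 1 dofs: first store each row length in ptrs[i+1],
# then turn the lengths into offsets in place.
ptrs = Vector{Int32}(undef,4)
ptrs[2] = 2; ptrs[3] = 3; ptrs[4] = 1
PartitionedArrays.length_to_ptrs!(ptrs)   # ptrs == [1, 3, 6, 7]
data = zeros(Float64,ptrs[end]-1)         # 6 entries in total
a = PartitionedArrays.JaggedArray(data,ptrs)
@assert length(a[2]) == 3                 # row i is data[ptrs[i]:ptrs[i+1]-1]
```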
+
+function _pack_snd_data!(snd_data,cell_dof_values,snd_lids)
+  map(snd_data,cell_dof_values,snd_lids) do snd_data,cell_dof_values,snd_lids
+    cache = array_cache(cell_dof_values)
+    s = 1
+    for i = 1:length(snd_lids)
+      for j = snd_lids.ptrs[i]:snd_lids.ptrs[i+1]-1
+        cell  = snd_lids.data[j]
+        ldofs = getindex!(cache,cell_dof_values,cell)
+
+        e = s+length(ldofs)-1
+        range = s:e
+        snd_data.data[range] .= ldofs
+        s = e+1
+      end
+    end
+  end
+end
+
+function _unpack_rcv_data!(cell_dof_values,rcv_data,rcv_lids)
+  map(cell_dof_values,rcv_data,rcv_lids) do cell_dof_values,rcv_data,rcv_lids
+    s = 1
+    for i = 1:length(rcv_lids.ptrs)-1
+      for j = rcv_lids.ptrs[i]:rcv_lids.ptrs[i+1]-1
+        cell = rcv_lids.data[j]
+        range_cell_dof_values = cell_dof_values.ptrs[cell]:cell_dof_values.ptrs[cell+1]-1
+
+        e = s+length(range_cell_dof_values)-1
+        range_rcv_data = s:e
+        cell_dof_values.data[range_cell_dof_values] .= rcv_data.data[range_rcv_data]
+        s = e+1
+      end
+    end
+  end
+end
+
+function _num_dofs_x_cell(cell_dofs_array,lids)
+  map(cell_dofs_array,lids) do cell_dofs_array, lids
+    data = [length(cell_dofs_array[i]) for i = 1:length(cell_dofs_array)]
+    PartitionedArrays.JaggedArray(data,lids.ptrs)
+  end
+end
+
+function get_redistribute_cell_dofs_cache(cell_dof_values_old,
+                                          cell_dof_ids_new,
+                                          model_new,
+                                          glue::RedistributeGlue;
+                                          reverse=false)
+
+  lids_rcv, lids_snd, parts_rcv, parts_snd, new2old = get_glue_components(glue,Val(reverse))
+
+  cell_dof_values_old = change_parts(cell_dof_values_old,get_parts(glue);default=[])
+  cell_dof_ids_new    = change_parts(cell_dof_ids_new,get_parts(glue);default=[[]])
+
+  num_dofs_x_cell_snd = _num_dofs_x_cell(cell_dof_values_old, lids_snd)
+  num_dofs_x_cell_rcv = _num_dofs_x_cell(cell_dof_ids_new, lids_rcv)
+  snd_data = _allocate_comm_data(num_dofs_x_cell_snd, lids_snd)
+  rcv_data = _allocate_comm_data(num_dofs_x_cell_rcv, lids_rcv)
+
+  cell_dof_values_new = _allocate_cell_wise_dofs(cell_dof_ids_new)
+
+  caches = snd_data, rcv_data, cell_dof_values_new
+  return caches
+end
+
+function redistribute_cell_dofs(cell_dof_values_old,
+                                cell_dof_ids_new,
+                                model_new,
+                                glue::RedistributeGlue;
+                                reverse=false)
+  caches = get_redistribute_cell_dofs_cache(cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse)
+  return redistribute_cell_dofs!(caches,cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse)
+end
+
+function redistribute_cell_dofs!(caches,
+                                 cell_dof_values_old,
+                                 cell_dof_ids_new,
+                                 model_new,
+                                 glue::RedistributeGlue;
+                                 reverse=false)
+
+  snd_data, rcv_data, cell_dof_values_new = caches
+  lids_rcv, lids_snd, parts_rcv, parts_snd, new2old = get_glue_components(glue,Val(reverse))
+  old_parts, new_parts = get_old_and_new_parts(glue,Val(reverse))
+
+  cell_dof_values_old = change_parts(cell_dof_values_old,get_parts(glue);default=[])
+  cell_dof_ids_new    = change_parts(cell_dof_ids_new,get_parts(glue);default=[[]])
+
+  _pack_snd_data!(snd_data,cell_dof_values_old,lids_snd)
+
+  graph = ExchangeGraph(parts_snd,parts_rcv)
+  t = exchange!(rcv_data,snd_data,graph)
+  wait(t)
+
+  # We have to build the owned part of "cell_dof_values_new" out of
+  #  1. cell_dof_values_old (for those cells s.t. new2old[:] != 0)
+  #  2. cell_dof_values_new_rcv (for those cells s.t. new2old[:] == 0)
+  _update_cell_dof_values_with_local_info!(cell_dof_values_new,
+                                           cell_dof_values_old,
+                                           new2old)
+
+  _unpack_rcv_data!(cell_dof_values_new,rcv_data,lids_rcv)
+
+  # Now that every part knows its new owned dofs, exchange ghosts
+  cell_dof_values_new = change_parts(cell_dof_values_new,new_parts)
+  if i_am_in(new_parts)
+    cache = fetch_vector_ghost_values_cache(cell_dof_values_new,partition(get_cell_gids(model_new)))
+    fetch_vector_ghost_values!(cell_dof_values_new,cache) |> wait
+  end
+  return cell_dof_values_new
+end
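The local half of the update deserves a note: a new cell either kept its data on the same part (`new2old[ncell] != 0`, copied locally) or receives it through the exchange (`new2old[ncell] == 0`, filled by `_unpack_rcv_data!`). A plain-Julia sketch of that rule, with made-up values:

```julia
# New cell 2 has no old counterpart on this part; it would be filled from the
# receive buffer instead of copied locally.
old_vals = [[1.0,2.0],[3.0,4.0]]
new2old  = [2,0,1]
new_vals = [zeros(2) for _ in 1:3]
for (ncell,ocell) in enumerate(new2old)
  ocell != 0 && (new_vals[ncell] .= old_vals[ocell])
end
@assert new_vals[1] == [3.0,4.0] && new_vals[3] == [1.0,2.0]
@assert new_vals[2] == [0.0,0.0]  # still empty: awaits the communicated data
```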
+
+function _get_cell_dof_ids_inner_space(s::FESpace)
+  get_cell_dof_ids(s)
+end
+
+function _get_cell_dof_ids_inner_space(s::FESpaceWithLinearConstraints)
+  get_cell_dof_ids(s.space)
+end
+
+function get_redistribute_free_values_cache(fv_new::Union{PVector,Nothing},
+                                            Uh_new::Union{DistributedSingleFieldFESpace,Nothing},
+                                            fv_old::Union{PVector,Nothing},
+                                            dv_old::Union{AbstractArray,Nothing},
+                                            Uh_old::Union{DistributedSingleFieldFESpace,Nothing},
+                                            model_new,
+                                            glue::RedistributeGlue;
+                                            reverse=false)
+  old_parts, new_parts = get_old_and_new_parts(glue,Val(reverse))
+  cell_dof_values_old = i_am_in(old_parts) ? map(scatter_free_and_dirichlet_values,local_views(Uh_old),local_views(fv_old),dv_old) : nothing
+  cell_dof_ids_new    = i_am_in(new_parts) ? map(_get_cell_dof_ids_inner_space,local_views(Uh_new)) : nothing
+  caches = get_redistribute_cell_dofs_cache(cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse)
+  return caches
+end
+
+function redistribute_free_values(fv_new::Union{PVector,Nothing},
+                                  Uh_new::Union{DistributedSingleFieldFESpace,Nothing},
+                                  fv_old::Union{PVector,Nothing},
+                                  dv_old::Union{AbstractArray,Nothing},
+                                  Uh_old::Union{DistributedSingleFieldFESpace,Nothing},
+                                  model_new,
+                                  glue::RedistributeGlue;
+                                  reverse=false)
+
+  caches = get_redistribute_free_values_cache(fv_new,Uh_new,fv_old,dv_old,Uh_old,model_new,glue;reverse=reverse)
+  return redistribute_free_values!(caches,fv_new,Uh_new,fv_old,dv_old,Uh_old,model_new,glue;reverse=reverse)
+end
+
+function redistribute_free_values!(caches,
+                                   fv_new::Union{PVector,Nothing},
+                                   Uh_new::Union{DistributedSingleFieldFESpace,Nothing},
+                                   fv_old::Union{PVector,Nothing},
+                                   dv_old::Union{AbstractArray,Nothing},
+                                   Uh_old::Union{DistributedSingleFieldFESpace,Nothing},
+                                   model_new,
+                                   glue::RedistributeGlue;
+                                   reverse=false)
+
+  old_parts, new_parts = get_old_and_new_parts(glue,Val(reverse))
+  cell_dof_values_old = i_am_in(old_parts) ? map(scatter_free_and_dirichlet_values,local_views(Uh_old),local_views(fv_old),dv_old) : nothing
+  cell_dof_ids_new    = i_am_in(new_parts) ? map(_get_cell_dof_ids_inner_space,local_views(Uh_new)) : nothing
+  cell_dof_values_new = redistribute_cell_dofs!(caches,cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse)
+
+  # Gather the new free dofs
+  if i_am_in(new_parts)
+    Gridap.FESpaces.gather_free_values!(fv_new,Uh_new,cell_dof_values_new)
+  end
+  return fv_new
+end
+
+function redistribute_fe_function(uh_old::Union{DistributedSingleFieldFEFunction,Nothing},
+                                  Uh_new::Union{DistributedSingleFieldFESpace,Nothing},
+                                  model_new,
+                                  glue::RedistributeGlue;
+                                  reverse=false)
+
+  old_parts, new_parts = get_old_and_new_parts(glue,Val(reverse))
+  cell_dof_values_old = i_am_in(old_parts) ? map(get_cell_dof_values,local_views(uh_old)) : nothing
+  cell_dof_ids_new    = i_am_in(new_parts) ? map(_get_cell_dof_ids_inner_space,local_views(Uh_new)) : nothing
+  cell_dof_values_new = redistribute_cell_dofs(cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse)
+
+  # Assemble the new FEFunction
+  if i_am_in(new_parts)
+    free_values, dirichlet_values = Gridap.FESpaces.gather_free_and_dirichlet_values(Uh_new,cell_dof_values_new)
+    free_values = PVector(free_values,partition(Uh_new.gids))
+    uh_new = FEFunction(Uh_new,free_values,dirichlet_values)
+    return uh_new
+  else
+    return nothing
+  end
+end
diff --git a/src/Algebra.jl b/src/Algebra.jl
index efa3987d..1c17da03 100644
--- a/src/Algebra.jl
+++ b/src/Algebra.jl
@@ -16,6 +16,91 @@
 function change_axes(a::Algebra.AllocationCOO{T,A}, axes::A) where {T,A}
   Algebra.AllocationCOO(counter,a.I,a.J,a.V)
 end
 
+# This type is required because MPIArray from PArrays
+# cannot be instantiated with a NULL communicator
+struct MPIVoidVector{T} <: AbstractVector{T}
+  comm::MPI.Comm
+  function MPIVoidVector(::Type{T}) where {T}
+    new{T}(MPI.COMM_NULL)
+  end
+end
+
+Base.size(a::MPIVoidVector) = (0,)
+Base.IndexStyle(::Type{<:MPIVoidVector}) = IndexLinear()
+function Base.getindex(a::MPIVoidVector,i::Int)
+  error("Indexing of MPIVoidVector not possible.")
+end
+function Base.setindex!(a::MPIVoidVector,v,i::Int)
+  error("Indexing of MPIVoidVector not possible.")
+end
+function Base.show(io::IO,k::MIME"text/plain",data::MPIVoidVector)
+  println(io,"MPIVoidVector")
+end
+
+# Subcommunicators
+
+function get_part_id(comm::MPI.Comm)
+  if comm != MPI.COMM_NULL
+    id = MPI.Comm_rank(comm)+1
+  else
+    id = -1
+  end
+  id
+end
+
+function i_am_in(comm::MPI.Comm)
+  get_part_id(comm) >= 0
+end
+
+function i_am_in(comm::MPIArray)
+  i_am_in(comm.comm)
+end
+
+function i_am_in(comm::MPIVoidVector)
+  i_am_in(comm.comm)
+end
+
+function change_parts(x::Union{MPIArray,DebugArray,Nothing,MPIVoidVector}, new_parts; default=nothing)
+  x_new = map(new_parts) do _p
+    if isa(x,MPIArray) || isa(x,DebugArray)
+      PartitionedArrays.getany(x)
+    else
+      default
+    end
+  end
+  return x_new
+end
+
+function generate_subparts(parts::MPIArray,new_comm_size)
+  root_comm = parts.comm
+  root_size = MPI.Comm_size(root_comm)
+  rank = MPI.Comm_rank(root_comm)
+
+  @static if isdefined(MPI,:MPI_UNDEFINED)
+    mpi_undefined = MPI.MPI_UNDEFINED[]
+  else
+    mpi_undefined = MPI.API.MPI_UNDEFINED[]
+  end
+
+  if root_size == new_comm_size
+    return parts
+  else
+    if rank < new_comm_size
+      comm = MPI.Comm_split(root_comm,0,0)
+      return distribute_with_mpi(LinearIndices((new_comm_size,));comm=comm,duplicate_comm=false)
+    else
+      comm = MPI.Comm_split(root_comm,mpi_undefined,mpi_undefined)
+      return MPIVoidVector(eltype(parts))
+    end
+  end
+end
+
+function generate_subparts(parts::DebugArray,new_comm_size)
+  DebugArray(LinearIndices((new_comm_size,)))
+end
+
+# local_views
+
 function local_views(a)
   @abstractmethod
 end
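How the subcommunicator machinery is meant to be driven (this mirrors `main` in `test/AdaptivityTests.jl` below; it must run under `mpiexec` with 4 ranks, so treat it as a sketch rather than a standalone script):

```julia
using PartitionedArrays
using GridapDistributed: generate_subparts, i_am_in

with_mpi() do distribute
  fine_ranks   = distribute(LinearIndices((4,))) # all 4 MPI ranks
  coarse_ranks = generate_subparts(fine_ranks,2) # first 2 ranks; the rest get an MPIVoidVector
  if i_am_in(coarse_ranks)
    # work that only lives on the coarse subcommunicator
  end
end
```

Ranks left out of the subcommunicator receive an `MPIVoidVector` whose communicator is `MPI.COMM_NULL`, which is exactly why `i_am_in` guards appear before every collective operation in the adaptivity code.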
diff --git a/src/FESpaces.jl b/src/FESpaces.jl
index 6f306c8b..3953bd90 100644
--- a/src/FESpaces.jl
+++ b/src/FESpaces.jl
@@ -49,6 +49,13 @@ function FESpaces.gather_free_and_dirichlet_values!(free_values,dirichlet_values
   map(gather_free_and_dirichlet_values!, local_views(free_values), local_views(dirichlet_values), local_views(f), local_views(cell_vals))
 end
 
+function FESpaces.gather_free_and_dirichlet_values(f::DistributedFESpace,cell_vals)
+  free_values, dirichlet_values = map(local_views(f),cell_vals) do f, cell_vals
+    FESpaces.gather_free_and_dirichlet_values(f,cell_vals)
+  end |> tuple_of_arrays
+  return free_values, dirichlet_values
+end
+
 function dof_wise_to_cell_wise!(cell_wise_vector,dof_wise_vector,cell_to_ldofs,cell_prange)
   map(cell_wise_vector,
       dof_wise_vector,
diff --git a/src/Geometry.jl b/src/Geometry.jl
index 5a3ae119..53f0d9f7 100644
--- a/src/Geometry.jl
+++ b/src/Geometry.jl
@@ -301,7 +301,7 @@ function Geometry.DiscreteModel(
   end |> tuple_of_arrays
 
   partition = map(parts,lcell_to_cell,lcell_to_part) do part, lcell_to_cell, lcell_to_part
-    LocalIndices(part, ncells, lcell_to_cell, lcell_to_part)
+    LocalIndices(ncells, part, lcell_to_cell, lcell_to_part)
  end
 
   # This is required to provide the hint that the communication
@@ -309,7 +309,7 @@
   # to execute the algorithm that reconstructs the reciprocal in the
   # communication graph
   assembly_neighbors(partition;symmetric=true)
- 
+
   gids = PRange(partition)
 
   models = map(lcell_to_cell) do lcell_to_cell
@@ -319,67 +319,6 @@
   GenericDistributedDiscreteModel(models,gids)
 end
 
-# DistributedAdaptedDiscreteModels
-
-const DistributedAdaptedDiscreteModel{Dc,Dp} = GenericDistributedDiscreteModel{Dc,Dp,<:AbstractArray{<:AdaptedDiscreteModel{Dc,Dp}}}
-
-function DistributedAdaptedDiscreteModel(model ::DistributedDiscreteModel,
-                                         parent ::DistributedDiscreteModel,
-                                         glue   ::AbstractArray{<:AdaptivityGlue})
-  models = map(local_views(model),local_views(parent),glue) do model, parent, glue
-    AdaptedDiscreteModel(model,parent,glue)
-  end
-  return GenericDistributedDiscreteModel(models,get_cell_gids(model))
-end
-
-function Adaptivity.get_adaptivity_glue(model::DistributedAdaptedDiscreteModel)
-  return map(Adaptivity.get_adaptivity_glue,local_views(model))
-end
-
-# RedistributeGlue : Redistributing discrete models
-
-"""
-  RedistributeGlue
-
-  Glue linking two distributions of the same mesh.
-  - `parts_rcv`: Array with the part IDs from which each part receives
-  - `parts_snd`: Array with the part IDs to which each part sends
-  - `lids_rcv` : Local IDs of the entries that are received from each part
-  - `lids_snd` : Local IDs of the entries that are sent to each part
-  - `old2new` : Mapping of local IDs from the old to the new mesh
-  - `new2old` : Mapping of local IDs from the new to the old mesh
-"""
-struct RedistributeGlue
-  parts_rcv :: AbstractArray{<:AbstractVector{<:Integer}}
-  parts_snd :: AbstractArray{<:AbstractVector{<:Integer}}
-  lids_rcv  :: AbstractArray{<:JaggedArray{<:Integer}}
-  lids_snd  :: AbstractArray{<:JaggedArray{<:Integer}}
-  old2new   :: AbstractArray{<:AbstractVector{<:Integer}}
-  new2old   :: AbstractArray{<:AbstractVector{<:Integer}}
-end
-
-get_parts(g::RedistributeGlue) = get_parts(g.parts_rcv)
-
-function allocate_rcv_buffer(t::Type{T},g::RedistributeGlue) where T
-  ptrs = local_indices_rcv.ptrs
-  data = zeros(T,ptrs[end]-1)
-  JaggedArray(data,ptrs)
-end
-function allocate_snd_buffer(t::Type{T},g::RedistributeGlue) where T
-  ptrs = local_indices_snd.ptrs
-  data = zeros(T,ptrs[end]-1)
-  JaggedArray(data,ptrs)
-end
-
-"""
-  Redistributes an DistributedDiscreteModel to optimally
-  rebalance the loads between the processors.
-  Returns the rebalanced model and a RedistributeGlue instance.
-"""
-function redistribute(::DistributedDiscreteModel,args...;kwargs...)
-  @abstractmethod
-end
-
 # Triangulation
 
 # We do not inherit from Triangulation on purpose.
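The one-line `LocalIndices` change above swaps the first two arguments: in PartitionedArrays v0.3 the constructor takes the global size first and the owner part second. A small sketch of the corrected call on a toy partition (the values are illustrative, not taken from a real model):

```julia
using PartitionedArrays

# Part 1 of a 10-cell mesh: it owns global cells 1:5 and sees cell 6 as a
# ghost owned by part 2. Signature: LocalIndices(n_global, owner, l2g, l2owner).
ids = LocalIndices(10, 1, [1,2,3,4,5,6], Int32[1,1,1,1,1,2])
@assert local_to_global(ids) == [1,2,3,4,5,6]
```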
diff --git a/src/GridapDistributed.jl b/src/GridapDistributed.jl
index 28a8a4bc..eca02ab6 100644
--- a/src/GridapDistributed.jl
+++ b/src/GridapDistributed.jl
@@ -15,6 +15,7 @@
 using Gridap.MultiField
 using Gridap.ODEs.TransientFETools
 using Gridap.ODEs.ODETools
+using MPI
 using PartitionedArrays
 const PArrays = PartitionedArrays
 
@@ -57,4 +58,6 @@
 include("TransientMultiFieldDistributedCellField.jl")
 include("TransientFESpaces.jl")
 
+include("Adaptivity.jl")
+
 end # module
diff --git a/test/AdaptivityTests.jl b/test/AdaptivityTests.jl
new file mode 100644
index 00000000..3d35263b
--- /dev/null
+++ b/test/AdaptivityTests.jl
@@ -0,0 +1,234 @@
+module AdaptivityTests
+using Test
+
+using Gridap
+using Gridap.Geometry
+using Gridap.Adaptivity
+using Gridap.FESpaces
+
+using MPI
+using GridapDistributed
+using PartitionedArrays
+
+using GridapDistributed: i_am_in, generate_subparts
+using GridapDistributed: find_local_to_local_map
+using GridapDistributed: DistributedAdaptedDiscreteModel
+using GridapDistributed: RedistributeGlue, redistribute_cell_dofs, redistribute_fe_function, redistribute_free_values
+
+function are_equal(a1::MPIArray,a2::MPIArray)
+  same = map(a1,a2) do a1,a2
+    a1 ≈ a2
+  end
+  return reduce(&,same,init=true)
+end
+
+function are_equal(a1::PVector,a2::PVector)
+  are_equal(own_values(a1),own_values(a2))
+end
+
+function DistributedAdaptivityGlue(serial_glue,parent,child)
+  glue = map(partition(get_cell_gids(parent)),partition(get_cell_gids(child))) do parent_gids, child_gids
+    old_g2l = global_to_local(parent_gids)
+    old_l2g = local_to_global(parent_gids)
+    new_l2g = local_to_global(child_gids)
+
+    n2o_cell_map = lazy_map(Reindex(old_g2l),serial_glue.n2o_faces_map[3][new_l2g])
+    n2o_faces_map = [Int64[],Int64[],collect(n2o_cell_map)]
+    n2o_cell_to_child_id = serial_glue.n2o_cell_to_child_id[new_l2g]
+    rrules = serial_glue.refinement_rules[old_l2g]
+    AdaptivityGlue(n2o_faces_map,n2o_cell_to_child_id,rrules)
+  end
+  return glue
+end
+
+function get_redistribute_glue(old_parts,new_parts::DebugArray,old_cell_to_part,new_cell_to_part,model,redist_model)
+  parts_rcv,parts_snd,lids_rcv,lids_snd,old2new,new2old =
+  map(new_parts,partition(get_cell_gids(redist_model))) do p,new_partition
+    old_new_cell_to_part = collect(zip(old_cell_to_part,new_cell_to_part))
+    gids_rcv = findall(x->x[1]!=p && x[2]==p, old_new_cell_to_part)
+    gids_snd = findall(x->x[1]==p && x[2]!=p, old_new_cell_to_part)
+
+    parts_rcv = unique(old_cell_to_part[gids_rcv])
+    parts_snd = unique(new_cell_to_part[gids_snd])
+
+    gids_rcv_by_part = [filter(x -> old_cell_to_part[x] == nbor, gids_rcv) for nbor in parts_rcv]
+    gids_snd_by_part = [filter(x -> new_cell_to_part[x] == nbor, gids_snd) for nbor in parts_snd]
+
+    if p ∈ old_parts.items
+      old_partition = partition(get_cell_gids(model)).items[p]
+      lids_rcv = map(gids -> lazy_map(Reindex(global_to_local(new_partition)),gids),gids_rcv_by_part)
+      lids_snd = map(gids -> lazy_map(Reindex(global_to_local(old_partition)),gids),gids_snd_by_part)
+      old2new  = replace(find_local_to_local_map(old_partition,new_partition), -1 => 0)
+      new2old  = replace(find_local_to_local_map(new_partition,old_partition), -1 => 0)
+    else
+      lids_rcv = map(gids -> lazy_map(Reindex(global_to_local(new_partition)),gids),gids_rcv_by_part)
+      lids_snd = map(gids -> fill(Int32(0),length(gids)),gids_snd_by_part)
+      old2new  = Int[]
+      new2old  = fill(0,length(findall(x -> x == p,new_cell_to_part)))
+    end
+
+    return parts_rcv,parts_snd,JaggedArray(lids_rcv),JaggedArray(lids_snd),old2new,new2old
+  end |> tuple_of_arrays
+
+  return RedistributeGlue(new_parts,old_parts,parts_rcv,parts_snd,lids_rcv,lids_snd,old2new,new2old)
+end
+
+function get_redistribute_glue(old_parts,new_parts::MPIArray,old_cell_to_part,new_cell_to_part,model,redist_model)
+  parts_rcv,parts_snd,lids_rcv,lids_snd,old2new,new2old =
+  map(new_parts,partition(get_cell_gids(redist_model))) do p,new_partition
+    old_new_cell_to_part = collect(zip(old_cell_to_part,new_cell_to_part))
+    gids_rcv = findall(x->x[1]!=p && x[2]==p, old_new_cell_to_part)
+    gids_snd = findall(x->x[1]==p && x[2]!=p, old_new_cell_to_part)
+
+    parts_rcv = unique(old_cell_to_part[gids_rcv])
+    parts_snd = unique(new_cell_to_part[gids_snd])
+
+    gids_rcv_by_part = [filter(x -> old_cell_to_part[x] == nbor, gids_rcv) for nbor in parts_rcv]
+    gids_snd_by_part = [filter(x -> new_cell_to_part[x] == nbor, gids_snd) for nbor in parts_snd]
+
+    if i_am_in(old_parts)
+      old_partition = PartitionedArrays.getany(partition(get_cell_gids(model)))
+      lids_rcv = map(gids -> lazy_map(Reindex(global_to_local(new_partition)),gids),gids_rcv_by_part)
+      lids_snd = map(gids -> lazy_map(Reindex(global_to_local(old_partition)),gids),gids_snd_by_part)
+      old2new  = replace(find_local_to_local_map(old_partition,new_partition), -1 => 0)
+      new2old  = replace(find_local_to_local_map(new_partition,old_partition), -1 => 0)
+    else
+      lids_rcv = map(gids -> lazy_map(Reindex(global_to_local(new_partition)),gids),gids_rcv_by_part)
+      lids_snd = map(gids -> fill(Int32(0),length(gids)),gids_snd_by_part)
+      old2new  = Int[]
+      new2old  = fill(0,length(findall(x -> x == p,new_cell_to_part)))
+    end
+
+    return parts_rcv,parts_snd,JaggedArray(lids_rcv),JaggedArray(lids_snd),old2new,new2old
+  end |> tuple_of_arrays
+
+  return RedistributeGlue(new_parts,old_parts,parts_rcv,parts_snd,lids_rcv,lids_snd,old2new,new2old)
+end
+
+function test_redistribution(coarse_ranks, fine_ranks, model, redist_model, redist_glue)
+  sol(x) = sum(x)
+  reffe = ReferenceFE(lagrangian,Float64,1)
+
+  if i_am_in(coarse_ranks)
+    space = FESpace(model,reffe)
+    u = interpolate(sol,space)
+    cell_dofs = map(get_cell_dof_values,local_views(u))
+    free_values = get_free_dof_values(u)
+    dir_values = zero_dirichlet_values(space)
+  else
+    space = nothing; u = nothing; cell_dofs = nothing; free_values = nothing; dir_values = nothing;
+  end
+
+  redist_space = FESpace(redist_model,reffe)
+  redist_u = interpolate(sol,redist_space)
+  redist_cell_dofs = map(get_cell_dof_values,local_views(redist_u))
+  redist_free_values = get_free_dof_values(redist_u)
+  redist_dir_values = zero_dirichlet_values(redist_space)
+
+  # Redistribute cell values, both ways
+  tmp_cell_dofs = copy(redist_cell_dofs)
+  redistribute_cell_dofs(cell_dofs,tmp_cell_dofs,redist_model,redist_glue)
+  @test are_equal(redist_cell_dofs,tmp_cell_dofs)
+
+  tmp_cell_dofs = i_am_in(coarse_ranks) ? copy(cell_dofs) : nothing
+  redistribute_cell_dofs(redist_cell_dofs,tmp_cell_dofs,model,redist_glue;reverse=true)
+  if i_am_in(coarse_ranks)
+    @test are_equal(cell_dofs,tmp_cell_dofs)
+  end
+
+  # Redistribute free values, both ways
+  tmp_free_values = copy(redist_free_values)
+  redistribute_free_values(tmp_free_values,redist_space,free_values,dir_values,space,redist_model,redist_glue)
+  @test are_equal(redist_free_values,tmp_free_values)
+
+  tmp_free_values = i_am_in(coarse_ranks) ? copy(free_values) : nothing
+  redistribute_free_values(tmp_free_values,space,redist_free_values,redist_dir_values,redist_space,model,redist_glue;reverse=true)
+  if i_am_in(coarse_ranks)
+    @test are_equal(free_values,tmp_free_values)
+  end
+
+  return true
+end
+
+function test_adaptivity(ranks,cmodel,fmodel,glue)
+  if i_am_in(ranks)
+    sol(x) = sum(x)
+    reffe = ReferenceFE(lagrangian,Float64,1)
+    amodel = DistributedAdaptedDiscreteModel(fmodel,cmodel,glue)
+
+    Ωf  = Triangulation(amodel)
+    dΩf = Measure(Ωf,2)
+    Vf  = FESpace(amodel,reffe)
+    Uf  = TrialFESpace(Vf)
+    uh_fine = interpolate(sol,Vf)
+
+    Ωc  = Triangulation(cmodel)
+    dΩc = Measure(Ωc,2)
+    Vc  = FESpace(cmodel,reffe)
+    Uc  = TrialFESpace(Vc)
+    uh_coarse = interpolate(sol,Vc)
+
+    dΩcf = Measure(Ωc,Ωf,2)
+
+    # Coarse to fine projection
+    af(u,v) = ∫(u⋅v)*dΩf
+    lf(v) = ∫(uh_coarse*v)*dΩf
+    op = AffineFEOperator(af,lf,Uf,Vf)
+    uh_coarse_to_fine = solve(op)
+
+    eh = uh_fine - uh_coarse_to_fine
+    @test sum(∫(eh⋅eh)*dΩf) < 1e-8
+
+    # Fine to coarse projection
+    ac(u,v) = ∫(u⋅v)*dΩc
+    lc(v) = ∫(uh_fine*v)*dΩcf
+    op = AffineFEOperator(ac,lc,Uc,Vc)
+    uh_fine_to_coarse = solve(op)
+
+    eh = uh_coarse - uh_fine_to_coarse
+    @test sum(∫(eh⋅eh)*dΩc) < 1e-8
+  end
+  return true
+end
+
+############################################################################################
+
+function main(distribute)
+  fine_ranks = distribute(LinearIndices((4,)))
+  coarse_ranks = generate_subparts(fine_ranks,2)
+
+  # Create models and glues
+  serial_parent = UnstructuredDiscreteModel(CartesianDiscreteModel((0,1,0,1),(4,4)))
+  serial_child = refine(serial_parent)
+  serial_rglue = get_adaptivity_glue(serial_child)
+
+  parent_cell_to_part = [1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2]
+  child_cell_to_part = lazy_map(Reindex(parent_cell_to_part),serial_rglue.n2o_faces_map[3])
+  if i_am_in(coarse_ranks)
+    parent = DiscreteModel(coarse_ranks,serial_parent,parent_cell_to_part)
+    child = DiscreteModel(coarse_ranks,serial_child,child_cell_to_part)
+    coarse_adaptivity_glue = DistributedAdaptivityGlue(serial_rglue,parent,child)
+  else
+    parent = nothing; child = nothing; coarse_adaptivity_glue = nothing
+  end
+
+  redist_parent_cell_to_part = [1,1,2,2,1,1,2,2,3,3,4,4,3,3,4,4]
+  redist_parent = DiscreteModel(fine_ranks,serial_parent,redist_parent_cell_to_part)
+  redist_glue_parent = get_redistribute_glue(coarse_ranks,fine_ranks,parent_cell_to_part,redist_parent_cell_to_part,parent,redist_parent);
+
+  redist_child_cell_to_part = lazy_map(Reindex(redist_parent_cell_to_part),serial_rglue.n2o_faces_map[3])
+  redist_child = DiscreteModel(fine_ranks,serial_child,redist_child_cell_to_part)
+  fine_adaptivity_glue = DistributedAdaptivityGlue(serial_rglue,redist_parent,redist_child)
+  redist_glue_child = get_redistribute_glue(coarse_ranks,fine_ranks,child_cell_to_part,redist_child_cell_to_part,child,redist_child);
+
+  # Tests
+  test_redistribution(coarse_ranks,fine_ranks,parent,redist_parent,redist_glue_parent)
+  test_redistribution(coarse_ranks,fine_ranks,child,redist_child,redist_glue_child)
+
+  test_adaptivity(coarse_ranks,parent,child,coarse_adaptivity_glue)
+  test_adaptivity(fine_ranks,redist_parent,redist_child,fine_adaptivity_glue)
+
+  return
+end
+
+end # module AdaptivityTests
\ No newline at end of file
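The new test requires exactly four MPI ranks (see the `prod(parts) == 4` guard below). One way to launch it from the Julia REPL, using MPI.jl's `mpiexec` helper so the MPIPreferences setup from above is honored (the driver path `test/mpi/runtests.jl` is a placeholder; adjust it to the actual entry point of your checkout):

```julia
using MPI

# Launch the 4-rank MPI test suite with the configured mpiexec.
MPI.mpiexec() do cmd
  run(`$cmd -n 4 $(Base.julia_cmd()) --project=. test/mpi/runtests.jl`)
end
```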
include("../../HeatEquationTests.jl") include("../../StokesOpenBoundaryTests.jl") + include("../../AdaptivityTests.jl") end \ No newline at end of file diff --git a/test/mpi/runtests_np4_body.jl b/test/mpi/runtests_np4_body.jl index a1a19d03..0e8fbb6a 100644 --- a/test/mpi/runtests_np4_body.jl +++ b/test/mpi/runtests_np4_body.jl @@ -40,5 +40,10 @@ function all_tests(distribute,parts) TestApp.StokesOpenBoundaryTests.main(distribute,parts) PArrays.toc!(t,"StokesOpenBoundary") + if prod(parts) == 4 + TestApp.AdaptivityTests.main(distribute) + PArrays.toc!(t,"Adaptivity") + end + display(t) end