Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update format action #636

Closed
wants to merge 3 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/format.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ jobs:
edit:
name: clang-format
if: ${{ github.event.comment.body == 'format this please' }}
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
env:
CLANG_FORMAT: clang-format
steps:
Expand Down
3 changes: 1 addition & 2 deletions benchmark/cajita/Cajita_SparsePartitionerPerformance.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -42,8 +42,7 @@ int current = 0;
int uniqueNumber() { return current++; }

Kokkos::View<int* [3], Kokkos::HostSpace>
generateRandomTileSequence( int tiles_per_dim )
{
generateRandomTileSequence( int tiles_per_dim ) {
Kokkos::View<int* [3], Kokkos::HostSpace> tiles_host(
"random_tile_sequence_host",
tiles_per_dim * tiles_per_dim * tiles_per_dim );
Expand Down
4 changes: 2 additions & 2 deletions cajita/src/Cajita_Array.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -300,8 +300,8 @@ class Array

public:
//! Subview type.
using subview_type = decltype( createSubview(
_data, _layout->indexSpace( Ghost(), Local() ) ) );
using subview_type = decltype(
createSubview( _data, _layout->indexSpace( Ghost(), Local() ) ) );
//! Subview array layout type.
using subview_layout = typename subview_type::array_layout;
//! Subview memory traits.
Expand Down
6 changes: 2 additions & 4 deletions cajita/src/Cajita_GlobalGrid_impl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -49,16 +49,14 @@ GlobalGrid<MeshType>::GlobalGrid(
// Duplicate the communicator and store in a std::shared_ptr so that
// all copies point to the same object
[comm, num_space_dim_copy, ranks_per_dim_copy, periodic_dims_copy,
reorder_cart_ranks]()
{
reorder_cart_ranks]() {
auto p = std::make_unique<MPI_Comm>();
MPI_Cart_create( comm, num_space_dim_copy, ranks_per_dim_copy,
periodic_dims_copy, reorder_cart_ranks, p.get() );
return p.release();
}(),
// Custom deleter to mark the communicator for deallocation
[]( MPI_Comm* p )
{
[]( MPI_Comm* p ) {
MPI_Comm_free( p );
delete p;
} );
Expand Down
6 changes: 2 additions & 4 deletions cajita/src/Cajita_Halo.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -228,8 +228,7 @@ class Halo
auto local_grid = getLocalGrid( arrays... );

// Function to get the local id of the neighbor.
auto neighbor_id = []( const std::array<int, num_space_dim>& ijk )
{
auto neighbor_id = []( const std::array<int, num_space_dim>& ijk ) {
int id = ijk[0];
for ( std::size_t d = 1; d < num_space_dim; ++d )
id += num_space_dim * id + ijk[d];
Expand All @@ -238,8 +237,7 @@ class Halo

// Neighbor id flip function. This lets us compute what neighbor we
// are relative to a given neighbor.
auto flip_id = [=]( const std::array<int, num_space_dim>& ijk )
{
auto flip_id = [=]( const std::array<int, num_space_dim>& ijk ) {
std::array<int, num_space_dim> flip_ijk;
for ( std::size_t d = 0; d < num_space_dim; ++d )
flip_ijk[d] = -ijk[d];
Expand Down
4 changes: 2 additions & 2 deletions cajita/src/Cajita_LocalGrid_impl.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -1040,7 +1040,7 @@ auto LocalGrid<MeshType>::faceSharedIndexSpace(
template <class MeshType>
template <int Dir, std::size_t NSD>
std::enable_if_t<3 == NSD, IndexSpace<3>>
LocalGrid<MeshType>::edgeIndexSpace( Own, Edge<Dir>, Local ) const
LocalGrid<MeshType>::edgeIndexSpace( Own, Edge<Dir>, Local ) const
{
// Compute the lower bound.
std::array<long, 3> min;
Expand Down Expand Up @@ -1073,7 +1073,7 @@ LocalGrid<MeshType>::edgeIndexSpace( Own, Edge<Dir>, Local ) const
template <class MeshType>
template <int Dir, std::size_t NSD>
std::enable_if_t<3 == NSD, IndexSpace<3>>
LocalGrid<MeshType>::edgeIndexSpace( Ghost, Edge<Dir>, Local ) const
LocalGrid<MeshType>::edgeIndexSpace( Ghost, Edge<Dir>, Local ) const
{
// Compute the size.
std::array<long, 3> size;
Expand Down
12 changes: 4 additions & 8 deletions cajita/unit_test/tstHalo2d.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -58,8 +58,7 @@ void checkGather( const std::array<bool, 2>& is_dim_periodic,

// This function checks if an index is in the low boundary halo in the
// given dimension
auto in_boundary_min_halo = [&]( const int i, const int dim )
{
auto in_boundary_min_halo = [&]( const int i, const int dim ) {
if ( is_dim_periodic[dim] || !global_grid.onLowBoundary( dim ) )
return false;
else
Expand All @@ -68,8 +67,7 @@ void checkGather( const std::array<bool, 2>& is_dim_periodic,

// This function checks if an index is in the high boundary halo in the
// given dimension
auto in_boundary_max_halo = [&]( const int i, const int dim )
{
auto in_boundary_max_halo = [&]( const int i, const int dim ) {
if ( is_dim_periodic[dim] || !global_grid.onHighBoundary( dim ) )
return false;
else
Expand Down Expand Up @@ -116,8 +114,7 @@ void checkScatter( const std::array<bool, 2>& is_dim_periodic,

// This function checks if an index is in the halo of a low neighbor in
// the given dimension
auto in_dim_min_halo = [&]( const int i, const int dim )
{
auto in_dim_min_halo = [&]( const int i, const int dim ) {
if ( is_dim_periodic[dim] || global_grid.dimBlockId( dim ) > 0 )
return i < ( owned_space.min( dim ) + halo_width +
haloPad( typename Array::entity_type(), dim ) );
Expand All @@ -127,8 +124,7 @@ void checkScatter( const std::array<bool, 2>& is_dim_periodic,

// This function checks if an index is in the halo of a high neighbor in
// the given dimension
auto in_dim_max_halo = [&]( const int i, const int dim )
{
auto in_dim_max_halo = [&]( const int i, const int dim ) {
if ( is_dim_periodic[dim] || global_grid.dimBlockId( dim ) <
global_grid.dimNumBlock( dim ) - 1 )
return i >= ( owned_space.max( dim ) - halo_width );
Expand Down
12 changes: 4 additions & 8 deletions cajita/unit_test/tstHalo3d.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -65,8 +65,7 @@ void checkGather( const std::array<bool, 3>& is_dim_periodic,

// This function checks if an index is in the low boundary halo in the
// given dimension
auto in_boundary_min_halo = [&]( const int i, const int dim )
{
auto in_boundary_min_halo = [&]( const int i, const int dim ) {
if ( is_dim_periodic[dim] || !global_grid.onLowBoundary( dim ) )
return false;
else
Expand All @@ -75,8 +74,7 @@ void checkGather( const std::array<bool, 3>& is_dim_periodic,

// This function checks if an index is in the high boundary halo in the
// given dimension
auto in_boundary_max_halo = [&]( const int i, const int dim )
{
auto in_boundary_max_halo = [&]( const int i, const int dim ) {
if ( is_dim_periodic[dim] || !global_grid.onHighBoundary( dim ) )
return false;
else
Expand Down Expand Up @@ -131,8 +129,7 @@ void checkScatter( const std::array<bool, 3>& is_dim_periodic,

// This function checks if an index is in the halo of a low neighbor in
// the given dimension
auto in_dim_min_halo = [&]( const int i, const int dim )
{
auto in_dim_min_halo = [&]( const int i, const int dim ) {
if ( is_dim_periodic[dim] || global_grid.dimBlockId( dim ) > 0 )
return i < ( owned_space.min( dim ) + halo_width +
haloPad( typename Array::entity_type(), dim ) );
Expand All @@ -142,8 +139,7 @@ void checkScatter( const std::array<bool, 3>& is_dim_periodic,

// This function checks if an index is in the halo of a high neighbor in
// the given dimension
auto in_dim_max_halo = [&]( const int i, const int dim )
{
auto in_dim_max_halo = [&]( const int i, const int dim ) {
if ( is_dim_periodic[dim] || global_grid.dimBlockId( dim ) <
global_grid.dimNumBlock( dim ) - 1 )
return i >= ( owned_space.max( dim ) - halo_width );
Expand Down
6 changes: 2 additions & 4 deletions cajita/unit_test/tstIndexConversion.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -112,8 +112,7 @@ void testConversion3d( const std::array<bool, 3>& is_dim_periodic )
Kokkos::create_mirror_view_and_copy( Kokkos::HostSpace(), index_view );
auto l2g_view_host =
Kokkos::create_mirror_view_and_copy( Kokkos::HostSpace(), l2g_view );
auto check_results = [&]( const IndexSpace<3>& space )
{
auto check_results = [&]( const IndexSpace<3>& space ) {
for ( int i = space.min( Dim::I ); i < space.max( Dim::I ); ++i )
for ( int j = space.min( Dim::J ); j < space.max( Dim::J ); ++j )
for ( int k = space.min( Dim::K ); k < space.max( Dim::K );
Expand Down Expand Up @@ -207,8 +206,7 @@ void testConversion2d( const std::array<bool, 2>& is_dim_periodic )
Kokkos::create_mirror_view_and_copy( Kokkos::HostSpace(), index_view );
auto l2g_view_host =
Kokkos::create_mirror_view_and_copy( Kokkos::HostSpace(), l2g_view );
auto check_results = [&]( const IndexSpace<2>& space )
{
auto check_results = [&]( const IndexSpace<2>& space ) {
for ( int i = space.min( Dim::I ); i < space.max( Dim::I ); ++i )
for ( int j = space.min( Dim::J ); j < space.max( Dim::J ); ++j )
for ( int d = 0; d < 2; ++d )
Expand Down
10 changes: 6 additions & 4 deletions cajita/unit_test/tstLocalMesh2d.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -362,10 +362,12 @@ void irregularTest2d( const std::array<int, 2>& ranks_per_dim )
double ref_cell_size = 8.0 * std::atan( 1.0 ) / ncell;
std::array<int, 2> num_cell = { ncell, ncell };

auto i_func = [=]( const int i )
{ return 0.5 * std::cos( i * ref_cell_size ) + low_corner[Dim::I]; };
auto j_func = [=]( const int j )
{ return 2.0 * std::cos( j * ref_cell_size ) + low_corner[Dim::J]; };
auto i_func = [=]( const int i ) {
return 0.5 * std::cos( i * ref_cell_size ) + low_corner[Dim::I];
};
auto j_func = [=]( const int j ) {
return 2.0 * std::cos( j * ref_cell_size ) + low_corner[Dim::J];
};

std::array<std::vector<double>, 2> edges;
for ( int n = 0; n < num_cell[Dim::I] + 1; ++n )
Expand Down
15 changes: 9 additions & 6 deletions cajita/unit_test/tstLocalMesh3d.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -583,12 +583,15 @@ void irregularTest3d( const std::array<int, 3>& ranks_per_dim )
double ref_cell_size = 8.0 * std::atan( 1.0 ) / ncell;
std::array<int, 3> num_cell = { ncell, ncell, ncell };

auto i_func = [=]( const int i )
{ return 0.5 * std::cos( i * ref_cell_size ) + low_corner[Dim::I]; };
auto j_func = [=]( const int j )
{ return 2.0 * std::cos( j * ref_cell_size ) + low_corner[Dim::J]; };
auto k_func = [=]( const int k )
{ return 1.5 * std::cos( k * ref_cell_size ) + low_corner[Dim::K]; };
auto i_func = [=]( const int i ) {
return 0.5 * std::cos( i * ref_cell_size ) + low_corner[Dim::I];
};
auto j_func = [=]( const int j ) {
return 2.0 * std::cos( j * ref_cell_size ) + low_corner[Dim::J];
};
auto k_func = [=]( const int k ) {
return 1.5 * std::cos( k * ref_cell_size ) + low_corner[Dim::K];
};

std::array<std::vector<double>, 3> edges;
for ( int n = 0; n < num_cell[Dim::I] + 1; ++n )
Expand Down
41 changes: 20 additions & 21 deletions cajita/unit_test/tstSparseArray.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -78,25 +78,24 @@ generate_random_partition( std::array<int, 3> ranks_per_dim,
// convert std::set to device-side view
template <typename T>
auto set2view( const std::set<std::array<T, 3>>& in_set )
-> Kokkos::View<T* [3], TEST_MEMSPACE>
{
// set => view (host)
typedef typename TEST_EXECSPACE::array_layout layout;
Kokkos::View<T* [3], layout, Kokkos::HostSpace> host_view( "view_host",
in_set.size() );
int i = 0;
for ( auto it = in_set.begin(); it != in_set.end(); ++it )
{
for ( int d = 0; d < 3; ++d )
host_view( i, d ) = ( *it )[d];
++i;
}
-> Kokkos::View<T* [3], TEST_MEMSPACE> {
// set => view (host)
typedef typename TEST_EXECSPACE::array_layout layout;
Kokkos::View<T* [3], layout, Kokkos::HostSpace> host_view(
"view_host", in_set.size() );
int i = 0;
for ( auto it = in_set.begin(); it != in_set.end(); ++it )
{
for ( int d = 0; d < 3; ++d )
host_view( i, d ) = ( *it )[d];
++i;
}

// create tiles view on device
Kokkos::View<T* [3], TEST_MEMSPACE> dev_view =
Kokkos::create_mirror_view_and_copy( TEST_MEMSPACE(), host_view );
return dev_view;
}
// create tiles view on device
Kokkos::View<T* [3], TEST_MEMSPACE> dev_view =
Kokkos::create_mirror_view_and_copy( TEST_MEMSPACE(), host_view );
return dev_view;
}

// return random generated particles and occupied tile numbers (last two params)
template <typename T>
Expand All @@ -118,9 +117,9 @@ void generate_random_particles( const int particle_number,
// all the activated tiles sit inside the valid partition range
start[d] = global_low_corner[d] +
cell_size * ( 2.01f + cell_per_tile_dim * (T)part_start[d] );
size[d] =
cell_size *
( cell_per_tile_dim * (T)( part_end[d] - part_start[d] ) - 4.02f );
size[d] = cell_size *
( cell_per_tile_dim * ( T )( part_end[d] - part_start[d] ) -
4.02f );
}

// insert random particles to the set
Expand Down
13 changes: 5 additions & 8 deletions cajita/unit_test/tstSparseDimPartitioner.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -163,12 +163,10 @@ void uniform_distribution_automatic_rank()
EXPECT_FLOAT_EQ( imbalance_factor, gt_imbalance_factor );
}

auto generate_random_tiles( const std::array<std::vector<int>, 3>& gt_partition,
const Kokkos::Array<int, 3>& cart_rank,
const int size_tile_per_dim,
int occupy_tile_num_per_rank )
-> Kokkos::View<int* [3], TEST_MEMSPACE>
{
auto generate_random_tiles(
const std::array<std::vector<int>, 3>& gt_partition,
const Kokkos::Array<int, 3>& cart_rank, const int size_tile_per_dim,
int occupy_tile_num_per_rank ) -> Kokkos::View<int* [3], TEST_MEMSPACE> {
// register valid tiles in each MPI rank
// compute the sub-domain size (divided by the ground truth partition)
const int area_size = size_tile_per_dim * size_tile_per_dim;
Expand Down Expand Up @@ -223,8 +221,7 @@ auto generate_random_particles(
const std::array<std::vector<int>, 3>& gt_partition,
const Kokkos::Array<int, 3>& cart_rank, int occupy_par_num_per_rank,
const std::array<double, 3> global_low_corner, double dx,
int cell_num_per_tile_dim ) -> Kokkos::View<double* [3], TEST_MEMSPACE>
{
int cell_num_per_tile_dim ) -> Kokkos::View<double* [3], TEST_MEMSPACE> {
std::set<std::array<double, 3>> par_set;

double start[3], size[3];
Expand Down
17 changes: 7 additions & 10 deletions core/src/Cabana_CommunicationPlan.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -179,8 +179,7 @@ auto countSendsAndCreateSteering( const ExportRankView element_export_ranks,
// reserve space in global array via a loop over neighbor counts
Kokkos::parallel_for(
Kokkos::TeamThreadRange( team, comm_size ),
[&]( const int i )
{
[&]( const int i ) {
// global memory atomic add, reserves space
global_offset[i] = Kokkos::atomic_fetch_add(
&neighbor_counts( i ), histo[i] );
Expand Down Expand Up @@ -295,8 +294,9 @@ auto countSendsAndCreateSteering( const ExportRankView element_export_ranks,
Kokkos::parallel_reduce(
Kokkos::TeamThreadRange( team,
neighbor_counts_dup.extent( 0 ) ),
[&]( const index_type thread_id, int& result )
{ result += neighbor_counts_dup( thread_id, i ); },
[&]( const index_type thread_id, int& result ) {
result += neighbor_counts_dup( thread_id, i );
},
thread_counts );
neighbor_counts( i ) = thread_counts;
} );
Expand All @@ -321,8 +321,7 @@ auto countSendsAndCreateSteering( const ExportRankView element_export_ranks,
Kokkos::parallel_reduce(
Kokkos::TeamThreadRange( team,
neighbor_ids_dup.extent( 0 ) ),
[&]( const index_type thread_id, index_type& result )
{
[&]( const index_type thread_id, index_type& result ) {
if ( neighbor_ids_dup( thread_id, i ) > 0 )
result += thread_id;
},
Expand Down Expand Up @@ -435,15 +434,13 @@ class CommunicationPlan
_comm_ptr.reset(
// Duplicate the communicator and store in a std::shared_ptr so that
// all copies point to the same object
[comm]()
{
[comm]() {
auto p = std::make_unique<MPI_Comm>();
MPI_Comm_dup( comm, p.get() );
return p.release();
}(),
// Custom deleter to mark the communicator for deallocation
[]( MPI_Comm* p )
{
[]( MPI_Comm* p ) {
MPI_Comm_free( p );
delete p;
} );
Expand Down
Loading