Merge pull request #648 from streeve/fixup_ex_comm_rank
Use Cajita comm rank in examples
streeve authored Jul 6, 2023
2 parents 352487c + 33f3163 commit 6d490ad
Showing 6 changed files with 48 additions and 51 deletions.
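
Every file below makes the same substitution: rather than calling MPI_Comm_rank on MPI_COMM_WORLD before any grid exists, each example now creates the Cajita global grid first and takes the rank used for printing from global_grid->blockId(). A minimal, self-contained sketch of the resulting pattern follows; the mesh extents, cell counts, periodicity, and DimBlockPartitioner choice are illustrative assumptions drawn from the surrounding tutorial code, not part of this diff.

#include <Cajita.hpp>
#include <Kokkos_Core.hpp>

#include <mpi.h>

#include <array>
#include <iostream>

void commRankExample()
{
    // Illustrative mesh: a unit cube with 10 cells per dimension (arbitrary sizes).
    std::array<double, 3> low_corner = { 0.0, 0.0, 0.0 };
    std::array<double, 3> high_corner = { 1.0, 1.0, 1.0 };
    std::array<int, 3> num_cell = { 10, 10, 10 };
    auto global_mesh =
        Cajita::createUniformGlobalMesh( low_corner, high_corner, num_cell );

    // Partition the mesh across all MPI ranks and make every dimension periodic.
    Cajita::DimBlockPartitioner<3> partitioner;
    std::array<bool, 3> is_dim_periodic = { true, true, true };

    // Create the global grid, then take the printing rank from it rather than
    // calling MPI_Comm_rank( MPI_COMM_WORLD, ... ) before the grid exists.
    auto global_grid = Cajita::createGlobalGrid( MPI_COMM_WORLD, global_mesh,
                                                 is_dim_periodic, partitioner );
    int comm_rank = global_grid->blockId();
    if ( comm_rank == 0 )
        std::cout << "Cajita comm rank example\n" << std::endl;
}

int main( int argc, char* argv[] )
{
    MPI_Init( &argc, &argv );
    Kokkos::initialize( argc, argv );
    commRankExample();
    Kokkos::finalize();
    MPI_Finalize();
    return 0;
}

Taking the rank from the grid keeps the printed rank consistent with the grid's own block decomposition, regardless of whether the grid's internal communicator reorders ranks relative to MPI_COMM_WORLD.
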
16 changes: 8 additions & 8 deletions example/cajita_tutorial/04_global_grid/global_grid_example.cpp
@@ -23,14 +23,6 @@ void globalGridExample()
       physical size and characteristics of the mesh. The global grid accordingly
       defines indexing throughout the entire mesh domain.
     */
-    int comm_rank = -1;
-    MPI_Comm_rank( MPI_COMM_WORLD, &comm_rank );
-
-    if ( comm_rank == 0 )
-    {
-        std::cout << "Cajita Global Grid Example" << std::endl;
-        std::cout << " (intended to be run with MPI)\n" << std::endl;
-    }
 
     /*
       Both the global mesh and partitioning information are necessary to create
@@ -62,6 +54,14 @@ void globalGridExample()
     auto global_grid = Cajita::createGlobalGrid( MPI_COMM_WORLD, global_mesh,
                                                  is_dim_periodic, partitioner );
 
+    // Get the current rank for printing output.
+    int comm_rank = global_grid->blockId();
+    if ( comm_rank == 0 )
+    {
+        std::cout << "Cajita Global Grid Example" << std::endl;
+        std::cout << " (intended to be run with MPI)\n" << std::endl;
+    }
+
     /*
       Now extract grid details that are the same globally (on each MPI rank):
       periodicity, number of blocks (MPI decomposition) in total and per
16 changes: 8 additions & 8 deletions example/cajita_tutorial/06_local_grid/local_grid_example.cpp
@@ -31,14 +31,6 @@ void localGridExample()
       Cajita subpackage as application users will likely interact with it the
       most and it includes interfaces to all other grid/mesh classes.
     */
-    int comm_rank = -1;
-    MPI_Comm_rank( MPI_COMM_WORLD, &comm_rank );
-
-    if ( comm_rank == 0 )
-    {
-        std::cout << "Cajita Local Grid Example" << std::endl;
-        std::cout << " (intended to be run with MPI)\n" << std::endl;
-    }
 
     // Here we partition only in x to simplify the example below.
     int comm_size;
@@ -58,6 +50,14 @@ void localGridExample()
     auto global_grid = Cajita::createGlobalGrid( MPI_COMM_WORLD, global_mesh,
                                                  is_dim_periodic, partitioner );
 
+    // Get the current rank for printing output.
+    int comm_rank = global_grid->blockId();
+    if ( comm_rank == 0 )
+    {
+        std::cout << "Cajita Local Grid Example" << std::endl;
+        std::cout << " (intended to be run with MPI)\n" << std::endl;
+    }
+
     /*
       We create the local grid from the global grid and a halo width -
       the number of cells communicated with MPI neighbor ranks. The halo width
17 changes: 8 additions & 9 deletions example/cajita_tutorial/07_local_mesh/local_mesh_example.cpp
@@ -32,15 +32,6 @@ void localMeshExample()
       local mesh, including ghost information.
     */
 
-    int comm_rank = -1;
-    MPI_Comm_rank( MPI_COMM_WORLD, &comm_rank );
-
-    if ( comm_rank == 0 )
-    {
-        std::cout << "Cajita Local Mesh Example" << std::endl;
-        std::cout << " (intended to be run with MPI)\n" << std::endl;
-    }
-
     using exec_space = Kokkos::DefaultHostExecutionSpace;
     using device_type = exec_space::device_type;
 
@@ -65,6 +56,14 @@ void localMeshExample()
     auto global_grid = Cajita::createGlobalGrid( MPI_COMM_WORLD, global_mesh,
                                                  is_dim_periodic, partitioner );
 
+    // Get the current rank for printing output.
+    int comm_rank = global_grid->blockId();
+    if ( comm_rank == 0 )
+    {
+        std::cout << "Cajita Local Mesh Example" << std::endl;
+        std::cout << " (intended to be run with MPI)\n" << std::endl;
+    }
+
     // Create a local grid
     int halo_width = 1;
     auto local_grid = Cajita::createLocalGrid( global_grid, halo_width );
17 changes: 8 additions & 9 deletions example/cajita_tutorial/08_array/array_example.cpp
@@ -29,15 +29,6 @@ void arrayExample()
       data itself and may only be defined on a single entity type.
     */
 
-    int comm_rank = -1;
-    MPI_Comm_rank( MPI_COMM_WORLD, &comm_rank );
-
-    if ( comm_rank == 0 )
-    {
-        std::cout << "Cajita Array Example" << std::endl;
-        std::cout << " (intended to be run with MPI)\n" << std::endl;
-    }
-
     using exec_space = Kokkos::DefaultHostExecutionSpace;
     using device_type = exec_space::device_type;
 
@@ -69,6 +60,14 @@ void arrayExample()
     auto global_grid = Cajita::createGlobalGrid( MPI_COMM_WORLD, global_mesh,
                                                  is_dim_periodic, partitioner );
 
+    // Get the current rank for printing output.
+    int comm_rank = global_grid->blockId();
+    if ( comm_rank == 0 )
+    {
+        std::cout << "Cajita Array Example" << std::endl;
+        std::cout << " (intended to be run with MPI)\n" << std::endl;
+    }
+
     /*
       An array layout includes the local grid size information together with the
       dimensionality of the field data, the degrees of freedom on the mesh.
17 changes: 8 additions & 9 deletions example/cajita_tutorial/12_halo/halo_example.cpp
@@ -37,15 +37,6 @@ void gridHaloExample()
       plan and performing both scatter and gather operations.
     */
 
-    int comm_rank = -1;
-    MPI_Comm_rank( MPI_COMM_WORLD, &comm_rank );
-
-    if ( comm_rank == 0 )
-    {
-        std::cout << "Cajita Grid Halo Example" << std::endl;
-        std::cout << " (intended to be run with MPI)\n" << std::endl;
-    }
-
     using exec_space = Kokkos::DefaultHostExecutionSpace;
     using device_type = exec_space::device_type;
 
@@ -81,6 +72,14 @@ void gridHaloExample()
     auto global_grid = createGlobalGrid( MPI_COMM_WORLD, global_mesh,
                                          is_dim_periodic, partitioner );
 
+    // Get the current rank for printing output.
+    int comm_rank = global_grid->blockId();
+    if ( comm_rank == 0 )
+    {
+        std::cout << "Cajita Grid Halo Example" << std::endl;
+        std::cout << " (intended to be run with MPI)\n" << std::endl;
+    }
+
     /*
       Here the halo width allocated for the system is not necessarily always
      fully communicated - we can use any integer value from zero to the value
16 changes: 8 additions & 8 deletions
@@ -61,14 +61,6 @@ void loadBalancerExample()
      * application using Cajita without load balancing; the comments will be
      * focused on the additions/changes due to including the load balancer.
      */
-    int comm_rank = -1;
-    MPI_Comm_rank( MPI_COMM_WORLD, &comm_rank );
-
-    if ( comm_rank == 0 )
-    {
-        std::cout << "Cajita Load Balancer Example" << std::endl;
-        std::cout << " (intended to be run with MPI)\n" << std::endl;
-    }
 
     /*
      * The example system is 2D and its size based on the number of ranks. Every
@@ -99,6 +91,14 @@ void loadBalancerExample()
     auto global_grid = Cajita::createGlobalGrid( MPI_COMM_WORLD, global_mesh,
                                                  is_dim_periodic, partitioner );
 
+    // Get the current rank for printing output.
+    int comm_rank = global_grid->blockId();
+    if ( comm_rank == 0 )
+    {
+        std::cout << "Cajita Load Balancer Example" << std::endl;
+        std::cout << " (intended to be run with MPI)\n" << std::endl;
+    }
+
     /*
      * The load balancer is initialized using the global grid and MPI
      * communicator. The additional minimum domain size is optional and, if
