From c79cee33c07f71130a025d8928406f0de2c49df6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20T=C3=B3th?= Date: Tue, 5 Nov 2024 09:27:41 +0100 Subject: [PATCH 01/14] Add functions that process output from Zoltan --- opm/grid/GraphOfGrid.cpp | 2 + opm/grid/GraphOfGridWrappers.hpp | 329 ++++++++++++++++++++++++++++--- tests/test_graphofgrid.cpp | 27 ++- 3 files changed, 320 insertions(+), 38 deletions(-) diff --git a/opm/grid/GraphOfGrid.cpp b/opm/grid/GraphOfGrid.cpp index daa4b7e69..5d723c265 100644 --- a/opm/grid/GraphOfGrid.cpp +++ b/opm/grid/GraphOfGrid.cpp @@ -31,10 +31,12 @@ namespace Opm{ template void GraphOfGrid::createGraph () { + const auto& rank = grid.comm().rank(); // load vertices (grid cells) into graph for (auto it=grid.template leafbegin<0>(); it!=grid.template leafend<0>(); ++it) { VertexProperties vertex; + vertex.nproc = rank; // get vertex's global ID int gID = grid.globalIdSet().id(*it); diff --git a/opm/grid/GraphOfGridWrappers.hpp b/opm/grid/GraphOfGridWrappers.hpp index 4eb6ff871..483b7620c 100644 --- a/opm/grid/GraphOfGridWrappers.hpp +++ b/opm/grid/GraphOfGridWrappers.hpp @@ -29,6 +29,7 @@ #include #include #include +#include namespace Opm { /* @@ -37,11 +38,6 @@ namespace Opm { Additionally, parsing wells is done here. */ -#define ZOLTAN_OK 1 -#define ZOLTAN_FATAL 0 -namespace { - using ZOLTAN_ID_PTR = int*; -} /// \brief callback function for ZOLTAN_NUM_OBJ_FN /// @@ -183,7 +179,7 @@ void setGraphOfGridZoltanGraphFunctions(Zoltan_Struct *zz, /// responsible for keeping wells disjoint. void addFutureConnectionWells (GraphOfGrid& gog, const std::unordered_map>& wells, - bool checkWellIntersections=true) + bool checkWellIntersections=true) { // create compressed lookup from cartesian. const auto& grid = gog.getGrid(); @@ -219,7 +215,28 @@ void addWellConnections (GraphOfGrid& gog, { for (const auto& w : wells) { - gog.addWell(w,checkWellIntersections); + gog.addWell(w,checkWellIntersections); + } +} + +/// \brief Correct gIDtoRank's data about well cells +/// +/// gIDtoRank is constructed with default value thisRank, other entries +/// come from Zoltan partitioner's export list that does not contain all well cells +void extendGIDtoRank (const GraphOfGrid& gog, + std::vector& gIDtoRank, + const int& thisRank = -1) +{ + for (const auto& w : gog.getWells()) + { + auto wellID = *w.begin(); + if (gIDtoRank[wellID]!=thisRank) + { + for (const auto& gID : w) + { + gIDtoRank.at(gID) = gIDtoRank.at(wellID); + } + } } } @@ -228,9 +245,15 @@ void addWellConnections (GraphOfGrid& gog, /// Output of the partitioning is missing vertices that were contracted. /// This function fills in omitted gIDs and gives them the properties /// (like process number and ownership) of their representative cell (well ID). +/// It is possible to skip wells on a given rank, because import and export +/// lists are expanded by all cells on a current rank in makeImportAndExportLists. +/// Knowing gIDtoRank speeds up the process, wells can be skipped sooner. +/// TheTuple is for ExportList and for ImportList. 
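+/// \param gog Graph of the grid, supplies the list of (contracted) wells
+/// \param cellList Import or export list that is extended in place
+/// \param skippedRank Wells assigned to this rank are not expanded (-1 expands all)
+/// \param gIDtoRank Maps global ID to rank; optional, lets wells be skipped sooner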
template void extendImportExportList (const GraphOfGrid& gog, - std::vector& cellList) + std::vector& cellList, + int skippedRank=-1, + const std::vector& gIDtoRank={}) { // using TheTuple = std::tuple; or std::tuple using CellList = std::vector; @@ -239,7 +262,22 @@ void extendImportExportList (const GraphOfGrid& gog, std::unordered_map> wellMap; for (const auto& well : gog.getWells()) { - wellMap[*well.begin()] = std::make_tuple(well.begin(),well.end()); + if (gIDtoRank.size()>0) + { + auto wellID = *well.begin(); + if (gIDtoRank.at(wellID)!=skippedRank) + { + wellMap[wellID] = std::make_tuple(well.begin(),well.end()); + } + } + else + { + wellMap[*well.begin()] = std::make_tuple(well.begin(),well.end()); + } + } + if (wellMap.size()==0) + { + return; } CellList addToList; @@ -250,31 +288,276 @@ void extendImportExportList (const GraphOfGrid& gog, auto pWell = wellMap.find(std::get<0>(cellProperties)); if (pWell!=wellMap.end()) { - const auto& [begin,end] = std::pair(std::get<0>(pWell->second),std::get<1>(pWell->second)); - for (auto pgID = begin; pgID!=end; ++pgID) + if (std::get<1>(cellProperties)!=skippedRank) { - // cells in one well have the same attributes (except ID) - if (*pgID!=std::get<0>(cellProperties)) // avoid adding cell that is already in the list + const auto& [begin,end] = std::pair(std::get<0>(pWell->second),std::get<1>(pWell->second)); + for (auto pgID = begin; pgID!=end; ++pgID) { - TheTuple wellCell = cellProperties; - std::get<0>(wellCell) = *pgID; - addToList.push_back(wellCell); + // cells in one well have the same attributes (except ID) + if (*pgID!=std::get<0>(cellProperties)) // avoid adding cell that is already in the list + { + TheTuple wellCell = cellProperties; + std::get<0>(wellCell) = *pgID; + addToList.push_back(wellCell); + } } } wellMap.erase(pWell); - } - - if (wellMap.empty()) - { - break; + if (wellMap.empty()) + { + break; + } } } - std::sort(addToList.begin(),addToList.end(),[](const auto& a, const auto& b){return std::get<0>(a)(b);}); + + // add new cells to the cellList and sort it. It is assumed that cellList starts sorted. + auto compareTuple = [](const auto& a, const auto& b){return std::get<0>(a)(b);}; + std::sort(addToList.begin(),addToList.end(),compareTuple); auto origSize = cellList.size(); auto totsize = origSize+addToList.size(); cellList.reserve(totsize); cellList.insert(cellList.end(),addToList.begin(),addToList.end()); - std::inplace_merge(cellList.begin(),cellList.begin()+origSize,cellList.end()); + std::inplace_merge(cellList.begin(),cellList.begin()+origSize,cellList.end(),compareTuple); +} + +/// \brief Find to which ranks wells were assigned +/// +/// returns the vector of ranks, ordering is given by wellConnections +/// \param gIDtoRank Takes global ID and returns rank +/// \param wellConnections Has global IDs of well cells +std::vector getWellRanks(const std::vector& gIDtoRank, + const Dune::cpgrid::WellConnections& wellConnections) +{ + std::vector wellIndices(wellConnections.size()); + for (std::size_t wellIndex = 0; wellIndex < wellConnections.size(); ++wellIndex) + { + int wellID = *(wellConnections[wellIndex].begin()); + wellIndices[wellIndex] = gIDtoRank[wellID]; + } + return wellIndices; +} + +/// \brief Get rank-specific information about which wells are present +/// +/// \param wells Vector of wells containing names (and other...) 
+/// \param wellRanks Tells on which (single) rank is the well placed, +/// ordering in the vector is given by wells +/// \param cc The communication object +/// \param root Rank holding the information about the grid +/// @return vector of pairs string-bool that hold the name of the well +/// and whether it is situated on this process rank +/// +/// This function only gets the information from wellRanks into proper +/// format to call computeParallelWells. +std::vector> +wellsOnThisRank(const std::vector& wells, + const std::vector& wellRanks, + const Dune::cpgrid::CpGridDataTraits::Communication& cc, + int root) +{ + auto numProcs = cc.size(); + std::vector> wells_on_proc(numProcs); + for (std::size_t i=0; i +std::tuple, + std::vector>, + std::vector >, + std::vector > > +makeImportAndExportLists(const GraphOfGrid& gog, + const Dune::Communication& cc, + const std::vector * wells, + const Dune::cpgrid::WellConnections& wellConnections, + int root, + int numExport, + int numImport, + [[maybe_unused]] const Id* exportLocalGids, + const Id* exportGlobalGids, + const int* exportToPart, + const Id* importGlobalGids) +{ + const auto& cpgrid = gog.getGrid(); + int size = cpgrid.numCells(); + int rank = cc.rank(); + std::vector gIDtoRank(size, rank); + std::vector > wellsOnProc; + + // List entry: process to export to, (global) index, process rank, attribute there (not needed?) + std::vector> myExportList(numExport); + // List entry: process to import from, global index, process rank, attribute here, local index (determined later) + std::vector> myImportList(numImport); + myExportList.reserve(1.2*myExportList.size()); + myImportList.reserve(1.2*myImportList.size()); + using AttributeSet = Dune::cpgrid::CpGridData::AttributeSet; + + for ( int i=0; i < numExport; ++i ) + { + gIDtoRank[exportGlobalGids[i]] = exportToPart[i]; + myExportList[i] = std::make_tuple(exportGlobalGids[i], exportToPart[i], static_cast(AttributeSet::owner)); + } + // partitioner sees only one cell per well, modify remaining + extendGIDtoRank(gog,gIDtoRank,rank); + + for ( int i=0; i < numImport; ++i ) + { + myImportList[i] = std::make_tuple(importGlobalGids[i], root, static_cast(AttributeSet::owner),-1); + } + + + // Add cells that stay here to the lists. Somehow I could not persuade Zoltan to do this. + for ( std::size_t i = 0; i < gIDtoRank.size(); ++i) + { + if ( gIDtoRank[i] == rank ) + { + myExportList.emplace_back(i, rank, static_cast(AttributeSet::owner) ); + myImportList.emplace_back(i, rank, static_cast(AttributeSet::owner), -1 ); + } + } + std::inplace_merge(myImportList.begin(), myImportList.begin() + numImport, myImportList.end()); + std::inplace_merge(myExportList.begin(), myExportList.begin() + numExport, myExportList.end()); + + // Complete the lists by adding cells that were contracted in the graph. + // All cells on the rank "rank" were added before. 
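+    // ("rank" is passed as skippedRank: wells staying on this rank were already added in full above)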
+ extendImportExportList(gog,myImportList,rank,gIDtoRank); + extendImportExportList(gog,myExportList,rank,gIDtoRank); + + std::vector> parallel_wells; + if( wells ) + { + auto wellRanks = getWellRanks(gIDtoRank, wellConnections); + parallel_wells = wellsOnThisRank(*wells, wellRanks, cc, root); + } + return std::make_tuple(gIDtoRank, parallel_wells, myExportList, myImportList); +} + +namespace { +void setDefaultZoltanParameters(Zoltan_Struct* zz) +{ + Zoltan_Set_Param(zz, "LB_METHOD", "GRAPH"); + Zoltan_Set_Param(zz, "LB_APPROACH", "PARTITION"); + Zoltan_Set_Param(zz, "NUM_GID_ENTRIES", "1"); + Zoltan_Set_Param(zz, "NUM_LID_ENTRIES", "0"); + Zoltan_Set_Param(zz, "RETURN_LISTS", "ALL"); + Zoltan_Set_Param(zz, "EDGE_WEIGHT_DIM","1"); + Zoltan_Set_Param(zz, "OBJ_WEIGHT_DIM", "1"); + Zoltan_Set_Param(zz, "PHG_EDGE_SIZE_THRESHOLD", ".35"); /* 0-remove all, 1-remove none */ + Zoltan_Set_Param(zz, "DEBUG_LEVEL", "0"); +#ifndef NDEBUG + Zoltan_Set_Param(zz, "CHECK_GRAPH", "2"); +#elif + Zoltan_Set_Param(zz, "CHECK_GRAPH", "0"); +#endif +} + +} // anon namespace + +/// \brief Call Zoltan partitioner on GraphOfGrid +/// +/// GraphOfGrid represents a well by one vertex, so wells can not be +/// spread over several processes. +/// transmissiblities are currently not supported, but are queued +std::tuple, std::vector>, + std::vector >, + std::vector >, + Dune::cpgrid::WellConnections> +zoltanPartitioningWithGraphOfGrid(const Dune::CpGrid& grid, + const std::vector * wells, + const std::unordered_map>& possibleFutureConnections, + [[maybe_unused]] const double* transmissibilities, + const Dune::cpgrid::CpGridDataTraits::Communication& cc, + [[maybe_unused]] Dune::EdgeWeightMethod edgeWeightsMethod, + int root, + const double zoltanImbalanceTol, + const std::map& params) +{ + int rc = ZOLTAN_OK - 1; + float ver = 0; + struct Zoltan_Struct *zz; + int changes, numGidEntries, numLidEntries, numImport, numExport; + ZOLTAN_ID_PTR importGlobalGids, importLocalGids, exportGlobalGids, exportLocalGids; + int *importProcs, *importToPart, *exportProcs, *exportToPart; + int argc=0; + char** argv = 0 ; + rc = Zoltan_Initialize(argc, argv, &ver); + zz = Zoltan_Create(cc); + if ( rc != ZOLTAN_OK ) + { + OPM_THROW(std::runtime_error, "Could not initialize Zoltan!"); + } + setDefaultZoltanParameters(zz); + Zoltan_Set_Param(zz, "IMBALANCE_TOL", std::to_string(zoltanImbalanceTol).c_str()); + for (const auto& [key, value] : params) + Zoltan_Set_Param(zz, key.c_str(), value.c_str()); + + // prepare graph and contract well cells + GraphOfGrid gog(grid); + Dune::cpgrid::WellConnections wellConnections(*wells,possibleFutureConnections,grid); + addWellConnections(gog,wellConnections); + + // call partitioner + setGraphOfGridZoltanGraphFunctions(zz,gog); + rc = Zoltan_LB_Partition(zz, /* input (all remaining fields are output) */ + &changes, /* 1 if partitioning was changed, 0 otherwise */ + &numGidEntries, /* Number of integers used for a global ID */ + &numLidEntries, /* Number of integers used for a local ID */ + &numImport, /* Number of vertices to be sent to me */ + &importGlobalGids, /* Global IDs of vertices to be sent to me */ + &importLocalGids, /* Local IDs of vertices to be sent to me */ + &importProcs, /* Process rank for source of each incoming vertex */ + &importToPart, /* New partition for each incoming vertex */ + &numExport, /* Number of vertices I must send to other processes*/ + &exportGlobalGids, /* Global IDs of the vertices I must send */ + &exportLocalGids, /* Local IDs of the vertices I must send */ + 
&exportProcs, /* Process to which I send each of the vertices */ + &exportToPart); /* Partition to which each vertex will belong */ + + // arrange output into tuples and add well cells + auto importExportLists = makeImportAndExportLists(gog, + cc, + wells, + wellConnections, + root, + numExport, + numImport, + exportLocalGids, + exportGlobalGids, + exportProcs, + importGlobalGids); + + Zoltan_LB_Free_Part(&exportGlobalGids, &exportLocalGids, &exportProcs, &exportToPart); + Zoltan_LB_Free_Part(&importGlobalGids, &importLocalGids, &importProcs, &importToPart); + Zoltan_Destroy(&zz); + + // add wellConnections to the importExportLists and return it + auto result = std::tuple(std::move(std::get<0>(importExportLists)), + std::move(std::get<1>(importExportLists)), + std::move(std::get<2>(importExportLists)), + std::move(std::get<3>(importExportLists)), + wellConnections); + return result; } } // end namespace Opm diff --git a/tests/test_graphofgrid.cpp b/tests/test_graphofgrid.cpp index 1616615a6..c336d947a 100644 --- a/tests/test_graphofgrid.cpp +++ b/tests/test_graphofgrid.cpp @@ -31,7 +31,6 @@ #define BOOST_TEST_NO_MAIN #include #include -#include #include #include @@ -118,7 +117,7 @@ BOOST_AUTO_TEST_CASE(WrapperForZoltan) BOOST_REQUIRE(err==ZOLTAN_OK); BOOST_REQUIRE(nVer == 60); - std::vector gIDs(nVer); + std::vector gIDs(nVer); std::vector objWeights(nVer); getGraphOfGridVerticesList(&gog, 1, 1, gIDs.data(), nullptr, 1, objWeights.data(), &err); BOOST_REQUIRE(err==ZOLTAN_OK); @@ -141,7 +140,7 @@ BOOST_AUTO_TEST_CASE(WrapperForZoltan) } BOOST_REQUIRE(nEdges==266); - std::vector nborGIDs(nEdges); + std::vector nborGIDs(nEdges); std::vector nborProc(nEdges); std::vector edgeWeights(nEdges); getGraphOfGridEdgeList(&gog, 1, 1, nVer, gIDs.data(), nullptr, numEdges.data(), nborGIDs.data(), nborProc.data(), 1, edgeWeights.data(), &err); @@ -150,9 +149,6 @@ BOOST_AUTO_TEST_CASE(WrapperForZoltan) BOOST_REQUIRE(edgeWeights[203]==1.); // all are 1., no vertices were contracted numEdges[16] = 8; - std::string message("Expecting an error message from getGraphOfGridEdgeList, the vertex " - + std::to_string(gIDs[16]) + std::string(" has a wrong number of edges.")); - Opm::OpmLog::info(message); getGraphOfGridEdgeList(&gog, 1, 1, nVer, gIDs.data(), nullptr, numEdges.data(), nborGIDs.data(), nborProc.data(), 1, edgeWeights.data(), &err); BOOST_REQUIRE(err==ZOLTAN_FATAL); } @@ -176,10 +172,10 @@ BOOST_AUTO_TEST_CASE(GraphWithWell) BOOST_REQUIRE(err==ZOLTAN_OK); BOOST_REQUIRE(nVer == 49); - std::vector gIDs(nVer); + std::vector gIDs(nVer); std::vector objWeights(nVer); getGraphOfGridVerticesList(&gog, 1, 1, gIDs.data(), nullptr, 1, objWeights.data(), &err); - BOOST_REQUIRE(err=ZOLTAN_OK); + BOOST_REQUIRE(err==ZOLTAN_OK); for (int i=0; i gIDs(nVer); + std::vector gIDs(nVer); std::vector objWeights(nVer); getGraphOfGridVerticesList(&gog, 1, 1, gIDs.data(), nullptr, 1, objWeights.data(), &err); - BOOST_REQUIRE(err=ZOLTAN_OK); + BOOST_REQUIRE(err==ZOLTAN_OK); for (int i=0; i numEdges(nOut); - std::vector gID{12,0,54}; + std::vector gID{12,0,54}; getGraphOfGridNumEdges(&gog, 1, 1, nOut, gID.data(), nullptr, numEdges.data(), &err); BOOST_REQUIRE(err==ZOLTAN_OK); BOOST_REQUIRE(numEdges[0]==12); @@ -263,7 +259,7 @@ BOOST_AUTO_TEST_CASE(IntersectingWells) BOOST_REQUIRE(numEdges[2]==3); int nEdges = 41; - std::vector nborGIDs(nEdges); + std::vector nborGIDs(nEdges); std::vector nborProc(nEdges); std::vector edgeWeights(nEdges); getGraphOfGridEdgeList(&gog, 1, 1, nOut, gID.data(), nullptr, numEdges.data(), 
nborGIDs.data(), nborProc.data(), 1, edgeWeights.data(), &err); @@ -397,10 +393,10 @@ BOOST_AUTO_TEST_CASE(addWellConnections) int nVer = getGraphOfGridNumVertices(&gog,&err); BOOST_REQUIRE(err==ZOLTAN_OK); BOOST_REQUIRE(nVer == 4); - std::vector gIDs(nVer); + std::vector gIDs(nVer); std::vector objWeights(nVer); getGraphOfGridVerticesList(&gog, 1, 1, gIDs.data(), nullptr, 1, objWeights.data(), &err); - BOOST_REQUIRE(err=ZOLTAN_OK); + BOOST_REQUIRE(err==ZOLTAN_OK); std::sort(gIDs.begin(),gIDs.end()); BOOST_REQUIRE(gIDs[0]==0 && gIDs[1]==1 && gIDs[2]==3 && gIDs[3]==7); std::vector numEdges(nVer); @@ -408,7 +404,8 @@ BOOST_AUTO_TEST_CASE(addWellConnections) BOOST_REQUIRE(err==ZOLTAN_OK); BOOST_REQUIRE(numEdges[0]==3 && numEdges[1]==2 && numEdges[2]==3 && numEdges[3]==2); int nEdges = 10; // sum of numEdges[i] - std::vector nborGIDs(nEdges), nborProc(nEdges); + std::vector nborGIDs(nEdges); + std::vector nborProc(nEdges); std::vector edgeWeights(nEdges); getGraphOfGridEdgeList(&gog, 1, 1, nVer, gIDs.data(), nullptr, numEdges.data(), nborGIDs.data(), nborProc.data(), 1, edgeWeights.data(), &err); BOOST_REQUIRE(err==ZOLTAN_OK); From d5104ed55cf4d3d6e244a6f81830c5edd33afd23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20T=C3=B3th?= Date: Tue, 5 Nov 2024 15:08:31 +0100 Subject: [PATCH 02/14] Add GraphOfGrid partitioner to CpGrid::scatterGrid options and fix linking issues. --- CMakeLists_files.cmake | 1 + opm/grid/GraphOfGridWrappers.cpp | 493 +++++++++++++++++++++++++++++++ opm/grid/GraphOfGridWrappers.hpp | 372 ++--------------------- opm/grid/common/GridEnums.hpp | 4 +- opm/grid/cpgrid/CpGrid.cpp | 10 + tests/test_graphofgrid.cpp | 2 + 6 files changed, 528 insertions(+), 354 deletions(-) create mode 100644 opm/grid/GraphOfGridWrappers.cpp diff --git a/CMakeLists_files.cmake b/CMakeLists_files.cmake index c1bb7e3a0..acafebe73 100644 --- a/CMakeLists_files.cmake +++ b/CMakeLists_files.cmake @@ -48,6 +48,7 @@ list (APPEND MAIN_SOURCE_FILES opm/grid/ColumnExtract.cpp opm/grid/FaceQuadrature.cpp opm/grid/GraphOfGrid.cpp + opm/grid/GraphOfGridWrappers.cpp opm/grid/GridHelpers.cpp opm/grid/GridManager.cpp opm/grid/GridUtilities.cpp diff --git a/opm/grid/GraphOfGridWrappers.cpp b/opm/grid/GraphOfGridWrappers.cpp new file mode 100644 index 000000000..db0ef2f8c --- /dev/null +++ b/opm/grid/GraphOfGridWrappers.cpp @@ -0,0 +1,493 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/* + Copyright 2024 Equinor ASA. + + This file is part of the Open Porous Media project (OPM). + + OPM is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + OPM is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with OPM. If not, see . + + Consult the COPYING file in the top-level source directory of this + module for the precise wording of the license and the list of + copyright holders. 
+*/ + +#include +#include "GraphOfGridWrappers.hpp" + +namespace Opm { + +#if HAVE_MPI +int getGraphOfGridNumVertices(void* pGraph, int *err) +{ + const GraphOfGrid& gog = *static_cast*>(pGraph); + int size = gog.size(); + *err = ZOLTAN_OK; + return size; +} + +void getGraphOfGridVerticesList(void* pGraph, + [[maybe_unused]] int dimGlobalID, + [[maybe_unused]] int dimLocalID, + ZOLTAN_ID_PTR gIDs, + [[maybe_unused]] ZOLTAN_ID_PTR lIDs, + int weightDim, + float *objWeights, + int *err) +{ + assert(dimGlobalID==1); // ID is a single int + assert(weightDim==1); // vertex weight is a single float + const GraphOfGrid& gog = *static_cast*>(pGraph); + int i=0; + for (const auto& v : gog) + { + gIDs[i] = v.first; + // lIDs are left unused + objWeights[i] = v.second.weight; + ++i; + } + *err = ZOLTAN_OK; +} + +void getGraphOfGridNumEdges(void *pGraph, + [[maybe_unused]] int dimGlobalID, + [[maybe_unused]] int dimLocalID, + int numCells, + ZOLTAN_ID_PTR gIDs, + [[maybe_unused]] ZOLTAN_ID_PTR lIDs, + int *numEdges, + int *err) +{ + assert(dimGlobalID==1); // ID is a single int + const GraphOfGrid& gog = *static_cast*>(pGraph); + for (int i=0; i& gog = *static_cast*>(pGraph); + int id=0; + for (int i=0; i +void setGraphOfGridZoltanGraphFunctions(Zoltan_Struct *zz, + const GraphOfGrid& gog, + bool pretendNull) +{ + GraphOfGrid* pGraph = const_cast*>(&gog); + if (pretendNull) + { + Zoltan_Set_Num_Obj_Fn(zz, Dune::cpgrid::getNullNumCells, pGraph); + Zoltan_Set_Obj_List_Fn(zz, Dune::cpgrid::getNullVertexList, pGraph); + Zoltan_Set_Num_Edges_Multi_Fn(zz, Dune::cpgrid::getNullNumEdgesList, pGraph); + Zoltan_Set_Edge_List_Multi_Fn(zz, Dune::cpgrid::getNullEdgeList, pGraph); + } + else + { + Zoltan_Set_Num_Obj_Fn(zz, getGraphOfGridNumVertices, pGraph); + Zoltan_Set_Obj_List_Fn(zz, getGraphOfGridVerticesList, pGraph); + Zoltan_Set_Num_Edges_Multi_Fn(zz, getGraphOfGridNumEdges, pGraph); + Zoltan_Set_Edge_List_Multi_Fn(zz, getGraphOfGridEdgeList, pGraph); + } +} +#endif // HAVE_MPI + +void addFutureConnectionWells (GraphOfGrid& gog, + const std::unordered_map>& wells, + bool checkWellIntersections) +{ + // create compressed lookup from cartesian. + const auto& grid = gog.getGrid(); + const auto& cpgdim = grid.logicalCartesianSize(); + std::vector cartesian_to_compressed(cpgdim[0]*cpgdim[1]*cpgdim[2], -1); + for( int i=0; i < grid.numCells(); ++i ) + { + cartesian_to_compressed[grid.globalCell()[i]] = i; + } + + for (const auto& w: wells) + { + std::set wellsgID; + for (const int& cell : w.second) + { + int gID = cartesian_to_compressed.at(cell); + assert(gID!=-1); // well should be an active cell + wellsgID.insert(gID); + } + gog.addWell(wellsgID,checkWellIntersections); + } +} + +void addWellConnections (GraphOfGrid& gog, + const Dune::cpgrid::WellConnections& wells, + bool checkWellIntersections) +{ + for (const auto& w : wells) + { + gog.addWell(w,checkWellIntersections); + } +} + +void extendGIDtoRank (const GraphOfGrid& gog, + std::vector& gIDtoRank, + const int& thisRank) +{ + for (const auto& w : gog.getWells()) + { + auto wellID = *w.begin(); + if (gIDtoRank[wellID]!=thisRank) + { + for (const auto& gID : w) + { + gIDtoRank.at(gID) = gIDtoRank.at(wellID); + } + } + } +} + +template +void extendImportExportList (const GraphOfGrid& gog, + std::vector& cellList, + int skippedRank, + const std::vector& gIDtoRank) +{ + // using TheTuple = std::tuple; or std::tuple + using CellList = std::vector; + // make a list of wells for easy identification. 
Contains ID, begin, end + using iter = std::set::const_iterator; + std::unordered_map> wellMap; + for (const auto& well : gog.getWells()) + { + if (gIDtoRank.size()>0) + { + auto wellID = *well.begin(); + if (gIDtoRank.at(wellID)!=skippedRank) + { + wellMap[wellID] = std::make_tuple(well.begin(),well.end()); + } + } + else + { + wellMap[*well.begin()] = std::make_tuple(well.begin(),well.end()); + } + } + if (wellMap.size()==0) + { + return; + } + + CellList addToList; + // iterate once through the original cellList + for (const auto& cellProperties : cellList) + { + // if a cell is in any well, add cells of the well to cellList + auto pWell = wellMap.find(std::get<0>(cellProperties)); + if (pWell!=wellMap.end()) + { + if (std::get<1>(cellProperties)!=skippedRank) + { + const auto& [begin,end] = std::pair(std::get<0>(pWell->second),std::get<1>(pWell->second)); + for (auto pgID = begin; pgID!=end; ++pgID) + { + // cells in one well have the same attributes (except ID) + if (*pgID!=std::get<0>(cellProperties)) // avoid adding cell that is already in the list + { + TheTuple wellCell = cellProperties; + std::get<0>(wellCell) = *pgID; + addToList.push_back(wellCell); + } + } + } + wellMap.erase(pWell); + if (wellMap.empty()) + { + break; + } + } + } + + // add new cells to the cellList and sort it. It is assumed that cellList starts sorted. + auto compareTuple = [](const auto& a, const auto& b){return std::get<0>(a)(b);}; + std::sort(addToList.begin(),addToList.end(),compareTuple); + auto origSize = cellList.size(); + auto totsize = origSize+addToList.size(); + cellList.reserve(totsize); + cellList.insert(cellList.end(),addToList.begin(),addToList.end()); + std::inplace_merge(cellList.begin(),cellList.begin()+origSize,cellList.end(),compareTuple); +} + +std::vector getWellRanks(const std::vector& gIDtoRank, + const Dune::cpgrid::WellConnections& wellConnections) +{ + std::vector wellIndices(wellConnections.size()); + for (std::size_t wellIndex = 0; wellIndex < wellConnections.size(); ++wellIndex) + { + int wellID = *(wellConnections[wellIndex].begin()); + wellIndices[wellIndex] = gIDtoRank[wellID]; + } + return wellIndices; +} + +#if HAVE_MPI +std::vector> +wellsOnThisRank(const std::vector& wells, + const std::vector& wellRanks, + const Dune::cpgrid::CpGridDataTraits::Communication& cc, + int root) +{ + auto numProcs = cc.size(); + std::vector> wells_on_proc(numProcs); + for (std::size_t i=0; i +std::tuple, + std::vector>, + std::vector >, + std::vector > > +makeImportAndExportLists(const GraphOfGrid& gog, + const Dune::Communication& cc, + const std::vector * wells, + const Dune::cpgrid::WellConnections& wellConnections, + int root, + int numExport, + int numImport, + [[maybe_unused]] const Id* exportLocalGids, + const Id* exportGlobalGids, + const int* exportToPart, + const Id* importGlobalGids) +{ + const auto& cpgrid = gog.getGrid(); + int size = cpgrid.numCells(); + int rank = cc.rank(); + std::vector gIDtoRank(size, rank); + std::vector > wellsOnProc; + + // List entry: process to export to, (global) index, process rank, attribute there (not needed?) 
+ std::vector> myExportList(numExport); + // List entry: process to import from, global index, process rank, attribute here, local index (determined later) + std::vector> myImportList(numImport); + myExportList.reserve(1.2*myExportList.size()); + myImportList.reserve(1.2*myImportList.size()); + using AttributeSet = Dune::cpgrid::CpGridData::AttributeSet; + + for ( int i=0; i < numExport; ++i ) + { + gIDtoRank[exportGlobalGids[i]] = exportToPart[i]; + myExportList[i] = std::make_tuple(exportGlobalGids[i], exportToPart[i], static_cast(AttributeSet::owner)); + } + // partitioner sees only one cell per well, modify remaining + extendGIDtoRank(gog,gIDtoRank,rank); + + for ( int i=0; i < numImport; ++i ) + { + myImportList[i] = std::make_tuple(importGlobalGids[i], root, static_cast(AttributeSet::owner),-1); + } + + + // Add cells that stay here to the lists. Somehow I could not persuade Zoltan to do this. + for ( std::size_t i = 0; i < gIDtoRank.size(); ++i) + { + if ( gIDtoRank[i] == rank ) + { + myExportList.emplace_back(i, rank, static_cast(AttributeSet::owner) ); + myImportList.emplace_back(i, rank, static_cast(AttributeSet::owner), -1 ); + } + } + std::inplace_merge(myImportList.begin(), myImportList.begin() + numImport, myImportList.end()); + std::inplace_merge(myExportList.begin(), myExportList.begin() + numExport, myExportList.end()); + + // Complete the lists by adding cells that were contracted in the graph. + // All cells on the rank "rank" were added before. + extendImportExportList(gog,myImportList,rank,gIDtoRank); + extendImportExportList(gog,myExportList,rank,gIDtoRank); + + std::vector> parallel_wells; + if( wells ) + { + auto wellRanks = getWellRanks(gIDtoRank, wellConnections); + parallel_wells = wellsOnThisRank(*wells, wellRanks, cc, root); + } + return std::make_tuple(gIDtoRank, parallel_wells, myExportList, myImportList); +} + +namespace { +void setDefaultZoltanParameters(Zoltan_Struct* zz) +{ + Zoltan_Set_Param(zz, "LB_METHOD", "GRAPH"); + Zoltan_Set_Param(zz, "LB_APPROACH", "PARTITION"); + Zoltan_Set_Param(zz, "NUM_GID_ENTRIES", "1"); + Zoltan_Set_Param(zz, "NUM_LID_ENTRIES", "0"); + Zoltan_Set_Param(zz, "RETURN_LISTS", "ALL"); + Zoltan_Set_Param(zz, "EDGE_WEIGHT_DIM","1"); + Zoltan_Set_Param(zz, "OBJ_WEIGHT_DIM", "1"); + Zoltan_Set_Param(zz, "PHG_EDGE_SIZE_THRESHOLD", ".35"); /* 0-remove all, 1-remove none */ + Zoltan_Set_Param(zz, "DEBUG_LEVEL", "0"); +#ifndef NDEBUG + Zoltan_Set_Param(zz, "CHECK_GRAPH", "2"); +#else + Zoltan_Set_Param(zz, "CHECK_GRAPH", "0"); +#endif +} + +} // anon namespace + +std::tuple, std::vector>, + std::vector >, + std::vector >, + Dune::cpgrid::WellConnections> +zoltanPartitioningWithGraphOfGrid(const Dune::CpGrid& grid, + const std::vector * wells, + const std::unordered_map>& possibleFutureConnections, + [[maybe_unused]] const double* transmissibilities, + const Dune::cpgrid::CpGridDataTraits::Communication& cc, + [[maybe_unused]] Dune::EdgeWeightMethod edgeWeightsMethod, + int root, + const double zoltanImbalanceTol, + const std::map& params) +{ + int rc = ZOLTAN_OK - 1; + float ver = 0; + struct Zoltan_Struct *zz; + int changes, numGidEntries, numLidEntries, numImport, numExport; + ZOLTAN_ID_PTR importGlobalGids, importLocalGids, exportGlobalGids, exportLocalGids; + int *importProcs, *importToPart, *exportProcs, *exportToPart; + int argc=0; + char** argv = 0 ; + rc = Zoltan_Initialize(argc, argv, &ver); + zz = Zoltan_Create(cc); + if ( rc != ZOLTAN_OK ) + { + OPM_THROW(std::runtime_error, "Could not initialize Zoltan!"); + } + 
setDefaultZoltanParameters(zz); + Zoltan_Set_Param(zz, "IMBALANCE_TOL", std::to_string(zoltanImbalanceTol).c_str()); + for (const auto& [key, value] : params) + Zoltan_Set_Param(zz, key.c_str(), value.c_str()); + + // root process has the whole grid, other ranks nothing + bool partitionIsEmpty = cc.rank()!=root; + + // prepare graph and contract well cells + GraphOfGrid gog(grid); + Dune::cpgrid::WellConnections wellConnections(*wells,possibleFutureConnections,grid); + addWellConnections(gog,wellConnections); + + // call partitioner + setGraphOfGridZoltanGraphFunctions(zz,gog,partitionIsEmpty); + rc = Zoltan_LB_Partition(zz, /* input (all remaining fields are output) */ + &changes, /* 1 if partitioning was changed, 0 otherwise */ + &numGidEntries, /* Number of integers used for a global ID */ + &numLidEntries, /* Number of integers used for a local ID */ + &numImport, /* Number of vertices to be sent to me */ + &importGlobalGids, /* Global IDs of vertices to be sent to me */ + &importLocalGids, /* Local IDs of vertices to be sent to me */ + &importProcs, /* Process rank for source of each incoming vertex */ + &importToPart, /* New partition for each incoming vertex */ + &numExport, /* Number of vertices I must send to other processes*/ + &exportGlobalGids, /* Global IDs of the vertices I must send */ + &exportLocalGids, /* Local IDs of the vertices I must send */ + &exportProcs, /* Process to which I send each of the vertices */ + &exportToPart); /* Partition to which each vertex will belong */ + + // arrange output into tuples and add well cells + auto importExportLists = makeImportAndExportLists(gog, + cc, + wells, + wellConnections, + root, + numExport, + numImport, + exportLocalGids, + exportGlobalGids, + exportProcs, + importGlobalGids); + + Zoltan_LB_Free_Part(&exportGlobalGids, &exportLocalGids, &exportProcs, &exportToPart); + Zoltan_LB_Free_Part(&importGlobalGids, &importLocalGids, &importProcs, &importToPart); + Zoltan_Destroy(&zz); + + // add wellConnections to the importExportLists and return it + auto result = std::tuple(std::move(std::get<0>(importExportLists)), + std::move(std::get<1>(importExportLists)), + std::move(std::get<2>(importExportLists)), + std::move(std::get<3>(importExportLists)), + wellConnections); + return result; +} +#endif // HAVE_MPI + +// explicit template instantiations +template void extendImportExportList(const GraphOfGrid&, + std::vector >& , + int, + const std::vector&); +template void extendImportExportList(const GraphOfGrid&, + std::vector >& , + int, + const std::vector&); + +} // end namespace Opm diff --git a/opm/grid/GraphOfGridWrappers.hpp b/opm/grid/GraphOfGridWrappers.hpp index 483b7620c..dbc8c21c0 100644 --- a/opm/grid/GraphOfGridWrappers.hpp +++ b/opm/grid/GraphOfGridWrappers.hpp @@ -29,7 +29,7 @@ #include #include #include -#include +#include // defines Zoltan and null-callback-functions namespace Opm { /* @@ -39,16 +39,11 @@ namespace Opm { Additionally, parsing wells is done here. 
*/ +#if HAVE_MPI /// \brief callback function for ZOLTAN_NUM_OBJ_FN /// /// returns the number of vertices in the graph -int getGraphOfGridNumVertices(void* pGraph, int *err) -{ - const GraphOfGrid& gog = *static_cast*>(pGraph); - int size = gog.size(); - *err = ZOLTAN_OK; - return size; -} +int getGraphOfGridNumVertices(void* pGraph, int *err); /// \brief callback function for ZOLTAN_OBJ_LIST_FN /// @@ -61,21 +56,7 @@ void getGraphOfGridVerticesList(void* pGraph, [[maybe_unused]] ZOLTAN_ID_PTR lIDs, int weightDim, float *objWeights, - int *err) -{ - assert(dimGlobalID==1); // ID is a single int - assert(weightDim==1); // vertex weight is a single float - const GraphOfGrid& gog = *static_cast*>(pGraph); - int i=0; - for (const auto& v : gog) - { - gIDs[i] = v.first; - // lIDs are left unused - objWeights[i] = v.second.weight; - ++i; - } - *err = ZOLTAN_OK; -} + int *err); /// \brief callback function for ZOLTAN_NUM_EDGES_MULTI_FN /// @@ -88,25 +69,7 @@ void getGraphOfGridNumEdges(void *pGraph, ZOLTAN_ID_PTR gIDs, [[maybe_unused]] ZOLTAN_ID_PTR lIDs, int *numEdges, - int *err) -{ - assert(dimGlobalID==1); // ID is a single int - const GraphOfGrid& gog = *static_cast*>(pGraph); - for (int i=0; i& gog = *static_cast*>(pGraph); - int id=0; - for (int i=0; i void setGraphOfGridZoltanGraphFunctions(Zoltan_Struct *zz, - const GraphOfGrid& gog) -{ - GraphOfGrid* pGraph = const_cast*>(&gog); - Zoltan_Set_Num_Obj_Fn(zz, getGraphOfGridNumVertices, pGraph); - Zoltan_Set_Obj_List_Fn(zz, getGraphOfGridVerticesList, pGraph); - Zoltan_Set_Num_Edges_Multi_Fn(zz, getGraphOfGridNumEdges, pGraph); - Zoltan_Set_Edge_List_Multi_Fn(zz, getGraphOfGridEdgeList, pGraph); -} + const GraphOfGrid& gog, + bool pretendNull); +#endif /// \brief Adds well to the GraphOfGrid /// @@ -179,29 +109,7 @@ void setGraphOfGridZoltanGraphFunctions(Zoltan_Struct *zz, /// responsible for keeping wells disjoint. void addFutureConnectionWells (GraphOfGrid& gog, const std::unordered_map>& wells, - bool checkWellIntersections=true) -{ - // create compressed lookup from cartesian. - const auto& grid = gog.getGrid(); - const auto& cpgdim = grid.logicalCartesianSize(); - std::vector cartesian_to_compressed(cpgdim[0]*cpgdim[1]*cpgdim[2], -1); - for( int i=0; i < grid.numCells(); ++i ) - { - cartesian_to_compressed[grid.globalCell()[i]] = i; - } - - for (const auto& w: wells) - { - std::set wellsgID; - for (const int& cell : w.second) - { - int gID = cartesian_to_compressed.at(cell); - assert(gID!=-1); // well should be an active cell - wellsgID.insert(gID); - } - gog.addWell(wellsgID,checkWellIntersections); - } -} + bool checkWellIntersections=true); /// \brief Add WellConnections to the GraphOfGrid /// @@ -211,13 +119,7 @@ void addFutureConnectionWells (GraphOfGrid& gog, /// responsible for keeping wells disjoint. 
void addWellConnections (GraphOfGrid& gog, const Dune::cpgrid::WellConnections& wells, - bool checkWellIntersections=true) -{ - for (const auto& w : wells) - { - gog.addWell(w,checkWellIntersections); - } -} + bool checkWellIntersections=true); /// \brief Correct gIDtoRank's data about well cells /// @@ -225,20 +127,7 @@ void addWellConnections (GraphOfGrid& gog, /// come from Zoltan partitioner's export list that does not contain all well cells void extendGIDtoRank (const GraphOfGrid& gog, std::vector& gIDtoRank, - const int& thisRank = -1) -{ - for (const auto& w : gog.getWells()) - { - auto wellID = *w.begin(); - if (gIDtoRank[wellID]!=thisRank) - { - for (const auto& gID : w) - { - gIDtoRank.at(gID) = gIDtoRank.at(wellID); - } - } - } -} + const int& thisRank = -1); /// \brief Add well cells' global IDs to the list /// @@ -253,72 +142,7 @@ template void extendImportExportList (const GraphOfGrid& gog, std::vector& cellList, int skippedRank=-1, - const std::vector& gIDtoRank={}) -{ - // using TheTuple = std::tuple; or std::tuple - using CellList = std::vector; - // make a list of wells for easy identification. Contains ID, begin, end - using iter = std::set::const_iterator; - std::unordered_map> wellMap; - for (const auto& well : gog.getWells()) - { - if (gIDtoRank.size()>0) - { - auto wellID = *well.begin(); - if (gIDtoRank.at(wellID)!=skippedRank) - { - wellMap[wellID] = std::make_tuple(well.begin(),well.end()); - } - } - else - { - wellMap[*well.begin()] = std::make_tuple(well.begin(),well.end()); - } - } - if (wellMap.size()==0) - { - return; - } - - CellList addToList; - // iterate once through the original cellList - for (const auto& cellProperties : cellList) - { - // if a cell is in any well, add cells of the well to cellList - auto pWell = wellMap.find(std::get<0>(cellProperties)); - if (pWell!=wellMap.end()) - { - if (std::get<1>(cellProperties)!=skippedRank) - { - const auto& [begin,end] = std::pair(std::get<0>(pWell->second),std::get<1>(pWell->second)); - for (auto pgID = begin; pgID!=end; ++pgID) - { - // cells in one well have the same attributes (except ID) - if (*pgID!=std::get<0>(cellProperties)) // avoid adding cell that is already in the list - { - TheTuple wellCell = cellProperties; - std::get<0>(wellCell) = *pgID; - addToList.push_back(wellCell); - } - } - } - wellMap.erase(pWell); - if (wellMap.empty()) - { - break; - } - } - } - - // add new cells to the cellList and sort it. It is assumed that cellList starts sorted. 
- auto compareTuple = [](const auto& a, const auto& b){return std::get<0>(a)(b);}; - std::sort(addToList.begin(),addToList.end(),compareTuple); - auto origSize = cellList.size(); - auto totsize = origSize+addToList.size(); - cellList.reserve(totsize); - cellList.insert(cellList.end(),addToList.begin(),addToList.end()); - std::inplace_merge(cellList.begin(),cellList.begin()+origSize,cellList.end(),compareTuple); -} + const std::vector& gIDtoRank={}); /// \brief Find to which ranks wells were assigned /// @@ -326,17 +150,9 @@ void extendImportExportList (const GraphOfGrid& gog, /// \param gIDtoRank Takes global ID and returns rank /// \param wellConnections Has global IDs of well cells std::vector getWellRanks(const std::vector& gIDtoRank, - const Dune::cpgrid::WellConnections& wellConnections) -{ - std::vector wellIndices(wellConnections.size()); - for (std::size_t wellIndex = 0; wellIndex < wellConnections.size(); ++wellIndex) - { - int wellID = *(wellConnections[wellIndex].begin()); - wellIndices[wellIndex] = gIDtoRank[wellID]; - } - return wellIndices; -} + const Dune::cpgrid::WellConnections& wellConnections); +#if HAVE_MPI /// \brief Get rank-specific information about which wells are present /// /// \param wells Vector of wells containing names (and other...) @@ -353,16 +169,7 @@ std::vector> wellsOnThisRank(const std::vector& wells, const std::vector& wellRanks, const Dune::cpgrid::CpGridDataTraits::Communication& cc, - int root) -{ - auto numProcs = cc.size(); - std::vector> wells_on_proc(numProcs); - for (std::size_t i=0; i& gog, [[maybe_unused]] const Id* exportLocalGids, const Id* exportGlobalGids, const int* exportToPart, - const Id* importGlobalGids) -{ - const auto& cpgrid = gog.getGrid(); - int size = cpgrid.numCells(); - int rank = cc.rank(); - std::vector gIDtoRank(size, rank); - std::vector > wellsOnProc; - - // List entry: process to export to, (global) index, process rank, attribute there (not needed?) - std::vector> myExportList(numExport); - // List entry: process to import from, global index, process rank, attribute here, local index (determined later) - std::vector> myImportList(numImport); - myExportList.reserve(1.2*myExportList.size()); - myImportList.reserve(1.2*myImportList.size()); - using AttributeSet = Dune::cpgrid::CpGridData::AttributeSet; - - for ( int i=0; i < numExport; ++i ) - { - gIDtoRank[exportGlobalGids[i]] = exportToPart[i]; - myExportList[i] = std::make_tuple(exportGlobalGids[i], exportToPart[i], static_cast(AttributeSet::owner)); - } - // partitioner sees only one cell per well, modify remaining - extendGIDtoRank(gog,gIDtoRank,rank); - - for ( int i=0; i < numImport; ++i ) - { - myImportList[i] = std::make_tuple(importGlobalGids[i], root, static_cast(AttributeSet::owner),-1); - } - - - // Add cells that stay here to the lists. Somehow I could not persuade Zoltan to do this. - for ( std::size_t i = 0; i < gIDtoRank.size(); ++i) - { - if ( gIDtoRank[i] == rank ) - { - myExportList.emplace_back(i, rank, static_cast(AttributeSet::owner) ); - myImportList.emplace_back(i, rank, static_cast(AttributeSet::owner), -1 ); - } - } - std::inplace_merge(myImportList.begin(), myImportList.begin() + numImport, myImportList.end()); - std::inplace_merge(myExportList.begin(), myExportList.begin() + numExport, myExportList.end()); - - // Complete the lists by adding cells that were contracted in the graph. - // All cells on the rank "rank" were added before. 
- extendImportExportList(gog,myImportList,rank,gIDtoRank); - extendImportExportList(gog,myExportList,rank,gIDtoRank); - - std::vector> parallel_wells; - if( wells ) - { - auto wellRanks = getWellRanks(gIDtoRank, wellConnections); - parallel_wells = wellsOnThisRank(*wells, wellRanks, cc, root); - } - return std::make_tuple(gIDtoRank, parallel_wells, myExportList, myImportList); -} - -namespace { -void setDefaultZoltanParameters(Zoltan_Struct* zz) -{ - Zoltan_Set_Param(zz, "LB_METHOD", "GRAPH"); - Zoltan_Set_Param(zz, "LB_APPROACH", "PARTITION"); - Zoltan_Set_Param(zz, "NUM_GID_ENTRIES", "1"); - Zoltan_Set_Param(zz, "NUM_LID_ENTRIES", "0"); - Zoltan_Set_Param(zz, "RETURN_LISTS", "ALL"); - Zoltan_Set_Param(zz, "EDGE_WEIGHT_DIM","1"); - Zoltan_Set_Param(zz, "OBJ_WEIGHT_DIM", "1"); - Zoltan_Set_Param(zz, "PHG_EDGE_SIZE_THRESHOLD", ".35"); /* 0-remove all, 1-remove none */ - Zoltan_Set_Param(zz, "DEBUG_LEVEL", "0"); -#ifndef NDEBUG - Zoltan_Set_Param(zz, "CHECK_GRAPH", "2"); -#elif - Zoltan_Set_Param(zz, "CHECK_GRAPH", "0"); -#endif -} - -} // anon namespace + const Id* importGlobalGids); /// \brief Call Zoltan partitioner on GraphOfGrid /// @@ -491,74 +223,8 @@ zoltanPartitioningWithGraphOfGrid(const Dune::CpGrid& grid, [[maybe_unused]] Dune::EdgeWeightMethod edgeWeightsMethod, int root, const double zoltanImbalanceTol, - const std::map& params) -{ - int rc = ZOLTAN_OK - 1; - float ver = 0; - struct Zoltan_Struct *zz; - int changes, numGidEntries, numLidEntries, numImport, numExport; - ZOLTAN_ID_PTR importGlobalGids, importLocalGids, exportGlobalGids, exportLocalGids; - int *importProcs, *importToPart, *exportProcs, *exportToPart; - int argc=0; - char** argv = 0 ; - rc = Zoltan_Initialize(argc, argv, &ver); - zz = Zoltan_Create(cc); - if ( rc != ZOLTAN_OK ) - { - OPM_THROW(std::runtime_error, "Could not initialize Zoltan!"); - } - setDefaultZoltanParameters(zz); - Zoltan_Set_Param(zz, "IMBALANCE_TOL", std::to_string(zoltanImbalanceTol).c_str()); - for (const auto& [key, value] : params) - Zoltan_Set_Param(zz, key.c_str(), value.c_str()); - - // prepare graph and contract well cells - GraphOfGrid gog(grid); - Dune::cpgrid::WellConnections wellConnections(*wells,possibleFutureConnections,grid); - addWellConnections(gog,wellConnections); - - // call partitioner - setGraphOfGridZoltanGraphFunctions(zz,gog); - rc = Zoltan_LB_Partition(zz, /* input (all remaining fields are output) */ - &changes, /* 1 if partitioning was changed, 0 otherwise */ - &numGidEntries, /* Number of integers used for a global ID */ - &numLidEntries, /* Number of integers used for a local ID */ - &numImport, /* Number of vertices to be sent to me */ - &importGlobalGids, /* Global IDs of vertices to be sent to me */ - &importLocalGids, /* Local IDs of vertices to be sent to me */ - &importProcs, /* Process rank for source of each incoming vertex */ - &importToPart, /* New partition for each incoming vertex */ - &numExport, /* Number of vertices I must send to other processes*/ - &exportGlobalGids, /* Global IDs of the vertices I must send */ - &exportLocalGids, /* Local IDs of the vertices I must send */ - &exportProcs, /* Process to which I send each of the vertices */ - &exportToPart); /* Partition to which each vertex will belong */ - - // arrange output into tuples and add well cells - auto importExportLists = makeImportAndExportLists(gog, - cc, - wells, - wellConnections, - root, - numExport, - numImport, - exportLocalGids, - exportGlobalGids, - exportProcs, - importGlobalGids); - - Zoltan_LB_Free_Part(&exportGlobalGids, 
&exportLocalGids, &exportProcs, &exportToPart); - Zoltan_LB_Free_Part(&importGlobalGids, &importLocalGids, &importProcs, &importToPart); - Zoltan_Destroy(&zz); - - // add wellConnections to the importExportLists and return it - auto result = std::tuple(std::move(std::get<0>(importExportLists)), - std::move(std::get<1>(importExportLists)), - std::move(std::get<2>(importExportLists)), - std::move(std::get<3>(importExportLists)), - wellConnections); - return result; -} + const std::map& params); +#endif // HAVE_MPI } // end namespace Opm diff --git a/opm/grid/common/GridEnums.hpp b/opm/grid/common/GridEnums.hpp index 32efe9ef1..c7e097a96 100644 --- a/opm/grid/common/GridEnums.hpp +++ b/opm/grid/common/GridEnums.hpp @@ -47,7 +47,9 @@ namespace Dune { /// \brief Use Zoltan for partitioning zoltan=1, /// \brief Use METIS for partitioning - metis=2 + metis=2, + /// \brief use Zoltan on GraphOfGrid for partitioning + zoltanGoG=3 }; } diff --git a/opm/grid/cpgrid/CpGrid.cpp b/opm/grid/cpgrid/CpGrid.cpp index 0c32640b1..ebf28d884 100644 --- a/opm/grid/cpgrid/CpGrid.cpp +++ b/opm/grid/cpgrid/CpGrid.cpp @@ -53,6 +53,7 @@ #include "ParentToChildrenCellGlobalIdHandle.hpp" #include #include +#include //#include #include //#include @@ -351,6 +352,15 @@ CpGrid::scatterGrid(EdgeWeightMethod method, OPM_THROW(std::runtime_error, "Parallel runs depend on METIS if useMetis is true. Please install!"); #endif // HAVE_METIS + } + else if (partitionMethod == Dune::PartitionMethod::zoltanGoG) + { +#ifdef HAVE_ZOLTAN + std::tie(computedCellPart, wells_on_proc, exportList, importList, wellConnections) + = Opm::zoltanPartitioningWithGraphOfGrid(*this, wells, possibleFutureConnections, transmissibilities, cc, method, 0, imbalanceTol, partitioningParams); +#else + OPM_THROW(std::runtime_error, "Parallel runs depend on ZOLTAN if useZoltan is true. Please install!"); +#endif // HAVE_ZOLTAN } else { diff --git a/tests/test_graphofgrid.cpp b/tests/test_graphofgrid.cpp index c336d947a..a4d369694 100644 --- a/tests/test_graphofgrid.cpp +++ b/tests/test_graphofgrid.cpp @@ -104,6 +104,7 @@ BOOST_AUTO_TEST_CASE(SimpleGraphWithVertexContraction) } +#if HAVE_MPI BOOST_AUTO_TEST_CASE(WrapperForZoltan) { Dune::CpGrid grid; @@ -451,6 +452,7 @@ BOOST_AUTO_TEST_CASE(addWellConnections) } } +#endif // HAVE_MPI // After partitioning, importList and exportList are not complete, // other cells from wells need to be added. From 07b96e73bd9c686db35c8e7175189faacf9f43b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20T=C3=B3th?= Date: Tue, 5 Nov 2024 17:41:23 +0100 Subject: [PATCH 03/14] Parallelism: path for non-root processes --- opm/grid/GraphOfGridWrappers.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/opm/grid/GraphOfGridWrappers.cpp b/opm/grid/GraphOfGridWrappers.cpp index db0ef2f8c..7b6c50435 100644 --- a/opm/grid/GraphOfGridWrappers.cpp +++ b/opm/grid/GraphOfGridWrappers.cpp @@ -432,8 +432,10 @@ zoltanPartitioningWithGraphOfGrid(const Dune::CpGrid& grid, bool partitionIsEmpty = cc.rank()!=root; // prepare graph and contract well cells + // non-root processes have empty grid and no wells GraphOfGrid gog(grid); - Dune::cpgrid::WellConnections wellConnections(*wells,possibleFutureConnections,grid); + auto wellConnections=partitionIsEmpty ? 
Dune::cpgrid::WellConnections() + : Dune::cpgrid::WellConnections(*wells,possibleFutureConnections,grid); addWellConnections(gog,wellConnections); // call partitioner From a994da5d80febfc00da37bf23dc495ef558eb401 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20T=C3=B3th?= Date: Wed, 6 Nov 2024 14:29:18 +0100 Subject: [PATCH 04/14] Add unit tests for functions processing the partitioning output --- tests/test_graphofgrid.cpp | 147 +++++++++++++++++++++++++++++-------- 1 file changed, 115 insertions(+), 32 deletions(-) diff --git a/tests/test_graphofgrid.cpp b/tests/test_graphofgrid.cpp index a4d369694..548d14be4 100644 --- a/tests/test_graphofgrid.cpp +++ b/tests/test_graphofgrid.cpp @@ -329,34 +329,38 @@ BOOST_AUTO_TEST_CASE(IntersectingWells) BOOST_REQUIRE( *wellList.rbegin()==well1 ); } } +#endif // HAVE_MPI -// Create yet another small grid with wells and test graph properties. -// This time wells are supplied via OpmWellType interface -BOOST_AUTO_TEST_CASE(addWellConnections) -{ - // create a grid - Dune::CpGrid grid; - std::array dims{2,2,2}; - std::array size{1.,1.,1.}; - grid.createCartesian(dims,size); - Opm::GraphOfGrid gog(grid); - BOOST_REQUIRE(gog.size()==8); - +namespace { // create Wells, we only use well name and cell locations - auto createConnection = [](int i, int j, int k) + auto createConnection (int i, int j, int k) { return Opm::Connection(i,j,k,0, 0,Opm::Connection::State::OPEN, Opm::Connection::Direction::Z, Opm::Connection::CTFKind::DeckValue, 0, 5.,Opm::Connection::CTFProperties(),0,false); - }; - auto createWell = [](const std::string& name) + } + auto createWell (const std::string& name) { using namespace Opm; return Dune::cpgrid::OpmWellType(name,name,0,0,0,0,0.,WellType(), Well::ProducerCMode(),Connection::Order(),UnitSystem(), 0.,0.,false,false,0,Well::GasInflowEquation()); }; +} // end anonymous namespace + +#if HAVE_MPI +// Create yet another small grid with wells and test graph properties. 
+// This time wells are supplied via OpmWellType interface +BOOST_AUTO_TEST_CASE(addWellConnections) +{ + // create a grid + Dune::CpGrid grid; + std::array dims{2,2,2}; + std::array size{1.,1.,1.}; + grid.createCartesian(dims,size); + Opm::GraphOfGrid gog(grid); + BOOST_REQUIRE(gog.size()==8); auto wellCon = std::make_shared(); // do not confuse with Dune::cpgrid::WellConnections wellCon->add(createConnection(0,0,0)); @@ -475,31 +479,110 @@ BOOST_AUTO_TEST_CASE(ImportExportListExpansion) using AttributeSet = Dune::cpgrid::CpGridData::AttributeSet; std::vector imp(3); - imp[0] = std::make_tuple(0,1,AttributeSet::owner,1); + imp[0] = std::make_tuple(0,0,AttributeSet::owner,1); imp[1] = std::make_tuple(3,4,AttributeSet::copy,2); - imp[2] = std::make_tuple(5,0,AttributeSet::copy,3); + imp[2] = std::make_tuple(5,1,AttributeSet::copy,3); extendImportExportList(gog,imp); BOOST_REQUIRE(imp.size()==7); + for (int i=0; i<7; ++i) + { + if (i<3) + { + BOOST_CHECK(std::get<0>(imp[i])==i); + BOOST_CHECK(std::get<1>(imp[i])==0); + BOOST_CHECK(std::get<2>(imp[i])==AttributeSet::owner); + BOOST_CHECK(std::get<3>(imp[i])==1); + } + else if (i>3) + { + BOOST_CHECK(std::get<1>(imp[i])==1); + BOOST_CHECK(std::get<2>(imp[i])==AttributeSet::copy); + BOOST_CHECK(std::get<3>(imp[i])==3); + } + } + // lists are sorted by ID (by get<0>) + BOOST_CHECK(std::get<0>(imp[3])==3); + BOOST_CHECK(std::get<1>(imp[3])==4); + BOOST_CHECK(std::get<2>(imp[3])==AttributeSet::copy); + BOOST_CHECK(std::get<3>(imp[3])==2); + BOOST_CHECK(std::get<0>(imp[4])==5); BOOST_CHECK(std::get<0>(imp[5])==8); - BOOST_CHECK(std::get<1>(imp[5])==0); - BOOST_CHECK(std::get<2>(imp[5])==AttributeSet::copy); - BOOST_CHECK(std::get<3>(imp[5])==3); - BOOST_CHECK(std::get<0>(imp[5])==8); - BOOST_CHECK(std::get<1>(imp[1])==1); - BOOST_CHECK(std::get<2>(imp[1])==AttributeSet::owner); - BOOST_CHECK(std::get<3>(imp[1])==1); + BOOST_CHECK(std::get<0>(imp[6])==11); std::vector exp(3); - exp[0] = std::make_tuple(0,1,AttributeSet::owner); + exp[0] = std::make_tuple(0,0,AttributeSet::owner); exp[1] = std::make_tuple(3,4,AttributeSet::copy); - exp[2] = std::make_tuple(5,0,AttributeSet::copy); + exp[2] = std::make_tuple(5,1,AttributeSet::copy); extendImportExportList(gog,exp); - BOOST_CHECK(std::get<0>(imp[5])==8); - BOOST_CHECK(std::get<1>(imp[5])==0); - BOOST_CHECK(std::get<2>(imp[5])==AttributeSet::copy); - BOOST_CHECK(std::get<0>(imp[5])==8); - BOOST_CHECK(std::get<1>(imp[1])==1); - BOOST_CHECK(std::get<2>(imp[1])==AttributeSet::owner); + for (int i=0; i<7; ++i) + { + BOOST_CHECK(std::get<0>(imp[i])==std::get<0>(exp[i])); + BOOST_CHECK(std::get<1>(imp[i])==std::get<1>(exp[i])); + BOOST_CHECK(std::get<2>(imp[i])==std::get<2>(exp[i])); + } + + std::vectorgIDtoRank(12,1); + gIDtoRank[0]=0; // well {0,1,2} + gIDtoRank[8]=2; // inside well {5,8,11}, to be rewritten unless skipped + extendGIDtoRank(gog,gIDtoRank,1); // skip wells on rank 1 + BOOST_CHECK(gIDtoRank[8]==2); + for (int i=0; i<12; ++i) + { + if (i<3) + BOOST_CHECK(gIDtoRank[i]==0); + else if (i!=8) + BOOST_CHECK(gIDtoRank[i]==1); + } + extendGIDtoRank(gog,gIDtoRank); + BOOST_CHECK(gIDtoRank[8]==1); + + // extendImportExportList skip Rank + std::vector expSkipped(3); + expSkipped[0] = std::make_tuple(0,0,AttributeSet::owner); + expSkipped[1] = std::make_tuple(3,4,AttributeSet::copy); + expSkipped[2] = std::make_tuple(5,1,AttributeSet::copy); + extendImportExportList(gog,expSkipped,0,gIDtoRank); + BOOST_REQUIRE(expSkipped.size()==5); + BOOST_CHECK(expSkipped[0]==exp[0]); + for (int i=1; i<5; ++i) + 
BOOST_CHECK_MESSAGE(expSkipped[i]==exp[i+2],"expSkipped[i]!=exp[i+2] with i="+std::to_string(i)); +} + +// getWellRanks takes wellConnections and vector gIDtoRank mapping cells to their ranks +// and returns a vector of well ranks +BOOST_AUTO_TEST_CASE(test_getWellRanks) +{ + // create a grid with wells + Dune::CpGrid grid; + std::array dims{1,2,4}; + std::array size{1.,1.,1.}; + grid.createCartesian(dims,size); + + auto wellCon = std::make_shared(); + wellCon->add(createConnection(0,0,0)); + wellCon->add(createConnection(0,1,0)); + wellCon->add(createConnection(0,1,1)); + std::vector wells; + wells.push_back(createWell("first")); + wells[0].updateConnections(wellCon,true); + + wellCon = std::make_shared(); // reset + wellCon->add(createConnection(0,0,2)); + wellCon->add(createConnection(0,1,2)); + wells.push_back(createWell("second")); + wells[1].updateConnections(wellCon,true); + + wells.push_back(createWell("third")); + + std::vector gIDtoRank{4,4,1,4,3,3,2,2}; + std::unordered_map> futureConnections; + futureConnections.emplace("third",std::set{6,7}); + Dune::cpgrid::WellConnections wellConnections(wells,futureConnections,grid); + auto wellRanks = Opm::getWellRanks(gIDtoRank,wellConnections); + BOOST_REQUIRE(wellRanks.size()==3); + BOOST_CHECK(wellRanks[0]==4); + BOOST_CHECK(wellRanks[1]==3); + BOOST_CHECK(wellRanks[2]==2); } bool From 6fe562e9a13c72a7bef5857731bd1f1d2e2afdc9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20T=C3=B3th?= Date: Tue, 12 Nov 2024 17:05:11 +0100 Subject: [PATCH 05/14] Fix behaviour on non-root ranks. --- opm/grid/GraphOfGridWrappers.cpp | 248 ++++++++++++++++++++++++++++--- opm/grid/GraphOfGridWrappers.hpp | 13 ++ 2 files changed, 238 insertions(+), 23 deletions(-) diff --git a/opm/grid/GraphOfGridWrappers.cpp b/opm/grid/GraphOfGridWrappers.cpp index 7b6c50435..d788441bb 100644 --- a/opm/grid/GraphOfGridWrappers.cpp +++ b/opm/grid/GraphOfGridWrappers.cpp @@ -25,6 +25,7 @@ #include #include "GraphOfGridWrappers.hpp" +#include namespace Opm { @@ -275,6 +276,194 @@ void extendImportExportList (const GraphOfGrid& gog, std::inplace_merge(cellList.begin(),cellList.begin()+origSize,cellList.end(),compareTuple); } +std::vector>> getExtendedExportList (const GraphOfGrid& gog, + std::vector>& exportList, + int root, + const std::vector& gIDtoRank) +{ + const auto& cc = gog.getGrid().comm(); + // non-root ranks have empty export lists. + if (cc.rank()!=root) + { + return std::vector>>(); + } + std::vector>> toBeCommunicatedCells(cc.size()); + using ExportList = std::vector>; + // make a list of wells for easy identification. 
Contains ID, begin, end + using iter = std::set::const_iterator; + std::unordered_map> wellMap; + for (const auto& well : gog.getWells()) + { + if (gIDtoRank.size()>0) + { + auto wellID = *well.begin(); + if (gIDtoRank.at(wellID)!=root) + { + wellMap[wellID] = std::make_tuple(well.begin(),well.end()); + } + } + else + { + wellMap[*well.begin()] = std::make_tuple(well.begin(),well.end()); + } + } + + ExportList addToList; + // iterate once through the original exportList + for (const auto& cellProperties : exportList) + { + // if a cell is in any well, add cells of the well to exportList + auto pWell = wellMap.find(std::get<0>(cellProperties)); + if (pWell!=wellMap.end()) + { + int rankToExport = std::get<1>(cellProperties); + if (rankToExport!=root) + { + std::set wellToExport; + const auto& [begin,end] = std::pair(std::get<0>(pWell->second),std::get<1>(pWell->second)); + for (auto pgID = begin; pgID!=end; ++pgID) + { + // cells in one well have the same attributes (except ID) + if (*pgID!=std::get<0>(cellProperties)) // avoid adding cell that is already in the list + { + std::tuple wellCell = cellProperties; + std::get<0>(wellCell) = *pgID; + addToList.push_back(wellCell); + } + wellToExport.emplace(*pgID); + } + toBeCommunicatedCells[rankToExport].push_back(std::move(wellToExport)); + } + wellMap.erase(pWell); + if (wellMap.empty()) + { + break; + } + } + } + + // add new cells to the exportList and sort it. It is assumed that exportList starts sorted. + auto compareTuple = [](const auto& a, const auto& b){return std::get<0>(a)(b);}; + std::sort(addToList.begin(),addToList.end(),compareTuple); + auto origSize = exportList.size(); + auto totsize = origSize+addToList.size(); + exportList.reserve(totsize); + exportList.insert(exportList.end(),addToList.begin(),addToList.end()); + std::inplace_merge(exportList.begin(),exportList.begin()+origSize,exportList.end(),compareTuple); + + return toBeCommunicatedCells; +} + +std::vector> communicateToExtendWells ( + const std::vector>>& toBeCommunicatedCells, + const Dune::cpgrid::CpGridDataTraits::Communication& cc, + int root) +{ + // create a vector with well-cells-to-be-imported that will be communicated to non-root ranks + int totsize = 0; + if (cc.rank()==root) + { + totsize = 2*cc.size()-1; + for (int i=0; i toBeCommunicated(totsize); + if (cc.rank()==root) + { + int index=cc.size(); + for (int i=1; i offsets; + std::tie(toBeCommunicated, offsets) = Opm::allGatherv(toBeCommunicated,cc); + if (cc.rank()==root) + { + return std::vector>(); + } + int index = toBeCommunicated[cc.rank()]; + int nrSets = toBeCommunicated[index++]; + std::vector> result(nrSets); + for (int i=0; i>& importList, + const std::vector>& extraWells) +{ + using ImportList = std::vector>; + // make a list of wells for easy identification. 
Contains ID, begin, end + using iter = std::set::const_iterator; + std::unordered_map> wellMap; + for (const auto& well : extraWells) // empty on non-root + { + wellMap[*well.begin()] = std::make_tuple(well.begin(),well.end()); + } + + ImportList addToList; + // iterate once through the original importList + for (const auto& cellProperties : importList) + { + // if a cell is in any well, add cells of the well to importList + auto pWell = wellMap.find(std::get<0>(cellProperties)); + if (pWell!=wellMap.end()) + { + const auto& [begin,end] = std::pair(std::get<0>(pWell->second),std::get<1>(pWell->second)); + for (auto pgID = begin; pgID!=end; ++pgID) + { + // cells in one well have the same attributes (except ID) + if (*pgID!=std::get<0>(cellProperties)) // avoid adding cell that is already in the list + { + std::tuple wellCell = cellProperties; + std::get<0>(wellCell) = *pgID; + addToList.push_back(wellCell); + } + } + wellMap.erase(pWell); + if (wellMap.empty()) + { + break; + } + } + } + + // add new cells to the importList and sort it. It is assumed that importList starts sorted. + auto compareTuple = [](const auto& a, const auto& b){return std::get<0>(a)(b);}; + std::sort(addToList.begin(),addToList.end(),compareTuple); + auto origSize = importList.size(); + auto totsize = origSize+addToList.size(); + importList.reserve(totsize); + importList.insert(importList.end(),addToList.begin(),addToList.end()); + std::inplace_merge(importList.begin(),importList.begin()+origSize,importList.end(),compareTuple); +} + std::vector getWellRanks(const std::vector& gIDtoRank, const Dune::cpgrid::WellConnections& wellConnections) { @@ -334,36 +523,45 @@ makeImportAndExportLists(const GraphOfGrid& gog, myImportList.reserve(1.2*myImportList.size()); using AttributeSet = Dune::cpgrid::CpGridData::AttributeSet; - for ( int i=0; i < numExport; ++i ) - { - gIDtoRank[exportGlobalGids[i]] = exportToPart[i]; - myExportList[i] = std::make_tuple(exportGlobalGids[i], exportToPart[i], static_cast(AttributeSet::owner)); - } - // partitioner sees only one cell per well, modify remaining - extendGIDtoRank(gog,gIDtoRank,rank); - for ( int i=0; i < numImport; ++i ) { myImportList[i] = std::make_tuple(importGlobalGids[i], root, static_cast(AttributeSet::owner),-1); } - - - // Add cells that stay here to the lists. Somehow I could not persuade Zoltan to do this. - for ( std::size_t i = 0; i < gIDtoRank.size(); ++i) + assert(rank==root || numExport==0); + if (rank==root) { - if ( gIDtoRank[i] == rank ) + for ( int i=0; i < numExport; ++i ) { - myExportList.emplace_back(i, rank, static_cast(AttributeSet::owner) ); - myImportList.emplace_back(i, rank, static_cast(AttributeSet::owner), -1 ); + gIDtoRank[exportGlobalGids[i]] = exportToPart[i]; + myExportList[i] = std::make_tuple(exportGlobalGids[i], exportToPart[i], static_cast(AttributeSet::owner)); } - } - std::inplace_merge(myImportList.begin(), myImportList.begin() + numImport, myImportList.end()); - std::inplace_merge(myExportList.begin(), myExportList.begin() + numExport, myExportList.end()); + std::sort(myExportList.begin(),myExportList.end()); + // partitioner sees only one cell per well, modify remaining + extendGIDtoRank(gog,gIDtoRank,rank); - // Complete the lists by adding cells that were contracted in the graph. - // All cells on the rank "rank" were added before. - extendImportExportList(gog,myImportList,rank,gIDtoRank); - extendImportExportList(gog,myExportList,rank,gIDtoRank); + // Add cells that stay here to the lists. 
Somehow I could not persuade Zoltan to do this. + for ( std::size_t i = 0; i < gIDtoRank.size(); ++i) + { + if ( gIDtoRank[i] == rank ) + { + myExportList.emplace_back(i, rank, static_cast(AttributeSet::owner) ); + myImportList.emplace_back(i, rank, static_cast(AttributeSet::owner), -1 ); + } + } + std::inplace_merge(myImportList.begin(), myImportList.begin() + numImport, myImportList.end()); + std::inplace_merge(myExportList.begin(), myExportList.begin() + numExport, myExportList.end()); + // Complete the lists by adding cells that were contracted in the graph. + // All cells on the rank "rank" were added before. + extendImportExportList(gog,myImportList,rank,gIDtoRank); + // extendImportExportList(gog,myExportList,rank); // ,gIDtoRank); + } + auto eExL = getExtendedExportList(gog,myExportList,root,gIDtoRank); + auto extended = communicateToExtendWells(eExL,cc,root); + if (cc.rank()!=root) + { + extendImportList(myImportList,extended); + } + std::sort(myImportList.begin(),myImportList.end()); std::vector> parallel_wells; if( wells ) @@ -371,7 +569,10 @@ makeImportAndExportLists(const GraphOfGrid& gog, auto wellRanks = getWellRanks(gIDtoRank, wellConnections); parallel_wells = wellsOnThisRank(*wells, wellRanks, cc, root); } - return std::make_tuple(gIDtoRank, parallel_wells, myExportList, myImportList); + return std::make_tuple( std::move(gIDtoRank), + std::move(parallel_wells), + std::move(myExportList), + std::move(myImportList) ); } namespace { @@ -434,6 +635,7 @@ zoltanPartitioningWithGraphOfGrid(const Dune::CpGrid& grid, // prepare graph and contract well cells // non-root processes have empty grid and no wells GraphOfGrid gog(grid); + assert(gog.size()==0 || !partitionIsEmpty); auto wellConnections=partitionIsEmpty ? Dune::cpgrid::WellConnections() : Dune::cpgrid::WellConnections(*wells,possibleFutureConnections,grid); addWellConnections(gog,wellConnections); diff --git a/opm/grid/GraphOfGridWrappers.hpp b/opm/grid/GraphOfGridWrappers.hpp index dbc8c21c0..00eba0c9f 100644 --- a/opm/grid/GraphOfGridWrappers.hpp +++ b/opm/grid/GraphOfGridWrappers.hpp @@ -128,6 +128,9 @@ void addWellConnections (GraphOfGrid& gog, void extendGIDtoRank (const GraphOfGrid& gog, std::vector& gIDtoRank, const int& thisRank = -1); +void extendGIDtoRank (const Dune::cpgrid::WellConnections& wellConnections, + std::vector& gIDtoRank, + const int& thisRank = -1); /// \brief Add well cells' global IDs to the list /// @@ -143,6 +146,16 @@ void extendImportExportList (const GraphOfGrid& gog, std::vector& cellList, int skippedRank=-1, const std::vector& gIDtoRank={}); +void extendImportList (std::vector>& importList, + const std::vector>& extraWells); +std::vector>> getExtendedExportList (const GraphOfGrid& gog, + std::vector>& exportList, + int root, + const std::vector& gIDtoRank); +std::vector> communicateToExtendWells ( + const std::vector>>& toBeCommunicatedCells, + const Dune::cpgrid::CpGridDataTraits::Communication& cc, + int root); /// \brief Find to which ranks wells were assigned /// From 9cf5a118b776b595fdae92a60a0ee69868f719ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20T=C3=B3th?= Date: Mon, 18 Nov 2024 19:48:17 +0100 Subject: [PATCH 06/14] Refactor code for extending import and export lists --- opm/grid/GraphOfGridWrappers.cpp | 193 +++++++++++-------------------- opm/grid/GraphOfGridWrappers.hpp | 74 ++++++++---- tests/test_graphofgrid.cpp | 62 +--------- 3 files changed, 119 insertions(+), 210 deletions(-) diff --git a/opm/grid/GraphOfGridWrappers.cpp b/opm/grid/GraphOfGridWrappers.cpp index 
d788441bb..c088c356b 100644 --- a/opm/grid/GraphOfGridWrappers.cpp +++ b/opm/grid/GraphOfGridWrappers.cpp @@ -190,12 +190,12 @@ void addWellConnections (GraphOfGrid& gog, void extendGIDtoRank (const GraphOfGrid& gog, std::vector& gIDtoRank, - const int& thisRank) + const int& root) { for (const auto& w : gog.getWells()) { auto wellID = *w.begin(); - if (gIDtoRank[wellID]!=thisRank) + if (gIDtoRank[wellID]!=root) { for (const auto& gID : w) { @@ -205,81 +205,13 @@ void extendGIDtoRank (const GraphOfGrid& gog, } } -template -void extendImportExportList (const GraphOfGrid& gog, - std::vector& cellList, - int skippedRank, - const std::vector& gIDtoRank) -{ - // using TheTuple = std::tuple; or std::tuple - using CellList = std::vector; - // make a list of wells for easy identification. Contains ID, begin, end - using iter = std::set::const_iterator; - std::unordered_map> wellMap; - for (const auto& well : gog.getWells()) - { - if (gIDtoRank.size()>0) - { - auto wellID = *well.begin(); - if (gIDtoRank.at(wellID)!=skippedRank) - { - wellMap[wellID] = std::make_tuple(well.begin(),well.end()); - } - } - else - { - wellMap[*well.begin()] = std::make_tuple(well.begin(),well.end()); - } - } - if (wellMap.size()==0) - { - return; - } +namespace Impl{ - CellList addToList; - // iterate once through the original cellList - for (const auto& cellProperties : cellList) - { - // if a cell is in any well, add cells of the well to cellList - auto pWell = wellMap.find(std::get<0>(cellProperties)); - if (pWell!=wellMap.end()) - { - if (std::get<1>(cellProperties)!=skippedRank) - { - const auto& [begin,end] = std::pair(std::get<0>(pWell->second),std::get<1>(pWell->second)); - for (auto pgID = begin; pgID!=end; ++pgID) - { - // cells in one well have the same attributes (except ID) - if (*pgID!=std::get<0>(cellProperties)) // avoid adding cell that is already in the list - { - TheTuple wellCell = cellProperties; - std::get<0>(wellCell) = *pgID; - addToList.push_back(wellCell); - } - } - } - wellMap.erase(pWell); - if (wellMap.empty()) - { - break; - } - } - } - - // add new cells to the cellList and sort it. It is assumed that cellList starts sorted. - auto compareTuple = [](const auto& a, const auto& b){return std::get<0>(a)(b);}; - std::sort(addToList.begin(),addToList.end(),compareTuple); - auto origSize = cellList.size(); - auto totsize = origSize+addToList.size(); - cellList.reserve(totsize); - cellList.insert(cellList.end(),addToList.begin(),addToList.end()); - std::inplace_merge(cellList.begin(),cellList.begin()+origSize,cellList.end(),compareTuple); -} - -std::vector>> getExtendedExportList (const GraphOfGrid& gog, - std::vector>& exportList, - int root, - const std::vector& gIDtoRank) +std::vector>> +extendRootExportList (const GraphOfGrid& gog, + std::vector>& exportList, + int root, + const std::vector& gIDtoRank) { const auto& cc = gog.getGrid().comm(); // non-root ranks have empty export lists. @@ -287,11 +219,11 @@ std::vector>> getExtendedExportList (const GraphOfGrid { return std::vector>>(); } - std::vector>> toBeCommunicatedCells(cc.size()); + std::vector>> exportedWells(cc.size()); using ExportList = std::vector>; // make a list of wells for easy identification. 
Contains ID, begin, end using iter = std::set::const_iterator; - std::unordered_map> wellMap; + std::unordered_map> wellMap; for (const auto& well : gog.getWells()) { if (gIDtoRank.size()>0) @@ -299,12 +231,12 @@ std::vector>> getExtendedExportList (const GraphOfGrid auto wellID = *well.begin(); if (gIDtoRank.at(wellID)!=root) { - wellMap[wellID] = std::make_tuple(well.begin(),well.end()); + wellMap[wellID] = std::make_pair(well.begin(),well.end()); } } else { - wellMap[*well.begin()] = std::make_tuple(well.begin(),well.end()); + wellMap[*well.begin()] = std::make_pair(well.begin(),well.end()); } } @@ -319,20 +251,20 @@ std::vector>> getExtendedExportList (const GraphOfGrid int rankToExport = std::get<1>(cellProperties); if (rankToExport!=root) { - std::set wellToExport; - const auto& [begin,end] = std::pair(std::get<0>(pWell->second),std::get<1>(pWell->second)); - for (auto pgID = begin; pgID!=end; ++pgID) + const auto& [begin,end] = pWell->second; + std::set wellToExport{*begin}; + // well ID is its cell of lowest index and is already in the exportList + assert(*begin==std::get<0>(cellProperties)); + for (auto pgID = begin; ++pgID!=end; ) { // cells in one well have the same attributes (except ID) - if (*pgID!=std::get<0>(cellProperties)) // avoid adding cell that is already in the list - { - std::tuple wellCell = cellProperties; - std::get<0>(wellCell) = *pgID; - addToList.push_back(wellCell); - } + std::tuple wellCell = cellProperties; + std::get<0>(wellCell) = *pgID; + addToList.push_back(wellCell); + wellToExport.emplace(*pgID); } - toBeCommunicatedCells[rankToExport].push_back(std::move(wellToExport)); + exportedWells[rankToExport].push_back(std::move(wellToExport)); } wellMap.erase(pWell); if (wellMap.empty()) @@ -351,22 +283,22 @@ std::vector>> getExtendedExportList (const GraphOfGrid exportList.insert(exportList.end(),addToList.begin(),addToList.end()); std::inplace_merge(exportList.begin(),exportList.begin()+origSize,exportList.end(),compareTuple); - return toBeCommunicatedCells; + return exportedWells; } -std::vector> communicateToExtendWells ( - const std::vector>>& toBeCommunicatedCells, +std::vector> communicateExportedWells ( + const std::vector>>& exportedWells, const Dune::cpgrid::CpGridDataTraits::Communication& cc, int root) { // create a vector with well-cells-to-be-imported that will be communicated to non-root ranks - int totsize = 0; + int totsize = cc.size(); if (cc.rank()==root) { totsize = 2*cc.size()-1; for (int i=0; i> communicateToExtendWells ( for (int i=1; i>& importList, using ImportList = std::vector>; // make a list of wells for easy identification. 
Contains ID, begin, end using iter = std::set::const_iterator; - std::unordered_map> wellMap; - for (const auto& well : extraWells) // empty on non-root + std::unordered_map> wellMap; + for (const auto& well : extraWells) { - wellMap[*well.begin()] = std::make_tuple(well.begin(),well.end()); + if (well.size()>1) + { + wellMap[*well.begin()] = std::make_pair(well.begin(),well.end()); + } } ImportList addToList; @@ -435,17 +370,17 @@ void extendImportList (std::vector>& importList, auto pWell = wellMap.find(std::get<0>(cellProperties)); if (pWell!=wellMap.end()) { - const auto& [begin,end] = std::pair(std::get<0>(pWell->second),std::get<1>(pWell->second)); - for (auto pgID = begin; pgID!=end; ++pgID) + const auto& [begin,end] = pWell->second; + // well ID is its cell of lowest index and is already in the importList + assert(*begin==std::get<0>(cellProperties)); + for (auto pgID = begin; ++pgID!=end; ) { // cells in one well have the same attributes (except ID) - if (*pgID!=std::get<0>(cellProperties)) // avoid adding cell that is already in the list - { - std::tuple wellCell = cellProperties; - std::get<0>(wellCell) = *pgID; - addToList.push_back(wellCell); - } + std::tuple wellCell = cellProperties; + std::get<0>(wellCell) = *pgID; + addToList.push_back(wellCell); } + wellMap.erase(pWell); if (wellMap.empty()) { @@ -464,6 +399,26 @@ void extendImportList (std::vector>& importList, std::inplace_merge(importList.begin(),importList.begin()+origSize,importList.end(),compareTuple); } +} // end namespace Impl + +void extendExportAndImportLists(const GraphOfGrid& gog, + const Dune::cpgrid::CpGridDataTraits::Communication& cc, + int root, + std::vector>& exportList, + std::vector>& importList, + const std::vector& gIDtoRank) +{ + // extend root's export list and get sets of well cells for other ranks + auto expListToComm = Impl::extendRootExportList(gog,exportList,root,gIDtoRank); + // obtain wells on this rank from root + auto extraWells = Impl::communicateExportedWells(expListToComm,cc,root); + if (cc.rank()!=root) + { + std::sort(importList.begin(),importList.end()); + Impl::extendImportList(importList,extraWells); + } +} + std::vector getWellRanks(const std::vector& gIDtoRank, const Dune::cpgrid::WellConnections& wellConnections) { @@ -540,6 +495,7 @@ makeImportAndExportLists(const GraphOfGrid& gog, extendGIDtoRank(gog,gIDtoRank,rank); // Add cells that stay here to the lists. Somehow I could not persuade Zoltan to do this. + // This also adds all well cells that were missing in the importGlobalIDs. for ( std::size_t i = 0; i < gIDtoRank.size(); ++i) { if ( gIDtoRank[i] == rank ) @@ -550,22 +506,15 @@ makeImportAndExportLists(const GraphOfGrid& gog, } std::inplace_merge(myImportList.begin(), myImportList.begin() + numImport, myImportList.end()); std::inplace_merge(myExportList.begin(), myExportList.begin() + numExport, myExportList.end()); - // Complete the lists by adding cells that were contracted in the graph. - // All cells on the rank "rank" were added before. 
- extendImportExportList(gog,myImportList,rank,gIDtoRank); - // extendImportExportList(gog,myExportList,rank); // ,gIDtoRank); } - auto eExL = getExtendedExportList(gog,myExportList,root,gIDtoRank); - auto extended = communicateToExtendWells(eExL,cc,root); - if (cc.rank()!=root) - { - extendImportList(myImportList,extended); - } - std::sort(myImportList.begin(),myImportList.end()); + std::vector> parallel_wells; if( wells ) { + // complete root's export and other's import list by adding remaining well cells + extendExportAndImportLists(gog,cc,root,myExportList,myImportList,gIDtoRank); + auto wellRanks = getWellRanks(gIDtoRank, wellConnections); parallel_wells = wellsOnThisRank(*wells, wellRanks, cc, root); } @@ -685,13 +634,5 @@ zoltanPartitioningWithGraphOfGrid(const Dune::CpGrid& grid, #endif // HAVE_MPI // explicit template instantiations -template void extendImportExportList(const GraphOfGrid&, - std::vector >& , - int, - const std::vector&); -template void extendImportExportList(const GraphOfGrid&, - std::vector >& , - int, - const std::vector&); } // end namespace Opm diff --git a/opm/grid/GraphOfGridWrappers.hpp b/opm/grid/GraphOfGridWrappers.hpp index 00eba0c9f..6d9af7259 100644 --- a/opm/grid/GraphOfGridWrappers.hpp +++ b/opm/grid/GraphOfGridWrappers.hpp @@ -123,39 +123,63 @@ void addWellConnections (GraphOfGrid& gog, /// \brief Correct gIDtoRank's data about well cells /// -/// gIDtoRank is constructed with default value thisRank, other entries -/// come from Zoltan partitioner's export list that does not contain all well cells +/// gIDtoRank's entries come from Zoltan partitioner's export list +/// that does not contain all well cells. Default value is root's rank. +/// parameter root allows skipping wells that are correct. void extendGIDtoRank (const GraphOfGrid& gog, std::vector& gIDtoRank, - const int& thisRank = -1); -void extendGIDtoRank (const Dune::cpgrid::WellConnections& wellConnections, - std::vector& gIDtoRank, - const int& thisRank = -1); + const int& root = -1); -/// \brief Add well cells' global IDs to the list +namespace Impl{ +/// \brief Add well cells' global IDs to the import list /// -/// Output of the partitioning is missing vertices that were contracted. -/// This function fills in omitted gIDs and gives them the properties -/// (like process number and ownership) of their representative cell (well ID). -/// It is possible to skip wells on a given rank, because import and export -/// lists are expanded by all cells on a current rank in makeImportAndExportLists. -/// Knowing gIDtoRank speeds up the process, wells can be skipped sooner. -/// TheTuple is for ExportList and for ImportList. -template -void extendImportExportList (const GraphOfGrid& gog, - std::vector& cellList, - int skippedRank=-1, - const std::vector& gIDtoRank={}); +/// Helper function for extendExportAndImportLists. +/// Used on non-root ranks that do not have access to wells. void extendImportList (std::vector>& importList, const std::vector>& extraWells); -std::vector>> getExtendedExportList (const GraphOfGrid& gog, - std::vector>& exportList, - int root, - const std::vector& gIDtoRank); -std::vector> communicateToExtendWells ( - const std::vector>>& toBeCommunicatedCells, + +/// \brief Add well cells' global IDs to the root's export list and output other rank's wells +/// +/// Helper function for extendExportAndImportLists. +/// Does nothing on non-root ranks. +/// On root, exportList is extended by well cells that are hidden from the partitioner. 
+/// These wells are also collected and returned so they can be communicated to other ranks. +/// \return vector of size cc.size(). Each entry contains vector of wells exported to that rank. +std::vector>> +extendedRootExportList (const GraphOfGrid& gog, + std::vector>& exportList, + int root, + const std::vector& gIDtoRank); + +/// \brief Communicate wells exported from root, needed for extending other rank's import lists +/// +/// Helper function for extendExportAndImportLists. +/// \param exportedWells Contains for each rank the wells that are exported there, +/// empty on non-root ranks +/// \param cc Communication object +/// \param root The root's rank +/// \return Vector of wells necessary to extend this rank's import lists, +/// empty on the root rank +std::vector> communicateExportedWells ( + const std::vector>>& exportedWells, const Dune::cpgrid::CpGridDataTraits::Communication& cc, int root); +} // end namespace Impl + +/// \brief Add well cells' global IDs to the root's export and others' import list +/// +/// Output of the partitioning is missing vertices that were contracted. +/// This function fills in omitted gIDs and gives them the properties +/// (like process number and ownership) of their representative cell (well ID). +/// Root is the only rank with information about wells, and communicates +/// the necessary information to other ranks. +/// On root ImportList has been already extended with all cells on the current rank. +void extendExportAndImportLists(const GraphOfGrid& gog, + const Dune::cpgrid::CpGridDataTraits::Communication& cc, + int root, + std::vector>& exportList, + std::vector>& importList, + const std::vector& gIDtoRank={}); /// \brief Find to which ranks wells were assigned /// diff --git a/tests/test_graphofgrid.cpp b/tests/test_graphofgrid.cpp index 548d14be4..fb3520ad7 100644 --- a/tests/test_graphofgrid.cpp +++ b/tests/test_graphofgrid.cpp @@ -461,6 +461,9 @@ BOOST_AUTO_TEST_CASE(addWellConnections) // After partitioning, importList and exportList are not complete, // other cells from wells need to be added. 
BOOST_AUTO_TEST_CASE(ImportExportListExpansion) +{} + +BOOST_AUTO_TEST_CASE(gIDtoRankCorrection) { // create a grid with wells Dune::CpGrid grid; @@ -473,54 +476,6 @@ BOOST_AUTO_TEST_CASE(ImportExportListExpansion) const auto& wells = gog.getWells(); BOOST_REQUIRE(wells.size()==2); - // mock import and export lists - using importTuple = std::tuple; - using exportTuple = std::tuple; - using AttributeSet = Dune::cpgrid::CpGridData::AttributeSet; - - std::vector imp(3); - imp[0] = std::make_tuple(0,0,AttributeSet::owner,1); - imp[1] = std::make_tuple(3,4,AttributeSet::copy,2); - imp[2] = std::make_tuple(5,1,AttributeSet::copy,3); - extendImportExportList(gog,imp); - BOOST_REQUIRE(imp.size()==7); - for (int i=0; i<7; ++i) - { - if (i<3) - { - BOOST_CHECK(std::get<0>(imp[i])==i); - BOOST_CHECK(std::get<1>(imp[i])==0); - BOOST_CHECK(std::get<2>(imp[i])==AttributeSet::owner); - BOOST_CHECK(std::get<3>(imp[i])==1); - } - else if (i>3) - { - BOOST_CHECK(std::get<1>(imp[i])==1); - BOOST_CHECK(std::get<2>(imp[i])==AttributeSet::copy); - BOOST_CHECK(std::get<3>(imp[i])==3); - } - } - // lists are sorted by ID (by get<0>) - BOOST_CHECK(std::get<0>(imp[3])==3); - BOOST_CHECK(std::get<1>(imp[3])==4); - BOOST_CHECK(std::get<2>(imp[3])==AttributeSet::copy); - BOOST_CHECK(std::get<3>(imp[3])==2); - BOOST_CHECK(std::get<0>(imp[4])==5); - BOOST_CHECK(std::get<0>(imp[5])==8); - BOOST_CHECK(std::get<0>(imp[6])==11); - - std::vector exp(3); - exp[0] = std::make_tuple(0,0,AttributeSet::owner); - exp[1] = std::make_tuple(3,4,AttributeSet::copy); - exp[2] = std::make_tuple(5,1,AttributeSet::copy); - extendImportExportList(gog,exp); - for (int i=0; i<7; ++i) - { - BOOST_CHECK(std::get<0>(imp[i])==std::get<0>(exp[i])); - BOOST_CHECK(std::get<1>(imp[i])==std::get<1>(exp[i])); - BOOST_CHECK(std::get<2>(imp[i])==std::get<2>(exp[i])); - } - std::vectorgIDtoRank(12,1); gIDtoRank[0]=0; // well {0,1,2} gIDtoRank[8]=2; // inside well {5,8,11}, to be rewritten unless skipped @@ -535,17 +490,6 @@ BOOST_AUTO_TEST_CASE(ImportExportListExpansion) } extendGIDtoRank(gog,gIDtoRank); BOOST_CHECK(gIDtoRank[8]==1); - - // extendImportExportList skip Rank - std::vector expSkipped(3); - expSkipped[0] = std::make_tuple(0,0,AttributeSet::owner); - expSkipped[1] = std::make_tuple(3,4,AttributeSet::copy); - expSkipped[2] = std::make_tuple(5,1,AttributeSet::copy); - extendImportExportList(gog,expSkipped,0,gIDtoRank); - BOOST_REQUIRE(expSkipped.size()==5); - BOOST_CHECK(expSkipped[0]==exp[0]); - for (int i=1; i<5; ++i) - BOOST_CHECK_MESSAGE(expSkipped[i]==exp[i+2],"expSkipped[i]!=exp[i+2] with i="+std::to_string(i)); } // getWellRanks takes wellConnections and vector gIDtoRank mapping cells to their ranks From 1aa80f540f5e833cb36b4878c97da3a5467a868b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20T=C3=B3th?= Date: Tue, 19 Nov 2024 13:56:25 +0100 Subject: [PATCH 07/14] Change the communication of wells from root to others to pairwise --- opm/grid/GraphOfGridWrappers.cpp | 81 ++++++++++++++++---------------- 1 file changed, 41 insertions(+), 40 deletions(-) diff --git a/opm/grid/GraphOfGridWrappers.cpp b/opm/grid/GraphOfGridWrappers.cpp index c088c356b..cd9ca3f23 100644 --- a/opm/grid/GraphOfGridWrappers.cpp +++ b/opm/grid/GraphOfGridWrappers.cpp @@ -291,60 +291,61 @@ std::vector> communicateExportedWells ( const Dune::cpgrid::CpGridDataTraits::Communication& cc, int root) { - // create a vector with well-cells-to-be-imported that will be communicated to non-root ranks - int totsize = cc.size(); + // send data from root if (cc.rank()==root) { 
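// [Editorial sketch -- not part of this patch] The pairwise exchange introduced by this
// commit flattens each destination rank's wells into one int buffer with the layout
//   [numWells, size_0, gIDs of well 0 ..., size_1, gIDs of well 1 ..., ...]
// and sends it in two messages (buffer length first, then the payload); the receiver
// unpacks symmetrically. The helpers below are a minimal, self-contained illustration
// of that serialization only -- packWells/unpackWells are hypothetical names shown out
// of line, not OPM API; the actual MPI_Send/MPI_Recv calls follow in the patched code.
#include <set>
#include <vector>

std::vector<int> packWells(const std::vector<std::set<int>>& wells)
{
    std::vector<int> buffer;
    buffer.push_back(static_cast<int>(wells.size()));          // number of wells
    for (const auto& well : wells) {
        buffer.push_back(static_cast<int>(well.size()));       // cells in this well
        buffer.insert(buffer.end(), well.begin(), well.end()); // the global IDs
    }
    return buffer;
}

std::vector<std::set<int>> unpackWells(const std::vector<int>& buffer)
{
    std::vector<std::set<int>> wells(buffer[0]);
    std::size_t index = 1;
    for (auto& well : wells) {
        const std::size_t wellSize = buffer[index++];
        well.insert(buffer.begin() + index, buffer.begin() + index + wellSize);
        index += wellSize;
    }
    return wells;
}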
- totsize = 2*cc.size()-1; for (int i=0; i toBeCommunicated(totsize); - if (cc.rank()==root) - { - int index=cc.size(); - for (int i=1; i sentData(totsize); + int index = 0; + sentData[index++] = n; + for (const auto& well : exportedWells[i]) + { + sentData[index++] = well.size(); + for (const auto& gID : well) + { + sentData[index++] = gID; + } + } + assert(totsize==sentData.size()); + int tag = 37; // a random number + MPI_Send(&totsize, 1, MPI_INT, i, tag++, cc); + MPI_Send(sentData.data(), totsize, MPI_INT, i, tag, cc); } } + return {}; } - std::vector offsets; - std::tie(toBeCommunicated, offsets) = Opm::allGatherv(toBeCommunicated,cc); - if (cc.rank()==root) - { - return std::vector>(); - } - int index = toBeCommunicated[cc.rank()]; - int nrSets = toBeCommunicated[index++]; - std::vector> result(nrSets); - for (int i=0; i receivedData(totsize); + MPI_Recv(receivedData.data(), totsize, MPI_INT, root, tag, cc, MPI_STATUS_IGNORE); + + int n = receivedData[0]; + std::vector> result(n); + int index = 1; + for (int i=0; i>& importList, From 6afa478faf5da154a78a5f64caec10c74c25a133 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20T=C3=B3th?= Date: Tue, 19 Nov 2024 14:59:40 +0100 Subject: [PATCH 08/14] Hide MPI parameters in the serial build. --- opm/grid/GraphOfGridWrappers.cpp | 2 ++ opm/grid/GraphOfGridWrappers.hpp | 2 ++ 2 files changed, 4 insertions(+) diff --git a/opm/grid/GraphOfGridWrappers.cpp b/opm/grid/GraphOfGridWrappers.cpp index cd9ca3f23..dcc1697d7 100644 --- a/opm/grid/GraphOfGridWrappers.cpp +++ b/opm/grid/GraphOfGridWrappers.cpp @@ -205,6 +205,7 @@ void extendGIDtoRank (const GraphOfGrid& gog, } } +#if HAVE_MPI namespace Impl{ std::vector>> @@ -419,6 +420,7 @@ void extendExportAndImportLists(const GraphOfGrid& gog, Impl::extendImportList(importList,extraWells); } } +#endif // HAVE_MPI std::vector getWellRanks(const std::vector& gIDtoRank, const Dune::cpgrid::WellConnections& wellConnections) diff --git a/opm/grid/GraphOfGridWrappers.hpp b/opm/grid/GraphOfGridWrappers.hpp index 6d9af7259..2b63f59b0 100644 --- a/opm/grid/GraphOfGridWrappers.hpp +++ b/opm/grid/GraphOfGridWrappers.hpp @@ -130,6 +130,7 @@ void extendGIDtoRank (const GraphOfGrid& gog, std::vector& gIDtoRank, const int& root = -1); +#if HAVE_MPI namespace Impl{ /// \brief Add well cells' global IDs to the import list /// @@ -180,6 +181,7 @@ void extendExportAndImportLists(const GraphOfGrid& gog, std::vector>& exportList, std::vector>& importList, const std::vector& gIDtoRank={}); +#endif // HAVE_MPI /// \brief Find to which ranks wells were assigned /// From d903b4f287ce5af48620aece128a4a6abe86a7d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20T=C3=B3th?= Date: Tue, 19 Nov 2024 17:48:26 +0100 Subject: [PATCH 09/14] Parallel-proofing test_graphofgrid --- opm/grid/GraphOfGridWrappers.cpp | 2 +- tests/test_graphofgrid.cpp | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/opm/grid/GraphOfGridWrappers.cpp b/opm/grid/GraphOfGridWrappers.cpp index dcc1697d7..47a7fe675 100644 --- a/opm/grid/GraphOfGridWrappers.cpp +++ b/opm/grid/GraphOfGridWrappers.cpp @@ -317,7 +317,7 @@ std::vector> communicateExportedWells ( sentData[index++] = gID; } } - assert(totsize==sentData.size()); + assert(totsize==(int)sentData.size()); int tag = 37; // a random number MPI_Send(&totsize, 1, MPI_INT, i, tag++, cc); MPI_Send(sentData.data(), totsize, MPI_INT, i, tag, cc); diff --git a/tests/test_graphofgrid.cpp b/tests/test_graphofgrid.cpp index fb3520ad7..03472fc39 100644 --- a/tests/test_graphofgrid.cpp 
+++ b/tests/test_graphofgrid.cpp @@ -48,6 +48,8 @@ BOOST_AUTO_TEST_CASE(SimpleGraph) std::array size{2.,2.,2.}; grid.createCartesian(dims,size); Opm::GraphOfGrid gog(grid); + if (grid.size(0)==0) + return; BOOST_REQUIRE(gog.size()==8); // number of graph vertices BOOST_REQUIRE(gog.numEdges(0)==3); // each vertex has 3 neighbors @@ -70,6 +72,8 @@ BOOST_AUTO_TEST_CASE(SimpleGraphWithVertexContraction) std::array size{2.,2.,2.}; grid.createCartesian(dims,size); Opm::GraphOfGrid gog(grid); + if (grid.size(0)==0) + return; auto edgeL = gog.edgeList(3); // std::map(gID,edgeWeight) BOOST_REQUIRE(edgeL[1]==1); @@ -112,6 +116,8 @@ BOOST_AUTO_TEST_CASE(WrapperForZoltan) std::array size{1.,1.,1.}; grid.createCartesian(dims,size); Opm::GraphOfGrid gog(grid); + if (grid.size(0)==0) + return; int err; int nVer = getGraphOfGridNumVertices(&gog,&err); @@ -161,6 +167,8 @@ BOOST_AUTO_TEST_CASE(GraphWithWell) std::array size{1.,1.,1.}; grid.createCartesian(dims,size); Opm::GraphOfGrid gog(grid); + if (grid.size(0)==0) + return; std::unordered_map> wells{ {"shape L on the front face", {5,10,15,35,55} }, @@ -196,6 +204,8 @@ BOOST_AUTO_TEST_CASE(IntersectingWells) std::array size{1.,1.,1.}; grid.createCartesian(dims,size); Opm::GraphOfGrid gog(grid); + if (grid.size(0)==0) + return; std::array,3> wells{std::set{0,1,2,3,4}, std::set{52,32,12}, @@ -360,6 +370,8 @@ BOOST_AUTO_TEST_CASE(addWellConnections) std::array size{1.,1.,1.}; grid.createCartesian(dims,size); Opm::GraphOfGrid gog(grid); + if (grid.size(0)==0) + return; BOOST_REQUIRE(gog.size()==8); auto wellCon = std::make_shared(); // do not confuse with Dune::cpgrid::WellConnections @@ -471,6 +483,9 @@ BOOST_AUTO_TEST_CASE(gIDtoRankCorrection) std::array size{1.,1.,1.}; grid.createCartesian(dims,size); Opm::GraphOfGrid gog(grid); + if (grid.size(0)==0) + return; + gog.addWell(std::set{0,1,2}); gog.addWell(std::set{5,8,11}); const auto& wells = gog.getWells(); @@ -501,6 +516,8 @@ BOOST_AUTO_TEST_CASE(test_getWellRanks) std::array dims{1,2,4}; std::array size{1.,1.,1.}; grid.createCartesian(dims,size); + if (grid.size(0)==0) + return; auto wellCon = std::make_shared(); wellCon->add(createConnection(0,0,0)); From 53a30ef6b490e36d7fcd7250a2c91244627fd573 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20T=C3=B3th?= Date: Wed, 20 Nov 2024 15:37:32 +0100 Subject: [PATCH 10/14] Add parallel test for GraphOfGridWrappers::extendExportAndImportLists --- CMakeLists.txt | 4 +- opm/grid/GraphOfGridWrappers.cpp | 2 +- opm/grid/GraphOfGridWrappers.hpp | 2 +- tests/test_graphofgrid.cpp | 9 +- tests/test_graphofgrid_parallel.cpp | 153 ++++++++++++++++++++++++++++ 5 files changed, 160 insertions(+), 10 deletions(-) create mode 100644 tests/test_graphofgrid_parallel.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 709f91675..67c9bb8a9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -119,10 +119,12 @@ if(MPI_FOUND) if(NOT MPIEXEC_EXECUTABLE) set(MPIEXEC_EXECUTABLE ${MPIEXEC}) endif() - add_test(addLgrsOnDistributedGrid_test_parallel ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} 4 bin/addLgrsOnDistributedGrid_test) + add_test(addLgrsOnDistributedGrid_test_parallel ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} 4 bin/addLgrsOnDistributedGrid_test) add_test(distribution_test_parallel ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} 4 bin/distribution_test) if(Boost_VERSION_STRING VERSION_GREATER 1.53) add_test(inactiveCell_lgr_test_parallel ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} 4 bin/inactiveCell_lgr_test) + add_test(test_graphofgrid_parallel3 ${MPIEXEC_EXECUTABLE} 
${MPIEXEC_NUMPROC_FLAG} 3 bin/test_graphofgrid_parallel) + add_test(test_graphofgrid_parallel ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} 4 bin/test_graphofgrid_parallel) endif() add_test(test_communication_utils_parallel ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} 4 bin/test_communication_utils) endif() diff --git a/opm/grid/GraphOfGridWrappers.cpp b/opm/grid/GraphOfGridWrappers.cpp index 47a7fe675..0d72e3747 100644 --- a/opm/grid/GraphOfGridWrappers.cpp +++ b/opm/grid/GraphOfGridWrappers.cpp @@ -631,7 +631,7 @@ zoltanPartitioningWithGraphOfGrid(const Dune::CpGrid& grid, std::move(std::get<1>(importExportLists)), std::move(std::get<2>(importExportLists)), std::move(std::get<3>(importExportLists)), - wellConnections); + std::move(wellConnections)); return result; } #endif // HAVE_MPI diff --git a/opm/grid/GraphOfGridWrappers.hpp b/opm/grid/GraphOfGridWrappers.hpp index 2b63f59b0..0737e0194 100644 --- a/opm/grid/GraphOfGridWrappers.hpp +++ b/opm/grid/GraphOfGridWrappers.hpp @@ -147,7 +147,7 @@ void extendImportList (std::vector>& importList, /// These wells are also collected and returned so they can be communicated to other ranks. /// \return vector of size cc.size(). Each entry contains vector of wells exported to that rank. std::vector>> -extendedRootExportList (const GraphOfGrid& gog, +extendRootExportList (const GraphOfGrid& gog, std::vector>& exportList, int root, const std::vector& gIDtoRank); diff --git a/tests/test_graphofgrid.cpp b/tests/test_graphofgrid.cpp index 03472fc39..f3b93b5a8 100644 --- a/tests/test_graphofgrid.cpp +++ b/tests/test_graphofgrid.cpp @@ -48,7 +48,7 @@ BOOST_AUTO_TEST_CASE(SimpleGraph) std::array size{2.,2.,2.}; grid.createCartesian(dims,size); Opm::GraphOfGrid gog(grid); - if (grid.size(0)==0) + if (grid.size(0)==0) // in prallel run, non-root ranks are empty return; BOOST_REQUIRE(gog.size()==8); // number of graph vertices @@ -59,7 +59,7 @@ BOOST_AUTO_TEST_CASE(SimpleGraph) BOOST_REQUIRE(edgeL[0]==1.); BOOST_REQUIRE(edgeL[3]==1.); BOOST_REQUIRE(edgeL[6]==1.); - BOOST_REQUIRE_THROW(edgeL.at(4),std::out_of_range); // not a neighbor (edgeL's size increased) + BOOST_REQUIRE_THROW(edgeL.at(4),std::out_of_range); // not a neighbor BOOST_REQUIRE_THROW(gog.edgeList(10),std::logic_error); // vertex 10 is not in the graph } @@ -470,11 +470,6 @@ BOOST_AUTO_TEST_CASE(addWellConnections) } #endif // HAVE_MPI -// After partitioning, importList and exportList are not complete, -// other cells from wells need to be added. -BOOST_AUTO_TEST_CASE(ImportExportListExpansion) -{} - BOOST_AUTO_TEST_CASE(gIDtoRankCorrection) { // create a grid with wells diff --git a/tests/test_graphofgrid_parallel.cpp b/tests/test_graphofgrid_parallel.cpp new file mode 100644 index 000000000..fd1397881 --- /dev/null +++ b/tests/test_graphofgrid_parallel.cpp @@ -0,0 +1,153 @@ +// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- +// vi: set et ts=4 sw=4 sts=4: +/* + Copyright 2024 Equinor ASA. + + This file is part of the Open Porous Media project (OPM). + + OPM is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + OPM is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with OPM. If not, see . + + Consult the COPYING file in the top-level source directory of this + module for the precise wording of the license and the list of + copyright holders. +*/ + +#include + +#include + +#define BOOST_TEST_MODULE GraphRepresentationOfGridParallel +#define BOOST_TEST_NO_MAIN +#include +#include + +#include +#include + +#if HAVE_MPI +// After partitioning, importList and exportList are not complete, +// other cells from wells need to be added. +BOOST_AUTO_TEST_CASE(ImportExportListExpansion) +{ + Dune::CpGrid grid; + std::array dims{3,3,2}; + std::array size{1.,1.,1.}; + grid.createCartesian(dims,size); + const auto& cc = grid.comm(); + if (cc.size()==1) + return; + + Opm::GraphOfGrid gog(grid); + // grid is nonempty only on the rank 0 + if (cc.rank()==0) + { + gog.addWell(std::set{0,1,2}); + gog.addWell(std::set{3,4,5}); + gog.addWell(std::set{6,7,8}); + gog.addWell(std::set{9,13,17}); + BOOST_REQUIRE(gog.size()==10); + } + + // rank-specific export and import lists + std::vector> exportList, exportSolution; + std::vector> importList, importSolution; + // this test works on any number or ranks although from rank 4 (including) all are empty + // if ranks<4, the highest rank gobbles leftovers + int maxrank = cc.size()-1; + std::vector ranks{0,std::min(maxrank,1),std::min(maxrank,2),std::min(maxrank,3)}; + + // cells[rank] holds solution, each rank 0..3 has 1 well + std::vector> cells(4); + cells[0] = std::vector{0,1,2,10,11}; + cells[1] = std::vector{3,4,5,12}; + cells[2] = std::vector{6,7,8,15,16}; + cells[3] = std::vector{9,13,14,17}; + using AttributeSet = Dune::cpgrid::CpGridData::AttributeSet; + char owner = AttributeSet::owner; + + for (int i=0; i<4; ++i) + { + for (const auto& c : cells[i]) + { + if (cc.rank()==ranks[i]) + { + importSolution.push_back(std::make_tuple(c,ranks[i],owner,-1)); + } + if (cc.rank()==0) + { + exportSolution.push_back(std::make_tuple(c,ranks[i],owner)); + } + } + } + std::sort(importSolution.begin(),importSolution.end()); + std::sort(exportSolution.begin(),exportSolution.end()); + + if (cc.rank()==ranks[0]) + { + // Zoltan does not include cells that remain on the rank into import and export list + // but they are added manually to both lists (cell on root is in its import AND export) + // before extendExportAndImportLists is called + for (const auto& gID : cells[ranks[0]]) + { + importList.push_back(std::make_tuple(gID,ranks[0],owner,-1)); + exportList.push_back(std::make_tuple(gID,ranks[0],owner)); + } + exportList.push_back(std::make_tuple(3,ranks[1],owner)); + exportList.push_back(std::make_tuple(12,ranks[1],owner)); + exportList.push_back(std::make_tuple(6,ranks[2],owner)); + exportList.push_back(std::make_tuple(15,ranks[2],owner)); + exportList.push_back(std::make_tuple(16,ranks[2],owner)); + exportList.push_back(std::make_tuple(9,ranks[3],owner)); + exportList.push_back(std::make_tuple(14,ranks[3],owner)); + std::sort(exportList.begin(),exportList.end()); + } + else if (cc.rank()==ranks[1]) + { + // non-root ranks have empty export list + // import list is not sorted + importList.push_back(std::make_tuple(12,ranks[1],owner,-1)); + importList.push_back(std::make_tuple(3,ranks[1],owner,-1)); + } + // no else branch, ranks[2]==1 when cc.size==2 + if (cc.rank()==ranks[2]) + { + importList.push_back(std::make_tuple(15,ranks[2],owner,-1)); + importList.push_back(std::make_tuple(6,ranks[2],owner,-1)); + 
importList.push_back(std::make_tuple(16,ranks[2],owner,-1)); + } + if (cc.rank()==ranks[3]) + { + importList.push_back(std::make_tuple(9,ranks[3],owner,-1)); + importList.push_back(std::make_tuple(14,ranks[3],owner,-1)); + } + + extendExportAndImportLists(gog,cc,ranks[0],exportList,importList); + + BOOST_CHECK_MESSAGE(importList==importSolution,"On rank "+std::to_string(cc.rank())); + BOOST_CHECK_MESSAGE(exportList==exportSolution,"On rank "+std::to_string(cc.rank())); +} +#endif // HAVE_MPI + +bool +init_unit_test_func() +{ + return true; +} + +int main(int argc, char** argv) +{ + Dune::MPIHelper::instance(argc, argv); + boost::unit_test::unit_test_main(&init_unit_test_func, + argc, argv); +} From 23584562344816fec2e480b7a74b5e5a49336804 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20T=C3=B3th?= Date: Thu, 21 Nov 2024 16:39:53 +0100 Subject: [PATCH 11/14] Refactoring. Indentation, comments, and few small optimizations. --- opm/grid/GraphOfGrid.cpp | 2 +- opm/grid/GraphOfGrid.hpp | 2 +- opm/grid/GraphOfGridWrappers.cpp | 118 ++++++++++++++-------------- opm/grid/GraphOfGridWrappers.hpp | 57 ++++++++------ tests/test_graphofgrid.cpp | 2 +- tests/test_graphofgrid_parallel.cpp | 2 +- 6 files changed, 96 insertions(+), 87 deletions(-) diff --git a/opm/grid/GraphOfGrid.cpp b/opm/grid/GraphOfGrid.cpp index 5d723c265..4daea16a9 100644 --- a/opm/grid/GraphOfGrid.cpp +++ b/opm/grid/GraphOfGrid.cpp @@ -7,7 +7,7 @@ OPM is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 2 of the License, or + the Free Software Foundation, either version 3 of the License, or (at your option) any later version. OPM is distributed in the hope that it will be useful, diff --git a/opm/grid/GraphOfGrid.hpp b/opm/grid/GraphOfGrid.hpp index 3119f5c1b..0a37116e5 100644 --- a/opm/grid/GraphOfGrid.hpp +++ b/opm/grid/GraphOfGrid.hpp @@ -7,7 +7,7 @@ OPM is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 2 of the License, or + the Free Software Foundation, either version 3 of the License, or (at your option) any later version. OPM is distributed in the hope that it will be useful, diff --git a/opm/grid/GraphOfGridWrappers.cpp b/opm/grid/GraphOfGridWrappers.cpp index 0d72e3747..d2b4fdbba 100644 --- a/opm/grid/GraphOfGridWrappers.cpp +++ b/opm/grid/GraphOfGridWrappers.cpp @@ -7,7 +7,7 @@ OPM is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 2 of the License, or + the Free Software Foundation, either version 3 of the License, or (at your option) any later version. OPM is distributed in the hope that it will be useful, @@ -131,8 +131,8 @@ void getGraphOfGridEdgeList(void *pGraph, template void setGraphOfGridZoltanGraphFunctions(Zoltan_Struct *zz, - const GraphOfGrid& gog, - bool pretendNull) + const GraphOfGrid& gog, + bool pretendNull) { GraphOfGrid* pGraph = const_cast*>(&gog); if (pretendNull) @@ -152,9 +152,9 @@ void setGraphOfGridZoltanGraphFunctions(Zoltan_Struct *zz, } #endif // HAVE_MPI -void addFutureConnectionWells (GraphOfGrid& gog, - const std::unordered_map>& wells, - bool checkWellIntersections) +void addFutureConnectionWells(GraphOfGrid& gog, + const std::unordered_map>& wells, + bool checkWellIntersections) { // create compressed lookup from cartesian. 
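// [Editorial sketch -- not part of this patch] The "compressed lookup from cartesian"
// built here inverts the grid's cartesian cell numbering: a vector sized to the full
// cartesian grid is filled with -1 (inactive cells, matching the assert on gID!=-1
// below) and every active cell writes its compressed index into its cartesian slot, so
// well cells given as cartesian indices map to active-cell IDs in O(1). A minimal
// standalone version of the idea (makeCartesianToCompressed and globalCell are
// illustrative names, not the OPM helper this function actually uses):
#include <cstddef>
#include <vector>

std::vector<int> makeCartesianToCompressed(int cartesianSize,
                                           const std::vector<int>& globalCell) // cartesian index of each active cell
{
    std::vector<int> lookup(cartesianSize, -1);    // -1 marks inactive cells
    for (std::size_t compressed = 0; compressed < globalCell.size(); ++compressed) {
        lookup[globalCell[compressed]] = static_cast<int>(compressed);
    }
    return lookup;
}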
const auto& grid = gog.getGrid(); @@ -170,7 +170,7 @@ void addFutureConnectionWells (GraphOfGrid& gog, std::set wellsgID; for (const int& cell : w.second) { - int gID = cartesian_to_compressed.at(cell); + int gID = cartesian_to_compressed[cell]; assert(gID!=-1); // well should be an active cell wellsgID.insert(gID); } @@ -178,9 +178,9 @@ void addFutureConnectionWells (GraphOfGrid& gog, } } -void addWellConnections (GraphOfGrid& gog, - const Dune::cpgrid::WellConnections& wells, - bool checkWellIntersections) +void addWellConnections(GraphOfGrid& gog, + const Dune::cpgrid::WellConnections& wells, + bool checkWellIntersections) { for (const auto& w : wells) { @@ -188,18 +188,18 @@ void addWellConnections (GraphOfGrid& gog, } } -void extendGIDtoRank (const GraphOfGrid& gog, - std::vector& gIDtoRank, - const int& root) +void extendGIDtoRank(const GraphOfGrid& gog, + std::vector& gIDtoRank, + const int& root) { for (const auto& w : gog.getWells()) { - auto wellID = *w.begin(); - if (gIDtoRank[wellID]!=root) + auto wellRank = gIDtoRank[*w.begin()]; + if (wellRank!=root) { for (const auto& gID : w) { - gIDtoRank.at(gID) = gIDtoRank.at(wellID); + gIDtoRank[gID] = wellRank; } } } @@ -209,10 +209,10 @@ void extendGIDtoRank (const GraphOfGrid& gog, namespace Impl{ std::vector>> -extendRootExportList (const GraphOfGrid& gog, - std::vector>& exportList, - int root, - const std::vector& gIDtoRank) +extendRootExportList(const GraphOfGrid& gog, + std::vector>& exportList, + int root, + const std::vector& gIDtoRank) { const auto& cc = gog.getGrid().comm(); // non-root ranks have empty export lists. @@ -230,7 +230,7 @@ extendRootExportList (const GraphOfGrid& gog, if (gIDtoRank.size()>0) { auto wellID = *well.begin(); - if (gIDtoRank.at(wellID)!=root) + if (gIDtoRank[wellID]!=root) { wellMap[wellID] = std::make_pair(well.begin(),well.end()); } @@ -276,54 +276,53 @@ extendRootExportList (const GraphOfGrid& gog, } // add new cells to the exportList and sort it. It is assumed that exportList starts sorted. 
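// [Editorial sketch -- not part of this patch] The merging idiom used here and in the
// other list-extending helpers: the newly added entries are sorted on their own and
// then merged into the already sorted list with std::inplace_merge, avoiding a full
// re-sort. With the custom comparator dropped (as in this refactoring), the default
// lexicographic tuple comparison orders by global ID first. A minimal standalone
// illustration (appendSorted is an illustrative name; it assumes 'list' is sorted on
// entry, as the comment above states for exportList):
#include <algorithm>
#include <tuple>
#include <vector>

void appendSorted(std::vector<std::tuple<int, int, char>>& list,
                  std::vector<std::tuple<int, int, char>> additions)
{
    std::sort(additions.begin(), additions.end());   // sort only the new entries
    const auto origSize = list.size();
    list.reserve(origSize + additions.size());
    list.insert(list.end(), additions.begin(), additions.end());
    // merge the two sorted ranges [begin, begin+origSize) and [begin+origSize, end)
    std::inplace_merge(list.begin(), list.begin() + origSize, list.end());
}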
- auto compareTuple = [](const auto& a, const auto& b){return std::get<0>(a)(b);}; - std::sort(addToList.begin(),addToList.end(),compareTuple); + std::sort(addToList.begin(),addToList.end()); auto origSize = exportList.size(); auto totsize = origSize+addToList.size(); exportList.reserve(totsize); exportList.insert(exportList.end(),addToList.begin(),addToList.end()); - std::inplace_merge(exportList.begin(),exportList.begin()+origSize,exportList.end(),compareTuple); + std::inplace_merge(exportList.begin(),exportList.begin()+origSize,exportList.end()); return exportedWells; } -std::vector> communicateExportedWells ( +std::vector> communicateExportedWells( const std::vector>>& exportedWells, const Dune::cpgrid::CpGridDataTraits::Communication& cc, int root) { // send data from root + std::vector> result; if (cc.rank()==root) { for (int i=0; i sentData(totsize); - int index = 0; - sentData[index++] = n; + std::vector commData; + commData.reserve(totsize); + commData.push_back(numWells); for (const auto& well : exportedWells[i]) { - sentData[index++] = well.size(); + commData.push_back(well.size()); for (const auto& gID : well) { - sentData[index++] = gID; + commData.push_back(gID); } } - assert(totsize==(int)sentData.size()); + assert(totsize==(int)commData.size()); int tag = 37; // a random number MPI_Send(&totsize, 1, MPI_INT, i, tag++, cc); - MPI_Send(sentData.data(), totsize, MPI_INT, i, tag, cc); + MPI_Send(commData.data(), totsize, MPI_INT, i, tag, cc); } } - return {}; } else // receive data from root { @@ -333,10 +332,10 @@ std::vector> communicateExportedWells ( std::vector receivedData(totsize); MPI_Recv(receivedData.data(), totsize, MPI_INT, root, tag, cc, MPI_STATUS_IGNORE); - int n = receivedData[0]; - std::vector> result(n); + int numWells = receivedData[0]; + result.resize(numWells); int index = 1; - for (int i=0; i> communicateExportedWells ( result[i].emplace(receivedData[index++]); } } - return result; } + return result; } -void extendImportList (std::vector>& importList, - const std::vector>& extraWells) +void extendImportList(std::vector>& importList, + const std::vector>& extraWells) { using ImportList = std::vector>; // make a list of wells for easy identification. Contains ID, begin, end @@ -392,23 +391,22 @@ void extendImportList (std::vector>& importList, } // add new cells to the importList and sort it. It is assumed that importList starts sorted. 
- auto compareTuple = [](const auto& a, const auto& b){return std::get<0>(a)(b);}; - std::sort(addToList.begin(),addToList.end(),compareTuple); + std::sort(addToList.begin(),addToList.end()); auto origSize = importList.size(); auto totsize = origSize+addToList.size(); importList.reserve(totsize); importList.insert(importList.end(),addToList.begin(),addToList.end()); - std::inplace_merge(importList.begin(),importList.begin()+origSize,importList.end(),compareTuple); + std::inplace_merge(importList.begin(),importList.begin()+origSize,importList.end()); } } // end namespace Impl void extendExportAndImportLists(const GraphOfGrid& gog, - const Dune::cpgrid::CpGridDataTraits::Communication& cc, - int root, - std::vector>& exportList, - std::vector>& importList, - const std::vector& gIDtoRank) + const Dune::cpgrid::CpGridDataTraits::Communication& cc, + int root, + std::vector>& exportList, + std::vector>& importList, + const std::vector& gIDtoRank) { // extend root's export list and get sets of well cells for other ranks auto expListToComm = Impl::extendRootExportList(gog,exportList,root,gIDtoRank); @@ -423,7 +421,7 @@ void extendExportAndImportLists(const GraphOfGrid& gog, #endif // HAVE_MPI std::vector getWellRanks(const std::vector& gIDtoRank, - const Dune::cpgrid::WellConnections& wellConnections) + const Dune::cpgrid::WellConnections& wellConnections) { std::vector wellIndices(wellConnections.size()); for (std::size_t wellIndex = 0; wellIndex < wellConnections.size(); ++wellIndex) @@ -445,7 +443,7 @@ wellsOnThisRank(const std::vector& wells, std::vector> wells_on_proc(numProcs); for (std::size_t i=0; i& gog, std::vector > wellsOnProc; // List entry: process to export to, (global) index, process rank, attribute there (not needed?) - std::vector> myExportList(numExport); + std::vector> myExportList; // List entry: process to import from, global index, process rank, attribute here, local index (determined later) - std::vector> myImportList(numImport); - myExportList.reserve(1.2*myExportList.size()); - myImportList.reserve(1.2*myImportList.size()); + std::vector> myImportList; + float buffer = 1.05; // to allocate extra space for wells in myExportList and myImportList + assert(rank==root || numExport==0); + assert(rank!=root || numImport==0); + // all cells on root are added to its export and its import list + std::size_t reserveEx = rank!=root ? 0 : cpgrid.size(0); + std::size_t reserveIm = rank!=root ? 
buffer*numImport : cpgrid.size(0)*buffer/cc.size(); + myExportList.reserve(reserveEx); + myImportList.reserve(reserveIm); using AttributeSet = Dune::cpgrid::CpGridData::AttributeSet; for ( int i=0; i < numImport; ++i ) { - myImportList[i] = std::make_tuple(importGlobalGids[i], root, static_cast(AttributeSet::owner),-1); + myImportList.emplace_back(importGlobalGids[i], root, static_cast(AttributeSet::owner),-1); } assert(rank==root || numExport==0); if (rank==root) @@ -491,7 +495,7 @@ makeImportAndExportLists(const GraphOfGrid& gog, for ( int i=0; i < numExport; ++i ) { gIDtoRank[exportGlobalGids[i]] = exportToPart[i]; - myExportList[i] = std::make_tuple(exportGlobalGids[i], exportToPart[i], static_cast(AttributeSet::owner)); + myExportList.emplace_back(exportGlobalGids[i], exportToPart[i], static_cast(AttributeSet::owner)); } std::sort(myExportList.begin(),myExportList.end()); // partitioner sees only one cell per well, modify remaining diff --git a/opm/grid/GraphOfGridWrappers.hpp b/opm/grid/GraphOfGridWrappers.hpp index 0737e0194..1a51947a8 100644 --- a/opm/grid/GraphOfGridWrappers.hpp +++ b/opm/grid/GraphOfGridWrappers.hpp @@ -7,7 +7,7 @@ OPM is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 2 of the License, or + the Free Software Foundation, either version 3 of the License, or (at your option) any later version. OPM is distributed in the hope that it will be useful, @@ -94,8 +94,8 @@ void getGraphOfGridEdgeList(void *pGraph, /// \brief Register callback functions to Zoltan template void setGraphOfGridZoltanGraphFunctions(Zoltan_Struct *zz, - const GraphOfGrid& gog, - bool pretendNull); + const GraphOfGrid& gog, + bool pretendNull); #endif /// \brief Adds well to the GraphOfGrid @@ -107,9 +107,9 @@ void setGraphOfGridZoltanGraphFunctions(Zoltan_Struct *zz, /// intersect and if their cell IDs are present in the graph. /// Setting it to false makes the algorithm faster but leaves user /// responsible for keeping wells disjoint. -void addFutureConnectionWells (GraphOfGrid& gog, - const std::unordered_map>& wells, - bool checkWellIntersections=true); +void addFutureConnectionWells(GraphOfGrid& gog, + const std::unordered_map>& wells, + bool checkWellIntersections=true); /// \brief Add WellConnections to the GraphOfGrid /// @@ -117,18 +117,18 @@ void addFutureConnectionWells (GraphOfGrid& gog, /// intersect and if their cell IDs are present in the graph. /// Setting it to false makes the algorithm faster but leaves user /// responsible for keeping wells disjoint. -void addWellConnections (GraphOfGrid& gog, - const Dune::cpgrid::WellConnections& wells, - bool checkWellIntersections=true); +void addWellConnections(GraphOfGrid& gog, + const Dune::cpgrid::WellConnections& wells, + bool checkWellIntersections=true); /// \brief Correct gIDtoRank's data about well cells /// /// gIDtoRank's entries come from Zoltan partitioner's export list /// that does not contain all well cells. Default value is root's rank. /// parameter root allows skipping wells that are correct. -void extendGIDtoRank (const GraphOfGrid& gog, - std::vector& gIDtoRank, - const int& root = -1); +void extendGIDtoRank(const GraphOfGrid& gog, + std::vector& gIDtoRank, + const int& root = -1); #if HAVE_MPI namespace Impl{ @@ -136,32 +136,37 @@ namespace Impl{ /// /// Helper function for extendExportAndImportLists. /// Used on non-root ranks that do not have access to wells. 
-void extendImportList (std::vector>& importList, - const std::vector>& extraWells); +void extendImportList(std::vector>& importList, + const std::vector>& extraWells); /// \brief Add well cells' global IDs to the root's export list and output other rank's wells /// /// Helper function for extendExportAndImportLists. -/// Does nothing on non-root ranks. +/// On non-root ranks, it does nothing and returns an empty vector. /// On root, exportList is extended by well cells that are hidden from the partitioner. /// These wells are also collected and returned so they can be communicated to other ranks. /// \return vector of size cc.size(). Each entry contains vector of wells exported to that rank. std::vector>> -extendRootExportList (const GraphOfGrid& gog, - std::vector>& exportList, - int root, - const std::vector& gIDtoRank); +extendRootExportList(const GraphOfGrid& gog, + std::vector>& exportList, + int root, + const std::vector& gIDtoRank); /// \brief Communicate wells exported from root, needed for extending other rank's import lists /// /// Helper function for extendExportAndImportLists. +/// Only the root rank has acccess to the grid and can map well coordinates +/// to the cell global ID. This function communicates well cell IDs to +/// other ranks. Input (the prepared container with cell IDs) is thus +/// relevant only on the root rank, whereas output (well cells received +/// by MPI communication) only on the non-root ranks. /// \param exportedWells Contains for each rank the wells that are exported there, /// empty on non-root ranks /// \param cc Communication object /// \param root The root's rank /// \return Vector of wells necessary to extend this rank's import lists, /// empty on the root rank -std::vector> communicateExportedWells ( +std::vector> communicateExportedWells( const std::vector>>& exportedWells, const Dune::cpgrid::CpGridDataTraits::Communication& cc, int root); @@ -176,11 +181,11 @@ std::vector> communicateExportedWells ( /// the necessary information to other ranks. /// On root ImportList has been already extended with all cells on the current rank. void extendExportAndImportLists(const GraphOfGrid& gog, - const Dune::cpgrid::CpGridDataTraits::Communication& cc, - int root, - std::vector>& exportList, - std::vector>& importList, - const std::vector& gIDtoRank={}); + const Dune::cpgrid::CpGridDataTraits::Communication& cc, + int root, + std::vector>& exportList, + std::vector>& importList, + const std::vector& gIDtoRank={}); #endif // HAVE_MPI /// \brief Find to which ranks wells were assigned @@ -189,7 +194,7 @@ void extendExportAndImportLists(const GraphOfGrid& gog, /// \param gIDtoRank Takes global ID and returns rank /// \param wellConnections Has global IDs of well cells std::vector getWellRanks(const std::vector& gIDtoRank, - const Dune::cpgrid::WellConnections& wellConnections); + const Dune::cpgrid::WellConnections& wellConnections); #if HAVE_MPI /// \brief Get rank-specific information about which wells are present diff --git a/tests/test_graphofgrid.cpp b/tests/test_graphofgrid.cpp index f3b93b5a8..f164d5186 100644 --- a/tests/test_graphofgrid.cpp +++ b/tests/test_graphofgrid.cpp @@ -7,7 +7,7 @@ OPM is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 2 of the License, or + the Free Software Foundation, either version 3 of the License, or (at your option) any later version. 
OPM is distributed in the hope that it will be useful, diff --git a/tests/test_graphofgrid_parallel.cpp b/tests/test_graphofgrid_parallel.cpp index fd1397881..6abf2d658 100644 --- a/tests/test_graphofgrid_parallel.cpp +++ b/tests/test_graphofgrid_parallel.cpp @@ -7,7 +7,7 @@ OPM is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 2 of the License, or + the Free Software Foundation, either version 3 of the License, or (at your option) any later version. OPM is distributed in the hope that it will be useful, From abff6d7ead99833bd4e295dcd66072537268ad16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20T=C3=B3th?= Date: Thu, 21 Nov 2024 17:42:56 +0100 Subject: [PATCH 12/14] Avoid unnecessary vector initialization. --- opm/grid/common/ZoltanPartition.cpp | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/opm/grid/common/ZoltanPartition.cpp b/opm/grid/common/ZoltanPartition.cpp index 3d2407f3c..2903c1141 100644 --- a/opm/grid/common/ZoltanPartition.cpp +++ b/opm/grid/common/ZoltanPartition.cpp @@ -60,23 +60,30 @@ makeImportAndExportLists(const Dune::CpGrid& cpgrid, std::vector > wellsOnProc; // List entry: process to export to, (global) index, process rank, attribute there (not needed?) - std::vector> myExportList(numExport); + std::vector> myExportList; // List entry: process to import from, global index, process rank, attribute here, local index // (determined later) - std::vector> myImportList(numImport); - myExportList.reserve(1.2*myExportList.size()); - myImportList.reserve(1.2*myImportList.size()); + std::vector> myImportList; + assert(rank==root || numExport==0); + assert(rank!=root || numImport==0); + int buffer = 1.05; + // all cells on root are added to its export and its import list + // myImportList will be expanded if distributed wells are not allowed and well cells are moved + std::size_t reserveEx = rank!=root ? 0 : cpgrid.size(0); + std::size_t reserveIm = rank!=root ? buffer*numImport : cpgrid.size(0)*buffer/cc.size(); + myExportList.reserve(reserveEx); + myImportList.reserve(reserveIm); using AttributeSet = Dune::cpgrid::CpGridData::AttributeSet; for ( int i=0; i < numExport; ++i ) { parts[exportLocalGids[i]] = exportToPart[i]; - myExportList[i] = std::make_tuple(exportGlobalGids[i], exportToPart[i], static_cast(AttributeSet::owner)); + myExportList.emplace_back(exportGlobalGids[i], exportToPart[i], static_cast(AttributeSet::owner)); } for ( int i=0; i < numImport; ++i ) { - myImportList[i] = std::make_tuple(importGlobalGids[i], root, static_cast(AttributeSet::owner),-1); + myImportList.emplace_back(importGlobalGids[i], root, static_cast(AttributeSet::owner),-1); } From dbe686751c0d561e15afb86066a5764d7e13fda4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20T=C3=B3th?= Date: Fri, 22 Nov 2024 10:33:46 +0100 Subject: [PATCH 13/14] Add the graphofgrid parallel test to cmakelists. 
--- CMakeLists_files.cmake | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeLists_files.cmake b/CMakeLists_files.cmake index acafebe73..979286b10 100644 --- a/CMakeLists_files.cmake +++ b/CMakeLists_files.cmake @@ -129,6 +129,7 @@ if(Boost_VERSION_STRING VERSION_GREATER 1.53) tests/cpgrid/lookupdataCpGrid_test.cpp tests/cpgrid/shifted_cart_test.cpp tests/test_graphofgrid.cpp + tests/test_graphofgrid_parallel.cpp ) endif() From 8e60e33bf47d214e0ed7aa2e0b35724880bd6255 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20T=C3=B3th?= Date: Fri, 22 Nov 2024 18:37:31 +0100 Subject: [PATCH 14/14] Change std::set to vector in structures leading to communication. --- opm/grid/GraphOfGridWrappers.cpp | 59 ++++++++++++++++---------------- opm/grid/GraphOfGridWrappers.hpp | 12 +++---- 2 files changed, 36 insertions(+), 35 deletions(-) diff --git a/opm/grid/GraphOfGridWrappers.cpp b/opm/grid/GraphOfGridWrappers.cpp index d2b4fdbba..bdd71b2fe 100644 --- a/opm/grid/GraphOfGridWrappers.cpp +++ b/opm/grid/GraphOfGridWrappers.cpp @@ -131,10 +131,10 @@ void getGraphOfGridEdgeList(void *pGraph, template void setGraphOfGridZoltanGraphFunctions(Zoltan_Struct *zz, - const GraphOfGrid& gog, + GraphOfGrid& gog, bool pretendNull) { - GraphOfGrid* pGraph = const_cast*>(&gog); + GraphOfGrid* pGraph = &gog; if (pretendNull) { Zoltan_Set_Num_Obj_Fn(zz, Dune::cpgrid::getNullNumCells, pGraph); @@ -208,7 +208,7 @@ void extendGIDtoRank(const GraphOfGrid& gog, #if HAVE_MPI namespace Impl{ -std::vector>> +std::vector>> extendRootExportList(const GraphOfGrid& gog, std::vector>& exportList, int root, @@ -216,15 +216,16 @@ extendRootExportList(const GraphOfGrid& gog, { const auto& cc = gog.getGrid().comm(); // non-root ranks have empty export lists. + std::vector>> exportedWells; if (cc.rank()!=root) { - return std::vector>>(); + return exportedWells; } - std::vector>> exportedWells(cc.size()); + exportedWells.resize(cc.size()); using ExportList = std::vector>; // make a list of wells for easy identification. 
Contains ID, begin, end using iter = std::set::const_iterator; - std::unordered_map> wellMap; + std::unordered_map> wellMap; for (const auto& well : gog.getWells()) { if (gIDtoRank.size()>0) @@ -232,12 +233,12 @@ extendRootExportList(const GraphOfGrid& gog, auto wellID = *well.begin(); if (gIDtoRank[wellID]!=root) { - wellMap[wellID] = std::make_pair(well.begin(),well.end()); + wellMap[wellID] = std::make_tuple(well.begin(),well.end(),well.size()); } } else { - wellMap[*well.begin()] = std::make_pair(well.begin(),well.end()); + wellMap[*well.begin()] = std::make_tuple(well.begin(),well.end(),well.size()); } } @@ -252,8 +253,10 @@ extendRootExportList(const GraphOfGrid& gog, int rankToExport = std::get<1>(cellProperties); if (rankToExport!=root) { - const auto& [begin,end] = pWell->second; - std::set wellToExport{*begin}; + const auto& [begin,end,wSize] = pWell->second; + std::vector wellToExport; + wellToExport.reserve(wSize); + wellToExport.push_back(*begin); // well ID is its cell of lowest index and is already in the exportList assert(*begin==std::get<0>(cellProperties)); for (auto pgID = begin; ++pgID!=end; ) @@ -263,7 +266,7 @@ extendRootExportList(const GraphOfGrid& gog, std::get<0>(wellCell) = *pgID; addToList.push_back(wellCell); - wellToExport.emplace(*pgID); + wellToExport.push_back(*pgID); } exportedWells[rankToExport].push_back(std::move(wellToExport)); } @@ -286,13 +289,13 @@ extendRootExportList(const GraphOfGrid& gog, return exportedWells; } -std::vector> communicateExportedWells( - const std::vector>>& exportedWells, +std::vector> communicateExportedWells( + const std::vector>>& exportedWells, const Dune::cpgrid::CpGridDataTraits::Communication& cc, int root) { // send data from root - std::vector> result; + std::vector> result; if (cc.rank()==root) { for (int i=0; i> communicateExportedWells( { int wellSize = receivedData[index++]; assert(index+wellSize<=totsize); - for (int j=0; j(dataBegin,dataBegin+wellSize); + index+=wellSize; } } return result; } void extendImportList(std::vector>& importList, - const std::vector>& extraWells) + const std::vector>& extraWells) { using ImportList = std::vector>; - // make a list of wells for easy identification. 
Contains ID, begin, end - using iter = std::set::const_iterator; - std::unordered_map> wellMap; - for (const auto& well : extraWells) + // make a list of wells for easy identification + std::unordered_map wellMap; + for (std::size_t i=0; i1) + if (extraWells[i].size()>1) { - wellMap[*well.begin()] = std::make_pair(well.begin(),well.end()); + wellMap[extraWells[i][0]] = i; } } @@ -371,14 +372,14 @@ void extendImportList(std::vector>& importList, auto pWell = wellMap.find(std::get<0>(cellProperties)); if (pWell!=wellMap.end()) { - const auto& [begin,end] = pWell->second; + const auto& wellVector = extraWells[pWell->second]; // well ID is its cell of lowest index and is already in the importList - assert(*begin==std::get<0>(cellProperties)); - for (auto pgID = begin; ++pgID!=end; ) + assert(wellVector[0]==std::get<0>(cellProperties)); + for (std::size_t j=1; j wellCell = cellProperties; - std::get<0>(wellCell) = *pgID; + std::get<0>(wellCell) = wellVector[j]; addToList.push_back(wellCell); } diff --git a/opm/grid/GraphOfGridWrappers.hpp b/opm/grid/GraphOfGridWrappers.hpp index 1a51947a8..9d1a24b57 100644 --- a/opm/grid/GraphOfGridWrappers.hpp +++ b/opm/grid/GraphOfGridWrappers.hpp @@ -94,7 +94,7 @@ void getGraphOfGridEdgeList(void *pGraph, /// \brief Register callback functions to Zoltan template void setGraphOfGridZoltanGraphFunctions(Zoltan_Struct *zz, - const GraphOfGrid& gog, + GraphOfGrid& gog, bool pretendNull); #endif @@ -137,7 +137,7 @@ namespace Impl{ /// Helper function for extendExportAndImportLists. /// Used on non-root ranks that do not have access to wells. void extendImportList(std::vector>& importList, - const std::vector>& extraWells); + const std::vector>& extraWells); /// \brief Add well cells' global IDs to the root's export list and output other rank's wells /// @@ -145,8 +145,8 @@ void extendImportList(std::vector>& importList, /// On non-root ranks, it does nothing and returns an empty vector. /// On root, exportList is extended by well cells that are hidden from the partitioner. /// These wells are also collected and returned so they can be communicated to other ranks. -/// \return vector of size cc.size(). Each entry contains vector of wells exported to that rank. -std::vector>> +/// \return vector[rank][well][cell] Each entry contains vector of wells exported to that rank. +std::vector>> extendRootExportList(const GraphOfGrid& gog, std::vector>& exportList, int root, @@ -166,8 +166,8 @@ extendRootExportList(const GraphOfGrid& gog, /// \param root The root's rank /// \return Vector of wells necessary to extend this rank's import lists, /// empty on the root rank -std::vector> communicateExportedWells( - const std::vector>>& exportedWells, +std::vector> communicateExportedWells( + const std::vector>>& exportedWells, const Dune::cpgrid::CpGridDataTraits::Communication& cc, int root); } // end namespace Impl
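
The ZoltanPartition.cpp hunk in the commit "Avoid unnecessary vector initialization" above replaces
value-initialized export/import vectors, which default-construct every tuple only to overwrite it,
with empty vectors that reserve capacity and fill entries via emplace_back. A minimal standalone
sketch of that pattern; the element count and the simplified tuple layout are hypothetical
stand-ins for the Zoltan output arrays, not values from the patch:

    #include <cstddef>
    #include <tuple>
    #include <vector>

    int main()
    {
        const std::size_t numExport = 1000;   // hypothetical count reported by the partitioner

        // Old pattern: sized construction builds numExport default tuples, then overwrites each one.
        std::vector<std::tuple<int,int,char>> before(numExport);
        for (std::size_t i = 0; i < numExport; ++i)
            before[i] = std::make_tuple(static_cast<int>(i), 0, static_cast<char>(0));

        // New pattern: reserve once (here with ~5% head room, mirroring the buffer factor that
        // leaves space for well cells appended later) and construct each tuple in place.
        std::vector<std::tuple<int,int,char>> after;
        after.reserve(numExport + numExport / 20);
        for (std::size_t i = 0; i < numExport; ++i)
            after.emplace_back(static_cast<int>(i), 0, static_cast<char>(0));

        return before == after ? 0 : 1;
    }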
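
The receive loop of communicateExportedWells (in the GraphOfGridWrappers.cpp hunk above) reads a
flat integer buffer in which each well is stored as its cell count followed by that many cell
global IDs. The sketch below shows only that length-prefixed layout and is kept independent of MPI;
packWells and unpackWells are illustrative helpers, not functions from the patch:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Pack wells (each a vector of cell global IDs) as: size, id0, id1, ..., size, id0, ...
    std::vector<int> packWells(const std::vector<std::vector<int>>& wells)
    {
        std::vector<int> buffer;
        for (const auto& well : wells)
        {
            buffer.push_back(static_cast<int>(well.size()));
            buffer.insert(buffer.end(), well.begin(), well.end());
        }
        return buffer;
    }

    // Inverse of packWells; mirrors the index/wellSize bookkeeping of the receive loop.
    std::vector<std::vector<int>> unpackWells(const std::vector<int>& buffer)
    {
        std::vector<std::vector<int>> wells;
        std::size_t index = 0;
        while (index < buffer.size())
        {
            const std::size_t wellSize = static_cast<std::size_t>(buffer[index++]);
            assert(index + wellSize <= buffer.size());
            const auto dataBegin = buffer.begin() + index;
            wells.emplace_back(dataBegin, dataBegin + wellSize);   // range-constructs one well
            index += wellSize;
        }
        return wells;
    }

    int main()
    {
        const std::vector<std::vector<int>> wells{{12, 13, 14}, {40, 41}};
        return unpackWells(packWells(wells)) == wells ? 0 : 1;
    }

Storing each well as a std::vector rather than a std::set fits this layout naturally: the cells are
already contiguous in the order they are sent, so unpacking becomes a single range construction
instead of element-wise insertion.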