From a1663c51be5fefcc84603582095ffdab8c4c1ead Mon Sep 17 00:00:00 2001 From: "ania.brown" Date: Fri, 22 Jan 2021 17:08:16 +0000 Subject: [PATCH 01/15] standardising makefiles --- .../SingleFluidIdealRandomMPIHDF5/Makefile | 42 +++++++++++++++---- Project/CPU/Makefile | 39 +++++++++++++---- Tests/CPU/Makefile | 29 +++++++++---- 3 files changed, 83 insertions(+), 27 deletions(-) diff --git a/Examples/KelvinHelmholtz/SingleFluidIdealRandomMPIHDF5/Makefile b/Examples/KelvinHelmholtz/SingleFluidIdealRandomMPIHDF5/Makefile index e188325f..e589b24e 100644 --- a/Examples/KelvinHelmholtz/SingleFluidIdealRandomMPIHDF5/Makefile +++ b/Examples/KelvinHelmholtz/SingleFluidIdealRandomMPIHDF5/Makefile @@ -1,14 +1,28 @@ # Make file for the main function. Builds all modules # and links for main.cc -# Compiler -CC = mpic++ -##CC = g++ +# -------------- PARAMETERS FOR USERS TO EDIT -------------------- +# Whether to use MPI for multi-cpu processing USE_MPI = 1 USE_OMP = 1 USE_HDF = 1 +# Compiler +CC = g++ +# --- if USE_MPI --- +# If using mpi, aditionally specify a c++ capable mpi compiler. In systems with multiple versions of MPI, +# the particular version may need to be specified with eg mpicxx.mpich +MPI_CC = mpic++ +# --- if USE_HDF --- +# If using hdf5, additionally specify a hdf5 compiler. If using mpi, this must be the version of the hdf5 +# compiler available on your system that links the correct mpi libraries. Should +# be one of h5pcc, h5pcc.openmpi or h5pcc.mpich. +HDF5_CC = h5pcc + +# -------------- END PARAMETERS USERS ARE LIKELY TO NEED TO EDIT -------------------- + + PROJECT_TYPE = CPU # Module directory @@ -67,16 +81,26 @@ else endif ifeq ($(USE_HDF), 1) - export HDF5_CXX := $(CC) - export HDF5_CLINKER := $(CC) - - ifeq ($(USE_OMP), 1) - CC = h5pcc.openmpi + # Using HDF5 + ifeq($(USE_MPI), 1) + export HDF5_CXX := $(MPI_CC) + export HDF5_CLINKER := $(MPI_CC) else - CC = h5c++ + export HDF5_CXX := $(CC) + export HDF5_CLINKER := $(CC) endif + # Compile all sources with the hdf5 compiler wrapper + CC = $(HDF5_CC) +else + # Not using HDF5 + ifeq($(USE_MPI), 1) + # Compile all sources with the mpi compiler wrapper + CC = $(MPI_CC) + endif + # If not using hdf5 or mpi, CC remains as defined at top of Makefile endif + # Headers HDRS = ${SRCS:.cc=.h} cudaErrorCheck.h diff --git a/Project/CPU/Makefile b/Project/CPU/Makefile index 6996dbf9..582a633d 100644 --- a/Project/CPU/Makefile +++ b/Project/CPU/Makefile @@ -1,15 +1,27 @@ # Make file for the main function. Builds all modules # and links for main.cc -# Compiler -#CC = g++ -CC = mpic++ +# -------------- PARAMETERS FOR USERS TO EDIT -------------------- # Whether to use MPI for multi-cpu processing USE_MPI = 1 USE_OMP = 1 USE_HDF = 1 +# Compiler +CC = g++ +# --- if USE_MPI --- +# If using mpi, aditionally specify a c++ capable mpi compiler. In systems with multiple versions of MPI, +# the particular version may need to be specified with eg mpicxx.mpich +MPI_CC = mpic++ +# --- if USE_HDF --- +# If using hdf5, additionally specify a hdf5 compiler. If using mpi, this must be the version of the hdf5 +# compiler available on your system that links the correct mpi libraries. Should +# be one of h5pcc, h5pcc.openmpi or h5pcc.mpich. 
+HDF5_CC = h5pcc + +# -------------- END PARAMETERS USERS ARE LIKELY TO NEED TO EDIT -------------------- + # Module directory MODULE_DIR = ./Src @@ -76,14 +88,23 @@ ifeq ($(USE_OMP), 1) endif ifeq ($(USE_HDF), 1) - export HDF5_CXX := $(CC) - export HDF5_CLINKER := $(CC) - - ifeq($(USE_OMP), 1) - CC = h5pcc.openmpi + # Using HDF5 + ifeq($(USE_MPI), 1) + export HDF5_CXX := $(MPI_CC) + export HDF5_CLINKER := $(MPI_CC) else - CC = h5c++ + export HDF5_CXX := $(CC) + export HDF5_CLINKER := $(CC) + endif + # Compile all sources with the hdf5 compiler wrapper + CC = $(HDF5_CC) +else + # Not using HDF5 + ifeq($(USE_MPI), 1) + # Compile all sources with the mpi compiler wrapper + CC = $(MPI_CC) endif + # If not using hdf5 or mpi, CC remains as defined at top of Makefile endif # Headers diff --git a/Tests/CPU/Makefile b/Tests/CPU/Makefile index a7e3e7dd..2e20c35d 100644 --- a/Tests/CPU/Makefile +++ b/Tests/CPU/Makefile @@ -16,24 +16,35 @@ # but shouldn't modify. -# Compiler used for all compilation objects. This should be the version of the hdf5 +# -------------- PARAMETERS FOR USERS TO EDIT -------------------- + +# The c++ capable mpi compiler. In systems with multiple versions of MPI, the particular version may need to be specified with eg +# mpicxx.mpich +MPI_CC = mpic++ +# The script used to launch mpi programs. In systems with multiple versions of MPI, the particular version may need to be +# specified with eg mpirun.mpich +MPIEXEC = mpirun + +# The hdf5 compiler. This must be the version of the hdf5 # compiler available on your system that links the correct mpi libraries. Should -# be one of h5pcc, h5pcc.openmpi or h5pcc.mpich +# be one of h5pcc, h5pcc.openmpi or h5pcc.mpich. CC = h5pcc +# Points to the root of Google Test, relative to where this file is. +# Remember to tweak this if you move this file. +GTEST_DIR = ../../../GoogleTest + +# -------------- END PARAMETERS USERS ARE LIKELY TO NEED TO EDIT -------------------- + + # Compiler used by hdf5 for c++. Shouldn't need to change this -export HDF5_CXX := mpic++ -export HDF5_CLINKER := mpic++ +export HDF5_CXX := $(MPI_CC) +export HDF5_CLINKER := $(MPI_CC) # these should no longer be needed but leaving them in just in case MPI_FLAGS = HDF5_FLAGS = - -# Points to the root of Google Test, relative to where this file is. -# Remember to tweak this if you move this file. -GTEST_DIR = ../../../GoogleTest - # Where to find user code. 
MODULE_DIR = ./../../Project/CPU/Src TEST_DIR = ./Src From d1bb0c6a1ec3f7d9f8c71ae39c1229d64bfe98b5 Mon Sep 17 00:00:00 2001 From: "ania.brown" Date: Mon, 25 Jan 2021 15:36:05 +0000 Subject: [PATCH 02/15] standardising KH example makefiles and adding makefile template --- Examples/KelvinHelmholtz/Makefile.template | 259 ++++++++++++++++++ .../SingleFluidIdealMPI/Makefile | 122 +++++++-- .../SingleFluidIdealRandomMPI/Makefile | 122 +++++++-- .../SingleFluidIdealRandomMPIHDF5/Makefile | 102 ++++--- .../Makefile | 113 +++++--- .../main.cc | 2 +- .../SingleFluidIdealRandomSerial/Makefile | 120 ++++++-- .../SingleFluidIdealRandomSerialHDF5/Makefile | 116 +++++--- .../Makefile | 114 +++++--- .../main.cc | 4 +- .../SingleFluidIdealSerial/Makefile | 120 ++++++-- 11 files changed, 974 insertions(+), 220 deletions(-) create mode 100644 Examples/KelvinHelmholtz/Makefile.template diff --git a/Examples/KelvinHelmholtz/Makefile.template b/Examples/KelvinHelmholtz/Makefile.template new file mode 100644 index 00000000..e8d0db3c --- /dev/null +++ b/Examples/KelvinHelmholtz/Makefile.template @@ -0,0 +1,259 @@ +## This is a template Makefile to use as a starting point for all KelvinHelmholtz examples. +# +# It can handle compiling with mpi and/or hdf5 if required, which can be controlled from the first +# section 'PARAMETERS USERS ARE LIKELY TO NEED TO EDIT' +# +# Creating a new example: +# If any source files have been added to METHOD for a new example, look for and update the +# 'TO EDIT IF SOURCE FILES ARE ADDED TO METHOD' sections, ideally updating them in this template file +# as well as in the new example makefile. +# +# ------------------------------------------------------------------------------ + +# Make file for the main function. Builds all modules +# and links for main.cc + +# -------------- PARAMETERS USERS ARE LIKELY TO NEED TO EDIT ------------------- + +# Whether to use MPI for multi-cpu processing +USE_MPI = 1 +USE_OMP = 0 +USE_HDF = 1 + +# Compiler +CC = g++ +# --- if USE_MPI --- +# If using mpi, aditionally specify a c++ capable mpi compiler. In systems with multiple versions of MPI, +# the particular version may need to be specified with eg mpicxx.mpich +MPI_CC = mpic++ +# --- if USE_HDF --- +# If using hdf5, additionally specify a hdf5 compiler. If using mpi, this must be the version of the hdf5 +# compiler available on your system that links the correct mpi libraries. Should +# be one of h5pcc, h5pcc.openmpi or h5pcc.mpich. 
+HDF5_CC = h5pcc + +# -------------- END PARAMETERS USERS ARE LIKELY TO NEED TO EDIT -------------------- + + +PROJECT_TYPE = CPU + +# Module directory +MODULE_DIR = ./../../../Project/${PROJECT_TYPE}/Src + +# Rootfinder directory +RTFIND_DIR = ./../../../Project/${PROJECT_TYPE}/CminpackLibrary + +# Include directory +INC_DIR = ./../../../Project/${PROJECT_TYPE}/Include + +# Cminpack include directory +RTFIND_INC_DIR = ./../../../Project/${PROJECT_TYPE}/CminpackLibrary/Include + +# Cminpack source directory +RTFIND_SRC_DIR = ./../../../Project/${PROJECT_TYPE}//CminpackLibrary/Src + +ifeq ($(USE_OMP), 1) + OMP_FLAGS = -fopenmp +endif + +# C++ compiler flags +CXXFLAGS = -Wall -std=c++11 -O3 $(OMP_FLAGS) -Wno-unknown-pragmas + +# -------------- TO EDIT IF SOURCE FILES ARE ADDED TO METHOD -------------------- + +# Sources +SRCS = simulation.cc \ + srmhd.cc \ + initFunc.cc \ + simData.cc \ + wenoUpwinds.cc \ + weno.cc \ + RK2.cc \ + rkSplit.cc \ + boundaryConds.cc \ + fluxVectorSplitting.cc \ + +SERIAL_SRCS = serialSaveData.cc \ + serialEnv.cc \ + +ifeq ($(USE_HDF), 1) + SERIAL_SRCS += serialSaveDataHDF5.cc \ + initFuncFromCheckpoint.cc \ + checkpointArgs.cc +endif + +PARALLEL_SRCS = parallelSaveData.cc \ + parallelBoundaryConds.cc \ + parallelEnv.cc + +ifeq ($(USE_HDF), 1) + PARALLEL_SRCS += parallelSaveDataHDF5.cc \ + parallelInitFuncFromCheckpoint.cc \ + parallelCheckpointArgs.cc + +endif + +# -------------- END TO EDIT ---------------------------------------------------- + +ifeq ($(USE_MPI), 1) + SRCS += ${PARALLEL_SRCS} +else + SRCS += ${SERIAL_SRCS} +endif + +ifeq ($(USE_HDF), 1) +# Using HDF5 + ifeq ($(USE_MPI), 1) + export HDF5_CXX := $(MPI_CC) + export HDF5_CLINKER := $(MPI_CC) + else + export HDF5_CXX := $(CC) + export HDF5_CLINKER := $(CC) + endif +# Compile all sources with the hdf5 compiler wrapper + CC = $(HDF5_CC) +else +# Not using HDF5 + ifeq ($(USE_MPI), 1) +# Compile all sources with the mpi compiler wrapper + CC = $(MPI_CC) + endif +# If not using hdf5 or mpi, CC remains as defined at top of Makefile +endif + +# Headers +HDRS = ${SRCS:.cc=.h} cudaErrorCheck.h + +# Objects +OBJS = ${SRCS:.cc=.o} + +# Rootfinder objects +RTFIND_OBJS = $(RTFIND_SRC_DIR)/dogleg.o \ + $(RTFIND_SRC_DIR)/dpmpar.o \ + $(RTFIND_SRC_DIR)/enorm.o \ + $(RTFIND_SRC_DIR)/fdjac1.o \ + $(RTFIND_SRC_DIR)/hybrd.o \ + $(RTFIND_SRC_DIR)/hybrd1.o \ + $(RTFIND_SRC_DIR)/qform.o \ + $(RTFIND_SRC_DIR)/qrfac.o \ + $(RTFIND_SRC_DIR)/r1mpyq.o \ + $(RTFIND_SRC_DIR)/r1updt.o + +# Programmes +PROGS = ${SRCS:.cc=} + +# Main programme executable +EXEC = main + +# Rootfinder executables +RTFIND = buildRootfinder + + +# Build everything +build : $(RTFIND) $(EXEC) + +# Make and run all tests on simulation scripts and run main +all : $(RTFIND) $(EXEC) + cd $(TEST_DIR) && $(MAKE) test + @$(MAKE) run + +# Build and run main (does not run any tests if called explicitly) +run : $(RTFIND) $(EXEC) + @echo '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' + @echo '' + @echo '' + @echo '######################################################' + @echo '# Executing main programme #' + @echo '######################################################' + @./$(EXEC) + +clean : + rm -f $(EXEC) *.o *.gch + + +################# +# Build objects # +################# + +# -------------- TO EDIT IF SOURCE FILES ARE ADDED TO METHOD -------------------- + +# Object containing main function. 
+# This may need to be edited when creating new examples depending on the h files needed in the main script + +main.o : ./main.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/parallelCheckpointArgs.h $(INC_DIR)/initFunc.h $(INC_DIR)/parallelInitFuncFromCheckpoint.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) $(CXXFLAGS) + +## Build all remaining objects. Should only need to update this list if new source files are added to METHOD + +simData.o : $(MODULE_DIR)/simData.cc $(INC_DIR)/simData.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +serialCheckpointArgs.o : $(MODULE_DIR)/serialCheckpointArgs.cc $(INC_DIR)/serialCheckpointArgs.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelCheckpointArgs.o : $(MODULE_DIR)/parallelCheckpointArgs.cc $(INC_DIR)/parallelCheckpointArgs.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +initFunc.o : $(MODULE_DIR)/initFunc.cc $(INC_DIR)/initFunc.h $(INC_DIR)/simData.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +initFuncFromCheckpoint.o : $(MODULE_DIR)/initFuncFromCheckpoint.cc $(INC_DIR)/initFuncFromCheckpoint.h $(INC_DIR)/simData.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelInitFuncFromCheckpoint.o : $(MODULE_DIR)/parallelInitFuncFromCheckpoint.cc $(INC_DIR)/parallelInitFuncFromCheckpoint.h $(INC_DIR)/simData.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + + +srmhd.o : $(MODULE_DIR)/srmhd.cc $(INC_DIR)/srmhd.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) + +simulation.o : $(MODULE_DIR)/simulation.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/saveData.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +wenoUpwinds.o : $(MODULE_DIR)/wenoUpwinds.cc $(INC_DIR)/wenoUpwinds.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +weno.o : $(MODULE_DIR)/weno.cc $(INC_DIR)/weno.h $(INC_DIR)/wenoUpwinds.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +RK2.o : $(MODULE_DIR)/RK2.cc $(INC_DIR)/RK2.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +rkSplit.o : $(MODULE_DIR)/rkSplit.cc $(INC_DIR)/rkSplit.h $(INC_DIR)/RK2.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +boundaryConds.o : $(MODULE_DIR)/boundaryConds.cc $(INC_DIR)/boundaryConds.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelBoundaryConds.o : $(MODULE_DIR)/parallelBoundaryConds.cc $(INC_DIR)/parallelBoundaryConds.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +serialSaveData.o : $(MODULE_DIR)/serialSaveData.cc $(INC_DIR)/serialSaveData.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +serialSaveDataHDF5.o : $(MODULE_DIR)/serialSaveDataHDF5.cc $(INC_DIR)/serialSaveDataHDF5.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelSaveData.o : $(MODULE_DIR)/parallelSaveData.cc $(INC_DIR)/parallelSaveData.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelSaveDataHDF5.o : $(MODULE_DIR)/parallelSaveDataHDF5.cc $(INC_DIR)/parallelSaveDataHDF5.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +fluxVectorSplitting.o : $(MODULE_DIR)/fluxVectorSplitting.cc $(INC_DIR)/fluxVectorSplitting.h $(INC_DIR)/weno.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +serialEnv.o : $(MODULE_DIR)/serialEnv.cc $(INC_DIR)/serialEnv.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelEnv.o : $(MODULE_DIR)/parallelEnv.cc $(INC_DIR)/parallelEnv.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +# -------------- END TO EDIT ---------------------------------------------------- + +# Executable +main : main.o $(OBJS) $(RTFIND_OBJS) + $(CC) $^ -o $@ $(CXXFLAGS) $(CXXFLAGS) + +buildRootfinder: + @cd $(RTFIND_DIR) && $(MAKE) objects diff --git a/Examples/KelvinHelmholtz/SingleFluidIdealMPI/Makefile 
b/Examples/KelvinHelmholtz/SingleFluidIdealMPI/Makefile index 7b4ac649..12ed0f06 100644 --- a/Examples/KelvinHelmholtz/SingleFluidIdealMPI/Makefile +++ b/Examples/KelvinHelmholtz/SingleFluidIdealMPI/Makefile @@ -1,12 +1,27 @@ # Make file for the main function. Builds all modules # and links for main.cc -# Compiler -CC = mpic++ -##CC = g++ +# -------------- PARAMETERS USERS ARE LIKELY TO NEED TO EDIT ------------------- +# Whether to use MPI for multi-cpu processing USE_MPI = 1 -USE_OMP = 1 +USE_OMP = 0 +USE_HDF = 0 + +# Compiler +CC = g++ +# --- if USE_MPI --- +# If using mpi, aditionally specify a c++ capable mpi compiler. In systems with multiple versions of MPI, +# the particular version may need to be specified with eg mpicxx.mpich +MPI_CC = mpic++ +# --- if USE_HDF --- +# If using hdf5, additionally specify a hdf5 compiler. If using mpi, this must be the version of the hdf5 +# compiler available on your system that links the correct mpi libraries. Should +# be one of h5pcc, h5pcc.openmpi or h5pcc.mpich. +HDF5_CC = h5pcc + +# -------------- END PARAMETERS USERS ARE LIKELY TO NEED TO EDIT -------------------- + PROJECT_TYPE = CPU @@ -26,35 +41,72 @@ RTFIND_INC_DIR = ./../../../Project/${PROJECT_TYPE}/CminpackLibrary/Include RTFIND_SRC_DIR = ./../../../Project/${PROJECT_TYPE}//CminpackLibrary/Src ifeq ($(USE_OMP), 1) - OMP_FLAGS = -fopenmp + OMP_FLAGS = -fopenmp endif # C++ compiler flags CXXFLAGS = -Wall -std=c++11 -O3 $(OMP_FLAGS) -Wno-unknown-pragmas +# -------------- TO EDIT IF SOURCE FILES ARE ADDED TO METHOD -------------------- + # Sources SRCS = simulation.cc \ srmhd.cc \ initFunc.cc \ simData.cc \ - wenoUpwinds.cc \ - weno.cc \ + wenoUpwinds.cc \ + weno.cc \ RK2.cc \ rkSplit.cc \ boundaryConds.cc \ fluxVectorSplitting.cc \ SERIAL_SRCS = serialSaveData.cc \ - serialEnv.cc + serialEnv.cc \ + +ifeq ($(USE_HDF), 1) + SERIAL_SRCS += serialSaveDataHDF5.cc \ + initFuncFromCheckpoint.cc \ + serialCheckpointArgs.cc +endif PARALLEL_SRCS = parallelSaveData.cc \ - parallelBoundaryConds.cc \ - parallelEnv.cc + parallelBoundaryConds.cc \ + parallelEnv.cc + +ifeq ($(USE_HDF), 1) + PARALLEL_SRCS += parallelSaveDataHDF5.cc \ + parallelInitFuncFromCheckpoint.cc \ + parallelCheckpointArgs.cc + +endif + +# -------------- END TO EDIT ---------------------------------------------------- ifeq ($(USE_MPI), 1) - SRCS += ${PARALLEL_SRCS} + SRCS += ${PARALLEL_SRCS} +else + SRCS += ${SERIAL_SRCS} +endif + +ifeq ($(USE_HDF), 1) +# Using HDF5 + ifeq ($(USE_MPI), 1) + export HDF5_CXX := $(MPI_CC) + export HDF5_CLINKER := $(MPI_CC) + else + export HDF5_CXX := $(CC) + export HDF5_CLINKER := $(CC) + endif +# Compile all sources with the hdf5 compiler wrapper + CC = $(HDF5_CC) else - SRCS += ${SERIAL_SRCS} +# Not using HDF5 + ifeq ($(USE_MPI), 1) +# Compile all sources with the mpi compiler wrapper + CC = $(MPI_CC) + endif +# If not using hdf5 or mpi, CC remains as defined at top of Makefile endif # Headers @@ -99,7 +151,7 @@ run : $(RTFIND) $(EXEC) @echo '' @echo '' @echo '######################################################' - @echo '# Executing main programme #' + @echo '# Executing main programme #' @echo '######################################################' @./$(EXEC) @@ -111,21 +163,41 @@ clean : # Build objects # ################# +# -------------- TO EDIT IF SOURCE FILES ARE ADDED TO METHOD -------------------- + +# Object containing main function. 
+# This may need to be edited when creating new examples depending on the h files needed in the main script + +main.o : ./main.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/initFunc.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) $(CXXFLAGS) + +## Build all remaining objects. Should only need to update this list if new source files are added to METHOD + simData.o : $(MODULE_DIR)/simData.cc $(INC_DIR)/simData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) +serialCheckpointArgs.o : $(MODULE_DIR)/serialCheckpointArgs.cc $(INC_DIR)/serialCheckpointArgs.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelCheckpointArgs.o : $(MODULE_DIR)/parallelCheckpointArgs.cc $(INC_DIR)/parallelCheckpointArgs.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + initFunc.o : $(MODULE_DIR)/initFunc.cc $(INC_DIR)/initFunc.h $(INC_DIR)/simData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) +initFuncFromCheckpoint.o : $(MODULE_DIR)/initFuncFromCheckpoint.cc $(INC_DIR)/initFuncFromCheckpoint.h $(INC_DIR)/simData.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelInitFuncFromCheckpoint.o : $(MODULE_DIR)/parallelInitFuncFromCheckpoint.cc $(INC_DIR)/parallelInitFuncFromCheckpoint.h $(INC_DIR)/simData.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + + srmhd.o : $(MODULE_DIR)/srmhd.cc $(INC_DIR)/srmhd.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) simulation.o : $(MODULE_DIR)/simulation.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/saveData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -main.o : ./main.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/initFunc.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) $(CXXFLAGS) - wenoUpwinds.o : $(MODULE_DIR)/wenoUpwinds.cc $(INC_DIR)/wenoUpwinds.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) @@ -133,7 +205,7 @@ weno.o : $(MODULE_DIR)/weno.cc $(INC_DIR)/weno.h $(INC_DIR)/wenoUpwinds.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) RK2.o : $(MODULE_DIR)/RK2.cc $(INC_DIR)/RK2.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) rkSplit.o : $(MODULE_DIR)/rkSplit.cc $(INC_DIR)/rkSplit.h $(INC_DIR)/RK2.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) @@ -145,19 +217,27 @@ parallelBoundaryConds.o : $(MODULE_DIR)/parallelBoundaryConds.cc $(INC_DIR)/para $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) serialSaveData.o : $(MODULE_DIR)/serialSaveData.cc $(INC_DIR)/serialSaveData.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +serialSaveDataHDF5.o : $(MODULE_DIR)/serialSaveDataHDF5.cc $(INC_DIR)/serialSaveDataHDF5.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) parallelSaveData.o : $(MODULE_DIR)/parallelSaveData.cc $(INC_DIR)/parallelSaveData.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelSaveDataHDF5.o : $(MODULE_DIR)/parallelSaveDataHDF5.cc $(INC_DIR)/parallelSaveDataHDF5.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) fluxVectorSplitting.o : $(MODULE_DIR)/fluxVectorSplitting.cc $(INC_DIR)/fluxVectorSplitting.h $(INC_DIR)/weno.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) serialEnv.o : $(MODULE_DIR)/serialEnv.cc $(INC_DIR)/serialEnv.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) parallelEnv.o : $(MODULE_DIR)/parallelEnv.cc $(INC_DIR)/parallelEnv.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +# -------------- END TO EDIT ---------------------------------------------------- # Executable main : main.o $(OBJS) $(RTFIND_OBJS) diff --git 
a/Examples/KelvinHelmholtz/SingleFluidIdealRandomMPI/Makefile b/Examples/KelvinHelmholtz/SingleFluidIdealRandomMPI/Makefile index 7b4ac649..12ed0f06 100644 --- a/Examples/KelvinHelmholtz/SingleFluidIdealRandomMPI/Makefile +++ b/Examples/KelvinHelmholtz/SingleFluidIdealRandomMPI/Makefile @@ -1,12 +1,27 @@ # Make file for the main function. Builds all modules # and links for main.cc -# Compiler -CC = mpic++ -##CC = g++ +# -------------- PARAMETERS USERS ARE LIKELY TO NEED TO EDIT ------------------- +# Whether to use MPI for multi-cpu processing USE_MPI = 1 -USE_OMP = 1 +USE_OMP = 0 +USE_HDF = 0 + +# Compiler +CC = g++ +# --- if USE_MPI --- +# If using mpi, aditionally specify a c++ capable mpi compiler. In systems with multiple versions of MPI, +# the particular version may need to be specified with eg mpicxx.mpich +MPI_CC = mpic++ +# --- if USE_HDF --- +# If using hdf5, additionally specify a hdf5 compiler. If using mpi, this must be the version of the hdf5 +# compiler available on your system that links the correct mpi libraries. Should +# be one of h5pcc, h5pcc.openmpi or h5pcc.mpich. +HDF5_CC = h5pcc + +# -------------- END PARAMETERS USERS ARE LIKELY TO NEED TO EDIT -------------------- + PROJECT_TYPE = CPU @@ -26,35 +41,72 @@ RTFIND_INC_DIR = ./../../../Project/${PROJECT_TYPE}/CminpackLibrary/Include RTFIND_SRC_DIR = ./../../../Project/${PROJECT_TYPE}//CminpackLibrary/Src ifeq ($(USE_OMP), 1) - OMP_FLAGS = -fopenmp + OMP_FLAGS = -fopenmp endif # C++ compiler flags CXXFLAGS = -Wall -std=c++11 -O3 $(OMP_FLAGS) -Wno-unknown-pragmas +# -------------- TO EDIT IF SOURCE FILES ARE ADDED TO METHOD -------------------- + # Sources SRCS = simulation.cc \ srmhd.cc \ initFunc.cc \ simData.cc \ - wenoUpwinds.cc \ - weno.cc \ + wenoUpwinds.cc \ + weno.cc \ RK2.cc \ rkSplit.cc \ boundaryConds.cc \ fluxVectorSplitting.cc \ SERIAL_SRCS = serialSaveData.cc \ - serialEnv.cc + serialEnv.cc \ + +ifeq ($(USE_HDF), 1) + SERIAL_SRCS += serialSaveDataHDF5.cc \ + initFuncFromCheckpoint.cc \ + serialCheckpointArgs.cc +endif PARALLEL_SRCS = parallelSaveData.cc \ - parallelBoundaryConds.cc \ - parallelEnv.cc + parallelBoundaryConds.cc \ + parallelEnv.cc + +ifeq ($(USE_HDF), 1) + PARALLEL_SRCS += parallelSaveDataHDF5.cc \ + parallelInitFuncFromCheckpoint.cc \ + parallelCheckpointArgs.cc + +endif + +# -------------- END TO EDIT ---------------------------------------------------- ifeq ($(USE_MPI), 1) - SRCS += ${PARALLEL_SRCS} + SRCS += ${PARALLEL_SRCS} +else + SRCS += ${SERIAL_SRCS} +endif + +ifeq ($(USE_HDF), 1) +# Using HDF5 + ifeq ($(USE_MPI), 1) + export HDF5_CXX := $(MPI_CC) + export HDF5_CLINKER := $(MPI_CC) + else + export HDF5_CXX := $(CC) + export HDF5_CLINKER := $(CC) + endif +# Compile all sources with the hdf5 compiler wrapper + CC = $(HDF5_CC) else - SRCS += ${SERIAL_SRCS} +# Not using HDF5 + ifeq ($(USE_MPI), 1) +# Compile all sources with the mpi compiler wrapper + CC = $(MPI_CC) + endif +# If not using hdf5 or mpi, CC remains as defined at top of Makefile endif # Headers @@ -99,7 +151,7 @@ run : $(RTFIND) $(EXEC) @echo '' @echo '' @echo '######################################################' - @echo '# Executing main programme #' + @echo '# Executing main programme #' @echo '######################################################' @./$(EXEC) @@ -111,21 +163,41 @@ clean : # Build objects # ################# +# -------------- TO EDIT IF SOURCE FILES ARE ADDED TO METHOD -------------------- + +# Object containing main function. 
+# This may need to be edited when creating new examples depending on the h files needed in the main script + +main.o : ./main.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/initFunc.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) $(CXXFLAGS) + +## Build all remaining objects. Should only need to update this list if new source files are added to METHOD + simData.o : $(MODULE_DIR)/simData.cc $(INC_DIR)/simData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) +serialCheckpointArgs.o : $(MODULE_DIR)/serialCheckpointArgs.cc $(INC_DIR)/serialCheckpointArgs.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelCheckpointArgs.o : $(MODULE_DIR)/parallelCheckpointArgs.cc $(INC_DIR)/parallelCheckpointArgs.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + initFunc.o : $(MODULE_DIR)/initFunc.cc $(INC_DIR)/initFunc.h $(INC_DIR)/simData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) +initFuncFromCheckpoint.o : $(MODULE_DIR)/initFuncFromCheckpoint.cc $(INC_DIR)/initFuncFromCheckpoint.h $(INC_DIR)/simData.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelInitFuncFromCheckpoint.o : $(MODULE_DIR)/parallelInitFuncFromCheckpoint.cc $(INC_DIR)/parallelInitFuncFromCheckpoint.h $(INC_DIR)/simData.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + + srmhd.o : $(MODULE_DIR)/srmhd.cc $(INC_DIR)/srmhd.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) simulation.o : $(MODULE_DIR)/simulation.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/saveData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -main.o : ./main.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/initFunc.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) $(CXXFLAGS) - wenoUpwinds.o : $(MODULE_DIR)/wenoUpwinds.cc $(INC_DIR)/wenoUpwinds.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) @@ -133,7 +205,7 @@ weno.o : $(MODULE_DIR)/weno.cc $(INC_DIR)/weno.h $(INC_DIR)/wenoUpwinds.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) RK2.o : $(MODULE_DIR)/RK2.cc $(INC_DIR)/RK2.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) rkSplit.o : $(MODULE_DIR)/rkSplit.cc $(INC_DIR)/rkSplit.h $(INC_DIR)/RK2.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) @@ -145,19 +217,27 @@ parallelBoundaryConds.o : $(MODULE_DIR)/parallelBoundaryConds.cc $(INC_DIR)/para $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) serialSaveData.o : $(MODULE_DIR)/serialSaveData.cc $(INC_DIR)/serialSaveData.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +serialSaveDataHDF5.o : $(MODULE_DIR)/serialSaveDataHDF5.cc $(INC_DIR)/serialSaveDataHDF5.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) parallelSaveData.o : $(MODULE_DIR)/parallelSaveData.cc $(INC_DIR)/parallelSaveData.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelSaveDataHDF5.o : $(MODULE_DIR)/parallelSaveDataHDF5.cc $(INC_DIR)/parallelSaveDataHDF5.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) fluxVectorSplitting.o : $(MODULE_DIR)/fluxVectorSplitting.cc $(INC_DIR)/fluxVectorSplitting.h $(INC_DIR)/weno.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) serialEnv.o : $(MODULE_DIR)/serialEnv.cc $(INC_DIR)/serialEnv.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) parallelEnv.o : $(MODULE_DIR)/parallelEnv.cc $(INC_DIR)/parallelEnv.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +# -------------- END TO EDIT ---------------------------------------------------- # Executable main : main.o $(OBJS) $(RTFIND_OBJS) diff --git 
a/Examples/KelvinHelmholtz/SingleFluidIdealRandomMPIHDF5/Makefile b/Examples/KelvinHelmholtz/SingleFluidIdealRandomMPIHDF5/Makefile index e589b24e..97885011 100644 --- a/Examples/KelvinHelmholtz/SingleFluidIdealRandomMPIHDF5/Makefile +++ b/Examples/KelvinHelmholtz/SingleFluidIdealRandomMPIHDF5/Makefile @@ -1,11 +1,11 @@ # Make file for the main function. Builds all modules # and links for main.cc -# -------------- PARAMETERS FOR USERS TO EDIT -------------------- +# -------------- PARAMETERS USERS ARE LIKELY TO NEED TO EDIT ------------------- # Whether to use MPI for multi-cpu processing USE_MPI = 1 -USE_OMP = 1 +USE_OMP = 0 USE_HDF = 1 # Compiler @@ -41,12 +41,14 @@ RTFIND_INC_DIR = ./../../../Project/${PROJECT_TYPE}/CminpackLibrary/Include RTFIND_SRC_DIR = ./../../../Project/${PROJECT_TYPE}//CminpackLibrary/Src ifeq ($(USE_OMP), 1) - OMP_FLAGS = -fopenmp + OMP_FLAGS = -fopenmp endif # C++ compiler flags CXXFLAGS = -Wall -std=c++11 -O3 $(OMP_FLAGS) -Wno-unknown-pragmas +# -------------- TO EDIT IF SOURCE FILES ARE ADDED TO METHOD -------------------- + # Sources SRCS = simulation.cc \ srmhd.cc \ @@ -60,10 +62,12 @@ SRCS = simulation.cc \ fluxVectorSplitting.cc \ SERIAL_SRCS = serialSaveData.cc \ - serialEnv.cc + serialEnv.cc \ ifeq ($(USE_HDF), 1) - SERIAL_SRCS += serialSaveDataHDF5.cc + SERIAL_SRCS += serialSaveDataHDF5.cc \ + initFuncFromCheckpoint.cc \ + serialCheckpointArgs.cc endif PARALLEL_SRCS = parallelSaveData.cc \ @@ -71,36 +75,40 @@ PARALLEL_SRCS = parallelSaveData.cc \ parallelEnv.cc ifeq ($(USE_HDF), 1) - PARALLEL_SRCS += parallelSaveDataHDF5.cc + PARALLEL_SRCS += parallelSaveDataHDF5.cc \ + parallelInitFuncFromCheckpoint.cc \ + parallelCheckpointArgs.cc + endif +# -------------- END TO EDIT ---------------------------------------------------- + ifeq ($(USE_MPI), 1) - SRCS += ${PARALLEL_SRCS} + SRCS += ${PARALLEL_SRCS} else - SRCS += ${SERIAL_SRCS} + SRCS += ${SERIAL_SRCS} endif ifeq ($(USE_HDF), 1) - # Using HDF5 - ifeq($(USE_MPI), 1) - export HDF5_CXX := $(MPI_CC) - export HDF5_CLINKER := $(MPI_CC) - else - export HDF5_CXX := $(CC) - export HDF5_CLINKER := $(CC) - endif - # Compile all sources with the hdf5 compiler wrapper - CC = $(HDF5_CC) +# Using HDF5 + ifeq ($(USE_MPI), 1) + export HDF5_CXX := $(MPI_CC) + export HDF5_CLINKER := $(MPI_CC) + else + export HDF5_CXX := $(CC) + export HDF5_CLINKER := $(CC) + endif +# Compile all sources with the hdf5 compiler wrapper + CC = $(HDF5_CC) else - # Not using HDF5 - ifeq($(USE_MPI), 1) - # Compile all sources with the mpi compiler wrapper - CC = $(MPI_CC) - endif - # If not using hdf5 or mpi, CC remains as defined at top of Makefile +# Not using HDF5 + ifeq ($(USE_MPI), 1) +# Compile all sources with the mpi compiler wrapper + CC = $(MPI_CC) + endif +# If not using hdf5 or mpi, CC remains as defined at top of Makefile endif - # Headers HDRS = ${SRCS:.cc=.h} cudaErrorCheck.h @@ -143,7 +151,7 @@ run : $(RTFIND) $(EXEC) @echo '' @echo '' @echo '######################################################' - @echo '# Executing main programme #' + @echo '# Executing main programme #' @echo '######################################################' @./$(EXEC) @@ -155,21 +163,41 @@ clean : # Build objects # ################# +# -------------- TO EDIT IF SOURCE FILES ARE ADDED TO METHOD -------------------- + +# Object containing main function. 
+# This may need to be edited when creating new examples depending on the h files needed in the main script + +main.o : ./main.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/initFunc.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) $(CXXFLAGS) + +## Build all remaining objects. Should only need to update this list if new source files are added to METHOD + simData.o : $(MODULE_DIR)/simData.cc $(INC_DIR)/simData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) +serialCheckpointArgs.o : $(MODULE_DIR)/serialCheckpointArgs.cc $(INC_DIR)/serialCheckpointArgs.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelCheckpointArgs.o : $(MODULE_DIR)/parallelCheckpointArgs.cc $(INC_DIR)/parallelCheckpointArgs.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + initFunc.o : $(MODULE_DIR)/initFunc.cc $(INC_DIR)/initFunc.h $(INC_DIR)/simData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) +initFuncFromCheckpoint.o : $(MODULE_DIR)/initFuncFromCheckpoint.cc $(INC_DIR)/initFuncFromCheckpoint.h $(INC_DIR)/simData.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelInitFuncFromCheckpoint.o : $(MODULE_DIR)/parallelInitFuncFromCheckpoint.cc $(INC_DIR)/parallelInitFuncFromCheckpoint.h $(INC_DIR)/simData.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + + srmhd.o : $(MODULE_DIR)/srmhd.cc $(INC_DIR)/srmhd.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) simulation.o : $(MODULE_DIR)/simulation.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/saveData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -main.o : ./main.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/initFunc.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) $(CXXFLAGS) - wenoUpwinds.o : $(MODULE_DIR)/wenoUpwinds.cc $(INC_DIR)/wenoUpwinds.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) @@ -177,7 +205,7 @@ weno.o : $(MODULE_DIR)/weno.cc $(INC_DIR)/weno.h $(INC_DIR)/wenoUpwinds.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) RK2.o : $(MODULE_DIR)/RK2.cc $(INC_DIR)/RK2.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) rkSplit.o : $(MODULE_DIR)/rkSplit.cc $(INC_DIR)/rkSplit.h $(INC_DIR)/RK2.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) @@ -189,25 +217,27 @@ parallelBoundaryConds.o : $(MODULE_DIR)/parallelBoundaryConds.cc $(INC_DIR)/para $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) serialSaveData.o : $(MODULE_DIR)/serialSaveData.cc $(INC_DIR)/serialSaveData.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) serialSaveDataHDF5.o : $(MODULE_DIR)/serialSaveDataHDF5.cc $(INC_DIR)/serialSaveDataHDF5.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) parallelSaveData.o : $(MODULE_DIR)/parallelSaveData.cc $(INC_DIR)/parallelSaveData.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) parallelSaveDataHDF5.o : $(MODULE_DIR)/parallelSaveDataHDF5.cc $(INC_DIR)/parallelSaveDataHDF5.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) fluxVectorSplitting.o : $(MODULE_DIR)/fluxVectorSplitting.cc $(INC_DIR)/fluxVectorSplitting.h $(INC_DIR)/weno.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) serialEnv.o : $(MODULE_DIR)/serialEnv.cc $(INC_DIR)/serialEnv.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) parallelEnv.o : $(MODULE_DIR)/parallelEnv.cc $(INC_DIR)/parallelEnv.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +# -------------- END TO EDIT ---------------------------------------------------- # Executable main : main.o $(OBJS) 
$(RTFIND_OBJS) diff --git a/Examples/KelvinHelmholtz/SingleFluidIdealRandomMPIHDF5CheckpointRestart/Makefile b/Examples/KelvinHelmholtz/SingleFluidIdealRandomMPIHDF5CheckpointRestart/Makefile index 3f6809d4..94566424 100644 --- a/Examples/KelvinHelmholtz/SingleFluidIdealRandomMPIHDF5CheckpointRestart/Makefile +++ b/Examples/KelvinHelmholtz/SingleFluidIdealRandomMPIHDF5CheckpointRestart/Makefile @@ -1,14 +1,40 @@ +## This is a template Makefile to use as a starting point for all KelvinHelmholtz examples. +# +# It can handle compiling with mpi and/or hdf5 if required, which can be controlled from the first +# section 'PARAMETERS USERS ARE LIKELY TO NEED TO EDIT' +# +# Creating a new example: +# If any source files have been added to METHOD for a new example, look for and update the +# 'TO EDIT IF SOURCE FILES ARE ADDED TO METHOD' sections, ideally updating them in this template file +# as well as in the new example makefile. +# +# ------------------------------------------------------------------------------ + # Make file for the main function. Builds all modules # and links for main.cc -# Compiler -CC = mpic++ -##CC = g++ +# -------------- PARAMETERS USERS ARE LIKELY TO NEED TO EDIT ------------------- +# Whether to use MPI for multi-cpu processing USE_MPI = 1 USE_OMP = 0 USE_HDF = 1 +# Compiler +CC = g++ +# --- if USE_MPI --- +# If using mpi, aditionally specify a c++ capable mpi compiler. In systems with multiple versions of MPI, +# the particular version may need to be specified with eg mpicxx.mpich +MPI_CC = mpic++ +# --- if USE_HDF --- +# If using hdf5, additionally specify a hdf5 compiler. If using mpi, this must be the version of the hdf5 +# compiler available on your system that links the correct mpi libraries. Should +# be one of h5pcc, h5pcc.openmpi or h5pcc.mpich. 
+HDF5_CC = h5pcc + +# -------------- END PARAMETERS USERS ARE LIKELY TO NEED TO EDIT -------------------- + + PROJECT_TYPE = CPU # Module directory @@ -27,19 +53,21 @@ RTFIND_INC_DIR = ./../../../Project/${PROJECT_TYPE}/CminpackLibrary/Include RTFIND_SRC_DIR = ./../../../Project/${PROJECT_TYPE}//CminpackLibrary/Src ifeq ($(USE_OMP), 1) - OMP_FLAGS = -fopenmp + OMP_FLAGS = -fopenmp endif # C++ compiler flags -CXXFLAGS = -Wall -std=c++11 -O3 $(OMP_FLAGS) -Wno-unknown-pragmas -g +CXXFLAGS = -Wall -std=c++11 -O3 $(OMP_FLAGS) -Wno-unknown-pragmas + +# -------------- TO EDIT IF SOURCE FILES ARE ADDED TO METHOD -------------------- # Sources SRCS = simulation.cc \ srmhd.cc \ initFunc.cc \ simData.cc \ - wenoUpwinds.cc \ - weno.cc \ + wenoUpwinds.cc \ + weno.cc \ RK2.cc \ rkSplit.cc \ boundaryConds.cc \ @@ -49,9 +77,9 @@ SERIAL_SRCS = serialSaveData.cc \ serialEnv.cc \ ifeq ($(USE_HDF), 1) - SERIAL_SRCS += serialSaveDataHDF5.cc \ + SERIAL_SRCS += serialSaveDataHDF5.cc \ initFuncFromCheckpoint.cc \ - checkpointArgs.cc + serialCheckpointArgs.cc endif PARALLEL_SRCS = parallelSaveData.cc \ @@ -59,30 +87,38 @@ PARALLEL_SRCS = parallelSaveData.cc \ parallelEnv.cc ifeq ($(USE_HDF), 1) - PARALLEL_SRCS += parallelSaveDataHDF5.cc \ + PARALLEL_SRCS += parallelSaveDataHDF5.cc \ parallelInitFuncFromCheckpoint.cc \ parallelCheckpointArgs.cc endif +# -------------- END TO EDIT ---------------------------------------------------- + ifeq ($(USE_MPI), 1) - SRCS += ${PARALLEL_SRCS} + SRCS += ${PARALLEL_SRCS} else - SRCS += ${SERIAL_SRCS} + SRCS += ${SERIAL_SRCS} endif ifeq ($(USE_HDF), 1) - #export HDF5_CXX := $(CC) - #export HDF5_CLINKER := $(CC) - export HDF5_CXX := mpic++ - export HDF5_CLINKER := mpic++ - - ifeq ($(USE_OMP), 1) - CC = h5pcc.openmpi - else - ##CC = h5c++ - CC = h5pcc - endif +# Using HDF5 + ifeq ($(USE_MPI), 1) + export HDF5_CXX := $(MPI_CC) + export HDF5_CLINKER := $(MPI_CC) + else + export HDF5_CXX := $(CC) + export HDF5_CLINKER := $(CC) + endif +# Compile all sources with the hdf5 compiler wrapper + CC = $(HDF5_CC) +else +# Not using HDF5 + ifeq ($(USE_MPI), 1) +# Compile all sources with the mpi compiler wrapper + CC = $(MPI_CC) + endif +# If not using hdf5 or mpi, CC remains as defined at top of Makefile endif # Headers @@ -127,7 +163,7 @@ run : $(RTFIND) $(EXEC) @echo '' @echo '' @echo '######################################################' - @echo '# Executing main programme #' + @echo '# Executing main programme #' @echo '######################################################' @./$(EXEC) @@ -139,6 +175,16 @@ clean : # Build objects # ################# +# -------------- TO EDIT IF SOURCE FILES ARE ADDED TO METHOD -------------------- + +# Object containing main function. +# This may need to be edited when creating new examples depending on the h files needed in the main script + +main.o : ./main.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/parallelCheckpointArgs.h $(INC_DIR)/initFunc.h $(INC_DIR)/parallelInitFuncFromCheckpoint.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) $(CXXFLAGS) + +## Build all remaining objects. 
Should only need to update this list if new source files are added to METHOD + simData.o : $(MODULE_DIR)/simData.cc $(INC_DIR)/simData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) @@ -164,9 +210,6 @@ srmhd.o : $(MODULE_DIR)/srmhd.cc $(INC_DIR)/srmhd.h simulation.o : $(MODULE_DIR)/simulation.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/saveData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -main.o : ./main.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/parallelCheckpointArgs.h $(INC_DIR)/initFunc.h $(INC_DIR)/parallelInitFuncFromCheckpoint.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) $(CXXFLAGS) - wenoUpwinds.o : $(MODULE_DIR)/wenoUpwinds.cc $(INC_DIR)/wenoUpwinds.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) @@ -174,7 +217,7 @@ weno.o : $(MODULE_DIR)/weno.cc $(INC_DIR)/weno.h $(INC_DIR)/wenoUpwinds.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) RK2.o : $(MODULE_DIR)/RK2.cc $(INC_DIR)/RK2.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) rkSplit.o : $(MODULE_DIR)/rkSplit.cc $(INC_DIR)/rkSplit.h $(INC_DIR)/RK2.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) @@ -186,25 +229,27 @@ parallelBoundaryConds.o : $(MODULE_DIR)/parallelBoundaryConds.cc $(INC_DIR)/para $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) serialSaveData.o : $(MODULE_DIR)/serialSaveData.cc $(INC_DIR)/serialSaveData.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) serialSaveDataHDF5.o : $(MODULE_DIR)/serialSaveDataHDF5.cc $(INC_DIR)/serialSaveDataHDF5.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) parallelSaveData.o : $(MODULE_DIR)/parallelSaveData.cc $(INC_DIR)/parallelSaveData.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) parallelSaveDataHDF5.o : $(MODULE_DIR)/parallelSaveDataHDF5.cc $(INC_DIR)/parallelSaveDataHDF5.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) fluxVectorSplitting.o : $(MODULE_DIR)/fluxVectorSplitting.cc $(INC_DIR)/fluxVectorSplitting.h $(INC_DIR)/weno.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) serialEnv.o : $(MODULE_DIR)/serialEnv.cc $(INC_DIR)/serialEnv.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) parallelEnv.o : $(MODULE_DIR)/parallelEnv.cc $(INC_DIR)/parallelEnv.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +# -------------- END TO EDIT ---------------------------------------------------- # Executable main : main.o $(OBJS) $(RTFIND_OBJS) diff --git a/Examples/KelvinHelmholtz/SingleFluidIdealRandomMPIHDF5CheckpointRestart/main.cc b/Examples/KelvinHelmholtz/SingleFluidIdealRandomMPIHDF5CheckpointRestart/main.cc index cc2730fa..9b1ac9f2 100644 --- a/Examples/KelvinHelmholtz/SingleFluidIdealRandomMPIHDF5CheckpointRestart/main.cc +++ b/Examples/KelvinHelmholtz/SingleFluidIdealRandomMPIHDF5CheckpointRestart/main.cc @@ -41,7 +41,7 @@ int main(int argc, char *argv[]) { int seed(atoi(argv[1])); int reportItersPeriod(50); - ParallelEnv env(&argc, &argv, 2, 1, 1); + ParallelEnv env(&argc, &argv, 2, 2, 1); //const char* filename = "data_t3.checkpoint.hdf5"; const char* filename = "data_t0.checkpoint.hdf5"; diff --git a/Examples/KelvinHelmholtz/SingleFluidIdealRandomSerial/Makefile b/Examples/KelvinHelmholtz/SingleFluidIdealRandomSerial/Makefile index 264c491b..c1bcab38 100644 --- a/Examples/KelvinHelmholtz/SingleFluidIdealRandomSerial/Makefile +++ b/Examples/KelvinHelmholtz/SingleFluidIdealRandomSerial/Makefile @@ -1,12 +1,27 @@ # Make file for the main 
function. Builds all modules # and links for main.cc -# Compiler -##CC = mpic++ -CC = g++ +# -------------- PARAMETERS USERS ARE LIKELY TO NEED TO EDIT ------------------- +# Whether to use MPI for multi-cpu processing USE_MPI = 0 USE_OMP = 0 +USE_HDF = 0 + +# Compiler +CC = g++ +# --- if USE_MPI --- +# If using mpi, aditionally specify a c++ capable mpi compiler. In systems with multiple versions of MPI, +# the particular version may need to be specified with eg mpicxx.mpich +MPI_CC = mpic++ +# --- if USE_HDF --- +# If using hdf5, additionally specify a hdf5 compiler. If using mpi, this must be the version of the hdf5 +# compiler available on your system that links the correct mpi libraries. Should +# be one of h5pcc, h5pcc.openmpi or h5pcc.mpich. +HDF5_CC = h5pcc + +# -------------- END PARAMETERS USERS ARE LIKELY TO NEED TO EDIT -------------------- + PROJECT_TYPE = CPU @@ -26,35 +41,72 @@ RTFIND_INC_DIR = ./../../../Project/${PROJECT_TYPE}/CminpackLibrary/Include RTFIND_SRC_DIR = ./../../../Project/${PROJECT_TYPE}//CminpackLibrary/Src ifeq ($(USE_OMP), 1) - OMP_FLAGS = -fopenmp + OMP_FLAGS = -fopenmp endif # C++ compiler flags CXXFLAGS = -Wall -std=c++11 -O3 $(OMP_FLAGS) -Wno-unknown-pragmas +# -------------- TO EDIT IF SOURCE FILES ARE ADDED TO METHOD -------------------- + # Sources SRCS = simulation.cc \ srmhd.cc \ initFunc.cc \ simData.cc \ - wenoUpwinds.cc \ - weno.cc \ + wenoUpwinds.cc \ + weno.cc \ RK2.cc \ rkSplit.cc \ boundaryConds.cc \ fluxVectorSplitting.cc \ SERIAL_SRCS = serialSaveData.cc \ - serialEnv.cc + serialEnv.cc \ + +ifeq ($(USE_HDF), 1) + SERIAL_SRCS += serialSaveDataHDF5.cc \ + initFuncFromCheckpoint.cc \ + serialCheckpointArgs.cc +endif PARALLEL_SRCS = parallelSaveData.cc \ - parallelBoundaryConds.cc \ - parallelEnv.cc + parallelBoundaryConds.cc \ + parallelEnv.cc + +ifeq ($(USE_HDF), 1) + PARALLEL_SRCS += parallelSaveDataHDF5.cc \ + parallelInitFuncFromCheckpoint.cc \ + parallelCheckpointArgs.cc + +endif + +# -------------- END TO EDIT ---------------------------------------------------- ifeq ($(USE_MPI), 1) - SRCS += ${PARALLEL_SRCS} + SRCS += ${PARALLEL_SRCS} +else + SRCS += ${SERIAL_SRCS} +endif + +ifeq ($(USE_HDF), 1) +# Using HDF5 + ifeq ($(USE_MPI), 1) + export HDF5_CXX := $(MPI_CC) + export HDF5_CLINKER := $(MPI_CC) + else + export HDF5_CXX := $(CC) + export HDF5_CLINKER := $(CC) + endif +# Compile all sources with the hdf5 compiler wrapper + CC = $(HDF5_CC) else - SRCS += ${SERIAL_SRCS} +# Not using HDF5 + ifeq ($(USE_MPI), 1) +# Compile all sources with the mpi compiler wrapper + CC = $(MPI_CC) + endif +# If not using hdf5 or mpi, CC remains as defined at top of Makefile endif # Headers @@ -99,7 +151,7 @@ run : $(RTFIND) $(EXEC) @echo '' @echo '' @echo '######################################################' - @echo '# Executing main programme #' + @echo '# Executing main programme #' @echo '######################################################' @./$(EXEC) @@ -111,21 +163,41 @@ clean : # Build objects # ################# +# -------------- TO EDIT IF SOURCE FILES ARE ADDED TO METHOD -------------------- + +# Object containing main function. +# This may need to be edited when creating new examples depending on the h files needed in the main script + +main.o : ./main.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/initFunc.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) $(CXXFLAGS) + +## Build all remaining objects. 
Should only need to update this list if new source files are added to METHOD + simData.o : $(MODULE_DIR)/simData.cc $(INC_DIR)/simData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) +serialCheckpointArgs.o : $(MODULE_DIR)/serialCheckpointArgs.cc $(INC_DIR)/serialCheckpointArgs.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelCheckpointArgs.o : $(MODULE_DIR)/parallelCheckpointArgs.cc $(INC_DIR)/parallelCheckpointArgs.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + initFunc.o : $(MODULE_DIR)/initFunc.cc $(INC_DIR)/initFunc.h $(INC_DIR)/simData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) +initFuncFromCheckpoint.o : $(MODULE_DIR)/initFuncFromCheckpoint.cc $(INC_DIR)/initFuncFromCheckpoint.h $(INC_DIR)/simData.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelInitFuncFromCheckpoint.o : $(MODULE_DIR)/parallelInitFuncFromCheckpoint.cc $(INC_DIR)/parallelInitFuncFromCheckpoint.h $(INC_DIR)/simData.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + + srmhd.o : $(MODULE_DIR)/srmhd.cc $(INC_DIR)/srmhd.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) simulation.o : $(MODULE_DIR)/simulation.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/saveData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -main.o : ./main.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/initFunc.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) $(CXXFLAGS) - wenoUpwinds.o : $(MODULE_DIR)/wenoUpwinds.cc $(INC_DIR)/wenoUpwinds.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) @@ -133,7 +205,7 @@ weno.o : $(MODULE_DIR)/weno.cc $(INC_DIR)/weno.h $(INC_DIR)/wenoUpwinds.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) RK2.o : $(MODULE_DIR)/RK2.cc $(INC_DIR)/RK2.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) rkSplit.o : $(MODULE_DIR)/rkSplit.cc $(INC_DIR)/rkSplit.h $(INC_DIR)/RK2.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) @@ -145,19 +217,27 @@ parallelBoundaryConds.o : $(MODULE_DIR)/parallelBoundaryConds.cc $(INC_DIR)/para $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) serialSaveData.o : $(MODULE_DIR)/serialSaveData.cc $(INC_DIR)/serialSaveData.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +serialSaveDataHDF5.o : $(MODULE_DIR)/serialSaveDataHDF5.cc $(INC_DIR)/serialSaveDataHDF5.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) parallelSaveData.o : $(MODULE_DIR)/parallelSaveData.cc $(INC_DIR)/parallelSaveData.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelSaveDataHDF5.o : $(MODULE_DIR)/parallelSaveDataHDF5.cc $(INC_DIR)/parallelSaveDataHDF5.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) fluxVectorSplitting.o : $(MODULE_DIR)/fluxVectorSplitting.cc $(INC_DIR)/fluxVectorSplitting.h $(INC_DIR)/weno.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) serialEnv.o : $(MODULE_DIR)/serialEnv.cc $(INC_DIR)/serialEnv.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) parallelEnv.o : $(MODULE_DIR)/parallelEnv.cc $(INC_DIR)/parallelEnv.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +# -------------- END TO EDIT ---------------------------------------------------- # Executable main : main.o $(OBJS) $(RTFIND_OBJS) diff --git a/Examples/KelvinHelmholtz/SingleFluidIdealRandomSerialHDF5/Makefile b/Examples/KelvinHelmholtz/SingleFluidIdealRandomSerialHDF5/Makefile index 369081cd..8e5d30cb 100644 --- a/Examples/KelvinHelmholtz/SingleFluidIdealRandomSerialHDF5/Makefile +++ b/Examples/KelvinHelmholtz/SingleFluidIdealRandomSerialHDF5/Makefile @@ -1,14 +1,28 @@ # Make file for the main function. 
Builds all modules # and links for main.cc -# Compiler -##CC = mpic++ -CC = g++ +# -------------- PARAMETERS USERS ARE LIKELY TO NEED TO EDIT ------------------- +# Whether to use MPI for multi-cpu processing USE_MPI = 0 USE_OMP = 0 USE_HDF = 1 +# Compiler +CC = g++ +# --- if USE_MPI --- +# If using mpi, aditionally specify a c++ capable mpi compiler. In systems with multiple versions of MPI, +# the particular version may need to be specified with eg mpicxx.mpich +MPI_CC = mpic++ +# --- if USE_HDF --- +# If using hdf5, additionally specify a hdf5 compiler. If using mpi, this must be the version of the hdf5 +# compiler available on your system that links the correct mpi libraries. Should +# be one of h5pcc, h5pcc.openmpi or h5pcc.mpich. +HDF5_CC = h5cc + +# -------------- END PARAMETERS USERS ARE LIKELY TO NEED TO EDIT -------------------- + + PROJECT_TYPE = CPU # Module directory @@ -27,29 +41,33 @@ RTFIND_INC_DIR = ./../../../Project/${PROJECT_TYPE}/CminpackLibrary/Include RTFIND_SRC_DIR = ./../../../Project/${PROJECT_TYPE}//CminpackLibrary/Src ifeq ($(USE_OMP), 1) - OMP_FLAGS = -fopenmp + OMP_FLAGS = -fopenmp endif # C++ compiler flags -CXXFLAGS = -Wall -std=c++11 -O3 $(OMP_FLAGS) -Wno-unknown-pragmas -g +CXXFLAGS = -Wall -std=c++11 -O3 $(OMP_FLAGS) -Wno-unknown-pragmas + +# -------------- TO EDIT IF SOURCE FILES ARE ADDED TO METHOD -------------------- # Sources SRCS = simulation.cc \ srmhd.cc \ initFunc.cc \ simData.cc \ - wenoUpwinds.cc \ - weno.cc \ + wenoUpwinds.cc \ + weno.cc \ RK2.cc \ rkSplit.cc \ boundaryConds.cc \ fluxVectorSplitting.cc \ SERIAL_SRCS = serialSaveData.cc \ - serialEnv.cc + serialEnv.cc \ ifeq ($(USE_HDF), 1) - SERIAL_SRCS += serialSaveDataHDF5.cc + SERIAL_SRCS += serialSaveDataHDF5.cc \ + initFuncFromCheckpoint.cc \ + serialCheckpointArgs.cc endif PARALLEL_SRCS = parallelSaveData.cc \ @@ -57,24 +75,38 @@ PARALLEL_SRCS = parallelSaveData.cc \ parallelEnv.cc ifeq ($(USE_HDF), 1) - PARALLEL_SRCS += parallelSaveDataHDF5.cc + PARALLEL_SRCS += parallelSaveDataHDF5.cc \ + parallelInitFuncFromCheckpoint.cc \ + parallelCheckpointArgs.cc + endif +# -------------- END TO EDIT ---------------------------------------------------- + ifeq ($(USE_MPI), 1) - SRCS += ${PARALLEL_SRCS} + SRCS += ${PARALLEL_SRCS} else - SRCS += ${SERIAL_SRCS} + SRCS += ${SERIAL_SRCS} endif ifeq ($(USE_HDF), 1) - export HDF5_CXX := $(CC) - export HDF5_CLINKER := $(CC) - - ifeq ($(USE_OMP), 1) - CC = h5pcc.openmpi - else - CC = h5c++ - endif +# Using HDF5 + ifeq ($(USE_MPI), 1) + export HDF5_CXX := $(MPI_CC) + export HDF5_CLINKER := $(MPI_CC) + else + export HDF5_CXX := $(CC) + export HDF5_CLINKER := $(CC) + endif +# Compile all sources with the hdf5 compiler wrapper + CC = $(HDF5_CC) +else +# Not using HDF5 + ifeq ($(USE_MPI), 1) +# Compile all sources with the mpi compiler wrapper + CC = $(MPI_CC) + endif +# If not using hdf5 or mpi, CC remains as defined at top of Makefile endif # Headers @@ -119,7 +151,7 @@ run : $(RTFIND) $(EXEC) @echo '' @echo '' @echo '######################################################' - @echo '# Executing main programme #' + @echo '# Executing main programme #' @echo '######################################################' @./$(EXEC) @@ -131,21 +163,41 @@ clean : # Build objects # ################# +# -------------- TO EDIT IF SOURCE FILES ARE ADDED TO METHOD -------------------- + +# Object containing main function. 
+# This may need to be edited when creating new examples depending on the h files needed in the main script + +main.o : ./main.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/initFunc.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) $(CXXFLAGS) + +## Build all remaining objects. Should only need to update this list if new source files are added to METHOD + simData.o : $(MODULE_DIR)/simData.cc $(INC_DIR)/simData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) +serialCheckpointArgs.o : $(MODULE_DIR)/serialCheckpointArgs.cc $(INC_DIR)/serialCheckpointArgs.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelCheckpointArgs.o : $(MODULE_DIR)/parallelCheckpointArgs.cc $(INC_DIR)/parallelCheckpointArgs.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + initFunc.o : $(MODULE_DIR)/initFunc.cc $(INC_DIR)/initFunc.h $(INC_DIR)/simData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) +initFuncFromCheckpoint.o : $(MODULE_DIR)/initFuncFromCheckpoint.cc $(INC_DIR)/initFuncFromCheckpoint.h $(INC_DIR)/simData.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelInitFuncFromCheckpoint.o : $(MODULE_DIR)/parallelInitFuncFromCheckpoint.cc $(INC_DIR)/parallelInitFuncFromCheckpoint.h $(INC_DIR)/simData.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + + srmhd.o : $(MODULE_DIR)/srmhd.cc $(INC_DIR)/srmhd.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) simulation.o : $(MODULE_DIR)/simulation.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/saveData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -main.o : ./main.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/initFunc.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) $(CXXFLAGS) - wenoUpwinds.o : $(MODULE_DIR)/wenoUpwinds.cc $(INC_DIR)/wenoUpwinds.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) @@ -153,7 +205,7 @@ weno.o : $(MODULE_DIR)/weno.cc $(INC_DIR)/weno.h $(INC_DIR)/wenoUpwinds.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) RK2.o : $(MODULE_DIR)/RK2.cc $(INC_DIR)/RK2.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) rkSplit.o : $(MODULE_DIR)/rkSplit.cc $(INC_DIR)/rkSplit.h $(INC_DIR)/RK2.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) @@ -165,25 +217,27 @@ parallelBoundaryConds.o : $(MODULE_DIR)/parallelBoundaryConds.cc $(INC_DIR)/para $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) serialSaveData.o : $(MODULE_DIR)/serialSaveData.cc $(INC_DIR)/serialSaveData.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) serialSaveDataHDF5.o : $(MODULE_DIR)/serialSaveDataHDF5.cc $(INC_DIR)/serialSaveDataHDF5.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) parallelSaveData.o : $(MODULE_DIR)/parallelSaveData.cc $(INC_DIR)/parallelSaveData.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) parallelSaveDataHDF5.o : $(MODULE_DIR)/parallelSaveDataHDF5.cc $(INC_DIR)/parallelSaveDataHDF5.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) fluxVectorSplitting.o : $(MODULE_DIR)/fluxVectorSplitting.cc $(INC_DIR)/fluxVectorSplitting.h $(INC_DIR)/weno.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) serialEnv.o : $(MODULE_DIR)/serialEnv.cc $(INC_DIR)/serialEnv.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) parallelEnv.o : $(MODULE_DIR)/parallelEnv.cc $(INC_DIR)/parallelEnv.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +# -------------- END TO EDIT ---------------------------------------------------- # Executable main : main.o $(OBJS) 
$(RTFIND_OBJS) diff --git a/Examples/KelvinHelmholtz/SingleFluidIdealRandomSerialHDF5CheckpointRestart/Makefile b/Examples/KelvinHelmholtz/SingleFluidIdealRandomSerialHDF5CheckpointRestart/Makefile index 341a63c9..e725e707 100644 --- a/Examples/KelvinHelmholtz/SingleFluidIdealRandomSerialHDF5CheckpointRestart/Makefile +++ b/Examples/KelvinHelmholtz/SingleFluidIdealRandomSerialHDF5CheckpointRestart/Makefile @@ -1,14 +1,28 @@ # Make file for the main function. Builds all modules # and links for main.cc -# Compiler -##CC = mpic++ -CC = g++ +# -------------- PARAMETERS USERS ARE LIKELY TO NEED TO EDIT ------------------- +# Whether to use MPI for multi-cpu processing USE_MPI = 0 USE_OMP = 0 USE_HDF = 1 +# Compiler +CC = g++ +# --- if USE_MPI --- +# If using mpi, aditionally specify a c++ capable mpi compiler. In systems with multiple versions of MPI, +# the particular version may need to be specified with eg mpicxx.mpich +MPI_CC = mpic++ +# --- if USE_HDF --- +# If using hdf5, additionally specify a hdf5 compiler. If using mpi, this must be the version of the hdf5 +# compiler available on your system that links the correct mpi libraries. Should +# be one of h5pcc, h5pcc.openmpi or h5pcc.mpich. +HDF5_CC = h5cc + +# -------------- END PARAMETERS USERS ARE LIKELY TO NEED TO EDIT -------------------- + + PROJECT_TYPE = CPU # Module directory @@ -27,31 +41,33 @@ RTFIND_INC_DIR = ./../../../Project/${PROJECT_TYPE}/CminpackLibrary/Include RTFIND_SRC_DIR = ./../../../Project/${PROJECT_TYPE}//CminpackLibrary/Src ifeq ($(USE_OMP), 1) - OMP_FLAGS = -fopenmp + OMP_FLAGS = -fopenmp endif # C++ compiler flags -CXXFLAGS = -Wall -std=c++11 -O3 $(OMP_FLAGS) -Wno-unknown-pragmas -g +CXXFLAGS = -Wall -std=c++11 -O3 $(OMP_FLAGS) -Wno-unknown-pragmas + +# -------------- TO EDIT IF SOURCE FILES ARE ADDED TO METHOD -------------------- # Sources SRCS = simulation.cc \ srmhd.cc \ initFunc.cc \ simData.cc \ - checkpointArgs.cc \ - initFuncFromCheckpoint.cc \ - wenoUpwinds.cc \ - weno.cc \ + wenoUpwinds.cc \ + weno.cc \ RK2.cc \ rkSplit.cc \ boundaryConds.cc \ fluxVectorSplitting.cc \ SERIAL_SRCS = serialSaveData.cc \ - serialEnv.cc + serialEnv.cc \ ifeq ($(USE_HDF), 1) - SERIAL_SRCS += serialSaveDataHDF5.cc + SERIAL_SRCS += serialSaveDataHDF5.cc \ + initFuncFromCheckpoint.cc \ + serialCheckpointArgs.cc endif PARALLEL_SRCS = parallelSaveData.cc \ @@ -59,24 +75,38 @@ PARALLEL_SRCS = parallelSaveData.cc \ parallelEnv.cc ifeq ($(USE_HDF), 1) - PARALLEL_SRCS += parallelSaveDataHDF5.cc + PARALLEL_SRCS += parallelSaveDataHDF5.cc \ + parallelInitFuncFromCheckpoint.cc \ + parallelCheckpointArgs.cc + endif +# -------------- END TO EDIT ---------------------------------------------------- + ifeq ($(USE_MPI), 1) - SRCS += ${PARALLEL_SRCS} + SRCS += ${PARALLEL_SRCS} else - SRCS += ${SERIAL_SRCS} + SRCS += ${SERIAL_SRCS} endif ifeq ($(USE_HDF), 1) - export HDF5_CXX := $(CC) - export HDF5_CLINKER := $(CC) - - ifeq ($(USE_OMP), 1) - CC = h5pcc.openmpi - else - CC = h5c++ - endif +# Using HDF5 + ifeq ($(USE_MPI), 1) + export HDF5_CXX := $(MPI_CC) + export HDF5_CLINKER := $(MPI_CC) + else + export HDF5_CXX := $(CC) + export HDF5_CLINKER := $(CC) + endif +# Compile all sources with the hdf5 compiler wrapper + CC = $(HDF5_CC) +else +# Not using HDF5 + ifeq ($(USE_MPI), 1) +# Compile all sources with the mpi compiler wrapper + CC = $(MPI_CC) + endif +# If not using hdf5 or mpi, CC remains as defined at top of Makefile endif # Headers @@ -121,7 +151,7 @@ run : $(RTFIND) $(EXEC) @echo '' @echo '' @echo 
'######################################################' - @echo '# Executing main programme #' + @echo '# Executing main programme #' @echo '######################################################' @./$(EXEC) @@ -133,10 +163,23 @@ clean : # Build objects # ################# +# -------------- TO EDIT IF SOURCE FILES ARE ADDED TO METHOD -------------------- + +# Object containing main function. +# This may need to be edited when creating new examples depending on the h files needed in the main script + +main.o : ./main.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/serialCheckpointArgs.h $(INC_DIR)/initFunc.h $(INC_DIR)/initFuncFromCheckpoint.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) $(CXXFLAGS) + +## Build all remaining objects. Should only need to update this list if new source files are added to METHOD + simData.o : $(MODULE_DIR)/simData.cc $(INC_DIR)/simData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -checkpointArgs.o : $(MODULE_DIR)/checkpointArgs.cc $(INC_DIR)/checkpointArgs.h +serialCheckpointArgs.o : $(MODULE_DIR)/serialCheckpointArgs.cc $(INC_DIR)/serialCheckpointArgs.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelCheckpointArgs.o : $(MODULE_DIR)/parallelCheckpointArgs.cc $(INC_DIR)/parallelCheckpointArgs.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) initFunc.o : $(MODULE_DIR)/initFunc.cc $(INC_DIR)/initFunc.h $(INC_DIR)/simData.h @@ -145,15 +188,16 @@ initFunc.o : $(MODULE_DIR)/initFunc.cc $(INC_DIR)/initFunc.h $(INC_DIR)/simData. initFuncFromCheckpoint.o : $(MODULE_DIR)/initFuncFromCheckpoint.cc $(INC_DIR)/initFuncFromCheckpoint.h $(INC_DIR)/simData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) +parallelInitFuncFromCheckpoint.o : $(MODULE_DIR)/parallelInitFuncFromCheckpoint.cc $(INC_DIR)/parallelInitFuncFromCheckpoint.h $(INC_DIR)/simData.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + + srmhd.o : $(MODULE_DIR)/srmhd.cc $(INC_DIR)/srmhd.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) simulation.o : $(MODULE_DIR)/simulation.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/saveData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -main.o : ./main.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/checkpointArgs.h $(INC_DIR)/initFunc.h $(INC_DIR)/initFuncFromCheckpoint.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) $(CXXFLAGS) - wenoUpwinds.o : $(MODULE_DIR)/wenoUpwinds.cc $(INC_DIR)/wenoUpwinds.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) @@ -161,7 +205,7 @@ weno.o : $(MODULE_DIR)/weno.cc $(INC_DIR)/weno.h $(INC_DIR)/wenoUpwinds.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) RK2.o : $(MODULE_DIR)/RK2.cc $(INC_DIR)/RK2.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) rkSplit.o : $(MODULE_DIR)/rkSplit.cc $(INC_DIR)/rkSplit.h $(INC_DIR)/RK2.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) @@ -173,25 +217,27 @@ parallelBoundaryConds.o : $(MODULE_DIR)/parallelBoundaryConds.cc $(INC_DIR)/para $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) serialSaveData.o : $(MODULE_DIR)/serialSaveData.cc $(INC_DIR)/serialSaveData.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) serialSaveDataHDF5.o : $(MODULE_DIR)/serialSaveDataHDF5.cc $(INC_DIR)/serialSaveDataHDF5.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) parallelSaveData.o : $(MODULE_DIR)/parallelSaveData.cc $(INC_DIR)/parallelSaveData.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) parallelSaveDataHDF5.o : $(MODULE_DIR)/parallelSaveDataHDF5.cc 
$(INC_DIR)/parallelSaveDataHDF5.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) fluxVectorSplitting.o : $(MODULE_DIR)/fluxVectorSplitting.cc $(INC_DIR)/fluxVectorSplitting.h $(INC_DIR)/weno.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) serialEnv.o : $(MODULE_DIR)/serialEnv.cc $(INC_DIR)/serialEnv.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) parallelEnv.o : $(MODULE_DIR)/parallelEnv.cc $(INC_DIR)/parallelEnv.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +# -------------- END TO EDIT ---------------------------------------------------- # Executable main : main.o $(OBJS) $(RTFIND_OBJS) diff --git a/Examples/KelvinHelmholtz/SingleFluidIdealRandomSerialHDF5CheckpointRestart/main.cc b/Examples/KelvinHelmholtz/SingleFluidIdealRandomSerialHDF5CheckpointRestart/main.cc index be8d0819..eb2b236c 100644 --- a/Examples/KelvinHelmholtz/SingleFluidIdealRandomSerialHDF5CheckpointRestart/main.cc +++ b/Examples/KelvinHelmholtz/SingleFluidIdealRandomSerialHDF5CheckpointRestart/main.cc @@ -1,6 +1,6 @@ // Serial main #include "simData.h" -#include "checkpointArgs.h" +#include "serialCheckpointArgs.h" #include "simulation.h" #include "initFunc.h" #include "initFuncFromCheckpoint.h" @@ -49,7 +49,7 @@ int main(int argc, char *argv[]) { //Data data(nx, ny, nz, xmin, xmax, ymin, ymax, zmin, zmax, endTime, &env, // cfl, Ng, gamma, sigma, cp, mu1, mu2, frameSkip, reportItersPeriod); - CheckpointArgs checkpointArgs(filename, &env); + SerialCheckpointArgs checkpointArgs(filename, &env); checkpointArgs.endTime=endTime; Data data(checkpointArgs, &env, mu1, mu2, frameSkip, reportItersPeriod); diff --git a/Examples/KelvinHelmholtz/SingleFluidIdealSerial/Makefile b/Examples/KelvinHelmholtz/SingleFluidIdealSerial/Makefile index 264c491b..c1bcab38 100644 --- a/Examples/KelvinHelmholtz/SingleFluidIdealSerial/Makefile +++ b/Examples/KelvinHelmholtz/SingleFluidIdealSerial/Makefile @@ -1,12 +1,27 @@ # Make file for the main function. Builds all modules # and links for main.cc -# Compiler -##CC = mpic++ -CC = g++ +# -------------- PARAMETERS USERS ARE LIKELY TO NEED TO EDIT ------------------- +# Whether to use MPI for multi-cpu processing USE_MPI = 0 USE_OMP = 0 +USE_HDF = 0 + +# Compiler +CC = g++ +# --- if USE_MPI --- +# If using mpi, aditionally specify a c++ capable mpi compiler. In systems with multiple versions of MPI, +# the particular version may need to be specified with eg mpicxx.mpich +MPI_CC = mpic++ +# --- if USE_HDF --- +# If using hdf5, additionally specify a hdf5 compiler. If using mpi, this must be the version of the hdf5 +# compiler available on your system that links the correct mpi libraries. Should +# be one of h5pcc, h5pcc.openmpi or h5pcc.mpich. 
+HDF5_CC = h5pcc + +# -------------- END PARAMETERS USERS ARE LIKELY TO NEED TO EDIT -------------------- + PROJECT_TYPE = CPU @@ -26,35 +41,72 @@ RTFIND_INC_DIR = ./../../../Project/${PROJECT_TYPE}/CminpackLibrary/Include RTFIND_SRC_DIR = ./../../../Project/${PROJECT_TYPE}//CminpackLibrary/Src ifeq ($(USE_OMP), 1) - OMP_FLAGS = -fopenmp + OMP_FLAGS = -fopenmp endif # C++ compiler flags CXXFLAGS = -Wall -std=c++11 -O3 $(OMP_FLAGS) -Wno-unknown-pragmas +# -------------- TO EDIT IF SOURCE FILES ARE ADDED TO METHOD -------------------- + # Sources SRCS = simulation.cc \ srmhd.cc \ initFunc.cc \ simData.cc \ - wenoUpwinds.cc \ - weno.cc \ + wenoUpwinds.cc \ + weno.cc \ RK2.cc \ rkSplit.cc \ boundaryConds.cc \ fluxVectorSplitting.cc \ SERIAL_SRCS = serialSaveData.cc \ - serialEnv.cc + serialEnv.cc \ + +ifeq ($(USE_HDF), 1) + SERIAL_SRCS += serialSaveDataHDF5.cc \ + initFuncFromCheckpoint.cc \ + serialCheckpointArgs.cc +endif PARALLEL_SRCS = parallelSaveData.cc \ - parallelBoundaryConds.cc \ - parallelEnv.cc + parallelBoundaryConds.cc \ + parallelEnv.cc + +ifeq ($(USE_HDF), 1) + PARALLEL_SRCS += parallelSaveDataHDF5.cc \ + parallelInitFuncFromCheckpoint.cc \ + parallelCheckpointArgs.cc + +endif + +# -------------- END TO EDIT ---------------------------------------------------- ifeq ($(USE_MPI), 1) - SRCS += ${PARALLEL_SRCS} + SRCS += ${PARALLEL_SRCS} +else + SRCS += ${SERIAL_SRCS} +endif + +ifeq ($(USE_HDF), 1) +# Using HDF5 + ifeq ($(USE_MPI), 1) + export HDF5_CXX := $(MPI_CC) + export HDF5_CLINKER := $(MPI_CC) + else + export HDF5_CXX := $(CC) + export HDF5_CLINKER := $(CC) + endif +# Compile all sources with the hdf5 compiler wrapper + CC = $(HDF5_CC) else - SRCS += ${SERIAL_SRCS} +# Not using HDF5 + ifeq ($(USE_MPI), 1) +# Compile all sources with the mpi compiler wrapper + CC = $(MPI_CC) + endif +# If not using hdf5 or mpi, CC remains as defined at top of Makefile endif # Headers @@ -99,7 +151,7 @@ run : $(RTFIND) $(EXEC) @echo '' @echo '' @echo '######################################################' - @echo '# Executing main programme #' + @echo '# Executing main programme #' @echo '######################################################' @./$(EXEC) @@ -111,21 +163,41 @@ clean : # Build objects # ################# +# -------------- TO EDIT IF SOURCE FILES ARE ADDED TO METHOD -------------------- + +# Object containing main function. +# This may need to be edited when creating new examples depending on the h files needed in the main script + +main.o : ./main.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/initFunc.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) $(CXXFLAGS) + +## Build all remaining objects. 
Should only need to update this list if new source files are added to METHOD + simData.o : $(MODULE_DIR)/simData.cc $(INC_DIR)/simData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) +serialCheckpointArgs.o : $(MODULE_DIR)/serialCheckpointArgs.cc $(INC_DIR)/serialCheckpointArgs.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelCheckpointArgs.o : $(MODULE_DIR)/parallelCheckpointArgs.cc $(INC_DIR)/parallelCheckpointArgs.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + initFunc.o : $(MODULE_DIR)/initFunc.cc $(INC_DIR)/initFunc.h $(INC_DIR)/simData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) +initFuncFromCheckpoint.o : $(MODULE_DIR)/initFuncFromCheckpoint.cc $(INC_DIR)/initFuncFromCheckpoint.h $(INC_DIR)/simData.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelInitFuncFromCheckpoint.o : $(MODULE_DIR)/parallelInitFuncFromCheckpoint.cc $(INC_DIR)/parallelInitFuncFromCheckpoint.h $(INC_DIR)/simData.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + + srmhd.o : $(MODULE_DIR)/srmhd.cc $(INC_DIR)/srmhd.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) simulation.o : $(MODULE_DIR)/simulation.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/saveData.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -main.o : ./main.cc $(INC_DIR)/simulation.h $(INC_DIR)/model.h $(INC_DIR)/simData.h $(INC_DIR)/initFunc.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) -I$(RTFIND_INC_DIR) $(CXXFLAGS) - wenoUpwinds.o : $(MODULE_DIR)/wenoUpwinds.cc $(INC_DIR)/wenoUpwinds.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) @@ -133,7 +205,7 @@ weno.o : $(MODULE_DIR)/weno.cc $(INC_DIR)/weno.h $(INC_DIR)/wenoUpwinds.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) RK2.o : $(MODULE_DIR)/RK2.cc $(INC_DIR)/RK2.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) rkSplit.o : $(MODULE_DIR)/rkSplit.cc $(INC_DIR)/rkSplit.h $(INC_DIR)/RK2.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) @@ -145,19 +217,27 @@ parallelBoundaryConds.o : $(MODULE_DIR)/parallelBoundaryConds.cc $(INC_DIR)/para $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) serialSaveData.o : $(MODULE_DIR)/serialSaveData.cc $(INC_DIR)/serialSaveData.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +serialSaveDataHDF5.o : $(MODULE_DIR)/serialSaveDataHDF5.cc $(INC_DIR)/serialSaveDataHDF5.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) parallelSaveData.o : $(MODULE_DIR)/parallelSaveData.cc $(INC_DIR)/parallelSaveData.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +parallelSaveDataHDF5.o : $(MODULE_DIR)/parallelSaveDataHDF5.cc $(INC_DIR)/parallelSaveDataHDF5.h + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) fluxVectorSplitting.o : $(MODULE_DIR)/fluxVectorSplitting.cc $(INC_DIR)/fluxVectorSplitting.h $(INC_DIR)/weno.h $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) serialEnv.o : $(MODULE_DIR)/serialEnv.cc $(INC_DIR)/serialEnv.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) parallelEnv.o : $(MODULE_DIR)/parallelEnv.cc $(INC_DIR)/parallelEnv.h - $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + $(CC) $< -c $(CXXFLAGS) -I$(INC_DIR) + +# -------------- END TO EDIT ---------------------------------------------------- # Executable main : main.o $(OBJS) $(RTFIND_OBJS) From d21994eea815c9ad8712035c1601519f592a5b5a Mon Sep 17 00:00:00 2001 From: "ania.brown" Date: Tue, 26 Jan 2021 11:20:27 +0000 Subject: [PATCH 03/15] cleaning up documentation and removing code no longer required for boundary conditions --- Project/CPU/Include/boundaryConds.h | 14 ------------- Project/CPU/Include/parallelBoundaryConds.h | 23 +++++++++------------ 2 files changed, 
10 insertions(+), 27 deletions(-) diff --git a/Project/CPU/Include/boundaryConds.h b/Project/CPU/Include/boundaryConds.h index 91fba956..6987c50c 100644 --- a/Project/CPU/Include/boundaryConds.h +++ b/Project/CPU/Include/boundaryConds.h @@ -21,21 +21,7 @@ class Bcs Constructor simply stores the pointer to the Data class. @param[in] *data pointer to the Data class - @param[in] *env pointer to the PlatformEnv class */ - Bcs(Data * data, PlatformEnv * env) : data(data) - { - data->bcsSet = 1; - } - - //TODO -- We may not want to allow creation of Bcs object without env in future - //! Constructor store data about simulation (needed for domain) - /*! - Constructor simply stores the pointer to the Data class. - - @param[in] *data pointer to the Data class - */ - Bcs(Data * data) : data(data) { data->bcsSet = 1; diff --git a/Project/CPU/Include/parallelBoundaryConds.h b/Project/CPU/Include/parallelBoundaryConds.h index 8c4062bb..2df79d1c 100644 --- a/Project/CPU/Include/parallelBoundaryConds.h +++ b/Project/CPU/Include/parallelBoundaryConds.h @@ -20,11 +20,9 @@ class ParallelBcs : public Bcs ParallelEnv * env; //!< Pointer to ParallelEnv class containing platform specific info such as MPI details - int xPeriodic, yPeriodic, zPeriodic; - //! Constructor /*! - Calls constructor of base class to store the pointer to the Data class and ParallelEnv class. + Stores the pointer to the Data class and ParallelEnv class. @param[in] *data pointer to Data class @param[in] *env pointer to ParallelEnv class @@ -158,10 +156,10 @@ class ParallelOutflow : public ParallelBcs { public: //! Constructor - /*! - Calls constructor of base class to store the pointer to the Data class. - + /* + Calls constructor of base class to store the pointer to the Data class and ParallelEnv class. @param[in] *data pointer to Data class + @param[in] *env pointer to ParallelEnv class! @sa ParallelBcs::ParallelBcs */ ParallelOutflow(Data * data, ParallelEnv *env) : ParallelBcs(data, env) { } @@ -237,7 +235,6 @@ class ParallelPeriodic : public ParallelBcs //! Constructor /*! Calls constructor of base class to store the pointer to the Data class and ParallelEnv class. - @param[in] *data pointer to Data class @param[in] *env pointer to ParallelEnv class @sa ParallelBcs::ParallelBcs @@ -259,7 +256,7 @@ class ParallelPeriodic : public ParallelBcs }; -//! Flow boundary conditions +//! Flow boundary conditions for a data structure that has been distributed across ranks /*! Boundary conditions used for the Kelvin Helmholtz instability. The x-direction is periodic and y- and z-directions are outflow. @@ -270,9 +267,9 @@ class ParallelFlow : public ParallelBcs public: //! Constructor /*! - Calls constructor of base class to store the pointer to the Data class. - + Calls constructor of base class to store the pointer to the Data class and ParallelEnv class. @param[in] *data pointer to Data class + @param[in] *env pointer to ParallelEnv class @sa ParallelBcs::ParallelBcs */ ParallelFlow(Data * data, ParallelEnv *env) : ParallelBcs(data, env) { } @@ -281,7 +278,7 @@ class ParallelFlow : public ParallelBcs //! Application function /*! - Applies the Outflow boundary conditions to the ghost cells. + Applies the Flow boundary conditions to the ghost cells. @param[in, out] *cons pointer to the conservative (sized) vector @param[in, out] *prims optional pointer to the primitive vector @@ -291,7 +288,7 @@ class ParallelFlow : public ParallelBcs void apply(double * cons, double * prims = NULL, double * aux = NULL); /*! 
- Applies the Outflow boundary conditions to the ghost cells of subdomains that have an external face along + Applies the Flow boundary conditions to the ghost cells of subdomains that have an external face along the y dimension. @param[in, out] *stateVector pointer to one of cons, prims, aux @@ -300,7 +297,7 @@ class ParallelFlow : public ParallelBcs void setYBoundary(double *stateVector, int nVars); /*! - Applies the Outflow boundary conditions to the ghost cells of subdomains that have an external face along + Applies the Flow boundary conditions to the ghost cells of subdomains that have an external face along the z dimension. @param[in, out] *stateVector pointer to one of cons, prims, aux From 3641a47300bffdc5f7556ca7378fe42f9d5083ef Mon Sep 17 00:00:00 2001 From: "ania.brown" Date: Tue, 26 Jan 2021 12:25:28 +0000 Subject: [PATCH 04/15] cleaning up initialisation documentation --- Project/CPU/Include/initFuncFromCheckpoint.h | 27 +++++++++++++++++++ .../Include/parallelInitFuncFromCheckpoint.h | 27 +++++++++++++++++++ Project/CPU/Src/initFuncFromCheckpoint.cc | 12 +-------- .../CPU/Src/parallelInitFuncFromCheckpoint.cc | 11 -------- 4 files changed, 55 insertions(+), 22 deletions(-) diff --git a/Project/CPU/Include/initFuncFromCheckpoint.h b/Project/CPU/Include/initFuncFromCheckpoint.h index 4c93ff7a..f31b3274 100644 --- a/Project/CPU/Include/initFuncFromCheckpoint.h +++ b/Project/CPU/Include/initFuncFromCheckpoint.h @@ -6,13 +6,40 @@ #include "hdf5.h" #include "hdf5_hl.h" +//! Initialise from checkpoint restart file in serial +/*! + @oar + Initialises all cons and prims from a checkpoint restart file. Requires that the Data object has been correctly + initialised with the same parameters as in the checkpoint restart file, which is most easily done by + initialising Data using the CheckpointArgs object. + For distributed (MPI) execution use ParallelCheckpointRestart instead. +*/ class CheckpointRestart : public InitialFunc { public: + //! Initialise from checkpoint restart file + /*! + Stores a pointer to the Data class for reference in its methods + @param[in] *data pointer to Data class containing global simulation data + @param[in] name String, name of the checkpoint restart file including path (if not in execution folder) and extension + @sa InitialFunc + */ CheckpointRestart(Data * data, const char* name); virtual ~CheckpointRestart() { } //!< Destructor + /*! + /brief reads an HDF5 dataset for initialisation from checkpoint restart + + Prepares the buffer for reading from file, and reads a dataset. + + @param group The group within the file (or the file itself for root datasets) + @param name The name of the dataset + @param var Data is stored in 4-d arrays for each class of data (conserved/primitive/auxiliary), + with the 1st dimension being the variable. This argument indicates which variable is being output. + @param data The pointer to the data array. + */ + virtual void readDataSetDouble(const hid_t *group, const char *name, const int *var, double *varData); }; diff --git a/Project/CPU/Include/parallelInitFuncFromCheckpoint.h b/Project/CPU/Include/parallelInitFuncFromCheckpoint.h index a4832095..381d4acb 100644 --- a/Project/CPU/Include/parallelInitFuncFromCheckpoint.h +++ b/Project/CPU/Include/parallelInitFuncFromCheckpoint.h @@ -7,13 +7,40 @@ #include "hdf5_hl.h" #include "parallelEnv.h" +//! Initialise from checkpoint restart file for a data structure that has been distributed across ranks +/*! + @oar + Initialises all cons and prims from a checkpoint restart file. 
Requires that the Data object has been correctly + initialised with the same parameters as in the checkpoint restart file, which is most easily done by + initialising Data using the ParallelCheckpointArgs object. +*/ class ParallelCheckpointRestart : public InitialFunc { public: + //! Initialise from checkpoint restart file + /*! + Stores a pointer to the Data class for reference in its methods + @param[in] *data pointer to Data class containing global simulation data + @param[in] name String, name of the checkpoint restart file including path (if not in execution folder) and extension + @param[in] *env pointer to ParallelEnv class + @sa InitialFunc + */ ParallelCheckpointRestart(Data * data, const char* name, ParallelEnv *env); virtual ~ParallelCheckpointRestart() { } //!< Destructor + /*! + /brief reads an HDF5 dataset for initialisation from checkpoint restart + + Prepares the buffer for reading from file, and reads a dataset. + + @param group The group within the file (or the file itself for root datasets) + @param name The name of the dataset + @param var Data is stored in 4-d arrays for each class of data (conserved/primitive/auxiliary), + with the 1st dimension being the variable. This argument indicates which variable is being output. + @param data The pointer to the data array. + @param[in] *env pointer to ParallelEnv class + */ virtual void readDataSetDouble(const hid_t *group, const char *name, const int *var, double *varData, ParallelEnv *env); }; diff --git a/Project/CPU/Src/initFuncFromCheckpoint.cc b/Project/CPU/Src/initFuncFromCheckpoint.cc index d714b1aa..74107467 100644 --- a/Project/CPU/Src/initFuncFromCheckpoint.cc +++ b/Project/CPU/Src/initFuncFromCheckpoint.cc @@ -8,17 +8,7 @@ #include "hdf5.h" #include "hdf5_hl.h" -/*! - * /brief Writes an HDF5 dataset to file - * - * Prepares the buffer for writing to file, and writes a dataset. - * - * @param group The group within the file (or the file itself for root datasets) - * @param name The name the dataset should have - * @param var Data is stored in 4-d arrays for each class of data (conserved/primitive/auxiliary), - * with the 1st dimension being the variable. This argument indicates which variable is being output. - * @param data The pointer to the data array. - */ + void CheckpointRestart::readDataSetDouble(const hid_t *group, const char *name, const int *var, double *varData) { // Syntax diff --git a/Project/CPU/Src/parallelInitFuncFromCheckpoint.cc b/Project/CPU/Src/parallelInitFuncFromCheckpoint.cc index 17122142..263a8100 100644 --- a/Project/CPU/Src/parallelInitFuncFromCheckpoint.cc +++ b/Project/CPU/Src/parallelInitFuncFromCheckpoint.cc @@ -8,17 +8,6 @@ #include "hdf5.h" #include "hdf5_hl.h" -/*! - * /brief Writes an HDF5 dataset to file - * - * Prepares the buffer for writing to file, and writes a dataset. - * - * @param group The group within the file (or the file itself for root datasets) - * @param name The name the dataset should have - * @param var Data is stored in 4-d arrays for each class of data (conserved/primitive/auxiliary), - * with the 1st dimension being the variable. This argument indicates which variable is being output. - * @param data The pointer to the data array. 
- */ void ParallelCheckpointRestart::readDataSetDouble(const hid_t *group, const char *name, const int *var, double *varData, ParallelEnv* env) { // Syntax From cc44c45167a1cfcf168a5de941830e8ca77c5517 Mon Sep 17 00:00:00 2001 From: "ania.brown" Date: Tue, 26 Jan 2021 13:36:30 +0000 Subject: [PATCH 05/15] updating checkpointArg documentation --- Project/CPU/Include/checkpointArgs.h | 17 ++++++----------- Project/CPU/Include/parallelCheckpointArgs.h | 14 +++++--------- Project/CPU/Include/serialCheckpointArgs.h | 14 +++++--------- 3 files changed, 16 insertions(+), 29 deletions(-) diff --git a/Project/CPU/Include/checkpointArgs.h b/Project/CPU/Include/checkpointArgs.h index a1cb5478..68a48018 100644 --- a/Project/CPU/Include/checkpointArgs.h +++ b/Project/CPU/Include/checkpointArgs.h @@ -6,13 +6,12 @@ #include "platformEnv.h" -//! Wrapper around Data object for populating Data from a checkpoint restart file +//! Object containing parameters required to populate Data from a restart file /*! @par - Class contains all the data of the simulation relevant to any of the other - modules. Containing it in this way prevents issues of cyclic includes, also - results in Simulation as more of an interface than a class that needs to be - known to lower objects---good practice.
+ Parameters are read into CheckpointArgs from a checkpoint restart file. These are then used + to initialise Data. This is the best way to make sure that simulation parameters are consistent with + the restart file being used for initialisation. */ class CheckpointArgs @@ -54,12 +53,8 @@ int //! Constructor /*! - @par - Allocates the memory required for the state arrays and sets the simulation - constants to the given values. Does not set initial state, thats done by - the initialFunc object. - @param name name of checkpoint file to use for restart, including path and extension - @param env environment object containing platform details eg MPI ranks + @par + reads parameters from a checkpoint restart file into this object for use in Data constructor */ CheckpointArgs() {}; diff --git a/Project/CPU/Include/parallelCheckpointArgs.h b/Project/CPU/Include/parallelCheckpointArgs.h index d2a2f09e..588b1bba 100644 --- a/Project/CPU/Include/parallelCheckpointArgs.h +++ b/Project/CPU/Include/parallelCheckpointArgs.h @@ -5,14 +5,12 @@ #include #include "parallelEnv.h" - -//! Wrapper around Data object for populating Data from a checkpoint restart file +//! Object containing parameters required to populate Data from a restart file in parallel /*! @par - Class contains all the data of the simulation relevant to any of the other - modules. Containing it in this way prevents issues of cyclic includes, also - results in Simulation as more of an interface than a class that needs to be - known to lower objects---good practice.
+ Parameters are read into CheckpointArgs from a checkpoint restart file. These are then used + to initialise Data. This is the best way to make sure that simulation parameters are consistent with + the restart file being used for initialisation. */ class ParallelCheckpointArgs : public CheckpointArgs @@ -22,9 +20,7 @@ class ParallelCheckpointArgs : public CheckpointArgs //! Constructor /*! @par - Allocates the memory required for the state arrays and sets the simulation - constants to the given values. Does not set initial state, thats done by - the initialFunc object. + Reads parameters from a checkpoint restart file into this object for use in Data constructor, using parallel HDF5. @param name name of checkpoint file to use for restart, including path and extension @param env environment object containing platform details eg MPI ranks */ diff --git a/Project/CPU/Include/serialCheckpointArgs.h b/Project/CPU/Include/serialCheckpointArgs.h index 8b072fbd..42d4e06d 100644 --- a/Project/CPU/Include/serialCheckpointArgs.h +++ b/Project/CPU/Include/serialCheckpointArgs.h @@ -5,14 +5,12 @@ #include #include "platformEnv.h" - -//! Wrapper around Data object for populating Data from a checkpoint restart file +//! Object containing parameters required to populate Data from a restart file in serial /*! @par - Class contains all the data of the simulation relevant to any of the other - modules. Containing it in this way prevents issues of cyclic includes, also - results in Simulation as more of an interface than a class that needs to be - known to lower objects---good practice.
+ Parameters are read into CheckpointArgs from a checkpoint restart file. These are then used + to initialise Data. This is the best way to make sure that simulation parameters are consistent with + the restart file being used for initialisation. */ class SerialCheckpointArgs : public CheckpointArgs @@ -22,9 +20,7 @@ class SerialCheckpointArgs : public CheckpointArgs //! Constructor /*! @par - Allocates the memory required for the state arrays and sets the simulation - constants to the given values. Does not set initial state, thats done by - the initialFunc object. + Reads parameters from a checkpoint restart file into this object for use in Data constructor, using serial HDF5. @param name name of checkpoint file to use for restart, including path and extension @param env environment object containing platform details eg MPI ranks */ From 815800f5b5721f27629a28d3dcb225b3f392ca8e Mon Sep 17 00:00:00 2001 From: "ania.brown" Date: Tue, 26 Jan 2021 13:44:03 +0000 Subject: [PATCH 06/15] cleaning up platform env comments and removing code that isn't required --- Project/CPU/Include/platformEnv.h | 19 ------------------- Project/CPU/Include/serialEnv.h | 22 ++-------------------- Project/CPU/Src/serialEnv.cc | 30 ------------------------------ 3 files changed, 2 insertions(+), 69 deletions(-) diff --git a/Project/CPU/Include/platformEnv.h b/Project/CPU/Include/platformEnv.h index 3f3c7e11..24572053 100644 --- a/Project/CPU/Include/platformEnv.h +++ b/Project/CPU/Include/platformEnv.h @@ -41,25 +41,6 @@ class PlatformEnv //! Destructor virtual ~PlatformEnv() {} - - //! Check for external boundary - /*! - @par - Returns true if a subdomain is on the external boundary of the simulation grid in a particular direction - @param[in] dimension {x=0, y=1, z=2} - @param[in] direction direction to look for the external boundary in a particular direction {low=0, high=1} - */ - virtual int isNeighbourExternal(int dimension, int direction) = 0; - - //! Create cartesian grid of processes - /*! - @par - Creates the cartesian grid of processes that are responsible for the corresponding subdomains in the simulation grid - @param[in] xPeriodic whether the x dimension has periodic boundary conditions - @param[in] yPeriodic whether the y dimension has periodic boundary conditions - @param[in] zPeriodic whether the z dimension has periodic boundary conditions - */ - virtual void setParallelDecomposition(int xPeriodic, int yPeriodic, int zPeriodic) = 0; }; #endif diff --git a/Project/CPU/Include/serialEnv.h b/Project/CPU/Include/serialEnv.h index f1cae491..a54be5f2 100644 --- a/Project/CPU/Include/serialEnv.h +++ b/Project/CPU/Include/serialEnv.h @@ -19,30 +19,12 @@ class SerialEnv : public PlatformEnv { public: - //! Constructor -- Initialize global MPI communicator + // TODO -- should just hard code nxRanks=nyRanks=nzRanks=1 for serialEnv + //! Constructor -- record that we are running on only a single process SerialEnv(int *argcP, char **argvP[], int nxRanks, int nyRanks, int nzRanks, int testing=0); //! Destructor virtual ~SerialEnv(); - - //! Check for external boundary - /*! - @par - Returns true if a subdomain is on the external boundary of the simulation grid in a particular direction - @param[in] dimension {x=0, y=1, z=2} - @param[in] direction direction to look for the external boundary in a particular direction {low=0, high=1} - */ - int isNeighbourExternal(int dimension, int direction); - - //! Create cartesian grid of processes and calculate neighbours along that grid for each process - /*! 
- @par - Creates the cartesian grid of processes that are responsible for the corresponding subdomains in the simulation grid - @param[in] xPeriodic whether the x dimension has periodic boundary conditions - @param[in] yPeriodic whether the y dimension has periodic boundary conditions - @param[in] zPeriodic whether the z dimension has periodic boundary conditions - */ - void setParallelDecomposition(int xPeriodic, int yPeriodic, int zPeriodic); }; #endif diff --git a/Project/CPU/Src/serialEnv.cc b/Project/CPU/Src/serialEnv.cc index 7daf1799..6255c044 100644 --- a/Project/CPU/Src/serialEnv.cc +++ b/Project/CPU/Src/serialEnv.cc @@ -25,35 +25,5 @@ SerialEnv::~SerialEnv() } -int SerialEnv::isNeighbourExternal(int dimension, int direction) -{ - int isExternal = 0; - int dimRank = 0; - int maxRank = 0; - - if (dimension==0) { - dimRank = xRankId; - maxRank = nxRanks; - } else if (dimension==1) { - dimRank = yRankId; - maxRank = nyRanks; - } else { - dimRank = zRankId; - maxRank = nzRanks; - } - - if (direction==0){ - isExternal = (dimRank==0); - } else { - isExternal = (dimRank==maxRank-1); - } - - return isExternal; -} - -void SerialEnv::setParallelDecomposition(int xPeriodic, int yPeriodic, int zPeriodic) -{ - -} From 5cb17a002f56ac08ae5d3b9f2f197f8d42719295 Mon Sep 17 00:00:00 2001 From: "ania.brown" Date: Tue, 26 Jan 2021 15:46:24 +0000 Subject: [PATCH 07/15] cleaning up inline documentation --- Project/CPU/Include/parallelSaveData.h | 2 +- Project/CPU/Include/parallelSaveDataHDF5.h | 2 +- Project/CPU/Include/serialSaveData.h | 2 +- Project/CPU/Include/simData.h | 5 ++++- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/Project/CPU/Include/parallelSaveData.h b/Project/CPU/Include/parallelSaveData.h index 3fb33dfd..0554ee03 100644 --- a/Project/CPU/Include/parallelSaveData.h +++ b/Project/CPU/Include/parallelSaveData.h @@ -12,7 +12,7 @@ using namespace std; -//! Class used to save simulation data using multiple processes +//! Class used to save simulation data to a text format using multiple processes /*! @par Write outputs through the simple system of collecting all simulation data onto process 0 diff --git a/Project/CPU/Include/parallelSaveDataHDF5.h b/Project/CPU/Include/parallelSaveDataHDF5.h index 52c2f5b0..e0f7efa2 100644 --- a/Project/CPU/Include/parallelSaveDataHDF5.h +++ b/Project/CPU/Include/parallelSaveDataHDF5.h @@ -14,7 +14,7 @@ using namespace std; -//! Class used to save simulation data to HDF5 using a single process +//! Class used to save simulation data to HDF5 using multiple processes /*! @par Class is initialized with the data that is to be saved. Saves the simulation diff --git a/Project/CPU/Include/serialSaveData.h b/Project/CPU/Include/serialSaveData.h index 7900ccba..33ce82eb 100644 --- a/Project/CPU/Include/serialSaveData.h +++ b/Project/CPU/Include/serialSaveData.h @@ -12,7 +12,7 @@ using namespace std; -//! Class used to save simulation data using a single process +//! Class used to save simulation data to a text format using a single process /*! @par Class is initialized with the data that is to be saved. 
Saves the simulation diff --git a/Project/CPU/Include/simData.h b/Project/CPU/Include/simData.h index d769ec19..9b074c56 100644 --- a/Project/CPU/Include/simData.h +++ b/Project/CPU/Include/simData.h @@ -184,6 +184,7 @@ class Data @param ymax maximum value of y domain @param zmin minimum value of z domain @param zmax maximum value of z domain + @param env environment object containing platform details eg MPI ranks @param endTime desired end time of the simulation @param cfl courant factor @param Ng number of ghost cells in each direction @@ -193,7 +194,6 @@ class Data @param mu1 charge mass ratio of species 1 @param mu2 charge mass ratio of species 2 @param frameskip number of timesteps per file output - @param env environment object containing platform details eg MPI ranks @param reportItersPeriod period with which time step data is reported to screen during program execution */ Data(int nx, int ny, int nz, @@ -220,6 +220,9 @@ class Data @param args simulation arguments such as cfl, sigma etc, as read from checkpoint restart file @param mu1 charge mass ratio of species 1 @param mu2 charge mass ratio of species 2 + @param env environment object containing platform details eg MPI ranks + @param frameskip number of timesteps per file output + @param reportItersPeriod period with which time step data is reported to screen during program execution */ Data(CheckpointArgs args, PlatformEnv *env, double mu1=-1.0e4, double mu2=1.0e4, int frameskip=10, int reportItersPeriod=1, int functionalSigma=false, double gam=12); From 5d748a0f1be823dfec1252692ba8599a2ccf4896 Mon Sep 17 00:00:00 2001 From: aniabrown Date: Thu, 28 Jan 2021 11:39:47 +0000 Subject: [PATCH 08/15] Update README.md Initial clean up of user documentation. Adding quick start and removing detail to go in workflows.md --- README.md | 193 ++++++++++++++---------------------------------------- 1 file changed, 50 insertions(+), 143 deletions(-) diff --git a/README.md b/README.md index e37e1357..a89c0265 100644 --- a/README.md +++ b/README.md @@ -45,6 +45,8 @@ additional source term into the equations of motion. ## Getting started + +### Quick start To begin using METHOD, first clone the repository git clone https://github.com/AlexJamesWright/METHOD.git @@ -53,121 +55,75 @@ To set up the correct directories for storing data, run the provided shell scrip bash makePaths.sh -Next, you will need to ensure the Makefiles are valid for your system, changing any compilers to your preferred ones and setting the GoogleTest home directory to its location on you machine. That should be it. Should be. - ---------------------------------------------- ---------------------------------------------- -

- - -## Note -METHOD is currently going through a revamp, with a number of additions and changes to its functionality. The CPU code (METHOD/Project/CPU/) can be run using a single process, openMP, or openMP/MPI, The Makefile will build the executable according to the `USE_MPI` and `USE_OMP` definitions (generally OMP is only faster for very high reslution runs). The GPU code (METHOD/Project/CPU/) should work, but has not been tested in a long time and has a reduced functionality compared to the CPU. This may change soon. - -## Testing -Once METHOD is installed, check the latest build is working by running the unit tests. - -We use the Google Test framework for unit testing---any tests are saved in the `Tests/CPU/Src` or `Tests/GPU/Src` directory. You will need to set the `GTEST_DIR` environment variable (in the Makefile within `Tests/GPU/Src` and `Tests/CPU/Src`) to point to the GoogleTest root directory. - -The CPU and GPU versions have separate testing directories. As far as possible the tests are the same, but there is additional testing such that the GPU results match the CPU to within floating point accuracy. In addition, the CPU code can be tested in serial and multiprocessor mode to ensure they match. First, run - - make test - -from the `Tests/CPU` directory, then - - py.test -v Src/compareSerialAndParallel.py - -to ensure the MPI run matches single processor mode. -To run the GPU tests, `cd` to `Tests/GPU` and run `make test`. Currently, as the -CPU code saves in a different format to the GPU code, checks that CPU and GPU code -match dont work anymore. - -It is a good idea to check that the examples run successfully next. - ---------------------------------------------- ---------------------------------------------- -

- - -## Example Simulations == Best way to understand functionality! - -Example simulations have been provided that illustrate how to use the -various classes. When running a parallel test problem (i.e. one that compiles -using MPI even if run in single processor mode), build the executable using - - make main - -and run it using - - mpirun -n nproc main - -where nprocs is the total number of processors used (defined in the main.cc for -that problem)---tests that require MPI are stored in directories with the name -`Tests/CPU/*/*MPI/*` and are all set to run with 4 processors, `nprocs=4`. To run -serial tests, using the naming convention `Tests/CPU/*/*Serial*/`, simply use - - make run - -Data is saved in the *Examples/Data* directory and is easily viewed -using the interactivePlot script. We suggest that the spyder environment from -Anaconda is the best way to use this tool. - -### InteractivePlot -------------------- -To use the plotting tools, run from the root Example directory something like - - spyder interactivePlot.py - -Running this script as main will load any simulation data into the `Plot` object. - -This object has a number of pre-made plotting routines, all of which begin with - - Plot.plot +Compile a simple example: single fluid, Kelvin-Helmholtz instability with random perturbation. This example will run in serial on one CPU core and should only require the gnu c++ compiler g++ to be installed. -If you wish to create your own plot, you can access the simulation data using the +``` +cd Examples/KelvinHelmholtz/SingleFluidIdealSerial/ +make +``` - Plot.prims +Run the example using: +``` +./main +``` - Plot.cons +This will run a small number of timesteps and save the final simulation state in plaintext form in Examples/Data/Final. -arrays, etc. The first index is the variable, followed by `x`, `y`, and `z` index. +``` +t = 0.000000 +t = 0.167385 +t = 0.498842 +t = 0.830298 +t = 1.161754 +t = 1.493211 +t = 1.824667 +t = 2.156123 +t = 2.487580 +t = 2.819036 +``` -To view the available primitive variables, use `Plot.cleanPrimLabels`. +For instructions on running other simulations on a range of computer architectures and with different options for data output format, see [workflows.md](workflows.md). -In addition, the Plot object contains a dictionary linking to all the system -constants. For example, to get the value for the adiabatic index used in the -simulation use `Plot.c['gamma']`. +### Running tests -### Animation -------------- +The following instructions will run a subset of unit tests for testing the core functionality of METHOD when running in serial on a single CPU core and outputting data in plain text format. These tests require the gnu c++ compiler g++ and Python 3 to be installed. For instructions on running the full test suite, which tests a range of computer architectures and output formats, see [workflows.md](workflows.md). -For the Kelvin-Helmholtz simulation, running the `animation.py` script will create an -animatation called `Output.gif` in the root Example directory to view (may take up -to ten minutes to run the simulation and make the animation). +First, make sure to have cloned METHOD and run `bash makePaths.sh` as above. 
-Make sure you clean any timeseries data before running the simulation by running +Set up a python virtual environment with modules required for testing by typing the following in the root directory: - bash cleanData.sh +``` +python3 -m venv venv +source venv/bin/activate +python -m pip install -r Scripts/IridisEnv/requirements.txt +``` -from the root Examples/ directory. The variable being animated can be changed -manually. +Clone the GoogleTest repository into the directory above the METHOD root directory: ---------------------------------------------- ---------------------------------------------- -

+``` +git clone https://github.com/google/googletest.git +``` +Run the serial CPU plain text tests using: -## Builds -To build all the elements of the programme at once go from the Project directory, to either Serial (if you dont have CUDA capable hardware) or Parallel (if you do) and use +``` +cd Tests/CPU +make test_serial +``` - make build +This will run a number of tests, and should end with all tests passing in output similar to: -to build each element of the programme. +``` +... +[----------] Global test environment tear-down +[==========] 21 tests from 5 test suites ran. (7238 ms total) +[ PASSED ] 21 tests. +``` --------------------------------------------- ---------------------------------------------

- ## Documentation I have tried to maintain good documentation standards, but cant guarantee that everything you want will be here. If you are unsure of any functionality, first look in the respective header file, source code, and you are still unsure contact the authors. @@ -178,55 +134,6 @@ To build the documentation locally simply go the the `Doxumentation` folder and doxygen -## Rootfinder -Some simulations will require the use of an N-dimensional footfinder, either for a (semi-) implicit time integrator or -for the conservative to primitive transformation. We have elected to use the [CMINPACK library](https://github.com/devernay/cminpack)\*, and to use or implement any changes in the library, *cd* into the Cminpack directory and hit - - make objects - -to compile all the object files. Then, if the build was successful, don't touch/look at this library again. - ---------------------------------------------- ---------------------------------------------- -

- -## Simulations - -Simulations are run from the *main.cc/cu* scripts. Simply use - - make run - -to compile and run the simulation from within `Project/CPU` or `Project/GPU`. The executable is labelled `main` so - - make build - ./main - -will also work. - ---------------------------------------------- ---------------------------------------------- - -

- -## Saving simulation data - -The *Src* directory has a tool for interactively plotting the end state of a simulation. The `interactivePlot.py` script requires data to be saved after the simulation in the *Data* -folder. This is done using the SaveData class---call the class constructor with a pointer to the SimData class whose data you wish to save. Then, simply include - - save.saveAll(); - -in *main* after the simulation has been evolved. Running the python script as main will load and store the data ready for plotting, and the easiest way to interact with the data is in a python environment such as spyder. - -There is also the functionality to save time series data. In order to reduce memory requirements, the user must specify the variables they wish to save (names of the variables should match those given as the labels in the model's header file. To save variables, go into `simulation.cc/cu` and change the three conditional blocks to save the variables you want using - - this->save->saveVar('SomeVar', totalNumberOfUserDefinedVars) - -NOTE: The second variable must be included and be the number of variables you wish to save at each output. - ---------------------------------------------- ---------------------------------------------- -

- ## Authors From 625a3e2a2d147b3ed652bfb273d5eef196b579d9 Mon Sep 17 00:00:00 2001 From: aniabrown Date: Thu, 28 Jan 2021 11:41:02 +0000 Subject: [PATCH 09/15] Update README.md --- README.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index a89c0265..fc9d2035 100644 --- a/README.md +++ b/README.md @@ -133,7 +133,10 @@ Alternatively, if you are unsure of any of the functionality, find the documenta To build the documentation locally simply go the the `Doxumentation` folder and run doxygen - + +--------------------------------------------- +--------------------------------------------- +

## Authors @@ -143,5 +146,3 @@ To build the documentation locally simply go the the `Doxumentation` folder and --------------------------------------------- ---------------------------------------------

- -\* *due to this cryptic package we have moved bits about and re-ordered various headers and includes. Most of the preprocessor stuff has been deleted (using NVIDIA hardware will result in Cminpack reals defaulting to double precision), some functions have been excluded as they're not needed here, and now for any usage we just include the cminpack.h header file (as opposed to including the CUDA scripts directly).* From 2cc424992cc1ec13881fe379c8e2fe1bed4bd85d Mon Sep 17 00:00:00 2001 From: aniabrown Date: Thu, 28 Jan 2021 11:47:53 +0000 Subject: [PATCH 10/15] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index fc9d2035..4229f7a3 100644 --- a/README.md +++ b/README.md @@ -142,6 +142,7 @@ To build the documentation locally simply go the the `Doxumentation` folder and [Alex Wright](http://cmg.soton.ac.uk/people/ajw1e16/) Email: a.j.wright@soton.ac.uk
[Ania Brown](https://github.com/aniabrown)
+[Sam Mangham](https://github.com/smangham)
[Ian Hawke](https://cmg.soton.ac.uk/people/ih3/) --------------------------------------------- --------------------------------------------- From b49d20339b4dbae92e5656bc3cf1e9fb8cacf59d Mon Sep 17 00:00:00 2001 From: aniabrown Date: Thu, 28 Jan 2021 11:54:25 +0000 Subject: [PATCH 11/15] tweaking makefiles to match getting started instructions in readme --- Tests/CPU/Makefile | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/Tests/CPU/Makefile b/Tests/CPU/Makefile index 2e20c35d..02dca17c 100644 --- a/Tests/CPU/Makefile +++ b/Tests/CPU/Makefile @@ -18,6 +18,8 @@ # -------------- PARAMETERS FOR USERS TO EDIT -------------------- +# A c++ capable compiler +CC = g++ # The c++ capable mpi compiler. In systems with multiple versions of MPI, the particular version may need to be specified with eg # mpicxx.mpich MPI_CC = mpic++ @@ -28,11 +30,11 @@ MPIEXEC = mpirun # The hdf5 compiler. This must be the version of the hdf5 # compiler available on your system that links the correct mpi libraries. Should # be one of h5pcc, h5pcc.openmpi or h5pcc.mpich. -CC = h5pcc +HD5_CC = h5pcc # Points to the root of Google Test, relative to where this file is. # Remember to tweak this if you move this file. -GTEST_DIR = ../../../GoogleTest +GTEST_DIR = ../../../googletest/googletest # -------------- END PARAMETERS USERS ARE LIKELY TO NEED TO EDIT -------------------- @@ -265,13 +267,13 @@ serialSaveData.o : $(MODULE_DIR)/serialSaveData.cc $(INC_DIR)/serialSaveData.h @$(CC) $(CPPFLAGS) $(CXXFLAGS) -c $(MODULE_DIR)/serialSaveData.cc -I$(INC_DIR) serialSaveDataHDF5.o : $(MODULE_DIR)/serialSaveDataHDF5.cc $(INC_DIR)/serialSaveDataHDF5.h - @$(CC) $(HDF5_FLAGS) $(MPI_FLAGS) $(CPPFLAGS) $(CXXFLAGS) -c $(MODULE_DIR)/serialSaveDataHDF5.cc -I$(INC_DIR) + @$(HD5_CC) $(HDF5_FLAGS) $(MPI_FLAGS) $(CPPFLAGS) $(CXXFLAGS) -c $(MODULE_DIR)/serialSaveDataHDF5.cc -I$(INC_DIR) parallelSaveData.o : $(MODULE_DIR)/parallelSaveData.cc $(INC_DIR)/parallelSaveData.h @$(CC) $(MPI_FLAGS) $(CPPFLAGS) $(CXXFLAGS) -c $(MODULE_DIR)/parallelSaveData.cc -I$(INC_DIR) parallelSaveDataHDF5.o : $(MODULE_DIR)/parallelSaveDataHDF5.cc $(INC_DIR)/parallelSaveDataHDF5.h - @$(CC) $(HDF5_FLAGS) $(MPI_FLAGS) $(CPPFLAGS) $(CXXFLAGS) -c $(MODULE_DIR)/parallelSaveDataHDF5.cc -I$(INC_DIR) + @$(HD5_CC) $(HDF5_FLAGS) $(MPI_FLAGS) $(CPPFLAGS) $(CXXFLAGS) -c $(MODULE_DIR)/parallelSaveDataHDF5.cc -I$(INC_DIR) # Platform env serialEnv.o : $(MODULE_DIR)/serialEnv.cc $(INC_DIR)/serialEnv.h @@ -392,10 +394,10 @@ test_REGIME : wenoUpwinds.o srmhd.o test_REGIME.o REGIME.o initFunc.o RK2.o rkS test_hdf5_rk2.o : $(TEST_DIR)/test_hdf5_rk2.cc $(INC_DIR)/RK2.h \ $(INC_DIR)/twoFluidEMHD.h $(INC_DIR)/simulation.h $(INC_DIR)/simData.h $(GTEST_HEADERS) - $(CC) $(HDF5_FLAGS) $(MPI_FLAGS) $(CPPFLAGS) $(CXXFLAGS) -c $(TEST_DIR)/test_hdf5_rk2.cc -I$(INC_DIR) -I$(RTFIND_INC_DIR) + $(HD5_CC) $(HDF5_FLAGS) $(MPI_FLAGS) $(CPPFLAGS) $(CXXFLAGS) -c $(TEST_DIR)/test_hdf5_rk2.cc -I$(INC_DIR) -I$(RTFIND_INC_DIR) test_hdf5_rk2 : wenoUpwinds.o test_hdf5_rk2.o srmhd.o srrmhd.o fluxVectorSplitting.o boundaryConds.o simData.o RK2.o initFunc.o simulation.o weno.o serialEnv.o serialSaveDataHDF5.o $(RTFIND_OBJS) gtest_main.a - $(CC) $(HDF5_FLAGS) $(MPI_FLAGS) $(CPPFLAGS) $(CXXFLAGS) -pthread $^ -o $@ + $(HD5_CC) $(HDF5_FLAGS) $(MPI_FLAGS) $(CPPFLAGS) $(CXXFLAGS) -pthread $^ -o $@ @@ -428,9 +430,9 @@ test_parallel_rkSplit : main.o test_parallel_rkSplit.o weno.o wenoUpwinds.o srmh # parallel HDF5 tests test_hdf5_parallel_rk2.o : $(TEST_DIR)/test_hdf5_parallel_rk2.cc 
$(INC_DIR)/RK2.h \ $(INC_DIR)/twoFluidEMHD.h $(INC_DIR)/simulation.h $(INC_DIR)/simData.h $(GTEST_HEADERS) - $(CC) $(HDF5_FLAGS) $(MPI_FLAGS) $(CPPFLAGS) $(CXXFLAGS) -c $(TEST_DIR)/test_hdf5_parallel_rk2.cc -I$(INC_DIR) -I$(RTFIND_INC_DIR) + $(HD5_CC) $(HDF5_FLAGS) $(MPI_FLAGS) $(CPPFLAGS) $(CXXFLAGS) -c $(TEST_DIR)/test_hdf5_parallel_rk2.cc -I$(INC_DIR) -I$(RTFIND_INC_DIR) test_hdf5_parallel_rk2 : main.o wenoUpwinds.o test_hdf5_parallel_rk2.o srmhd.o srrmhd.o fluxVectorSplitting.o parallelBoundaryConds.o simData.o RK2.o initFunc.o simulation.o weno.o parallelEnv.o parallelSaveDataHDF5.o $(RTFIND_OBJS) gtest.a - $(CC) $(HDF5_FLAGS) $(MPI_FLAGS) $(CPPFLAGS) $(CXXFLAGS) -pthread $^ -o $@ + $(HD5_CC) $(HDF5_FLAGS) $(MPI_FLAGS) $(CPPFLAGS) $(CXXFLAGS) -pthread $^ -o $@ From 97eb74536eb0171e09174309112f2fccea37692a Mon Sep 17 00:00:00 2001 From: aniabrown Date: Thu, 28 Jan 2021 11:58:52 +0000 Subject: [PATCH 12/15] updating directory names for doxygen documentation --- Doxumentation/Doxyfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Doxumentation/Doxyfile b/Doxumentation/Doxyfile index cda63d72..3b3134e7 100644 --- a/Doxumentation/Doxyfile +++ b/Doxumentation/Doxyfile @@ -790,7 +790,7 @@ WARN_LOGFILE = # spaces. See also FILE_PATTERNS and EXTENSION_MAPPING # Note: If this tag is empty the current directory is searched. -INPUT = ../Project/Serial . +INPUT = ../Project/CPU . # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses From 981fc53e5c9a524ca597a8847131af9c41d79009 Mon Sep 17 00:00:00 2001 From: aniabrown Date: Thu, 28 Jan 2021 12:06:15 +0000 Subject: [PATCH 13/15] Create workflows.md Update workflows.md Update workflows.md Update workflows.md Working on build instructions Update workflows.md Update workflows.md Update workflows.md Update workflows.md Update workflows.md Update workflows.md Added examples instructions submission script for examples renaming Update workflows.md Update workflows.md running from project folder Update workflows.md Update workflows.md Update workflows.md --- Project/CPU/Makefile | 2 +- Scripts/IridisEnv/examples_cpu_job.sh | 18 ++ workflows.md | 383 ++++++++++++++++++++++++++ 3 files changed, 402 insertions(+), 1 deletion(-) create mode 100644 Scripts/IridisEnv/examples_cpu_job.sh create mode 100644 workflows.md diff --git a/Project/CPU/Makefile b/Project/CPU/Makefile index 582a633d..b0834102 100644 --- a/Project/CPU/Makefile +++ b/Project/CPU/Makefile @@ -5,7 +5,7 @@ # Whether to use MPI for multi-cpu processing USE_MPI = 1 -USE_OMP = 1 +USE_OMP = 0 USE_HDF = 1 # Compiler diff --git a/Scripts/IridisEnv/examples_cpu_job.sh b/Scripts/IridisEnv/examples_cpu_job.sh new file mode 100644 index 00000000..4cd880c7 --- /dev/null +++ b/Scripts/IridisEnv/examples_cpu_job.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +#SBATCH --ntasks-per-node=4 # Tasks per node +#SBATCH --nodes=1 # Number of nodes requested +#SBATCH --time=00:10:00 # walltime + +module purge +module load gcc/6.4.0 +module load hdf5/1.10.2/gcc/parallel +#module load hdf5/1.10.2/gcc/serial + +module list + +make clean +make + +mpirun -np 4 ./main 10 + diff --git a/workflows.md b/workflows.md new file mode 100644 index 00000000..481fe9fb --- /dev/null +++ b/workflows.md @@ -0,0 +1,383 @@ + +# METHOD workflows + +## Introduction + +This document outlines detailed instructions for compiling and running METHOD on a variety of computer architectures. 
The document steps through installing dependencies, running the example scripts in `Examples`, running the test suite in `Tests`, and finally creating new simulation scripts in the `Project` directory. See the [README](README.md) for instructions on getting started with just the simplest available example.
+
+METHOD can run serially on a single CPU core and optionally also use OpenMP (suitable for using multiple CPU cores within a single computer) and MPI (suitable for running across several nodes in a compute cluster environment). There is also a GPU version which implements a more limited number of models and initial conditions. These options require a compiler which implements OpenMP 3.1, the MPI libraries (OpenMPI 4.0 or later, or MPICH 3.0 or later; see the MPI section below), and an NVIDIA GPU with CUDA drivers and toolkit, respectively.
+
+Both the CPU and GPU versions of METHOD can output data in either plain text or HDF5 format. To output HDF5 from a single process, either the serial or parallel HDF5 libraries must be installed. To output HDF5 from a multiprocess MPI code, the parallel version of HDF5 must be installed.
+
+Four target systems will be used as examples in these instructions.
+
+1. Ubuntu 18.04 (Bionic) with GTX 1050 NVIDIA GPU
+2. Ubuntu 20.04 (Focal) with GTX 1050 NVIDIA GPU
+3. MacOS 10.15 (Catalina) with no GPU
+4. Iridis 5 supercomputer compute node, RHEL 7.0 with Module system for software, with GTX 1080 NVIDIA GPU
+
+---------------------------------------------
+---------------------------------------------
+

+
+## Installing METHOD and dependencies
+
+To begin using METHOD, first clone the repository
+
+    git clone https://github.com/AlexJamesWright/METHOD.git
+
+Then set up the correct directories for storing data by running the provided shell script from the project root,
+
+    bash makePaths.sh
+
+### OpenMP
+
+**Linux**: the GNU compiler (gcc/g++) version 4.8 or later supports OpenMP 3.1 for C/C++. This or a later version is the default on most Linux systems.
+
+**MacOS Catalina**: By default, gcc maps to Apple Clang 11.0, which does not support OpenMP. gcc can in theory be installed using homebrew, though in practice we recommend disabling OpenMP (set USE_OMP=0 in any Makefiles used) on MacOS, as MPI gives similar performance and is easier to install.
+
+### MPI
+
+> **Note**: While it is possible to use multiple versions of MPI on the same system, it is easier if there is only one. Check if MPI is already installed using `mpirun --version`. Currently supported versions are MPICH 3.0 or later and OpenMPI 4.0 or later.
+
+**Ubuntu 20.04**: Install the MPI libraries using `sudo apt-get install libopenmpi-dev` or `sudo apt-get install libmpich-dev`. This will install OpenMPI v4 or MPICH v3.
+
+**Ubuntu 18.04**: Install the MPI libraries using `sudo apt-get install libmpich-dev`. This will install MPICH v3. Note that the version of OpenMPI available through the package manager (v2) is not supported by METHOD.
+
+**Iridis 5**: Load the mpich module using `module load mpich/3.2.1/gcc`. Note that this will need to be done on the login node if compiling on the login node, and also in the batch script used to run METHOD.
+
+**MacOS Catalina**: The two options are to install using [homebrew](https://brew.sh/) or to build from source.
+
+1) Install homebrew using instructions here: https://brew.sh/. Then:
+```
+brew update
+brew install open-mpi
+```
+
+2) Build from source:
+
+```
+curl https://download.open-mpi.org/release/open-mpi/v4.1/openmpi-4.1.0.tar.gz --output openmpi-4.1.0.tar.gz
+gunzip -c openmpi-4.1.0.tar.gz | tar xf -
+cd openmpi-4.1.0
+./configure --prefix=/usr/local
+<...lots of output...>
+make all install
+```
+
+> **Known issues**:
+OpenMPI v2 has been found to lead to stack smashing errors when creating an HDF5 file in parallel. See Issue [#31](https://github.com/AlexJamesWright/METHOD/issues/31).
+
+### CUDA toolkit and drivers
+
+**Ubuntu**: To install the latest version of CUDA available through the package manager, use `sudo apt install nvidia-cuda-toolkit`. Otherwise, to install a specific version, follow the instructions here: https://developer.nvidia.com/Cuda-Toolkit-archive. METHOD has been tested with CUDA 8 and CUDA 10.
+
+**Iridis 5**: Load the CUDA module using `module load cuda/8.0`. Note that this will need to be done on the login node if compiling on the login node, and also in the batch script used to run METHOD.
+
+### HDF5
+
+**Ubuntu**: Depending on which version of MPI is installed, install HDF5 using either `sudo apt-get install libhdf5-openmpi-dev` or `sudo apt-get install libhdf5-mpich-dev`.
+
+**Iridis 5**: If not using mpi, load the serial hdf5 module using `module load hdf5/1.10.2/gcc/serial`. If using mpi, use `module load hdf5/1.10.2/gcc/parallel`. Note that this will need to be done on the login node if compiling on the login node, and also in the batch script used to run METHOD.
+
+**MacOS Catalina**: Install homebrew using instructions here: https://brew.sh/. Then use `brew install hdf5` for the serial version or `brew install hdf5-mpi` for the mpi version.
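+
+Whichever system you are on, a quick sanity check of the toolchain before building can save debugging time later. A minimal sketch, assuming a bash shell and that the relevant tools are on your PATH (the exact wrapper names, eg `h5pcc.mpich` or `mpirun.mpich`, vary between systems):
+
+```
+mpirun --version    # confirm which MPI implementation (OpenMPI/MPICH) will be used
+mpic++ --version    # the c++ capable MPI compiler wrapper
+h5pcc -show         # shows the compiler and libraries the HDF5 wrapper links against
+nvidia-smi          # GPU builds only: lists the CUDA driver version and GPU model
+```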
+
+---------------------------------------------
+---------------------------------------------
+

+
+## Running example simulations: the best way to understand functionality!
+
+Example simulations have been provided that illustrate how to use the various classes.
+
+> **Note**: Currently only the examples in Examples/KelvinHelmholtz are up to date with the most recent version of METHOD. All examples are configured to run on CPU, not GPU.
+
+There are two initial conditions modelled in this folder: SingleFluidIdeal and SingleFluidIdealRandom. For these initial conditions, the following configurations are available:
+
+* Serial: Run on a single CPU core, output data to plain text format
+* SerialHDF5: Run on a single CPU core, output data to hdf5 format
+* MPI: Run on multiple CPU cores using MPI, output data to plain text format
+* MPIHDF5: Run on multiple CPU cores using MPI, output data to hdf5 format
+
+There are also SingleFluidIdealRandomMPIHDF5CheckpointRestart and SingleFluidIdealRandomSerialHDF5CheckpointRestart, which illustrate checkpoint restart by initialising the simulation from a restart file written out during a previous SingleFluidIdealRandom simulation (in either serial or parallel).
+
+### Building examples
+
+In general, you should be able to build the examples simply by typing `make` in each example directory (make sure to have loaded the correct modules if using Iridis 5).
+
+There are some instances where you will need to edit the first part of the Makefile, shown below. If your system has multiple versions of MPI installed, you may need to specify the particular version of the mpi compiler to use through the MPI_CC variable. If using HDF5, also make sure that the correct version of MPI is being used by the HDF5 compiler using the HDF5_CC variable.
+
+```
+# -------------- PARAMETERS USERS ARE LIKELY TO NEED TO EDIT -------------------
+# Whether to use MPI for multi-cpu processing
+USE_MPI = 1
+USE_OMP = 0
+USE_HDF = 1
+
+# Compiler
+CC = g++
+# --- if USE_MPI ---
+# If using mpi, aditionally specify a c++ capable mpi compiler. In systems with multiple versions of MPI,
+# the particular version may need to be specified with eg mpicxx.mpich
+MPI_CC = mpic++
+# --- if USE_HDF ---
+# If using hdf5, additionally specify a hdf5 compiler. If using mpi, this must be the version of the hdf5
+# compiler available on your system that links the correct mpi libraries. Should
+# be one of h5pcc, h5pcc.openmpi or h5pcc.mpich.
+HDF5_CC = h5pcc
+
+# -------------- END PARAMETERS USERS ARE LIKELY TO NEED TO EDIT --------------------
+```
+
+### Running examples locally after building
+
+**Serial**: To run the serial examples locally, type either `./main` or `./main [seed]` to run the SingleFluidIdeal or SingleFluidIdealRandom examples respectively.
+
+**Parallel**: To run examples which use MPI to launch multiple processes, type `mpirun -np 4 ./main [seed]`. This will run the code using 4 processes, the process count expected by the example codes.
+
+### Running examples in a batch environment (eg Iridis 5)
+
+To run any of the MPI examples using Iridis 5, submit the example job located in the Scripts/IridisEnv directory from the example directory:
+
+```
+cd Examples/KelvinHelmholtz/SingleFluidIdealRandomMPIHDF5
+sbatch ../../../Scripts/IridisEnv/examples_cpu_job.sh
+```
+
+This will load the correct modules, do a clean build of the example and run it in parallel.
+
+You can also test building the example from the login node by loading the modules from `examples_cpu_job.sh` and typing `make`.
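+
+Putting the above together, a complete local build-and-run of the MPI + HDF5 Kelvin-Helmholtz example might look like the following (a sketch only; the Makefile edits described above may be needed first, and the seed value 10 is arbitrary):
+
+```
+cd Examples/KelvinHelmholtz/SingleFluidIdealRandomMPIHDF5
+make clean && make        # picks up CC / MPI_CC / HDF5_CC from the Makefile
+mpirun -np 4 ./main 10    # 4 processes, as expected by the example; 10 is the random seed
+```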
+
+### Viewing data
+
+Data is saved in the *Examples/Data* directory.
+
+> **Note**: The InteractivePlot and Animation tools are no longer supported: use at your own risk.
+
+#### InteractivePlot
+
+The example data can be viewed using the interactivePlot script. We suggest that the spyder environment from
+Anaconda is the best way to use this tool.
+
+To use the plotting tools, run from the root Examples directory something like
+
+    spyder interactivePlot.py
+
+Running this script as main will load any simulation data into the `Plot` object.
+
+This object has a number of pre-made plotting routines, all of which begin with
+
+    Plot.plot
+
+If you wish to create your own plot, you can access the simulation data using the
+
+    Plot.prims
+
+    Plot.cons
+
+arrays, etc. The first index is the variable, followed by the `x`, `y`, and `z` indices.
+
+To view the available primitive variables, use `Plot.cleanPrimLabels`.
+
+In addition, the Plot object contains a dictionary linking to all the system
+constants. For example, to get the value for the adiabatic index used in the
+simulation use `Plot.c['gamma']`.
+
+#### Animation
+
+For the Kelvin-Helmholtz simulation, running the `animation.py` script will create an
+animation called `Output.gif` in the root Examples directory to view (this may take up
+to ten minutes to run the simulation and make the animation).
+
+Make sure you clean any timeseries data before running the simulation by running
+
+    bash cleanData.sh
+
+from the root Examples/ directory. The variable being animated can be changed
+manually.
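+
+As a rough end-to-end sketch (assuming `cleanData.sh` and `animation.py` both live in the root Examples directory, as the commands above suggest; adjust the paths to your checkout):
+
+```
+cd Examples              # the root Examples directory of the repository
+bash cleanData.sh        # clear any old timeseries data first
+python animation.py      # runs the simulation and writes Output.gif (can take ~10 minutes)
+```
+
+---------------------------------------------
+---------------------------------------------
+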

+
+
+## Running tests
+
+Once METHOD is installed, check that the latest build is working by running the unit tests.
+
+We use the Google Test framework for unit testing; all tests are saved in the `Tests/CPU/Src` or `Tests/GPU/Src` directory.
+
+As far as possible the tests in the two testing directories are the same, though the full simulation tests are less comprehensive on GPU than on CPU. For both the CPU and GPU code, the core unit tests do not use MPI, but there is additional testing of full simulation runs such that the serial and MPI results match to within floating point accuracy. The outputs of CPU and GPU runs are not compared, as the different order of floating point calculations between the CPU and GPU versions causes the results to diverge for some simulations.
+
+The CPU test suite additionally contains tests that compare the outputs of full simulations in the older plain text format with outputs in the newer HDF5 format, to verify the correctness of the HDF5 writer.
+
+To run tests, first set up a python virtual environment with the modules required for testing by typing the following in the root directory:
+
+```
+python3 -m venv venv
+source venv/bin/activate
+python -m pip install -r Scripts/IridisEnv/requirements.txt
+```
+
+Clone the GoogleTest repository into the directory above the METHOD root directory:
+
+```
+git clone https://github.com/google/googletest.git
+```
+
+### CPU tests
+
+To run the full CPU test suite, the MPI and HDF5 dependencies need to be installed. Then, run
+
+```
+cd Tests/CPU
+make test
+```
+
+On Iridis, instead submit this work as a job from the Tests/CPU folder using `sbatch ../../Scripts/IridisEnv/tests_cpu_job.sh`.
+
+This will run a number of tests. Note that these tests are currently in separate batches that are not all summarised at the end of the test output, so it is currently necessary to look through the whole job output for any failing tests.
+
+If neither MPI nor HDF5 is installed, a smaller core of unit tests can be run using:
+
+```
+cd Tests/CPU
+make test_serial
+```
+
+### GPU tests
+
+To run the full GPU test suite, the MPI, HDF5 and CUDA dependencies need to be installed. Then, run
+
+```
+cd Tests/GPU
+make test
+```
+
+On Iridis, instead submit this work as a job from the Tests/GPU folder using `sbatch ../../Scripts/IridisEnv/tests_gpu_job.sh`.
+
+As for the CPU version, these tests are currently in separate batches that are not all summarised at the end of the test output, so it is currently necessary to look through the whole job output for any failing tests.
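+
+Because the results are not collected into one summary, a quick way to scan a long job log for failures is something like the following (a sketch assuming the standard Google Test output format and a Slurm output file named `slurm-<jobid>.out`; adjust the filename to your system):
+
+```
+# list every Google Test suite summary line in the job output
+grep -E "\[  (PASSED|FAILED)  \]" slurm-*.out
+# or simply flag any failures across all test batches
+grep FAILED slurm-*.out
+```
+
+---------------------------------------------
+---------------------------------------------
+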

+
+
+
+## Creating your own simulation scripts
+
+To create your own METHOD script, edit main.cc in Project/CPU/Src or main.cu in Project/GPU/Src.
+
+### Compiling and running CPU scripts
+
+Edit the first part of the Makefile to reflect whether you wish to use MPI, OpenMP and/or HDF5, and to specify your mpi and hdf5 compilers:
+
+```
+# -------------- PARAMETERS FOR USERS TO EDIT --------------------
+
+# Whether to use MPI for multi-cpu processing
+USE_MPI = 1
+USE_OMP = 0
+USE_HDF = 1
+
+# Compiler
+CC = g++
+# --- if USE_MPI ---
+# If using mpi, aditionally specify a c++ capable mpi compiler. In systems with multiple versions of MPI,
+# the particular version may need to be specified with eg mpicxx.mpich
+MPI_CC = mpic++
+# --- if USE_HDF ---
+# If using hdf5, additionally specify a hdf5 compiler. If using mpi, this must be the version of the hdf5
+# compiler available on your system that links the correct mpi libraries. Should
+# be one of h5pcc, h5pcc.openmpi or h5pcc.mpich.
+HDF5_CC = h5pcc
+
+# -------------- END PARAMETERS USERS ARE LIKELY TO NEED TO EDIT --------------------
+```
+
+Then compile with
+
+    make
+
+**Serial**: Run from Project/CPU using `./main`.
+
+**Parallel**: Run using `mpirun -np [nprocs] ./main`, where nprocs is the number of processes to use. Note that nprocs must equal nxRanks x nyRanks x nzRanks in `ParallelEnv env(argc, argv, nxRanks, nyRanks, nzRanks)`. To run on Iridis 5, edit `Scripts/IridisEnv/examples_cpu_job.sh` so that --ntasks-per-node x nodes equals nprocs and replace `mpirun -np 4` with `mpirun -np [nprocs]`, then run from Project/CPU using `sbatch ../../Scripts/IridisEnv/examples_cpu_job.sh`.
+
+### Compiling and running GPU scripts
+
+Edit the first part of the Makefile to reflect whether you wish to use MPI and/or HDF5, and to specify your mpi compiler and the path to the hdf5 libraries. Unfortunately we can't simply use the hdf5 compiler wrapper here as it seems to interact badly with the gpu compiler nvcc.
+
+Importantly, make sure to set GPU_COMPUTE_CAPABILITY correctly according to the list here: https://developer.nvidia.com/cuda-GPUs (remove the decimal point, so cc 5.2 would be 52). You can find your GPU model by typing `nvidia-smi`. If the compute capability is wrong, the code will likely still compile but may give wrong answers.
+
+```
+# -------------- PARAMETERS FOR USERS TO EDIT --------------------
+
+# if USE_MPI=1, need to use parallel versions of objects, such as ParallelEnv, ParallelSaveData etc
+USE_MPI=1
+USE_HDF=1
+
+# The compute capability of the GPU
+GPU_COMPUTE_CAPABILITY = 52
+
+# --- IF USE_MPI ---
+# The c++ capable mpi compiler. In systems with multiple versions of MPI, the particular version may need to be specified with eg
+# mpicxx.mpich
+MPI_CC = mpic++
+
+# --- IF USE_HDF ---
+# HDF5 libraries must be linked explicitly like this rather than using the hdf5 compiler h5pcc.
+# h5pcc should wrap mpicc with the hdf5 libraries included, but appears to interact badly with nvcc
+# The library paths below are found using h5pcc -show
+HDF5_FLAGS = -I/local/software/szip/2.1.1/include -L/local/software/hdf5/1.10.2/gcc/parallel/lib -L/local/software/szip/2.1.1/lib -lsz -lz -ldl -lm -I/local/software/hdf5/1.10.2/gcc/parallel/include -lhdf5 -lhdf5_hl
+# Ubuntu 18.04 mpich example
+#HDF5_FLAGS = -I/usr/include/hdf5/mpich -L/usr/lib/x86_64-linux-gnu/hdf5/mpich /usr/lib/x86_64-linux-gnu/hdf5/mpich/libhdf5_hl.a /usr/lib/x86_64-linux-gnu/hdf5/mpich/libhdf5.a -lsz -lz -lm
+
+# -------------- END PARAMETERS USERS ARE LIKELY TO NEED TO EDIT --------------------
+```
+
+Then compile with
+
+    make
+
+**Serial**: Run from Project/GPU using `./main`.
+
+**Parallel**: Run using `mpirun -np [nprocs] ./main`, where nprocs is the number of processes to use. Note that nprocs must equal nxRanks x nyRanks x nzRanks in `ParallelEnv env(argc, argv, nxRanks, nyRanks, nzRanks)`. To run on Iridis 5, edit `Scripts/IridisEnv/examples_gpu_job.sh` so that --ntasks-per-node x nodes equals nprocs and replace `mpirun -np 4` with `mpirun -np [nprocs]`, then run from Project/GPU using `sbatch ../../Scripts/IridisEnv/examples_gpu_job.sh`.
+
+
+### Saving simulation data
+
+The *Src* directory has a tool for interactively plotting the end state of a simulation. The `interactivePlot.py` script requires data to be saved after the simulation in the *Data*
+folder. This is done using the SaveData class: call the class constructor with a pointer to the SimData class whose data you wish to save. Then, simply include
+
+    save.saveAll();
+
+in *main* after the simulation has been evolved. Running the python script as main will load and store the data ready for plotting, and the easiest way to interact with the data is in a python environment such as spyder.
+
+There is also the functionality to save time series data. In order to reduce memory requirements, the user must specify the variables they wish to save (names of the variables should match the labels given in the model's header file). To save variables, go into `simulation.cc/cu` and change the three conditional blocks to save the variables you want using
+
+    this->save->saveVar("SomeVar", totalNumberOfUserDefinedVars)
+
+NOTE: The second argument must be included and must be the number of variables you wish to save at each output.
+
+
+## Rootfinder
+Some simulations will require the use of an N-dimensional rootfinder, either for a (semi-) implicit time integrator or
+for the conservative to primitive transformation. We have elected to use the [CMINPACK library](https://github.com/devernay/cminpack)\*. This library is built automatically from the build scripts used in Project, Examples and Tests and should not need to be touched. However, if you need to implement and test any changes to the library, you can also *cd* into the Cminpack directory and run
+
+    make objects
+
+to compile all the object files.
+
+---------------------------------------------
+
+\* *due to this cryptic package we have moved bits about and re-ordered various headers and includes.
Most of the preprocessor stuff has been deleted (using NVIDIA hardware will result in Cminpack reals defaulting to double precision), some functions have been excluded as they're not needed here, and now for any usage we just include the cminpack.h header file (as opposed to including the CUDA scripts directly).*
+
+---------------------------------------------
+---------------------------------------------
+

+ + + + From 2a5e081b2af23ce411a52e344a8fe7e256a54653 Mon Sep 17 00:00:00 2001 From: "ania.brown" Date: Mon, 1 Feb 2021 13:22:01 +0000 Subject: [PATCH 14/15] making sure USE_OMP actually controls openmp dependency --- Project/CPU/Makefile | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Project/CPU/Makefile b/Project/CPU/Makefile index b0834102..7226a8d5 100644 --- a/Project/CPU/Makefile +++ b/Project/CPU/Makefile @@ -37,6 +37,10 @@ RTFIND_INC_DIR = ./CminpackLibrary/Include # Cminpack source directory RTFIND_SRC_DIR = ./CminpackLibrary/Src +ifeq ($(USE_OMP), 1) + OMP_FLAGS = -fopenmp +endif + # C++ compiler flags CXXFLAGS = -Wall -std=c++11 -g -DUSE_MPI=$(USE_MPI) -O3 $(OMP_FLAGS) -Wno-unknown-pragmas @@ -83,13 +87,9 @@ endif SRCS += ${PARALLEL_SRCS} SRCS += ${SERIAL_SRCS} -ifeq ($(USE_OMP), 1) - OMP_FLAGS = -fopenmp -endif - ifeq ($(USE_HDF), 1) # Using HDF5 - ifeq($(USE_MPI), 1) + ifeq ($(USE_MPI), 1) export HDF5_CXX := $(MPI_CC) export HDF5_CLINKER := $(MPI_CC) else @@ -100,7 +100,7 @@ ifeq ($(USE_HDF), 1) CC = $(HDF5_CC) else # Not using HDF5 - ifeq($(USE_MPI), 1) + ifeq ($(USE_MPI), 1) # Compile all sources with the mpi compiler wrapper CC = $(MPI_CC) endif From 1c131e3f8cc6df3c326f6e1507214d3841715cb4 Mon Sep 17 00:00:00 2001 From: "ania.brown" Date: Fri, 26 Mar 2021 14:09:32 +0000 Subject: [PATCH 15/15] added description to Iridis submission script --- Scripts/IridisEnv/examples_cpu_job.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Scripts/IridisEnv/examples_cpu_job.sh b/Scripts/IridisEnv/examples_cpu_job.sh index 4cd880c7..915b294f 100644 --- a/Scripts/IridisEnv/examples_cpu_job.sh +++ b/Scripts/IridisEnv/examples_cpu_job.sh @@ -1,5 +1,11 @@ #!/bin/bash +# This script submits a Southampton Iridis5 batch job running on +# 4 procs for the example in Examples/KelvinHelmholtz/SingleFluidIdealRandomMPIHDF5 + +# Note the seed after the ./main command would need removing for non-random cases. +# (KelvinHelmholtz/SingleFluidIdealMPI) + #SBATCH --ntasks-per-node=4 # Tasks per node #SBATCH --nodes=1 # Number of nodes requested #SBATCH --time=00:10:00 # walltime