diff --git a/database.json b/database.json index 4b10527..fdb6481 100644 --- a/database.json +++ b/database.json @@ -1,25 +1,23 @@ { "daint.cscs.ch": { - "gpu": { + "mc": { "computer": { "computer-setup": { "label": "{{ label }}", "hostname": "daint.cscs.ch", - "description": "Piz Daint supercomputer at CSCS Lugano, Switzerland, using the GPU nodes. HyperThreading is off", + "description": "Piz Daint supercomputer at CSCS Lugano, Switzerland, multicore partition.", "transport": "core.ssh", "scheduler": "core.slurm", - "shebang": "#!/bin/bash -l", - "mpiprocs_per_machine": 12, - "num_cores_per_mpiproc": 1, - "queue_name": "normal", - "work_dir": "/scratch/snx3000/{username}/aiida/", + "work_dir": "/scratch/snx3000/{username}/aiida_run/", + "shebang": "#!/bin/bash", "mpirun_command": "srun -n {tot_num_mpiprocs}", - "prepend_text": "### computer prepend_text start ###\n#SBATCH --partition={{ slurm_partition }}\n#SBATCH --account={{ slurm_account }}\n#SBATCH --constraint=gpu\n#SBATCH --hint={{ multithreading }}\n\nexport OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK\nexport CRAY_CUDA_MPS=1\nulimit -s unlimited\n### computer prepend_text end ###", + "mpiprocs_per_machine": 36, + "prepend_text": "#SBATCH --partition={{ slurm_partition }}\n#SBATCH --account={{ slurm_account }}\n#SBATCH --constraint=mc\n#SBATCH --cpus-per-task=1\n#SBATCH --hint={{ multithreading }}\n\nexport OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK\nsource $MODULESHOME/init/bash\nulimit -s unlimited", "metadata": { "tooltip": "

\n Piz Daint supercomputer at CSCS Lugano, Switzerland, multicore partition.
\n CSCS now requires MFA to log in; please check the MFA CSCS documentation for details on how to set up the SSH connection.
\n If you are using AiiDAlab, please use the MFA CSCS plugin to set up the SSH connection.\n

\n", "template_variables": { "label": { - "default": "daint-gpu", + "default": "daint-mc", "description": "A short name to identify the computer", "type": "text", "key_display": "Computer Label" @@ -59,7 +57,7 @@ "computer-configure": { "username": "{{ username }}", "safe_interval": 60, - "proxy_command": "ssh -q -Y {username}@ela.cscs.ch netcat daint.cscs.ch 22", + "proxy_command": "ssh -q -Y {{ username }}@ela.cscs.ch netcat daint.cscs.ch 22", "metadata": { "tooltip": "

\n Set up the SSH connection.\n

\n", "ssh_auth": "2FA", @@ -76,24 +74,54 @@ "codes": { "cp2k-9.1": { "label": "cp2k-9.1", - "description": "CP2K compiled for daint-gpu", + "description": "CP2K 9.1 compiled by CSCS", "default_calc_job_plugin": "cp2k", - "filepath_executable": "/apps/dom/UES/jenkins/7.0.UP03/21.09/dom-gpu/software/CP2K/9.1-CrayGNU-21.09-cuda/bin/cp2k.psmp", - "prepend_text": "module load daint-gpu\nmodule load CP2K\n", + "filepath_executable": "/apps/dom/UES/jenkins/7.0.UP03/21.09/dom-mc/software/CP2K/9.1-CrayGNU-21.09/bin/cp2k.psmp", + "prepend_text": "module load daint-mc\nmodule load CP2K\n", "append_text": " " }, "QE-7.2-exe-template": { "label": "{{ code_binary_name }}-7.2", - "description": "The code {{ code_binary_name }} of Quantum ESPRESSO compiled for gpu nodes on daint", + "description": "The code {{ code_binary_name }} of Quantum ESPRESSO compiled for daint-mc", "default_calc_job_plugin": "quantumespresso.{{ code_binary_name }}", - "filepath_executable": "/apps/daint/UES/jenkins/7.0.UP03/21.09/daint-gpu/software/QuantumESPRESSO/7.2-CrayNvidia-21.09/bin/{{ code_binary_name }}.x", - "prepend_text": "module load daint-gpu\nmodule load QuantumESPRESSO\n", - "append_text": " ", + "filepath_executable": "/apps/dom/UES/jenkins/7.0.UP03/21.09/dom-mc/software/QuantumESPRESSO/7.2-CrayIntel-21.09/bin/{{ code_binary_name }}.x", + "prepend_text": "module load daint-mc\nmodule load QuantumESPRESSO\n", + "append_text": "", "metadata": { "template_variables": { "code_binary_name": { + "key_display": "Code name", "type": "list", + "options": [ + "pw", + "ph", + "dos", + "projwfc" + ] + } + } + } + }, + "pw-7.0": { + "label": "pw-7.0", + "description": "PW compiled for daint-mc", + "default_calc_job_plugin": "quantumespresso.pw", + "filepath_executable": "/apps/dom/UES/jenkins/7.0.UP03/21.09/dom-mc/software/QuantumESPRESSO/7.0-CrayIntel-21.09/bin/pw.x", + "prepend_text": "module load daint-mc\nmodule load QuantumESPRESSO\n", + "append_text": "" + }, + "QE-7.0-exe-template": { + "label": 
"{{ code_binary_name }}-7.0", + "description": "The code {{ code_binary_name }} of Quantum ESPRESSO compiled for daint-mc", + "default_calc_job_plugin": "quantumespresso.{{ code_binary_name }}", + "filepath_executable": "/apps/dom/UES/jenkins/7.0.UP03/21.09/dom-mc/software/QuantumESPRESSO/7.0-CrayIntel-21.09/bin/{{ code_binary_name }}.x", + "prepend_text": "module load daint-mc\nmodule load QuantumESPRESSO\n", + "append_text": "", + "metadata": { + "template_variables": { + "code_binary_name": { "key_display": "Code name", + "type": "list", "options": [ "pw", "ph", @@ -106,24 +134,26 @@ } } }, - "mc": { + "gpu": { "computer": { "computer-setup": { "label": "{{ label }}", "hostname": "daint.cscs.ch", - "description": "Piz Daint supercomputer at CSCS Lugano, Switzerland, multicore partition.", + "description": "Piz Daint supercomputer at CSCS Lugano, Switzerland, using the GPU nodes. HyperThreading is off", "transport": "core.ssh", "scheduler": "core.slurm", - "work_dir": "/scratch/snx3000/{username}/aiida_run/", - "shebang": "#!/bin/bash", + "shebang": "#!/bin/bash -l", + "mpiprocs_per_machine": 12, + "num_cores_per_mpiproc": 1, + "queue_name": "normal", + "work_dir": "/scratch/snx3000/{username}/aiida/", "mpirun_command": "srun -n {tot_num_mpiprocs}", - "mpiprocs_per_machine": 36, - "prepend_text": "#SBATCH --partition={{ slurm_partition }}\n#SBATCH --account={{ slurm_account }}\n#SBATCH --constraint=mc\n#SBATCH --cpus-per-task=1\n#SBATCH --hint={{ multithreading }}\n\nexport OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK\nsource $MODULESHOME/init/bash\nulimit -s unlimited", + "prepend_text": "### computer prepend_text start ###\n#SBATCH --partition={{ slurm_partition }}\n#SBATCH --account={{ slurm_account }}\n#SBATCH --constraint=gpu\n#SBATCH --hint={{ multithreading }}\n\nexport OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK\nexport CRAY_CUDA_MPS=1\nulimit -s unlimited\n### computer prepend_text end ###", "metadata": { "tooltip": "

\n Piz Daint supercomputer at CSCS Lugano, Switzerland, hybrid partition.
\n CSCS now requires MFA to log in; please check the MFA CSCS documentation for details on how to set up the SSH connection.
\n If you are using AiiDAlab, please use the MFA CSCS plugin to set up the SSH connection.\n

\n", "template_variables": { "label": { - "default": "daint-mc", + "default": "daint-gpu", "description": "A short name to identify the computer", "type": "text", "key_display": "Computer Label" @@ -163,7 +193,7 @@ "computer-configure": { "username": "{{ username }}", "safe_interval": 60, - "proxy_command": "ssh -q -Y {{ username }}@ela.cscs.ch netcat daint.cscs.ch 22", + "proxy_command": "ssh -q -Y {username}@ela.cscs.ch netcat daint.cscs.ch 22", "metadata": { "tooltip": "

\n Set up the SSH connection.\n

\n", "ssh_auth": "2FA", @@ -180,46 +210,24 @@ "codes": { "cp2k-9.1": { "label": "cp2k-9.1", - "description": "CP2K 9.1 compiled by CSCS", + "description": "CP2K compiled for daint-gpu", "default_calc_job_plugin": "cp2k", - "filepath_executable": "/apps/dom/UES/jenkins/7.0.UP03/21.09/dom-mc/software/CP2K/9.1-CrayGNU-21.09/bin/cp2k.psmp", - "prepend_text": "module load daint-mc\nmodule load CP2K\n", + "filepath_executable": "/apps/dom/UES/jenkins/7.0.UP03/21.09/dom-gpu/software/CP2K/9.1-CrayGNU-21.09-cuda/bin/cp2k.psmp", + "prepend_text": "module load daint-gpu\nmodule load CP2K\n", "append_text": " " }, - "QE-7.0-exe-template": { - "label": "{{ code_binary_name }}-7.0", - "description": "The code {{ code_binary_name }} of Quantum ESPRESSO compiled for daint-mc", - "default_calc_job_plugin": "quantumespresso.{{ code_binary_name }}", - "filepath_executable": "/apps/dom/UES/jenkins/7.0.UP03/21.09/dom-mc/software/QuantumESPRESSO/7.0-CrayIntel-21.09/bin/{{ code_binary_name }}.x", - "prepend_text": "module load daint-mc\nmodule load QuantumESPRESSO\n", - "append_text": "", - "metadata": { - "template_variables": { - "code_binary_name": { - "key_display": "Code name", - "type": "list", - "options": [ - "pw", - "ph", - "dos", - "projwfc" - ] - } - } - } - }, "QE-7.2-exe-template": { "label": "{{ code_binary_name }}-7.2", - "description": "The code {{ code_binary_name }} of Quantum ESPRESSO compiled for daint-mc", + "description": "The code {{ code_binary_name }} of Quantum ESPRESSO compiled for gpu nodes on daint", "default_calc_job_plugin": "quantumespresso.{{ code_binary_name }}", - "filepath_executable": "/apps/dom/UES/jenkins/7.0.UP03/21.09/dom-mc/software/QuantumESPRESSO/7.2-CrayIntel-21.09/bin/{{ code_binary_name }}.x", - "prepend_text": "module load daint-mc\nmodule load QuantumESPRESSO\n", - "append_text": "", + "filepath_executable": "/apps/daint/UES/jenkins/7.0.UP03/21.09/daint-gpu/software/QuantumESPRESSO/7.2-CrayNvidia-21.09/bin/{{ code_binary_name }}.x", + 
"prepend_text": "module load daint-gpu\nmodule load QuantumESPRESSO\n", + "append_text": " ", "metadata": { "template_variables": { "code_binary_name": { - "key_display": "Code name", "type": "list", + "key_display": "Code name", "options": [ "pw", "ph", @@ -229,53 +237,55 @@ } } } - }, - "pw-7.0": { - "label": "pw-7.0", - "description": "PW compiled for daint-mc", - "default_calc_job_plugin": "quantumespresso.pw", - "filepath_executable": "/apps/dom/UES/jenkins/7.0.UP03/21.09/dom-mc/software/QuantumESPRESSO/7.0-CrayIntel-21.09/bin/pw.x", - "prepend_text": "module load daint-mc\nmodule load QuantumESPRESSO\n", - "append_text": "" } } }, "default": "mc" }, "merlin.psi.ch": { - "cpu": { + "gpu": { "computer": { "computer-setup": { "label": "{{ label }}", "hostname": "merlin-l-01.psi.ch", - "description": "Merlin6 HPC at PSI (cpu section).", + "description": "Merlin6 HPC at PSI (gpu section).", "transport": "core.ssh", "scheduler": "core.slurm", "work_dir": "/shared-scratch/{username}/aiida_run/", "shebang": "#!/bin/bash", "mpirun_command": "srun -n {tot_num_mpiprocs}", - "mpiprocs_per_machine": 44, - "prepend_text": "#SBATCH --partition={{ slurm_partition }}\n#SBATCH --account=merlin\n#SBATCH --cluster=merlin6\n\n#SBATCH --cpus-per-task=1\n#SBATCH --hint={{ multithreading }}\n\nulimit -s unlimited", + "mpiprocs_per_machine": 20, + "prepend_text": "#SBATCH --partition={{ slurm_partition }}\n#SBATCH --account=merlin\n#SBATCH --cluster=gmerlin6\n#SBATCH --constraint={{ slurm_constraint }}\n\n#SBATCH --hint={{ multithreading }}\n\nulimit -s unlimited", "metadata": { - "tooltip": "

\n Merlin HPC at PSI.\n

\n", + "tooltip": "

\n Merlin HPC at PSI (gpu).\n

\n", "template_variables": { "label": { - "default": "merlin-cpu", + "default": "merlin-gpu", "description": "A short name to identify the computer", "type": "text", "key_display": "Computer Label" }, "slurm_partition": { - "default": "general", + "default": "gpu", "description": "The slurm partition to submit jobs to", "type": "list", "options": [ - "general", - "daily", - "hourly" + "gpu", + "gpu-short" ], "key_display": "Slurm partition" }, + "slurm_constraint": { + "default": "gpumem_8gb", + "description": "Specify the GPU by the amount of memory available in the GPU card itself.", + "type": "list", + "options": [ + "gpumem_8gb", + "gpumem_11gb", + "gpumem_40gb" + ], + "key_display": "Slurm constraint" + }, "multithreading": { "default": "nomultithread", "description": "The multithreading hint", @@ -330,49 +340,39 @@ } } }, - "gpu": { + "cpu": { "computer": { "computer-setup": { "label": "{{ label }}", "hostname": "merlin-l-01.psi.ch", - "description": "Merlin6 HPC at PSI (gpu section).", + "description": "Merlin6 HPC at PSI (cpu section).", "transport": "core.ssh", "scheduler": "core.slurm", "work_dir": "/shared-scratch/{username}/aiida_run/", "shebang": "#!/bin/bash", "mpirun_command": "srun -n {tot_num_mpiprocs}", - "mpiprocs_per_machine": 20, - "prepend_text": "#SBATCH --partition={{ slurm_partition }}\n#SBATCH --account=merlin\n#SBATCH --cluster=gmerlin6\n#SBATCH --constraint={{ slurm_constraint }}\n\n#SBATCH --hint={{ multithreading }}\n\nulimit -s unlimited", + "mpiprocs_per_machine": 44, + "prepend_text": "#SBATCH --partition={{ slurm_partition }}\n#SBATCH --account=merlin\n#SBATCH --cluster=merlin6\n\n#SBATCH --cpus-per-task=1\n#SBATCH --hint={{ multithreading }}\n\nulimit -s unlimited", "metadata": { - "tooltip": "

\n Merlin HPC at PSI (gpu).\n

\n", + "tooltip": "

\n Merlin HPC at PSI.\n

\n", "template_variables": { "label": { - "default": "merlin-gpu", + "default": "merlin-cpu", "description": "A short name to identify the computer", "type": "text", "key_display": "Computer Label" }, "slurm_partition": { - "default": "gpu", + "default": "general", "description": "The slurm partition to submit jobs to", "type": "list", "options": [ - "gpu", - "gpu-short" + "general", + "daily", + "hourly" ], "key_display": "Slurm partition" }, - "slurm_constraint": { - "default": "gpumem_8gb", - "description": "Specify the GPU by the amount of memory available in the GPU card itself.", - "type": "list", - "options": [ - "gpumem_8gb", - "gpumem_11gb", - "gpumem_40gb" - ], - "key_display": "Slurm constraint" - }, "multithreading": { "default": "nomultithread", "description": "The multithreading hint", @@ -427,6 +427,6 @@ } } }, - "default": "cpu" + "default": "gpu" } } \ No newline at end of file