diff --git a/.gitignore b/.gitignore index 48c2a75..8c0e772 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,9 @@ __pycache__/ *.py[cod] *$py.class +# C extensions +*.so + # Distribution / packaging .Python env/ @@ -21,3 +24,93 @@ wheels/ *.egg-info/ .installed.cfg *.egg + +# Dev +.vscode +.history/ + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# dotenv +.env + +# virtualenv +.venv +venv/ +ENV/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +*~ +.DS_Store +.idea +submit_test/ +_aiida_* +sssp_pseudos +# Autogenerated API docs +docs/source/reference/api/aiida_quantumespresso_hp +docs/source/reference/cli diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d160a54..6bba50f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -7,6 +7,11 @@ repos: - id: fix-encoding-pragma - id: mixed-line-ending - id: trailing-whitespace + exclude: >- + (?x)^( + tests/.*.*out| + tests/.*.in$ + )$ - repo: https://github.com/ikamensh/flynt/ rev: '0.76' @@ -25,6 +30,11 @@ repos: name: yapf types: [python] args: ['-i'] + exclude: &exclude_files > + (?x)^( + docs/.*| + tests/.*(? - (?x)^( - src/aiida_quantumespresso_hp/workflows/hubbard.py| - )$ + exclude: *exclude_files additional_dependencies: ['toml'] diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 0000000..25d8941 --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,20 @@ +version: 2 + +build: + os: ubuntu-22.04 + apt_packages: + - quantum-espresso + +conda: + environment: environment.yml + +python: + install: + - method: pip + path: . + extra_requirements: + - docs + +sphinx: + configuration: docs/source/conf.py + fail_on_warning: false diff --git a/docs/Makefile b/docs/Makefile new file mode 100755 index 0000000..6f70353 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,40 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = build +AIIDAWORKDIR = source/local_module/_aiida_* + +# User-friendly check for sphinx-build +ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) +$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) +endif + +# Internal variables. 
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -n -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+
+.PHONY: all help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext customdefault
+
+customdefault:
+	$(SPHINXBUILD) -b html -nW --keep-going $(ALLSPHINXOPTS) $(BUILDDIR)/html
+
+all: html view
+
+clean:
+	rm -rf $(BUILDDIR); rm -rf $(AIIDAWORKDIR)
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+
+view:
+	open $(BUILDDIR)/html/index.html
diff --git a/docs/source/1_computing_hubbard.ipynb b/docs/source/1_computing_hubbard.ipynb
new file mode 100644
index 0000000..cba2ae4
--- /dev/null
+++ b/docs/source/1_computing_hubbard.ipynb
@@ -0,0 +1,337 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "\n",
+    "(tutorials-hubbard-base)=\n",
+    "\n",
+    "# Computing Hubbard parameters\n",
+    "\n",
+    "In this tutorial you will learn how to calculate the Hubbard parameters step by step using `aiida-quantumespresso-hp`.\n",
+    "\n",
+    "We can divide this goal into three phases:\n",
+    "\n",
+    "* __Define the manifolds__: define the target Hubbard manifolds via the {{ hubbard_structure }}\n",
+    "* __SCF ground-state__: calculate the ground-state using the {py:class}`~aiida_quantumespresso.workflows.pw.base.PwBaseWorkChain`\n",
+    "* __DFPT calculation__: use the {py:class}`~aiida_quantumespresso_hp.workflows.hp.base.HpBaseWorkChain` to run a self-consistent perturbation calculation that predicts the Hubbard parameters.\n",
+    "\n",
+    "In this tutorial we will make use of the LiCoO{sub}`2` structure to give you an overall understanding of the usage of the package.\n",
+    "If you are interested in more advanced features, please have a look at the [next tutorial](./2_parallel_hubbard.ipynb) or at the [how tos](../howto/index.rst).\n",
+    "\n",
+    "Let's get started!"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "\n",
+    "## Defining the target manifold through the `HubbardStructureData`\n",
+    "\n",
+    "The Hubbard correction is a corrective term added to the Hamiltonian of systems\n",
+    "which suffer from significant __self-interaction errors__. This is usually the case for transition\n",
+    "metals in their _d_ manifolds. An extra correction can be added to account for the hybridization\n",
+    "with the ligands, which typically belong to the _p_ element group. Such an interaction needs to be\n",
+    "localized in space, which is why we need to define the __projectors__. Quantum ESPRESSO\n",
+    "allows you to define different types of projectors $| \\phi^I_m \\rangle$ ($m$ orbital quantum number, $I$ atom in cell). Currently, the __ortho-atomic__ projectors\n",
+    "are the most accurate ones implemented.\n",
\n", + "\n", + "Still, we need to ask the program on _which atoms_ $I$ and _which manifolds_ $m$ to project and correct for this\n", + "self-interaction.\n", + "\n", + "Since manifolds and atoms belong to the structure, then you need to definet them together as an {{ hubbard_structure }}.\n", + "\n", + "In the following, we take LiCoO{sub}`2` as example, and we suppose we want to target the _3d_ orbitals of cobalt and the intersite interaction between _2p_ of oxygen and _3d_ of cobalt.\n", + "\n", + "```{note}\n", + "By default we set ortho-atomic projectors and we use the Dudarev formulation.\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [ + "hide-cell" + ] + }, + "outputs": [], + "source": [ + "from local_module import load_temp_profile\n", + "\n", + "# If you download this file, you can run it with your own profile.\n", + "# Put these lines instead:\n", + "# from aiida import load_profile\n", + "# load_profile()\n", + "data = load_temp_profile(\n", + " name=\"hubbard-base-tutorial\",\n", + " add_computer=True,\n", + " add_pw_code=True,\n", + " add_hp_code=True,\n", + " add_sssp=True,\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's define the {{ hubbard_structure }}:\n", + "\n", + ":::{note}\n", + ":class: dropdown\n", + "\n", + "If you already have a {py:class}`aiida.orm.StructureData`, you can load the structure information in `HubbardStructureData` as follows:\n", + "\n", + "```python\n", + "my_structure = load_node(IDENTIFIER)\n", + "hubbard_structure = HubbardStructureData.from_structure(my_structure)\n", + "```\n", + ":::" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from aiida_quantumespresso.data.hubbard_structure import HubbardStructureData\n", + "\n", + "a, b, c, d = 1.40803, 0.81293, 4.68453, 1.62585\n", + "cell = [[a, -b, c], [0.0, d, c], [-a, -b, c]]\n", + "sites = [\n", + " ['Co', 'Co', (0, 0, 0)],\n", + " ['O', 'O', (0, 0, 3.6608)], \n", + " ['O', 'O', (0, 0, 10.392)], \n", + " ['Li', 'Li', (0, 0, 7.0268)],\n", + "]\n", + "hubbard_structure = HubbardStructureData(cell=cell, sites=sites)\n", + "hubbard_structure.initialize_onsites_hubbard(\"Co\", \"3d\")\n", + "hubbard_structure.initialize_intersites_hubbard(\"Co\", \"3d\", \"O\", \"2p\")\n", + "hubbard_structure.store()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's visualize what will be print in the Hubbard card of Quantum ESPRESSO." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from aiida_quantumespresso.utils.hubbard import HubbardUtils\n", + "print(HubbardUtils(hubbard_structure).get_hubbard_card())" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As you can see, the desired interactions has been initialized correctly. This is important because ``hp.x`` needs to know which atoms need to be perturbed. As you will see later, the ``hp.x`` will take care of adding the remaining interactions with neighbouring atoms.\n", + "\n", + ":::{important}\n", + "When you will use your own structures, make sure to have your 'Hubbard atoms' first in the list of atoms. This is due to the way the ``hp.x`` routine works internally, requiring those to be first. 
+    "You can simply do this with the following snippet (if the node is NOT yet stored!):\n",
+    "\n",
+    "```python\n",
+    "from aiida_quantumespresso.utils.hubbard import HubbardUtils\n",
+    "HubbardUtils(hubbard_structure).reorder_atoms()\n",
+    "```\n",
+    ":::"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Calculating the SCF ground-state\n",
+    "\n",
+    "Now that we have defined the structure, we can calculate its ground-state via an SCF using the `PwBaseWorkChain`.\n",
+    "We can fill the inputs of the builder of the `PwBaseWorkChain` through its `get_builder_from_protocol` method."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "tags": [
+     "hide-output"
+    ]
+   },
+   "outputs": [],
+   "source": [
+    "from aiida.engine import run_get_node\n",
+    "from aiida.orm import KpointsData\n",
+    "from aiida_quantumespresso.workflows.pw.base import PwBaseWorkChain\n",
+    "from aiida_quantumespresso.common.types import ElectronicType\n",
+    "kpoints = KpointsData()\n",
+    "kpoints.set_kpoints_mesh([2,2,2])\n",
+    "\n",
+    "builder = PwBaseWorkChain.get_builder_from_protocol(\n",
+    "    code=data.pw_code, # modify here if you downloaded the notebook\n",
+    "    structure=hubbard_structure,\n",
+    "    protocol=\"fast\",\n",
+    "    electronic_type=ElectronicType.INSULATOR,\n",
+    "    overrides={\"kpoints\":kpoints, \"clean_workdir\":False}\n",
+    ")\n",
+    "results, pw_node = run_get_node(builder)\n",
+    "results"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As you can see from the results, the work chain (actually, the `PwCalculation`!) has a `remote_folder` output. This is what we need in order to run the `HpBaseWorkChain`."
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## DFPT calculation of Hubbard parameters\n",
+    "\n",
+    "We can perturb the ground-state found previously to compute the Hubbard parameters.\n",
+    "Here we will need to use the `HpBaseWorkChain` and link the parent folder produced previously."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from aiida.orm import Dict\n",
+    "from aiida_quantumespresso_hp.workflows.hp.base import HpBaseWorkChain\n",
+    "\n",
+    "qpoints = KpointsData()\n",
+    "qpoints.set_kpoints_mesh([1,1,1])\n",
+    "\n",
+    "builder = HpBaseWorkChain.get_builder()\n",
+    "builder.hp.code = data.hp_code\n",
+    "builder.hp.hubbard_structure = hubbard_structure\n",
+    "builder.hp.parameters = Dict({\"INPUTHP\":{\"conv_thr_chi\": 1e-4}})\n",
+    "builder.hp.qpoints = qpoints\n",
+    "builder.hp.parent_scf = pw_node.outputs.remote_folder"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Or via the `get_builder_from_protocol`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "tags": [
+     "hide-output"
+    ]
+   },
+   "outputs": [],
+   "source": [
+    "from aiida_quantumespresso_hp.workflows.hp.base import HpBaseWorkChain\n",
+    "\n",
+    "builder = HpBaseWorkChain.get_builder_from_protocol(\n",
+    "    code=data.hp_code, # modify here if you downloaded the notebook\n",
+    "    protocol=\"fast\",\n",
+    "    parent_scf_folder=pw_node.outputs.remote_folder,\n",
+    "    overrides={'hp':{'hubbard_structure':hubbard_structure}},\n",
+    ")\n",
+    "\n",
+    "results, hp_node = run_get_node(builder)\n",
+    "results"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    ":rocket: Let's inspect the results!"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(HubbardUtils(results['hubbard_structure']).get_hubbard_card())"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Final considerations\n",
+    "\n",
+    "We managed to compute the Hubbard parameters __fully__ ___ab initio___! :tada:\n",
+    "However, as you may have noticed, there were quite a few steps to do by hand. Moreover, keep in mind the following considerations:\n",
+    "\n",
+    "1. For larger and more complex structures you will need to perturb many more atoms. Moreover, to get converged results you will need more than one q point. Click [here](./2_parallel_hubbard.ipynb) to learn how to parallelize over atoms and q points.\n",
+    "2. To do a _full_ self-consistent calculation of these parameters, you should _relax_ your structure with the Hubbard parameters from the ``hp.x`` run, repeat the steps of this tutorial, relax _again_, and do this procedure over and over until convergence. Learn the automated way [here](./3_self_consistent.ipynb)!\n",
+    "\n",
+    "\n",
+    ":::{admonition} Learn more, in detail\n",
+    ":class: hint\n",
+    "\n",
+    "To learn about the full set of inputs, how to use the `get_builder_from_protocol` proficiently, and more, have a look at the following sections:\n",
+    "- [Specific how tos](howto/workflows/hp/base.md)\n",
+    "- [General information on the implemented work chain](topics/workflows/hp/base.md)\n",
+    ":::\n",
+    "\n",
+    ":::{note}\n",
+    "We suggest proceeding first with the tutorial for point (1) and then the one for point (2).\n",
+    "Nevertheless, tutorial (1) is not strictly necessary for (2).\n",
+    ":::"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "base",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.10"
+  },
+  "orig_nbformat": 4,
+  "vscode": {
+   "interpreter": {
+    "hash": "d4d1e4263499bec80672ea0156c357c1ee493ec2b1c70f0acce89fc37c4a6abe"
+   }
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/source/2_parallel_hubbard.ipynb b/docs/source/2_parallel_hubbard.ipynb
new file mode 100644
index 0000000..658b71c
--- /dev/null
+++ b/docs/source/2_parallel_hubbard.ipynb
@@ -0,0 +1,287 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "\n",
+    "(tutorials-hubbard-parallel)=\n",
+    "\n",
+    "# Parallelizing the computation of Hubbard parameters\n",
+    "\n",
+    "In this tutorial you will learn how to parallelize the computation of the Hubbard parameters using the {py:class}`~aiida_quantumespresso_hp.workflows.hp.main.HpWorkChain`.\n",
+    "\n",
+    "We can divide this goal into two phases:\n",
+    "\n",
+    "* __Parallelize over independent atoms__: parallelize the ``hp.x`` calculation with multiple sub-``hp.x`` calculations, each running a single atom.\n",
+    "* __Parallelize over independent q points__: parallelize each atomic sub-``hp.x`` further, with sub-``hp.x`` calculations each running a single q point.\n",
+    "\n",
+    "As we learnt from the [previous tutorial](./1_computing_hubbard.ipynb), first we need to compute the ground-state with a ``pw.x`` calculation.\n",
+    "\n",
+    "Let's get started!"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "tags": [
+     "hide-cell"
+    ]
+   },
+   "outputs": [],
+   "source": [
+    "from local_module import load_temp_profile\n",
+    "\n",
+    "# If you download this file, you can run it with your own profile.\n",
+    "# Put these lines instead:\n",
+    "# from aiida import load_profile\n",
+    "# load_profile()\n",
+    "data = load_temp_profile(\n",
+    "    name=\"hubbard-parallel-tutorial\",\n",
+    "    add_computer=True,\n",
+    "    add_pw_code=True,\n",
+    "    add_hp_code=True,\n",
+    "    add_sssp=True,\n",
+    "    add_structure_licoo=True,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "tags": [
+     "hide-cell"
+    ]
+   },
+   "outputs": [],
+   "source": [
+    "from aiida.engine import run_get_node\n",
+    "from aiida.orm import KpointsData\n",
+    "from aiida_quantumespresso.workflows.pw.base import PwBaseWorkChain\n",
+    "from aiida_quantumespresso.common.types import ElectronicType\n",
+    "kpoints = KpointsData()\n",
+    "kpoints.set_kpoints_mesh([2,2,2])\n",
+    "\n",
+    "builder = PwBaseWorkChain.get_builder_from_protocol(\n",
+    "    code=data.pw_code, # modify here if you downloaded the notebook\n",
+    "    structure=data.structure, # modify here if you downloaded the notebook\n",
+    "    protocol=\"fast\",\n",
+    "    electronic_type=ElectronicType.INSULATOR,\n",
+    "    overrides={\"kpoints\":kpoints, \"clean_workdir\":False}\n",
+    ")\n",
+    "results, pw_node = run_get_node(builder)"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Parallelize over atoms\n",
+    "\n",
+    "To parallelize over atoms, we need a _new_ work chain dedicated to this purpose: the {py:class}`~aiida_quantumespresso_hp.workflows.hp.main.HpWorkChain`.\n",
+    "This work chain can parallelize both over atoms and over q points.\n",
+    "\n",
+    "Let's first look at the atom parallelization. As usual, we need to get the `builder` and fill the inputs.\n",
+    "By specifying the input `parallelize_atoms` as `True` in the `HpWorkChain`, each _independent atom_ will be run as a separate `HpBaseWorkChain`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "tags": [
+     "hide-output"
+    ]
+   },
+   "outputs": [],
+   "source": [
+    "from aiida_quantumespresso_hp.workflows.hp.main import HpWorkChain\n",
+    "\n",
+    "builder = HpWorkChain.get_builder_from_protocol(\n",
+    "    code=data.hp_code,\n",
+    "    protocol=\"fast\",\n",
+    "    parent_scf_folder=pw_node.outputs.remote_folder,\n",
+    "    overrides={\n",
+    "        \"parallelize_atoms\":True,\n",
+    "        \"parallelize_qpoints\":False,\n",
+    "        \"hp\":{\"hubbard_structure\":data.structure},\n",
+    "        \"qpoints_distance\": 1000, # to get few q points\n",
+    "    }\n",
+    ")\n",
+    "\n",
+    "results, hp_node = run_get_node(builder)\n",
+    "results"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's have a look at the workflow:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%verdi process status {hp_node.pk}"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The following just happened:\n",
+    "- A grid of q points is generated automatically from the distance between points (in $\\mathrm{\\AA}^{-1}$) we gave as input (1000, to get a very sparse grid - it is just a tutorial!).\n",
+    "- The `HpParallelizeAtomsWorkChain` is called.\n",
+    "- This work chain first calls an `HpBaseWorkChain` to determine the independent atoms to perturb.\n",
+    "- **Three** `HpBaseWorkChain`s are submitted __simultaneously__: one for cobalt and two for the two oxygen sites.\n",
+    "- The response matrices ($\\chi^{(0)}$, $\\chi$) of each atom are collected and post-processed to compute the final U/V values using $$V_{IJ} = (\\chi^{(0)\\,-1} - \\chi^{-1})_{IJ}$$\n",
+    "\n",
+    "As for the `HpBaseWorkChain`, we also have the `hubbard_structure` output here, containing the same results as the serial execution:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from aiida_quantumespresso.utils.hubbard import HubbardUtils\n",
+    "print(HubbardUtils(results['hubbard_structure']).get_hubbard_card())"
+   ]
+  },
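+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To make the post-processing formula above more concrete, here is a toy example of how the U/V values follow from the inverses of the response matrices. The numbers are made up purely for illustration; ``hp.x`` computes and post-processes the real response matrices internally."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "\n",
+    "# Toy 2x2 response matrices (made-up values); hp.x computes the real ones.\n",
+    "chi0 = np.array([[-0.35, 0.02], [0.02, -0.55]])  # bare response chi^(0)\n",
+    "chi = np.array([[-0.20, 0.01], [0.01, -0.30]])   # self-consistent response chi\n",
+    "\n",
+    "# V_IJ = (chi0^-1 - chi^-1)_IJ: diagonal -> onsite U, off-diagonal -> intersite V\n",
+    "V = np.linalg.inv(chi0) - np.linalg.inv(chi)\n",
+    "print(V)"
+   ]
+  },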
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Parallelize q points for each perturbed atom\n",
+    "\n",
+    "In density-functional perturbation theory, we can simulate linear responses in reciprocal space as monochromatic perturbations, described via a grid of __q points__: each q point corresponds to a monochromatic perturbation.\n",
+    "The number of q points can be reduced using symmetries, and each Hubbard atom (manifold) will in principle have a different number of perturbations.\n",
+    "\n",
+    "By specifying the input `parallelize_qpoints` as `True` in the `HpWorkChain`, each independent q point _of each atom_ will run as a separate `HpBaseWorkChain`.\n",
+    "\n",
+    ":::{important}\n",
+    "To parallelize over q points you __MUST__ parallelize over atoms as well.\n",
+    ":::"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "tags": [
+     "hide-output"
+    ]
+   },
+   "outputs": [],
+   "source": [
+    "builder = HpWorkChain.get_builder_from_protocol(\n",
+    "    code=data.hp_code,\n",
+    "    protocol=\"fast\",\n",
+    "    parent_scf_folder=pw_node.outputs.remote_folder,\n",
+    "    overrides={\n",
+    "        \"parallelize_atoms\":True,\n",
+    "        \"parallelize_qpoints\":True,\n",
+    "        \"hp\":{\"hubbard_structure\":data.structure},\n",
+    "        \"qpoints_distance\": 1000, # to get few q points\n",
+    "    }\n",
+    ")\n",
+    "\n",
+    "results, hp_node = run_get_node(builder)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%verdi process status {hp_node.pk}"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The following just happened:\n",
+    "- A grid of q points was generated automatically from the distance between points (in $\\mathrm{\\AA}^{-1}$) we gave as input (1000, to get a very sparse grid - it is just a tutorial!).\n",
+    "- The `HpParallelizeAtomsWorkChain` is called.\n",
+    "- This work chain first calls an `HpBaseWorkChain` to determine the independent atoms to perturb.\n",
+    "- For each independent atom (three in total) an `HpParallelizeQpointsWorkChain` is submitted __simultaneously__: one for cobalt and two for the two oxygen sites.\n",
+    "- Each such work chain first submits an `HpBaseWorkChain` to determine the independent q points (in this case, only 1).\n",
+    "- An `HpBaseWorkChain` is run for every q point, all executed at the same time. __Imagine this on an HPC!__ :rocket:\n",
+    "- The response matrices ($\\chi^{(0)}_{\\mathbf{q}}$, $\\chi_{\\mathbf{q}}$) of each q point for each atom are collected and post-processed to compute the atomic response matrices.\n",
+    "- A final `HpBaseWorkChain` collects these matrices to compute the U/V values.\n",
+    "\n",
+    "And we check that the results are the same as before:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(HubbardUtils(results['hubbard_structure']).get_hubbard_card())"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Final considerations\n",
+    "\n",
+    "We managed to compute the Hubbard parameters __parallelizing over atoms and q points__! :tada:\n",
+    "\n",
+    "Still, you might need to self-consistently converge the parameters using the iterative procedure relax -> scf -> hubbard.\n",
+    "Learn the automated way [in the last tutorial](./3_self_consistent.ipynb)!\n",
+    "\n",
+    ":::{admonition} Learn more, in detail\n",
+    ":class: hint\n",
+    "\n",
+    "To learn about the full set of inputs, how to use the `get_builder_from_protocol` proficiently, and more, have a look at the following sections:\n",
+    "- [Specific how tos](howto/workflows/hp/main.md)\n",
+    "- [General information on the implemented work chain](topics/workflows/hp/main.md)\n",
+    ":::"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "base",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.10"
+  },
+  "orig_nbformat": 4,
+  "vscode": {
+   "interpreter": {
+    "hash": "d4d1e4263499bec80672ea0156c357c1ee493ec2b1c70f0acce89fc37c4a6abe"
+   }
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/source/3_self_consistent.ipynb b/docs/source/3_self_consistent.ipynb
new file mode 100644
index 0000000..acd865c
--- /dev/null
+++ b/docs/source/3_self_consistent.ipynb
@@ -0,0 +1,353 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "\n",
+    "(tutorials-hubbard-selfconsistent)=\n",
+    "\n",
+    "# Computing Hubbard parameters self-consistently\n",
+    "\n",
+    "In this tutorial you will learn how to iteratively compute the Hubbard parameters through the {py:class}`~aiida_quantumespresso_hp.workflows.hubbard.SelfConsistentHubbardWorkChain`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 26,
+   "metadata": {
+    "tags": [
+     "hide-cell"
+    ]
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       ""
+      ]
+     },
+     "execution_count": 26,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "from local_module import load_temp_profile\n",
+    "from aiida_quantumespresso.data.hubbard_structure import HubbardStructureData\n",
+    "\n",
+    "# If you download this file, you can run it with your own profile.\n",
+    "# Put these lines instead:\n",
+    "# from aiida import load_profile\n",
+    "# load_profile()\n",
+    "data = load_temp_profile(\n",
+    "    name=\"hubbard-selfconsistent-tutorial\",\n",
+    "    add_computer=True,\n",
+    "    add_pw_code=True,\n",
+    "    add_hp_code=True,\n",
+    "    add_sssp=True,\n",
+    ")\n",
+    "\n",
+    "# We initialize only the U, so that `hp.x` will understand it\n",
+    "# needs to compute only the onsite parameters.\n",
+    "a, b, c, d = 1.40803, 0.81293, 4.68453, 1.62585\n",
+    "cell = [[a, -b, c], [0.0, d, c], [-a, -b, c]]\n",
+    "sites = [\n",
+    "    ['Co', 'Co', (0, 0, 0)],\n",
+    "    ['O', 'O', (0, 0, 3.6608)],\n",
+    "    ['O', 'O', (0, 0, 10.392)],\n",
+    "    ['Li', 'Li', (0, 0, 7.0268)],\n",
+    "]\n",
+    "hubbard_structure = HubbardStructureData(cell=cell, sites=sites)\n",
+    "hubbard_structure.initialize_onsites_hubbard(\"Co\", \"3d\")\n",
+    "hubbard_structure.store()"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## The cycle\n",
+    "\n",
+    "To have a fully ab-initio calculation of Hubbard parameters, an iterative procedure should be employed. This foresees the following steps, repeated cyclically until the parameters no longer differ from the previous ones by more than a certain threshold, i.e. ___self-consistently___.\n",
+    "\n",
+    "The steps to do in order are:\n",
+    "1. Perform a volume relaxation of the structure, starting from zero Hubbard parameters (i.e. as if it were a 'non-Hubbard' calculation).\n",
+    "2. Perform the ground-state calculation (SCF) of the relaxed structure.\n",
+    "3. Perform the linear-response calculation to predict the new Hubbard values.\n",
+    "4. If _all_ U (and V) are within the desired threshold, stop; otherwise restart from (1) with the new values.\n",
+    "\n",
+    "::: {admonition} Note for SCF (step 2)\n",
+    ":class: note\n",
+    "\n",
+    "Typically, as these are electronic responses, the ground-state SCF can be performed _with looser energy cutoffs and k-point density_, and still retain the same accuracy on the prediction of the Hubbard parameters.\n",
+    "\n",
+    "```{important}\n",
+    "Before any production run, you should make sure to have converged such parameters.\n",
+    "```\n",
+    ":::\n",
+    "\n",
+    "::: {admonition} Note for thresholds\n",
+    ":class: note\n",
+    "\n",
+    "Thresholds for U and V may depend on the final goal, or property, of your research. In our experience, good values are of the order of 0.1 eV for the onsite parameters (U) and 0.01 eV for the intersite ones (V).\n",
+    ":::\n",
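+    "\n",
+    "Schematically, the cycle looks like this (pseudocode only, to fix ideas; the work chain below automates the real procedure):\n",
+    "\n",
+    "```python\n",
+    "hubbard = initial_guess  # e.g. all parameters zero\n",
+    "while True:\n",
+    "    structure = relax(structure, hubbard)    # step 1\n",
+    "    ground_state = scf(structure, hubbard)   # step 2\n",
+    "    new_hubbard = hp(ground_state)           # step 3\n",
+    "    if max_difference(new_hubbard, hubbard) < threshold:\n",
+    "        break                                # step 4: converged\n",
+    "    hubbard = new_hubbard\n",
+    "```\n",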
+    "\n",
+    "### Automating the cycle\n",
+    "\n",
+    "As we already learnt from the previous tutorials ([1](./1_computing_hubbard.ipynb), [2](./2_parallel_hubbard.ipynb)), we can simply fill the builder of the work chain using the `get_builder_from_protocol`, see what the workflow is doing, and let it help speed up our research.\n",
+    "\n",
+    ":::{warning}\n",
+    "In this tutorial we will compute only the U on Co, and not the V for Co-O. This is to speed up the simulation, which on only a handful of cores would take tens of minutes, if not more.\n",
+    "\n",
+    "This workflow may take 5 minutes (or more) to complete depending on your local resources.\n",
+    ":::"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 27,
+   "metadata": {
+    "tags": [
+     "hide-output"
+    ]
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|setup]: system is treated to be non-magnetic because `nspin == 1` in `scf.pw.parameters` input.\n",
+      "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|run_relax]: launching PwRelaxWorkChain<444> iteration #1\n",
+      "\u001b[34m\u001b[1mReport\u001b[0m: [444|PwRelaxWorkChain|run_relax]: launching PwBaseWorkChain<447>\n",
+      "\u001b[34m\u001b[1mReport\u001b[0m: [447|PwBaseWorkChain|run_process]: launching PwCalculation<452> iteration #1\n",
+      "\u001b[34m\u001b[1mReport\u001b[0m: [447|PwBaseWorkChain|results]: work chain completed after 1 iterations\n",
+      "\u001b[34m\u001b[1mReport\u001b[0m: [447|PwBaseWorkChain|on_terminated]: remote folders will not be cleaned\n",
+      "\u001b[34m\u001b[1mReport\u001b[0m: [444|PwRelaxWorkChain|inspect_relax]: after iteration 1 cell volume of relaxed structure is 31.592539105379053\n",
+      "\u001b[34m\u001b[1mReport\u001b[0m: [444|PwRelaxWorkChain|run_relax]: launching PwBaseWorkChain<461>\n",
+      "\u001b[34m\u001b[1mReport\u001b[0m: [461|PwBaseWorkChain|run_process]: launching PwCalculation<466> iteration #1\n",
+      "\u001b[31m\u001b[1mError\u001b[0m: Then ionic minimization cycle converged but the thresholds are exceeded in the final SCF.\n",
+      "\u001b[93m\u001b[1mWarning\u001b[0m: output parser returned exit code<501>: Then ionic minimization cycle converged but the thresholds are 
exceeded in the final SCF.\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [461|PwBaseWorkChain|report_error_handled]: PwCalculation<466> failed with exit status 501: Then ionic minimization cycle converged but the thresholds are exceeded in the final SCF.\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [461|PwBaseWorkChain|report_error_handled]: Action taken: ionic convergence thresholds met except in final scf: consider structure relaxed.\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [461|PwBaseWorkChain|results]: work chain completed after 1 iterations\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [461|PwBaseWorkChain|inspect_process]: PwCalculation<466> failed but a handler detected an unrecoverable problem, aborting\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [461|PwBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [444|PwRelaxWorkChain|inspect_relax]: after iteration 2 cell volume of relaxed structure is 31.592538691211796\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [444|PwRelaxWorkChain|inspect_relax]: relative cell volume difference 1.310965400891578e-08 smaller than threshold 0.05\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [444|PwRelaxWorkChain|results]: workchain completed after 2 iterations\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [444|PwRelaxWorkChain|on_terminated]: remote folders will not be cleaned\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|run_scf_smearing]: launching PwBaseWorkChain<475> with smeared occupations\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [475|PwBaseWorkChain|run_process]: launching PwCalculation<480> iteration #1\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [475|PwBaseWorkChain|results]: work chain completed after 1 iterations\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [475|PwBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|recon_scf]: after relaxation, system is determined to be an insulator\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|run_scf_fixed]: launching PwBaseWorkChain<488> with fixed occupations\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [488|PwBaseWorkChain|run_process]: launching PwCalculation<493> iteration #1\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [488|PwBaseWorkChain|results]: work chain completed after 1 iterations\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [488|PwBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|run_hp]: launching HpWorkChain<499> iteration #1\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [499|HpWorkChain|run_base_workchain]: running in serial, launching HpBaseWorkChain<505>\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [505|HpBaseWorkChain|run_process]: launching HpCalculation<507> iteration #1\n", + "1\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [505|HpBaseWorkChain|results]: work chain completed after 1 iterations\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [505|HpBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [505|HpBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [499|HpWorkChain|on_terminated]: remote folders will not be cleaned\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|check_convergence]: Hubbard onsites parameters are not converged. 
Max difference is 8.14829999.\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|run_relax]: launching PwRelaxWorkChain<516> iteration #2\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [516|PwRelaxWorkChain|run_relax]: launching PwBaseWorkChain<519>\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [519|PwBaseWorkChain|run_process]: launching PwCalculation<524> iteration #1\n", + "\u001b[31m\u001b[1mError\u001b[0m: ERROR_IONIC_CYCLE_BFGS_HISTORY_FAILURE\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [519|PwBaseWorkChain|results]: work chain completed after 1 iterations\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [519|PwBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [516|PwRelaxWorkChain|inspect_relax]: after iteration 1 cell volume of relaxed structure is 31.944624207488268\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [516|PwRelaxWorkChain|run_relax]: launching PwBaseWorkChain<533>\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [533|PwBaseWorkChain|run_process]: launching PwCalculation<538> iteration #1\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [533|PwBaseWorkChain|results]: work chain completed after 1 iterations\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [533|PwBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [516|PwRelaxWorkChain|inspect_relax]: after iteration 2 cell volume of relaxed structure is 31.95904119405152\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [516|PwRelaxWorkChain|inspect_relax]: relative cell volume difference 0.0004513118222837555 smaller than threshold 0.05\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [516|PwRelaxWorkChain|results]: workchain completed after 2 iterations\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [516|PwRelaxWorkChain|on_terminated]: remote folders will not be cleaned\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|run_scf_smearing]: launching PwBaseWorkChain<547> with smeared occupations\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [547|PwBaseWorkChain|run_process]: launching PwCalculation<552> iteration #1\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [547|PwBaseWorkChain|results]: work chain completed after 1 iterations\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [547|PwBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|recon_scf]: after relaxation, system is determined to be an insulator\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|run_scf_fixed]: launching PwBaseWorkChain<560> with fixed occupations\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [560|PwBaseWorkChain|run_process]: launching PwCalculation<565> iteration #1\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [560|PwBaseWorkChain|results]: work chain completed after 1 iterations\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [560|PwBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|run_hp]: launching HpWorkChain<571> iteration #2\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [571|HpWorkChain|run_base_workchain]: running in serial, launching HpBaseWorkChain<577>\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [577|HpBaseWorkChain|run_process]: launching HpCalculation<579> iteration #1\n", + "1\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [577|HpBaseWorkChain|results]: work chain completed after 1 iterations\n", + 
"\u001b[34m\u001b[1mReport\u001b[0m: [577|HpBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [577|HpBaseWorkChain|on_terminated]: remote folders will not be cleaned\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [571|HpWorkChain|on_terminated]: remote folders will not be cleaned\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|check_convergence]: Hubbard parameters are converged. Stopping the cycle.\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|run_results]: Hubbard parameters self-consistently converged in 2 iterations\n", + "\u001b[34m\u001b[1mReport\u001b[0m: [442|SelfConsistentHubbardWorkChain|on_terminated]: remote folders will not be cleaned\n" + ] + } + ], + "source": [ + "from aiida.engine import run_get_node\n", + "from aiida_quantumespresso_hp.workflows.hubbard import SelfConsistentHubbardWorkChain\n", + "\n", + "builder = SelfConsistentHubbardWorkChain.get_builder_from_protocol(\n", + " pw_code=data.pw_code, \n", + " hp_code=data.hp_code, \n", + " hubbard_structure=hubbard_structure,\n", + " protocol=\"fast\",\n", + " overrides={\n", + " \"clean_workdir\": False,\n", + " \"tolerance_onsite\": 0.5,\n", + " \"tolerance_intersite\": 0.1,\n", + " \"relax\":{\"base\":{\"kpoints_distance\":1.4}}, # to speed up the tutorial\n", + " \"scf\":{\"kpoints_distance\":1.4}, # to speed up the tutorial\n", + " \"hubbard\":{\"qpoints_distance\":1000, \"parallelize_atoms\":False, \"parallelize_qpoints\":False}}, # to speed up the tutorial\n", + ")\n", + "\n", + "results, node = run_get_node(builder)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's inspect the status of the work chain to see the full self-consistency on screen!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[22mSelfConsistentHubbardWorkChain<442> Finished [0] [2:run_results]\n", + " ├── PwRelaxWorkChain<444> Finished [0] [3:results]\n", + " │ ├── PwBaseWorkChain<447> Finished [0] [3:results]\n", + " │ │ ├── create_kpoints_from_distance<448> Finished [0]\n", + " │ │ └── PwCalculation<452> Finished [0]\n", + " │ └── PwBaseWorkChain<461> Finished [501] [2:while_(should_run_process)(2:inspect_process)]\n", + " │ ├── create_kpoints_from_distance<462> Finished [0]\n", + " │ └── PwCalculation<466> Finished [501]\n", + " ├── PwBaseWorkChain<475> Finished [0] [3:results]\n", + " │ ├── create_kpoints_from_distance<476> Finished [0]\n", + " │ └── PwCalculation<480> Finished [0]\n", + " ├── PwBaseWorkChain<488> Finished [0] [3:results]\n", + " │ ├── create_kpoints_from_distance<489> Finished [0]\n", + " │ └── PwCalculation<493> Finished [0]\n", + " ├── HpWorkChain<499> Finished [0] [3:results]\n", + " │ ├── create_kpoints_from_distance<501> Finished [0]\n", + " │ └── HpBaseWorkChain<505> Finished [0] [3:results]\n", + " │ └── HpCalculation<507> Finished [0]\n", + " ├── PwRelaxWorkChain<516> Finished [0] [3:results]\n", + " │ ├── PwBaseWorkChain<519> Finished [0] [3:results]\n", + " │ │ ├── create_kpoints_from_distance<520> Finished [0]\n", + " │ │ └── PwCalculation<524> Finished [0]\n", + " │ └── PwBaseWorkChain<533> Finished [0] [3:results]\n", + " │ ├── create_kpoints_from_distance<534> Finished [0]\n", + " │ └── PwCalculation<538> Finished [0]\n", + " ├── PwBaseWorkChain<547> Finished [0] [3:results]\n", + " │ ├── create_kpoints_from_distance<548> Finished [0]\n", + " │ └── PwCalculation<552> Finished [0]\n", + " ├── PwBaseWorkChain<560> Finished [0] [3:results]\n", + " │ ├── create_kpoints_from_distance<561> Finished [0]\n", + " │ └── PwCalculation<565> Finished [0]\n", + " └── HpWorkChain<571> Finished [0] [3:results]\n", + " ├── create_kpoints_from_distance<573> Finished [0]\n", + " └── HpBaseWorkChain<577> Finished [0] [3:results]\n", + " └── HpCalculation<579> Finished [0]\u001b[0m\n" + ] + } + ], + "source": [ + "%verdi process status {node.pk}" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And of course, here you have the final __relaxed__ structure with __fully self-consistent ab-initio Hubbard parameters__! :tada:" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HUBBARD\tortho-atomic\n", + " U\tCo-3d\t7.8264\n", + "\n" + ] + } + ], + "source": [ + "from aiida_quantumespresso.utils.hubbard import HubbardUtils\n", + "print(HubbardUtils(results['hubbard_structure']).get_hubbard_card())" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Final considerations\n", + "\n", + "We managed to compute the Hubbard parameters self-consistently with a series of relaxations, scfs, and hp calculations, ___all fully automated___! 
:tada:\n", + "\n", + "\n", + ":::{admonition} Learn more and in details\n", + ":class: hint\n", + "\n", + "To learn the full sets of inputs, to use proficiently the `get_builder_from_protocol` and more, have a look at the following sections:\n", + "- [Specific how tos](howto/workflows/hubbard.md)\n", + "- [General information of the implemented workchain](topics/workflows/hubbard.md)\n", + ":::" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "base", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "d4d1e4263499bec80672ea0156c357c1ee493ec2b1c70f0acce89fc37c4a6abe" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/source/_static/aiida-custom.css b/docs/source/_static/aiida-custom.css new file mode 100644 index 0000000..a3ebd1c --- /dev/null +++ b/docs/source/_static/aiida-custom.css @@ -0,0 +1,117 @@ +/* Fix CSS of top bar link icons */ +a.nav-link.nav-external i { + padding-left: 0.3em !important; + font-size: inherit !important; + vertical-align: inherit !important; +} +/* Current fix for https://github.com/pandas-dev/pydata-sphinx-theme/issues/193 */ +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) minmax(0, 1fr); +} +/* For icon unicodes see https://fontawesome.com/v4.7.0/icons/ */ +.title-icon-rocket .admonition-title:before { + margin-right:.5rem; + content: "\f135" +} +.title-icon-info-circle .admonition-title:before { + margin-right:.5rem; + content: "\f05a" +} +.title-icon-question-circle .admonition-title:before { + margin-right:.5rem; + content: "\f059" +} +.title-icon-download .admonition-title:before { + margin-right:.5rem; + content: "\f019" +} +.title-icon-external-link .admonition-title:before { + margin-right:.5rem; + content: "\f08e" +} +.title-icon-lightbulb .admonition-title:before { + margin-right:.5rem; + content: "\f0eb" +} +.title-icon-wrench .admonition-title:before { + margin-right:.5rem; + content: "\f0ad" +} +.title-icon-cog .admonition-title:before { + margin-right:.5rem; + content: "\f013" +} +.title-icon-cogs .admonition-title:before { + margin-right:.5rem; + content: "\f085" +} +.title-icon-code-fork .admonition-title:before { + margin-right:.5rem; + content: "\f126" +} +/* Semantic icon names */ +.title-icon-launch-software .admonition-title:before { + margin-right:.5rem; + content: "\f135" /* rocket */ +} +.title-icon-install-software .admonition-title:before { + margin-right:.5rem; + content: "\f019" /* download */ +} +.title-icon-information .admonition-title:before { + margin-right:.5rem; + content: "\f05a" /* info-circle */ +} +.title-icon-tip .admonition-title:before { + margin-right:.5rem; + content: "\f0eb" /* lightbulb */ +} +.title-icon-important .admonition-title:before { + margin-right:.5rem; + content: "\f06a" /* exclamation-circle */ +} +.title-icon-warning .admonition-title:before { + margin-right:.5rem; + content: "\f071" /* exclamation-triangle */ +} +.title-icon-troubleshoot .admonition-title:before { + margin-right:.5rem; + content: "\f0ad" /* wrench */ +} +.title-icon-read-more .admonition-title:before { + margin-right:.5rem; + content: "\f518" /* external-link */ +} + +.dropdown-group .dropdown .summary-title { + border-bottom: 0 !important; + 
font-weight:700 !important; +} +.dropdown-group .dropdown:not(:last-child) { + margin-bottom: 0 !important; + border-radius: 0 !important; +} +.dropdown-group .dropdown:first-child, +.dropdown-group .dropdown:first-child .summary-title { + border-radius: 1rem 1rem 0rem 0rem !important; +} +.dropdown-group .dropdown:last-child, +.dropdown-group .dropdown:last-child .summary-title { + border-radius: 0rem 0rem 1rem 1rem !important; +} + +.dropdown-group .dropdown:last-child { + margin-bottom: 24px !important; +} + +div.admonition :last-child { + margin-bottom: 0 +} + +div.highlight-bash div.highlight { + background-color: aliceblue; +} +div.highlight-console div.highlight { + background-color: aliceblue; +} diff --git a/docs/source/_static/aiida-qe-custom.css b/docs/source/_static/aiida-qe-custom.css new file mode 100644 index 0000000..3d37cd7 --- /dev/null +++ b/docs/source/_static/aiida-qe-custom.css @@ -0,0 +1,28 @@ +.logo-table td { + padding: 20px 10px; +} + +.center { + text-align: center; + max-width: 90%; +} + +.bigfont { + font-size: 140%; +} + +div.navbar-brand-box a.navbar-brand img { + display: block; + height: auto; + width: auto; + max-height: 100vh; + max-width: 90%; + margin: 0 auto; + padding-left: 30px +} + +@media (min-width: 768px) { + div.navbar-brand-box a.navbar-brand img { + max-height: 100vh !important + } +} diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100755 index 0000000..fa3702d --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,314 @@ +# -*- coding: utf-8 -*- +# +# aiida-wannier90 documentation build configuration file, created by +# sphinx-quickstart on Fri Oct 10 02:14:52 2014. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. +"""Configuration file for the documentation.""" +import pathlib +import time + +# Load the dummy profile even if we are running locally, this way the documentation will succeed even if the current +# default profile of the AiiDA installation does not use a Django backend. +from aiida.manage.configuration import load_documentation_profile + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +import aiida_quantumespresso_hp + +load_documentation_profile() + +# -- Project information ----------------------------------------------------- + +project = 'aiida-quantumespresso-hp' +copyright = ( # pylint: disable=redefined-builtin, line-too-long + f"""2022-{time.localtime().tm_year}, UNIVERSITY OF BREMEN, Germany,""" + """and ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE (Theory and Simulation of""" + """Materials (THEOS) and National Centre for Computational Design and Discovery""" + """of Novel Materials (NCCR MARVEL)), Switzerland. All rights reserved""" +) # pylint: disable=redefined-builtin, line-too-long + +# The full version, including alpha/beta/rc tags. +release = aiida_quantumespresso_hp.__version__ +# The short X.Y version. +version = '.'.join(aiida_quantumespresso_hp.__version__.split('.')[:2]) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. 
+#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'myst_nb', + 'sphinx.ext.autodoc', + 'sphinx.ext.mathjax', + 'sphinx.ext.intersphinx', + 'sphinx.ext.viewcode', + 'sphinx_copybutton', + 'sphinx_togglebutton', + 'sphinx_design', + 'aiida.sphinxext', + 'autoapi.extension', +] + +# Setting the intersphinx mapping to other readthedocs +intersphinx_mapping = { + 'python': ('https://docs.python.org/3.8', None), + 'aiida': ('https://aiida.readthedocs.io/en/latest/', None), + 'aiida_pseudo': ('http://aiida-pseudo.readthedocs.io/en/latest/', None), + 'aiida_quantumespresso': ('http://aiida-quantumespresso.readthedocs.io/en/latest/', None), + 'sphinx': ('https://www.sphinx-doc.org/en/master', None), +} + +myst_enable_extensions = [ + 'amsmath', + 'colon_fence', + 'deflist', + 'dollarmath', + 'html_image', + 'substitution', +] + +myst_substitutions = { + 'release': release, + 'version': version, + 'hubbard_structure': '{py:class}`~aiida_quantumespresso.data.hubbard_structure.HubbardStructureData`' +} + +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'myst-nb', + '.ipynb': 'myst-nb', + '.myst': 'myst-nb', +} + +# Execution timeout (seconds) +nb_execution_timeout = 600 + +# Settings for the `autoapi.extenstion` automatically generating API docs +filepath_docs = pathlib.Path(__file__).parent.parent +filepath_src = filepath_docs.parent / 'src' +autoapi_type = 'python' +autoapi_dirs = [filepath_src] +autoapi_ignore = [filepath_src / 'aiida_quantumespresso_hp' / '*cli*'] +autoapi_root = str(filepath_docs / 'source' / 'reference' / 'api') +autoapi_keep_files = True +autoapi_add_toctree_entry = False + +# Settings for the `sphinx_copybutton` extension +copybutton_selector = 'div:not(.no-copy)>div.highlight pre' +copybutton_prompt_text = r'>>> |\.\.\. |(?:\(.*\) )?\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: ' +copybutton_prompt_is_regexp = True + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +# language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = [] + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. + +html_theme = 'pydata_sphinx_theme' +html_theme_options = { + 'github_url': 'https://github.com/aiidateam/aiida-quantumespresso-hp', + 'twitter_url': 'https://twitter.com/aiidateam', + 'use_edit_page_button': True, +} +html_static_path = ['_static'] +html_context = { + 'github_user': 'aiidateam', + 'github_repo': 'aiida-quantumespresso-hp', + 'github_version': 'main', + 'doc_path': 'docs/source', + 'default_mode': 'light', +} + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +html_logo = 'images/logo_docs.png' +html_static_path = ['_static'] +html_css_files = ['aiida-custom.css', 'aiida-qe-custom.css'] + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. 
The value of this option must be the +# base URL from which the finished HTML is served. +html_use_opensearch = 'http://aiida-quantumespresso-hp.readthedocs.io' + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +html_search_language = 'en' + +# Output file base name for HTML help builder. +htmlhelp_basename = 'aiida-quantumespresso-hpdoc' + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + #'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + #'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + #'preamble': '', + + # Latex figure (float) alignment + #'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +# latex_documents = [ +# ] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +# man_pages = [ +# ] + +# If true, show URL addresses after external links. +#man_show_urls = False + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +# texinfo_documents = [ +# ] + +# Documents to append as an appendix to all manuals. +#texinfo_appendices = [] + +# If false, no module index is generated. +#texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +#texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. 
+#texinfo_no_detailmenu = False
+
+# Warnings to ignore when using the -n (nitpicky) option
+# We should ignore any python built-in exception, for instance
nitpick_ignore = [
+    ('py:exc', 'ArithmeticError'),
+    ('py:exc', 'AssertionError'),
+    ('py:exc', 'AttributeError'),
+    ('py:exc', 'BaseException'),
+    ('py:exc', 'BufferError'),
+    ('py:exc', 'DeprecationWarning'),
+    ('py:exc', 'EOFError'),
+    ('py:exc', 'EnvironmentError'),
+    ('py:exc', 'Exception'),
+    ('py:exc', 'FloatingPointError'),
+    ('py:exc', 'FutureWarning'),
+    ('py:exc', 'GeneratorExit'),
+    ('py:exc', 'IOError'),
+    ('py:exc', 'ImportError'),
+    ('py:exc', 'ImportWarning'),
+    ('py:exc', 'IndentationError'),
+    ('py:exc', 'IndexError'),
+    ('py:exc', 'KeyError'),
+    ('py:exc', 'KeyboardInterrupt'),
+    ('py:exc', 'LookupError'),
+    ('py:exc', 'MemoryError'),
+    ('py:exc', 'NameError'),
+    ('py:exc', 'NotImplementedError'),
+    ('py:exc', 'OSError'),
+    ('py:exc', 'OverflowError'),
+    ('py:exc', 'PendingDeprecationWarning'),
+    ('py:exc', 'ReferenceError'),
+    ('py:exc', 'RuntimeError'),
+    ('py:exc', 'RuntimeWarning'),
+    ('py:exc', 'StandardError'),
+    ('py:exc', 'StopIteration'),
+    ('py:exc', 'SyntaxError'),
+    ('py:exc', 'SyntaxWarning'),
+    ('py:exc', 'SystemError'),
+    ('py:exc', 'SystemExit'),
+    ('py:exc', 'TabError'),
+    ('py:exc', 'TypeError'),
+    ('py:exc', 'UnboundLocalError'),
+    ('py:exc', 'UnicodeDecodeError'),
+    ('py:exc', 'UnicodeEncodeError'),
+    ('py:exc', 'UnicodeError'),
+    ('py:exc', 'UnicodeTranslateError'),
+    ('py:exc', 'UnicodeWarning'),
+    ('py:exc', 'UserWarning'),
+    ('py:exc', 'VMSError'),
+    ('py:exc', 'ValueError'),
+    ('py:exc', 'Warning'),
+    ('py:exc', 'WindowsError'),
+    ('py:exc', 'ZeroDivisionError'),
+    ('py:obj', 'str'),
+    ('py:obj', 'list'),
+    ('py:obj', 'tuple'),
+    ('py:obj', 'int'),
+    ('py:obj', 'float'),
+    ('py:obj', 'bool'),
+    ('py:obj', 'Mapping'),
+    ('py:obj', 'qe_tools.parsers.CpInputFile'),
+    ('py:obj', 'qe_tools.parsers.PwInputFile'),
+    ('py:class', 'StructureData'),
+    ('py:class', 'PseudoPotentialFamily'),
+]
+
+nitpick_ignore_regex = [
+    (r'py:.*', key) for key in [
+        r'data.*',
+        r'aiida.*',
+        r'orm.*',
+        r'phonopy.*',
+        r'numpy.*',
+        r'np.*',
+    ]
+]
diff --git a/docs/source/howto/calculations/hp.md b/docs/source/howto/calculations/hp.md
new file mode 100644
index 0000000..61c26a4
--- /dev/null
+++ b/docs/source/howto/calculations/hp.md
@@ -0,0 +1,157 @@
+---
+myst:
+  substitutions:
+    aiida_pseudo: '[`aiida-pseudo`](https://aiida-pseudo.readthedocs.io/)'
+    hubbard_structure: '{py:class}`~aiida_quantumespresso.data.hubbard_structure.HubbardStructureData`'
+---
+
+(howto-calculations-hp)=
+
+# `hp.x`
+
+The `hp.x` code of Quantum ESPRESSO performs a self-consistent perturbative calculation of Hubbard parameters
+within density-functional perturbation theory (DFPT), using a plane-wave basis set and pseudopotentials (norm-conserving, ultrasoft and PAW).
+This is a fundamental step for obtaining accurate electronic properties of complex materials, mainly those containing transition metals,
+for which the self-interaction error is relevant.
+
+| | |
+|---------------------|---------------------------------------------------------------|
+| Plugin class | {py:class}`~aiida_quantumespresso_hp.calculations.hp.HpCalculation` |
+| Plugin entry point | ``quantumespresso.hp`` |
+
+:::{hint}
+Remember that to get the most out of AiiDA's features you should use the dedicated _work chains_,
+which provide this calculation with **automatic error handling**.
+Visit the [workflows](../workflows/index) section for more!
+:::
+
+## How to launch a `hp.x` calculation
+
+Below is a script with a basic example of how to run a `hp.x` calculation through the `HpCalculation` plugin, computing the Hubbard parameters of LiCoO{sub}`2`:
+
+```{literalinclude} ../include/run_hp_basic.py
+:language: python
+```
+
+Note that you may have to change the names of the codes (hp and pw) that are loaded using `load_code`, as well as the pseudopotential family loaded with `load_group`.
+
+:::{important}
+The `hp.x` code needs to read the wavefunctions from a previously run `pw.x` calculation.
+Thus, you need to first run a `PwCalculation` using the `HubbardStructureData` as input structure,
+with initialized Hubbard parameters, to make `hp.x` understand which atoms to perturb.
+You can find more information on how to do so in the [aiida-quantumespresso documentation](https://aiida-quantumespresso.readthedocs.io/en/latest/).
+Once this run is complete, you can move forward with the tutorial.
+:::
+
+:::{note}
+In the provided script, the `PwCalculation` is performed before the `HpCalculation`.
+:::
+
+## How to define input file parameters
+
+The `hp.x` code supports many parameters that can be defined through the input file,
+as shown in the [official documentation](https://www.quantum-espresso.org/Doc/INPUT_HP.html).
+The parameters are divided into a single section, or "card".
+Parameters that are part of cards that start with an ampersand (`&`) should
+be specified through the `parameters` input of the `HpCalculation` plugin.
+The parameters are specified using a Python dictionary,
+where each card is its own sub-dictionary, for example:
+
+```python
+parameters = {
+    'INPUTHP': {
+        'conv_thr_chi': 1.0e-6,
+        'alpha_mix(10)': 0.1,
+    },
+}
+```
+
+The parameters dictionary should be wrapped in a {py:class}`~aiida.orm.nodes.data.dict.Dict` node
+and assigned to the `parameters` input of the process builder:
+
+```python
+from aiida.orm import Dict, load_code
+builder = load_code('hp').get_builder()
+parameters = {
+    ...
+}
+builder.parameters = Dict(parameters)
+```
+
+:::{warning}
+There are a number of input parameters that *cannot* be set, as they will be automatically set by the plugin based on other inputs, such as the `structure`.
+These include:
+
+- `INPUTHP.pseudo_dir`
+- `INPUTHP.outdir`
+- `INPUTHP.prefix`
+- `INPUTHP.iverbosity`
+- `INPUTHP.nq1`
+- `INPUTHP.nq2`
+- `INPUTHP.nq3`
+
+Defining them anyway will result in an exception when launching the calculation.
+:::
+
+## How to define the ``pw.x`` (SCF) folder
+
+Each `hp.x` calculation requires a previously run `PwCalculation` from which to take the wavefunctions and
+other parameters. The corresponding folder is specified in the `HpCalculation` plugin through the `parent_scf` input.
+This input takes a remote folder, an instance of {py:class}`~aiida.orm.RemoteData`.
+For example, say you have successfully performed a `PwCalculation` (or equivalently a `PwBaseWorkChain`) with PK 1; then:
+
+```python
+from aiida.orm import load_code, load_node
+
+# The `remote_folder` stores the path of the calculation
+# on the computer it was run on.
+parent_scf = load_node(1).outputs.remote_folder
+
+builder = load_code('hp').get_builder()
+builder.parent_scf = parent_scf
+```
+parent_scf = load_node(1).outputs.remote_folder + +builder = load_code('hp').get_builder() +builder.parent_scf = parent_scf +``` + +## How to run a calculation without symlinking + +Specify `PARENT_FOLDER_SYMLINK: False` in the `settings` input: + +```python +builder = load_code('hp').get_builder() +builder.settings = Dict({'PARENT_FOLDER_SYMLINK': False}) +``` + +If this setting is specified, the plugin will NOT symlink the SCF folder. +By default, this is set to `True` in order to save disk space. + +## How to analyze the results + +When a `HpCalculation` is completed, there are quite a few possible analyses to perform. + +### How to inspect the final Hubbard parameters + +A _complete_ `HpCalculation` will produce an {{ hubbard_structure }} containing the parsed Hubbard parameters. +The parameters are stored under the `hubbard` namespace: + +```python +In [1]: node = load_node(HP_CALCULATION_IDENTIFIER) + +In [2]: node.outputs.hubbard_structure.hubbard +Out[2]: +Hubbard(parameters=(HubbardParameters([...]), ...), projectors='ortho-atomic', formulation='dudarev') +``` + +To visualize them as Quantum ESPRESSO HUBBARD card: + +```python +In [3]: from aiida_quantumespresso.utils.hubbard import HubbardUtils + +In [4]: hubbard_card = HubbardUtils(node.outputs.hubbard_structure.hubbard).get_hubbard_card + +In [5]: print(hubbard_card) +Out[5]: +HUBBARD ortho-atomic +V Co-3d Co-3d 1 1 5.11 +V Co-3d O-2p 1 2 1.65 +... +``` diff --git a/docs/source/howto/calculations/index.md b/docs/source/howto/calculations/index.md new file mode 100644 index 0000000..fe6d630 --- /dev/null +++ b/docs/source/howto/calculations/index.md @@ -0,0 +1,9 @@ +(howto-calculations)= + +# How-to run calculations + +```{toctree} +:maxdepth: 1 + +hp +``` diff --git a/docs/source/howto/include/run_hp_basic.py b/docs/source/howto/include/run_hp_basic.py new file mode 100644 index 0000000..367f492 --- /dev/null +++ b/docs/source/howto/include/run_hp_basic.py @@ -0,0 +1,107 @@ +#!/usr/bin/env runaiida +# -*- coding: utf-8 -*- +"""Example running a pw.x and hp.x in a squence.""" +from aiida.engine import run +from aiida.orm import Dict, KpointsData, StructureData, load_code, load_group +from aiida_quantumespresso.data.hubbard_structure import HubbardStructureData + +# =================================================== # +# !!!!!!!!!!!!!!!!!! CHANGE HERE !!!!!!!!!!!!!!!!!!!! # +# =================================================== # +# Load the code configured for ``pw.x`` and ``hp.x``. +# Make sure to replace this string with the label of a +# ``Code`` that you configured in your profile. +hp_code = load_code('pw@localhost') +pw_code = load_code('pw@localhost') + +# ===================== RUN PW ======================= # +pw_builder = pw_code.get_builder() + +# Create a LiCoO3 crystal structure +a, b, c, d = 1.40803, 0.81293, 4.68453, 1.62585 +cell = [[a, -b, c], [0.0, d, c], [-a, -b, c]] +positions = [[0, 0, 0], [0, 0, 3.6608], [0, 0, 10.392], [0, 0, 7.0268]] +symbols = ['Co', 'O', 'O', 'Li'] +structure = StructureData(cell=cell) +for position, symbol in zip(positions, symbols): + structure.append_atom(position=position, symbols=symbol) + +# Create a structure data with Hubbard parameters +hubbard_structure = HubbardStructureData.from_structure(structure) +hubbard_structure.initialize_onsites_hubbard('Co', '3d') # initialize Hubbard atom +hubbard_structure.initialize_intersites_hubbard('Co', '3d', 'O', '2p') # initialize Hubbard atom +pw_builder.structure = hubbard_structure + +# Load the pseudopotential family. 
+pseudo_family = load_group('SSSP/1.2/PBEsol/efficiency') +pw_builder.pseudos = pseudo_family.get_pseudos(structure=structure) + +# Request the recommended wavefunction and charge density cutoffs +# for the given structure and energy units. +cutoff_wfc, cutoff_rho = pseudo_family.get_recommended_cutoffs(structure=structure, unit='Ry') + +parameters = Dict({ + 'CONTROL': { + 'calculation': 'scf' + }, + 'SYSTEM': { + 'ecutwfc': cutoff_wfc, + 'ecutrho': cutoff_rho, + } +}) +pw_builder.parameters = parameters + +# Generate a 2x2x2 Monkhorst-Pack mesh +kpoints = KpointsData() +kpoints.set_kpoints_mesh([2, 2, 2]) +pw_builder.kpoints = kpoints + +# Run the calculation on 1 CPU and kill it if it runs longer than 1800 seconds. +# Set ``withmpi`` to ``False`` if ``pw.x`` was compiled without MPI support. +pw_builder.metadata.options = { + 'resources': { + 'num_machines': 1, + }, + 'max_wallclock_seconds': 1800, + 'withmpi': False, +} + +results, pw_node = run.get_node(pw_builder) +print(f'Calculation: {pw_node.process_class}<{pw_node.pk}> {pw_node.process_state.value} [{pw_node.exit_status}]') +print(f'Results: {results}') +assert pw_node.is_finished_ok, f'{pw_node} failed: [{pw_node.exit_status}] {pw_node.exit_message}' + +# ===================== RUN HP ======================= # +hp_builder = hp_code.get_builder() + +# Assign the remote folder where to take from the +# wavefunctions and other data for the ``hp.x`` to run +parent_scf = pw_node.outputs.remote_folder +hp_builder.parent_scf = parent_scf + +parameters = Dict({ + 'INPUTHP': { + 'conv_thr_chi': 1.0e-3 + }, +}) +hp_builder.parameters = parameters + +# Generate a 1x1x1 Monkhorst-Pack mesh +qpoints = KpointsData() +qpoints.set_kpoints_mesh([1, 1, 1]) +hp_builder.qpoints = qpoints + +# Run the calculation on 1 CPU and kill it if it runs longer than 1800 seconds. +# Set ``withmpi`` to ``False`` if ``pw.x`` was compiled without MPI support. +hp_builder.metadata.options = { + 'resources': { + 'num_machines': 1, + }, + 'max_wallclock_seconds': 1800, + 'withmpi': False, +} + +results, node = run.get_node(hp_builder) +print(f'Calculation: {node.process_class}<{node.pk}> {node.process_state.value} [{node.exit_status}]') +print(f'Results: {results}') +assert node.is_finished_ok, f'{node} failed: [{node.exit_status}] {node.exit_message}' diff --git a/docs/source/howto/index.md b/docs/source/howto/index.md new file mode 100644 index 0000000..b215b60 --- /dev/null +++ b/docs/source/howto/index.md @@ -0,0 +1,16 @@ +(howto)= + +# How-to guides + +:::{important} +The following how-to guides assume that you are familiar with the basics of AiiDA, such as creating data and running processes. +At the very least, make sure you have followed and understand the tutorial on [running an `hp.x` calculation through the API](howto-calculations-hp). +::: + +```{toctree} +:maxdepth: 2 + +understand +calculations/index +workflows/index +``` diff --git a/docs/source/howto/understand.md b/docs/source/howto/understand.md new file mode 100644 index 0000000..36c1d26 --- /dev/null +++ b/docs/source/howto/understand.md @@ -0,0 +1,15 @@ +(howto-understand)= + +# How-to understand the input/builder structure + +In AiiDA the CalcJobs and WorkChains have usually nested inputs and different options on how to run the calculation +and/or workflows. To understand the nested input structure of CalcJobs/Workflows, we made them available in a clickable +fashion in the [topics section](topics). 
+ +Moreover, it could be useful to understand the +[_expose inputs/outputs_](https://aiida.readthedocs.io/projects/aiida-core/en/latest/topics/workflows/usage.html#modular-workflow-design) +mechanism used in AiiDA for workflows, which guarantees a __modular design__. +This means that the workflows can use the inputs of other workflows or calculations, and specify them under a new namespace. + +This is the case for many workflows in this package. For example, the {class}`~aiida_quantumespresso_hp.workflows.hubbard.SelfConsistentHubbardWorkChain` makes use of three WorkChains, the `PwBaseWorkChain` for the scf calculation (namespace used is `scf`), +the `PwRelaxWorkChain` for the (vc)relaxation part (namespace used is `relax`), and finally the `HpWorkChain` (namespace used is `hubbard`). diff --git a/docs/source/howto/workflows/hp/base.md b/docs/source/howto/workflows/hp/base.md new file mode 100644 index 0000000..4c7c6ac --- /dev/null +++ b/docs/source/howto/workflows/hp/base.md @@ -0,0 +1,5 @@ +(howto-workflows-hp-base)= + +# `HpBaseWorkChain` + +*To be added.* diff --git a/docs/source/howto/workflows/hp/main.md b/docs/source/howto/workflows/hp/main.md new file mode 100644 index 0000000..553447c --- /dev/null +++ b/docs/source/howto/workflows/hp/main.md @@ -0,0 +1,5 @@ +(howto-workflows-hp-main)= + +# `HpWorkChain` + +*To be added.* diff --git a/docs/source/howto/workflows/hp/parallelize_atoms.md b/docs/source/howto/workflows/hp/parallelize_atoms.md new file mode 100644 index 0000000..09ee092 --- /dev/null +++ b/docs/source/howto/workflows/hp/parallelize_atoms.md @@ -0,0 +1,5 @@ +(howto-workflows-hp-atoms)= + +# `HpParallelizeAtomsWorkChain` + +*To be added.* diff --git a/docs/source/howto/workflows/hp/parallelize_qpoints.md b/docs/source/howto/workflows/hp/parallelize_qpoints.md new file mode 100644 index 0000000..2fbe106 --- /dev/null +++ b/docs/source/howto/workflows/hp/parallelize_qpoints.md @@ -0,0 +1,5 @@ +(howto-workflows-hp-qpoints)= + +# `HpParallelizeQpointsWorkChain` + +*To be added.* diff --git a/docs/source/howto/workflows/hubbard.md b/docs/source/howto/workflows/hubbard.md new file mode 100644 index 0000000..d0e83a6 --- /dev/null +++ b/docs/source/howto/workflows/hubbard.md @@ -0,0 +1,5 @@ +(howto-workflows-hubbard)= + +# `SelfConsistentHubbardWorkChain` + +*To be added.* diff --git a/docs/source/howto/workflows/index.md b/docs/source/howto/workflows/index.md new file mode 100644 index 0000000..898b0b6 --- /dev/null +++ b/docs/source/howto/workflows/index.md @@ -0,0 +1,13 @@ +(howto-workflows)= + +# How-to run workflows + +```{toctree} +:maxdepth: 1 + +hp/base +hp/main +hp/parallelize_atoms +hp/parallelize_qpoints +hubbard +``` diff --git a/docs/source/images/BIG-MAP_logo.png b/docs/source/images/BIG-MAP_logo.png new file mode 100644 index 0000000..59caa9b Binary files /dev/null and b/docs/source/images/BIG-MAP_logo.png differ diff --git a/docs/source/images/MAPEX.jpg b/docs/source/images/MAPEX.jpg new file mode 100644 index 0000000..9b0c96d Binary files /dev/null and b/docs/source/images/MAPEX.jpg differ diff --git a/docs/source/images/MARVEL.png b/docs/source/images/MARVEL.png new file mode 100644 index 0000000..99b6e08 Binary files /dev/null and b/docs/source/images/MARVEL.png differ diff --git a/docs/source/images/MaX.png b/docs/source/images/MaX.png new file mode 100644 index 0000000..2ebda13 Binary files /dev/null and b/docs/source/images/MaX.png differ diff --git a/docs/source/images/UBREMEN.png b/docs/source/images/UBREMEN.png new file mode 100644 index 
0000000..0a1bef9 Binary files /dev/null and b/docs/source/images/UBREMEN.png differ diff --git a/docs/source/images/logo_aiida.svg b/docs/source/images/logo_aiida.svg new file mode 100644 index 0000000..f28d962 --- /dev/null +++ b/docs/source/images/logo_aiida.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/docs/source/images/logo_aiida_quantumespresso.png b/docs/source/images/logo_aiida_quantumespresso.png new file mode 100644 index 0000000..8c9ab00 Binary files /dev/null and b/docs/source/images/logo_aiida_quantumespresso.png differ diff --git a/docs/source/images/logo_docs.png b/docs/source/images/logo_docs.png new file mode 100644 index 0000000..2a24fe0 Binary files /dev/null and b/docs/source/images/logo_docs.png differ diff --git a/docs/source/images/qe_logo.jpg b/docs/source/images/qe_logo.jpg new file mode 100644 index 0000000..f205c65 Binary files /dev/null and b/docs/source/images/qe_logo.jpg differ diff --git a/docs/source/images/swissuniversities.png b/docs/source/images/swissuniversities.png new file mode 100644 index 0000000..dfea031 Binary files /dev/null and b/docs/source/images/swissuniversities.png differ diff --git a/docs/source/images/transparent.png b/docs/source/images/transparent.png new file mode 100644 index 0000000..014c2d0 Binary files /dev/null and b/docs/source/images/transparent.png differ diff --git a/docs/source/index.md b/docs/source/index.md new file mode 100644 index 0000000..a578ca6 --- /dev/null +++ b/docs/source/index.md @@ -0,0 +1,196 @@ +--- +myst: + substitutions: + README.md of the repository: '`README.md` of the repository' + aiida-core documentation: '`aiida-core` documentation' + aiida-quantumespresso-hp: '`aiida-quantumespresso-hp`' + mapex: '[MAPEX](https://www.uni-bremen.de/en/mapex)' + ubremen_exc: '[U Bremen Excellence Chair](https://www.uni-bremen.de/u-bremen-excellence-chairs)' + esg: "[Excellence Strategy of Germany\u2019s federal and state governments](https://www.dfg.de/en/research_funding/excellence_strategy/index.html)" +--- + +```{toctree} +:hidden: true +:maxdepth: 2 + +installation/index +tutorials/index +howto/index +topics/index +reference/index +``` + +::::{grid} +:reverse: +:gutter: 2 3 3 3 +:margin: 1 2 1 2 + +:::{grid-item} +:columns: 12 4 4 4 + +```{image} images/logo_aiida_quantumespresso.png +:width: 200px +:class: sd-m-auto +``` +::: + +:::{grid-item} +:columns: 12 8 8 8 +:child-align: justify +:class: sd-fs-5 + +# AiiDA Quantum ESPRESSO HP + +An AiiDA plugin package for the calculation of Hubbard parameters using the [Quantum ESPRESSO](http://www.quantumespresso.org) software suite. Compute onsites and intersites Hubbard parameters self-consistently and in automated fashion through state-of-the-art DFPT implementation with automatic data provenance provided by AiiDA. + +**aiida-quantumespresso-hp version:** {{ release }} + +::: + +:::: + +______________________________________________________________________ + + +::::{grid} 1 2 2 2 +:gutter: 3 + +:::{grid-item-card} {fa}`rocket;mr-1` Get started +:text-align: center +:shadow: md + +Instructions to install, configure and setup the plugin package. + ++++ + +```{button-ref} installation/index +:ref-type: doc +:click-parent: +:expand: +:color: primary +:outline: + +To the installation guides +``` +::: + +:::{grid-item-card} {fa}`info-circle;mr-1` Tutorials +:text-align: center +:shadow: md + +Easy examples to take the first steps with the plugin package. 
+ ++++ + +```{button-ref} tutorials/index +:ref-type: doc +:click-parent: +:expand: +:color: primary +:outline: + +To the tutorials +``` +::: + +:::{grid-item-card} {fa}`question-circle;mr-1` How-to guides +:text-align: center +:shadow: md + +Hands-on guides to achieve specific goals. + ++++ + +```{button-ref} howto/index +:ref-type: doc +:click-parent: +:expand: +:color: primary +:outline: + +To the how-to guides +``` +::: + +:::{grid-item-card} {fa}`bookmark;mr-1` Topic guides +:text-align: center +:shadow: md + +Detailed background information on various concepts. + ++++ + +```{button-ref} topics/index +:ref-type: doc +:click-parent: +:expand: +:color: primary +:outline: + +To the topic guides +``` +::: + +:::{grid-item-card} {fa}`cogs;mr-1` Reference guides +:text-align: center +:shadow: md + +Detailed reference guides on the application programming and command line interfaces. + ++++ + +```{button-ref} reference/api/aiida_quantumespresso_hp/index +:ref-type: doc +:click-parent: +:expand: +:color: primary +:outline: + +To the reference guides +``` +::: +:::: + +# How to cite + +If you use this plugin for your research, please cite the following work: + +> Lorenzo Bastonero, Cristiano Malica, Marnik Bercx, Eric Macke, Iurii Timrov, Nicola Marzari, and Sebastiaan P. Huber, [*Automated self-consistent prediction of extended Hubbard parameters for Li-ion batteries*](), npj Comp. Mat., **?**, ? (2023) + +> Sebastiaan. P. Huber, Spyros Zoupanos, Martin Uhrin, Leopold Talirz, Leonid Kahle, Rico Häuselmann, Dominik Gresch, Tiziano Müller, Aliaksandr V. Yakutovich, Casper W. Andersen, Francisco F. Ramirez, Carl S. Adorf, Fernando Gargiulo, Snehal Kumbhar, Elsa Passaro, Conrad Johnston, Andrius Merkys, Andrea Cepellotti, Nicolas Mounet, Nicola Marzari, Boris Kozinsky, and Giovanni Pizzi, [*AiiDA 1.0, a scalable computational infrastructure for automated reproducible workflows and data provenance*](https://doi.org/10.1038/s41597-020-00638-4), Scientific Data **7**, 300 (2020) + +> Martin Uhrin, Sebastiaan. P. Huber, Jusong Yu, Nicola Marzari, and Giovanni Pizzi, [*Workflows in AiiDA: Engineering a high-throughput, event-based engine for robust and modular computational workflows*](https://www.sciencedirect.com/science/article/pii/S0010465522001746), Computational Materials Science **187**, 110086 (2021) + +> Iurii Timrov, Nicola Marzari, and Matteo Cococcioni, [*HP - A code for the calculation of Hubbard parameters using density-functional perturbation theory*](https://doi.org/10.1016/j.commatsci.2020.110086), Computational Materials Science **187**, 110086 (2021) + +> Iurii Timrov, Nicola Marzari, and Matteo Cococcioni, [*Self-consistent Hubbard parameters from density-functional perturbation theory in the ultrasoft and projector-augmented wave formulations*](https://journals.aps.org/prb/abstract/10.1103/PhysRevB.103.045141), Physical Reveview **B** **103**, 045141 + +# Acknowledgements + +We acknowledge support from: + +:::{list-table} +:widths: 60 40 +:class: logo-table +:header-rows: 0 + +* - The {{ ubremen_exc }} program funded within the scope of the {{ esg }}. + - ![ubremen](images/UBREMEN.png) +* - The {{ mapex }} Center for Materials and Processes. + - ![mapex](images/MAPEX.jpg) +* - The [NCCR MARVEL](http://nccr-marvel.ch/) funded by the Swiss National Science Foundation. + - ![marvel](images/MARVEL.png) +* - The EU Centre of Excellence ["MaX – Materials Design at the Exascale"](http://www.max-centre.eu/) (Horizon 2020 EINFRA-5, Grant No. 676598). 
+ - ![max](images/MaX.png) +* - The [swissuniversities P-5 project "Materials Cloud"](https://www.materialscloud.org/swissuniversities) + - ![swissuniversities](images/swissuniversities.png) + +::: + +[aiida]: http://aiida.net +[aiida quantum espresso tutorial]: https://aiida-tutorials.readthedocs.io/en/tutorial-qe-short/ +[aiida-core documentation]: https://aiida.readthedocs.io/projects/aiida-core/en/latest/intro/get_started.html +[aiida-quantumespresso-hp]: https://github.com/aiidateam/aiida-quantumespresso-hp +[aiidalab demo cluster]: https://aiidalab-demo.materialscloud.org/ +[quantum espresso]: http://www.quantumespresso.org +[quantum mobile]: https://quantum-mobile.readthedocs.io/en/latest/index.html diff --git a/docs/source/installation/index.md b/docs/source/installation/index.md new file mode 100644 index 0000000..1bc23b7 --- /dev/null +++ b/docs/source/installation/index.md @@ -0,0 +1,213 @@ +--- +myst: + substitutions: + SSSP: Standard Solid-State Pseudopotentials (SSSP) + aiida-pseudo: '`aiida-pseudo`' + pip: '`pip`' +--- + +# Get started + +(installation-requirements)= + +## Requirements + +To work with `aiida-quantumespresso-hp`, you should have: + +- installed `aiida-core` +- configured an AiiDA profile. + +Please refer to the [documentation](https://aiida.readthedocs.io/projects/aiida-core/en/latest/intro/get_started.html) of `aiida-core` for detailed instructions. + +(installation-installation)= + +## Installation + +The Python package can be installed from the Python Package index [PyPI](https://pypi.org/) or directly from the source: + +::::{tab-set} + +:::{tab-item} PyPI +The recommended method of installation is to use the Python package manager `pip`: + +```console +$ pip install aiida-quantumespresso-hp +``` + +This will install the latest stable version that was released to PyPI. +::: + +:::{tab-item} Source +To install the package from source, first clone the repository and then install using `pip`: + +```console +$ git clone https://github.com/aiidateam/aiida-quantumespresso-hp +$ pip install -e aiida-quantumespresso-hp +``` + +The ``-e`` flag will install the package in editable mode, meaning that changes to the source code will be automatically picked up. +::: + +:::: + +(installation-configuration)= + +## Configuration + +To enable tab-completion for the command line interface, execute the following shell command (depending on the shell): + +::::{tab-set} + +:::{tab-item} bash +```console +$ eval "$(_AIIDA_QUANTUMESPRESSO_HP_COMPLETE=bash_source aiida-quantumespresso-hp)" +``` +::: + +:::{tab-item} zsh +```console +$ eval "$(_AIIDA_QUANTUMESPRESSO_COMPLETE=zsh_source aiida-quantumespresso-hp)" +``` +::: + +:::{tab-item} fish +```console +$ eval (env _AIIDA_QUANTUMESPRESSO_COMPLETE=fish_source aiida-quantumespresso-hp) +``` +::: + +:::: + +Place this command in your shell or virtual environment activation script to automatically enable tab completion when opening a new shell or activating an environment. 
+This file is shell specific, but likely one of the following: + +- the startup file of your shell (`.bashrc`, `.zsh`, ...), if aiida is installed system-wide +- the [activators](https://virtualenv.pypa.io/en/latest/user_guide.html#activators) of your virtual environment +- a [startup file](https://conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#saving-environment-variables) for your conda environment + +:::{important} +After having added the line to the start up script, make sure to restart the terminal or source the script for the changes to take effect. +::: + +(installation-setup)= + +## Setup + +(installation-setup-computer)= + +### Computer + +To run Quantum ESPRESSO calculations on a compute resource, the computer should first be set up in AiiDA. +This can be done from the command line interface (CLI) or the Python application programming interface (API). +In this example, we will set up the `localhost`, the computer where AiiDA itself is running: + +::::{tab-set} + +:::{tab-item} CLI + +To set up a computer, use the ``verdi`` CLI of ``aiida-core``. + +```console +$ verdi computer setup -n -L localhost -H localhost -T core.local -S core.direct +``` + +After creating the localhost computer, configure it using: + +```console +$ verdi computer configure core.local localhost -n --safe-interval 0 +``` + +Verify that the computer was properly setup by running: + +```console +$ verdi computer test localhost +``` +::: + +:::{tab-item} API + +To setup a computer using the Python API, run the following code in a Python script or interactive shell: + +```python + +from aiida.orm import Computer + +computer = Computer( + label='localhost', + hostname='localhost', + transport_type='core.local', + scheduler_type='core.direct' +).store() +computer.configure() +``` +::: +:::: + +For more detailed information, please refer to the documentation [on setting up compute resources](https://aiida.readthedocs.io/projects/aiida-core/en/latest/howto/run_codes.html#how-to-set-up-a-computer). + +(installation-setup-code)= + +### Code + +To run a Quantum ESPRESSO code, it should first be setup in AiiDA. +This can be done from the command line interface (CLI) or the Python application programming interface (API). +In this example, we will setup the `hp.x` code that is installed on the computer where AiiDA is running: + +::::{tab-set} + +:::{tab-item} CLI + +To setup a particular Quantum ESPRESSO code, use the ``verdi`` CLI of ``aiida-core``. + +```console +$ verdi code create core.code.installed -n --computer localhost --label hp --default-calc-job-plugin quantumespresso.hp --filepath-executable /path/to/hp.x +``` +::: + +:::{tab-item} API + +To setup particular Quantum ESPRESSO code using the Python API, run the following code in a Python script or interactive shell: + +```python + +from aiida.orm import InstalledCode + +computer = load_computer('localhost') +code = InstalledCode( +label='hp', +computer=computer, +filepath_executable='/path/to/hp.x', +default_calc_job_plugin='quantumespresso.hp', +).store() +``` +::: + +:::: + +:::{important} +Make sure to replace `/path/to/hp.x` with the actual absolute path to the `hp.x` binary. +::: + +For more detailed information, please refer to the documentation [on setting up codes](https://aiida.readthedocs.io/projects/aiida-core/en/latest/howto/run_codes.html#how-to-setup-a-code). + +(installation-setup-pseudopotentials)= + +### Pseudopotentials + +The `pw.x` and `hp.x` codes used in this plugin require pseudo potentials. 
+The simplest way of installing these is through the `aiida-pseudo` plugin package. +This should come as a dependency of `aiida-quantumespresso-hp` and should already be installed. +If this is not the case, it can be installed using: + +```console +$ pip install aiida-pseudo +``` + +At a minimum, at least one pseudo potential family should be installed. +We recommend using the [SSSP] with the PBEsol functional: + +```console +$ aiida-pseudo install sssp -x PBEsol +``` + +For more detailed information on installing other pseudo potential families, please refer to the documentation of [aiida-pseudo]. diff --git a/docs/source/local_module/__init__.py b/docs/source/local_module/__init__.py new file mode 100644 index 0000000..4d515cc --- /dev/null +++ b/docs/source/local_module/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +"""A module of code related to the tutorial.""" +import os +import pathlib +import warnings + +os.environ['AIIDA_PATH'] = str(pathlib.Path(__file__).parent / '_aiida_path') +# load the configuration without emitting a warning +with warnings.catch_warnings(): + warnings.simplefilter('ignore', UserWarning) + from aiida.manage.configuration import settings + + settings # pylint: disable=pointless-statement + +from .temp_profile import load_temp_profile # pylint: disable=wrong-import-position diff --git a/docs/source/local_module/temp_profile.py b/docs/source/local_module/temp_profile.py new file mode 100644 index 0000000..881d8d8 --- /dev/null +++ b/docs/source/local_module/temp_profile.py @@ -0,0 +1,212 @@ +# -*- coding: utf-8 -*- +"""Load and populate a temporary profile with a computer and code.""" +from __future__ import annotations + +from dataclasses import dataclass +import json +import os +import pathlib +import shutil + +from aiida import get_profile, load_ipython_extension, load_profile, manage, orm +from aiida.storage.sqlite_temp import SqliteTempBackend +from aiida_pseudo.cli.install import download_sssp +from aiida_pseudo.cli.utils import create_family_from_archive +from aiida_pseudo.groups.family import SsspConfiguration, SsspFamily +from aiida_quantumespresso.data.hubbard_structure import HubbardStructureData +import psutil + + +@dataclass +class AiiDALoaded: # pylint: disable=too-many-instance-attributes + """Dataclass for loading an AiiDA profile with predefined nodes.""" + profile: manage.Profile + computer: orm.Computer | None + pw_code: orm.Code | None + hp_code: orm.Code | None + pseudos: SsspFamily | None + structure: orm.StructureData | None + cpu_count: int + workdir: pathlib.Path + pwx_path: pathlib.Path + hwx_path: pathlib.Path + + +def load_temp_profile( + name='temp_profile', + add_computer=False, + add_pw_code=False, + add_hp_code=False, + add_sssp=False, + add_structure_licoo=False, + debug=False, + wipe_previous=True, + cpu_count: int | None = None, +): + """Load a temporary profile with a computer and code. + + This function is idempotent, so it can be called multiple times without + creating duplicate computers and codes. + + :param name: The name of the profile to load. + :param add_computer: Whether to add a computer to the profile. + :param add_pw_code: Whether to add a Quantum ESPRESSO pw.x code to the profile. + :param add_hp_code: Whether to add a Quantum ESPRESSO hp.x code to the profile. + :param add_sssp: Whether to add the SSSP pseudopotentials to the profile. + :param add_structure_licoo: Whether to add the LiCoO2 Hubbard structure to the profile. + :param debug: Whether to enable debug mode (printing all SQL queries). 
+ :param wipe_previous: Whether to wipe any previous data + """ + # load the ipython extension, if possible + try: + load_ipython_extension(get_ipython()) + except NameError: + pass + + workdir_path = pathlib.Path(__file__).parent / '_aiida_workdir' / name + repo_path = pathlib.Path(os.environ['AIIDA_PATH']) / '.aiida' / 'repository' / name + + profile = get_profile() + + if not (profile and profile.name == name): + + if wipe_previous and repo_path.exists(): + shutil.rmtree(repo_path) + if wipe_previous and workdir_path.exists(): + shutil.rmtree(workdir_path) + + profile = SqliteTempBackend.create_profile( + name, + options={'runner.poll.interval': 1}, + debug=debug, + ) + load_profile(profile, allow_switch=True) + config = manage.get_config() + config.add_profile(profile) + + cpu_count = cpu_count or min(4, psutil.cpu_count(logical=False)) + if not shutil.which('pw.x'): + raise RuntimeError('pw.x not found in PATH') + pwx_path = pathlib.Path(shutil.which('pw.x')) + + if not shutil.which('hp.x'): + raise RuntimeError('hp.x not found in PATH') + hpx_path = pathlib.Path(shutil.which('hp.x')) + + computer = load_computer(workdir_path, cpu_count) if add_computer else None + pw_code = load_pw_code(computer, pwx_path) if (computer and add_pw_code) else None + hp_code = load_hp_code(computer, hpx_path) if (computer and add_hp_code) else None + pseudos = load_sssp_pseudos() if add_sssp else None + structure = create_licoo_hubbard_structure() if add_structure_licoo else None + + return AiiDALoaded( + profile, + computer, + pw_code, + hp_code, + pseudos, + structure, + cpu_count, + workdir_path, + pwx_path, + hpx_path, + ) + + +def load_computer(work_directory: pathlib.Path, cpu_count: int): + """Idempotent function to add the computer to the database.""" + created, computer = orm.Computer.collection.get_or_create( + label='localhost', + description='local computer with direct scheduler', + hostname='localhost', + workdir=str(work_directory.absolute()), + transport_type='core.local', + scheduler_type='core.direct', + ) + if created: + computer.store() + computer.set_minimum_job_poll_interval(0.0) + computer.set_default_mpiprocs_per_machine(cpu_count) + computer.configure() + return computer + + +def load_pw_code(computer, exec_path: pathlib.Path): + """Idempotent function to add the code to the database.""" + try: + code = orm.load_code('pw@localhost') + except: # pylint: disable=bare-except + code = orm.Code( + input_plugin_name='quantumespresso.pw', + remote_computer_exec=[computer, str(exec_path)], + ) + code.label = 'pw' + code.description = 'pw.x code on local computer' + code.set_prepend_text('export OMP_NUM_THREADS=1') + code.store() + return code + + +def load_hp_code(computer, exec_path: pathlib.Path): + """Idempotent function to add the code to the database.""" + try: + code = orm.load_code('hp@localhost') + except: # pylint: disable=bare-except + code = orm.Code( + input_plugin_name='quantumespresso.hp', + remote_computer_exec=[computer, str(exec_path)], + ) + code.label = 'hp' + code.description = 'hp.x code on local computer' + code.set_prepend_text('export OMP_NUM_THREADS=1') + code.store() + return code + + +def create_licoo_hubbard_structure(): + """Creates a LiCoO2 crystal structure with Hubbard parameters.""" + a, b, c, d = 1.40803, 0.81293, 4.68453, 1.62585 + cell = [[a, -b, c], [0.0, d, c], [-a, -b, c]] + sites = [ + ['Co', 'Co', (0, 0, 0)], + ['O', 'O', (0, 0, 3.6608)], + ['O', 'O', (0, 0, 10.392)], + ['Li', 'Li', (0, 0, 7.0268)], + ] + hubbard_structure = 
HubbardStructureData(cell=cell, sites=sites) + hubbard_structure.initialize_onsites_hubbard('Co', '3d') + hubbard_structure.initialize_intersites_hubbard('Co', '3d', 'O', '2p') + hubbard_structure.store() + return hubbard_structure + + +def load_sssp_pseudos(version='1.2', functional='PBEsol', protocol='efficiency'): + """Load the SSSP pseudopotentials.""" + config = SsspConfiguration(version, functional, protocol) + label = SsspFamily.format_configuration_label(config) + + try: + family = orm.Group.collection.get(label=label) + except: # pylint: disable=bare-except + pseudos = pathlib.Path(__file__).parent / 'sssp_pseudos' + pseudos.mkdir(exist_ok=True) + + filename = label.replace('/', '-') + + if not (pseudos / (filename + '.tar.gz')).exists(): + download_sssp(config, pseudos / (filename + '.tar.gz'), pseudos / (filename + '.json')) + + family = create_family_from_archive( + SsspFamily, + label, + pseudos / (filename + '.tar.gz'), + ) + family.set_cutoffs( + { + k: {i: v[i] for i in ['cutoff_wfc', 'cutoff_rho'] + } for k, v in json.loads((pseudos / (filename + '.json')).read_text()).items() + }, + 'normal', + unit='Ry', + ) + return family diff --git a/docs/source/reference/index.md b/docs/source/reference/index.md new file mode 100644 index 0000000..0f8c65e --- /dev/null +++ b/docs/source/reference/index.md @@ -0,0 +1,8 @@ +# Reference + +```{toctree} +:hidden: true +:maxdepth: 2 + +api/aiida_quantumespresso_hp/index +``` diff --git a/docs/source/topics/calculations/hp.md b/docs/source/topics/calculations/hp.md new file mode 100644 index 0000000..62c21de --- /dev/null +++ b/docs/source/topics/calculations/hp.md @@ -0,0 +1,8 @@ +(topics-calculations-hp)= + +# `hp.x` + +```{eval-rst} +.. aiida-calcjob:: HpCalculation + :module: aiida_quantumespresso_hp.calculations.hp +``` diff --git a/docs/source/topics/calculations/index.md b/docs/source/topics/calculations/index.md new file mode 100644 index 0000000..2402f22 --- /dev/null +++ b/docs/source/topics/calculations/index.md @@ -0,0 +1,9 @@ +(topics-calculations)= + +# Calculations + +```{toctree} +:maxdepth: 1 + +hp +``` diff --git a/docs/source/topics/index.md b/docs/source/topics/index.md new file mode 100644 index 0000000..7b69f59 --- /dev/null +++ b/docs/source/topics/index.md @@ -0,0 +1,8 @@ +# Topic guides + +```{toctree} +:maxdepth: 2 + +calculations/index +workflows/index +``` diff --git a/docs/source/topics/workflows/hp/base.md b/docs/source/topics/workflows/hp/base.md new file mode 100644 index 0000000..41c2e99 --- /dev/null +++ b/docs/source/topics/workflows/hp/base.md @@ -0,0 +1,8 @@ +(topics-workflows-hp-base)= + +# `HpBaseWorkChain` + +```{eval-rst} +.. aiida-workchain:: HpBaseWorkChain + :module: aiida_quantumespresso_hp.workflows.hp.base +``` diff --git a/docs/source/topics/workflows/hp/main.md b/docs/source/topics/workflows/hp/main.md new file mode 100644 index 0000000..a2a924c --- /dev/null +++ b/docs/source/topics/workflows/hp/main.md @@ -0,0 +1,8 @@ +(topics-workflows-hp-main)= + +# `HpWorkChain` + +```{eval-rst} +.. aiida-workchain:: HpWorkChain + :module: aiida_quantumespresso_hp.workflows.hp.main +``` diff --git a/docs/source/topics/workflows/hp/parallelize_atoms.md b/docs/source/topics/workflows/hp/parallelize_atoms.md new file mode 100644 index 0000000..94655c7 --- /dev/null +++ b/docs/source/topics/workflows/hp/parallelize_atoms.md @@ -0,0 +1,8 @@ +(topics-workflows-hp-atoms)= + +# `HpParallelizeAtomsWorkChain` + +```{eval-rst} +.. 
aiida-workchain:: HpParallelizeAtomsWorkChain + :module: aiida_quantumespresso_hp.workflows.hp.parallelize_atoms +``` diff --git a/docs/source/topics/workflows/hp/parallelize_qpoints.md b/docs/source/topics/workflows/hp/parallelize_qpoints.md new file mode 100644 index 0000000..92cc49a --- /dev/null +++ b/docs/source/topics/workflows/hp/parallelize_qpoints.md @@ -0,0 +1,8 @@ +(topics-workflows-hp-qpoints)= + +# `HpParallelizeQpointsWorkChain` + +```{eval-rst} +.. aiida-workchain:: HpParallelizeQpointsWorkChain + :module: aiida_quantumespresso_hp.workflows.hp.parallelize_qpoints +``` diff --git a/docs/source/topics/workflows/hubbard.md b/docs/source/topics/workflows/hubbard.md new file mode 100644 index 0000000..d819f88 --- /dev/null +++ b/docs/source/topics/workflows/hubbard.md @@ -0,0 +1,8 @@ +(topics-workflows-hubbard)= + +# `SelfConsistentHubbardWorkChain` + +```{eval-rst} +.. aiida-workchain:: SelfConsistentHubbardWorkChain + :module: aiida_quantumespresso_hp.workflows.hubbard +``` diff --git a/docs/source/topics/workflows/index.md b/docs/source/topics/workflows/index.md new file mode 100644 index 0000000..a2a399c --- /dev/null +++ b/docs/source/topics/workflows/index.md @@ -0,0 +1,13 @@ +(topics-workflows)= + +# Workflows + +```{toctree} +:maxdepth: 1 + +hp/base +hp/main +hp/parallelize_atoms +hp/parallelize_qpoints +hubbard +``` diff --git a/docs/source/tutorials/index.md b/docs/source/tutorials/index.md new file mode 100644 index 0000000..369067e --- /dev/null +++ b/docs/source/tutorials/index.md @@ -0,0 +1,33 @@ +(tutorials)= + +# Tutorials + +:::{important} +Before you get started, make sure that you have: + +- installed the `aiida-quantumespresso` package ([see instructions](installation-installation)) +- configured the `pw.x` *AND* the `hp.x` codes ([see instructions](installation-setup-code)) +- installed the SSSP pseudopotential family ([see instructions](installation-setup-pseudopotentials)) +::: + +In this section you will find some tutorials that you will guide you through how to use the aiida-quantumespresso-hp plugin, from **zero** to **hero**! We strongly recommend to start from the first one and work your way up with the other ones. + +Go to one of the tutorials! + +1. [Computing Hubbard parameters](../1_computing_hubbard.ipynb): get started with predicting the Hubbard parameters step by step, by using the _WorkChains_ of the aiida-quantumespresso(-hp) suite. +2. [Hubbard parameters in parallel](../2_parallel_hubbard.ipynb): learn the automated parallel calculation of Hubbard parameters to speed up your work. +3. [Self-consistent Hubbard parameters](../3_self_consistent.ipynb): compute self-consistently the Hubbard parameters in automated fashion. 
+ +Here below the estimated time to run each tutorial (jupyter notebook): + +```{nb-exec-table} +``` + +```{toctree} +:maxdepth: 1 +:hidden: true + +../1_computing_hubbard.ipynb +../2_parallel_hubbard.ipynb +../3_self_consistent.ipynb +``` diff --git a/pyproject.toml b/pyproject.toml index 52eeb8b..be3d813 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -37,13 +37,15 @@ Documentation = 'https://aiida-quantumespresso-hp.readthedocs.io' [project.optional-dependencies] docs = [ - 'sphinx~=4.1', - 'sphinx-copybutton~=0.5.0', - 'sphinx-book-theme~=0.3.2', - 'sphinx-click~=4.0', - 'sphinx-design~=0.0.13', + 'myst-nb~=0.17', + 'jupytext>=1.11.2,<1.15.0', + 'sphinx-togglebutton', + 'sphinx~=5.2', + 'sphinx-copybutton~=0.5.2', + 'sphinx-book-theme~=1.0.1', + 'sphinx-design~=0.4.1', 'sphinxcontrib-details-directive~=0.1.0', - 'sphinx-autoapi' + 'sphinx-autoapi~=2.0.1', ] pre-commit = [ 'pre-commit~=2.17', diff --git a/src/aiida_quantumespresso_hp/calculations/functions/structure_relabel_kinds.py b/src/aiida_quantumespresso_hp/calculations/functions/structure_relabel_kinds.py index 0504dfc..176c81e 100644 --- a/src/aiida_quantumespresso_hp/calculations/functions/structure_relabel_kinds.py +++ b/src/aiida_quantumespresso_hp/calculations/functions/structure_relabel_kinds.py @@ -11,16 +11,20 @@ @calcfunction def structure_relabel_kinds( - hubbard_structure: HubbardStructureData, hubbard: Dict, magnetization: Dict | None = None + hubbard_structure: HubbardStructureData, + hubbard: Dict, + magnetization: Dict | None = None, ) -> Dict: """Create a clone of the given structure but with new kinds, based on the new hubbard sites. :param hubbard_structure: ``HubbardStructureData`` instance. :param hubbard: the ``hubbard`` output Dict node of a ``HpCalculation``. :param magnetization: Dict instance containing the `starting_magnetization` QuantumESPRESSO inputs. - :returns: dict with keys: + :return: dict with keys: + * ``hubbard_structure``: relabelled ``HubbardStructureData`` - * ``starting_magnetization``: updated magnetization as :class:`aiida.orm.Dict` (if provided in inputs) + * ``starting_magnetization``: updated magnetization as :class:`~aiida.orm.Dict` (if provided in inputs) + """ relabeled = hubbard_structure.clone() relabeled.clear_kinds() @@ -84,7 +88,7 @@ def get_relabelled_symbol(symbol: str, counter: int) -> str: :param counter: a integer to assing the new label. Up to 9 an interger is appended, while an *ascii uppercase letter* is used. Lower cases are discarded to avoid possible misleading names - returns: a 3 digit length symbol (QuantumESPRESSO allows only up to 3) + :return: a 3 digit length symbol (QuantumESPRESSO allows only up to 3) """ from string import ascii_uppercase, digits suffix = (digits + ascii_uppercase)[counter] diff --git a/src/aiida_quantumespresso_hp/workflows/hubbard.py b/src/aiida_quantumespresso_hp/workflows/hubbard.py index 7ad2b81..8ffc57d 100644 --- a/src/aiida_quantumespresso_hp/workflows/hubbard.py +++ b/src/aiida_quantumespresso_hp/workflows/hubbard.py @@ -25,11 +25,13 @@ HpWorkChain = WorkflowFactory('quantumespresso.hp.main') -def get_separated_parameters(hubbard_parameters: list(tuple(int, str, int, str, float, tuple(int, int, int), - str))) -> tuple[list, list]: +def get_separated_parameters( + hubbard_parameters: list[tuple[int, str, int, str, float, tuple[int, int, int], str]] +) -> tuple[list, list]: """Return a tuple with onsites and intersites parameters separated. 
- :return: tuple (onsites, intersites).""" + :return: tuple (list of onsites, list of intersites). + """ onsites = [] intersites = [] @@ -57,27 +59,31 @@ def validate_inputs(inputs, _): class SelfConsistentHubbardWorkChain(WorkChain, ProtocolMixin): - """Workchain that for a given input structure will compute the self-consistent Hubbard parameters - by iteratively relaxing the structure (optional) with the ``PwRelaxWorkChain`` and computing the Hubbard - parameters through the ``HpWorkChain``, after an scf performed via the ``PwBaseWorkChain``, + """Workchain computing the self-consistent Hubbard parameters of a structure. + + It iteratively relaxes the structure (optional) with the ``PwRelaxWorkChain`` + and computes the Hubbard parameters through the ``HpWorkChain``, + using the remote folder of an scf performed via the ``PwBaseWorkChain``, until the Hubbard values are converged within certain tolerance(s). The procedure in each step of the convergence cycle is slightly different depending on the electronic and magnetic properties of the system. Each cycle will roughly consist of three steps: - * Relaxing the structure at the current Hubbard values (optional) - * One or two SCF calculations depending whether the system is metallic or insulating, respectively - * A self-consistent calculation of the Hubbard parameters, restarted from the last SCF run + * Relaxing the structure at the current Hubbard values (optional). + * One or two SCF calculations depending whether the system is metallic or insulating, respectively. + * A self-consistent calculation of the Hubbard parameters, restarted from the last SCF run. The possible options for the set of SCF calculations that have to be run in the second step look are: - * Metals: - - SCF with smearing + * Metals: + + - SCF with smearing. - * Insulators - - SCF with smearing - - SCF with fixed occupations; if magnetic, total magnetization and number of bands - are fixed to the values found from the previous SCF calculation + * Insulators + + - SCF with smearing. + - SCF with fixed occupations; if magnetic, total magnetization and number of bands + are fixed to the values found from the previous SCF calculation. When convergence is achieved a node will be returned containing the final converged :class:`~aiida_quantumespresso.data.hubbard_structure.HubbardStructureData`. @@ -93,6 +99,7 @@ class SelfConsistentHubbardWorkChain(WorkChain, ProtocolMixin): @classmethod def define(cls, spec): + """Define the specifications of the process.""" super().define(spec) spec.input('hubbard_structure', valid_type=HubbardStructureData) @@ -400,7 +407,8 @@ def get_inputs(self, cls, namespace): def set_pw_parameters(self, inputs): """Set the input parameters for a generic `quantumespresso.pw` calculation. - :param inputs: AttributeDict of a ``PwBaseWorkChain`` builder input.""" + :param inputs: AttributeDict of a ``PwBaseWorkChain`` builder input. + """ parameters = inputs.pw.parameters.get_dict() parameters.setdefault('CONTROL', {}) parameters.setdefault('SYSTEM', {}) @@ -459,8 +467,11 @@ def inspect_relax(self): self.ctx.current_hubbard_structure = workchain.outputs.output_structure def run_scf_smearing(self): - """Run an scf `PwBaseWorkChain` with smeared occupations, always needed since we do not - a priori whether the material will be metallic or insulating.""" + """Run an scf `PwBaseWorkChain` with smeared occupations. + + This step is always needed since we do not a priori whether + the material will be metallic or insulating. 
+ """ inputs = self.get_inputs(PwBaseWorkChain, 'scf') parameters = inputs.pw.parameters parameters['CONTROL']['calculation'] = 'scf' @@ -479,8 +490,8 @@ def run_scf_smearing(self): return ToContext(workchains_scf=append_(running)) def run_scf_fixed(self): - """ - Run an scf `PwBaseWorkChain` with fixed occupations on top of the previous calculation. + """Run an scf `PwBaseWorkChain` with fixed occupations on top of the previous calculation. + The nunmber of bands and total magnetization (if magnetic) are set according to those of the previous calculation that was run with smeared occupations. diff --git a/src/aiida_quantumespresso_hp/workflows/protocols/hp/base.yaml b/src/aiida_quantumespresso_hp/workflows/protocols/hp/base.yaml index 9f594ea..0e2a402 100644 --- a/src/aiida_quantumespresso_hp/workflows/protocols/hp/base.yaml +++ b/src/aiida_quantumespresso_hp/workflows/protocols/hp/base.yaml @@ -24,21 +24,21 @@ protocols: description: 'Protocol to perform the computation at normal precision at moderate computational cost.' precise: description: 'Protocol to perform the computation at high precision at higher computational cost.' - qpoints: - - 3 - - 3 - - 3 hp: parameters: INPUTHP: conv_thr_chi: 1.e-8 + qpoints: + - 3 + - 3 + - 3 fast: description: 'Protocol to perform the computation at low precision at minimal computational cost for testing purposes.' - qpoints: - - 1 - - 1 - - 1 hp: parameters: INPUTHP: conv_thr_chi: 1.e-4 + qpoints: + - 1 + - 1 + - 1