diff --git a/.github/workflows/ci-style.yml b/.github/workflows/ci-style.yml
index 6454e4bffe..064359bca5 100644
--- a/.github/workflows/ci-style.yml
+++ b/.github/workflows/ci-style.yml
@@ -16,18 +16,12 @@ jobs:
steps:
- uses: actions/checkout@v4
- - name: Install system dependencies
- # note libkrb5-dev is required as a dependency for the gssapi pip install
- run: |
- sudo apt update
- sudo apt install libkrb5-dev ruby ruby-dev
-
- name: Install python dependencies
uses: ./.github/actions/install-aiida-core
with:
- python-version: '3.10'
+ python-version: '3.11'
extras: '[pre-commit]'
- from-requirements: 'true'
+ from-requirements: 'false'
- name: Run pre-commit
run: pre-commit run --all-files || ( git status --short ; git diff ; exit 1 )
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
index c212286b26..0fe70299b0 100644
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -49,6 +49,10 @@ jobs:
ports:
- 5672:5672
- 15672:15672
+ slurm:
+ image: xenonmiddleware/slurm:17
+ ports:
+ - 5001:22
steps:
- uses: actions/checkout@v4
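
A note on the new `slurm` service: the `xenonmiddleware/slurm` container's SSH port 22 is published on host port 5001, so the nightly tests can reach the SLURM head node over SSH at `localhost:5001`. A minimal reachability probe, shown only to illustrate the port mapping (not part of the workflow):

```python
import socket

# Connect to the SSH daemon of the slurm service container (container port 22 -> host port 5001).
with socket.create_connection(('localhost', 5001), timeout=10) as sock:
    banner = sock.recv(256)

# Per RFC 4253, an SSH server opens with an identification string starting with `SSH-`.
assert banner.startswith(b'SSH-'), banner
```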
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 39091b2805..42cfd9584d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -6,7 +6,7 @@ ci:
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v4.5.0
+ rev: v4.6.0
hooks:
- id: check-merge-conflict
- id: check-yaml
@@ -37,7 +37,7 @@ repos:
args: [--line-length=120, --fail-on-change]
- repo: https://github.com/astral-sh/ruff-pre-commit
- rev: v0.3.5
+ rev: v0.4.1
hooks:
- id: ruff-format
exclude: &exclude_ruff >
@@ -50,7 +50,7 @@ repos:
args: [--fix, --exit-non-zero-on-fix, --show-fixes]
- repo: https://github.com/macisamuele/language-formatters-pre-commit-hooks
- rev: v2.12.0
+ rev: v2.13.0
hooks:
- id: pretty-format-toml
args: [--autofix]
diff --git a/docs/source/_static/cheatsheet_h.pdf b/docs/source/_static/cheatsheet_h.pdf
new file mode 100644
index 0000000000..a5f41bb55d
Binary files /dev/null and b/docs/source/_static/cheatsheet_h.pdf differ
diff --git a/docs/source/intro/_cheatsheet/cheatsheet.png b/docs/source/intro/_cheatsheet/cheatsheet.png
new file mode 100644
index 0000000000..472ff62763
Binary files /dev/null and b/docs/source/intro/_cheatsheet/cheatsheet.png differ
diff --git a/docs/source/intro/_cheatsheet/cheatsheet.svg b/docs/source/intro/_cheatsheet/cheatsheet.svg
new file mode 100644
index 0000000000..49b2702f07
--- /dev/null
+++ b/docs/source/intro/_cheatsheet/cheatsheet.svg
@@ -0,0 +1,20713 @@
+
+
+
+
diff --git a/docs/source/intro/_cheatsheet/cheatsheet_v.pdf b/docs/source/intro/_cheatsheet/cheatsheet_v.pdf
new file mode 100644
index 0000000000..ddb1e19076
Binary files /dev/null and b/docs/source/intro/_cheatsheet/cheatsheet_v.pdf differ
diff --git a/docs/source/intro/cheatsheet.rst b/docs/source/intro/cheatsheet.rst
new file mode 100644
index 0000000000..f91c7cc698
--- /dev/null
+++ b/docs/source/intro/cheatsheet.rst
@@ -0,0 +1,16 @@
+.. _intro:cheatsheet:
+
+=====================
+The AiiDA cheat sheet
+=====================
+
+The AiiDA cheat sheet gives a broad overview of the most commonly used `verdi` commands, the inheritance hierarchy of the main AiiDA classes with their attributes and methods, as well as a showcase of the `QueryBuilder`.
+
+Clicking on the embedded image opens the PDF version in the browser. Where applicable, text elements contain hyperlinks to the relevant sections of the documentation.
+
+The file can also be :download:`downloaded <_cheatsheet/cheatsheet_v.pdf>` in a two-page layout for printing.
+
+Happy exploring!
+
+.. image:: ./_cheatsheet/cheatsheet.png
+ :target: ../_static/cheatsheet_h.pdf
diff --git a/docs/source/intro/index.rst b/docs/source/intro/index.rst
index 571dde9daf..779def1ae1 100644
--- a/docs/source/intro/index.rst
+++ b/docs/source/intro/index.rst
@@ -31,5 +31,5 @@ See also the `list of AiiDA-powered scientific publications
+ cheatsheet
troubleshooting
diff --git a/docs/source/reference/command_line.rst b/docs/source/reference/command_line.rst
index d15c3b3ce4..6822df9f0f 100644
--- a/docs/source/reference/command_line.rst
+++ b/docs/source/reference/command_line.rst
@@ -313,6 +313,44 @@ Below is a list with all available subcommands.
list Display a list of all available plugins.
+.. _reference:command-line:verdi-presto:
+
+``verdi presto``
+----------------
+
+.. code:: console
+
+ Usage: [OPTIONS]
+
+ Set up a new profile in a jiffy.
+
+    This command aims to make setting up a new profile as easy as possible. It intentionally
+    provides only a limited number of options to customize the profile and by default does
+ not require any options to be specified at all. For full control, please use `verdi
+ profile setup`.
+
+ After running `verdi presto` you can immediately start using AiiDA without additional
+ setup. The created profile uses the `core.sqlite_dos` storage plugin which does not
+ require any services, such as PostgreSQL. The broker service RabbitMQ is also optional.
+ The command tries to connect to it using default settings and configures it for the
+ profile if found. Otherwise, the profile is created without a broker, in which case some
+ functionality will be unavailable, most notably running the daemon and submitting
+ processes to said daemon.
+
+ The command performs the following actions:
+
+ * Create a new profile that is set as the new default
+ * Create a default user for the profile (email can be configured through the `--email` option)
+ * Set up the localhost as a `Computer` and configure it
+ * Set a number of configuration options with sensible defaults
+
+ Options:
+ --profile-name TEXT Name of the profile. By default, a unique name starting with
+ `presto` is automatically generated. [default: (dynamic)]
+ --email TEXT Email of the default user. [default: aiida@localhost]
+ --help Show this message and exit.
+
+
.. _reference:command-line:verdi-process:
``verdi process``
diff --git a/pyproject.toml b/pyproject.toml
index 4d8543b6e2..dce5a30b7c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -210,6 +210,7 @@ notebook = [
'notebook~=6.1,>=6.1.5'
]
pre-commit = [
+ 'aiida-core[atomic_tools,rest,tests,tui]',
'mypy~=1.7.1',
'packaging~=23.0',
'pre-commit~=2.2',
@@ -411,6 +412,7 @@ select = [
[tool.tox]
legacy_tox_ini = """
[tox]
+minversion = 3.18.0
envlist = py39
[testenv]
@@ -443,7 +445,7 @@ passenv = RUN_APIDOC
setenv =
update: RUN_APIDOC = False
changedir = docs
-whitelist_externals = make
+allowlist_externals = make
commands =
clean: make clean
make debug
diff --git a/src/aiida/brokers/rabbitmq/broker.py b/src/aiida/brokers/rabbitmq/broker.py
index 9916d1398d..dab19e28b7 100644
--- a/src/aiida/brokers/rabbitmq/broker.py
+++ b/src/aiida/brokers/rabbitmq/broker.py
@@ -5,8 +5,6 @@
import functools
import typing as t
-from packaging.version import parse
-
from aiida.brokers.broker import Broker
from aiida.common.log import AIIDA_LOGGER
from aiida.manage.configuration import get_config_option
@@ -110,6 +108,8 @@ def is_rabbitmq_version_supported(self) -> bool:
:return: boolean whether the current RabbitMQ version is supported.
"""
+ from packaging.version import parse
+
return parse('3.6.0') <= self.get_rabbitmq_version() < parse('3.8.15')
def get_rabbitmq_version(self):
@@ -117,4 +117,6 @@ def get_rabbitmq_version(self):
:return: :class:`packaging.version.Version`
"""
+ from packaging.version import parse
+
return parse(self.get_communicator().server_properties['version'].decode('utf-8'))
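
Deferring `from packaging.version import parse` into the method bodies keeps `packaging` off the module-import path, so importing the broker module stays cheap. The check itself leans on `packaging`'s ordered `Version` objects; a small illustration with hypothetical version strings:

```python
from packaging.version import parse

# `parse` returns a `Version` supporting ordered comparison, so the supported
# range reads as a single chained comparison, as in `is_rabbitmq_version_supported`.
assert parse('3.6.0') <= parse('3.8.2') < parse('3.8.15')
assert not (parse('3.6.0') <= parse('3.9.13') < parse('3.8.15'))
```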
diff --git a/src/aiida/brokers/rabbitmq/defaults.py b/src/aiida/brokers/rabbitmq/defaults.py
index 6e3684ae0e..c1e4daa5b8 100644
--- a/src/aiida/brokers/rabbitmq/defaults.py
+++ b/src/aiida/brokers/rabbitmq/defaults.py
@@ -1,5 +1,9 @@
"""Defaults related to RabbitMQ."""
+from __future__ import annotations
+
+import typing as t
+
from aiida.common.extendeddicts import AttributeDict
__all__ = ('BROKER_DEFAULTS',)
@@ -19,3 +23,20 @@
'heartbeat': 600,
}
)
+
+
+def detect_rabbitmq_config() -> dict[str, t.Any] | None:
+ """Try to connect to a RabbitMQ server with the default connection parameters.
+
+ :returns: The connection parameters if the RabbitMQ server was successfully connected to, or ``None`` otherwise.
+ """
+ from kiwipy.rmq.threadcomms import connect
+
+ connection_params = dict(BROKER_DEFAULTS)
+
+ try:
+ connect(connection_params=connection_params)
+ except ConnectionError:
+ return None
+
+ return connection_params
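
`detect_rabbitmq_config` returns either the default connection parameters or ``None``, which is exactly the shape `verdi presto` (added below) consumes to decide between a broker-enabled and a broker-less profile. The calling pattern, mirroring `cmd_presto.py`:

```python
from aiida.brokers.rabbitmq.defaults import detect_rabbitmq_config

broker_config = detect_rabbitmq_config()
# A `None` result means no RabbitMQ server answered with the default settings:
# the profile is then created without a broker (no daemon, no submitting to it).
broker_backend = 'core.rabbitmq' if broker_config is not None else None
```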
diff --git a/src/aiida/cmdline/commands/__init__.py b/src/aiida/cmdline/commands/__init__.py
index 4be9693761..79f9be05af 100644
--- a/src/aiida/cmdline/commands/__init__.py
+++ b/src/aiida/cmdline/commands/__init__.py
@@ -26,6 +26,7 @@
cmd_help,
cmd_node,
cmd_plugin,
+ cmd_presto,
cmd_process,
cmd_profile,
cmd_rabbitmq,
diff --git a/src/aiida/cmdline/commands/cmd_presto.py b/src/aiida/cmdline/commands/cmd_presto.py
new file mode 100644
index 0000000000..a887a1a42b
--- /dev/null
+++ b/src/aiida/cmdline/commands/cmd_presto.py
@@ -0,0 +1,133 @@
+###########################################################################
+# Copyright (c), The AiiDA team. All rights reserved. #
+# This file is part of the AiiDA code. #
+# #
+# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
+# For further information on the license, see the LICENSE.txt file #
+# For further information please visit http://www.aiida.net #
+###########################################################################
+"""``verdi presto`` command."""
+
+from __future__ import annotations
+
+import pathlib
+import re
+import typing as t
+
+import click
+
+from aiida.cmdline.commands.cmd_verdi import verdi
+from aiida.cmdline.utils import echo
+from aiida.manage.configuration import get_config_option
+
+DEFAULT_PROFILE_NAME_PREFIX: str = 'presto'
+
+
+def get_default_presto_profile_name() -> str:
+    """Return a default profile name that does not collide with existing ``presto`` profiles."""
+    from aiida.manage import get_config
+
+    profile_names = get_config().profile_names
+    indices = []
+
+    for profile_name in profile_names:
+        if match := re.search(r'presto[-]?(\d+)?', profile_name):
+            # Cast to ``int`` so the indices compare numerically: a string sort would rank ``'9'`` above ``'10'``.
+            indices.append(int(match.group(1) or 0))
+
+    if not indices:
+        return DEFAULT_PROFILE_NAME_PREFIX
+
+    last_index = max(indices)
+
+    return f'{DEFAULT_PROFILE_NAME_PREFIX}-{last_index + 1}'
+
+
+@verdi.command('presto')
+@click.option(
+ '--profile-name',
+ default=lambda: get_default_presto_profile_name(),
+ show_default=True,
+ help=f'Name of the profile. By default, a unique name starting with `{DEFAULT_PROFILE_NAME_PREFIX}` is '
+ 'automatically generated.',
+)
+@click.option(
+ '--email',
+ default=get_config_option('autofill.user.email') or 'aiida@localhost',
+ show_default=True,
+ help='Email of the default user.',
+)
+@click.pass_context
+def verdi_presto(ctx, profile_name, email):
+ """Set up a new profile in a jiffy.
+
+    This command aims to make setting up a new profile as easy as possible. It intentionally provides only a limited
+    number of options to customize the profile and by default does not require any options to be specified at all. For
+ full control, please use `verdi profile setup`.
+
+ After running `verdi presto` you can immediately start using AiiDA without additional setup. The created profile
+ uses the `core.sqlite_dos` storage plugin which does not require any services, such as PostgreSQL. The broker
+ service RabbitMQ is also optional. The command tries to connect to it using default settings and configures it for
+ the profile if found. Otherwise, the profile is created without a broker, in which case some functionality will be
+ unavailable, most notably running the daemon and submitting processes to said daemon.
+
+ The command performs the following actions:
+
+ \b
+ * Create a new profile that is set as the new default
+ * Create a default user for the profile (email can be configured through the `--email` option)
+ * Set up the localhost as a `Computer` and configure it
+ * Set a number of configuration options with sensible defaults
+
+ """
+ from aiida.brokers.rabbitmq.defaults import detect_rabbitmq_config
+ from aiida.common import exceptions
+ from aiida.manage.configuration import create_profile, load_profile
+ from aiida.orm import Computer
+
+ storage_config: dict[str, t.Any] = {}
+ storage_backend = 'core.sqlite_dos'
+
+ broker_config = detect_rabbitmq_config()
+ broker_backend = 'core.rabbitmq' if broker_config is not None else None
+
+ if broker_config is None:
+ echo.echo_report('RabbitMQ server not found: configuring the profile without a broker.')
+ else:
+ echo.echo_report('RabbitMQ server detected: configuring the profile with a broker.')
+
+ try:
+ profile = create_profile(
+ ctx.obj.config,
+ name=profile_name,
+ email=email,
+ storage_backend=storage_backend,
+ storage_config=storage_config,
+ broker_backend=broker_backend,
+ broker_config=broker_config,
+ )
+ except (ValueError, TypeError, exceptions.EntryPointError, exceptions.StorageMigrationError) as exception:
+ echo.echo_critical(str(exception))
+
+ echo.echo_success(f'Created new profile `{profile.name}`.')
+
+ ctx.obj.config.set_option('runner.poll.interval', 1, scope=profile.name)
+ ctx.obj.config.set_default_profile(profile.name, overwrite=True)
+ ctx.obj.config.store()
+
+ load_profile(profile.name, allow_switch=True)
+ echo.echo_info(f'Loaded newly created profile `{profile.name}`.')
+
+ filepath_scratch = pathlib.Path(ctx.obj.config.dirpath) / 'scratch' / profile.name
+
+ computer = Computer(
+ label='localhost',
+ hostname='localhost',
+ description='Localhost automatically created by `verdi presto`',
+ transport_type='core.local',
+ scheduler_type='core.direct',
+ workdir=str(filepath_scratch),
+ ).store()
+ computer.configure(safe_interval=0)
+ computer.set_minimum_job_poll_interval(1)
+ computer.set_default_mpiprocs_per_machine(1)
+
+ echo.echo_success('Configured the localhost as a computer.')
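
Being a plain `click` command, `verdi presto` can also be exercised with click's test runner. A rough sketch, invoking through the top-level `verdi` group so the context object carrying the `Config` is set up (in the test suite this wiring, including a temporary configuration directory, is provided by the `run_cli_command` fixture):

```python
from click.testing import CliRunner

from aiida.cmdline.commands.cmd_verdi import verdi

runner = CliRunner()
# `--profile-name` overrides the dynamic `presto-N` default; `--email` the default user.
result = runner.invoke(verdi, ['presto', '--profile-name', 'demo', '--email', 'user@example.com'])
print(result.exit_code, result.output)
```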
diff --git a/src/aiida/cmdline/commands/cmd_profile.py b/src/aiida/cmdline/commands/cmd_profile.py
index e84a6aab4a..0d22b9025b 100644
--- a/src/aiida/cmdline/commands/cmd_profile.py
+++ b/src/aiida/cmdline/commands/cmd_profile.py
@@ -116,7 +116,10 @@ def profile_list():
echo.echo_report(f'configuration folder: {config.dirpath}')
if not config.profiles:
- echo.echo_warning('no profiles configured: run `verdi setup` to create one')
+ echo.echo_warning(
+            'no profiles configured: run `verdi presto` to automatically set up a profile using all defaults or use '
+ '`verdi profile setup` for more control.'
+ )
else:
sort = lambda profile: profile.name # noqa: E731
highlight = lambda profile: profile.name == config.default_profile_name # noqa: E731
diff --git a/src/aiida/cmdline/commands/cmd_status.py b/src/aiida/cmdline/commands/cmd_status.py
index 7317e334fc..dc4521af02 100644
--- a/src/aiida/cmdline/commands/cmd_status.py
+++ b/src/aiida/cmdline/commands/cmd_status.py
@@ -75,7 +75,10 @@ def verdi_status(print_traceback, no_rmq):
if profile is None:
print_status(ServiceStatus.WARNING, 'profile', 'no profile configured yet')
- echo.echo_report('Configure a profile by running `verdi quicksetup` or `verdi setup`.')
+ echo.echo_report(
+        'Run `verdi presto` to automatically set up a profile using all defaults or use `verdi profile setup` '
+ 'for more control.'
+ )
return
print_status(ServiceStatus.UP, 'profile', profile.name)
diff --git a/src/aiida/cmdline/params/types/path.py b/src/aiida/cmdline/params/types/path.py
index f52acd82c6..7332b98a70 100644
--- a/src/aiida/cmdline/params/types/path.py
+++ b/src/aiida/cmdline/params/types/path.py
@@ -9,6 +9,7 @@
"""Click parameter types for paths."""
import os
+import pathlib
import click
@@ -60,13 +61,42 @@ def __repr__(self):
return 'ABSOLUTEPATHEMPTY'
-class PathOrUrl(click.Path):
- """Extension of click's Path-type to include URLs.
+def convert_possible_url(value: str, timeout: int):
+ """If ``value`` does not correspond to a path on disk, try to open it as a URL.
+
+    :param value: Potential path to a file on disk or a URL.
+    :param timeout: The timeout in seconds when opening the URL.
+    :returns: The open handle if ``value`` could be opened as a URL.
+    :raises click.BadParameter: If ``value`` is neither a readable path on disk nor a reachable URL.
+ """
+ import socket
+ import urllib.error
+ import urllib.request
+
+ filepath = pathlib.Path(value)
+
+    # The path exists on disk but click's checks failed (e.g. it is not readable), so do not try it as a URL.
+ if filepath.exists():
+ raise click.BadParameter(f'The path `{value}` exists but could not be read.')
- A PathOrUrl can either be a `click.Path`-type or a URL.
+ try:
+ return urllib.request.urlopen(value, timeout=timeout)
+    except urllib.error.URLError as exception:
+        raise click.BadParameter(f'The URL `{value}` could not be reached.') from exception
+    except socket.timeout as exception:
+        raise click.BadParameter(f'The URL `{value}` could not be reached within {timeout} seconds.') from exception
+ except ValueError as exception_url:
+ raise click.BadParameter(
+ f'The path `{value}` does not correspond to a file and also could not be reached as a URL.\n'
+ 'Please check the spelling for typos and if it is a URL, make sure to include the protocol, e.g., http://'
+ ) from exception_url
+
+
+class PathOrUrl(click.Path):
+ """Parameter type that accepts a path on the local file system or a URL.
- :param int timeout_seconds: Maximum timeout accepted for URL response.
- Must be an integer in the range [0;60].
+ :param timeout_seconds: Maximum timeout accepted for URL response. Must be an integer in the range [0;60].
+ :returns: The path or URL.
"""
name = 'PathOrUrl'
@@ -77,34 +107,18 @@ def __init__(self, timeout_seconds=URL_TIMEOUT_SECONDS, **kwargs):
self.timeout_seconds = check_timeout_seconds(timeout_seconds)
def convert(self, value, param, ctx):
- """Overwrite `convert` Check first if `click.Path`-type, then check if URL."""
try:
return super().convert(value, param, ctx)
except click.exceptions.BadParameter:
- return self.checks_url(value, param, ctx)
-
- def checks_url(self, url, param, ctx):
- """Check whether URL is reachable within timeout."""
- import socket
- import urllib.error
- import urllib.request
-
- try:
- with urllib.request.urlopen(url, timeout=self.timeout_seconds):
- pass
- except (urllib.error.URLError, urllib.error.HTTPError, socket.timeout):
- self.fail(f'{self.name} "{url}" could not be reached within {self.timeout_seconds} s.\n', param, ctx)
-
- return url
+            # Only validate that ``value`` is reachable as a URL; the open handle is discarded and the path returned.
+            convert_possible_url(value, self.timeout_seconds)
+            return value
class FileOrUrl(click.File):
- """Extension of click's File-type to include URLs.
+ """Parameter type that accepts a path on the local file system or a URL.
- Returns handle either to local file or to remote file fetched from URL.
-
- :param int timeout_seconds: Maximum timeout accepted for URL response.
- Must be an integer in the range [0;60].
+ :param timeout_seconds: Maximum timeout accepted for URL response. Must be an integer in the range [0;60].
+ :returns: The file or URL.
"""
name = 'FileOrUrl'
@@ -115,20 +129,7 @@ def __init__(self, timeout_seconds=URL_TIMEOUT_SECONDS, **kwargs):
self.timeout_seconds = check_timeout_seconds(timeout_seconds)
def convert(self, value, param, ctx):
- """Return file handle."""
try:
return super().convert(value, param, ctx)
except click.exceptions.BadParameter:
- handle = self.get_url(value, param, ctx)
- return handle
-
- def get_url(self, url, param, ctx):
- """Retrieve file from URL."""
- import socket
- import urllib.error
- import urllib.request
-
- try:
- return urllib.request.urlopen(url, timeout=self.timeout_seconds)
- except (urllib.error.URLError, urllib.error.HTTPError, socket.timeout):
- self.fail(f'{self.name} "{url}" could not be reached within {self.timeout_seconds} s.\n', param, ctx)
+ return convert_possible_url(value, self.timeout_seconds)
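
Both parameter types now funnel through `convert_possible_url`; they differ only in what `convert` returns. `PathOrUrl` validates reachability and hands back the original string, while `FileOrUrl` returns the open handle. A hypothetical command using both:

```python
import click

from aiida.cmdline.params.types.path import FileOrUrl, PathOrUrl

@click.command()
@click.argument('source', type=PathOrUrl(exists=True))
@click.argument('archive', type=FileOrUrl(mode='rb'))
def show(source, archive):
    """SOURCE stays a string (path or URL); ARCHIVE is an open handle, local file or remote URL."""
    click.echo(f'source: {source}')
    click.echo(f'first bytes: {archive.read(16)!r}')
```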
diff --git a/src/aiida/manage/configuration/__init__.py b/src/aiida/manage/configuration/__init__.py
index 119311594a..7227281507 100644
--- a/src/aiida/manage/configuration/__init__.py
+++ b/src/aiida/manage/configuration/__init__.py
@@ -188,8 +188,7 @@ def profile_context(profile: 'Profile' | str | None = None, allow_switch=False)
manager = get_manager()
current_profile = manager.get_profile()
- manager.load_profile(profile, allow_switch)
- yield profile
+ yield manager.load_profile(profile, allow_switch)
if current_profile is None:
manager.unload_profile()
else:
@@ -234,7 +233,7 @@ def create_default_user(
if user:
manager.set_default_user_email(profile, user.email)
- return
+ return user
def create_profile(
diff --git a/src/aiida/orm/implementation/storage_backend.py b/src/aiida/orm/implementation/storage_backend.py
index 10a0c96875..6137508f51 100644
--- a/src/aiida/orm/implementation/storage_backend.py
+++ b/src/aiida/orm/implementation/storage_backend.py
@@ -405,9 +405,18 @@ def backup(
:raises StorageBackupError: If an error occurred during the backup procedure.
:raises NotImplementedError: If the storage backend doesn't implement a backup procedure.
"""
+ from aiida.common.exceptions import LockedProfileError, StorageBackupError
from aiida.manage.configuration.settings import DEFAULT_CONFIG_FILE_NAME
+ from aiida.manage.profile_access import ProfileAccessManager
from aiida.storage.log import STORAGE_LOGGER
+ # check that the AiiDA profile is not locked and request access for the duration of this backup process
+ # (locked means that possibly a maintenance operation is running that could interfere with the backup)
+ try:
+ ProfileAccessManager(self._profile).request_access()
+ except LockedProfileError as exc:
+ raise StorageBackupError(f'{self._profile} is locked!') from exc
+
backup_manager = self._validate_or_init_backup_folder(dest, keep)
try:
diff --git a/src/aiida/orm/nodes/data/array/bands.py b/src/aiida/orm/nodes/data/array/bands.py
index b31f0f44d2..a33f104111 100644
--- a/src/aiida/orm/nodes/data/array/bands.py
+++ b/src/aiida/orm/nodes/data/array/bands.py
@@ -1033,20 +1033,17 @@ def _prepare_gnuplot(
script.append(f'set ylabel "Dispersion ({self.units})"')
if title:
- script.append('set title "{}"'.format(title.replace('"', '"')))
+ script.append(f'set title "{title}"')
# Plot, escaping filename
+ filename = os.path.basename(dat_filename)
if len(x) > 1:
script.append(f'set xrange [{x_min_lim}:{x_max_lim}]')
script.append('set grid xtics lt 1 lc rgb "#888888"')
- script.append('plot "{}" with l lc rgb "#000000"'.format(os.path.basename(dat_filename).replace('"', '"')))
+ script.append(f'plot "{filename}" with l lc rgb "#000000"')
else:
script.append('set xrange [-1.0:1.0]')
- script.append(
- 'plot "{}" using ($1-0.25):($2):(0.5):(0) with vectors nohead lc rgb "#000000"'.format(
- os.path.basename(dat_filename).replace('"', '"')
- )
- )
+ script.append(f'plot "{filename}" using ($1-0.25):($2):(0.5):(0) with vectors nohead lc rgb "#000000"')
script_data = '\n'.join(script) + '\n'
extra_files = {dat_filename: raw_data}
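
The dropped `.replace('"', '"')` calls replaced a double quote with itself, i.e. they were no-ops, so the f-string rewrite is behavior-preserving. Note that neither version escapes quotes for gnuplot; if titles or filenames could ever contain a `"`, a real escape would be needed. A sketch with a hypothetical helper (not in the source):

```python
def gnuplot_quote(text: str) -> str:
    """Escape backslashes and double quotes for use inside a gnuplot double-quoted string."""
    return text.replace('\\', '\\\\').replace('"', '\\"')

title = 'my "special" bands'
script_line = f'set title "{gnuplot_quote(title)}"'  # -> set title "my \"special\" bands"
```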
diff --git a/src/aiida/orm/nodes/data/code/abstract.py b/src/aiida/orm/nodes/data/code/abstract.py
index ef8aef920b..204312bdd8 100644
--- a/src/aiida/orm/nodes/data/code/abstract.py
+++ b/src/aiida/orm/nodes/data/code/abstract.py
@@ -45,7 +45,7 @@ class AbstractCode(Data, metaclass=abc.ABCMeta):
_KEY_ATTRIBUTE_WRAP_CMDLINE_PARAMS: str = 'wrap_cmdline_params'
_KEY_EXTRA_IS_HIDDEN: str = 'hidden' # Should become ``is_hidden`` once ``Code`` is dropped
- class Model(BaseModel):
+ class Model(BaseModel, defer_build=True):
"""Model describing required information to create an instance."""
label: str = MetadataField(
diff --git a/src/aiida/orm/nodes/data/folder.py b/src/aiida/orm/nodes/data/folder.py
index 86787e6c1d..c0d385c961 100644
--- a/src/aiida/orm/nodes/data/folder.py
+++ b/src/aiida/orm/nodes/data/folder.py
@@ -15,10 +15,12 @@
import pathlib
import typing as t
-from aiida.repository import File
-
from .data import Data
+if t.TYPE_CHECKING:
+ from aiida.repository import File
+
+
__all__ = ('FolderData',)
FilePath = t.Union[str, pathlib.PurePosixPath]
diff --git a/src/aiida/orm/nodes/node.py b/src/aiida/orm/nodes/node.py
index e678c5afd8..442d3624e9 100644
--- a/src/aiida/orm/nodes/node.py
+++ b/src/aiida/orm/nodes/node.py
@@ -38,13 +38,13 @@
from .caching import NodeCaching
from .comments import NodeComments
from .links import NodeLinks
-from .repository import NodeRepository
if TYPE_CHECKING:
from importlib_metadata import EntryPoint
from ..implementation import StorageBackend
from ..implementation.nodes import BackendNode # noqa: F401
+ from .repository import NodeRepository
__all__ = ('Node',)
@@ -107,6 +107,8 @@ def __init__(self, node: 'Node') -> None:
@cached_property
def repository(self) -> 'NodeRepository':
"""Return the repository for this node."""
+ from .repository import NodeRepository
+
return NodeRepository(self._node)
@cached_property
diff --git a/src/aiida/orm/nodes/repository.py b/src/aiida/orm/nodes/repository.py
index a886c896cf..bc24fe1377 100644
--- a/src/aiida/orm/nodes/repository.py
+++ b/src/aiida/orm/nodes/repository.py
@@ -12,10 +12,10 @@
from aiida.common import exceptions
from aiida.manage import get_config_option
-from aiida.repository import File, Repository
-from aiida.repository.backend import SandboxRepositoryBackend
if t.TYPE_CHECKING:
+ from aiida.repository import File, Repository
+
from .node import Node
__all__ = ('NodeRepository',)
@@ -77,6 +77,9 @@ def _repository(self) -> Repository:
:return: the file repository instance.
"""
+ from aiida.repository import Repository
+ from aiida.repository.backend import SandboxRepositoryBackend
+
if self._repository_instance is None:
if self._node.is_stored:
backend = self._node.backend.get_repository()
@@ -100,6 +103,9 @@ def _repository(self, repository: Repository) -> None:
def _store(self) -> None:
"""Store the repository in the backend."""
+ from aiida.repository import Repository
+ from aiida.repository.backend import SandboxRepositoryBackend
+
if isinstance(self._repository.backend, SandboxRepositoryBackend):
# Only if the backend repository is a sandbox do we have to clone its contents to the permanent repository.
repository_backend = self._node.backend.get_repository()
diff --git a/src/aiida/storage/psql_dos/backend.py b/src/aiida/storage/psql_dos/backend.py
index 2431f456dd..82c92f1bfe 100644
--- a/src/aiida/storage/psql_dos/backend.py
+++ b/src/aiida/storage/psql_dos/backend.py
@@ -74,7 +74,7 @@ class PsqlDosBackend(StorageBackend):
The `django` backend was removed, to consolidate access to this storage.
"""
- class Model(BaseModel):
+ class Model(BaseModel, defer_build=True):
"""Model describing required information to configure an instance of the storage."""
database_engine: str = Field(
@@ -506,8 +506,6 @@ def _backup_storage(
import subprocess
import tempfile
- from aiida.manage.profile_access import ProfileAccessManager
-
STORAGE_LOGGER.report('Starting backup...')
# This command calls `rsync` and `pg_dump` executables. check that they are in PATH
@@ -518,13 +516,6 @@ def _backup_storage(
cfg = self._profile.storage_config
container = Container(get_filepath_container(self.profile))
- # check that the AiiDA profile is not locked and request access for the duration of this backup process
- # (locked means that possibly a maintenance operation is running that could interfere with the backup)
- try:
- ProfileAccessManager(self._profile).request_access()
- except exceptions.LockedProfileError as exc:
- raise exceptions.StorageBackupError('The profile is locked!') from exc
-
# step 1: first run the storage maintenance version that can safely be performed while aiida is running
STORAGE_LOGGER.report('Running basic maintenance...')
self.maintain(full=False, compress=False)
diff --git a/src/aiida/storage/sqlite_dos/backend.py b/src/aiida/storage/sqlite_dos/backend.py
index 890e082914..21195d2475 100644
--- a/src/aiida/storage/sqlite_dos/backend.py
+++ b/src/aiida/storage/sqlite_dos/backend.py
@@ -10,17 +10,18 @@
from __future__ import annotations
-from functools import cached_property
+from functools import cached_property, lru_cache
from pathlib import Path
from shutil import rmtree
from typing import TYPE_CHECKING, Optional
from uuid import uuid4
-from disk_objectstore import Container
+from disk_objectstore import Container, backup_utils
from pydantic import BaseModel, Field, field_validator
from sqlalchemy import insert
from sqlalchemy.orm import scoped_session, sessionmaker
+from aiida.common import exceptions
from aiida.common.log import AIIDA_LOGGER
from aiida.manage import Profile
from aiida.manage.configuration.settings import AIIDA_CONFIG_FOLDER
@@ -34,11 +35,14 @@
from ..psql_dos.migrator import REPOSITORY_UUID_KEY, PsqlDosMigrator
if TYPE_CHECKING:
+ from aiida.orm.entities import EntityTypes
from aiida.repository.backend import DiskObjectStoreRepositoryBackend
__all__ = ('SqliteDosStorage',)
LOGGER = AIIDA_LOGGER.getChild(__file__)
+FILENAME_DATABASE = 'database.sqlite'
+FILENAME_CONTAINER = 'container'
class SqliteDosMigrator(PsqlDosMigrator):
@@ -51,7 +55,7 @@ class SqliteDosMigrator(PsqlDosMigrator):
"""
def __init__(self, profile: Profile) -> None:
- filepath_database = Path(profile.storage_config['filepath']) / 'database.sqlite'
+ filepath_database = Path(profile.storage_config['filepath']) / FILENAME_DATABASE
filepath_database.touch()
self.profile = profile
@@ -63,7 +67,7 @@ def get_container(self) -> Container:
:returns: The disk-object store container configured for the repository path of the current profile.
"""
- filepath_container = Path(self.profile.storage_config['filepath']) / 'container'
+ filepath_container = Path(self.profile.storage_config['filepath']) / FILENAME_CONTAINER
return Container(str(filepath_container))
def initialise_database(self) -> None:
@@ -96,13 +100,13 @@ class SqliteDosStorage(PsqlDosBackend):
migrator = SqliteDosMigrator
- class Model(BaseModel):
+ class Model(BaseModel, defer_build=True):
"""Model describing required information to configure an instance of the storage."""
filepath: str = Field(
title='Directory of the backend',
description='Filepath of the directory in which to store data for this backend.',
- default_factory=lambda: AIIDA_CONFIG_FOLDER / 'repository' / f'sqlite_dos_{uuid4().hex}',
+ default_factory=lambda: str(AIIDA_CONFIG_FOLDER / 'repository' / f'sqlite_dos_{uuid4().hex}'),
)
@field_validator('filepath')
@@ -111,6 +115,18 @@ def filepath_is_absolute(cls, value: str) -> str:
"""Return the resolved and absolute filepath."""
return str(Path(value).resolve().absolute())
+ @property
+ def filepath_root(self) -> Path:
+ return Path(self.profile.storage_config['filepath'])
+
+ @property
+ def filepath_container(self) -> Path:
+ return self.filepath_root / FILENAME_CONTAINER
+
+ @property
+ def filepath_database(self) -> Path:
+ return self.filepath_root / FILENAME_DATABASE
+
@classmethod
def initialise(cls, profile: Profile, reset: bool = False) -> bool:
filepath = Path(profile.storage_config['filepath'])
@@ -131,7 +147,7 @@ def initialise(cls, profile: Profile, reset: bool = False) -> bool:
def __str__(self) -> str:
state = 'closed' if self.is_closed else 'open'
- return f'SqliteDosStorage[{self._profile.storage_config["filepath"]}]: {state},'
+ return f'SqliteDosStorage[{self.filepath_root}]: {state},'
def _initialise_session(self):
"""Initialise the SQLAlchemy session factory.
@@ -143,28 +159,22 @@ def _initialise_session(self):
Multi-thread support is currently required by the REST API.
Although, in the future, we may want to move the multi-thread handling to higher in the AiiDA stack.
"""
- engine = create_sqla_engine(Path(self._profile.storage_config['filepath']) / 'database.sqlite')
+ engine = create_sqla_engine(self.filepath_database)
self._session_factory = scoped_session(sessionmaker(bind=engine, future=True, expire_on_commit=True))
- def _backup(
- self,
- dest: str,
- keep: Optional[int] = None,
- ):
- raise NotImplementedError
-
def delete(self) -> None: # type: ignore[override]
"""Delete the storage and all the data."""
- filepath = Path(self.profile.storage_config['filepath'])
- if filepath.exists():
- rmtree(filepath)
- LOGGER.report(f'Deleted storage directory at `{filepath}`.')
+ if self.filepath_root.exists():
+ rmtree(self.filepath_root)
+ LOGGER.report(f'Deleted storage directory at `{self.filepath_root}`.')
+
+ def get_container(self) -> 'Container':
+ return Container(str(self.filepath_container))
def get_repository(self) -> 'DiskObjectStoreRepositoryBackend':
from aiida.repository.backend import DiskObjectStoreRepositoryBackend
- container = Container(str(Path(self.profile.storage_config['filepath']) / 'container'))
- return DiskObjectStoreRepositoryBackend(container=container)
+ return DiskObjectStoreRepositoryBackend(container=self.get_container())
@classmethod
def version_head(cls) -> str:
@@ -208,3 +218,59 @@ def nodes(self):
@cached_property
def users(self):
return orm.SqliteUserCollection(self)
+
+ @staticmethod
+ @lru_cache(maxsize=18)
+ def _get_mapper_from_entity(entity_type: 'EntityTypes', with_pk: bool):
+ """Return the Sqlalchemy mapper and fields corresponding to the given entity.
+
+ :param with_pk: if True, the fields returned will include the primary key
+ """
+ from sqlalchemy import inspect
+
+ from ..sqlite_zip.models import MAP_ENTITY_TYPE_TO_MODEL
+
+ model = MAP_ENTITY_TYPE_TO_MODEL[entity_type]
+ mapper = inspect(model).mapper # type: ignore[union-attr]
+ keys = {key for key, col in mapper.c.items() if with_pk or col not in mapper.primary_key}
+ return mapper, keys
+
+ def _backup(
+ self,
+ dest: str,
+ keep: Optional[int] = None,
+ ):
+ """Create a backup of the storage.
+
+ :param dest: Path to where the backup will be created. Can be a path on the local file system, or a path on a
+ remote that can be accessed over SSH in the form ``@:``.
+ :param keep: The maximum number of backups to keep. If the number of copies exceeds this number, the oldest
+ backups are removed.
+ """
+ try:
+ backup_manager = backup_utils.BackupManager(dest, keep=keep)
+ backup_manager.backup_auto_folders(lambda path, prev: self._backup_storage(backup_manager, path, prev))
+ except backup_utils.BackupError as exc:
+ raise exceptions.StorageBackupError(*exc.args) from exc
+
+ def _backup_storage(
+ self,
+ manager: backup_utils.BackupManager,
+ path: Path,
+ prev_backup: Path | None = None,
+ ) -> None:
+ """Create a backup of the sqlite database and disk-objectstore to the provided path.
+
+ :param manager: BackupManager from backup_utils containing utilities such as for calling the rsync.
+ :param path: Path to where the backup will be created.
+ :param prev_backup: Path to the previous backup. Rsync calls will be hard-linked to this path, making the backup
+ incremental and efficient.
+ """
+ LOGGER.report('Running storage maintenance')
+ self.maintain(full=False, compress=False)
+
+ LOGGER.report('Backing up disk-objectstore container')
+ manager.call_rsync(self.filepath_container, path, link_dest=prev_backup, dest_trailing_slash=True)
+
+ LOGGER.report('Backing up sqlite database')
+ manager.call_rsync(self.filepath_database, path, link_dest=prev_backup, dest_trailing_slash=True)
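
The new `_backup` delegates to `disk_objectstore.backup_utils`, whose `call_rsync` passes the previous backup as rsync's `--link-dest`: files unchanged since the last snapshot become hard links into it, so every snapshot is complete while only deltas consume space. The underlying rsync pattern, sketched standalone with placeholder paths:

```python
import subprocess

# Placeholder paths: `link_dest` points at the previous completed snapshot.
src = '/path/to/profile/container/'
dest = '/backups/backup_new/'
link_dest = '/backups/last-backup/'

# `--link-dest` hard-links unchanged files into the previous snapshot,
# making the new snapshot full in content yet incremental in cost.
subprocess.run(['rsync', '-a', f'--link-dest={link_dest}', src, dest], check=True)
```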
diff --git a/src/aiida/storage/sqlite_temp/backend.py b/src/aiida/storage/sqlite_temp/backend.py
index 66b0919a6c..ec06f2c803 100644
--- a/src/aiida/storage/sqlite_temp/backend.py
+++ b/src/aiida/storage/sqlite_temp/backend.py
@@ -43,7 +43,7 @@ class SqliteTempBackend(StorageBackend):
and destroys it when it is garbage collected.
"""
- class Model(BaseModel):
+ class Model(BaseModel, defer_build=True):
filepath: str = Field(
title='Temporary directory',
description='Temporary directory in which to store data for this backend.',
diff --git a/src/aiida/storage/sqlite_zip/backend.py b/src/aiida/storage/sqlite_zip/backend.py
index ea852783a5..771660d315 100644
--- a/src/aiida/storage/sqlite_zip/backend.py
+++ b/src/aiida/storage/sqlite_zip/backend.py
@@ -20,7 +20,6 @@
from typing import BinaryIO, Iterable, Iterator, Optional, Sequence, Tuple, cast
from zipfile import ZipFile, is_zipfile
-from archive_path import ZipPath, extract_file_in_zip
from pydantic import BaseModel, Field, field_validator
from sqlalchemy.orm import Session
@@ -33,7 +32,6 @@
from aiida.repository.backend.abstract import AbstractRepositoryBackend
from . import orm
-from .migrator import get_schema_version_head, migrate, validate_storage
from .utils import (
DB_FILENAME,
META_FILENAME,
@@ -68,7 +66,7 @@ class SqliteZipBackend(StorageBackend):
read_only = True
"""This plugin is read only and data cannot be created or mutated."""
- class Model(BaseModel):
+ class Model(BaseModel, defer_build=True):
"""Model describing required information to configure an instance of the storage."""
filepath: str = Field(title='Filepath of the archive', description='Filepath of the archive.')
@@ -83,6 +81,8 @@ def filepath_exists_and_is_absolute(cls, value: str) -> str:
@classmethod
def version_head(cls) -> str:
+ from .migrator import get_schema_version_head
+
return get_schema_version_head()
@staticmethod
@@ -111,9 +111,13 @@ def initialise(cls, profile: 'Profile', reset: bool = False) -> bool:
tests having run.
:returns: ``True`` if the storage was initialised by the function call, ``False`` if it was already initialised.
"""
+ from archive_path import ZipPath
+
filepath_archive = Path(profile.storage_config['filepath'])
if filepath_archive.exists() and not reset:
+ from .migrator import migrate
+
# The archive exists but ``reset == False``, so we try to migrate to the latest schema version. If the
# migration works, we replace the original archive with the migrated one.
with tempfile.TemporaryDirectory() as dirpath:
@@ -162,6 +166,8 @@ def migrate(cls, profile: Profile):
raise NotImplementedError('use the :func:`aiida.storage.sqlite_zip.migrator.migrate` function directly.')
def __init__(self, profile: Profile):
+ from .migrator import validate_storage
+
super().__init__(profile)
self._path = Path(profile.storage_config['filepath'])
validate_storage(self._path)
@@ -194,6 +200,8 @@ def close(self):
def get_session(self) -> Session:
"""Return an SQLAlchemy session."""
+ from archive_path import extract_file_in_zip
+
if self._closed:
raise ClosedStorage(str(self))
if self._session is None:
diff --git a/src/aiida/storage/sqlite_zip/migrations/legacy_to_main.py b/src/aiida/storage/sqlite_zip/migrations/legacy_to_main.py
index 3683844e30..b6f40f7e98 100644
--- a/src/aiida/storage/sqlite_zip/migrations/legacy_to_main.py
+++ b/src/aiida/storage/sqlite_zip/migrations/legacy_to_main.py
@@ -27,7 +27,6 @@
from aiida.storage.log import MIGRATE_LOGGER
from ..utils import DB_FILENAME, REPO_FOLDER, create_sqla_engine
-from . import v1_db_schema as v1_schema
from .utils import update_metadata
_NODE_ENTITY_NAME = 'Node'
@@ -45,15 +44,6 @@
_COMMENT_ENTITY_NAME: {'dbnode': 'dbnode_id', 'user': 'user_id'},
}
-aiida_orm_to_backend = {
- _USER_ENTITY_NAME: v1_schema.DbUser,
- _GROUP_ENTITY_NAME: v1_schema.DbGroup,
- _NODE_ENTITY_NAME: v1_schema.DbNode,
- _COMMENT_ENTITY_NAME: v1_schema.DbComment,
- _COMPUTER_ENTITY_NAME: v1_schema.DbComputer,
- _LOG_ENTITY_NAME: v1_schema.DbLog,
-}
-
LEGACY_TO_MAIN_REVISION = 'main_0000'
@@ -138,6 +128,17 @@ def _json_to_sqlite(
"""Convert a JSON archive format to SQLite."""
from aiida.tools.archive.common import batch_iter
+ from . import v1_db_schema as v1_schema
+
+ aiida_orm_to_backend = {
+ _USER_ENTITY_NAME: v1_schema.DbUser,
+ _GROUP_ENTITY_NAME: v1_schema.DbGroup,
+ _NODE_ENTITY_NAME: v1_schema.DbNode,
+ _COMMENT_ENTITY_NAME: v1_schema.DbComment,
+ _COMPUTER_ENTITY_NAME: v1_schema.DbComputer,
+ _LOG_ENTITY_NAME: v1_schema.DbLog,
+ }
+
MIGRATE_LOGGER.report('Converting DB to SQLite')
engine = create_sqla_engine(outpath)
diff --git a/src/aiida/storage/sqlite_zip/migrations/versions/main_0000a_replace_nulls.py b/src/aiida/storage/sqlite_zip/migrations/versions/main_0000a_replace_nulls.py
index 22f8200f6d..af718dc302 100644
--- a/src/aiida/storage/sqlite_zip/migrations/versions/main_0000a_replace_nulls.py
+++ b/src/aiida/storage/sqlite_zip/migrations/versions/main_0000a_replace_nulls.py
@@ -42,8 +42,8 @@ def upgrade():
)
# remove rows with null values, which may have previously resulted from deletion of a user or computer
- op.execute(db_dbauthinfo.delete().where(db_dbauthinfo.c.aiidauser_id.is_(None))) # type: ignore[arg-type]
- op.execute(db_dbauthinfo.delete().where(db_dbauthinfo.c.dbcomputer_id.is_(None))) # type: ignore[arg-type]
+ op.execute(db_dbauthinfo.delete().where(db_dbauthinfo.c.aiidauser_id.is_(None)))
+ op.execute(db_dbauthinfo.delete().where(db_dbauthinfo.c.dbcomputer_id.is_(None)))
op.execute(db_dbauthinfo.update().where(db_dbauthinfo.c.enabled.is_(None)).values(enabled=True))
op.execute(db_dbauthinfo.update().where(db_dbauthinfo.c.auth_params.is_(None)).values(auth_params={}))
@@ -60,8 +60,8 @@ def upgrade():
)
# remove rows with null values, which may have previously resulted from deletion of a node or user
- op.execute(db_dbcomment.delete().where(db_dbcomment.c.dbnode_id.is_(None))) # type: ignore[arg-type]
- op.execute(db_dbcomment.delete().where(db_dbcomment.c.user_id.is_(None))) # type: ignore[arg-type]
+ op.execute(db_dbcomment.delete().where(db_dbcomment.c.dbnode_id.is_(None)))
+ op.execute(db_dbcomment.delete().where(db_dbcomment.c.user_id.is_(None)))
op.execute(db_dbcomment.update().where(db_dbcomment.c.content.is_(None)).values(content=''))
op.execute(db_dbcomment.update().where(db_dbcomment.c.ctime.is_(None)).values(ctime=timezone.now()))
diff --git a/src/aiida/storage/sqlite_zip/models.py b/src/aiida/storage/sqlite_zip/models.py
index 8303120f01..6358d60d26 100644
--- a/src/aiida/storage/sqlite_zip/models.py
+++ b/src/aiida/storage/sqlite_zip/models.py
@@ -158,21 +158,23 @@ def create_orm_cls(klass: base.Base) -> SqliteBase:
),
)
+MAP_ENTITY_TYPE_TO_MODEL = {
+ EntityTypes.USER: DbUser,
+ EntityTypes.AUTHINFO: DbAuthInfo,
+ EntityTypes.GROUP: DbGroup,
+ EntityTypes.NODE: DbNode,
+ EntityTypes.COMMENT: DbComment,
+ EntityTypes.COMPUTER: DbComputer,
+ EntityTypes.LOG: DbLog,
+ EntityTypes.LINK: DbLink,
+ EntityTypes.GROUP_NODE: DbGroupNodes,
+}
+
@functools.lru_cache(maxsize=10)
def get_model_from_entity(entity_type: EntityTypes) -> Tuple[Any, Set[str]]:
"""Return the Sqlalchemy model and column names corresponding to the given entity."""
- model = {
- EntityTypes.USER: DbUser,
- EntityTypes.AUTHINFO: DbAuthInfo,
- EntityTypes.GROUP: DbGroup,
- EntityTypes.NODE: DbNode,
- EntityTypes.COMMENT: DbComment,
- EntityTypes.COMPUTER: DbComputer,
- EntityTypes.LOG: DbLog,
- EntityTypes.LINK: DbLink,
- EntityTypes.GROUP_NODE: DbGroupNodes,
- }[entity_type]
+ model = MAP_ENTITY_TYPE_TO_MODEL[entity_type]
mapper = sa.inspect(model).mapper
column_names = {col.name for col in mapper.c.values()}
return model, column_names
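
Hoisting the mapping to module level lets `SqliteDosStorage._get_mapper_from_entity` above reuse it instead of duplicating the dict. The `sa.inspect` idiom it relies on is standard SQLAlchemy; a self-contained illustration with a throwaway model:

```python
import sqlalchemy as sa
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class DbUser(Base):
    __tablename__ = 'db_dbuser'
    id = sa.Column(sa.Integer, primary_key=True)
    email = sa.Column(sa.String, nullable=False)

mapper = sa.inspect(DbUser).mapper
# Column names, excluding primary-key columns as `_get_mapper_from_entity(..., with_pk=False)` does.
keys = {key for key, col in mapper.c.items() if col not in mapper.primary_key}
assert keys == {'email'}
```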
diff --git a/src/aiida/storage/sqlite_zip/utils.py b/src/aiida/storage/sqlite_zip/utils.py
index 8d3bfb0f12..2438c18fcb 100644
--- a/src/aiida/storage/sqlite_zip/utils.py
+++ b/src/aiida/storage/sqlite_zip/utils.py
@@ -9,12 +9,10 @@
"""Utilities for this backend."""
import json
-import tarfile
import zipfile
from pathlib import Path
from typing import Any, Dict, Optional, Union
-from archive_path import read_file_in_tar, read_file_in_zip
from sqlalchemy import event
from sqlalchemy.future.engine import Engine, create_engine
@@ -64,6 +62,10 @@ def extract_metadata(path: Union[str, Path], *, search_limit: Optional[int] = 10
:param search_limit: the maximum number of records to search for the metadata file in a zip file.
"""
+ import tarfile
+
+ from archive_path import read_file_in_tar, read_file_in_zip
+
path = Path(path)
if not path.exists():
raise UnreachableStorage(f'path not found: {path}')
diff --git a/src/aiida/tools/archive/create.py b/src/aiida/tools/archive/create.py
index 296e3d9aed..ebf6064ebe 100644
--- a/src/aiida/tools/archive/create.py
+++ b/src/aiida/tools/archive/create.py
@@ -365,6 +365,8 @@ def transform(d):
if filename.exists():
filename.unlink()
+
+ filename.parent.mkdir(parents=True, exist_ok=True)
shutil.move(tmp_filename, filename)
EXPORT_LOGGER.report('Archive created successfully')
diff --git a/src/aiida/tools/pytest_fixtures/configuration.py b/src/aiida/tools/pytest_fixtures/configuration.py
index b5ef8c4a63..ca38db3fb0 100644
--- a/src/aiida/tools/pytest_fixtures/configuration.py
+++ b/src/aiida/tools/pytest_fixtures/configuration.py
@@ -115,7 +115,8 @@ def factory(
from aiida.manage.manager import get_manager
manager = get_manager()
- storage_config = storage_config or {'filepath': str(pathlib.Path(config.dirpath) / 'storage')}
+ name = name or secrets.token_hex(16)
+ storage_config = storage_config or {'filepath': str(pathlib.Path(config.dirpath) / name / 'storage')}
if broker_backend and broker_config is None:
broker_config = {
@@ -133,7 +134,7 @@ def factory(
storage_config=storage_config,
broker_backend=broker_backend,
broker_config=broker_config,
- name=name or secrets.token_hex(16),
+ name=name,
email=email,
is_test_profile=True,
)
diff --git a/tests/cmdline/commands/test_archive_create.py b/tests/cmdline/commands/test_archive_create.py
index ce1f933054..4a9f4133d3 100644
--- a/tests/cmdline/commands/test_archive_create.py
+++ b/tests/cmdline/commands/test_archive_create.py
@@ -37,6 +37,14 @@ def test_create_force(run_cli_command, tmp_path):
run_cli_command(cmd_archive.create, options)
+def test_create_file_nested_directory(run_cli_command, tmp_path):
+ """Test that output files that contains nested directories are created automatically."""
+ filepath = tmp_path / 'some' / 'sub' / 'directory' / 'output.aiida'
+ options = [str(filepath)]
+ run_cli_command(cmd_archive.create, options)
+ assert filepath.exists()
+
+
@pytest.mark.usefixtures('aiida_profile_clean')
def test_create_all(run_cli_command, tmp_path, aiida_localhost):
"""Test that creating an archive for a set of various ORM entities works with the zip format."""
diff --git a/tests/cmdline/commands/test_archive_import.py b/tests/cmdline/commands/test_archive_import.py
index a5cd90dc9f..92b6690433 100644
--- a/tests/cmdline/commands/test_archive_import.py
+++ b/tests/cmdline/commands/test_archive_import.py
@@ -13,7 +13,6 @@
from aiida.orm import Group
from aiida.storage.sqlite_zip.migrator import list_versions
from aiida.tools.archive import ArchiveFormatSqlZip
-from click.exceptions import BadParameter
from tests.utils.archives import get_archive_file
@@ -174,23 +173,12 @@ def test_import_url_and_local_archives(run_cli_command, newest_archive):
run_cli_command(cmd_archive.import_archive, options)
-def test_import_url_timeout():
- """Test a timeout to valid URL is correctly errored"""
- from aiida.cmdline.params.types import PathOrUrl
-
- timeout_url = 'http://www.google.com:81'
-
- test_timeout_path = PathOrUrl(exists=True, readable=True, timeout_seconds=0)
- with pytest.raises(BadParameter, match=f'ath "{timeout_url}" could not be reached within 0 s.'):
- test_timeout_path(timeout_url)
-
-
def test_raise_malformed_url(run_cli_command):
"""Test the correct error is raised when supplying a malformed URL"""
malformed_url = 'htp://www.aiida.net'
result = run_cli_command(cmd_archive.import_archive, [malformed_url], raises=True)
- assert 'could not be reached within' in result.output, result.exception
+ assert 'could not be reached.' in result.output, result.exception
def test_migration(run_cli_command):
diff --git a/tests/cmdline/commands/test_presto.py b/tests/cmdline/commands/test_presto.py
new file mode 100644
index 0000000000..8d5bba2d77
--- /dev/null
+++ b/tests/cmdline/commands/test_presto.py
@@ -0,0 +1,50 @@
+"""Tests for ``verdi presto``."""
+
+import pytest
+from aiida.cmdline.commands.cmd_presto import get_default_presto_profile_name, verdi_presto
+from aiida.manage.configuration import profile_context
+from aiida.manage.configuration.config import Config
+from aiida.orm import Computer
+
+
+@pytest.mark.parametrize(
+ 'profile_names, expected',
+ (
+ ([], 'presto'),
+ (['main', 'sqlite'], 'presto'),
+ (['presto'], 'presto-1'),
+ (['presto', 'presto-5', 'presto-2'], 'presto-6'),
+ (['presto', 'main', 'presto-2', 'sqlite'], 'presto-3'),
+ ),
+)
+def test_get_default_presto_profile_name(monkeypatch, profile_names, expected):
+ """Test the dynamic default profile function."""
+
+ def get_profile_names(self):
+ return profile_names
+
+ monkeypatch.setattr(Config, 'profile_names', property(get_profile_names))
+ assert get_default_presto_profile_name() == expected
+
+
+@pytest.mark.usefixtures('empty_config')
+@pytest.mark.parametrize('with_broker', (True, False))
+def test_presto(run_cli_command, with_broker, monkeypatch):
+ """Test the ``verdi presto``."""
+ from aiida.brokers.rabbitmq import defaults
+
+ if not with_broker:
+ # Patch the RabbitMQ detection function to pretend it could not find the service
+ monkeypatch.setattr(defaults, 'detect_rabbitmq_config', lambda: None)
+
+ result = run_cli_command(verdi_presto)
+ assert 'Created new profile `presto`.' in result.output
+
+ with profile_context('presto', allow_switch=True) as profile:
+ assert profile.name == 'presto'
+ localhost = Computer.collection.get(label='localhost')
+ assert localhost.is_configured
+ if with_broker:
+ assert profile.process_control_backend == 'core.rabbitmq'
+ else:
+ assert profile.process_control_backend is None
diff --git a/tests/cmdline/params/types/test_path.py b/tests/cmdline/params/types/test_path.py
index 88d5aa176a..d9fbb1fb07 100644
--- a/tests/cmdline/params/types/test_path.py
+++ b/tests/cmdline/params/types/test_path.py
@@ -8,38 +8,75 @@
###########################################################################
"""Tests for Path types"""
+import click
import pytest
from aiida.cmdline.params.types.path import PathOrUrl, check_timeout_seconds
-class TestPath:
- """Tests for `PathOrUrl` and `FileOrUrl`"""
+def test_default_timeout():
+ """Test the default timeout_seconds value is correct"""
+ from aiida.cmdline.params.types.path import URL_TIMEOUT_SECONDS
- def test_default_timeout(self):
- """Test the default timeout_seconds value is correct"""
- from aiida.cmdline.params.types.path import URL_TIMEOUT_SECONDS
+ import_path = PathOrUrl()
- import_path = PathOrUrl()
+ assert import_path.timeout_seconds == URL_TIMEOUT_SECONDS
- assert import_path.timeout_seconds == URL_TIMEOUT_SECONDS
- def test_timeout_checks(self):
- """Test that timeout check handles different values.
+def test_timeout_checks():
+ """Test that timeout check handles different values.
- * valid
- * none
- * wrong type
- * outside range
- """
- valid_values = [42, '42']
+ * valid
+ * none
+ * wrong type
+ * outside range
+ """
+ valid_values = [42, '42']
- for value in valid_values:
- assert check_timeout_seconds(value) == int(value)
+ for value in valid_values:
+ assert check_timeout_seconds(value) == int(value)
- for invalid in [None, 'test']:
- with pytest.raises(TypeError):
- check_timeout_seconds(invalid)
+ for invalid in [None, 'test']:
+ with pytest.raises(TypeError):
+ check_timeout_seconds(invalid)
- for invalid in [-5, 65]:
- with pytest.raises(ValueError):
- check_timeout_seconds(invalid)
+ for invalid in [-5, 65]:
+ with pytest.raises(ValueError):
+ check_timeout_seconds(invalid)
+
+
+def test_fail_non_existing_path():
+ """Test the parameter for a non-existing path when ``exists=True``."""
+ with pytest.raises(
+ click.BadParameter, match=r'.*does not correspond to a file and also could not be reached as a URL.'
+ ):
+ PathOrUrl(exists=True).convert('non-existent.txt', None, None)
+
+
+def test_fail_non_readable_path(tmp_path):
+ """Test that if the path exists but cannot be read, the parameter does not try to treat it as a URL."""
+ filepath = tmp_path / 'some_file'
+ filepath.touch()
+ filepath.chmod(0o333) # Make it writable and executable only, so it is not readable
+
+ with pytest.raises(click.BadParameter, match=r'.*exists but could not be read.'):
+ PathOrUrl(exists=True).convert(str(filepath), None, None)
+
+
+def test_fail_unreachable_url():
+ """Test the parameter in case of a valid URL that cannot be reached."""
+ with pytest.raises(click.BadParameter, match=r'.* could not be reached.'):
+ PathOrUrl(exists=True).convert('http://domain/some/path', None, None)
+
+
+def test_fail_timeout(monkeypatch):
+ """Test the parameter in case of a valid URL that times out."""
+ import socket
+ from urllib import request
+
+ def raise_timeout(*args, **kwargs):
+ raise socket.timeout
+
+ monkeypatch.setattr(request, 'urlopen', raise_timeout)
+
+ with pytest.raises(click.BadParameter, match=r'.* could not be reached within .* seconds.'):
+        PathOrUrl(exists=True).convert('http://domain/some/path', None, None)
diff --git a/tests/storage/sqlite_dos/test_backend.py b/tests/storage/sqlite_dos/test_backend.py
index cbb778a5a3..61988db1e9 100644
--- a/tests/storage/sqlite_dos/test_backend.py
+++ b/tests/storage/sqlite_dos/test_backend.py
@@ -3,7 +3,7 @@
import pathlib
import pytest
-from aiida.storage.sqlite_dos.backend import SqliteDosStorage
+from aiida.storage.sqlite_dos.backend import FILENAME_CONTAINER, FILENAME_DATABASE, SqliteDosStorage
@pytest.mark.usefixtures('chdir_tmp_path')
@@ -12,3 +12,29 @@ def test_model():
filepath = pathlib.Path.cwd() / 'archive.aiida'
model = SqliteDosStorage.Model(filepath=filepath.name)
assert pathlib.Path(model.filepath).is_absolute()
+
+
+def test_archive_import(aiida_config, aiida_profile_factory):
+ """Test that archives can be imported."""
+ from aiida.orm import Node, QueryBuilder
+ from aiida.tools.archive.imports import import_archive
+
+ from tests.utils.archives import get_archive_file
+
+ with aiida_profile_factory(aiida_config, storage_backend='core.sqlite_dos'):
+ assert QueryBuilder().append(Node).count() == 0
+ import_archive(get_archive_file('calcjob/arithmetic.add.aiida'))
+ assert QueryBuilder().append(Node).count() > 0
+
+
+def test_backup(aiida_config, aiida_profile_factory, tmp_path, manager):
+ """Test the backup implementation."""
+ with aiida_profile_factory(aiida_config, storage_backend='core.sqlite_dos'):
+ storage = manager.get_profile_storage()
+ storage.backup(str(tmp_path))
+ filepath_last = tmp_path / 'last-backup'
+ assert (tmp_path / 'config.json').exists()
+ assert filepath_last.is_symlink()
+ dirpath_backup = filepath_last.resolve()
+ assert (dirpath_backup / FILENAME_DATABASE).exists()
+ assert (dirpath_backup / FILENAME_CONTAINER).exists()