diff --git a/docs/source/conf.py b/docs/source/conf.py index 3be770690..b93e99c2b 100755 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -84,7 +84,7 @@ # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -# language = None +language = 'en' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. @@ -151,6 +151,10 @@ (r'.*', r'Tuple.*'), ] nitpick_ignore = [ + ('py:class', 'AttributeDict'), + ('py:class', 'ExitCode'), + ('py:class', 'StructureData'), + ('py:class', 'PseudoPotentialFamily'), ('py:exc', 'ArithmeticError'), ('py:exc', 'AssertionError'), ('py:exc', 'AttributeError'), @@ -209,6 +213,4 @@ ('py:obj', 'Mapping'), ('py:obj', 'qe_tools.parsers.CpInputFile'), ('py:obj', 'qe_tools.parsers.PwInputFile'), - ('py:class', 'StructureData'), - ('py:class', 'PseudoPotentialFamily'), ] diff --git a/src/aiida_quantumespresso/calculations/__init__.py b/src/aiida_quantumespresso/calculations/__init__.py index b03b1e28d..05bd80e7b 100644 --- a/src/aiida_quantumespresso/calculations/__init__.py +++ b/src/aiida_quantumespresso/calculations/__init__.py @@ -143,6 +143,22 @@ def define(cls, spec): ) spec.inputs.validator = cls.validate_inputs + spec.exit_code( + 302, + 'ERROR_OUTPUT_STDOUT_MISSING', + message='The retrieved folder did not contain the required stdout output file.' + ) + spec.exit_code(310, 'ERROR_OUTPUT_STDOUT_READ', message='The stdout output file could not be read.') + spec.exit_code(311, 'ERROR_OUTPUT_STDOUT_PARSE', message='The stdout output file could not be parsed.') + spec.exit_code( + 312, + 'ERROR_OUTPUT_STDOUT_INCOMPLETE', + message='The stdout output file was incomplete probably because the calculation got interrupted.' + ) + spec.exit_code( + 400, 'ERROR_OUT_OF_WALLTIME', message='The calculation stopped prematurely because it ran out of walltime.' 
+ ) + @classmethod def validate_inputs(cls, value, port_namespace): """Validate the entire inputs namespace.""" diff --git a/src/aiida_quantumespresso/calculations/cp.py b/src/aiida_quantumespresso/calculations/cp.py index 76db82ed9..d0652a83f 100644 --- a/src/aiida_quantumespresso/calculations/cp.py +++ b/src/aiida_quantumespresso/calculations/cp.py @@ -131,12 +131,6 @@ def define(cls, spec): message='The required XML file is not present in the retrieved folder.') spec.exit_code(304, 'ERROR_OUTPUT_XML_MULTIPLE', message='The retrieved folder contains multiple XML files.') - spec.exit_code(310, 'ERROR_OUTPUT_STDOUT_READ', - message='The stdout output file could not be read.') - spec.exit_code(311, 'ERROR_OUTPUT_STDOUT_PARSE', - message='The output file contains invalid output.') - spec.exit_code(312, 'ERROR_OUTPUT_STDOUT_INCOMPLETE', - message='The stdout output file was incomplete probably because the calculation got interrupted.') spec.exit_code(320, 'ERROR_OUTPUT_XML_READ', message='The required XML file could not be read.') spec.exit_code(330, 'ERROR_READING_POS_FILE', diff --git a/src/aiida_quantumespresso/calculations/dos.py b/src/aiida_quantumespresso/calculations/dos.py index 8a4cf15b4..4481b2768 100644 --- a/src/aiida_quantumespresso/calculations/dos.py +++ b/src/aiida_quantumespresso/calculations/dos.py @@ -27,10 +27,6 @@ def define(cls, spec): spec.output('output_parameters', valid_type=orm.Dict) spec.output('output_dos', valid_type=orm.XyData) spec.default_output_node = 'output_parameters' - spec.exit_code(310, 'ERROR_OUTPUT_STDOUT_READ', - message='The stdout output file could not be read.') - spec.exit_code(312, 'ERROR_OUTPUT_STDOUT_INCOMPLETE', - message='The stdout output file was incomplete probably because the calculation got interrupted.') spec.exit_code(330, 'ERROR_READING_DOS_FILE', message='The dos file could not be read from the retrieved folder.') # yapf: enable diff --git a/src/aiida_quantumespresso/calculations/matdyn.py b/src/aiida_quantumespresso/calculations/matdyn.py index b5a0a6d3e..4d34671a5 100644 --- a/src/aiida_quantumespresso/calculations/matdyn.py +++ b/src/aiida_quantumespresso/calculations/matdyn.py @@ -37,10 +37,6 @@ def define(cls, spec): spec.output('output_phonon_bands', valid_type=orm.BandsData) spec.default_output_node = 'output_parameters' - spec.exit_code(310, 'ERROR_OUTPUT_STDOUT_READ', - message='The stdout output file could not be read.') - spec.exit_code(312, 'ERROR_OUTPUT_STDOUT_INCOMPLETE', - message='The stdout output file was incomplete probably because the calculation got interrupted.') spec.exit_code(330, 'ERROR_OUTPUT_FREQUENCIES', message='The output frequencies file could not be read from the retrieved folder.') spec.exit_code(410, 'ERROR_OUTPUT_KPOINTS_MISSING', diff --git a/src/aiida_quantumespresso/calculations/namelists.py b/src/aiida_quantumespresso/calculations/namelists.py index 09767c860..6e435c644 100644 --- a/src/aiida_quantumespresso/calculations/namelists.py +++ b/src/aiida_quantumespresso/calculations/namelists.py @@ -58,6 +58,14 @@ def define(cls, spec): help='Use an additional node for special settings') spec.input('parent_folder', valid_type=(RemoteData, FolderData, SinglefileData), required=False, help='Use a local or remote folder as parent folder (for restarts and similar)') + spec.exit_code(302, 'ERROR_OUTPUT_STDOUT_MISSING', + message='The retrieved folder did not contain the required stdout output file.') + spec.exit_code(310, 'ERROR_OUTPUT_STDOUT_READ', + message='An exception was raised while reading the 
`stdout` file: {exception}') + spec.exit_code(311, 'ERROR_OUTPUT_STDOUT_PARSE', + message='An exception was raised while parsing the `stdout` file: {exception}') + spec.exit_code(312, 'ERROR_OUTPUT_STDOUT_INCOMPLETE', + message='The stdout output file was incomplete probably because the calculation got interrupted.') # yapf: enable @classmethod diff --git a/src/aiida_quantumespresso/calculations/neb.py b/src/aiida_quantumespresso/calculations/neb.py index eb6de7cdb..60ca78212 100644 --- a/src/aiida_quantumespresso/calculations/neb.py +++ b/src/aiida_quantumespresso/calculations/neb.py @@ -78,12 +78,6 @@ def define(cls, spec): spec.default_output_node = 'output_parameters' spec.exit_code(303, 'ERROR_MISSING_XML_FILE', message='The required XML file is not present in the retrieved folder.') - spec.exit_code(310, 'ERROR_OUTPUT_STDOUT_READ', - message='The stdout output file could not be read.') - spec.exit_code(311, 'ERROR_OUTPUT_STDOUT_PARSE', - message='The output file contains invalid output.') - spec.exit_code(312, 'ERROR_OUTPUT_STDOUT_INCOMPLETE', - message='The stdout output file was incomplete probably because the calculation got interrupted.') spec.exit_code(320, 'ERROR_OUTPUT_XML_READ', message='The XML output file could not be read.') spec.exit_code(321, 'ERROR_OUTPUT_XML_PARSE', diff --git a/src/aiida_quantumespresso/calculations/open_grid.py b/src/aiida_quantumespresso/calculations/open_grid.py index d642fdbf4..2929aac1e 100644 --- a/src/aiida_quantumespresso/calculations/open_grid.py +++ b/src/aiida_quantumespresso/calculations/open_grid.py @@ -28,13 +28,7 @@ def define(cls, spec): spec.exit_code(300, 'ERROR_NO_RETRIEVED_FOLDER', message='The retrieved folder data node could not be accessed.') - spec.exit_code(310, 'ERROR_OUTPUT_STDOUT_READ', - message='The stdout output file could not be read.') - spec.exit_code(311, 'ERROR_OUTPUT_STDOUT_INCOMPLETE', - message='The stdout output file was incomplete probably because the calculation got interrupted.') spec.exit_code(312, 'ERROR_INCOMPATIBLE_FFT_GRID', message='Found rotation or fractional translation not compatible with FFT grid.') - spec.exit_code(340, 'ERROR_GENERIC_QE_ERROR', - message='Encountered a generic error message.') spec.exit_code(350, 'ERROR_OUTPUT_KPOINTS_MISMATCH', message='Mismatch between kmesh dimensions and number of kpoints.') diff --git a/src/aiida_quantumespresso/calculations/pp.py b/src/aiida_quantumespresso/calculations/pp.py index 1ae8a63bd..5ce2d676d 100644 --- a/src/aiida_quantumespresso/calculations/pp.py +++ b/src/aiida_quantumespresso/calculations/pp.py @@ -98,6 +98,8 @@ def define(cls, spec): message='The parent folder did not contain the required XML output file.') spec.exit_code(310, 'ERROR_OUTPUT_STDOUT_READ', message='The stdout output file could not be read.') + spec.exit_code(311, 'ERROR_OUTPUT_STDOUT_PARSE', + message='The stdout output file could not be parsed.') spec.exit_code(312, 'ERROR_OUTPUT_STDOUT_INCOMPLETE', message='The stdout output file was incomplete.') spec.exit_code(340, 'ERROR_OUT_OF_WALLTIME_INTERRUPTED', diff --git a/src/aiida_quantumespresso/calculations/projwfc.py b/src/aiida_quantumespresso/calculations/projwfc.py index 5318e5cb0..41bc77726 100644 --- a/src/aiida_quantumespresso/calculations/projwfc.py +++ b/src/aiida_quantumespresso/calculations/projwfc.py @@ -62,10 +62,6 @@ def define(cls, spec): message='The retrieved temporary folder could not be accessed.') spec.exit_code(303, 'ERROR_OUTPUT_XML_MISSING', message='The retrieved folder did not contain the required XML 
file.') - spec.exit_code(310, 'ERROR_OUTPUT_STDOUT_READ', - message='The stdout output file could not be read.') - spec.exit_code(312, 'ERROR_OUTPUT_STDOUT_INCOMPLETE', - message='The stdout output file was incomplete probably because the calculation got interrupted.') spec.exit_code(320, 'ERROR_OUTPUT_XML_READ', message='The XML output file could not be read.') spec.exit_code(321, 'ERROR_OUTPUT_XML_PARSE', diff --git a/src/aiida_quantumespresso/calculations/pw.py b/src/aiida_quantumespresso/calculations/pw.py index 8c13fde38..6a2b420f1 100644 --- a/src/aiida_quantumespresso/calculations/pw.py +++ b/src/aiida_quantumespresso/calculations/pw.py @@ -87,20 +87,12 @@ def define(cls, spec): # Unrecoverable errors: required retrieved files could not be read, parsed or are otherwise incomplete spec.exit_code(301, 'ERROR_NO_RETRIEVED_TEMPORARY_FOLDER', message='The retrieved temporary folder could not be accessed.') - spec.exit_code(302, 'ERROR_OUTPUT_STDOUT_MISSING', - message='The retrieved folder did not contain the required stdout output file.') spec.exit_code(303, 'ERROR_OUTPUT_XML_MISSING', message='The retrieved folder did not contain the required XML file.') spec.exit_code(304, 'ERROR_OUTPUT_XML_MULTIPLE', message='The retrieved folder contained multiple XML files.') spec.exit_code(305, 'ERROR_OUTPUT_FILES', message='Both the stdout and XML output files could not be read or parsed.') - spec.exit_code(310, 'ERROR_OUTPUT_STDOUT_READ', - message='The stdout output file could not be read.') - spec.exit_code(311, 'ERROR_OUTPUT_STDOUT_PARSE', - message='The stdout output file could not be parsed.') - spec.exit_code(312, 'ERROR_OUTPUT_STDOUT_INCOMPLETE', - message='The stdout output file was incomplete probably because the calculation got interrupted.') spec.exit_code(320, 'ERROR_OUTPUT_XML_READ', message='The XML output file could not be read.') spec.exit_code(321, 'ERROR_OUTPUT_XML_PARSE', @@ -116,8 +108,6 @@ def define(cls, spec): message='The code failed in finding a valid reciprocal lattice vector.') # Significant errors but calculation can be used to restart - spec.exit_code(400, 'ERROR_OUT_OF_WALLTIME', - message='The calculation stopped prematurely because it ran out of walltime.') spec.exit_code(410, 'ERROR_ELECTRONIC_CONVERGENCE_NOT_REACHED', message='The electronic minimization cycle did not reach self-consistency.') diff --git a/src/aiida_quantumespresso/calculations/pw2gw.py b/src/aiida_quantumespresso/calculations/pw2gw.py index 2a6a9d8c1..fe9cc03cf 100644 --- a/src/aiida_quantumespresso/calculations/pw2gw.py +++ b/src/aiida_quantumespresso/calculations/pw2gw.py @@ -38,16 +38,8 @@ def define(cls, spec): spec.output('eps', valid_type=orm.ArrayData, help='The `eps` output node containing 5 arrays `energy`, `epsX`, `epsY`, `epsZ`, `epsTOT`') - spec.exit_code(302, 'ERROR_OUTPUT_STDOUT_MISSING', - message='The retrieved folder did not contain the required stdout output file.') spec.exit_code(305, 'ERROR_OUTPUT_FILES', message='The eps*.dat output files could not be read or parsed.') - spec.exit_code(310, 'ERROR_OUTPUT_STDOUT_READ', - message='The stdout output file could not be read.') - spec.exit_code(311, 'ERROR_OUTPUT_STDOUT_PARSE', - message='The stdout output file could not be parsed.') - spec.exit_code(312, 'ERROR_OUTPUT_STDOUT_INCOMPLETE', - message='The stdout output file was incomplete probably because the calculation got interrupted.') spec.exit_code(330, 'ERROR_OUTPUT_FILES_INVALID_FORMAT', message='The eps*.dat output files do not have the expected shape (N, 2).') 
spec.exit_code(331, 'ERROR_OUTPUT_FILES_ENERGY_MISMATCH', diff --git a/src/aiida_quantumespresso/calculations/pw2wannier90.py b/src/aiida_quantumespresso/calculations/pw2wannier90.py index 4b2db5a96..f0ce2e885 100644 --- a/src/aiida_quantumespresso/calculations/pw2wannier90.py +++ b/src/aiida_quantumespresso/calculations/pw2wannier90.py @@ -31,10 +31,6 @@ def define(cls, spec): help='The output folder of a pw.x calculation') spec.output('output_parameters', valid_type=Dict) spec.default_output_node = 'output_parameters' - spec.exit_code(310, 'ERROR_OUTPUT_STDOUT_READ', - message='The stdout output file could not be read.') - spec.exit_code(312, 'ERROR_OUTPUT_STDOUT_INCOMPLETE', - message='The stdout output file was incomplete probably because the calculation got interrupted.') spec.exit_code(340, 'ERROR_GENERIC_QE_ERROR', message='Encountered a generic error message') spec.exit_code(350, 'ERROR_UNEXPECTED_PARSER_EXCEPTION', diff --git a/src/aiida_quantumespresso/calculations/q2r.py b/src/aiida_quantumespresso/calculations/q2r.py index ead3cd7ae..a4650c479 100644 --- a/src/aiida_quantumespresso/calculations/q2r.py +++ b/src/aiida_quantumespresso/calculations/q2r.py @@ -32,10 +32,6 @@ def define(cls, spec): super().define(spec) spec.input('parent_folder', valid_type=(orm.RemoteData, orm.FolderData), required=True) spec.output('force_constants', valid_type=ForceConstantsData) - spec.exit_code(310, 'ERROR_OUTPUT_STDOUT_READ', - message='The stdout output file could not be read.') - spec.exit_code(312, 'ERROR_OUTPUT_STDOUT_INCOMPLETE', - message='The stdout output file was incomplete probably because the calculation got interrupted.') spec.exit_code(330, 'ERROR_READING_FORCE_CONSTANTS_FILE', message='The force constants file could not be read.') # yapf: enable diff --git a/src/aiida_quantumespresso/calculations/xspectra.py b/src/aiida_quantumespresso/calculations/xspectra.py index 505a31257..6ad9140c5 100644 --- a/src/aiida_quantumespresso/calculations/xspectra.py +++ b/src/aiida_quantumespresso/calculations/xspectra.py @@ -61,12 +61,6 @@ def define(cls, spec): spec.output('spectra', valid_type=XyData) spec.default_output_node = 'output_parameters' - spec.exit_code(310, 'ERROR_OUTPUT_STDOUT_READ', message='The stdout output file could not be read.') - spec.exit_code( - 312, - 'ERROR_OUTPUT_STDOUT_INCOMPLETE', - message='The stdout output file was incomplete probably because the calculation got interrupted.' - ) spec.exit_code( 313, 'ERROR_OUTPUT_ABSORBING_SPECIES_WRONG', diff --git a/src/aiida_quantumespresso/parsers/base.py b/src/aiida_quantumespresso/parsers/base.py index 063985d02..020f754ed 100644 --- a/src/aiida_quantumespresso/parsers/base.py +++ b/src/aiida_quantumespresso/parsers/base.py @@ -3,15 +3,80 @@ All `Parser` implementations in `aiida-quantumespresso` must use this base class, not `aiida.parsers.Parser`. 
""" -from aiida.parsers import Parser as _BaseParser +from __future__ import annotations -__all__ = ('Parser',) +import abc +import re +from typing import Optional, Tuple +from aiida.common import AttributeDict +from aiida.engine import ExitCode +from aiida.parsers import Parser -class Parser(_BaseParser): # pylint: disable=abstract-method - """Custom `Parser` class for `aiida-quantumespresso` parser implementations.""" +from aiida_quantumespresso.parsers.parse_raw.base import convert_qe_time_to_sec - def emit_logs(self, logging_dictionaries, ignore=None): +__all__ = ('BaseParser',) + + +class BaseParser(Parser, metaclass=abc.ABCMeta): + """Custom ``Parser`` class for ``aiida-quantumespresso`` parser implementations.""" + + class_error_map = {} + class_warning_map = {} + + base_error_map = { + 'Maximum CPU time exceeded': 'ERROR_OUT_OF_WALLTIME', + } + base_warning_map = { + 'Warning:': None, + 'DEPRECATED:': None, + } + success_string = 'JOB DONE' + + @classmethod + def get_error_map(cls): + """The full error map of the parser class.""" + error_map = cls.base_error_map.copy() + error_map.update(cls.class_error_map) + return error_map + + @classmethod + def get_warning_map(cls): + """The full warning map of the parser class.""" + warning_map = cls.base_warning_map.copy() + warning_map.update(cls.class_warning_map) + return warning_map + + def parse_stdout_from_retrieved(self, logs: AttributeDict) -> Tuple[str, dict, AttributeDict]: + """Read and parse the ``stdout`` content of a Quantum ESPRESSO calculation. + + :param logs: Logging container that will be updated during parsing. + :returns: size 3 tuple: (``stdout`` content, parsed data, updated logs). + """ + filename_stdout = self.node.get_option('output_filename') + + if filename_stdout not in self.retrieved.base.repository.list_object_names(): + logs.error.append('ERROR_OUTPUT_STDOUT_MISSING') + return '', {}, logs + + try: + with self.retrieved.open(filename_stdout, 'r') as handle: + stdout = handle.read() + except OSError as exception: + logs.error.append('ERROR_OUTPUT_STDOUT_READ') + logs.error.append(exception) + return '', {}, logs + + try: + parsed_data, logs = self._parse_stdout_base(stdout, logs) + except Exception as exception: + logs.error.append('ERROR_OUTPUT_STDOUT_PARSE') + logs.error.append(exception) + return stdout, {}, logs + + return stdout, parsed_data, logs + + def emit_logs(self, logs: list[AttributeDict] | tuple[AttributeDict] | AttributeDict, ignore: list = None) -> None: """Emit the messages in one or multiple "log dictionaries" through the logger of the parser. 
         A log dictionary is expected to have the following structure: each key must correspond to a log level of the
@@ -25,39 +90,127 @@ def emit_logs(self, logging_dictionaries, ignore=None):
             'error': ['Self-consistency was not achieved']
         }

-        :param logging_dictionaries: log dictionaries
+        :param logs: log dictionaries
         :param ignore: list of log messages to ignore
         """
         ignore = ignore or []

-        if not isinstance(logging_dictionaries, (list, tuple)):
-            logging_dictionaries = [logging_dictionaries]
+        if not isinstance(logs, (list, tuple)):
+            logs = [logs]

-        for logs in logging_dictionaries:
+        for logs in logs:
             for level, messages in logs.items():
                 for message in messages:
                     if message is None:
                         continue
                     stripped = message.strip()
                     if not stripped or stripped in ignore:
                         continue
-                    try:
-                        getattr(self.logger, level)(stripped)
-                    except AttributeError:
-                        pass
+                    getattr(self.logger, level)(stripped)
+
+    def check_base_errors(self, logs: AttributeDict) -> Optional[ExitCode]:
+        """Check the ``logs`` for the following "basic" parsing errors and return a (formatted) version:

-    def exit(self, exit_code):
-        """Log the exit message of the give exit code with level `ERROR` and return the exit code.
+        * ``ERROR_OUTPUT_STDOUT_MISSING``
+        * ``ERROR_OUTPUT_STDOUT_READ``
+        * ``ERROR_OUTPUT_STDOUT_PARSE``

-        This is a utility function if one wants to return from the parse method and automically add the exit message
-        associated to the exit code as a log message to the node: e.g. `return self.exit(self.exit_codes.LABEL))`
+        These errors mean that there is no ``stdout`` to parse.

-        :param exit_code: an `ExitCode`
-        :return: the exit code
+        The ``ERROR_OUTPUT_STDOUT_INCOMPLETE`` error is not checked here because in this case there might still be
+        useful information in the ``stdout``.
         """
-        self.logger.error(exit_code.message)
+
+        for exit_code in [
+            'ERROR_OUTPUT_STDOUT_MISSING',
+        ]:
+            if exit_code in logs.error:
+                return self.exit_codes.get(exit_code)
+
+        # These exit codes have additional information that needs to be formatted in the message.
+        for exit_code in [
+            'ERROR_OUTPUT_STDOUT_READ',
+            'ERROR_OUTPUT_STDOUT_PARSE'
+        ]:
+            if exit_code in logs.error:
+                exception = logs.error[logs.error.index(exit_code) + 1]
+                return self.exit_codes.get(exit_code).format(exception=exception)
+
+    def exit(self, exit_code: ExitCode | None = None, logs: AttributeDict | None = None) -> ExitCode:
+        """Log all messages in the ``logs`` as well as the ``exit_code`` message and return the correct exit code.
+
+        This is a utility function if one wants to return from the parse method and automatically add the ``logs`` and
+        the exit message associated to an exit code as a log message to the node: e.g.
+        ``return self.exit(self.exit_codes.LABEL)``
+
+        If no ``exit_code`` is provided, the method will check if an ``exit_status`` has already been set on the node
+        and return the corresponding ``ExitCode`` in this case. If not, ``ExitCode(0)`` is returned.
+
+        :param logs: log dictionaries
+        :param exit_code: an ``ExitCode``
+        :return: the correct exit code
+        """
+        if logs:
+            self.emit_logs(logs)
+
+        if exit_code is not None:
+            self.logger.error(exit_code.message)
+        elif self.node.exit_status is not None:
+            exit_code = ExitCode(self.node.exit_status, self.node.exit_message)
+        else:
+            exit_code = ExitCode(0)

         return exit_code
+
+    @classmethod
+    def _parse_stdout_base(cls, stdout: str, logs: AttributeDict) -> Tuple[dict, AttributeDict]:
+        """Parse the ``stdout`` content of a Quantum ESPRESSO calculation.
+
+        This function only checks for basic content like JOB DONE, errors with %%%%% etc., but can be overridden to
+        parse more data from the ``stdout``.
+
+        :param stdout: the stdout content as a string.
+        :returns: tuple of two dictionaries, with the parsed data and log messages, respectively.
+        """
+        parsed_data = {}
+
+        if not re.search(cls.success_string, stdout):
+            logs.error.append('ERROR_OUTPUT_STDOUT_INCOMPLETE')
+
+        code_match = re.search(
+            r'Program\s(?P<code_name>[A-Z|a-z|\_|\d]+)\sv\.(?P<code_version>[\d\.|a-z|A-Z]+)\s', stdout
+        )
+        if code_match:
+            code_name = code_match.groupdict()['code_name']
+            parsed_data['code_version'] = code_match.groupdict()['code_version']
+
+            wall_match = re.search(fr'{code_name}\s+:[\s\S]+CPU\s+(?P<wall_time>[\s.\d|s|m|d|h]+)\sWALL', stdout)
+
+            if wall_match:
+                try:
+                    parsed_data['wall_time_seconds'] = convert_qe_time_to_sec(wall_match.groupdict()['wall_time'])
+                except ValueError:
+                    logs.warning.append('Unable to convert wall time from `stdout` to seconds.')
+
+        # Look for typical Quantum ESPRESSO error messages between %%%%%-lines that are not in our error map
+        if re.search(r'\%\%\%\%\%', stdout):  # Note: using e.g. `\%{5}` is significantly slower
+            for error_message in set(re.split(r'\%\%\%\%\%\n', stdout)[1::2]):
+
+                if not any(error_marker in error_message for error_marker in cls.get_error_map().keys()):
+                    logs.error.append(error_message.rstrip('\n%'))
+
+        # Look for error messages in general
+        for error_marker, error in cls.get_error_map().items():
+            if re.search(fr'{error_marker}', stdout):
+                logs.error.append(error)
+
+        # Look for lines with warnings from the `warning_map`
+        for warning_marker, warning in cls.get_warning_map().items():
+            for warning_message in set(re.findall(fr'({warning_marker}.+)\n', stdout)):
+                if warning is not None:
+                    logs.warning.append(warning)
+                else:
+                    logs.warning.append(warning_message)
+
+        return parsed_data, logs
diff --git a/src/aiida_quantumespresso/parsers/cp.py b/src/aiida_quantumespresso/parsers/cp.py
index 55ed22c91..3ebe8ec4a 100644
--- a/src/aiida_quantumespresso/parsers/cp.py
+++ b/src/aiida_quantumespresso/parsers/cp.py
@@ -4,11 +4,13 @@
 from packaging.version import Version
 from qe_tools import CONSTANTS

-from .base import Parser
+from aiida_quantumespresso.utils.mapping import get_logging_container
+
+from .base import BaseParser
 from .parse_raw.cp import parse_cp_raw_output, parse_cp_traj_stanzas


-class CpParser(Parser):
+class CpParser(BaseParser):
     """This class is the implementation of the Parser class for Cp."""

     def parse(self, **kwargs):
@@ -16,23 +18,25 @@ def parse(self, **kwargs):

         Does all the logic here.
""" + logs = get_logging_container() + + stdout, parsed_data, logs = self.parse_stdout_from_retrieved(logs) + + base_exit_code = self.check_base_errors(logs) + if base_exit_code: + return self.exit(base_exit_code, logs) + retrieved = self.retrieved # check what is inside the folder list_of_files = retrieved.base.repository.list_object_names() - # options.metadata become attributes like this: - stdout_filename = self.node.base.attributes.get('output_filename') - # at least the stdout should exist - if stdout_filename not in list_of_files: - return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ) - # This should match 1 file xml_files = [xml_file for xml_file in self.node.process_class.xml_filenames if xml_file in list_of_files] if not xml_files: - return self.exit(self.exit_codes.ERROR_MISSING_XML_FILE) + return self.exit(self.exit_codes.ERROR_MISSING_XML_FILE, logs) elif len(xml_files) > 1: - return self.exit(self.exit_codes.ERROR_OUTPUT_XML_MULTIPLE) + return self.exit(self.exit_codes.ERROR_OUTPUT_XML_MULTIPLE, logs) # cp.x can produce, depending on the particular version of the code, a file called `print_counter.xml` or # `print_counter`, which is a plain text file with the number of the last timestep written in the trajectory @@ -63,11 +67,10 @@ def parse(self, **kwargs): self.logger.info('print counter in xml format') filename_counter = filename_counter_xml - output_stdout = retrieved.base.repository.get_object_content(stdout_filename) output_xml = retrieved.base.repository.get_object_content(xml_files[0]) output_xml_counter = None if no_trajectory_output else retrieved.base.repository.get_object_content(filename_counter) out_dict, _raw_successful = parse_cp_raw_output( - output_stdout, output_xml, output_xml_counter, print_counter_xml + stdout, output_xml, output_xml_counter, print_counter_xml ) if not no_trajectory_output: @@ -255,9 +258,16 @@ def parse(self, **kwargs): out_dict.pop(key, None) # convert the dictionary into an AiiDA object + out_dict.update(parsed_data) output_params = Dict(out_dict) self.out('output_parameters', output_params) + for exit_code in list(self.get_error_map().values()) + ['ERROR_OUTPUT_STDOUT_INCOMPLETE']: + if exit_code in logs.error: + return self.exit(self.exit_codes.get(exit_code), logs) + + return self.exit(logs=logs) + def get_linkname_trajectory(self): """Returns the name of the link to the output_structure (None if not present)""" return 'output_trajectory' diff --git a/src/aiida_quantumespresso/parsers/dos.py b/src/aiida_quantumespresso/parsers/dos.py index 8864dce77..08db2a989 100644 --- a/src/aiida_quantumespresso/parsers/dos.py +++ b/src/aiida_quantumespresso/parsers/dos.py @@ -3,46 +3,35 @@ import numpy as np from aiida_quantumespresso.parsers import QEOutputParsingError -from aiida_quantumespresso.parsers.parse_raw.base import parse_output_base +from aiida_quantumespresso.utils.mapping import get_logging_container -from .base import Parser +from .base import BaseParser -class DosParser(Parser): - """This class is the implementation of the Parser class for Dos.""" +class DosParser(BaseParser): + """``Parser`` implementation for the ``DosCalculation`` calculation job class.""" def parse(self, **kwargs): - """Parses the datafolder, stores results. 
+ """Parse the retrieved files of a ``DosCalculation`` into output nodes.""" + logs = get_logging_container() - Retrieves dos output, and some basic information from the out_file, such as warnings and wall_time - """ - retrieved = self.retrieved + _, parsed_stdout, logs = self.parse_stdout_from_retrieved(logs) - # Read standard out - try: - filename_stdout = self.node.get_option('output_filename') # or get_attribute(), but this is clearer - with retrieved.base.repository.open(filename_stdout, 'r') as fil: - out_file = fil.readlines() - except OSError: - return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ) - - job_done = False - for i in range(len(out_file)): - line = out_file[-i] - if 'JOB DONE' in line: - job_done = True - break - if not job_done: - return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE) - - # check that the dos file is present, if it is, read it + base_exit_code = self.check_base_errors(logs) + if base_exit_code: + return self.exit(base_exit_code, logs) + + self.out('output_parameters', Dict(parsed_stdout)) + + if 'ERROR_OUTPUT_STDOUT_INCOMPLETE'in logs.error: + return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE, logs) + + # Parse the DOS try: - with retrieved.base.repository.open(self.node.process_class._DOS_FILENAME, 'r') as fil: - dos_file = fil.readlines() + with self.retrieved.base.repository.open(self.node.process_class._DOS_FILENAME, 'r') as handle: + dos_file = handle.readlines() except OSError: - return self.exit(self.exit_codes.ERROR_READING_DOS_FILE) - - # end of initial checks + return self.exit(self.exit_codes.ERROR_READING_DOS_FILE, logs) array_names = [[], []] array_units = [[], []] @@ -79,11 +68,9 @@ def parse(self, **kwargs): y_units += ['states/eV'] xy_data.set_y(y_arrays, y_names, y_units) - parsed_data, logs = parse_output_base(out_file, 'DOS') - self.emit_logs(logs) - self.out('output_dos', xy_data) - self.out('output_parameters', Dict(parsed_data)) + + return self.exit(logs=logs) def parse_raw_dos(dos_file, array_names, array_units): diff --git a/src/aiida_quantumespresso/parsers/matdyn.py b/src/aiida_quantumespresso/parsers/matdyn.py index faccf6f54..cad497fe9 100644 --- a/src/aiida_quantumespresso/parsers/matdyn.py +++ b/src/aiida_quantumespresso/parsers/matdyn.py @@ -3,27 +3,30 @@ from qe_tools import CONSTANTS from aiida_quantumespresso.calculations.matdyn import MatdynCalculation +from aiida_quantumespresso.utils.mapping import get_logging_container -from .base import Parser +from .base import BaseParser -class MatdynParser(Parser): - """Parser implementation for the MatdynCalculation.""" +class MatdynParser(BaseParser): + """``Parser`` implementation for the ``MatDynCalculation`` calculation job class.""" def parse(self, **kwargs): - """Parse the retrieved files from a `MatdynCalculation`.""" - retrieved = self.retrieved - filename_stdout = self.node.get_option('output_filename') - filename_frequencies = MatdynCalculation._PHONON_FREQUENCIES_NAME + """Parse the retrieved files from a ``MatdynCalculation`` into output nodes.""" + logs = get_logging_container() + + _, parsed_data, logs = self.parse_stdout_from_retrieved(logs) + + base_exit_code = self.check_base_errors(logs) + if base_exit_code: + return self.exit(base_exit_code, logs) - if filename_stdout not in retrieved.base.repository.list_object_names(): - return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ) + self.out('output_parameters', orm.Dict(parsed_data)) - if 'JOB DONE' not in retrieved.base.repository.get_object_content(filename_stdout): - return 
self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE) + if 'ERROR_OUTPUT_STDOUT_INCOMPLETE'in logs.error: + return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE, logs) - if filename_frequencies not in retrieved.base.repository.list_object_names(): - return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ) + filename_frequencies = MatdynCalculation._PHONON_FREQUENCIES_NAME # Extract the kpoints from the input data and create the `KpointsData` for the `BandsData` try: @@ -34,15 +37,15 @@ def parse(self, **kwargs): kpoints_for_bands = orm.KpointsData() kpoints_for_bands.set_kpoints(kpoints) - parsed_data = parse_raw_matdyn_phonon_file(retrieved.base.repository.get_object_content(filename_frequencies)) + parsed_data = parse_raw_matdyn_phonon_file(self.retrieved.base.repository.get_object_content(filename_frequencies)) try: num_kpoints = parsed_data.pop('num_kpoints') except KeyError: - return self.exit(self.exit_codes.ERROR_OUTPUT_KPOINTS_MISSING) + return self.exit(self.exit_codes.ERROR_OUTPUT_KPOINTS_MISSING, logs) if num_kpoints != kpoints.shape[0]: - return self.exit(self.exit_codes.ERROR_OUTPUT_KPOINTS_INCOMMENSURATE) + return self.exit(self.exit_codes.ERROR_OUTPUT_KPOINTS_INCOMMENSURATE, logs) output_bands = orm.BandsData() output_bands.set_kpointsdata(kpoints_for_bands) @@ -51,10 +54,9 @@ def parse(self, **kwargs): for message in parsed_data['warnings']: self.logger.error(message) - self.out('output_parameters', orm.Dict(parsed_data)) self.out('output_phonon_bands', output_bands) - return + return self.exit(logs=logs) def parse_raw_matdyn_phonon_file(phonon_frequencies): diff --git a/src/aiida_quantumespresso/parsers/neb.py b/src/aiida_quantumespresso/parsers/neb.py index fd9635959..c98fe992b 100644 --- a/src/aiida_quantumespresso/parsers/neb.py +++ b/src/aiida_quantumespresso/parsers/neb.py @@ -1,9 +1,11 @@ # -*- coding: utf-8 -*- -from aiida.common import NotExistent -from aiida.orm import Dict +import os + +from aiida.common import AttributeDict, NotExistent +from aiida.orm import ArrayData, Dict, TrajectoryData +import numpy from aiida_quantumespresso.calculations.pw import PwCalculation -from aiida_quantumespresso.parsers import QEOutputParsingError from aiida_quantumespresso.parsers.parse_raw import convert_qe_to_aiida_structure from aiida_quantumespresso.parsers.parse_raw.neb import parse_raw_output_neb from aiida_quantumespresso.parsers.parse_raw.pw import parse_stdout as parse_pw_stdout @@ -11,42 +13,40 @@ from aiida_quantumespresso.parsers.parse_xml.exceptions import XMLParseError, XMLUnsupportedFormatError from aiida_quantumespresso.parsers.parse_xml.pw.parse import parse_xml as parse_pw_xml from aiida_quantumespresso.parsers.pw import PwParser +from aiida_quantumespresso.utils.mapping import get_logging_container -from .base import Parser +from .base import BaseParser -class NebParser(Parser): +class NebParser(BaseParser): """`Parser` implementation for the `NebCalculation` calculation job class.""" + # Key that contains the optional parser options in the `settings` input node. + parser_settings_key = 'parser_options' + + class_warning_map = { + 'scf convergence NOT achieved on image': 'SCF did not converge for a given image', + 'Maximum CPU time exceeded': 'Maximum CPU time exceeded', + 'reached the maximum number of steps': 'Maximum number of iterations reached in the image optimization', + } + def parse(self, **kwargs): - """Parse the retrieved files of a completed `NebCalculation` into output nodes. 
+ """Parse the retrieved files of a completed ``NebCalculation`` into output nodes. - Two nodes that are expected are the default 'retrieved' `FolderData` node which will store the retrieved files + Two nodes that are expected are the default 'retrieved' ``FolderData`` node which will store the retrieved files permanently in the repository. The second required node is a filepath under the key `retrieved_temporary_files` which should contain the temporary retrieved files. """ - import os - - from aiida.orm import ArrayData, TrajectoryData - import numpy - - PREFIX = self.node.process_class._PREFIX - - retrieved = self.retrieved - list_of_files = retrieved.base.repository.list_object_names() # Note: this includes folders, but not the files they contain. - - # The stdout is required for parsing - filename_stdout = self.node.base.attributes.get('output_filename') + logs = get_logging_container() - if filename_stdout not in list_of_files: - return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ) + prefix = self.node.process_class._PREFIX # Look for optional settings input node and potential 'parser_options' dictionary within it # Note that we look for both NEB and PW parser options under "inputs.settings.parser_options"; # we don't even have a namespace "inputs.pw.settings". try: settings = self.node.inputs.settings.get_dict() - parser_options = settings[self.get_parser_settings_key()] + parser_options = settings[self.parser_settings_key] except (AttributeError, KeyError, NotExistent): settings = {} parser_options = {} @@ -54,49 +54,32 @@ def parse(self, **kwargs): # load the pw input parameters dictionary pw_input_dict = self.node.inputs.pw.parameters.get_dict() - # load the neb input parameters dictionary - neb_input_dict = self.node.inputs.parameters.get_dict() + stdout, parsed_data, logs = self.parse_stdout_from_retrieved(logs) - # First parse the Neb output - try: - stdout = retrieved.base.repository.get_object_content(filename_stdout) - neb_out_dict, iteration_data, raw_successful = parse_raw_output_neb(stdout, neb_input_dict) - # TODO: why do we ignore raw_successful ? - except (OSError, QEOutputParsingError): - return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ) - - for warn_type in ['warnings', 'parser_warnings']: - for message in neb_out_dict[warn_type]: - self.logger.warning(f'parsing NEB output: {message}') + base_exit_code = self.check_base_errors(logs) + if base_exit_code: + return self.exit(base_exit_code, logs) - if 'QE neb run did not reach the end of the execution.' in neb_out_dict['parser_warnings']: - return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE) + neb_out_dict, iteration_data = parse_raw_output_neb(stdout) + parsed_data.update(neb_out_dict) - # Retrieve the number of images - try: - num_images = neb_input_dict['num_of_images'] - except KeyError: - try: - num_images = neb_out_dict['num_of_images'] - except KeyError: - return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_PARSE) - if num_images < 2: - return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_PARSE) + num_images = parsed_data['num_of_images'] # Now parse the information from the individual pw calculations for the different images image_data = {} positions = [] cells = [] - # for each image... 
+ for i in range(num_images): # check if any of the known XML output file names are present, and parse the first that we find - relative_output_folder = os.path.join(f'{PREFIX}_{i + 1}', f'{PREFIX}.save') + relative_output_folder = os.path.join(f'{prefix}_{i + 1}', f'{prefix}.save') retrieved_files = self.retrieved.base.repository.list_object_names(relative_output_folder) + for xml_filename in PwCalculation.xml_filenames: if xml_filename in retrieved_files: xml_file_path = os.path.join(relative_output_folder, xml_filename) try: - with retrieved.base.repository.open(xml_file_path) as xml_file: + with self.retrieved.base.repository.open(xml_file_path) as xml_file: parsed_data_xml, logs_xml = parse_pw_xml(xml_file, None) except IOError: return self.exit(self.exit_codes.ERROR_OUTPUT_XML_READ) @@ -104,10 +87,10 @@ def parse(self, **kwargs): return self.exit(self.exit_codes.ERROR_OUTPUT_XML_PARSE) except XMLUnsupportedFormatError: return self.exit(self.exit_codes.ERROR_OUTPUT_XML_FORMAT) - except Exception as exc: + except Exception: import traceback traceback.print_exc() - return self.exit(self.exit_codes.ERROR_UNEXPECTED_PARSER_EXCEPTION.format(exception=exc)) + return self.exit(self.exit_codes.ERROR_UNEXPECTED_PARSER_EXCEPTION) # this image is dealt with, so break the inner loop and go to the next image break # otherwise, if none of the filenames we tried exists, exit with an error @@ -115,9 +98,9 @@ def parse(self, **kwargs): return self.exit(self.exit_codes.ERROR_MISSING_XML_FILE) # look for pw output and parse it - pw_out_file = os.path.join(f'{PREFIX}_{i + 1}', 'PW.out') + pw_out_file = os.path.join(f'{prefix}_{i + 1}', 'PW.out') try: - with retrieved.base.repository.open(pw_out_file, 'r') as f: + with self.retrieved.base.repository.open(pw_out_file, 'r') as f: pw_out_text = f.read() # Note: read() and not readlines() except IOError: return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ) @@ -158,16 +141,16 @@ def parse(self, **kwargs): cells.append(structure_data.cell) # Add also PW warnings and errors to the neb output data, avoiding repetitions. 
-        for log_type in ['warning', 'error']:
-            for message in logs_stdout[log_type]:
-                formatted_message = f'{log_type}: {message}'
-                if formatted_message not in neb_out_dict['warnings']:
-                    neb_out_dict['warnings'].append(formatted_message)
+        for log_level in ['warning', 'error']:
+            for message in logs_stdout[log_level]:
+                formatted_message = f'{log_level}: {message}'
+                if formatted_message not in parsed_data['warnings']:
+                    parsed_data['warnings'].append(formatted_message)

         # Symbols can be obtained simply from the last image
         symbols = [str(site.kind_name) for site in structure_data.sites]

-        output_params = Dict(dict(list(neb_out_dict.items()) + list(image_data.items())))
+        output_params = Dict(dict(list(parsed_data.items()) + list(image_data.items())))
         self.out('output_parameters', output_params)

         trajectory = TrajectoryData()
@@ -188,16 +171,16 @@ def parse(self, **kwargs):

         # Load the original and interpolated energy profile along the minimum-energy path (mep)
         try:
-            filename = PREFIX + '.dat'
-            with retrieved.base.repository.open(filename, 'r') as handle:
+            filename = prefix + '.dat'
+            with self.retrieved.base.repository.open(filename, 'r') as handle:
                 mep = numpy.loadtxt(handle)
         except Exception:
             self.logger.warning(f'could not open expected output file `{filename}`.')
             mep = numpy.array([[]])

         try:
-            filename = PREFIX + '.int'
-            with retrieved.base.repository.open(filename, 'r') as handle:
+            filename = prefix + '.int'
+            with self.retrieved.base.repository.open(filename, 'r') as handle:
                 interp_mep = numpy.loadtxt(handle)
         except Exception:
             self.logger.warning(f'could not open expected output file `{filename}`.')
@@ -209,9 +192,7 @@ def parse(self, **kwargs):
         mep_arraydata.set_array('interpolated_mep', interp_mep)
         self.out('output_mep', mep_arraydata)

-        return
+        if 'ERROR_OUTPUT_STDOUT_INCOMPLETE' in logs.error:
+            return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE, logs)

-    @staticmethod
-    def get_parser_settings_key():
-        """Return the key that contains the optional parser options in the `settings` input node."""
-        return 'parser_options'
+        return self.exit(logs=logs)
diff --git a/src/aiida_quantumespresso/parsers/open_grid.py b/src/aiida_quantumespresso/parsers/open_grid.py
index c11b3b61a..e42f445da 100644
--- a/src/aiida_quantumespresso/parsers/open_grid.py
+++ b/src/aiida_quantumespresso/parsers/open_grid.py
@@ -1,55 +1,51 @@
 # -*- coding: utf-8 -*-
-from aiida.common import NotExistent
+from typing import Tuple
+
+from aiida.common import AttributeDict
 from aiida.orm import Dict, KpointsData

-from aiida_quantumespresso.parsers.base import Parser
-from aiida_quantumespresso.parsers.parse_raw.base import parse_output_base
+from aiida_quantumespresso.parsers.base import BaseParser
+from aiida_quantumespresso.utils.mapping import get_logging_container


-class OpenGridParser(Parser):
+class OpenGridParser(BaseParser):
     """``Parser`` implementation for the ``OpenGridCalculation`` calculation job class."""

+    class_error_map = {
+        'incompatible FFT grid': 'ERROR_INCOMPATIBLE_FFT_GRID'
+    }
+
     def parse(self, **kwargs):
         """Parse the retrieved files of a completed ``OpenGridCalculation`` into output nodes."""
-        try:
-            out_folder = self.retrieved
-        except NotExistent:
-            return self.exit(self.exit_codes.ERROR_NO_RETRIEVED_FOLDER)
-
-        try:
-            filename_stdout = self.node.get_option('output_filename')
-            with out_folder.open(filename_stdout, 'r') as handle:
-                out_file = handle.read()
-        except OSError:
-            return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ)
-
-        parsed_data, logs =
parse_output_base(out_file, codename='OPEN_GRID') - self.emit_logs(logs) - self.out('output_parameters', Dict(parsed_data)) + logs = get_logging_container() - lines = out_file.split('\n') - for line in lines: - if 'incompatible FFT grid' in line: - return self.exit(self.exit_codes.ERROR_INCOMPATIBLE_FFT_GRID) + stdout, parsed_data, logs = self.parse_stdout_from_retrieved(logs) - if 'ERROR_OUTPUT_STDOUT_INCOMPLETE' in logs.error: - return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE) - elif logs.error: - return self.exit(self.exit_codes.ERROR_GENERIC_QE_ERROR) + base_exit_code = self.check_base_errors(logs) + if base_exit_code: + return self.exit(base_exit_code, logs) - try: - kpoints_mesh, kpoints = self.parse_kpoints(out_file) - except ValueError: - return self.exit(self.exit_codes.ERROR_OUTPUT_KPOINTS_MISMATCH) + self.out('output_parameters', Dict(parsed_data)) + + for exit_code in self.get_error_map().values(): + if exit_code in logs.error: + return self.exit(self.exit_codes.get(exit_code), logs) + + kpoints_mesh, kpoints, logs = self.parse_kpoints(stdout, logs) - # Output both the dimensions and the explict list of kpoints self.out('kpoints_mesh', kpoints_mesh) self.out('kpoints', kpoints) + for exit_code in ['ERROR_OUTPUT_KPOINTS_MISMATCH', 'ERROR_OUTPUT_STDOUT_INCOMPLETE']: + if exit_code in logs.error: + return self.exit(self.exit_codes.get(exit_code), logs) + + return self.exit(logs=logs) + @staticmethod - def parse_kpoints(out_file): - """Parse and output the dimensions and the explicit list of kpoints.""" - lines = out_file.split('\n') + def parse_kpoints(stdout: str, logs: AttributeDict) -> Tuple[KpointsData, KpointsData, AttributeDict]: + """Parse the ``stdout`` for the mesh and explicit list of kpoints.""" + lines = stdout.split('\n') kpoints = [] weights = [] @@ -76,6 +72,6 @@ def parse_kpoints(out_file): kpoints_list.set_kpoints(kpoints, cartesian=False, weights=weights) if kmesh[0] * kmesh[1] * kmesh[2] != len(kpoints): - raise ValueError('Mismatch between kmesh dimensions and number of kpoints') + logs.error.append('ERROR_OUTPUT_KPOINTS_MISMATCH') - return kpoints_mesh, kpoints_list + return kpoints_mesh, kpoints_list, logs diff --git a/src/aiida_quantumespresso/parsers/parse_raw/base.py b/src/aiida_quantumespresso/parsers/parse_raw/base.py index cfa57e761..7e535f955 100644 --- a/src/aiida_quantumespresso/parsers/parse_raw/base.py +++ b/src/aiida_quantumespresso/parsers/parse_raw/base.py @@ -2,109 +2,9 @@ """A basic parser for the common format of QE.""" import re -from aiida.orm.nodes.data.structure import Kind, Site -from aiida.plugins import DataFactory +from aiida.orm import StructureData -StructureData = DataFactory('core.structure') - -__all__ = ('parse_output_base', 'parse_output_error', 'convert_qe_time_to_sec', 'convert_qe_to_aiida_structure') - - -def parse_output_base(filecontent, codename=None, message_map=None): - """Parses the output file of a QE calculation, just checking for basic content like JOB DONE, errors with %%%% etc. - - :param filecontent: a string with the output file content - :param codename: the string printed both in the header and near the walltime. - If passed, a few more things are parsed (e.g. code version, walltime, ...) 
- :returns: tuple of two dictionaries, with the parsed data and log messages, respectively - """ - from aiida_quantumespresso.utils.mapping import get_logging_container - - keys = ['error', 'warning'] - - if message_map is not None and (not isinstance(message_map, dict) or any(key not in message_map for key in keys)): - raise RuntimeError(f'invalid format `message_map`: should be dictionary with two keys {keys}') - - logs = get_logging_container() - parsed_data = {} - - lines = filecontent if isinstance(filecontent, list) else filecontent.split('\n') - - for line in lines: - if 'JOB DONE' in line: - break - else: - logs.error.append('ERROR_OUTPUT_STDOUT_INCOMPLETE') - - if codename is not None: - - codestring = f'Program {codename}' - - for line_number, line in enumerate(lines): - - if codestring in line and 'starts on' in line: - parsed_data['code_version'] = line.split(codestring)[1].split('starts on')[0].strip() - - # Parse the walltime - if codename in line and 'WALL' in line: - try: - time = line.split('CPU')[1].split('WALL')[0].strip() - parsed_data['wall_time'] = time - except (ValueError, IndexError): - logs.warnings.append('ERROR_PARSING_WALLTIME') - else: - try: - parsed_data['wall_time_seconds'] = convert_qe_time_to_sec(time) - except ValueError: - logs.warnings.append('ERROR_CONVERTING_WALLTIME_TO_SECONDS') - - # Parse an error message with optional mapping of the message - if '%%%%%%%%%%%%%%' in line: - parse_output_error(lines, line_number, logs, message_map) - - return parsed_data, logs - - -def parse_output_error(lines, line_number_start, logs, message_map=None): - """Parse a Quantum ESPRESSO error message which appears between two lines marked by ``%%%%%%%%``) - - :param lines: a list of strings gotten by splitting the standard output content on newlines - :param line_number_start: the line at which we identified some ``%%%%%%%%`` - :param logs: a logging container from `aiida_quantumespresso.utils.mapping.get_logging_container` - """ - - def map_message(message, message_map, logs): - - # Match any known error and warning messages - for marker, message in message_map['error'].items(): - if marker in line: - if message is None: - message = line - logs.error.append(message) - - for marker, message in message_map['warning'].items(): - if marker in line: - if message is None: - message = line - logs.warning.append(message) - - # First determine the line that closes the error block which is also marked by ``%%%%%%%`` in the line - for line_number, line in enumerate(lines[line_number_start + 1:]): - if '%%%%%%%%%%%%' in line: - line_number_end = line_number - break - else: - return - - # Get the set of unique lines between the error indicators and pass them through the message map, or if not provided - # simply append the message to the `error` list of the logs container - for message in set(lines[line_number_start:line_number_end]): - if message_map is not None: - map_message(message, message_map, logs) - else: - logs.error(message) - - return +__all__ = ('convert_qe_time_to_sec', 'convert_qe_to_aiida_structure', 'convert_qe_to_kpoints') def convert_qe_time_to_sec(timestr): diff --git a/src/aiida_quantumespresso/parsers/parse_raw/cp.py b/src/aiida_quantumespresso/parsers/parse_raw/cp.py index 5c4745213..b9ac545e8 100644 --- a/src/aiida_quantumespresso/parsers/parse_raw/cp.py +++ b/src/aiida_quantumespresso/parsers/parse_raw/cp.py @@ -82,12 +82,7 @@ def parse_cp_text_output(data, xml_data): parsed_data['warnings'].append(line) elif 'bananas' in line: 
parsed_data['warnings'].append('Bananas from the ortho.') - elif 'CP' in line and 'WALL' in line: - try: - time = line.split('CPU')[1].split('WALL')[0] - parsed_data['wall_time'] = time - except: - raise QEOutputParsingError('Error while parsing wall time.') + #when the cp does a cg, the output is different and the parser below does not work #TODO: understand what the cg prints out and parse it (it is undocumented) if not conjugate_gradient: diff --git a/src/aiida_quantumespresso/parsers/parse_raw/neb.py b/src/aiida_quantumespresso/parsers/parse_raw/neb.py index 7c70a029c..c38cb776e 100644 --- a/src/aiida_quantumespresso/parsers/parse_raw/neb.py +++ b/src/aiida_quantumespresso/parsers/parse_raw/neb.py @@ -7,57 +7,21 @@ """ from qe_tools import CONSTANTS -from aiida_quantumespresso.parsers import QEOutputParsingError -from aiida_quantumespresso.parsers.parse_raw import convert_qe_time_to_sec - -def parse_raw_output_neb(stdout, input_dict, parser_opts=None): +def parse_raw_output_neb(stdout): """Parses the output of a neb calculation Receives in input the paths to the output file. :param stdout: the stdout content as a string - :param input_dict: dictionary with the neb input parameters - :param parser_opts: not used :return parameter_data: a dictionary with parsed parameters :return iteration_data: a dictionary with arrays (for relax & md calcs.) - :return job_successful: a boolean that is False in case of failed calculations - - :raises QEOutputParsingError: for errors in the parsing, - - 2 different keys to check in output: parser_warnings and warnings. - On an upper level, these flags MUST be checked. - The first is expected to be empty unless QE failures or unfinished jobs. """ import copy - job_successful = True parser_warnings = [] - if not stdout: # there is an output file, but it's empty -> crash - job_successful = False - - # check if the job has finished (that doesn't mean without errors) - finished_run = False - for line in stdout.split('\n')[::-1]: - if 'JOB DONE' in line: - finished_run = True - break - if not finished_run: # error if the job has not finished - warning = 'QE neb run did not reach the end of the execution.' - parser_warnings.append(warning) - job_successful = False - # parse the text output of the neb calculation - try: - out_data, iteration_data, critical_messages = parse_neb_text_output(stdout, input_dict) - except QEOutputParsingError as exc: - if not finished_run: # I try to parse it as much as possible - parser_warnings.append('Error while parsing the output file') - out_data = {'warnings': []} - iteration_data = {} - critical_messages = [] - else: # if it was finished and I got an error, it's a mistake of the parser - raise QEOutputParsingError(f'Error while parsing NEB text output: {exc}') + out_data, iteration_data = parse_neb_text_output(stdout) # I add in the out_data all the last elements of iteration_data values. # I leave the possibility to skip some large arrays (None for the time being). @@ -68,19 +32,12 @@ def parse_raw_output_neb(stdout, input_dict, parser_opts=None): continue out_data[k] = v[-1] - # if there is a severe error, the calculation is FAILED - if any([x in out_data['warnings'] for x in critical_messages]): - job_successful = False - parameter_data = dict(list(out_data.items()) + [('parser_warnings', parser_warnings)]) - # return various data. 
- # parameter data will be mapped in Dict - # iteration_data in ArrayData - return parameter_data, iteration_data, job_successful + return parameter_data, iteration_data -def parse_neb_text_output(data, input_dict={}): +def parse_neb_text_output(data): """Parses the text output of QE Neb. :param data: a string, the file as read by read() @@ -95,50 +52,14 @@ def parse_neb_text_output(data, input_dict={}): """ from collections import defaultdict - from aiida_quantumespresso.parsers.parse_raw import parse_output_error - from aiida_quantumespresso.utils.mapping import get_logging_container - - # TODO: find a more exhaustive list of the common errors of neb - # critical warnings: if any is found, the calculation status is FAILED - critical_warnings = { - 'scf convergence NOT achieved on image': 'SCF did not converge for a given image', - 'Maximum CPU time exceeded': 'Maximum CPU time exceeded', - 'reached the maximum number of steps': 'Maximum number of iterations reached in the image optimization', - } - - minor_warnings = { - 'Warning:': None, - } - - all_warnings = dict(list(critical_warnings.items()) + list(minor_warnings.items())) - parsed_data = {} parsed_data['warnings'] = [] iteration_data = defaultdict(list) - # parse time, starting from the end - # apparently, the time is written multiple times - for line in reversed(data.split('\n')): - if 'NEB' in line and 'WALL' in line: - try: - time = line.split('CPU')[1].split('WALL')[0].strip() - parsed_data['wall_time'] = time - except Exception: - parsed_data['warnings'].append('Error while parsing wall time.') - - try: - parsed_data['wall_time_seconds'] = \ - convert_qe_time_to_sec(parsed_data['wall_time']) - except ValueError: - raise QEOutputParsingError('Unable to convert wall_time in seconds.') - break - # set by default the calculation as not converged. 
parsed_data['converged'] = [False, 0] - logs = get_logging_container() - lines = data.split('\n') - for count, line in enumerate(lines): + for count, line in enumerate(data.split('\n')): if 'initial path length' in line: initial_path_length = float(line.split('=')[1].split('bohr')[0]) parsed_data['initial_path_length'] = initial_path_length * CONSTANTS.bohr_to_ang @@ -177,26 +98,8 @@ def parse_neb_text_output(data, input_dict={}): parsed_data['climbing_images_manual'] = [int(_) for _ in line.split(':')[1].split(',')[:-1]] elif 'neb: convergence achieved in' in line: parsed_data['converged'] = [True, int(line.split('iteration')[0].split()[-1])] - elif '%%%%%%%%%%%%%%' in line: - parse_output_error(lines, count, logs) - elif any(i in line for i in all_warnings): - message = [all_warnings[i] for i in all_warnings.keys() if i in line][0] - - if message is not None: - parsed_data['warnings'].append(message) - - parsed_data['warnings'].extend(logs.error) - - try: - num_images = parsed_data['num_of_images'] - except KeyError: - try: - num_images = input_dict['PATH']['num_of_images'] - except KeyError: - raise QEOutputParsingError( - 'No information on the number ' - 'of images available (neither in input nor in output' - ) + + num_images = parsed_data['num_of_images'] iteration_lines = data.split('-- iteration')[1:] iteration_lines = [i.split('\n') for i in iteration_lines] @@ -233,4 +136,4 @@ def parse_neb_text_output(data, input_dict={}): image_dist = float(line.split('=')[1].split('bohr')[0]) iteration_data['image_dist'].append(image_dist * CONSTANTS.bohr_to_ang) - return parsed_data, dict(iteration_data), list(critical_warnings.values()) + return parsed_data, dict(iteration_data) diff --git a/src/aiida_quantumespresso/parsers/parse_raw/ph.py b/src/aiida_quantumespresso/parsers/parse_raw/ph.py index cd5253489..91cb377bf 100644 --- a/src/aiida_quantumespresso/parsers/parse_raw/ph.py +++ b/src/aiida_quantumespresso/parsers/parse_raw/ph.py @@ -10,10 +10,9 @@ from aiida_quantumespresso.parsers import QEOutputParsingError from aiida_quantumespresso.parsers.parse_raw.base import convert_qe_time_to_sec from aiida_quantumespresso.parsers.parse_xml.pw.legacy import parse_xml_child_bool, read_xml_card -from aiida_quantumespresso.utils.mapping import get_logging_container -def parse_raw_ph_output(stdout, tensors=None, dynamical_matrices=None): +def parse_raw_ph_output(stdout, logs, tensors=None, dynamical_matrices=None): """Parses the raw output of a Quantum ESPRESSO `ph.x` calculation. :param stdout: the content of the stdout file as a string @@ -21,16 +20,8 @@ def parse_raw_ph_output(stdout, tensors=None, dynamical_matrices=None): :param dynamical_matrices: a list of the content of the dynamical matrix files as a string :returns: tuple of two dictionaries, with the parsed data and log messages, respectively """ - logs = get_logging_container() data_lines = stdout.split('\n') - # First check whether the `JOB DONE` message was written, otherwise the job was interrupted - for line in data_lines: - if 'JOB DONE' in line: - break - else: - logs.error.append('ERROR_OUTPUT_STDOUT_INCOMPLETE') - # Parse tensors, if present tensor_data = {} if tensors: @@ -147,39 +138,12 @@ def parse_xml_matrices(tagname, target_tags): def parse_ph_text_output(lines, logs): - """Parses the stdout of Quantum ESPRESSO ph.x. + """Parses the stdout of Quantum ESPRESSO ``ph.x``. 
:param lines: list of strings, the file as read by readlines() :return: dictionary with parsed values """ - def detect_important_message(logs, line): - - message_map = { - 'error': { - 'Maximum CPU time exceeded': 'ERROR_OUT_OF_WALLTIME', - 'No convergence has been achieved': 'ERROR_CONVERGENCE_NOT_REACHED', - 'problems computing cholesky': 'ERROR_COMPUTING_CHOLESKY', - }, - 'warning': { - 'Warning:': None, - 'DEPRECATED:': None, - } - } - - # Match any known error and warning messages - for marker, message in message_map['error'].items(): - if marker in line: - if message is None: - message = line - logs.error.append(message) - - for marker, message in message_map['warning'].items(): - if marker in line: - if message is None: - message = line - logs.warning.append(message) - def parse_qpoints(lines): """Parse the q-points from the corresponding lines in the stdout.""" @@ -218,22 +182,11 @@ def parse_mode_symmetries(lines, num_atoms): parsed_data = {} - # Parse time, starting from the end because the time is written multiple times - for line in reversed(lines): - if 'PHONON' in line and 'WALL' in line: - try: - parsed_data['wall_time_seconds'] = \ - convert_qe_time_to_sec(line.split('CPU')[1].split('WALL')[0]) - except (ValueError, IndexError): - raise QEOutputParsingError('Error during parsing of walltime.') - break - parsed_data['num_q_found'] = 0 + # Parse number of q-points and number of atoms for count, line in enumerate(lines): - detect_important_message(logs, line) - if 'q-points for this run' in line: try: parsed_data['number_of_qpoints'] = int(line.split('/')[1].split('q-points')[0]) diff --git a/src/aiida_quantumespresso/parsers/parse_raw/pw2gw.py b/src/aiida_quantumespresso/parsers/parse_raw/pw2gw.py deleted file mode 100644 index 22a0035ec..000000000 --- a/src/aiida_quantumespresso/parsers/parse_raw/pw2gw.py +++ /dev/null @@ -1,46 +0,0 @@ -# -*- coding: utf-8 -*- -"""A collection of function that are used to parse the output of Quantum Espresso pw2gw. - -The function that needs to be called from outside is parse_raw_output(). The functions mostly work without aiida -specific functionalities. The parsing will try to convert whatever it can in some dictionary, which by operative -decision doesn't have much structure encoded, [the values are simple ] -""" -from aiida_quantumespresso.parsers import QEOutputParsingError -from aiida_quantumespresso.parsers.parse_raw import convert_qe_time_to_sec -from aiida_quantumespresso.utils.mapping import get_logging_container - - -def parse_stdout(stdout): - """Parses the stdout content of a Quantum ESPRESSO `pw2gw.x` calculation. 
- - :param stdout: the stdout content as a string - :param input_parameters: dictionary with the input parameters - :param parser_options: the parser options from the settings input parameter node - :returns: tuple of two dictionaries, with the parsed data and log messages, respectively - """ - # Separate the input string into separate lines - data_lines = stdout.split('\n') - - logs = get_logging_container() - - parsed_data = {} - - for line in data_lines: - if 'JOB DONE' in line: - break - else: - logs.error.append('ERROR_OUTPUT_STDOUT_INCOMPLETE') - - for count, line in enumerate(data_lines): - if 'PW2GW' in line and 'WALL' in line: - try: - time = line.split('CPU')[1].split('WALL')[0] - parsed_data['wall_time'] = time - except Exception: - logs.warning.append('Error while parsing wall time.') - try: - parsed_data['wall_time_seconds'] = convert_qe_time_to_sec(time) - except ValueError: - raise QEOutputParsingError('Unable to convert wall_time in seconds.') - - return parsed_data, logs diff --git a/src/aiida_quantumespresso/parsers/ph.py b/src/aiida_quantumespresso/parsers/ph.py index e8a810ccb..8f3946e30 100644 --- a/src/aiida_quantumespresso/parsers/ph.py +++ b/src/aiida_quantumespresso/parsers/ph.py @@ -2,73 +2,65 @@ """`Parser` implementation for the `PhCalculation` calculation job class.""" import os import re -import traceback from aiida import orm from aiida_quantumespresso.calculations.ph import PhCalculation from aiida_quantumespresso.parsers.parse_raw.ph import parse_raw_ph_output +from aiida_quantumespresso.utils.mapping import get_logging_container -from .base import Parser +from .base import BaseParser -class PhParser(Parser): - """`Parser` implementation for the `PhCalculation` calculation job class.""" +class PhParser(BaseParser): + """``Parser`` implementation for the ``PhCalculation`` calculation job class.""" + + class_error_map = { + 'No convergence has been achieved': 'ERROR_CONVERGENCE_NOT_REACHED', + 'problems computing cholesky': 'ERROR_COMPUTING_CHOLESKY', + } def parse(self, **kwargs): - """Parse the retrieved files from a `PhCalculation`.""" - retrieved = self.retrieved + """Parse the retrieved files from a ``PhCalculation`` into output nodes.""" + logs = get_logging_container() - # The stdout is required for parsing - filename_stdout = self.node.base.attributes.get('output_filename') - filename_tensor = PhCalculation._OUTPUT_XML_TENSOR_FILE_NAME + stdout, parsed_data, logs = self.parse_stdout_from_retrieved(logs) - if filename_stdout not in retrieved.base.repository.list_object_names(): - return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_MISSING) + # If the scheduler detected OOW, simply keep that exit code by not returning anything more specific. 
+ if self.node.exit_status == PhCalculation.exit_codes.ERROR_SCHEDULER_OUT_OF_WALLTIME: + return - try: - stdout = retrieved.base.repository.get_object_content(filename_stdout) - except (IOError, OSError): - return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ) + base_exit_code = self.check_base_errors(logs) + if base_exit_code: + return self.exit(base_exit_code, logs) + + filename_tensor = self.node.process_class._OUTPUT_XML_TENSOR_FILE_NAME try: - tensor_file = retrieved.base.repository.get_object_content(filename_tensor) - except (IOError, OSError): + with self.retrieved.base.repository.open(filename_tensor, 'r') as handle: + tensor_file = handle.read() + except OSError: tensor_file = None # Look for dynamical matrices dynmat_files = [] - dynmat_folder = PhCalculation._FOLDER_DYNAMICAL_MATRIX # pylint: disable=protected-access - dynmat_prefix = os.path.split(PhCalculation._OUTPUT_DYNAMICAL_MATRIX_PREFIX)[1] # pylint: disable=protected-access + dynmat_folder = self.node.process_class._FOLDER_DYNAMICAL_MATRIX + dynmat_prefix = os.path.split(self.node.process_class._OUTPUT_DYNAMICAL_MATRIX_PREFIX)[1] natural_sort = lambda string: [int(c) if c.isdigit() else c.lower() for c in re.split(r'(\d+)', string)] - for filename in sorted(retrieved.base.repository.list_object_names(dynmat_folder), key=natural_sort): + for filename in sorted(self.retrieved.base.repository.list_object_names(dynmat_folder), key=natural_sort): if not filename.startswith(dynmat_prefix) or filename.endswith('.freq'): continue - dynmat_files.append(retrieved.base.repository.get_object_content(os.path.join(dynmat_folder, filename))) + dynmat_files.append(self.retrieved.base.repository.get_object_content(os.path.join(dynmat_folder, filename))) - try: - parsed_data, logs = parse_raw_ph_output(stdout, tensor_file, dynmat_files) - except Exception as exc: - self.logger.error(traceback.format_exc()) - return self.exit(self.exit_codes.ERROR_UNEXPECTED_PARSER_EXCEPTION.format(exception=exc)) + parsed_ph_data, logs = parse_raw_ph_output(stdout, logs, tensor_file, dynmat_files) + parsed_data.update(parsed_ph_data) - self.emit_logs(logs) self.out('output_parameters', orm.Dict(parsed_data)) - # If the scheduler detected OOW, simply keep that exit code by not returning anything more specific. 
- if self.node.exit_status == PhCalculation.exit_codes.ERROR_SCHEDULER_OUT_OF_WALLTIME:
- return
-
- if 'ERROR_OUT_OF_WALLTIME' in logs['error']:
- return self.exit_codes.ERROR_OUT_OF_WALLTIME
-
- if 'ERROR_CONVERGENCE_NOT_REACHED' in logs['error']:
- return self.exit_codes.ERROR_CONVERGENCE_NOT_REACHED
-
- if 'ERROR_COMPUTING_CHOLESKY' in logs['error']:
- return self.exit_codes.ERROR_COMPUTING_CHOLESKY
+ for exit_code in list(self.get_error_map().values()) + ['ERROR_OUTPUT_STDOUT_INCOMPLETE']:
+ if exit_code in logs.error:
+ return self.exit(self.exit_codes.get(exit_code), logs)
- if 'ERROR_OUTPUT_STDOUT_INCOMPLETE' in logs['error']:
- return self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE
+ return self.exit(logs=logs)
diff --git a/src/aiida_quantumespresso/parsers/pp.py b/src/aiida_quantumespresso/parsers/pp.py
index f6d238227..125c11e5e 100644
--- a/src/aiida_quantumespresso/parsers/pp.py
+++ b/src/aiida_quantumespresso/parsers/pp.py
@@ -2,20 +2,24 @@
"""`Parser` implementation for the `PpCalculation` calculation job class."""
import os
import re
-import traceback
+from typing import Tuple

from aiida import orm
-from aiida.common import exceptions
+from aiida.common import AttributeDict
import numpy as np

from aiida_quantumespresso.calculations.pp import PpCalculation
from aiida_quantumespresso.utils.mapping import get_logging_container

-from .base import Parser
+from .base import BaseParser


-class PpParser(Parser):
- """`Parser` implementation for the `PpCalculation` calculation job class."""
+class PpParser(BaseParser):
+ """``Parser`` implementation for the ``PpCalculation`` calculation job class."""
+
+ class_error_map = {
+ 'xml data file not found': 'ERROR_PARENT_XML_MISSING',
+ }

# Lookup: plot_num --> units
units_dict = {
@@ -43,12 +47,24 @@ class PpParser(Parser):
}

def parse(self, **kwargs):
- """
- Parse raw files retrieved from remote dir
- """
- retrieved = self.retrieved
+ """Parse the retrieved files of a ``PpCalculation`` into output nodes."""
+ logs = get_logging_container()
+
+ stdout, parsed_data, logs = self.parse_stdout_from_retrieved(logs)
+
+ base_exit_code = self.check_base_errors(logs)
+ if base_exit_code:
+ return self.exit(base_exit_code, logs)
+
+ parsed_pp, logs = self.parse_stdout(stdout, logs)
+ parsed_data.update(parsed_pp)
+
+ self.out('output_parameters', orm.Dict(parsed_data))
+
+ if 'ERROR_OUTPUT_STDOUT_INCOMPLETE' in logs.error:
+ return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE, logs)
+
retrieve_temporary_list = self.node.base.attributes.get('retrieve_temporary_list', None)
- filename_stdout = self.node.get_option('output_filename')

# If temporary files were specified, check that we have them
if retrieve_temporary_list:
@@ -57,15 +73,6 @@ def parse(self, **kwargs):
except KeyError:
return self.exit(self.exit_codes.ERROR_NO_RETRIEVED_TEMPORARY_FOLDER)

- # The stdout is required for parsing
- if filename_stdout not in retrieved.base.repository.list_object_names():
- return self.exit_codes.ERROR_OUTPUT_STDOUT_MISSING
-
- try:
- stdout_raw = retrieved.base.repository.get_object_content(filename_stdout)
- except (IOError, OSError):
- return self.exit_codes.ERROR_OUTPUT_STDOUT_READ
-
# Currently all plot output files should start with the `filplot` as prefix.
If only one file was produced the # prefix is the entire filename, but in the case of multiple files, there will be pairs of two files where the # first has the format '{filename_prefix}.{some_random_suffix' and the second has the same name but with the @@ -83,22 +90,8 @@ def parse(self, **kwargs): filenames = os.listdir(retrieved_temporary_folder) file_opener = lambda filename: open(os.path.join(retrieved_temporary_folder, filename)) else: - filenames = retrieved.base.repository.list_object_names() - file_opener = retrieved.base.repository.open - - try: - logs, self.output_parameters = self.parse_stdout(stdout_raw) - except Exception as exc: - self.logger.error(traceback.format_exc()) - return self.exit_codes.ERROR_UNEXPECTED_PARSER_EXCEPTION.format(exception=exc) - - self.emit_logs(logs) - - # Scan logs for known errors - if 'ERROR_PARENT_XML_MISSING' in logs['error']: - return self.exit_codes.ERROR_PARENT_XML_MISSING - if 'ERROR_OUTPUT_STDOUT_INCOMPLETE' in logs['error']: - return self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE + filenames = self.retrieved.base.repository.list_object_names() + file_opener = self.retrieved.base.repository.open # The following check should in principle always succeed since the iflag should in principle be set by the # `PpCalculation` plugin which only ever sets 0 - 4, but we check in order for the code not to except. @@ -137,7 +130,7 @@ def get_key_from_filename(filename): # Parse the file try: key = get_key_from_filename(filename) - data_parsed.append((key, parsers[iflag](data_raw))) + data_parsed.append((key, parsers[iflag](data_raw, self.units_dict[parsed_data['plot_num']]))) del data_raw except Exception: # pylint: disable=broad-except return self.exit_codes.ERROR_OUTPUT_DATAFILE_PARSE.format(filename=filename) @@ -154,83 +147,36 @@ def get_key_from_filename(filename): else: self.out('output_data_multiple', dict(data_parsed)) - self.out('output_parameters', orm.Dict(self.output_parameters)) - - def parse_stdout(self, stdout_str): - """ - Parses the output written to StdOut to retrieve basic information about the post processing - - :param stdout_str: the stdout file read in as a single string - """ - - def detect_important_message(logs, line): - """ - Detect know errors and warnings printed in the stdout - - :param logs: - :param line: a line from the stdout as a string - """ - message_map = { - 'error': { - 'xml data file not found': 'ERROR_PARENT_XML_MISSING' - }, - 'warning': { - 'Warning:': None, - 'DEPRECATED:': None, - } - } - - # Match any known error and warning messages - for marker, message in message_map['error'].items(): - if marker in line: - if message is None: - message = line - logs.error.append(message) - - for marker, message in message_map['warning'].items(): - if marker in line: - if message is None: - message = line - logs.warning.append(message) - - stdout_lines = stdout_str.splitlines() - logs = get_logging_container() - output_dict = {} + return self.exit(logs=logs) - # Check for job completion, indicating that pp.x exited without interruption, even if there was an error. 
- for line in stdout_lines: - if 'JOB DONE' in line: - break - else: - logs.error.append('ERROR_OUTPUT_STDOUT_INCOMPLETE') - - # Detect any issues and detect job completion - for line in stdout_lines: - detect_important_message(logs, line) + def parse_stdout(self, stdout: str, logs: AttributeDict) -> Tuple[dict, AttributeDict]: + """Parse the ``stdout`` content of a Quantum ESPRESSO ``pp.x`` calculation.""" + parsed_data = {} # Parse useful data from stdout - for line in stdout_lines: + for line in stdout.splitlines(): if 'Check:' in line: # QE < 6.5 split_line = line.split('=') if 'negative/imaginary' in line: # QE6.1-6.3 - output_dict['negative_core_charge'] = float(split_line[-1].split()[0]) - output_dict['imaginary_core_charge'] = float(split_line[-1].split()[-1]) + parsed_data['negative_core_charge'] = float(split_line[-1].split()[0]) + parsed_data['imaginary_core_charge'] = float(split_line[-1].split()[-1]) else: # QE6.4 - output_dict['negative_core_charge'] = float(split_line[1]) + parsed_data['negative_core_charge'] = float(split_line[1]) if 'Min, Max, imaginary charge:' in line: split_line = line.split() - output_dict['charge_min'] = float(split_line[-3]) - output_dict['charge_max'] = float(split_line[-2]) - output_dict['charge_img'] = float(split_line[-1]) + parsed_data['charge_min'] = float(split_line[-3]) + parsed_data['charge_max'] = float(split_line[-2]) + parsed_data['charge_img'] = float(split_line[-1]) if 'plot_num = ' in line: - output_dict['plot_num'] = int(line.split('=')[1]) + parsed_data['plot_num'] = int(line.split('=')[1]) if 'Plot Type:' in line: - output_dict['plot_type'] = line.split('Output format')[0].split(':')[-1].strip() - output_dict['output_format'] = line.split(':')[-1].strip() + parsed_data['plot_type'] = line.split('Output format')[0].split(':')[-1].strip() + parsed_data['output_format'] = line.split(':')[-1].strip() - return logs, output_dict + return parsed_data, logs - def parse_gnuplot1D(self, data_file_str): + @staticmethod + def parse_gnuplot1D(data_file_str, data_units): """Parse 1D GNUPlot formatted output. :param data_file_str: the data file read in as a single string @@ -250,7 +196,7 @@ def parse_gnuplot1D(self, data_file_str): data.append(float(split_line[1])) y_data = [data] y_names = ['data'] - y_units = [self.units_dict[self.output_parameters['plot_num']]] + y_units = [data_units] # 1D case with spherical averaging if n_col == 3: @@ -264,8 +210,7 @@ def parse_gnuplot1D(self, data_file_str): data_integral.append(float(split_line[2])) y_data = [data, data_integral] y_names = ['data', 'integrated_data'] - unit = self.units_dict[self.output_parameters['plot_num']] - y_units = [unit, unit.replace('bohr^3', 'bohr')] + y_units = [data_units, data_units.replace('bohr^3', 'bohr')] x_units = 'bohr' arraydata = orm.ArrayData() @@ -277,7 +222,8 @@ def parse_gnuplot1D(self, data_file_str): return arraydata - def parse_gnuplot_polar(self, data_file_str): + @staticmethod + def parse_gnuplot_polar(data_file_str, data_units): """Parse 2D Polar GNUPlot formatted, single column output. 
:param data_file_str: the data file read in as a single string @@ -288,15 +234,15 @@ def parse_gnuplot_polar(self, data_file_str): data = [] for line in data_lines: data.append(float(line)) - data_units = [self.units_dict[self.output_parameters['plot_num']]] arraydata = orm.ArrayData() arraydata.set_array('data', np.array(data)) - arraydata.set_array('data_units', np.array(data_units)) + arraydata.set_array('data_units', np.array([data_units])) return arraydata - def parse_gnuplot2D(self, data_file_str): + @staticmethod + def parse_gnuplot2D(data_file_str, data_units): """Parse 2D GNUPlot formatted output. :param data_file_str: the data file read in as a single string @@ -316,7 +262,6 @@ def parse_gnuplot2D(self, data_file_str): data.append(float(split_line[2])) coords_units = 'bohr' - data_units = self.units_dict[self.output_parameters['plot_num']] arraydata = orm.ArrayData() arraydata.set_array('xy_coordinates', np.array(coords)) arraydata.set_array('data', np.array(data)) @@ -325,7 +270,8 @@ def parse_gnuplot2D(self, data_file_str): return arraydata - def parse_gaussian(self, data_file_str): + @staticmethod + def parse_gaussian(data_file_str, data_units): """Parse Gaussian Cube formatted output. :param data_file_str: the data file read in as a single string @@ -362,7 +308,6 @@ def parse_gaussian(self, data_file_str): data_array = data_array.reshape((xdim, ydim, zdim)) coordinates_units = 'bohr' - data_units = self.units_dict[self.output_parameters['plot_num']] arraydata = orm.ArrayData() arraydata.set_array('voxel', voxel_array) diff --git a/src/aiida_quantumespresso/parsers/projwfc.py b/src/aiida_quantumespresso/parsers/projwfc.py index 8464d09ea..6b7bc2959 100644 --- a/src/aiida_quantumespresso/parsers/projwfc.py +++ b/src/aiida_quantumespresso/parsers/projwfc.py @@ -8,14 +8,10 @@ import numpy as np from aiida_quantumespresso.parsers import QEOutputParsingError -from aiida_quantumespresso.parsers.parse_raw.base import ( - convert_qe_to_aiida_structure, - convert_qe_to_kpoints, - parse_output_base, -) +from aiida_quantumespresso.parsers.parse_raw.base import convert_qe_to_aiida_structure, convert_qe_to_kpoints from aiida_quantumespresso.utils.mapping import get_logging_container -from .base import Parser +from .base import BaseParser def find_orbitals_from_statelines(out_info_dict): @@ -272,46 +268,36 @@ def spin_dependent_pdos_subparser(out_info_dict): return out_arrays -class ProjwfcParser(Parser): - """This class is the implementation of the Parser class for projwfc.x in Quantum Espresso. +class ProjwfcParser(BaseParser): + """``Parser`` implementation for the ``ProjwfcCalculation`` calculation job class. Parses projection arrays that map the projection onto each point in the bands structure, as well as pdos arrays, which map the projected density of states onto an energy axis. """ def parse(self, **kwargs): - """Parses the datafolder, stores results. 
+ """Parse the retrieved files from a ``ProjwfcCalculation`` into output nodes.""" + # we create a dictionary the progressively accumulates more info + out_info_dict = {} + + logs = get_logging_container() + + stdout, parsed_data, logs = self.parse_stdout_from_retrieved(logs) + out_info_dict['out_file'] = stdout.split('\n') + + base_exit_code = self.check_base_errors(logs) + if base_exit_code: + return self.exit(base_exit_code, logs) + + self.out('output_parameters', Dict(parsed_data)) + + if 'ERROR_OUTPUT_STDOUT_INCOMPLETE'in logs.error: + return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE, logs) - Retrieves projwfc output, and some basic information from the out_file, such as warnings and wall_time - """ - retrieved = self.retrieved - # Get the temporary retrieved folder try: retrieved_temporary_folder = kwargs['retrieved_temporary_folder'] except KeyError: - return self.exit(self.exit_codes.ERROR_NO_RETRIEVED_TEMPORARY_FOLDER) - - # Read standard out - try: - filename_stdout = self.node.get_option('output_filename') # or get_attribute(), but this is clearer - with retrieved.base.repository.open(filename_stdout, 'r') as fil: - out_file = fil.readlines() - except OSError: - return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ) - - job_done = False - for i in range(len(out_file)): - line = out_file[-i] - if 'JOB DONE' in line: - job_done = True - break - if not job_done: - return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE) - - # Parse basic info and warnings, and output them as output_parmeters - parsed_data, logs = parse_output_base(out_file, 'PROJWFC') - self.emit_logs(logs) - self.out('output_parameters', Dict(parsed_data)) + return self.exit(self.exit_codes.ERROR_NO_RETRIEVED_TEMPORARY_FOLDER, logs) # Parse the XML to obtain the `structure`, `kpoints` and spin-related settings from the parent calculation self.exit_code_xml = None @@ -321,9 +307,6 @@ def parse(self, **kwargs): if self.exit_code_xml: return self.exit(self.exit_code_xml) - # we create a dictionary the progressively accumulates more info - out_info_dict = {} - out_info_dict['structure'] = convert_qe_to_aiida_structure(parsed_xml['structure']) out_info_dict['kpoints'] = convert_qe_to_kpoints(parsed_xml, out_info_dict['structure']) out_info_dict['nspin'] = parsed_xml.get('number_of_spin_components') @@ -332,33 +315,32 @@ def parse(self, **kwargs): out_info_dict['spin'] = out_info_dict['nspin'] == 2 # check and read pdos_tot file - out_filenames = retrieved.base.repository.list_object_names() + out_filenames = self.retrieved.base.repository.list_object_names() try: pdostot_filename = fnmatch.filter(out_filenames, '*pdos_tot*')[0] - with retrieved.base.repository.open(pdostot_filename, 'r') as pdostot_file: + with self.retrieved.base.repository.open(pdostot_filename, 'r') as pdostot_file: # Columns: Energy(eV), Ldos, Pdos pdostot_array = np.atleast_2d(np.genfromtxt(pdostot_file)) energy = pdostot_array[:, 0] dos = pdostot_array[:, 1] except (OSError, KeyError): - return self.exit(self.exit_codes.ERROR_READING_PDOSTOT_FILE) + return self.exit(self.exit_codes.ERROR_READING_PDOSTOT_FILE, logs) # check and read all of the individual pdos_atm files pdos_atm_filenames = fnmatch.filter(out_filenames, '*pdos_atm*') pdos_atm_array_dict = {} for name in pdos_atm_filenames: - with retrieved.base.repository.open(name, 'r') as pdosatm_file: + with self.retrieved.base.repository.open(name, 'r') as pdosatm_file: pdos_atm_array_dict[name] = np.atleast_2d(np.genfromtxt(pdosatm_file)) # finding the bands and 
projections - out_info_dict['out_file'] = out_file out_info_dict['energy'] = energy out_info_dict['pdos_atm_array_dict'] = pdos_atm_array_dict try: new_nodes_list = self._parse_bands_and_projections(out_info_dict) except QEOutputParsingError as err: self.logger.error(f'Error parsing bands and projections: {err}') - return self.exit(self.exit_codes.ERROR_PARSING_PROJECTIONS) + return self.exit(self.exit_codes.ERROR_PARSING_PROJECTIONS, logs) for linkname, node in new_nodes_list: self.out(linkname, node) @@ -367,6 +349,8 @@ def parse(self, **kwargs): Dos_out.set_y(dos, 'Dos', 'states/eV') self.out('Dos', Dos_out) + return self.exit(logs=logs) + def _parse_xml(self, retrieved_temporary_folder): """Parse the XML file. diff --git a/src/aiida_quantumespresso/parsers/pw.py b/src/aiida_quantumespresso/parsers/pw.py index cd31b9e8f..aed890103 100644 --- a/src/aiida_quantumespresso/parsers/pw.py +++ b/src/aiida_quantumespresso/parsers/pw.py @@ -10,11 +10,11 @@ from aiida_quantumespresso.calculations.pw import PwCalculation from aiida_quantumespresso.utils.mapping import get_logging_container -from .base import Parser +from .base import BaseParser from .parse_raw.pw import reduce_symmetries -class PwParser(Parser): +class PwParser(BaseParser): """`Parser` implementation for the `PwCalculation` calculation job class.""" def parse(self, **kwargs): diff --git a/src/aiida_quantumespresso/parsers/pw2gw.py b/src/aiida_quantumespresso/parsers/pw2gw.py index fce5f7a37..099bc22fb 100644 --- a/src/aiida_quantumespresso/parsers/pw2gw.py +++ b/src/aiida_quantumespresso/parsers/pw2gw.py @@ -2,58 +2,60 @@ """`Parser` implementation for the `Pw2gwCalculation` calculation job class.""" import io -from aiida import orm +from aiida.orm import ArrayData, Dict import numpy as np -from aiida_quantumespresso.calculations.pw2gw import Pw2gwCalculation +from aiida_quantumespresso.utils.mapping import get_logging_container -from .base import Parser +from .base import BaseParser -class Pw2gwParser(Parser): - """`Parser` implementation for the `Pw2gwCalculation` calculation job class.""" +class Pw2gwParser(BaseParser): + """``Parser`` implementation for the ``Pw2gwCalculation`` calculation job class.""" def parse(self, **kwargs): - """Parse the retrieved files of a completed `Pw2gwCalculation` into output nodes. + """Parse the retrieved files of a completed ``Pw2gwCalculation`` into output nodes. Two nodes that are expected are the default 'retrieved' `FolderData` node which will store the retrieved files - permanently in the repository. The second required node is a filepath under the key `retrieved_temporary_files` - which should contain the temporary retrieved files. + permanently in the repository. The second required node is a filepath under the key + ``retrieved_temporary_files`` which should contain the temporary retrieved files. 
""" - self.exit_code_stdout = None - self.exit_code_eps = None + logs = get_logging_container() - # Parse the pw2gw stout file - data, logs_stdout = self.parse_stdout() + _, parsed_data, logs = self.parse_stdout_from_retrieved(logs) - self.emit_logs(logs_stdout) + base_exit_code = self.check_base_errors(logs) + if base_exit_code: + return self.exit(base_exit_code, logs) - if self.exit_code_stdout: - return self.exit(self.exit_code_stdout) + self.out('output_parameters', Dict(dict=parsed_data)) - self.out('output_parameters', orm.Dict(data)) + if 'ERROR_OUTPUT_STDOUT_INCOMPLETE'in logs.error: + return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE, logs) - # Parse the pw2g outputfiles + self.exit_code_eps = None eps = self.parse_eps_files() if self.exit_code_eps: - return self.exit(self.exit_code_eps) + return self.exit(self.exit_code_eps, logs) self.out('eps', eps) + return self.exit(logs=logs) + def parse_eps_files(self): - """Parse the eps*.dat files produced by pw2gw.x and store them in the `eps` node.""" + """Parse the ``eps*.dat`` files produced by ``pw2gw.x``.""" retrieved = self.retrieved retrieved_names = retrieved.base.repository.list_object_names() - files = Pw2gwCalculation._internal_retrieve_list + files = self.node.process_class._internal_retrieve_list if any(_ not in retrieved_names for _ in files): self.exit_code_eps = self.exit_codes.ERROR_OUTPUT_FILES return energy = None - eps = orm.ArrayData() - for name in Pw2gwCalculation._internal_retrieve_list: + eps = ArrayData() + for name in self.node.process_class._internal_retrieve_list: content = retrieved.base.repository.get_object_content(name) base = name.split('.')[0] @@ -77,37 +79,3 @@ def parse_eps_files(self): eps.set_array(base, y) return eps - - def parse_stdout(self): - """Parse the stdout file of pw2gw to build the `output_parameters` node.""" - from aiida_quantumespresso.parsers.parse_raw.pw2gw import parse_stdout - from aiida_quantumespresso.utils.mapping import get_logging_container - - logs = get_logging_container() - parsed_data = {} - - filename_stdout = self.node.base.attributes.get('output_filename') - - if filename_stdout not in self.retrieved.base.repository.list_object_names(): - self.exit_code_stdout = self.exit_codes.ERROR_OUTPUT_STDOUT_MISSING - return parsed_data, logs - - try: - stdout = self.retrieved.base.repository.get_object_content(filename_stdout) - except IOError: - self.exit_code_stdout = self.exit_codes.ERROR_OUTPUT_STDOUT_READ - return parsed_data, logs - - try: - parsed_data, logs = parse_stdout(stdout) - except Exception as exc: - import traceback - traceback.print_exc() - self.exit_code_stdout = self.exit_codes.ERROR_UNEXPECTED_PARSER_EXCEPTION.format(exception=exc) - - # If the stdout was incomplete, most likely the job was interrupted before it could cleanly finish, so the - # output files are most likely corrupt and cannot be restarted from - if 'ERROR_OUTPUT_STDOUT_INCOMPLETE' in logs['error']: - self.exit_code_stdout = self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE - - return parsed_data, logs diff --git a/src/aiida_quantumespresso/parsers/pw2wannier90.py b/src/aiida_quantumespresso/parsers/pw2wannier90.py index 8ee20fff1..488ab6120 100644 --- a/src/aiida_quantumespresso/parsers/pw2wannier90.py +++ b/src/aiida_quantumespresso/parsers/pw2wannier90.py @@ -1,33 +1,31 @@ # -*- coding: utf-8 -*- from aiida.orm import Dict -from aiida_quantumespresso.parsers.parse_raw.base import parse_output_base +from aiida_quantumespresso.utils.mapping import get_logging_container -from .base 
import Parser
+from .base import BaseParser


-class Pw2wannier90Parser(Parser):
- """`Parser` implementation for the `Pw2wannierCalculation` calculation job class."""
+class Pw2wannier90Parser(BaseParser):
+ """``Parser`` implementation for the ``Pw2wannierCalculation`` calculation job class."""

def parse(self, **kwargs):
- """Parse the retrieved files of a completed `Pw2wannierCalculation` into output nodes.
+ """Parse the retrieved files of a completed ``Pw2wannierCalculation`` into output nodes.

- Two nodes that are expected are the default 'retrieved' `FolderData` node which will store the retrieved files
+ Two nodes that are expected are the default 'retrieved' ``FolderData`` node which will store the retrieved files
permanently in the repository.
"""
- try:
- filename_stdout = self.node.get_option('output_filename') # or get_attribute(), but this is clearer
- with self.retrieved.base.repository.open(filename_stdout, 'r') as fil:
- out_file = fil.read()
- except OSError:
- return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ)
+ logs = get_logging_container()

- parsed_data, logs = parse_output_base(out_file, codename='PW2WANNIER')
- self.emit_logs(logs)
+ _, parsed_data, logs = self.parse_stdout_from_retrieved(logs)
+
+ base_exit_code = self.check_base_errors(logs)
+ if base_exit_code:
+ return self.exit(base_exit_code, logs)

self.out('output_parameters', Dict(parsed_data))

- if 'ERROR_OUTPUT_STDOUT_INCOMPLETE' in logs.error:
- return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE)
- elif logs.error:
- return self.exit(self.exit_codes.ERROR_GENERIC_QE_ERROR)
+ if 'ERROR_OUTPUT_STDOUT_INCOMPLETE' in logs.error:
+ return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE, logs)
+
+ return self.exit(logs=logs)
diff --git a/src/aiida_quantumespresso/parsers/q2r.py b/src/aiida_quantumespresso/parsers/q2r.py
index 3d9062adc..6c93d83cb 100644
--- a/src/aiida_quantumespresso/parsers/q2r.py
+++ b/src/aiida_quantumespresso/parsers/q2r.py
@@ -1,29 +1,36 @@
# -*- coding: utf-8 -*-
-from aiida_quantumespresso.calculations.q2r import Q2rCalculation
+from aiida.orm import Dict
+
from aiida_quantumespresso.data.force_constants import ForceConstantsData
+from aiida_quantumespresso.utils.mapping import get_logging_container

-from .base import Parser
+from .base import BaseParser


-class Q2rParser(Parser):
- """Parser implementation for the Q2rCalculation."""
+class Q2rParser(BaseParser):
+ """``Parser`` implementation for the ``Q2rCalculation`` calculation job class."""

def parse(self, **kwargs):
- """Parse the retrieved files from a `Q2rCalculation`."""
- retrieved = self.retrieved
- filename_stdout = self.node.get_option('output_filename')
- filename_force_constants = Q2rCalculation._FORCE_CONSTANTS_NAME
+ """Parse the retrieved files of a ``Q2rCalculation`` into output nodes."""
+ logs = get_logging_container()
+
+ _, parsed_data, logs = self.parse_stdout_from_retrieved(logs)
+
+ base_exit_code = self.check_base_errors(logs)
+ if base_exit_code:
+ return self.exit(base_exit_code, logs)
+
+ self.out('output_parameters', Dict(parsed_data))

- if filename_stdout not in retrieved.base.repository.list_object_names():
- return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ)
+ if 'ERROR_OUTPUT_STDOUT_INCOMPLETE' in logs.error:
+ return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE, logs)

- if filename_force_constants not in retrieved.base.repository.list_object_names():
- return self.exit(self.exit_codes.ERROR_READING_FORCE_CONSTANTS_FILE)
+ filename_force_constants =
self.node.process_class._FORCE_CONSTANTS_NAME

- if 'JOB DONE' not in retrieved.base.repository.get_object_content(filename_stdout):
- return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE)
+ if filename_force_constants not in self.retrieved.base.repository.list_object_names():
+ return self.exit(self.exit_codes.ERROR_READING_FORCE_CONSTANTS_FILE, logs)

- with retrieved.base.repository.open(filename_force_constants, 'rb') as handle:
- self.out('force_constants', ForceConstantsData(file=handle, filename=filename_force_constants))
+ with self.retrieved.base.repository.open(filename_force_constants, 'rb') as handle:
+ self.out('force_constants', ForceConstantsData(file=handle))

- return
+ return self.exit(logs=logs)
diff --git a/src/aiida_quantumespresso/parsers/xspectra.py b/src/aiida_quantumespresso/parsers/xspectra.py
index 1da9c55d2..7ed9e39dd 100644
--- a/src/aiida_quantumespresso/parsers/xspectra.py
+++ b/src/aiida_quantumespresso/parsers/xspectra.py
@@ -1,57 +1,70 @@
# -*- coding: utf-8 -*-
import re
+from typing import Tuple

+from aiida.common import AttributeDict
from aiida.orm import Dict, XyData
import numpy as np

from aiida_quantumespresso.parsers import QEOutputParsingError
-from aiida_quantumespresso.parsers.base import Parser
+from aiida_quantumespresso.parsers.base import BaseParser
+from aiida_quantumespresso.utils.mapping import get_logging_container


-class XspectraParser(Parser):
- """ Parser for the XSpectraCalculation calcjob plugin """
+class XspectraParser(BaseParser):
+ """Parser for the ``XSpectraCalculation`` calcjob plugin."""
+
+ class_error_map = {
+ 'Wrong xiabs!!!': 'ERROR_OUTPUT_ABSORBING_SPECIES_WRONG',
+ 'xiabs < 1 or xiabs > ntyp': 'ERROR_OUTPUT_ABSORBING_SPECIES_ZERO',
+ 'Calculation not finished': 'ERROR_OUT_OF_WALLTIME',
+ }
+ success_string = 'END JOB'

def parse(self, **kwargs):
"""Parse the contents of the output files stored in the `retrieved` output node."""
- from aiida.plugins import DataFactory
+ logs = get_logging_container()

- retrieved = self.retrieved
- try:
- filename_stdout = self.node.get_option('output_filename')
- with retrieved.base.repository.open(filename_stdout, 'r') as fil:
- out_file = fil.readlines()
- except OSError:
- return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_READ)
-
- # Check the stdout for obvious errors
- job_done = False
- for line in out_file:
- if 'Wrong xiabs!!!'
in line: - return self.exit(self.exit_codes.ERROR_OUTPUT_ABSORBING_SPECIES_WRONG) - if 'xiabs < 1 or xiabs > ntyp' in line: - return self.exit(self.exit_codes.ERROR_OUTPUT_ABSORBING_SPECIES_ZERO) - if 'Calculation not finished' in line: - return self.exit(self.exit_codes.ERROR_OUT_OF_WALLTIME) - if 'END JOB' in line: - job_done = True - break - if not job_done: - return self.exit(self.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE) + stdout, parsed_data, logs = self.parse_stdout_from_retrieved(logs) + + base_exit_code = self.check_base_errors(logs) + if base_exit_code: + return self.exit(base_exit_code, logs) + + parsed_xspectra, logs = self.parse_stdout(stdout, logs) + parsed_data.update(parsed_xspectra) + + # Parse some additional info which the stdout does not reliably report + parameters = self.node.inputs.parameters.base.attributes.get('INPUT_XSPECTRA', {}) + + xepsilon_defaults = { + '1': 0, + '2': 0, + '3': 1, + } + raw_xepsilon = [parameters.get(f'xepsilon({n})', xepsilon_defaults[n]) for n in ['1', '2', '3']] + parsed_data['xepsilon'] = [float(n) for n in raw_xepsilon] + parsed_data['xcoordcrys'] = parameters.get('xcoordcrys', True) + parsed_data['xonly_plot'] = parameters.get('xonly_plot', False) + + self.out('output_parameters', Dict(parsed_data)) + + for exit_code in list(self.class_error_map.values()) + ['ERROR_OUTPUT_STDOUT_INCOMPLETE']: + if exit_code in logs.error: + return self.exit(self.exit_codes.get(exit_code), logs) # Check that the spectra data file exists and is readable try: - with retrieved.base.repository.open(self.node.process_class._Spectrum_FILENAME, 'r') as fil: + with self.retrieved.base.repository.open(self.node.process_class._Spectrum_FILENAME, 'r') as fil: xspectra_file = fil.readlines() except OSError: - return self.exit(self.exit_codes.ERROR_READING_SPECTRUM_FILE) + return self.exit(self.exit_codes.ERROR_READING_SPECTRUM_FILE, logs) # Check that the data in the spectra file can be read by NumPy try: - xspectra_data = np.genfromtxt(xspectra_file) + _ = np.genfromtxt(xspectra_file) except ValueError: - return self.exit(self.exit_codes.ERROR_READING_SPECTRUM_FILE_DATA) - - # end of initial checks + return self.exit(self.exit_codes.ERROR_READING_SPECTRUM_FILE_DATA, logs) array_names = [[], []] array_units = [[], []] @@ -62,7 +75,7 @@ def parse(self, **kwargs): array_names[1] = ['energy', 'sigma_tot', 'sigma_up', 'sigma_down'] # for spin-polarised calculations array_units[1] = ['eV', 'n/a', 'n/a', 'n/a'] - array_data, spin = parse_raw_xspectra(xspectra_file, array_names, array_units) + array_data, spin = self.parse_raw_xspectra(xspectra_file, array_names, array_units) energy_units = 'eV' xy_data = XyData() @@ -86,184 +99,139 @@ def parse(self, **kwargs): xy_data.set_y(y_arrays, y_names, y_units) - parsed_data, logs = parse_stdout_xspectra(filecontent=out_file, codename='XSpectra') - - # Parse some additional info which the stdout does not reliably report - parameters = self.node.inputs.parameters.base.attributes.get('INPUT_XSPECTRA', {}) - - xepsilon_defaults = { - '1': 0, - '2': 0, - '3': 1, - } - raw_xepsilon = [parameters.get(f'xepsilon({n})', xepsilon_defaults[n]) for n in ['1', '2', '3']] - parsed_data['xepsilon'] = [float(n) for n in raw_xepsilon] - parsed_data['xcoordcrys'] = parameters.get('xcoordcrys', True) - parsed_data['xonly_plot'] = parameters.get('xonly_plot', False) - self.emit_logs(logs) - self.out('spectra', xy_data) - self.out('output_parameters', Dict(dict=parsed_data)) - -def parse_raw_xspectra(xspectra_file, array_names, array_units): - 
"""Parse the content of the output spectrum. - - This function takes as input the xspectra_file as a list of filelines along with information on how to give labels - and units to the parsed data. - - :param xspectra_file: xspectra file lines in the form of a list - :type xspectra_file: list - :param array_names: list of all array names. - :type array_names: list - :param array_units: list of all array units. - :type array_units: list - - :return array_data: narray, a dictionary for ArrayData type, which - contains all parsed xspectra output along with labels and units - """ - - xspectra_header = xspectra_file[:4] - xspectra_data = np.genfromtxt(xspectra_file) - if len(xspectra_data) == 0: - raise QEOutputParsingError('XSpectra file is empty.') - if np.isnan(xspectra_data).any(): - raise QEOutputParsingError('XSpectra file contains non-numeric elements.') - - if len(xspectra_data[0]) == 2: - array_names = array_names[0] - array_units = array_units[0] - spin = False - elif len(xspectra_data[0]) == 4: - array_names = array_names[1] - array_units = array_units[1] - spin = True - else: - raise QEOutputParsingError('XSpectra data file in unsuitable format for the parser') - - i = 0 - array_data = {} - array_data['header'] = np.array(xspectra_header) - while i < len(array_names): - array_data[array_names[i]] = xspectra_data[:, i] - array_data[array_names[i] + '_units'] = np.array(array_units[i]) - i += 1 - return array_data, spin - -def parse_stdout_xspectra(filecontent, codename=None, message_map=None): - """Parses the output file of an XSpectra calculation, checking for - basic content like END JOB, errors with %%%%, and the core level energy - and the energy zero of the spectrum. - - :param filecontent: a string with the output file content - :param codename: the string printed both in the header and near the - walltime. If passed, a few more things are parsed (e.g. - code version, walltime, ...) 
- :returns: tuple of two dictionaries, with the parsed data and log - messages, respectively - """ - from aiida_quantumespresso.utils.mapping import get_logging_container - - from .parse_raw.base import convert_qe_time_to_sec - - keys = ['error', 'warning'] - - if message_map is not None and (not isinstance(message_map, dict) or any(key not in message_map for key in keys)): - raise RuntimeError(f'invalid format `message_map`: should be dictionary with two keys {keys}') - - logs = get_logging_container() - parsed_data = {} - - lines = filecontent if isinstance(filecontent, list) else filecontent.split('\n') - - # Parse the necessary information for data plotting: core level energy of the - # absorbing atom and the energy zero of the spectrum (typically the Fermi level) - for line in lines: - if 'From SCF save directory' in line: - if '(spin polarized work)' in line: - spin = True - else: - spin = False - parsed_data['lsda'] = spin - if 'ehomo [eV]' in line: - if spin: - homo_energy = line.split(':')[-2].split('(')[0].strip() - else: - homo_energy = line.split(':')[-1].split('(')[0].strip() - homo_energy_units = line.split('[')[1].split(':')[0].replace(']', '') - parsed_data['highest_occupied_level'] = homo_energy - parsed_data['highest_occupied_level_units'] = homo_energy_units - if 'elumo [eV]' in line: - if spin: - lumo_energy = line.split(':')[-2].split('(')[0].strip() - else: - lumo_energy = line.split(':')[-1].split('(')[0].strip() - lumo_energy_units = line.split('[')[1].split(':')[0].replace(']', '') - parsed_data['lowest_unoccupied_level'] = lumo_energy - parsed_data['lowest_unoccupied_level_units'] = lumo_energy_units - parsed_data['lumo_found'] = True - elif 'No LUMO value' in line: - parsed_data['lumo_found'] = False - if 'ef [eV]' in line: - ef_energy = line.split(':')[-1].split('(')[0].strip() - ef_energy_units = line.split('[')[1].split(':')[0].replace(']', '') - parsed_data['fermi_energy'] = ef_energy - parsed_data['fermi_energy_units'] = ef_energy_units - - # parse per-process dynamical RAM estimates - if 'Estimated max dynamical RAM per process' in line: - value = line.split('>')[-1] - match = re.match(r'\s+([+-]?\d+(\.\d*)?|\.\d+([eE][+-]?\d+)?)\s*(Mb|MB|GB)', value) - if match: - try: - parsed_data['estimated_ram_per_process'] = float(match.group(1)) - parsed_data['estimated_ram_per_process_units'] = match.group(4) - except (IndexError, ValueError): - pass - - # parse total dynamical RAM estimates - if 'Estimated total dynamical RAM' in line: - value = line.split('>')[-1] - match = re.match(r'\s+([+-]?\d+(\.\d*)?|\.\d+([eE][+-]?\d+)?)\s*(Mb|MB|GB)', value) - if match: - try: - parsed_data['estimated_ram_total'] = float(match.group(1)) - parsed_data['estimated_ram_total_units'] = match.group(4) - except (IndexError, ValueError): - pass - - if 'Core level energy' in line: - core_energy_line = line - parsed_data['core_level_energy'] = core_energy_line.split('[')[1].split(':')[1].strip() - parsed_data['core_level_energy_units'] = core_energy_line.split('[')[1].split(':')[0].replace(']', '') - if 'energy-zero' in line: - energy_zero_line = line - parsed_data['energy_zero'] = energy_zero_line.split('[')[1].split(':')[1].strip() - parsed_data['energy_zero_units'] = energy_zero_line.split('[')[1].split(':')[0].replace(']', '') - - if codename is not None: - codestring = f'Program {codename}' - - for line_number, line in enumerate(lines): - - if codestring in line and 'starts on' in line: - parsed_data['code_version'] = line.split(codestring)[1].split('starts on')[0].strip() + 
return self.exit(logs=logs) + + @staticmethod + def parse_stdout(stdout: str, logs: AttributeDict) -> Tuple[dict, AttributeDict]: + """Parse the ``stdout`` of XSpectra for the core level energy and energy zero of the spectrum.""" + from .parse_raw.base import convert_qe_time_to_sec + + parsed_data = {} + + # Parse the necessary information for data plotting: core level energy of the + # absorbing atom and the energy zero of the spectrum (typically the Fermi level) + for line in stdout.split('\n'): + if 'From SCF save directory' in line: + if '(spin polarized work)' in line: + spin = True + else: + spin = False + parsed_data['lsda'] = spin + if 'ehomo [eV]' in line: + if spin: + homo_energy = line.split(':')[-2].split('(')[0].strip() + else: + homo_energy = line.split(':')[-1].split('(')[0].strip() + homo_energy_units = line.split('[')[1].split(':')[0].replace(']', '') + parsed_data['highest_occupied_level'] = homo_energy + parsed_data['highest_occupied_level_units'] = homo_energy_units + if 'elumo [eV]' in line: + if spin: + lumo_energy = line.split(':')[-2].split('(')[0].strip() + else: + lumo_energy = line.split(':')[-1].split('(')[0].strip() + lumo_energy_units = line.split('[')[1].split(':')[0].replace(']', '') + parsed_data['lowest_unoccupied_level'] = lumo_energy + parsed_data['lowest_unoccupied_level_units'] = lumo_energy_units + parsed_data['lumo_found'] = True + elif 'No LUMO value' in line: + parsed_data['lumo_found'] = False + if 'ef [eV]' in line: + ef_energy = line.split(':')[-1].split('(')[0].strip() + ef_energy_units = line.split('[')[1].split(':')[0].replace(']', '') + parsed_data['fermi_energy'] = ef_energy + parsed_data['fermi_energy_units'] = ef_energy_units + + # parse per-process dynamical RAM estimates + if 'Estimated max dynamical RAM per process' in line: + value = line.split('>')[-1] + match = re.match(r'\s+([+-]?\d+(\.\d*)?|\.\d+([eE][+-]?\d+)?)\s*(Mb|MB|GB)', value) + if match: + try: + parsed_data['estimated_ram_per_process'] = float(match.group(1)) + parsed_data['estimated_ram_per_process_units'] = match.group(4) + except (IndexError, ValueError): + pass + + # parse total dynamical RAM estimates + if 'Estimated total dynamical RAM' in line: + value = line.split('>')[-1] + match = re.match(r'\s+([+-]?\d+(\.\d*)?|\.\d+([eE][+-]?\d+)?)\s*(Mb|MB|GB)', value) + if match: + try: + parsed_data['estimated_ram_total'] = float(match.group(1)) + parsed_data['estimated_ram_total_units'] = match.group(4) + except (IndexError, ValueError): + pass + + if 'Core level energy' in line: + core_energy_line = line + parsed_data['core_level_energy'] = core_energy_line.split('[')[1].split(':')[1].strip() + parsed_data['core_level_energy_units'] = core_energy_line.split('[')[1].split(':')[0].replace(']', '') + if 'energy-zero' in line: + energy_zero_line = line + parsed_data['energy_zero'] = energy_zero_line.split('[')[1].split(':')[1].strip() + parsed_data['energy_zero_units'] = energy_zero_line.split('[')[1].split(':')[0].replace(']', '') # Parse the walltime # XSpectra does not appear next to the timing data, so we must find 'xanes' instead. 
if 'xanes' in line and 'WALL' in line:
- try:
- time = line.split('CPU')[1].split('WALL')[0].strip()
- parsed_data['wall_time'] = time
- except (ValueError, IndexError):
- logs.warnings.append('ERROR_PARSING_WALLTIME')
- try:
- parsed_data['wall_time_seconds'] = convert_qe_time_to_sec(time)
- except ValueError:
- logs.warnings.append('ERROR_CONVERTING_WALLTIME_TO_SECONDS')
+ try:
+ time = line.split('CPU')[1].split('WALL')[0].strip()
+ parsed_data['wall_time'] = time
+ except (ValueError, IndexError):
+ break
+ try:
+ parsed_data['wall_time_seconds'] = convert_qe_time_to_sec(time)
+ except ValueError:
+ logs.warning.append('Unable to convert wall time from `stdout` to seconds.')
+
+ return parsed_data, logs
+
+ @staticmethod
+ def parse_raw_xspectra(xspectra_file, array_names, array_units):
+ """Parse the content of the output spectrum.
+
+ This function takes as input the xspectra_file as a list of filelines along with information on how to give
+ labels and units to the parsed data.
+
+ :param xspectra_file: xspectra file lines in the form of a list
+ :type xspectra_file: list
+ :param array_names: list of all array names.
+ :type array_names: list
+ :param array_units: list of all array units.
+ :type array_units: list
+
+ :return array_data: narray, a dictionary for ArrayData type, which
+ contains all parsed xspectra output along with labels and units
+ """
+ xspectra_header = xspectra_file[:4]
+ xspectra_data = np.genfromtxt(xspectra_file)
+
+ if len(xspectra_data) == 0:
+ raise QEOutputParsingError('XSpectra file is empty.')
+ if np.isnan(xspectra_data).any():
+ raise QEOutputParsingError('XSpectra file contains non-numeric elements.')
+
+ if len(xspectra_data[0]) == 2:
+ array_names = array_names[0]
+ array_units = array_units[0]
+ spin = False
+ elif len(xspectra_data[0]) == 4:
+ array_names = array_names[1]
+ array_units = array_units[1]
+ spin = True
+ else:
+ raise QEOutputParsingError('XSpectra data file in unsuitable format for the parser')

- # Parse an error message with optional mapping of the message
- if '%%%%%%%%%%%%%%' in line:
- parse_output_error(lines, line_number, logs, message_map)
+ i = 0
+ array_data = {}
+ array_data['header'] = np.array(xspectra_header)
+ while i < len(array_names):
+ array_data[array_names[i]] = xspectra_data[:, i]
+ array_data[array_names[i] + '_units'] = np.array(array_units[i])
+ i += 1
- return parsed_data, logs
+ return array_data, spin
diff --git a/tests/parsers/test_cp/test_cp_default_6_6_autopilot_.yml b/tests/parsers/test_cp/test_cp_default_6_6_autopilot_.yml
index d7b7dfd62..5e14109c0 100644
--- a/tests/parsers/test_cp/test_cp_default_6_6_autopilot_.yml
+++ b/tests/parsers/test_cp/test_cp_default_6_6_autopilot_.yml
@@ -1,6 +1,7 @@
parameters:
beta_real_space: false
charge_density: ./charge-density.dat
+ code_version: '6.6'
conjugate_gradient: true
constraint_mag: 0
creator_name: cp
@@ -61,7 +62,7 @@ parameters:
symmetries: []
symmetries_units: crystal
time_reversal_flag: true
- wall_time: ' 30.06s '
+ wall_time_seconds: 30.06
warnings: []
wfc_cutoff: -27.2113834506
wfc_cutoff_units: eV
diff --git a/tests/parsers/test_cp/test_cp_default_6_6_cgstep_.yml b/tests/parsers/test_cp/test_cp_default_6_6_cgstep_.yml
index dbcd015fc..5eca2de5e 100644
--- a/tests/parsers/test_cp/test_cp_default_6_6_cgstep_.yml
+++ b/tests/parsers/test_cp/test_cp_default_6_6_cgstep_.yml
@@ -1,6 +1,7 @@
parameters:
beta_real_space: false
charge_density: ./charge-density.dat
+ code_version: '6.6'
conjugate_gradient: true
constraint_mag: 0
creator_name: cp
@@ -60,7
+61,7 @@ parameters:
symmetries: []
symmetries_units: crystal
time_reversal_flag: true
- wall_time: ' 12.98s '
+ wall_time_seconds: 12.98
warnings: []
wfc_cutoff: -27.2113834506
wfc_cutoff_units: eV
diff --git a/tests/parsers/test_cp/test_cp_default_6_6_cgsteps_.yml b/tests/parsers/test_cp/test_cp_default_6_6_cgsteps_.yml
index d53abbc7e..50e509b27 100644
--- a/tests/parsers/test_cp/test_cp_default_6_6_cgsteps_.yml
+++ b/tests/parsers/test_cp/test_cp_default_6_6_cgsteps_.yml
@@ -1,6 +1,7 @@
parameters:
beta_real_space: false
charge_density: ./charge-density.dat
+ code_version: '6.6'
conjugate_gradient: true
constraint_mag: 0
creator_name: cp
@@ -61,7 +62,7 @@ parameters:
symmetries: []
symmetries_units: crystal
time_reversal_flag: true
- wall_time: ' 1m 4.64s '
+ wall_time_seconds: 64.64
warnings: []
wfc_cutoff: -27.2113834506
wfc_cutoff_units: eV
diff --git a/tests/parsers/test_cp/test_cp_default_6_6_verlet_.yml b/tests/parsers/test_cp/test_cp_default_6_6_verlet_.yml
index bea944894..2602d19a5 100644
--- a/tests/parsers/test_cp/test_cp_default_6_6_verlet_.yml
+++ b/tests/parsers/test_cp/test_cp_default_6_6_verlet_.yml
@@ -1,6 +1,7 @@
parameters:
beta_real_space: false
charge_density: ./charge-density.dat
+ code_version: '6.6'
constraint_mag: 0
creator_name: cp
creator_version: '6.6'
@@ -78,7 +79,7 @@ parameters:
- 0.0
vnhp:
- 0.0
- wall_time: ' 5.84s '
+ wall_time_seconds: 5.84
warnings: []
wfc_cutoff: -27.2113834506
wfc_cutoff_units: eV
diff --git a/tests/parsers/test_cp/test_cp_default_default_.yml b/tests/parsers/test_cp/test_cp_default_default_.yml
index 5043530f1..29fcddf26 100644
--- a/tests/parsers/test_cp/test_cp_default_default_.yml
+++ b/tests/parsers/test_cp/test_cp_default_default_.yml
@@ -69,6 +69,7 @@ parameters:
- - 0.0
- 0.0
- 0.0
+ code_version: '6.0'
creator_name: cp
creator_version: '6.0'
dft_exchange_correlation: PBE
@@ -185,7 +186,7 @@ parameters:
- 0.0
vnhp:
- 0.0
- wall_time: ' 5.64s '
+ wall_time_seconds: 5.64
warnings: []
wfc_cutoff: 408.170751759
wfc_cutoff_units: eV
diff --git a/tests/parsers/test_dos/test_dos_default.yml b/tests/parsers/test_dos/test_dos_default.yml
index fa7576bec..5fae916f8 100644
--- a/tests/parsers/test_dos/test_dos_default.yml
+++ b/tests/parsers/test_dos/test_dos_default.yml
@@ -10,6 +10,5 @@ dos:
- states/eV
- states/eV
parameters:
- code_version: v.6.4.1
- wall_time: 0.41s
+ code_version: 6.4.1
wall_time_seconds: 0.41
diff --git a/tests/parsers/test_matdyn/test_matdyn_default.yml b/tests/parsers/test_matdyn/test_matdyn_default.yml
index 30b9f6f61..378982250 100644
--- a/tests/parsers/test_matdyn/test_matdyn_default.yml
+++ b/tests/parsers/test_matdyn/test_matdyn_default.yml
@@ -1,5 +1,6 @@
output_parameters:
- warnings: []
+ code_version: '6.1'
+ wall_time_seconds: 0.0
output_phonon_bands:
array|bands:
- 1
diff --git a/tests/parsers/test_neb.py b/tests/parsers/test_neb.py
index 320dce0a3..4053a4c55 100644
--- a/tests/parsers/test_neb.py
+++ b/tests/parsers/test_neb.py
@@ -57,7 +57,7 @@ def test_neb_default(fixture_localhost, generate_calc_job_node, generate_parser,
assert calcfunction.is_finished, calcfunction.exception
assert calcfunction.is_finished_ok, calcfunction.exit_message
- assert not orm.Log.collection.get_logs_for(node)
+ assert not [log for log in orm.Log.collection.get_logs_for(node) if log.levelname == 'ERROR']
assert 'output_parameters' in results
assert 'output_mep' in results
assert 'output_trajectory' in results
@@ -89,7 +89,7 @@ def test_neb_all_iterations(
assert calcfunction.is_finished, calcfunction.exception
assert
calcfunction.is_finished_ok, calcfunction.exit_message
- assert not orm.Log.collection.get_logs_for(node)
+ assert not [log for log in orm.Log.collection.get_logs_for(node) if log.levelname == 'ERROR']
assert 'output_parameters' in results
assert 'output_mep' in results
assert 'output_trajectory' in results
diff --git a/tests/parsers/test_neb/test_neb_default.yml b/tests/parsers/test_neb/test_neb_default.yml
index 7d1e6897f..ed39ac0aa 100644
--- a/tests/parsers/test_neb/test_neb_default.yml
+++ b/tests/parsers/test_neb/test_neb_default.yml
@@ -25,6 +25,7 @@ parameters:
ci_scheme: auto
climbing_image_auto:
- 1
+ code_version: 6.4.1
converged:
- true
- 13
@@ -394,7 +395,6 @@ parameters:
suggested_k_max_au: 0.1542
suggested_k_min_au: 0.1028
use_freezing: false
- wall_time: 11.44s
wall_time_seconds: 11.44
warnings:
- 'error: ERROR_OUTPUT_STDOUT_INCOMPLETE'
diff --git a/tests/parsers/test_open_grid/test_open_grid_default.yml b/tests/parsers/test_open_grid/test_open_grid_default.yml
index 48bcf576f..c969a1a60 100644
--- a/tests/parsers/test_open_grid/test_open_grid_default.yml
+++ b/tests/parsers/test_open_grid/test_open_grid_default.yml
@@ -14,6 +14,5 @@ kpoints_mesh:
- 0.0
- 0.0
output_parameters:
- code_version: v.6.8
- wall_time: 0.53s
+ code_version: '6.8'
wall_time_seconds: 0.53
diff --git a/tests/parsers/test_ph.py b/tests/parsers/test_ph.py
index 1e693c83d..fcf13e60b 100644
--- a/tests/parsers/test_ph.py
+++ b/tests/parsers/test_ph.py
@@ -22,7 +22,7 @@ def test_ph_default(test_name, fixture_localhost, generate_calc_job_node, genera
assert calcfunction.is_finished, calcfunction.exception
assert calcfunction.is_finished_ok, calcfunction.exit_message
- assert not orm.Log.collection.get_logs_for(node)
+ assert not [log for log in orm.Log.collection.get_logs_for(node) if log.levelname == 'ERROR']
assert 'output_parameters' in results
data_regression.check(results['output_parameters'].get_dict())
diff --git a/tests/parsers/test_ph/test_ph_default_default_.yml b/tests/parsers/test_ph/test_ph_default_default_.yml
index 2b380070a..4797a063f 100644
--- a/tests/parsers/test_ph/test_ph_default_default_.yml
+++ b/tests/parsers/test_ph/test_ph_default_default_.yml
@@ -1,3 +1,4 @@
+code_version: '6.1'
dielectric_constant:
- - 57.36256076907993
- -2.842170943040401e-14
diff --git a/tests/parsers/test_ph/test_ph_default_no_modes_printed_.yml b/tests/parsers/test_ph/test_ph_default_no_modes_printed_.yml
index 8d9004dc7..6e864994b 100644
--- a/tests/parsers/test_ph/test_ph_default_no_modes_printed_.yml
+++ b/tests/parsers/test_ph/test_ph_default_no_modes_printed_.yml
@@ -1,3 +1,4 @@
+code_version: '6.8'
dynamical_matrix_1:
frequencies:
- 147.433165
diff --git a/tests/parsers/test_ph/test_ph_default_single_qpoint_.yml b/tests/parsers/test_ph/test_ph_default_single_qpoint_.yml
index 6ca78cfb9..f9311123f 100644
--- a/tests/parsers/test_ph/test_ph_default_single_qpoint_.yml
+++ b/tests/parsers/test_ph/test_ph_default_single_qpoint_.yml
@@ -1,3 +1,4 @@
+code_version: '7.0'
dynamical_matrix_1:
frequencies:
- -3.206051
diff --git a/tests/parsers/test_ph/test_ph_not_converged.yml b/tests/parsers/test_ph/test_ph_not_converged.yml
index 642ccc811..01ed77d4d 100644
--- a/tests/parsers/test_ph/test_ph_not_converged.yml
+++ b/tests/parsers/test_ph/test_ph_not_converged.yml
@@ -1,3 +1,4 @@
+code_version: '6.1'
number_of_atoms: 2
number_of_irr_representations_for_each_q:
- 2
diff --git a/tests/parsers/test_ph/test_ph_out_of_walltime.yml b/tests/parsers/test_ph/test_ph_out_of_walltime.yml
index ea186eeff..fa5fe52f6 100644
---
a/tests/parsers/test_ph/test_ph_out_of_walltime.yml +++ b/tests/parsers/test_ph/test_ph_out_of_walltime.yml @@ -1,3 +1,4 @@ +code_version: 6.3MaX number_of_atoms: 2 number_of_irr_representations_for_each_q: - 2 diff --git a/tests/parsers/test_pw2gw/test_pw2gw_default_data.yml b/tests/parsers/test_pw2gw/test_pw2gw_default_data.yml index 886f2e4e1..6dc6f8267 100644 --- a/tests/parsers/test_pw2gw/test_pw2gw_default_data.yml +++ b/tests/parsers/test_pw2gw/test_pw2gw_default_data.yml @@ -1,3 +1,3 @@ output_parameters: - wall_time: ' 1m26.21s ' + code_version: '6.2' wall_time_seconds: 86.21000000000001 diff --git a/tests/parsers/test_pw2wannier90/test_pw2wannier90_default.yml b/tests/parsers/test_pw2wannier90/test_pw2wannier90_default.yml index e4fbf1f8e..58e1812d0 100644 --- a/tests/parsers/test_pw2wannier90/test_pw2wannier90_default.yml +++ b/tests/parsers/test_pw2wannier90/test_pw2wannier90_default.yml @@ -1,4 +1,3 @@ parameters: - code_version: v.6.4.1 - wall_time: 2.18s + code_version: 6.4.1 wall_time_seconds: 2.18 diff --git a/tests/parsers/test_xspectra/test_xspectra_default.yml b/tests/parsers/test_xspectra/test_xspectra_default.yml index 30ab10ba1..c246fb577 100644 --- a/tests/parsers/test_xspectra/test_xspectra_default.yml +++ b/tests/parsers/test_xspectra/test_xspectra_default.yml @@ -1,5 +1,5 @@ parameters: - code_version: v.6.5 + code_version: '6.5' core_level_energy: '-1839.' core_level_energy_units: eV energy_zero: '4.1718' diff --git a/tests/parsers/test_xspectra/test_xspectra_spin.yml b/tests/parsers/test_xspectra/test_xspectra_spin.yml index daac7a106..c7ef56366 100644 --- a/tests/parsers/test_xspectra/test_xspectra_spin.yml +++ b/tests/parsers/test_xspectra/test_xspectra_spin.yml @@ -1,5 +1,5 @@ parameters: - code_version: v.6.5 + code_version: '6.5' core_level_energy: '-1839.' core_level_energy_units: eV energy_zero: '6.8316' diff --git a/tests/workflows/ph/test_base/test_merge_outputs_merge_outputs_.yml b/tests/workflows/ph/test_base/test_merge_outputs_merge_outputs_.yml index 448306ff5..d3bce70b1 100644 --- a/tests/workflows/ph/test_base/test_merge_outputs_merge_outputs_.yml +++ b/tests/workflows/ph/test_base/test_merge_outputs_merge_outputs_.yml @@ -1,3 +1,4 @@ +code_version: '7.0' dynamical_matrix_1: frequencies: - -3.205876 diff --git a/tests/workflows/ph/test_base/test_merge_outputs_merge_outputs_singleq_.yml b/tests/workflows/ph/test_base/test_merge_outputs_merge_outputs_singleq_.yml index d1f3486a1..66936758f 100644 --- a/tests/workflows/ph/test_base/test_merge_outputs_merge_outputs_singleq_.yml +++ b/tests/workflows/ph/test_base/test_merge_outputs_merge_outputs_singleq_.yml @@ -1,3 +1,4 @@ +code_version: '7.0' dynamical_matrix_1: frequencies: - -3.151844
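
All of the parser changes above converge on the same control flow inherited from the new ``BaseParser``: create a logging container, let ``parse_stdout_from_retrieved`` locate, read and pre-parse the stdout, bail out early through ``check_base_errors``, attach ``output_parameters``, and finally map any logged error labels onto exit codes before ``self.exit(logs=logs)``. The following is a minimal sketch of a parser written against that pattern, using only the hooks that appear in this diff; ``ExampleParser`` and the ``'example error marker'`` string are hypothetical placeholders, not part of this changeset.

# -*- coding: utf-8 -*-
"""Sketch of a parser following the ``BaseParser`` flow introduced above (illustrative only)."""
from aiida.orm import Dict

from aiida_quantumespresso.parsers.base import BaseParser
from aiida_quantumespresso.utils.mapping import get_logging_container


class ExampleParser(BaseParser):  # hypothetical class, for illustration only
    """Parser sketch relying on the shared ``BaseParser`` machinery."""

    # Code-specific stdout markers mapped to exit-code labels; the base class
    # merges these with its own defaults via ``get_error_map()``.
    class_error_map = {
        'example error marker': 'ERROR_EXAMPLE',  # hypothetical marker and label
    }

    def parse(self, **kwargs):
        logs = get_logging_container()

        # The base implementation finds and reads the stdout file, appending
        # labels such as ERROR_OUTPUT_STDOUT_MISSING or ERROR_OUTPUT_STDOUT_INCOMPLETE
        # to ``logs`` as it goes.
        stdout, parsed_data, logs = self.parse_stdout_from_retrieved(logs)

        base_exit_code = self.check_base_errors(logs)
        if base_exit_code:
            return self.exit(base_exit_code, logs)

        self.out('output_parameters', Dict(parsed_data))

        # Return the first matching exit code recorded during parsing.
        for exit_code in list(self.get_error_map().values()) + ['ERROR_OUTPUT_STDOUT_INCOMPLETE']:
            if exit_code in logs.error:
                return self.exit(self.exit_codes.get(exit_code), logs)

        return self.exit(logs=logs)

Keeping the early ``check_base_errors`` return separate from the later error-map loop preserves the exit-code precedence used by the refactored parsers: a missing or unreadable stdout always takes priority over code-specific failures.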