From 08bb260ce5faf5b81a32d7b2f18e122525b6f747 Mon Sep 17 00:00:00 2001 From: Oliver Beckstein Date: Fri, 25 Aug 2023 18:15:30 -0400 Subject: [PATCH 1/6] black-formatted all files --- doc/sphinx/source/conf.py | 133 +-- gromacs/__init__.py | 66 +- gromacs/_version.py | 154 +-- gromacs/cbook.py | 1015 +++++++++++------- gromacs/collections.py | 38 +- gromacs/config.py | 422 ++++---- gromacs/core.py | 217 ++-- gromacs/environment.py | 105 +- gromacs/exceptions.py | 25 +- gromacs/fileformats/__init__.py | 3 +- gromacs/fileformats/blocks.py | 463 ++++---- gromacs/fileformats/convert.py | 82 +- gromacs/fileformats/mdp.py | 49 +- gromacs/fileformats/ndx.py | 45 +- gromacs/fileformats/top.py | 1160 ++++++++++++--------- gromacs/fileformats/xpm.py | 65 +- gromacs/fileformats/xvg.py | 467 ++++++--- gromacs/formats.py | 2 - gromacs/log.py | 19 +- gromacs/qsub.py | 210 ++-- gromacs/run.py | 94 +- gromacs/scaling.py | 516 +++++---- gromacs/setup.py | 784 +++++++++----- gromacs/tools.py | 290 ++++-- gromacs/utilities.py | 223 ++-- scripts/gw-forcefield.py | 205 ++-- scripts/gw-join_parts.py | 34 +- scripts/gw-merge_topologies.py | 214 ++-- scripts/gw-partial_tempering.py | 39 +- setup.py | 120 ++- tests/__init__.py | 3 +- tests/conftest.py | 59 +- tests/datafiles.py | 3 +- tests/fileformats/test_convert.py | 53 +- tests/fileformats/test_mdp.py | 97 +- tests/fileformats/test_ndx.py | 28 +- tests/fileformats/test_xpm.py | 8 +- tests/fileformats/test_xvg.py | 30 +- tests/fileformats/top/test_amber03star.py | 26 +- tests/fileformats/top/test_amber03w.py | 26 +- tests/fileformats/top/test_charmm22.py | 7 +- tests/fileformats/top/top.py | 376 ++++--- tests/test_cbook.py | 67 +- tests/test_collections.py | 39 +- tests/test_config.py | 52 +- tests/test_core.py | 45 +- tests/test_log.py | 26 +- tests/test_qsub.py | 18 +- tests/test_run.py | 35 +- tests/test_setup.py | 87 +- tests/test_tools.py | 9 +- tests/test_utilities.py | 234 +++-- tests/test_version.py | 1 + versioneer.py | 267 +++-- 54 files changed, 5385 insertions(+), 3470 deletions(-) diff --git a/doc/sphinx/source/conf.py b/doc/sphinx/source/conf.py index 2e959c2c..6c8a00f8 100644 --- a/doc/sphinx/source/conf.py +++ b/doc/sphinx/source/conf.py @@ -14,6 +14,7 @@ import sys import os import datetime + # https://sphinx-rtd-theme.readthedocs.io/en/stable/ import sphinx_rtd_theme @@ -22,73 +23,77 @@ # is relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. # make sure sphinx always uses the current branch -sys.path.insert(0, os.path.abspath('../../..')) +sys.path.insert(0, os.path.abspath("../../..")) # General configuration # --------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', - 'sphinx.ext.mathjax', 'sphinx.ext.viewcode', - 'sphinx_rtd_theme'] +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.intersphinx", + "sphinx.ext.mathjax", + "sphinx.ext.viewcode", + "sphinx_rtd_theme", +] -mathjax_path = 'https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-AMS-MML_HTMLorMML' +mathjax_path = "https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/MathJax.js?config=TeX-AMS-MML_HTMLorMML" # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix of source filenames. 
-source_suffix = '.txt' +source_suffix = ".txt" # The master toctree document. -master_doc = 'index' +master_doc = "index" # General substitutions. -project = u'GromacsWrapper' +project = "GromacsWrapper" now = datetime.datetime.now() -copyright = u'2009-{}, The Authors of GromacsWrapper (see AUTHORS)'.format(now.year) +copyright = "2009-{}, The Authors of GromacsWrapper (see AUTHORS)".format(now.year) # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. # # Dynamically calculate the version (uses versioneer) -packageversion = __import__('gromacs').__version__ +packageversion = __import__("gromacs").__version__ # The short X.Y version. -version = '.'.join(packageversion.split('.')[:2]) +version = ".".join(packageversion.split(".")[:2]) # The full version, including alpha/beta/rc tags. release = packageversion # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -today_fmt = '%B %d, %Y' +today_fmt = "%B %d, %Y" # List of documents that shouldn't be included in the build. -#unused_docs = [] +# unused_docs = [] # List of directories, relative to source directories, that shouldn't be searched # for source files. exclude_trees = [] # The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # Options for HTML output @@ -96,36 +101,34 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'sphinx_rtd_theme' +html_theme = "sphinx_rtd_theme" html_theme_options = { - 'canonical_url': '', - 'logo_only': True, - 'display_version': True, - 'prev_next_buttons_location': 'bottom', - 'style_external_links': False, - 'style_nav_header_background': 'white', + "canonical_url": "", + "logo_only": True, + "display_version": True, + "prev_next_buttons_location": "bottom", + "style_external_links": False, + "style_nav_header_background": "white", # Toc options - 'collapse_navigation': True, - 'sticky_navigation': True, - 'navigation_depth': 4, - 'includehidden': True, - 'titles_only': False, + "collapse_navigation": True, + "sticky_navigation": True, + "navigation_depth": 4, + "includehidden": True, + "titles_only": False, } # Add any paths that contain custom themes here, relative to this directory. -html_theme_path = [ - sphinx_rtd_theme.get_html_theme_path() -] +html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
@@ -139,88 +142,94 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -html_last_updated_fmt = '%b %d, %Y' +html_last_updated_fmt = "%b %d, %Y" # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_use_modindex = True +# html_use_modindex = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, the reST sources are included in the HTML build as _sources/. -#html_copy_source = True +# html_copy_source = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = '' +# html_file_suffix = '' # Output file base name for HTML help builder. -htmlhelp_basename = 'GromacsWrapperdoc' +htmlhelp_basename = "GromacsWrapperdoc" # Options for LaTeX output # ------------------------ # The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' +# latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' +# latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class [howto/manual]). latex_documents = [ - ('index', 'GromacsWrapper.tex', u'GromacsWrapper Documentation', - u'Oliver Beckstein', 'manual'), + ( + "index", + "GromacsWrapper.tex", + "GromacsWrapper Documentation", + "Oliver Beckstein", + "manual", + ), ] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # Additional stuff for the LaTeX preamble. -#latex_preamble = '' +# latex_preamble = '' # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. 
-#latex_use_modindex = True +# latex_use_modindex = True # Options for ext.intersphinx # --------------------------- # intersphinx: reference standard lib and RecSQL # http://sphinx.pocoo.org/latest/ext/intersphinx.html -intersphinx_mapping = {'https://docs.python.org/': None, - 'https://numpy.org/doc/stable/': None, - 'https://docs.scipy.org/doc/scipy/reference/': None, +intersphinx_mapping = { + "https://docs.python.org/": None, + "https://numpy.org/doc/stable/": None, + "https://docs.scipy.org/doc/scipy/reference/": None, } @@ -231,5 +240,3 @@ # This value selects what content will be inserted into the main body of an autoclass directive. # "class", "init", "both" autoclass_content = "both" - - diff --git a/gromacs/__init__.py b/gromacs/__init__.py index 775310de..f73e0d09 100644 --- a/gromacs/__init__.py +++ b/gromacs/__init__.py @@ -170,6 +170,7 @@ """ from __future__ import absolute_import + __docformat__ = "restructuredtext en" import os @@ -177,9 +178,10 @@ import logging # __all__ is extended with all gromacs command instances later -__all__ = ['config', 'tools', 'cbook', 'fileformats'] +__all__ = ["config", "tools", "cbook", "fileformats"] from ._version import get_versions + #: Version of the package, following `semantic versioning`_ in the form #: MAJOR.MINOR.PATCH. When PATCH increases, bugs are fixed or documentation #: or metadata are updated. Increases in MINOR can introduce new features and @@ -193,15 +195,23 @@ #: the commit ID encoded in the trailing string. #: #: .. _`semantic versioning`: https://semver.org/ -__version__ = get_versions()['version'] +__version__ = get_versions()["version"] del get_versions from . import fileformats -from .exceptions import (GromacsError, MissingDataError, ParseError, - GromacsFailureWarning, GromacsImportWarning, - GromacsValueWarning, AutoCorrectionWarning, - BadParameterWarning, MissingDataWarning, - UsageWarning, LowAccuracyWarning) +from .exceptions import ( + GromacsError, + MissingDataError, + ParseError, + GromacsFailureWarning, + GromacsImportWarning, + GromacsValueWarning, + AutoCorrectionWarning, + BadParameterWarning, + MissingDataWarning, + UsageWarning, + LowAccuracyWarning, +) # Import configuration before anything else @@ -216,6 +226,7 @@ class NullHandler(logging.Handler): def emit(self, record): pass + # default silent logger --- just here for illustration; below we # we get a proper logger from log.create() h = NullHandler() @@ -232,6 +243,7 @@ def emit(self, record): # import logging # logger = logging.getLogger('gromacs.MODULENAME') + def start_logging(logfile="gromacs.log"): """Start logging of messages to file and console. @@ -239,19 +251,24 @@ def start_logging(logfile="gromacs.log"): logged with the tag *gromacs*. """ from . import log + log.create("gromacs", logfile=logfile) - logging.getLogger("gromacs").info("GromacsWrapper %s STARTED logging to %r", - __version__, logfile) + logging.getLogger("gromacs").info( + "GromacsWrapper %s STARTED logging to %r", __version__, logfile + ) + def stop_logging(): """Stop logging to logfile and console.""" from . import log + logger = logging.getLogger("gromacs") logger.info("GromacsWrapper %s STOPPED logging", __version__) log.clear_handlers(logger) # this _should_ do the job... 
+ # for testing (maybe enable with envar GW_START_LOGGING) -if os.environ.get('GW_START_LOGGING', False): +if os.environ.get("GW_START_LOGGING", False): start_logging() # Try to load environment variables set by GMXRC @@ -271,9 +288,9 @@ def stop_logging(): _have_g_commands = [] _missing_g_commands = [] for clsname, cls in tools.registry.items(): - name = clsname[0].lower() + clsname[1:] # instances should start with lower case + name = clsname[0].lower() + clsname[1:] # instances should start with lower case try: - globals()[name] = cls() # add instance of command for immediate use + globals()[name] = cls() # add instance of command for immediate use _have_g_commands.append(name) except: _missing_g_commands.append(name) @@ -283,9 +300,12 @@ def stop_logging(): _have_g_commands.sort() _missing_g_commands.sort() if len(_missing_g_commands) > 0: - warnings.warn("Some Gromacs commands were NOT found; " - "maybe source GMXRC first? The following are missing:\n%r\n" % _missing_g_commands, - category=GromacsImportWarning) + warnings.warn( + "Some Gromacs commands were NOT found; " + "maybe source GMXRC first? The following are missing:\n%r\n" + % _missing_g_commands, + category=GromacsImportWarning, + ) del name, cls, clsname @@ -303,7 +323,8 @@ def stop_logging(): # convenience functions for warnings -less_important_warnings = ['AutoCorrectionWarning', 'UsageWarning'] +less_important_warnings = ["AutoCorrectionWarning", "UsageWarning"] + def filter_gromacs_warnings(action, categories=None): """Set the :meth:`warnings.simplefilter` to *action*. @@ -320,9 +341,14 @@ def filter_gromacs_warnings(action, categories=None): except KeyError: w = c if not issubclass(w, Warning): - raise TypeError("{0!r} is neither a Warning nor the name of a Gromacs warning.".format(c)) + raise TypeError( + "{0!r} is neither a Warning nor the name of a Gromacs warning.".format( + c + ) + ) warnings.simplefilter(action, category=w) + def disable_gromacs_warnings(categories=None): """Disable ("ignore") specified warnings from the gromacs package. @@ -330,7 +356,8 @@ def disable_gromacs_warnings(categories=None): ``None`` selects the defaults. """ - filter_gromacs_warnings('ignore', categories=categories) + filter_gromacs_warnings("ignore", categories=categories) + def enable_gromacs_warnings(categories=None): """Enable ("always") specified warnings from the gromacs package. @@ -339,5 +366,4 @@ def enable_gromacs_warnings(categories=None): ``None`` selects the defaults, :data:`gromacs._less_important_warnings`. """ - filter_gromacs_warnings('always', categories=categories) - + filter_gromacs_warnings("always", categories=categories) diff --git a/gromacs/_version.py b/gromacs/_version.py index d4c91d73..fd021892 100644 --- a/gromacs/_version.py +++ b/gromacs/_version.py @@ -1,4 +1,3 @@ - # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). 
Distribution tarballs (built by setup.py sdist) and build @@ -58,17 +57,18 @@ class NotThisMethod(Exception): def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" + def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f + return decorate -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): +def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None @@ -76,10 +76,13 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) + p = subprocess.Popen( + [c] + args, + cwd=cwd, + env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr else None), + ) break except EnvironmentError: e = sys.exc_info()[1] @@ -116,16 +119,22 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} + return { + "version": dirname[len(parentdir_prefix) :], + "full-revisionid": None, + "dirty": False, + "error": None, + "date": None, + } else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: - print("Tried directories %s but none started with prefix %s" % - (str(rootdirs), parentdir_prefix)) + print( + "Tried directories %s but none started with prefix %s" + % (str(rootdirs), parentdir_prefix) + ) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @@ -181,7 +190,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d @@ -190,7 +199,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) + tags = set([r for r in refs if re.search(r"\d", r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: @@ -198,19 +207,26 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): for ref in sorted(tags): # sorting will prefer e.g. 
"2.0" over "2.0rc1" if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] + r = ref[len(tag_prefix) :] if verbose: print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} + return { + "version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": None, + "date": date, + } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} + return { + "version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": "no suitable tags", + "date": None, + } @register_vcs_handler("git", "pieces_from_vcs") @@ -225,8 +241,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) + out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) @@ -234,10 +249,19 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%s*" % tag_prefix], - cwd=root) + describe_out, rc = run_command( + GITS, + [ + "describe", + "--tags", + "--dirty", + "--always", + "--long", + "--match", + "%s*" % tag_prefix, + ], + cwd=root, + ) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") @@ -260,17 +284,16 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] + git_describe = git_describe[: git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? 
- pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) + pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out return pieces # tag @@ -279,10 +302,12 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) + pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( + full_tag, + tag_prefix, + ) return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] + pieces["closest-tag"] = full_tag[len(tag_prefix) :] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) @@ -293,13 +318,13 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): else: # HEX: no tags pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) + count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], - cwd=root)[0].strip() + date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[ + 0 + ].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces @@ -330,8 +355,7 @@ def render_pep440(pieces): rendered += ".dirty" else: # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) + rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered @@ -445,11 +469,13 @@ def render_git_describe_long(pieces): def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} + return { + "version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None, + } if not style or style == "default": style = "pep440" # the default @@ -469,9 +495,13 @@ def render(pieces, style): else: raise ValueError("unknown style '%s'" % style) - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} + return { + "version": rendered, + "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], + "error": None, + "date": pieces.get("date"), + } def get_versions(): @@ -485,8 +515,7 @@ def get_versions(): verbose = cfg.verbose try: - return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, - verbose) + return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass @@ -495,13 +524,16 @@ def get_versions(): # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. 
- for i in cfg.versionfile_source.split('/'): + for i in cfg.versionfile_source.split("/"): root = os.path.dirname(root) except NameError: - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to find root of source tree", - "date": None} + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to find root of source tree", + "date": None, + } try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) @@ -515,6 +547,10 @@ def get_versions(): except NotThisMethod: pass - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to compute version", "date": None} + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", + "date": None, + } diff --git a/gromacs/cbook.py b/gromacs/cbook.py index 2fd18013..9be86fa1 100644 --- a/gromacs/cbook.py +++ b/gromacs/cbook.py @@ -139,14 +139,22 @@ import six import logging -logger = logging.getLogger('gromacs.cbook') + +logger = logging.getLogger("gromacs.cbook") import gromacs -from .exceptions import GromacsError, BadParameterWarning, MissingDataWarning, GromacsValueWarning, GromacsImportWarning +from .exceptions import ( + GromacsError, + BadParameterWarning, + MissingDataWarning, + GromacsValueWarning, + GromacsImportWarning, +) from . import tools from . import utilities from .utilities import asiterable + def _define_canned_commands(): """Define functions for the top level name space. @@ -159,31 +167,41 @@ def _define_canned_commands(): """ global trj_compact, rmsd_backbone, trj_fitted, trj_xyfitted - trj_compact = tools.Trjconv(ur='compact', center=True, boxcenter='tric', pbc='mol', - input=('protein','system'), - doc=""" -Writes a compact representation of the system centered on the protein""") - - rmsd_backbone = tools.G_rms(what='rmsd', fit='rot+trans', - input=('Backbone','Backbone'), - doc=""" -Computes RMSD of backbone after fitting to the backbone.""") - - trj_fitted = tools.Trjconv(fit='rot+trans', - input=('backbone', 'system'), - doc=""" + trj_compact = tools.Trjconv( + ur="compact", + center=True, + boxcenter="tric", + pbc="mol", + input=("protein", "system"), + doc=""" +Writes a compact representation of the system centered on the protein""", + ) + + rmsd_backbone = tools.G_rms( + what="rmsd", + fit="rot+trans", + input=("Backbone", "Backbone"), + doc=""" +Computes RMSD of backbone after fitting to the backbone.""", + ) + + trj_fitted = tools.Trjconv( + fit="rot+trans", + input=("backbone", "system"), + doc=""" Writes a trajectory fitted to the protein backbone. Note that this does *not* center; if center is required, the *input* selection should have the group to be centered on in second position, e.g. ``input = ('backbone', 'Protein', System')``. -""") - +""", + ) # Gromacs 4.x - trj_xyfitted = tools.Trjconv(fit='rotxy+transxy', - input=('backbone', 'protein','system'), - doc=""" + trj_xyfitted = tools.Trjconv( + fit="rotxy+transxy", + input=("backbone", "protein", "system"), + doc=""" Writes a trajectory fitted to the protein in the XY-plane only. This is useful for membrane proteins. The system *must* be oriented so @@ -194,21 +212,26 @@ def _define_canned_commands(): and that one sometimes need two runs of trjconv: one to center and one to fit. -.. Note:: Gromacs 4.x only""") +.. 
Note:: Gromacs 4.x only""", + ) # end of _define_canned_commands + try: _define_canned_commands() except (OSError, ImportError, AttributeError, GromacsError): - msg = ("Failed to define a number of commands in gromacs.cbook. Most " - "likely the Gromacs installation cannot be found --- set GMXRC in " - "~/.gromacswrapper.cfg or source GMXRC directly") + msg = ( + "Failed to define a number of commands in gromacs.cbook. Most " + "likely the Gromacs installation cannot be found --- set GMXRC in " + "~/.gromacswrapper.cfg or source GMXRC directly" + ) warnings.warn(msg, category=GromacsImportWarning) logger.error(msg) finally: del _define_canned_commands + def trj_fitandcenter(xy=False, **kwargs): """Center everything and make a compact representation (pass 1) and fit the system to a reference (pass 2). @@ -281,68 +304,105 @@ def trj_fitandcenter(xy=False, **kwargs): .. _`g_spatial documentation`: http://www.gromacs.org/Documentation/Gromacs_Utilities/g_spatial """ if xy: - fitmode = 'rotxy+transxy' - kwargs.pop('fit', None) + fitmode = "rotxy+transxy" + kwargs.pop("fit", None) else: - fitmode = kwargs.pop('fit', 'rot+trans') # user can use progressive, too + fitmode = kwargs.pop("fit", "rot+trans") # user can use progressive, too - intrj = kwargs.pop('f', None) + intrj = kwargs.pop("f", None) # get the correct suffix for the intermediate step: only trr will # keep velocities/forces! suffix = os.path.splitext(intrj)[1] - if not suffix in ('xtc', 'trr'): - suffix = '.xtc' - outtrj = kwargs.pop('o', None) + if not suffix in ("xtc", "trr"): + suffix = ".xtc" + outtrj = kwargs.pop("o", None) - ndx = kwargs.pop('n', None) - ndxcompact = kwargs.pop('n1', ndx) + ndx = kwargs.pop("n", None) + ndxcompact = kwargs.pop("n1", ndx) - structures = kwargs.pop('s', None) + structures = kwargs.pop("s", None) if type(structures) in (tuple, list): try: compact_structure, fit_structure = structures except: - raise ValueError("argument s must be a pair of tpr/pdb files or a single structure file") + raise ValueError( + "argument s must be a pair of tpr/pdb files or a single structure file" + ) else: compact_structure = fit_structure = structures - - inpfit = kwargs.pop('input', ('backbone', 'protein','system')) + inpfit = kwargs.pop("input", ("backbone", "protein", "system")) try: - _inpcompact = inpfit[1:] # use 2nd and 3rd group for compact + _inpcompact = inpfit[1:] # use 2nd and 3rd group for compact except TypeError: _inpcompact = None - inpcompact = kwargs.pop('input1', _inpcompact) # ... or the user supplied ones + inpcompact = kwargs.pop("input1", _inpcompact) # ... 
or the user supplied ones - fd, tmptrj = tempfile.mkstemp(suffix=suffix, prefix='pbc_compact_') + fd, tmptrj = tempfile.mkstemp(suffix=suffix, prefix="pbc_compact_") logger.info("Input structure for PBC: {compact_structure!r}".format(**vars())) logger.info("Input structure for fit: {fit_structure!r}".format(**vars())) logger.info("Input trajectory: {intrj!r}".format(**vars())) logger.info("Output trajectory: {outtrj!r}".format(**vars())) - logger.debug("Writing temporary trajectory {tmptrj!r} (will be auto-cleaned).".format(**vars())) + logger.debug( + "Writing temporary trajectory {tmptrj!r} (will be auto-cleaned).".format( + **vars() + ) + ) sys.stdout.flush() try: - gromacs.trjconv(s=compact_structure, f=intrj, o=tmptrj, n=ndxcompact, - ur='compact', center=True, boxcenter='tric', pbc='mol', - input=inpcompact, **kwargs) + gromacs.trjconv( + s=compact_structure, + f=intrj, + o=tmptrj, + n=ndxcompact, + ur="compact", + center=True, + boxcenter="tric", + pbc="mol", + input=inpcompact, + **kwargs + ) # explicitly set pbc="none" for the fitting stage (anything else will produce rubbish and/or # complaints from Gromacs) - kwargs['pbc'] = "none" + kwargs["pbc"] = "none" if compact_structure == fit_structure: # fit as ususal, including centering # (Is center=True really necessary? -- note, if I remove center=True then # I MUST fiddle inpfit as below!!) - gromacs.trjconv(s=fit_structure, f=tmptrj, o=outtrj, n=ndx, fit=fitmode, center=True, input=inpfit, **kwargs) + gromacs.trjconv( + s=fit_structure, + f=tmptrj, + o=outtrj, + n=ndx, + fit=fitmode, + center=True, + input=inpfit, + **kwargs + ) else: # make sure that we fit EXACTLY as the user wants inpfit = [inpfit[0], inpfit[-1]] - gromacs.trjconv(s=fit_structure, f=tmptrj, o=outtrj, n=ndx, fit=fitmode, input=inpfit, **kwargs) + gromacs.trjconv( + s=fit_structure, + f=tmptrj, + o=outtrj, + n=ndx, + fit=fitmode, + input=inpfit, + **kwargs + ) finally: utilities.unlink_gmx(tmptrj) -def cat(prefix="md", dirname=os.path.curdir, partsdir="parts", fulldir="full", - resolve_multi="pass"): + +def cat( + prefix="md", + dirname=os.path.curdir, + partsdir="parts", + fulldir="full", + resolve_multi="pass", +): """Concatenate all parts of a simulation. The xtc, trr, and edr files in *dirname* such as prefix.xtc, @@ -388,17 +448,18 @@ def cat(prefix="md", dirname=os.path.curdir, partsdir="parts", fulldir="full", directory where to store the final results [full] """ - gmxcat = {'xtc': gromacs.trjcat, - 'trr': gromacs.trjcat, - 'edr': gromacs.eneconv, - 'log': utilities.cat, - } + gmxcat = { + "xtc": gromacs.trjcat, + "trr": gromacs.trjcat, + "edr": gromacs.eneconv, + "log": utilities.cat, + } def _cat(prefix, ext, partsdir=partsdir, fulldir=fulldir): filenames = glob_parts(prefix, ext) - if ext.startswith('.'): + if ext.startswith("."): ext = ext[1:] - outfile = os.path.join(fulldir, prefix + '.' + ext) + outfile = os.path.join(fulldir, prefix + "." + ext) if not filenames: return None nonempty_files = [] @@ -408,8 +469,11 @@ def _cat(prefix, ext, partsdir=partsdir, fulldir=fulldir): continue if os.path.islink(f): # TODO: re-write the symlink to point to the original file - errmsg = "Symbolic links do not work (file %(f)r), sorry. " \ - "CHECK LOCATION OF FILES MANUALLY BEFORE RUNNING gromacs.cbook.cat() AGAIN!" % vars() + errmsg = ( + "Symbolic links do not work (file %(f)r), sorry. " + "CHECK LOCATION OF FILES MANUALLY BEFORE RUNNING gromacs.cbook.cat() AGAIN!" 
+ % vars() + ) logger.exception(errmsg) raise NotImplementedError(errmsg) shutil.move(f, partsdir) @@ -420,8 +484,10 @@ def _cat(prefix, ext, partsdir=partsdir, fulldir=fulldir): _resolve_options = ("pass", "guess") if not resolve_multi in _resolve_options: - raise ValueError("resolve_multi must be one of %(_resolve_options)r, " - "not %(resolve_multi)r" % vars()) + raise ValueError( + "resolve_multi must be one of %(_resolve_options)r, " + "not %(resolve_multi)r" % vars() + ) if fulldir == os.path.curdir: wmsg = "Using the current directory as fulldir can potentially lead to data loss if you run this function multiple times." @@ -431,45 +497,57 @@ def _cat(prefix, ext, partsdir=partsdir, fulldir=fulldir): with utilities.in_dir(dirname, create=False): utilities.mkdir_p(partsdir) utilities.mkdir_p(fulldir) - for ext in ('log', 'edr', 'trr', 'xtc'): + for ext in ("log", "edr", "trr", "xtc"): logger.info("[%(dirname)s] concatenating %(ext)s files...", vars()) outfile = _cat(prefix, ext, partsdir) logger.info("[%(dirname)s] created %(outfile)r", vars()) - for ext in ('gro', 'pdb'): # XXX: ugly, make method out of parts? + for ext in ("gro", "pdb"): # XXX: ugly, make method out of parts? filenames = glob_parts(prefix, ext) if len(filenames) == 0: - continue # goto next ext + continue # goto next ext elif len(filenames) == 1: pick = filenames[0] else: if resolve_multi == "pass": - logger.warning("[%(dirname)s] too many output structures %(filenames)r, " - "cannot decide which one --- resolve manually!", vars()) + logger.warning( + "[%(dirname)s] too many output structures %(filenames)r, " + "cannot decide which one --- resolve manually!", + vars(), + ) for f in filenames: shutil.move(f, partsdir) - continue # goto next ext + continue # goto next ext elif resolve_multi == "guess": - pick = prefix + '.' + ext + pick = prefix + "." + ext if not pick in filenames: - pick = filenames[-1] # filenames are ordered with highest parts at end - final = os.path.join(fulldir, prefix + '.' + ext) + pick = filenames[ + -1 + ] # filenames are ordered with highest parts at end + final = os.path.join(fulldir, prefix + "." + ext) shutil.copy(pick, final) # copy2 fails on nfs with Darwin at least for f in filenames: shutil.move(f, partsdir) - logger.info("[%(dirname)s] collected final structure %(final)r " - "(from %(pick)r)", vars()) - + logger.info( + "[%(dirname)s] collected final structure %(final)r " "(from %(pick)r)", + vars(), + ) partsdirpath = utilities.realpath(dirname, partsdir) - logger.warning("[%(dirname)s] cat() complete in %(fulldir)r but original files " - "in %(partsdirpath)r must be manually removed", vars()) + logger.warning( + "[%(dirname)s] cat() complete in %(fulldir)r but original files " + "in %(partsdirpath)r must be manually removed", + vars(), + ) + def glob_parts(prefix, ext): """Find files from a continuation run""" - if ext.startswith('.'): + if ext.startswith("."): ext = ext[1:] - files = glob.glob(prefix+'.'+ext) + glob.glob(prefix+'.part[0-9][0-9][0-9][0-9].'+ext) - files.sort() # at least some rough sorting... + files = glob.glob(prefix + "." + ext) + glob.glob( + prefix + ".part[0-9][0-9][0-9][0-9]." + ext + ) + files.sort() # at least some rough sorting... return files @@ -495,7 +573,7 @@ class Frames(object): manageable. """ - def __init__(self, structure, trj, maxframes=None, format='pdb', **kwargs): + def __init__(self, structure, trj, maxframes=None, format="pdb", **kwargs): """Set up the Frames iterator. 
:Arguments: @@ -516,18 +594,20 @@ def __init__(self, structure, trj, maxframes=None, format='pdb', **kwargs): """ self.structure = structure # tpr or equivalent - self.trj = trj # xtc, trr, ... + self.trj = trj # xtc, trr, ... self.maxframes = maxframes if self.maxframes is not None: - raise NotImplementedError('sorry, maxframes feature not implemented yet') - - self.framedir = tempfile.mkdtemp(prefix="Frames_", suffix='_'+format) - self.frameprefix = os.path.join(self.framedir, 'frame') - self.frametemplate = self.frameprefix + '%d' + '.' + format # depends on trjconv - self.frameglob = self.frameprefix + '*' + '.' + format - kwargs['sep'] = True - kwargs['o'] = self.frameprefix + '.' + format - kwargs.setdefault('input', ('System',)) + raise NotImplementedError("sorry, maxframes feature not implemented yet") + + self.framedir = tempfile.mkdtemp(prefix="Frames_", suffix="_" + format) + self.frameprefix = os.path.join(self.framedir, "frame") + self.frametemplate = ( + self.frameprefix + "%d" + "." + format + ) # depends on trjconv + self.frameglob = self.frameprefix + "*" + "." + format + kwargs["sep"] = True + kwargs["o"] = self.frameprefix + "." + format + kwargs.setdefault("input", ("System",)) self.extractor = tools.Trjconv(s=self.structure, f=self.trj, **kwargs) #: Holds the current frame number of the currently extracted @@ -578,6 +658,7 @@ def __del__(self): if self.framedir is not None: self.cleanup() + # Working with topologies # ----------------------- @@ -587,6 +668,7 @@ def __del__(self): grompp_warnonly = tools.Grompp(failure="warn") # grompp_warnonly.__doc__ += "\n\ngrompp wrapper that only warns on failure but does not raise :exc:`GromacsError`" + def grompp_qtot(*args, **kwargs): r"""Run ``gromacs.grompp`` and return the total charge of the system. @@ -612,10 +694,12 @@ def grompp_qtot(*args, **kwargs): :regexp:`System has non-zero total charge: *(?P[-+]?\d*\.\d+([eE][-+]\d+)?)`. """ - qtot_pattern = re.compile(r"System has non-zero total charge: *(?P[-+]?\d*\.\d+([eE][-+]\d+)?)") + qtot_pattern = re.compile( + r"System has non-zero total charge: *(?P[-+]?\d*\.\d+([eE][-+]\d+)?)" + ) # make sure to capture ALL output - kwargs['stdout'] = False - kwargs['stderr'] = False + kwargs["stdout"] = False + kwargs["stderr"] = False rc, output, error = grompp_warnonly(*args, **kwargs) gmxoutput = "\n".join([x for x in [output, error] if x is not None]) if rc != 0: @@ -623,24 +707,27 @@ def grompp_qtot(*args, **kwargs): msg = "grompp_qtot() failed. See warning and screen output for clues." logger.error(msg) import sys + sys.stderr.write("=========== grompp (stdout/stderr) ============\n") sys.stderr.write(gmxoutput) sys.stderr.write("===============================================\n") sys.stderr.flush() raise GromacsError(rc, msg) qtot = 0 - for line in gmxoutput.split('\n'): + for line in gmxoutput.split("\n"): m = qtot_pattern.search(line) if m: - qtot = float(m.group('qtot')) + qtot = float(m.group("qtot")) break logger.info("system total charge qtot = {qtot!r}".format(**vars())) return qtot + def _mdp_include_string(dirs): """Generate a string that can be added to a mdp 'include = ' line.""" include_paths = [os.path.expanduser(p) for p in dirs] - return ' -I'.join([''] + include_paths) + return " -I".join([""] + include_paths) + def add_mdp_includes(topology=None, kwargs=None): """Set the mdp *include* key in the *kwargs* dict. @@ -684,19 +771,22 @@ def add_mdp_includes(topology=None, kwargs=None): if kwargs is None: kwargs = {} - include_dirs = ['.', '..'] # should . & .. 
always be added? + include_dirs = [".", ".."] # should . & .. always be added? if topology is not None: # half-hack: find additional itps in the same directory as the topology topology_dir = os.path.dirname(topology) include_dirs.append(topology_dir) - include_dirs.extend(asiterable(kwargs.pop('includes', []))) # includes can be a list or a string + include_dirs.extend( + asiterable(kwargs.pop("includes", [])) + ) # includes can be a list or a string # 1. setdefault: we do nothing if user defined include # 2. modify input in place! - kwargs.setdefault('include', _mdp_include_string(include_dirs)) + kwargs.setdefault("include", _mdp_include_string(include_dirs)) return kwargs + def filter_grompp_options(**kwargs): """Returns one dictionary only containing valid :program:`grompp` options and everything else. @@ -706,14 +796,39 @@ def filter_grompp_options(**kwargs): .. versionadded:: 0.2.4 """ - grompp_options = ('f','po','c','r','rb','n','p','pp','o','t','e', # files - 'h', 'noh', 'version', 'noversion', 'nice', 'v', 'nov', - 'time', 'rmvsbds', 'normvsbds', 'maxwarn', 'zero', 'nozero', - 'renum', 'norenum') - grompp = dict((k,v) for k,v in kwargs.items() if k in grompp_options) - other = dict((k,v) for k,v in kwargs.items() if k not in grompp_options) + grompp_options = ( + "f", + "po", + "c", + "r", + "rb", + "n", + "p", + "pp", + "o", + "t", + "e", # files + "h", + "noh", + "version", + "noversion", + "nice", + "v", + "nov", + "time", + "rmvsbds", + "normvsbds", + "maxwarn", + "zero", + "nozero", + "renum", + "norenum", + ) + grompp = dict((k, v) for k, v in kwargs.items() if k in grompp_options) + other = dict((k, v) for k, v in kwargs.items() if k not in grompp_options) return grompp, other + def create_portable_topology(topol, struct, **kwargs): """Create a processed topology. @@ -742,40 +857,45 @@ def create_portable_topology(topol, struct, **kwargs): :Returns: full path to the processed topology """ _topoldir, _topol = os.path.split(topol) - processed = kwargs.pop('processed', os.path.join(_topoldir, 'pp_'+_topol)) + processed = kwargs.pop("processed", os.path.join(_topoldir, "pp_" + _topol)) grompp_kwargs, mdp_kwargs = filter_grompp_options(**kwargs) mdp_kwargs = add_mdp_includes(topol, mdp_kwargs) - with tempfile.NamedTemporaryFile(suffix='.mdp', mode='wb') as mdp: - mdp.write('; empty mdp file\ninclude = {include!s}\n'.format(**mdp_kwargs).encode('utf-8')) + with tempfile.NamedTemporaryFile(suffix=".mdp", mode="wb") as mdp: + mdp.write( + "; empty mdp file\ninclude = {include!s}\n".format(**mdp_kwargs).encode( + "utf-8" + ) + ) mdp.flush() - grompp_kwargs['p'] = topol - grompp_kwargs['pp'] = processed - grompp_kwargs['f'] = mdp.name - grompp_kwargs['c'] = struct - grompp_kwargs['v'] = False + grompp_kwargs["p"] = topol + grompp_kwargs["pp"] = processed + grompp_kwargs["f"] = mdp.name + grompp_kwargs["c"] = struct + grompp_kwargs["v"] = False try: gromacs.grompp(**grompp_kwargs) finally: - utilities.unlink_gmx('topol.tpr', 'mdout.mdp') + utilities.unlink_gmx("topol.tpr", "mdout.mdp") return utilities.realpath(processed) + def get_volume(f): """Return the volume in nm^3 of structure file *f*. 
(Uses :func:`gromacs.editconf`; error handling is not good) """ - fd, temp = tempfile.mkstemp('.gro') + fd, temp = tempfile.mkstemp(".gro") try: - rc,out,err = gromacs.editconf(f=f, o=temp, stdout=False) + rc, out, err = gromacs.editconf(f=f, o=temp, stdout=False) finally: os.unlink(temp) - return [float(x.split()[1]) for x in out.splitlines() - if x.startswith('Volume:')][0] + return [float(x.split()[1]) for x in out.splitlines() if x.startswith("Volume:")][0] # Editing textual input files # --------------------------- + def edit_mdp(mdp, new_mdp=None, extend_parameters=None, **substitutions): r"""Change values in a Gromacs mdp file. @@ -837,68 +957,77 @@ def edit_mdp(mdp, new_mdp=None, extend_parameters=None, **substitutions): if new_mdp is None: new_mdp = mdp if extend_parameters is None: - extend_parameters = ['include'] + extend_parameters = ["include"] else: extend_parameters = list(asiterable(extend_parameters)) # None parameters should be ignored (simple way to keep the template defaults) - substitutions = {k: v for k,v in substitutions.items() if v is not None} + substitutions = {k: v for k, v in substitutions.items() if v is not None} - params = list(substitutions.keys()) # list will be reduced for each match + params = list(substitutions.keys()) # list will be reduced for each match def demangled(p): """Return a RE string that matches the parameter.""" - return p.replace('_', '[-_]') # must catch either - or _ + return p.replace("_", "[-_]") # must catch either - or _ - patterns = {parameter: - re.compile(r""" + patterns = { + parameter: re.compile( + r""" (?P\s*{0!s}\s*=\s*) # parameter == everything before the value (?P[^;]*) # value (stop before comment=;) (?P\s*;.*)? # optional comment - """.format(demangled(parameter)), re.VERBOSE) - for parameter in substitutions} + """.format( + demangled(parameter) + ), + re.VERBOSE, + ) + for parameter in substitutions + } with tempfile.TemporaryFile() as target: - with open(mdp, 'rb') as src: + with open(mdp, "rb") as src: logger.info("editing mdp = {0!r}: {1!r}".format(mdp, substitutions.keys())) for line in src: - line = line.decode('utf-8') - new_line = line.strip() # \n must be stripped to ensure that new line is built without break + line = line.decode("utf-8") + new_line = ( + line.strip() + ) # \n must be stripped to ensure that new line is built without break for p in params[:]: m = patterns[p].match(new_line) if m: # I am too stupid to replace a specific region in the string so I rebuild it # (matching a line and then replacing value requires TWO re calls) - #print 'line:' + new_line - #print m.groupdict() - if m.group('comment') is None: - comment = '' + # print 'line:' + new_line + # print m.groupdict() + if m.group("comment") is None: + comment = "" else: - comment = " "+m.group('comment') - assignment = m.group('assignment') - if not assignment.endswith(' '): - assignment += ' ' + comment = " " + m.group("comment") + assignment = m.group("assignment") + if not assignment.endswith(" "): + assignment += " " # build new line piece-wise: new_line = assignment if p in extend_parameters: # keep original value and add new stuff at end - new_line += str(m.group('value')) + ' ' + new_line += str(m.group("value")) + " " # automatically transform lists into space-separated string values value = " ".join(map(str, asiterable(substitutions[p]))) new_line += value + comment params.remove(p) break - target.write((new_line+'\n').encode('utf-8')) + target.write((new_line + "\n").encode("utf-8")) target.seek(0) # XXX: Is there a danger of 
corrupting the original mdp if something went wrong? - with open(new_mdp, 'wb') as final: + with open(new_mdp, "wb") as final: shutil.copyfileobj(target, final) - # return all parameters that have NOT been substituted + # return all parameters that have NOT been substituted if len(params) > 0: logger.warning("Not substituted in {new_mdp!r}: {params!r}".format(**vars())) return {p: substitutions[p] for p in params} + def edit_txt(filename, substitutions, newname=None): """Primitive text file stream editor. @@ -953,36 +1082,42 @@ def edit_txt(filename, substitutions, newname=None): # No sanity checks (figure out later how to give decent diagnostics). # Filter out any rules that have None in replacement. - _substitutions = [{'lRE': re.compile(str(lRE)), - 'sRE': re.compile(str(sRE)), - 'repl': repl} - for lRE,sRE,repl in substitutions if repl is not None] + _substitutions = [ + {"lRE": re.compile(str(lRE)), "sRE": re.compile(str(sRE)), "repl": repl} + for lRE, sRE, repl in substitutions + if repl is not None + ] with tempfile.TemporaryFile() as target: - with open(filename, 'rb') as src: - logger.info("editing txt = {0!r} ({1:d} substitutions)".format(filename, len(substitutions))) + with open(filename, "rb") as src: + logger.info( + "editing txt = {0!r} ({1:d} substitutions)".format( + filename, len(substitutions) + ) + ) for line in src: line = line.decode("utf-8") keep_line = True for subst in _substitutions: - m = subst['lRE'].match(line) - if m: # apply substition to this line? - logger.debug('match: '+line.rstrip()) - if subst['repl'] is False: # special rule: delete line + m = subst["lRE"].match(line) + if m: # apply substition to this line? + logger.debug("match: " + line.rstrip()) + if subst["repl"] is False: # special rule: delete line keep_line = False - else: # standard replacement - line = subst['sRE'].sub(str(subst['repl']), line) - logger.debug('replaced: '+line.rstrip()) + else: # standard replacement + line = subst["sRE"].sub(str(subst["repl"]), line) + logger.debug("replaced: " + line.rstrip()) if keep_line: - target.write(line.encode('utf-8')) + target.write(line.encode("utf-8")) else: logger.debug("Deleting line %r", line) target.seek(0) - with open(newname, 'wb') as final: + with open(newname, "wb") as final: shutil.copyfileobj(target, final) logger.info("edited txt = {newname!r}".format(**vars())) + def remove_molecules_from_topology(filename, **kwargs): r"""Remove autogenerated [ molecules ] entries from *filename*. @@ -1024,7 +1159,7 @@ def remove_molecules_from_topology(filename, **kwargs): expression) are removed. Leading white space is ignored. ``None`` uses the default as described above. 
""" - marker = kwargs.pop('marker', None) + marker = kwargs.pop("marker", None) if marker is None: marker = "; Gromacs auto-generated entries follow:" logger.debug("Scrubbed [ molecules ]: marker = %(marker)r", vars()) @@ -1032,23 +1167,25 @@ def remove_molecules_from_topology(filename, **kwargs): p_marker = re.compile(r"\s*{0!s}".format(marker)) p_molecule = re.compile(r"\s*[\w+_-]+\s+\d+\s*(;.*)?$") with tempfile.TemporaryFile() as target: - with open(filename, 'rb') as src: + with open(filename, "rb") as src: autogenerated = False n_removed = 0 for line in src: - line = line.decode('utf-8') + line = line.decode("utf-8") if p_marker.match(line): autogenerated = True if autogenerated and p_molecule.match(line): n_removed += 1 continue # remove by skipping - target.write(line.encode('utf-8')) + target.write(line.encode("utf-8")) if autogenerated and n_removed > 0: target.seek(0) - with open(filename, 'wb') as final: # overwrite original! + with open(filename, "wb") as final: # overwrite original! shutil.copyfileobj(target, final) - logger.info("Removed %(n_removed)d autogenerated [ molecules ] from " - "topol = %(filename)r" % vars()) + logger.info( + "Removed %(n_removed)d autogenerated [ molecules ] from " + "topol = %(filename)r" % vars() + ) return n_removed @@ -1058,7 +1195,8 @@ def remove_molecules_from_topology(filename, **kwargs): #: compiled regular expression to match a list of index groups #: in the output of ``make_ndx``s (empty) command. -NDXLIST = re.compile(r""">\s+\n # '> ' marker line from '' input (input not echoed) +NDXLIST = re.compile( + r""">\s+\n # '> ' marker line from '' input (input not echoed) \n # empty line (?P # list of groups ( # consists of repeats of the same pattern: @@ -1067,14 +1205,20 @@ def remove_molecules_from_topology(filename, **kwargs): \s*\d+\satoms # number of atoms in group \n )+ # multiple repeats - )""", re.VERBOSE) + )""", + re.VERBOSE, +) #: compiled regular expression to match a single line of #: ``make_ndx`` output (e.g. after a successful group creation) -NDXGROUP = re.compile(r""" +NDXGROUP = re.compile( + r""" \s*(?P\d+) # group number \s+(?P[^\s]+)\s*: # group name, separator ':' \s*(?P\d+)\satoms # number of atoms in group - """, re.VERBOSE) + """, + re.VERBOSE, +) + def make_ndx_captured(**kwargs): """make_ndx that captures all output @@ -1094,12 +1238,13 @@ def make_ndx_captured(**kwargs): :Returns: (*returncode*, *output*, ``None``) """ - kwargs['stdout']=False # required for proper output as described in doc - user_input = kwargs.pop('input',[]) - user_input = [cmd for cmd in user_input if cmd != 'q'] # filter any quit - kwargs['input'] = user_input + ['', 'q'] # necessary commands + kwargs["stdout"] = False # required for proper output as described in doc + user_input = kwargs.pop("input", []) + user_input = [cmd for cmd in user_input if cmd != "q"] # filter any quit + kwargs["input"] = user_input + ["", "q"] # necessary commands return gromacs.make_ndx(**kwargs) + def get_ndx_groups(ndx, **kwargs): """Return a list of index groups in the index file *ndx*. @@ -1113,14 +1258,15 @@ def get_ndx_groups(ndx, **kwargs): Alternatively, load the index file with :class:`gromacs.formats.NDX` for full control. 
""" - fd, tmp_ndx = tempfile.mkstemp(suffix='.ndx') - kwargs['o'] = tmp_ndx + fd, tmp_ndx = tempfile.mkstemp(suffix=".ndx") + kwargs["o"] = tmp_ndx try: g = parse_ndxlist(make_ndx_captured(n=ndx, **kwargs)[1]) finally: utilities.unlink_gmx(tmp_ndx) return g + def parse_ndxlist(output): """Parse output from make_ndx to build list of index groups:: @@ -1148,22 +1294,28 @@ def parse_ndxlist(output): """ - m = NDXLIST.search(output) # make sure we pick up a proper full list - grouplist = m.group('LIST') + m = NDXLIST.search(output) # make sure we pick up a proper full list + grouplist = m.group("LIST") return parse_groups(grouplist) + def parse_groups(output): """Parse ``make_ndx`` output and return groups as a list of dicts.""" groups = [] - for line in output.split('\n'): + for line in output.split("\n"): m = NDXGROUP.match(line) if m: d = m.groupdict() - groups.append({'name': d['GROUPNAME'], - 'nr': int(d['GROUPNUMBER']), - 'natoms': int(d['NATOMS'])}) + groups.append( + { + "name": d["GROUPNAME"], + "nr": int(d["GROUPNUMBER"]), + "natoms": int(d["NATOMS"]), + } + ) return groups + class IndexBuilder(object): """Build an index file with specified groups and the combined group. @@ -1216,8 +1368,16 @@ class IndexBuilder(object): """ - def __init__(self, struct=None, selections=None, names=None, name_all=None, - ndx=None, out_ndx="selection.ndx", offset=0): + def __init__( + self, + struct=None, + selections=None, + names=None, + name_all=None, + ndx=None, + out_ndx="selection.ndx", + offset=0, + ): """Build a index group from the selection arguments. If selections and a structure file are supplied then the individual @@ -1305,14 +1465,19 @@ def __init__(self, struct=None, selections=None, names=None, name_all=None, #: Specialized ``make_ndx`` that always uses same structure #: and redirection (can be overridden) - self.make_ndx = tools.Make_ndx(f=self.structure, n=self.ndx, - stdout=False, stderr=False) + self.make_ndx = tools.Make_ndx( + f=self.structure, n=self.ndx, stdout=False, stderr=False + ) #: dict, keyed by group name and pointing to index file for group #: (Groups are built in separate files because that is more robust #: as I can clear groups easily.) - self.indexfiles = dict([self.parse_selection(selection, name) - for selection, name in zip(selections, names)]) + self.indexfiles = dict( + [ + self.parse_selection(selection, name) + for selection, name in zip(selections, names) + ] + ) @property def names(self): @@ -1326,10 +1491,14 @@ def gmx_resid(self, resid): except (TypeError, IndexError): gmx_resid = resid + self.offset except KeyError: - raise KeyError("offset must be a dict that contains the gmx resid for {0:d}".format(resid)) + raise KeyError( + "offset must be a dict that contains the gmx resid for {0:d}".format( + resid + ) + ) return gmx_resid - def combine(self, name_all=None, out_ndx=None, operation='|', defaultgroups=False): + def combine(self, name_all=None, out_ndx=None, operation="|", defaultgroups=False): """Combine individual groups into a single one and write output. :Keywords: @@ -1362,9 +1531,12 @@ def combine(self, name_all=None, out_ndx=None, operation='|', defaultgroups=Fals .. SeeAlso:: :meth:`IndexBuilder.write`. 
""" - if not operation in ('|', '&', False): - raise ValueError("Illegal operation {0!r}, only '|' (OR) and '&' (AND) or False allowed.".format( - operation)) + if not operation in ("|", "&", False): + raise ValueError( + "Illegal operation {0!r}, only '|' (OR) and '&' (AND) or False allowed.".format( + operation + ) + ) if name_all is None and operation: name_all = self.name_all or operation.join(self.indexfiles) if out_ndx is None: @@ -1372,9 +1544,9 @@ def combine(self, name_all=None, out_ndx=None, operation='|', defaultgroups=Fals if defaultgroups: # make a default file (using the original ndx where provided!!) - fd, default_ndx = tempfile.mkstemp(suffix='.ndx', prefix='default__') + fd, default_ndx = tempfile.mkstemp(suffix=".ndx", prefix="default__") try: - self.make_ndx(o=default_ndx, input=['q']) + self.make_ndx(o=default_ndx, input=["q"]) except: utilities.unlink_gmx(default_ndx) raise @@ -1387,22 +1559,29 @@ def combine(self, name_all=None, out_ndx=None, operation='|', defaultgroups=Fals if operation: # combine multiple selections and name them try: - fd, tmp_ndx = tempfile.mkstemp(suffix='.ndx', prefix='combined__') + fd, tmp_ndx = tempfile.mkstemp(suffix=".ndx", prefix="combined__") # combine all selections by loading ALL temporary index files - operation = ' '+operation.strip()+' ' - cmd = [operation.join(['"{0!s}"'.format(gname) for gname in self.indexfiles]), - '', 'q'] - rc,out,err = self.make_ndx(n=ndxfiles, o=tmp_ndx, input=cmd) + operation = " " + operation.strip() + " " + cmd = [ + operation.join( + ['"{0!s}"'.format(gname) for gname in self.indexfiles] + ), + "", + "q", + ] + rc, out, err = self.make_ndx(n=ndxfiles, o=tmp_ndx, input=cmd) if self._is_empty_group(out): - warnings.warn("No atoms found for {cmd!r}".format(**vars()), - category=BadParameterWarning) + warnings.warn( + "No atoms found for {cmd!r}".format(**vars()), + category=BadParameterWarning, + ) # second pass for naming, sigh (or: use NDX ?) 
groups = parse_ndxlist(out) last = groups[-1] # name this group - name_cmd = ["name {0:d} {1!s}".format(last['nr'], name_all), 'q'] - rc,out,err = self.make_ndx(n=tmp_ndx, o=out_ndx, input=name_cmd) + name_cmd = ["name {0:d} {1!s}".format(last["nr"], name_all), "q"] + rc, out, err = self.make_ndx(n=tmp_ndx, o=out_ndx, input=name_cmd) # For debugging, look at out and err or set stdout=True, stderr=True # TODO: check out if at least 1 atom selected ##print "DEBUG: combine()" @@ -1413,13 +1592,15 @@ def combine(self, name_all=None, out_ndx=None, operation='|', defaultgroups=Fals utilities.unlink_gmx(default_ndx) else: # just write individual groups in one file (name_all --> None) - rc,out,err = self.make_ndx(n=ndxfiles, o=out_ndx, input=['','q']) + rc, out, err = self.make_ndx(n=ndxfiles, o=out_ndx, input=["", "q"]) return name_all, out_ndx def write(self, out_ndx=None, defaultgroups=False): """Write individual (named) groups to *out_ndx*.""" - name_all, out_ndx = self.combine(operation=False, out_ndx=out_ndx, defaultgroups=defaultgroups) + name_all, out_ndx = self.combine( + operation=False, out_ndx=out_ndx, defaultgroups=defaultgroups + ) return out_ndx def cat(self, out_ndx=None): @@ -1436,7 +1617,7 @@ def cat(self, out_ndx=None): """ if out_ndx is None: out_ndx = self.output - self.make_ndx(o=out_ndx, input=['q']) + self.make_ndx(o=out_ndx, input=["q"]) return out_ndx def parse_selection(self, selection, name=None): @@ -1445,7 +1626,7 @@ def parse_selection(self, selection, name=None): if type(selection) is tuple: # range process = self._process_range - elif selection.startswith('@'): + elif selection.startswith("@"): # verbatim make_ndx command process = self._process_command selection = selection[1:] @@ -1463,11 +1644,15 @@ def _process_command(self, command, name=None): # Need to build it with two make_ndx calls because I cannot reliably # name the new group without knowing its number. 
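        # Sketch of that two-pass pattern (the selection, file names and group
        # name here are hypothetical)::
        #
        #   >>> rc, out, err = self.make_ndx(o="tmp.ndx", input=["r 300 & a CA", "", "q"])
        #   >>> nr = parse_ndxlist(out)[-1]["nr"]      # number assigned by make_ndx
        #   >>> self.make_ndx(n="tmp.ndx", o="sel.ndx",
        #   ...               input=["keep {0:d}".format(nr), "name 0 mygroup", "q"])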
try: - fd, tmp_ndx = tempfile.mkstemp(suffix='.ndx', prefix='tmp_'+name+'__') - cmd = [command, '', 'q'] # empty command '' necessary to get list + fd, tmp_ndx = tempfile.mkstemp(suffix=".ndx", prefix="tmp_" + name + "__") + cmd = [command, "", "q"] # empty command '' necessary to get list # This sometimes fails with 'OSError: Broken Pipe' --- hard to debug - rc,out,err = self.make_ndx(o=tmp_ndx, input=cmd) - self.check_output(out, "No atoms found for selection {command!r}.".format(**vars()), err=err) + rc, out, err = self.make_ndx(o=tmp_ndx, input=cmd) + self.check_output( + out, + "No atoms found for selection {command!r}.".format(**vars()), + err=err, + ) # For debugging, look at out and err or set stdout=True, stderr=True # TODO: check ' 0 r_300_&_ALA_&_O : 1 atoms' has at least 1 atom ##print "DEBUG: _process_command()" @@ -1475,17 +1660,21 @@ def _process_command(self, command, name=None): groups = parse_ndxlist(out) last = groups[-1] # reduce and name this group - fd, ndx = tempfile.mkstemp(suffix='.ndx', prefix=name+'__') - name_cmd = ["keep {0:d}".format(last['nr']), - "name 0 {0!s}".format(name), 'q'] - rc,out,err = self.make_ndx(n=tmp_ndx, o=ndx, input=name_cmd) + fd, ndx = tempfile.mkstemp(suffix=".ndx", prefix=name + "__") + name_cmd = [ + "keep {0:d}".format(last["nr"]), + "name 0 {0!s}".format(name), + "q", + ] + rc, out, err = self.make_ndx(n=tmp_ndx, o=ndx, input=name_cmd) finally: utilities.unlink_gmx(tmp_ndx) return name, ndx #: regular expression to match and parse a residue-atom selection - RESIDUE = re.compile(r""" + RESIDUE = re.compile( + r""" (?P([ACDEFGHIKLMNPQRSTVWY]) # 1-letter amino acid | # or ([A-Z][A-Z][A-Z][A-Z]?) # 3-letter or 4-letter residue name @@ -1494,39 +1683,41 @@ def _process_command(self, command, name=None): (: # separator ':' (?P\w+) # atom name )? # possibly one - """, re.VERBOSE | re.IGNORECASE) + """, + re.VERBOSE | re.IGNORECASE, + ) def _process_residue(self, selection, name=None): """Process residue/atom selection and return name and temp index file.""" if name is None: - name = selection.replace(':', '_') + name = selection.replace(":", "_") # XXX: use _translate_residue() .... 
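        # Examples of selections accepted by the RESIDUE pattern above (residue
        # numbers and atom names are hypothetical), e.g.
        #
        #   "S312:OG"   1-letter code: resname SER via convert_aa_code, resid 312, atom OG
        #   "ALA47"     3-letter code kept as-is, resid 47, atom defaults to CA below
        #
        # The resid is still passed through gmx_resid() so that an offset or a
        # per-residue mapping is applied before the make_ndx selection is built.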
m = self.RESIDUE.match(selection) if not m: raise ValueError("Selection {selection!r} is not valid.".format(**vars())) - gmx_resid = self.gmx_resid(int(m.group('resid'))) - residue = m.group('aa') + gmx_resid = self.gmx_resid(int(m.group("resid"))) + residue = m.group("aa") if len(residue) == 1: - gmx_resname = utilities.convert_aa_code(residue) # only works for AA + gmx_resname = utilities.convert_aa_code(residue) # only works for AA else: - gmx_resname = residue # use 3-letter for any resname - gmx_atomname = m.group('atom') + gmx_resname = residue # use 3-letter for any resname + gmx_atomname = m.group("atom") if gmx_atomname is None: - gmx_atomname = 'CA' + gmx_atomname = "CA" #: select residue atom - _selection = 'r {gmx_resid:d} & r {gmx_resname!s} & a {gmx_atomname!s}'.format(**vars()) - cmd = ['keep 0', 'del 0', - _selection, - 'name 0 {name!s}'.format(**vars()), - 'q'] - fd, ndx = tempfile.mkstemp(suffix='.ndx', prefix=name+'__') - rc,out,err = self.make_ndx(n=self.ndx, o=ndx, input=cmd) - self.check_output(out, "No atoms found for " - "%(selection)r --> %(_selection)r" % vars()) + _selection = "r {gmx_resid:d} & r {gmx_resname!s} & a {gmx_atomname!s}".format( + **vars() + ) + cmd = ["keep 0", "del 0", _selection, "name 0 {name!s}".format(**vars()), "q"] + fd, ndx = tempfile.mkstemp(suffix=".ndx", prefix=name + "__") + rc, out, err = self.make_ndx(n=self.ndx, o=ndx, input=cmd) + self.check_output( + out, "No atoms found for " "%(selection)r --> %(_selection)r" % vars() + ) # For debugging, look at out and err or set stdout=True, stderr=True ##print "DEBUG: _process_residue()" ##print out @@ -1547,7 +1738,7 @@ def _process_range(self, selection, name=None): except ValueError: try: first, last = selection - gmx_atomname = '*' + gmx_atomname = "*" except: logger.error("%r is not a valid range selection", selection) raise @@ -1557,23 +1748,22 @@ def _process_range(self, selection, name=None): _first = self._translate_residue(first, default_atomname=gmx_atomname) _last = self._translate_residue(last, default_atomname=gmx_atomname) - _selection = 'r {0:d} - {1:d} & & a {2!s}'.format(_first['resid'], _last['resid'], gmx_atomname) - cmd = ['keep 0', 'del 0', - _selection, - 'name 0 {name!s}'.format(**vars()), - 'q'] - fd, ndx = tempfile.mkstemp(suffix='.ndx', prefix=name+'__') - rc,out,err = self.make_ndx(n=self.ndx, o=ndx, input=cmd) - self.check_output(out, "No atoms found for " - "%(selection)r --> %(_selection)r" % vars()) + _selection = "r {0:d} - {1:d} & & a {2!s}".format( + _first["resid"], _last["resid"], gmx_atomname + ) + cmd = ["keep 0", "del 0", _selection, "name 0 {name!s}".format(**vars()), "q"] + fd, ndx = tempfile.mkstemp(suffix=".ndx", prefix=name + "__") + rc, out, err = self.make_ndx(n=self.ndx, o=ndx, input=cmd) + self.check_output( + out, "No atoms found for " "%(selection)r --> %(_selection)r" % vars() + ) # For debugging, look at out and err or set stdout=True, stderr=True ##print "DEBUG: _process_residue()" ##print out return name, ndx - - def _translate_residue(self, selection, default_atomname='CA'): + def _translate_residue(self, selection, default_atomname="CA"): """Translate selection for a single res to make_ndx syntax.""" m = self.RESIDUE.match(selection) if not m: @@ -1581,53 +1771,59 @@ def _translate_residue(self, selection, default_atomname='CA'): logger.error(errmsg) raise ValueError(errmsg) - gmx_resid = self.gmx_resid(int(m.group('resid'))) # magic offset correction - residue = m.group('aa') + gmx_resid = self.gmx_resid(int(m.group("resid"))) # magic 
offset correction + residue = m.group("aa") if len(residue) == 1: - gmx_resname = utilities.convert_aa_code(residue) # only works for AA + gmx_resname = utilities.convert_aa_code(residue) # only works for AA else: - gmx_resname = residue # use 3-letter for any resname + gmx_resname = residue # use 3-letter for any resname - gmx_atomname = m.group('atom') + gmx_atomname = m.group("atom") if gmx_atomname is None: gmx_atomname = default_atomname - return {'resname':gmx_resname, 'resid':gmx_resid, 'atomname':gmx_atomname} - - + return {"resname": gmx_resname, "resid": gmx_resid, "atomname": gmx_atomname} def check_output(self, make_ndx_output, message=None, err=None): """Simple tests to flag problems with a ``make_ndx`` run.""" if message is None: message = "" else: - message = '\n' + message + message = "\n" + message + def format(output, w=60): - hrule = "====[ GromacsError (diagnostic output) ]".ljust(w,"=") - return hrule + '\n' + str(output) + hrule + hrule = "====[ GromacsError (diagnostic output) ]".ljust(w, "=") + return hrule + "\n" + str(output) + hrule rc = True if self._is_empty_group(make_ndx_output): - warnings.warn("Selection produced empty group.{message!s}".format(**vars()), category=GromacsValueWarning) + warnings.warn( + "Selection produced empty group.{message!s}".format(**vars()), + category=GromacsValueWarning, + ) rc = False if self._has_syntax_error(make_ndx_output): rc = False out_formatted = format(make_ndx_output) - raise GromacsError("make_ndx encountered a Syntax Error, " - "%(message)s\noutput:\n%(out_formatted)s" % vars()) + raise GromacsError( + "make_ndx encountered a Syntax Error, " + "%(message)s\noutput:\n%(out_formatted)s" % vars() + ) if make_ndx_output.strip() == "": rc = False out_formatted = format(err) - raise GromacsError("make_ndx produced no output, " - "%(message)s\nerror output:\n%(out_formatted)s" % vars()) + raise GromacsError( + "make_ndx produced no output, " + "%(message)s\nerror output:\n%(out_formatted)s" % vars() + ) return rc def _is_empty_group(self, make_ndx_output): - m = re.search('Group is empty', make_ndx_output) + m = re.search("Group is empty", make_ndx_output) return m is not None def _has_syntax_error(self, make_ndx_output): - m = re.search('Syntax error:', make_ndx_output) + m = re.search("Syntax error:", make_ndx_output) return m is not None def __del__(self): @@ -1653,8 +1849,15 @@ class Transformer(utilities.FileUtils): """ - def __init__(self, s="topol.tpr", f="traj.xtc", n=None, force=None, - dirname=os.path.curdir, outdir=None): + def __init__( + self, + s="topol.tpr", + f="traj.xtc", + n=None, + force=None, + dirname=os.path.curdir, + outdir=None, + ): """Set up Transformer with structure and trajectory. Supply *n* = tpr, *f* = xtc (and *n* = ndx) relative to dirname. 
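# A minimal usage sketch for Transformer as reformatted below (file and
# directory names are hypothetical and a sourced GROMACS installation is
# assumed)::
#
#   >>> from gromacs.cbook import Transformer
#   >>> T = Transformer(s="md.tpr", f="md.xtc", n="md.ndx", dirname="MD")
#   >>> T.center_fit()
#   {'tpr': '.../MD/md.tpr', 'xtc': '.../MD/md_centfit.xtc'}
#
# center_fit() skips the run if the output trajectory already exists, unless
# force=True is passed (or set on the Transformer), mirroring the
# check_file_exists() call in its body.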
@@ -1691,22 +1894,28 @@ def __init__(self, s="topol.tpr", f="traj.xtc", n=None, force=None, self.dirname = dirname self.outdir = utilities.realpath(outdir) if outdir is not None else None self.force = force - self.nowater = {} # data for trajectory stripped from water - self.proteinonly = {} # data for a protein-only trajectory + self.nowater = {} # data for trajectory stripped from water + self.proteinonly = {} # data for a protein-only trajectory with utilities.in_dir(self.dirname, create=False): for f in (self.tpr, self.xtc, self.ndx): if f is None: continue if not os.path.exists(f): - msg = "Possible problem: File {f!r} not found in {dirname!r}.".format(**vars()) + msg = ( + "Possible problem: File {f!r} not found in {dirname!r}.".format( + **vars() + ) + ) warnings.warn(msg, category=MissingDataWarning) logger.warning(msg) logger.info("%r initialised", self) def __repr__(self): - return "{0!s}(s={1!r}, f={2!r}, n={3!r}, force={4!r})".format(self.__class__.__name__, - self.tpr, self.xtc, self.ndx, self.force) + return "{0!s}(s={1!r}, f={2!r}, n={3!r}, force={4!r})".format( + self.__class__.__name__, self.tpr, self.xtc, self.ndx, self.force + ) + def outfile(self, p): """Path for an output file. @@ -1721,8 +1930,8 @@ def outfile(self, p): def rp(self, *args): """Return canonical path to file under *dirname* with components *args* - If *args* form an absolute path then just return it as the absolute path. - """ + If *args* form an absolute path then just return it as the absolute path. + """ try: p = os.path.join(*args) if os.path.isabs(p): @@ -1759,18 +1968,20 @@ def center_fit(self, **kwargs): dictionary with keys *tpr*, *xtc*, which are the names of the the new files """ - kwargs.setdefault('s', self.tpr) - kwargs.setdefault('n', self.ndx) - kwargs['f'] = self.xtc - kwargs.setdefault('o', self.outfile(self.infix_filename(None, self.xtc, '_centfit', 'xtc'))) - force = kwargs.pop('force', self.force) + kwargs.setdefault("s", self.tpr) + kwargs.setdefault("n", self.ndx) + kwargs["f"] = self.xtc + kwargs.setdefault( + "o", self.outfile(self.infix_filename(None, self.xtc, "_centfit", "xtc")) + ) + force = kwargs.pop("force", self.force) logger.info("Centering and fitting trajectory {f!r}...".format(**kwargs)) with utilities.in_dir(self.dirname): - if not self.check_file_exists(kwargs['o'], resolve="indicate", force=force): + if not self.check_file_exists(kwargs["o"], resolve="indicate", force=force): trj_fitandcenter(**kwargs) logger.info("Centered and fit trajectory: {o!r}.".format(**kwargs)) - return {'tpr': self.rp(kwargs['s']), 'xtc': self.rp(kwargs['o'])} + return {"tpr": self.rp(kwargs["s"]), "xtc": self.rp(kwargs["o"])} def fit(self, xy=False, **kwargs): """Write xtc that is fitted to the tpr reference structure. 
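# Continuing the sketch above, fit() (next hunk) derives its output name from
# the input trajectory, an infix and any dt value; the call is hypothetical::
#
#   >>> T.fit(xy=True, dt=100)     # fitmode 'rotxy+transxy', default fitgroup 'backbone'
#   {'tpr': '.../MD/md.tpr', 'xtc': '.../MD/md_fitxy_dt100ps.xtc'}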
@@ -1817,44 +2028,64 @@ def fit(self, xy=False, **kwargs): the new files """ - kwargs.setdefault('s', self.tpr) - kwargs.setdefault('n', self.ndx) - kwargs['f'] = self.xtc - force = kwargs.pop('force', self.force) + kwargs.setdefault("s", self.tpr) + kwargs.setdefault("n", self.ndx) + kwargs["f"] = self.xtc + force = kwargs.pop("force", self.force) if xy: - fitmode = 'rotxy+transxy' - kwargs.pop('fit', None) - infix_default = '_fitxy' + fitmode = "rotxy+transxy" + kwargs.pop("fit", None) + infix_default = "_fitxy" else: - fitmode = kwargs.pop('fit', 'rot+trans') # user can use 'progressive', too - infix_default = '_fit' + fitmode = kwargs.pop("fit", "rot+trans") # user can use 'progressive', too + infix_default = "_fit" - dt = kwargs.get('dt') + dt = kwargs.get("dt") if dt: - infix_default += '_dt{0:d}ps'.format(int(dt)) # dt in ps - - kwargs.setdefault('o', self.outfile(self.infix_filename(None, self.xtc, infix_default, 'xtc'))) - fitgroup = kwargs.pop('fitgroup', 'backbone') - kwargs.setdefault('input', [fitgroup, "system"]) - - if kwargs.get('center', False): - logger.warning("Transformer.fit(): center=%(center)r used: centering should not be combined with fitting.", kwargs) - if len(kwargs['inputs']) != 3: - logger.error("If you insist on centering you must provide three groups in the 'input' kwarg: (center, fit, output)") - raise ValuError("Insufficient index groups for centering,fitting,output") - - logger.info("Fitting trajectory %r to with xy=%r...", kwargs['f'], xy) + infix_default += "_dt{0:d}ps".format(int(dt)) # dt in ps + + kwargs.setdefault( + "o", self.outfile(self.infix_filename(None, self.xtc, infix_default, "xtc")) + ) + fitgroup = kwargs.pop("fitgroup", "backbone") + kwargs.setdefault("input", [fitgroup, "system"]) + + if kwargs.get("center", False): + logger.warning( + "Transformer.fit(): center=%(center)r used: centering should not be combined with fitting.", + kwargs, + ) + if len(kwargs["inputs"]) != 3: + logger.error( + "If you insist on centering you must provide three groups in the 'input' kwarg: (center, fit, output)" + ) + raise ValuError( + "Insufficient index groups for centering,fitting,output" + ) + + logger.info("Fitting trajectory %r to with xy=%r...", kwargs["f"], xy) logger.info("Fitting on index group %(fitgroup)r", vars()) with utilities.in_dir(self.dirname): - if self.check_file_exists(kwargs['o'], resolve="indicate", force=force): - logger.warning("File %r exists; force regenerating it with force=True.", kwargs['o']) + if self.check_file_exists(kwargs["o"], resolve="indicate", force=force): + logger.warning( + "File %r exists; force regenerating it with force=True.", + kwargs["o"], + ) else: gromacs.trjconv(fit=fitmode, **kwargs) - logger.info("Fitted trajectory (fitmode=%s): %r.", fitmode, kwargs['o']) - return {'tpr': self.rp(kwargs['s']), 'xtc': self.rp(kwargs['o'])} - - def strip_water(self, os=None, o=None, on=None, compact=False, - resn="SOL", groupname="notwater", **kwargs): + logger.info("Fitted trajectory (fitmode=%s): %r.", fitmode, kwargs["o"]) + return {"tpr": self.rp(kwargs["s"]), "xtc": self.rp(kwargs["o"])} + + def strip_water( + self, + os=None, + o=None, + on=None, + compact=False, + resn="SOL", + groupname="notwater", + **kwargs + ): """Write xtc and tpr with water (by resname) removed. :Keywords: @@ -1904,38 +2135,57 @@ def strip_water(self, os=None, o=None, on=None, compact=False, (This appears to be a bug in Gromacs 4.x.) 
""" - force = kwargs.pop('force', self.force) + force = kwargs.pop("force", self.force) - newtpr = self.outfile(self.infix_filename(os, self.tpr, '_nowater')) - newxtc = self.outfile(self.infix_filename(o, self.xtc, '_nowater')) - newndx = self.outfile(self.infix_filename(on, self.tpr, '_nowater', 'ndx')) + newtpr = self.outfile(self.infix_filename(os, self.tpr, "_nowater")) + newxtc = self.outfile(self.infix_filename(o, self.xtc, "_nowater")) + newndx = self.outfile(self.infix_filename(on, self.tpr, "_nowater", "ndx")) - nowater_ndx = self._join_dirname(newtpr, "nowater.ndx") # refers to original tpr + nowater_ndx = self._join_dirname( + newtpr, "nowater.ndx" + ) # refers to original tpr if compact: TRJCONV = trj_compact # input overrides centergroup - if kwargs.get('centergroup') is not None and 'input' in kwargs: - logger.warning("centergroup = %r will be superceded by input[0] = %r", kwargs['centergroup'], kwargs['input'][0]) - _input = kwargs.get('input', [kwargs.get('centergroup', 'Protein')]) - kwargs['input'] = [_input[0], groupname] # [center group, write-out selection] + if kwargs.get("centergroup") is not None and "input" in kwargs: + logger.warning( + "centergroup = %r will be superceded by input[0] = %r", + kwargs["centergroup"], + kwargs["input"][0], + ) + _input = kwargs.get("input", [kwargs.get("centergroup", "Protein")]) + kwargs["input"] = [ + _input[0], + groupname, + ] # [center group, write-out selection] del _input - logger.info("Creating a compact trajectory centered on group %r", kwargs['input'][0]) - logger.info("Writing %r to the output trajectory", kwargs['input'][1]) + logger.info( + "Creating a compact trajectory centered on group %r", kwargs["input"][0] + ) + logger.info("Writing %r to the output trajectory", kwargs["input"][1]) else: TRJCONV = gromacs.trjconv - kwargs['input'] = [groupname] - logger.info("Writing %r to the output trajectory (no centering)", kwargs['input'][0]) + kwargs["input"] = [groupname] + logger.info( + "Writing %r to the output trajectory (no centering)", kwargs["input"][0] + ) # clean kwargs, only legal arguments for Gromacs tool trjconv should remain kwargs.pop("centergroup", None) - NOTwater = "! r {resn!s}".format(**vars()) # make_ndx selection ("not water residues") + NOTwater = "! r {resn!s}".format( + **vars() + ) # make_ndx selection ("not water residues") with utilities.in_dir(self.dirname): # ugly because I cannot break from the block if not self.check_file_exists(newxtc, resolve="indicate", force=force): # make no-water index - B = IndexBuilder(struct=self.tpr, selections=['@'+NOTwater], - ndx=self.ndx, out_ndx=nowater_ndx) + B = IndexBuilder( + struct=self.tpr, + selections=["@" + NOTwater], + ndx=self.ndx, + out_ndx=nowater_ndx, + ) B.combine(name_all=groupname, operation="|", defaultgroups=True) logger.debug("Index file for water removal: %r", nowater_ndx) @@ -1943,40 +2193,45 @@ def strip_water(self, os=None, o=None, on=None, compact=False, gromacs.tpbconv(s=self.tpr, o=newtpr, n=nowater_ndx, input=[groupname]) logger.info("NDX of the new system %r", newndx) - gromacs.make_ndx(f=newtpr, o=newndx, input=['q'], stderr=False, stdout=False) + gromacs.make_ndx( + f=newtpr, o=newndx, input=["q"], stderr=False, stdout=False + ) # PROBLEM: If self.ndx contained a custom group required for fitting then we are loosing # this group here. We could try to merge only this group but it is possible that # atom indices changed. The only way to solve this is to regenerate the group with # a selection or only use Gromacs default groups. 
logger.info("Trajectory without water {newxtc!r}".format(**vars())) - kwargs['s'] = self.tpr - kwargs['f'] = self.xtc - kwargs['n'] = nowater_ndx - kwargs['o'] = newxtc + kwargs["s"] = self.tpr + kwargs["f"] = self.xtc + kwargs["n"] = nowater_ndx + kwargs["o"] = newxtc TRJCONV(**kwargs) logger.info("pdb and gro for visualization") - for ext in 'pdb', 'gro': + for ext in "pdb", "gro": try: # see warning in doc ... so we don't use the new xtc but the old one - kwargs['o'] = self.filename(newtpr, ext=ext) + kwargs["o"] = self.filename(newtpr, ext=ext) TRJCONV(dump=0, stdout=False, stderr=False, **kwargs) # silent except: - logger.exception("Failed building the water-less %(ext)s. " - "Position restraints in tpr file (see docs)?" % vars()) + logger.exception( + "Failed building the water-less %(ext)s. " + "Position restraints in tpr file (see docs)?" % vars() + ) logger.info("strip_water() complete") - self.nowater[self.rp(newxtc)] = Transformer(dirname=self.dirname, s=newtpr, - f=newxtc, n=newndx, force=force) - return {'tpr':self.rp(newtpr), 'xtc':self.rp(newxtc), 'ndx':self.rp(newndx)} - + self.nowater[self.rp(newxtc)] = Transformer( + dirname=self.dirname, s=newtpr, f=newxtc, n=newndx, force=force + ) + return {"tpr": self.rp(newtpr), "xtc": self.rp(newxtc), "ndx": self.rp(newndx)} # TODO: could probably unify strip_water() and keep_protein_only() # (given that the latter was produced by copy&paste+search&replace...) - def keep_protein_only(self, os=None, o=None, on=None, compact=False, - groupname="proteinonly", **kwargs): + def keep_protein_only( + self, os=None, o=None, on=None, compact=False, groupname="proteinonly", **kwargs + ): """Write xtc and tpr only containing the protein. :Keywords: @@ -2019,60 +2274,76 @@ def keep_protein_only(self, os=None, o=None, on=None, compact=False, (This appears to be a bug in Gromacs 4.x.) """ - force = kwargs.pop('force', self.force) - suffix = 'proteinonly' - newtpr = self.outfile(self.infix_filename(os, self.tpr, '_'+suffix)) - newxtc = self.outfile(self.infix_filename(o, self.xtc, '_'+suffix)) - newndx = self.outfile(self.infix_filename(on, self.tpr, '_'+suffix, 'ndx')) + force = kwargs.pop("force", self.force) + suffix = "proteinonly" + newtpr = self.outfile(self.infix_filename(os, self.tpr, "_" + suffix)) + newxtc = self.outfile(self.infix_filename(o, self.xtc, "_" + suffix)) + newndx = self.outfile(self.infix_filename(on, self.tpr, "_" + suffix, "ndx")) - selection_ndx = suffix+".ndx" # refers to original tpr + selection_ndx = suffix + ".ndx" # refers to original tpr if compact: TRJCONV = trj_compact - _input = kwargs.get('input', ['Protein']) - kwargs['input'] = [_input[0], groupname] # [center group, write-out selection] + _input = kwargs.get("input", ["Protein"]) + kwargs["input"] = [ + _input[0], + groupname, + ] # [center group, write-out selection] del _input else: TRJCONV = gromacs.trjconv - kwargs['input'] = [groupname] + kwargs["input"] = [groupname] - selections = ['@'+sel for sel in ['"Protein"'] + kwargs.pop('keepalso',[])] + selections = ["@" + sel for sel in ['"Protein"'] + kwargs.pop("keepalso", [])] with utilities.in_dir(self.dirname): # ugly because I cannot break from the block if not self.check_file_exists(newxtc, resolve="indicate", force=force): # make index (overkill for 'Protein' but maybe we want to enhance # it in the future, e.g. with keeping ions/ligands as well? 
- B = IndexBuilder(struct=self.tpr, selections=selections, - ndx=self.ndx, out_ndx=selection_ndx) + B = IndexBuilder( + struct=self.tpr, + selections=selections, + ndx=self.ndx, + out_ndx=selection_ndx, + ) B.combine(name_all=groupname, operation="|", defaultgroups=True) logger.info("TPR file containg the protein {newtpr!r}".format(**vars())) - gromacs.tpbconv(s=self.tpr, o=newtpr, n=selection_ndx, input=[groupname]) + gromacs.tpbconv( + s=self.tpr, o=newtpr, n=selection_ndx, input=[groupname] + ) logger.info("NDX of the new system {newndx!r}".format(**vars())) - gromacs.make_ndx(f=newtpr, o=newndx, input=['q'], stderr=False, stdout=False) - - logger.info("Trajectory with only the protein {newxtc!r}".format(**vars())) - kwargs['s'] = self.tpr - kwargs['f'] = self.xtc - kwargs['n'] = selection_ndx - kwargs['o'] = newxtc + gromacs.make_ndx( + f=newtpr, o=newndx, input=["q"], stderr=False, stdout=False + ) + + logger.info( + "Trajectory with only the protein {newxtc!r}".format(**vars()) + ) + kwargs["s"] = self.tpr + kwargs["f"] = self.xtc + kwargs["n"] = selection_ndx + kwargs["o"] = newxtc TRJCONV(**kwargs) logger.info("pdb and gro for visualization") - for ext in 'pdb', 'gro': + for ext in "pdb", "gro": try: # see warning in doc ... so we don't use the new xtc but the old one - kwargs['o'] = self.filename(newtpr, ext=ext) + kwargs["o"] = self.filename(newtpr, ext=ext) TRJCONV(dump=0, stdout=False, stderr=False, **kwargs) # silent except: - logger.exception("Failed building the protein-only %(ext)s. " - "Position restraints in tpr file (see docs)?" % vars()) + logger.exception( + "Failed building the protein-only %(ext)s. " + "Position restraints in tpr file (see docs)?" % vars() + ) logger.info("keep_protein_only() complete") - self.proteinonly[self.rp(newxtc)] = Transformer(dirname=self.dirname, s=newtpr, - f=newxtc, n=newndx, force=force) - return {'tpr':self.rp(newtpr), 'xtc':self.rp(newxtc), 'ndx':self.rp(newndx)} + self.proteinonly[self.rp(newxtc)] = Transformer( + dirname=self.dirname, s=newtpr, f=newxtc, n=newndx, force=force + ) + return {"tpr": self.rp(newtpr), "xtc": self.rp(newxtc), "ndx": self.rp(newndx)} def strip_fit(self, **kwargs): """Strip water and fit to the remaining system. @@ -2098,23 +2369,23 @@ def strip_fit(self, **kwargs): .. Note:: The call signature of :meth:`strip_water` is somewhat different from this one. """ - kwargs.setdefault('fit', 'rot+trans') + kwargs.setdefault("fit", "rot+trans") kw_fit = {} - for k in ('xy', 'fit', 'fitgroup', 'input'): + for k in ("xy", "fit", "fitgroup", "input"): if k in kwargs: kw_fit[k] = kwargs.pop(k) - kwargs['input'] = kwargs.pop('strip_input', ['Protein']) - kwargs['force'] = kw_fit['force'] = kwargs.pop('force', self.force) + kwargs["input"] = kwargs.pop("strip_input", ["Protein"]) + kwargs["force"] = kw_fit["force"] = kwargs.pop("force", self.force) - paths = self.strip_water(**kwargs) # updates self.nowater - transformer_nowater = self.nowater[paths['xtc']] # make sure to get the one we just produced - return transformer_nowater.fit(**kw_fit) # use new Transformer's fit() + paths = self.strip_water(**kwargs) # updates self.nowater + transformer_nowater = self.nowater[ + paths["xtc"] + ] # make sure to get the one we just produced + return transformer_nowater.fit(**kw_fit) # use new Transformer's fit() def _join_dirname(self, *args): """return os.path.join(os.path.dirname(args[0]), *args[1:])""" # extra function because I need to use it in a method that defines # the kwarg 'os', which collides with os.path... 
return os.path.join(os.path.dirname(args[0]), *args[1:]) - - diff --git a/gromacs/collections.py b/gromacs/collections.py index 77e68133..267b5b76 100644 --- a/gromacs/collections.py +++ b/gromacs/collections.py @@ -18,6 +18,7 @@ from six.moves import cPickle from numpy import all, any + class Collection(list): """Multiple objects (organized as a list). @@ -41,15 +42,19 @@ class Collection(list): >>> arc ['ant', 'boar', 'ape', 'gnu', 'ant', 'boar', 'ape', 'gnu'] """ + # note: do not use with multiple inheritance -- why, I think it could work now... def save(self, filename): """Pickle the whole collection to *filename*. - + If no extension is provided, ".collection" is appended. """ - cPickle.dump(self, open(self._canonicalize(filename), 'wb'), - protocol=cPickle.HIGHEST_PROTOCOL) + cPickle.dump( + self, + open(self._canonicalize(filename), "wb"), + protocol=cPickle.HIGHEST_PROTOCOL, + ) def load(self, filename, append=False): """Load collection from pickled file *filename*. @@ -59,7 +64,7 @@ def load(self, filename, append=False): If no extension is provided, ".collection" is appended. """ - tmp = cPickle.load(open(self._canonicalize(filename), 'rb')) + tmp = cPickle.load(open(self._canonicalize(filename), "rb")) if append: self.extend(tmp) else: @@ -86,27 +91,34 @@ def __getattribute__(self, attr): return super(Collection, self).__getattribute__(attr) except AttributeError: pass - + for o in self: failures = [] if not hasattr(o, attr): failures.append(o) if len(failures) > 0: - raise AttributeError("The following members of the collection do not " - "implement the attribute %(attr)r:\n%(failures)r\n" - % vars()) + raise AttributeError( + "The following members of the collection do not " + "implement the attribute %(attr)r:\n%(failures)r\n" % vars() + ) # analyze attribute: functions (the ones with __call__) get delayed, simple # attributes are looked up immediately - iscallable = [hasattr(o.__getattribute__(attr), '__call__') for o in self] + iscallable = [hasattr(o.__getattribute__(attr), "__call__") for o in self] if all(iscallable): + def runall(*args, **kwargs): """Apply function to all members and return a new Collection""" - return Collection([o.__getattribute__(attr)(*args, **kwargs) for o in self]) + return Collection( + [o.__getattribute__(attr)(*args, **kwargs) for o in self] + ) + runall.__name__ = attr return runall elif any(iscallable): - raise TypeError("Attribute {0!r} is callable only for some objects".format(attr)) + raise TypeError( + "Attribute {0!r} is callable only for some objects".format(attr) + ) return Collection([o.__getattribute__(attr) for o in self]) @@ -114,4 +126,6 @@ def __add__(self, x): return Collection(super(Collection, self).__add__(x)) def __repr__(self): - return self.__class__.__name__+"({0!s})".format(super(Collection, self).__repr__()) + return self.__class__.__name__ + "({0!s})".format( + super(Collection, self).__repr__() + ) diff --git a/gromacs/config.py b/gromacs/config.py index 15240480..a21ea4ab 100644 --- a/gromacs/config.py +++ b/gromacs/config.py @@ -236,6 +236,7 @@ if sys.version_info[0] < 3: # several differences for Python 2 from ConfigParser import SafeConfigParser as ConfigParser from ConfigParser import NoSectionError, NoOptionError + # Define read_file to point to the (deprecated in Python 3) readfp # in order to have consistent, non-deprecated syntax ConfigParser.read_file = ConfigParser.readfp @@ -257,6 +258,7 @@ def _getboolean(self, section, option, fallback=_UNSET, **kwargs): if fallback is _UNSET: raise return fallback # If 
fallback is given, use that value + ConfigParser.getboolean = _getboolean else: from configparser import ConfigParser @@ -267,19 +269,19 @@ def _getboolean(self, section, option, fallback=_UNSET, **kwargs): #: Default name of the global configuration file. -CONFIGNAME = os.path.expanduser(os.path.join("~",".gromacswrapper.cfg")) +CONFIGNAME = os.path.expanduser(os.path.join("~", ".gromacswrapper.cfg")) #: Default configuration directory in GromacsWrapper. -default_configdir = os.path.expanduser(os.path.join("~",".gromacswrapper")) +default_configdir = os.path.expanduser(os.path.join("~", ".gromacswrapper")) #: Initial defaults for directories, filenames, and logger options. defaults = { - 'configdir': default_configdir, - 'qscriptdir': os.path.join(default_configdir, 'qscripts'), - 'templatesdir': os.path.join(default_configdir, 'templates'), - 'logfilename': "gromacs.log", - 'loglevel_console': 'INFO', - 'loglevel_file': 'DEBUG', + "configdir": default_configdir, + "qscriptdir": os.path.join(default_configdir, "qscripts"), + "templatesdir": os.path.join(default_configdir, "templates"), + "logfilename": "gromacs.log", + "loglevel_console": "INFO", + "loglevel_file": "DEBUG", } @@ -291,13 +293,13 @@ def _getboolean(self, section, option, fallback=_UNSET, **kwargs): #: File name for the log file; all gromacs command and many utility functions (e.g. in #: :mod:`gromacs.cbook` and :mod:`gromacs.setup`) append messages there. Warnings and #: errors are also recorded here. The default is *gromacs.log*. -logfilename = defaults['logfilename'] +logfilename = defaults["logfilename"] #: The default loglevel that is still printed to the console. -loglevel_console = logging.getLevelName(defaults['loglevel_console']) +loglevel_console = logging.getLevelName(defaults["loglevel_console"]) #: The default loglevel that is still written to the :data:`logfilename`. -loglevel_file = logging.getLevelName(defaults['loglevel_file']) +loglevel_file = logging.getLevelName(defaults["loglevel_file"]) # User-accessible configuration @@ -305,15 +307,15 @@ def _getboolean(self, section, option, fallback=_UNSET, **kwargs): #: Directory to store user templates and rc files. #: The default value is ``~/.gromacswrapper``. -configdir = defaults['configdir'] +configdir = defaults["configdir"] #: Directory to store user supplied queuing system scripts. #: The default value is ``~/.gromacswrapper/qscripts``. -qscriptdir = defaults['qscriptdir'] +qscriptdir = defaults["qscriptdir"] #: Directory to store user supplied template files such as mdp files. #: The default value is ``~/.gromacswrapper/templates``. -templatesdir = defaults['templatesdir'] +templatesdir = defaults["templatesdir"] #: List of all configuration directories. config_directories = [configdir, qscriptdir, templatesdir] @@ -333,6 +335,7 @@ def _getboolean(self, section, option, fallback=_UNSET, **kwargs): # Location of template files # -------------------------- + def _generate_template_dict(dirname): """Generate a list of included files *and* extract them to a temp space. @@ -340,18 +343,22 @@ def _generate_template_dict(dirname): by external code. All template filenames are stored in :data:`config.templates`. 
""" - return dict((resource_basename(fn), resource_filename(__name__, dirname +'/'+fn)) - for fn in resource_listdir(__name__, dirname) - if not fn.endswith('~')) + return dict( + (resource_basename(fn), resource_filename(__name__, dirname + "/" + fn)) + for fn in resource_listdir(__name__, dirname) + if not fn.endswith("~") + ) + def resource_basename(resource): """Last component of a resource (which always uses '/' as sep).""" - if resource.endswith('/'): - resource = resource[:-1] - parts = resource.split('/') + if resource.endswith("/"): + resource = resource[:-1] + parts = resource.split("/") return parts[-1] -templates = _generate_template_dict('templates') + +templates = _generate_template_dict("templates") """*GromacsWrapper* comes with a number of templates for run input files and queuing system scripts. They are provided as a convenience and examples but **WITHOUT ANY GUARANTEE FOR CORRECTNESS OR SUITABILITY FOR @@ -383,12 +390,13 @@ def resource_basename(resource): """ #: The default template for SGE/PBS run scripts. -qscript_template = templates['local.sh'] +qscript_template = templates["local.sh"] # Functions to access configuration data # -------------------------------------- + def get_template(t): """Find template file *t* and return its real path. @@ -412,9 +420,10 @@ def get_template(t): """ templates = [_get_template(s) for s in utilities.asiterable(t)] if len(templates) == 1: - return templates[0] + return templates[0] return templates + def get_templates(t): """Find template file(s) *t* and return their real paths. @@ -436,118 +445,131 @@ def get_templates(t): """ return [_get_template(s) for s in utilities.asiterable(t)] + def _get_template(t): """Return a single template *t*.""" - if os.path.exists(t): # 1) Is it an accessible file? - pass + if os.path.exists(t): # 1) Is it an accessible file? + pass else: - _t = t - _t_found = False - for d in path: # 2) search config.path - p = os.path.join(d, _t) - if os.path.exists(p): - t = p - _t_found = True - break - _t = os.path.basename(t) - if not _t_found: # 3) try template dirs - for p in templates.values(): - if _t == os.path.basename(p): - t = p - _t_found = True # NOTE: in principle this could match multiple - break # times if more than one template dir existed. - if not _t_found: # 4) try it as a key into templates - try: - t = templates[t] - except KeyError: - pass - else: - _t_found = True - if not _t_found: # 5) nothing else to try... - raise ValueError("Failed to locate the template file {t!r}.".format(**vars())) + _t = t + _t_found = False + for d in path: # 2) search config.path + p = os.path.join(d, _t) + if os.path.exists(p): + t = p + _t_found = True + break + _t = os.path.basename(t) + if not _t_found: # 3) try template dirs + for p in templates.values(): + if _t == os.path.basename(p): + t = p + _t_found = True # NOTE: in principle this could match multiple + break # times if more than one template dir existed. + if not _t_found: # 4) try it as a key into templates + try: + t = templates[t] + except KeyError: + pass + else: + _t_found = True + if not _t_found: # 5) nothing else to try... + raise ValueError( + "Failed to locate the template file {t!r}.".format(**vars()) + ) return os.path.realpath(t) class GMXConfigParser(ConfigParser, object): - """Customized :class:`ConfigParser.SafeConfigParser`.""" - cfg_template = 'gromacswrapper.cfg' - - def __init__(self, *args, **kwargs): - """Reads and parses the configuration file. 
- - Default values are loaded and then replaced with the values from - ``~/.gromacswrapper.cfg`` if that file exists. The global - configuration instance :data:`gromacswrapper.config.cfg` is updated - as are a number of global variables such as :data:`configdir`, - :data:`qscriptdir`, :data:`templatesdir`, :data:`logfilename`, ... - - Normally, the configuration is only loaded when the :mod:`gromacswrapper` - package is imported but a re-reading of the configuration can be forced - anytime by calling :func:`get_configuration`. - """ - - self.filename = kwargs.pop('filename', CONFIGNAME) - - super(GMXConfigParser, self).__init__(*args, **kwargs) - # defaults - self.set('DEFAULT', 'qscriptdir', - os.path.join("%(configdir)s", os.path.basename(defaults['qscriptdir']))) - self.set('DEFAULT', 'templatesdir', - os.path.join("%(configdir)s", os.path.basename(defaults['templatesdir']))) - self.add_section('Gromacs') - self.set("Gromacs", "release", "") - self.set("Gromacs", "GMXRC", "") - self.set("Gromacs", "tools", "") - self.set("Gromacs", "extra", "") - self.set("Gromacs", "groups", "tools") - self.set("Gromacs", "append_suffix", "yes") - self.add_section('Logging') - self.set('Logging', 'logfilename', defaults['logfilename']) - self.set('Logging', 'loglevel_console', defaults['loglevel_console']) - self.set('Logging', 'loglevel_file', defaults['loglevel_file']) - - # bundled defaults (should be ok to use get_template()) - default_cfg = get_template(self.cfg_template) - self.read_file(open(default_cfg)) - - # defaults are overriden by existing user global cfg file - self.read([self.filename]) - - @property - def configuration(self): - """Dict of variables that we make available as globals in the module. - - Can be used as :: - - globals().update(GMXConfigParser.configuration) # update configdir, templatesdir ... - """ - configuration = { - 'configfilename': self.filename, - 'logfilename': self.getpath('Logging', 'logfilename'), - 'loglevel_console': self.getLogLevel('Logging', 'loglevel_console'), - 'loglevel_file': self.getLogLevel('Logging', 'loglevel_file'), - 'configdir': self.getpath('DEFAULT', 'configdir'), - 'qscriptdir': self.getpath('DEFAULT', 'qscriptdir'), - 'templatesdir': self.getpath('DEFAULT', 'templatesdir'), - } - configuration['path'] = [os.path.curdir, - configuration['qscriptdir'], - configuration['templatesdir']] - return configuration - - def getpath(self, section, option): - """Return option as an expanded path.""" - return os.path.expanduser(os.path.expandvars(self.get(section, option))) - - def getLogLevel(self, section, option): - """Return the textual representation of logging level 'option' or the number. - - Note that option is always interpreted as an UPPERCASE string - and hence integer log levels will not be recognized. - - .. SeeAlso: :mod:`logging` and :func:`logging.getLevelName` - """ - return logging.getLevelName(self.get(section, option).upper()) + """Customized :class:`ConfigParser.SafeConfigParser`.""" + + cfg_template = "gromacswrapper.cfg" + + def __init__(self, *args, **kwargs): + """Reads and parses the configuration file. + + Default values are loaded and then replaced with the values from + ``~/.gromacswrapper.cfg`` if that file exists. The global + configuration instance :data:`gromacswrapper.config.cfg` is updated + as are a number of global variables such as :data:`configdir`, + :data:`qscriptdir`, :data:`templatesdir`, :data:`logfilename`, ... 
+ + Normally, the configuration is only loaded when the :mod:`gromacswrapper` + package is imported but a re-reading of the configuration can be forced + anytime by calling :func:`get_configuration`. + """ + + self.filename = kwargs.pop("filename", CONFIGNAME) + + super(GMXConfigParser, self).__init__(*args, **kwargs) + # defaults + self.set( + "DEFAULT", + "qscriptdir", + os.path.join("%(configdir)s", os.path.basename(defaults["qscriptdir"])), + ) + self.set( + "DEFAULT", + "templatesdir", + os.path.join("%(configdir)s", os.path.basename(defaults["templatesdir"])), + ) + self.add_section("Gromacs") + self.set("Gromacs", "release", "") + self.set("Gromacs", "GMXRC", "") + self.set("Gromacs", "tools", "") + self.set("Gromacs", "extra", "") + self.set("Gromacs", "groups", "tools") + self.set("Gromacs", "append_suffix", "yes") + self.add_section("Logging") + self.set("Logging", "logfilename", defaults["logfilename"]) + self.set("Logging", "loglevel_console", defaults["loglevel_console"]) + self.set("Logging", "loglevel_file", defaults["loglevel_file"]) + + # bundled defaults (should be ok to use get_template()) + default_cfg = get_template(self.cfg_template) + self.read_file(open(default_cfg)) + + # defaults are overriden by existing user global cfg file + self.read([self.filename]) + + @property + def configuration(self): + """Dict of variables that we make available as globals in the module. + + Can be used as :: + + globals().update(GMXConfigParser.configuration) # update configdir, templatesdir ... + """ + configuration = { + "configfilename": self.filename, + "logfilename": self.getpath("Logging", "logfilename"), + "loglevel_console": self.getLogLevel("Logging", "loglevel_console"), + "loglevel_file": self.getLogLevel("Logging", "loglevel_file"), + "configdir": self.getpath("DEFAULT", "configdir"), + "qscriptdir": self.getpath("DEFAULT", "qscriptdir"), + "templatesdir": self.getpath("DEFAULT", "templatesdir"), + } + configuration["path"] = [ + os.path.curdir, + configuration["qscriptdir"], + configuration["templatesdir"], + ] + return configuration + + def getpath(self, section, option): + """Return option as an expanded path.""" + return os.path.expanduser(os.path.expandvars(self.get(section, option))) + + def getLogLevel(self, section, option): + """Return the textual representation of logging level 'option' or the number. + + Note that option is always interpreted as an UPPERCASE string + and hence integer log levels will not be recognized. + + .. SeeAlso: :mod:`logging` and :func:`logging.getLevelName` + """ + return logging.getLevelName(self.get(section, option).upper()) + def get_configuration(filename=CONFIGNAME): """Reads and parses the configuration file. @@ -564,51 +586,55 @@ def get_configuration(filename=CONFIGNAME): :Returns: a dict with all updated global configuration variables """ - global cfg, configuration # very iffy --- most of the whole config mod should a class + global cfg, configuration # very iffy --- most of the whole config mod should a class #: :data:`cfg` is the instance of :class:`GMXConfigParser` that makes all #: global configuration data accessible - cfg = GMXConfigParser(filename=filename) # update module-level cfg - globals().update(cfg.configuration) # update configdir, templatesdir ... - configuration = cfg.configuration # update module-level configuration + cfg = GMXConfigParser(filename=filename) # update module-level cfg + globals().update(cfg.configuration) # update configdir, templatesdir ... 
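    # Illustrative re-read of the configuration after editing ~/.gromacswrapper.cfg
    # (the printed path assumes a hypothetical home directory)::
    #
    #   >>> import gromacs.config
    #   >>> cfg = gromacs.config.get_configuration()
    #   >>> cfg.getpath("DEFAULT", "templatesdir")
    #   '/home/user/.gromacswrapper/templates'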
+ configuration = cfg.configuration # update module-level configuration return cfg + #: :data:`cfg` is the instance of :class:`GMXConfigParser` that makes all #: global configuration data accessible cfg = GMXConfigParser() -globals().update(cfg.configuration) # update configdir, templatesdir ... +globals().update(cfg.configuration) # update configdir, templatesdir ... #: Dict containing important configuration variables, populated by #: :func:`get_configuration` (mainly a shortcut; use :data:`cfg` in most cases). configuration = cfg.configuration + def setup(filename=CONFIGNAME): - """Prepare a default GromacsWrapper global environment. - - 1) Create the global config file. - 2) Create the directories in which the user can store template and config files. - - This function can be run repeatedly without harm. - """ - # setup() must be separate and NOT run automatically when config - # is loaded so that easy_install installations work - # (otherwise we get a sandbox violation) - # populate cfg with defaults (or existing data) - get_configuration() - if not os.path.exists(filename): - with open(filename, 'w') as configfile: - cfg.write(configfile) # write the default file so that user can edit - msg = "NOTE: GromacsWrapper created the configuration file \n\t%r\n" \ - " for you. Edit the file to customize the package." % filename - print(msg) - - # directories - for d in config_directories: - utilities.mkdir_p(d) + """Prepare a default GromacsWrapper global environment. + + 1) Create the global config file. + 2) Create the directories in which the user can store template and config files. + + This function can be run repeatedly without harm. + """ + # setup() must be separate and NOT run automatically when config + # is loaded so that easy_install installations work + # (otherwise we get a sandbox violation) + # populate cfg with defaults (or existing data) + get_configuration() + if not os.path.exists(filename): + with open(filename, "w") as configfile: + cfg.write(configfile) # write the default file so that user can edit + msg = ( + "NOTE: GromacsWrapper created the configuration file \n\t%r\n" + " for you. Edit the file to customize the package." % filename + ) + print(msg) + + # directories + for d in config_directories: + utilities.mkdir_p(d) def check_setup(): - """Check if templates directories are setup and issue a warning and help. + """Check if templates directories are setup and issue a warning and help. Set the environment variable :envvar:`GROMACSWRAPPER_SUPPRESS_SETUP_CHECK` skip the check and make it always return ``True`` @@ -618,20 +644,20 @@ def check_setup(): .. 
versionchanged:: 0.3.1 Uses :envvar:`GROMACSWRAPPER_SUPPRESS_SETUP_CHECK` to suppress check (useful for scripts run on a server) - """ + """ - if "GROMACSWRAPPER_SUPPRESS_SETUP_CHECK" in os.environ: - return True + if "GROMACSWRAPPER_SUPPRESS_SETUP_CHECK" in os.environ: + return True - missing = [d for d in config_directories if not os.path.exists(d)] - if len(missing) > 0: - print("NOTE: Some configuration directories are not set up yet: ") - print("\t{0!s}".format('\n\t'.join(missing))) - print("NOTE: You can create the configuration file and directories with:") - print("\t>>> import gromacs") - print("\t>>> gromacs.config.setup()") - return False - return True + missing = [d for d in config_directories if not os.path.exists(d)] + if len(missing) > 0: + print("NOTE: Some configuration directories are not set up yet: ") + print("\t{0!s}".format("\n\t".join(missing))) + print("NOTE: You can create the configuration file and directories with:") + print("\t>>> import gromacs") + print("\t>>> gromacs.config.setup()") + return False + return True def set_gmxrc_environment(gmxrc): @@ -645,14 +671,27 @@ def set_gmxrc_environment(gmxrc): this function. """ # only v5: 'GMXPREFIX', 'GROMACS_DIR' - envvars = ['GMXBIN', 'GMXLDLIB', 'GMXMAN', 'GMXDATA', - 'LD_LIBRARY_PATH', 'MANPATH', 'PKG_CONFIG_PATH', - 'PATH', - 'GMXPREFIX', 'GROMACS_DIR'] + envvars = [ + "GMXBIN", + "GMXLDLIB", + "GMXMAN", + "GMXDATA", + "LD_LIBRARY_PATH", + "MANPATH", + "PKG_CONFIG_PATH", + "PATH", + "GMXPREFIX", + "GROMACS_DIR", + ] # in order to keep empty values, add ___ sentinels around result # (will be removed later) - cmdargs = ['bash', '-c', ". {0} && echo {1}".format(gmxrc, - ' '.join(['___${{{0}}}___'.format(v) for v in envvars]))] + cmdargs = [ + "bash", + "-c", + ". {0} && echo {1}".format( + gmxrc, " ".join(["___${{{0}}}___".format(v) for v in envvars]) + ), + ] if not gmxrc: logger.debug("set_gmxrc_environment(): no GMXRC, nothing done.") @@ -662,48 +701,51 @@ def set_gmxrc_environment(gmxrc): out = subprocess.check_output(cmdargs) out = out.strip().split() for key, value in zip(envvars, out): - value = str(value.decode('ascii').replace('___', '')) # remove sentinels + value = str(value.decode("ascii").replace("___", "")) # remove sentinels os.environ[key] = value logger.debug("set_gmxrc_environment(): %s = %r", key, value) except (subprocess.CalledProcessError, OSError): - logger.warning("Failed to automatically set the Gromacs environment" - "from GMXRC=%r", gmxrc) + logger.warning( + "Failed to automatically set the Gromacs environment" "from GMXRC=%r", gmxrc + ) def get_tool_names(): - """ Get tool names from all configured groups. + """Get tool names from all configured groups. :return: list of tool names """ names = [] - for group in cfg.get('Gromacs', 'groups').split(): - names.extend(cfg.get('Gromacs', group).split()) + for group in cfg.get("Gromacs", "groups").split(): + names.extend(cfg.get("Gromacs", group).split()) return names def get_extra_tool_names(): - """ Get tool names from all configured groups. + """Get tool names from all configured groups. 
:return: list of tool names """ - return cfg.get('Gromacs', 'extra').split() + return cfg.get("Gromacs", "extra").split() RELEASE = None MAJOR_RELEASE = None -if cfg.get('Gromacs', 'release'): - RELEASE = cfg.get('Gromacs', 'release') - MAJOR_RELEASE = RELEASE.split('.')[0] +if cfg.get("Gromacs", "release"): + RELEASE = cfg.get("Gromacs", "release") + MAJOR_RELEASE = RELEASE.split(".")[0] for name in get_tool_names(): - match = re.match(r'(gmx[^:]*):.*', name) + match = re.match(r"(gmx[^:]*):.*", name) if match: driver = match.group(1) - raise ValueError("'%s' isn't a valid tool name anymore." - " Replace it by '%s'.\n See " - "https://gromacswrapper.readthedocs.io/en/latest/" - "configuration.html" % (name, match.group(1))) + raise ValueError( + "'%s' isn't a valid tool name anymore." + " Replace it by '%s'.\n See " + "https://gromacswrapper.readthedocs.io/en/latest/" + "configuration.html" % (name, match.group(1)) + ) check_setup() diff --git a/gromacs/core.py b/gromacs/core.py index 5b5d4da7..57674255 100644 --- a/gromacs/core.py +++ b/gromacs/core.py @@ -111,14 +111,17 @@ import errno import logging -logger = logging.getLogger('gromacs.core') + +logger = logging.getLogger("gromacs.core") from .exceptions import GromacsError, GromacsFailureWarning from . import environment + class Command(object): """Wrap simple script or command.""" + #: Derive a class from command; typically one only has to set *command_name* #: to the name of the script or executable. The full path is required if it #: cannot be found by searching :envvar:`PATH`. @@ -193,36 +196,38 @@ def _run_command(self, *args, **kwargs): """ # hack to run command WITHOUT input (-h...) even though user defined # input (should have named it "ignore_input" with opposite values...) - use_input = kwargs.pop('use_input', True) + use_input = kwargs.pop("use_input", True) # logic for capturing output (see docs on I/O and the flags) capturefile = None - if environment.flags['capture_output'] is True: + if environment.flags["capture_output"] is True: # capture into Python vars (see subprocess.Popen.communicate()) - kwargs.setdefault('stderr', PIPE) - kwargs.setdefault('stdout', PIPE) - elif environment.flags['capture_output'] == "file": - if 'stdout' in kwargs and 'stderr' in kwargs: + kwargs.setdefault("stderr", PIPE) + kwargs.setdefault("stdout", PIPE) + elif environment.flags["capture_output"] == "file": + if "stdout" in kwargs and "stderr" in kwargs: pass else: # XXX: not race or thread proof; potentially many commands write to the same file - fn = environment.flags['capture_output_filename'] - capturefile = open(fn, "w") # overwrite (clobber) capture file - if 'stdout' in kwargs and 'stderr' not in kwargs: + fn = environment.flags["capture_output_filename"] + capturefile = open(fn, "w") # overwrite (clobber) capture file + if "stdout" in kwargs and "stderr" not in kwargs: # special case of stdout used by code but stderr should be captured to file - kwargs.setdefault('stderr', capturefile) + kwargs.setdefault("stderr", capturefile) else: # merge stderr with stdout and write stdout to file # (stderr comes *before* stdout in capture file, could split...) - kwargs.setdefault('stderr', STDOUT) - kwargs.setdefault('stdout', capturefile) + kwargs.setdefault("stderr", STDOUT) + kwargs.setdefault("stdout", capturefile) try: p = self.Popen(*args, **kwargs) - out, err = p.communicate(use_input=use_input) # special Popen knows input! + out, err = p.communicate(use_input=use_input) # special Popen knows input! 
except: if capturefile is not None: - logger.error("Use captured command output in %r for diagnosis.", capturefile) + logger.error( + "Use captured command output in %r for diagnosis.", capturefile + ) raise finally: if capturefile is not None: @@ -232,7 +237,7 @@ def _run_command(self, *args, **kwargs): def _commandline(self, *args, **kwargs): """Returns the command line (without pipes) as a list.""" - # transform_args() is a hook (used in GromacsCommand very differently!) + # transform_args() is a hook (used in GromacsCommand very differently!) return [self.command_name] + self.transform_args(*args, **kwargs) def commandline(self, *args, **kwargs): @@ -252,48 +257,63 @@ def Popen(self, *args, **kwargs): :TODO: Write example. """ - stderr = kwargs.pop('stderr', None) # default: print to stderr (if STDOUT then merge) - if stderr is False: # False: capture it + stderr = kwargs.pop( + "stderr", None + ) # default: print to stderr (if STDOUT then merge) + if stderr is False: # False: capture it stderr = PIPE elif stderr is True: - stderr = None # use stderr + stderr = None # use stderr - stdout = kwargs.pop('stdout', None) # either set to PIPE for capturing output - if stdout is False: # ... or to False + stdout = kwargs.pop("stdout", None) # either set to PIPE for capturing output + if stdout is False: # ... or to False stdout = PIPE elif stdout is True: - stdout = None # for consistency, make True write to screen + stdout = None # for consistency, make True write to screen - stdin = kwargs.pop('stdin', None) - input = kwargs.pop('input', None) + stdin = kwargs.pop("stdin", None) + input = kwargs.pop("input", None) - use_shell = kwargs.pop('use_shell', False) + use_shell = kwargs.pop("use_shell", False) if input: stdin = PIPE - if isinstance(input, six.string_types) and not input.endswith('\n'): + if isinstance(input, six.string_types) and not input.endswith("\n"): # make sure that input is a simple string with \n line endings - input = six.text_type(input) + '\n' + input = six.text_type(input) + "\n" else: try: # make sure that input is a simple string with \n line endings - input = '\n'.join(map(six.text_type, input)) + '\n' + input = "\n".join(map(six.text_type, input)) + "\n" except TypeError: # so maybe we are a file or something ... 
and hope for the best pass - cmd = self._commandline(*args, **kwargs) # lots of magic happening here - # (cannot move out of method because filtering of stdin etc) + cmd = self._commandline(*args, **kwargs) # lots of magic happening here + # (cannot move out of method because filtering of stdin etc) try: - p = PopenWithInput(cmd, stdin=stdin, stderr=stderr, stdout=stdout, - universal_newlines=True, input=input, shell=use_shell) + p = PopenWithInput( + cmd, + stdin=stdin, + stderr=stderr, + stdout=stdout, + universal_newlines=True, + input=input, + shell=use_shell, + ) except OSError as err: - logger.error(" ".join(cmd)) # log command line + logger.error(" ".join(cmd)) # log command line if err.errno == errno.ENOENT: - errmsg = "Failed to find Gromacs command {0!r}, maybe its not on PATH or GMXRC must be sourced?".format(self.command_name) + errmsg = "Failed to find Gromacs command {0!r}, maybe its not on PATH or GMXRC must be sourced?".format( + self.command_name + ) logger.fatal(errmsg) raise OSError(errmsg) else: - logger.exception("Setting up Gromacs command {0!r} raised an exception.".format(self.command_name)) + logger.exception( + "Setting up Gromacs command {0!r} raised an exception.".format( + self.command_name + ) + ) raise logger.debug(p.command_string) return p @@ -301,24 +321,26 @@ def Popen(self, *args, **kwargs): def transform_args(self, *args, **kwargs): """Transform arguments and return them as a list suitable for Popen.""" options = [] - for option,value in kwargs.items(): - if not option.startswith('-'): + for option, value in kwargs.items(): + if not option.startswith("-"): # heuristic for turning key=val pairs into options # (fails for commands such as 'find' -- then just use args) if len(option) == 1: - option = '-' + option # POSIX style + option = "-" + option # POSIX style else: - option = '--' + option # GNU option + option = "--" + option # GNU option if value is True: options.append(option) continue elif value is False: - raise ValueError('A False value is ambiguous for option {0!r}'.format(option)) + raise ValueError( + "A False value is ambiguous for option {0!r}".format(option) + ) - if option[:2] == '--': - options.append(option + '=' + str(value)) # GNU option + if option[:2] == "--": + options.append(option + "=" + str(value)) # GNU option else: - options.extend((option, str(value))) # POSIX style + options.extend((option, str(value))) # POSIX style return options + list(args) def help(self, long=False): @@ -329,7 +351,7 @@ def help(self, long=False): print("\ncall method: command():\n") print(self.__call__.__doc__) - def __call__(self,*args,**kwargs): + def __call__(self, *args, **kwargs): """Run command with the given arguments:: rc,stdout,stderr = command(*args, input=None, **kwargs) @@ -430,7 +452,7 @@ class GromacsCommand(Command): # ------------------------------------------------------- #: Available failure modes. - failuremodes = ('raise', 'warn', None) + failuremodes = ("raise", "warn", None) def __init__(self, *args, **kwargs): """Set up the command with gromacs flags as keyword arguments. @@ -515,9 +537,9 @@ def __init__(self, *args, **kwargs): The *doc* keyword is now ignored (because it was not worth the effort to make it work with the lazy-loading of docs). 
""" - doc = kwargs.pop('doc', None) # ignored + doc = kwargs.pop("doc", None) # ignored self.__failuremode = None - self.failuremode = kwargs.pop('failure', 'raise') + self.failuremode = kwargs.pop("failure", "raise") self.gmxargs = self._combineargs(*args, **kwargs) self._doc_cache = None @@ -534,47 +556,64 @@ def failuremode(): just continue silently """ + def fget(self): return self.__failuremode + def fset(self, mode): if not mode in self.failuremodes: - raise ValueError('failuremode must be one of {0!r}'.format(self.failuremodes)) + raise ValueError( + "failuremode must be one of {0!r}".format(self.failuremodes) + ) self.__failuremode = mode + return locals() + failuremode = property(**failuremode()) def _combine_arglist(self, args, kwargs): """Combine the default values and the supplied values.""" gmxargs = self.gmxargs.copy() gmxargs.update(self._combineargs(*args, **kwargs)) - return (), gmxargs # Gromacs tools don't have positional args --> args = () + return (), gmxargs # Gromacs tools don't have positional args --> args = () - def check_failure(self, result, msg='Gromacs tool failed', command_string=None): + def check_failure(self, result, msg="Gromacs tool failed", command_string=None): rc, out, err = result if command_string is not None: - msg += '\nCommand invocation: ' + str(command_string) - had_success = (rc == 0) + msg += "\nCommand invocation: " + str(command_string) + had_success = rc == 0 if not had_success: gmxoutput = "\n".join([x for x in [out, err] if x is not None]) m = re.search(self.gmxfatal_pattern, gmxoutput, re.VERBOSE | re.DOTALL) if m: - formatted_message = ['GMX_FATAL '+line for line in m.group('message').split('\n')] - msg = "\n".join(\ - [msg, "Gromacs command {program_name!r} fatal error message:".format(**m.groupdict())] + - formatted_message) - if self.failuremode == 'raise': + formatted_message = [ + "GMX_FATAL " + line for line in m.group("message").split("\n") + ] + msg = "\n".join( + [ + msg, + "Gromacs command {program_name!r} fatal error message:".format( + **m.groupdict() + ), + ] + + formatted_message + ) + if self.failuremode == "raise": raise GromacsError(rc, msg) - elif self.failuremode == 'warn': - warnings.warn(msg + '\nError code: {0!r}\n'.format(rc), category=GromacsFailureWarning) + elif self.failuremode == "warn": + warnings.warn( + msg + "\nError code: {0!r}\n".format(rc), + category=GromacsFailureWarning, + ) elif self.failuremode is None: pass else: - raise ValueError('unknown failure mode {0!r}'.format(self.failuremode)) + raise ValueError("unknown failure mode {0!r}".format(self.failuremode)) return had_success def _combineargs(self, *args, **kwargs): """Add switches as 'options' with value True to the options dict.""" - d = {arg: True for arg in args} # switches are kwargs with value True + d = {arg: True for arg in args} # switches are kwargs with value True d.update(kwargs) return d @@ -584,28 +623,30 @@ def _build_arg_list(self, **kwargs): for flag, value in kwargs.items(): # XXX: check flag against allowed values flag = str(flag) - if flag.startswith('_'): - flag = flag[1:] # python-illegal keywords are '_'-quoted - if not flag.startswith('-'): - flag = '-' + flag # now flag is guaranteed to start with '-' + if flag.startswith("_"): + flag = flag[1:] # python-illegal keywords are '_'-quoted + if not flag.startswith("-"): + flag = "-" + flag # now flag is guaranteed to start with '-' if value is True: - arglist.append(flag) # simple command line flag + arglist.append(flag) # simple command line flag elif value is False: - if 
flag.startswith('-no'): + if flag.startswith("-no"): # negate a negated flag ('noX=False' --> X=True --> -X ... but who uses that?) - arglist.append('-' + flag[3:]) + arglist.append("-" + flag[3:]) else: - arglist.append('-no' + flag[1:]) # gromacs switches booleans by prefixing 'no' + arglist.append( + "-no" + flag[1:] + ) # gromacs switches booleans by prefixing 'no' elif value is None: - pass # ignore flag = None + pass # ignore flag = None else: try: - arglist.extend([flag] + value) # option with value list + arglist.extend([flag] + value) # option with value list except TypeError: arglist.extend([flag, value]) # option with single value return list(map(str, arglist)) # all arguments MUST be strings - def _run_command(self,*args,**kwargs): + def _run_command(self, *args, **kwargs): """Execute the gromacs command; see the docs for __call__.""" result, p = super(GromacsCommand, self)._run_command(*args, **kwargs) self.check_failure(result, command_string=p.command_string) @@ -613,12 +654,13 @@ def _run_command(self,*args,**kwargs): def _commandline(self, *args, **kwargs): """Returns the command line (without pipes) as a list. Inserts driver if present""" - if(self.driver is not None): - return [self.driver, self.command_name] + self.transform_args(*args, **kwargs) + if self.driver is not None: + return [self.driver, self.command_name] + self.transform_args( + *args, **kwargs + ) return [self.command_name] + self.transform_args(*args, **kwargs) - - def transform_args(self,*args,**kwargs): + def transform_args(self, *args, **kwargs): """Combine arguments and turn them into gromacs tool arguments.""" newargs = self._combineargs(*args, **kwargs) return self._build_arg_list(**newargs) @@ -634,9 +676,13 @@ def _get_gmx_docs(self): try: logging.disable(logging.CRITICAL) - rc, header, docs = self.run('h', stdout=PIPE, stderr=PIPE, use_input=False) + rc, header, docs = self.run("h", stdout=PIPE, stderr=PIPE, use_input=False) except: - logging.critical("Invoking command {0} failed when determining its doc string. Proceed with caution".format(self.command_name)) + logging.critical( + "Invoking command {0} failed when determining its doc string. 
Proceed with caution".format( + self.command_name + ) + ) self._doc_cache = "(No Gromacs documentation available)" return self._doc_cache finally: @@ -653,7 +699,7 @@ def _get_gmx_docs(self): self._doc_cache = "(No Gromacs documentation available)" return self._doc_cache - self._doc_cache = m.group('DOCS') + self._doc_cache = m.group("DOCS") return self._doc_cache @@ -685,19 +731,20 @@ def __init__(self, *args, **kwargs): string that is piped into the command """ - kwargs.setdefault('close_fds', True) # fixes 'Too many open fds' with 2.6 - self.input = kwargs.pop('input', None) + kwargs.setdefault("close_fds", True) # fixes 'Too many open fds' with 2.6 + self.input = kwargs.pop("input", None) if six.PY2 and self.input is not None: # in Python 2, subprocess.Popen uses os.write(chunk) with default ASCII encoding - self.input = self.input.encode('utf-8') + self.input = self.input.encode("utf-8") self.command = args[0] try: - input_string = 'printf "' + \ - self.input.replace('\n','\\n') + '" | ' # display newlines + input_string = ( + 'printf "' + self.input.replace("\n", "\\n") + '" | ' + ) # display newlines except (TypeError, AttributeError): input_string = "" self.command_string = input_string + " ".join(self.command) - super(PopenWithInput,self).__init__(*args, **kwargs) + super(PopenWithInput, self).__init__(*args, **kwargs) def communicate(self, use_input=True): """Run the command, using the input that was set up on __init__ (for *use_input* = ``True``)""" diff --git a/gromacs/environment.py b/gromacs/environment.py index bdbe915c..08f23069 100644 --- a/gromacs/environment.py +++ b/gromacs/environment.py @@ -38,6 +38,7 @@ """ import six + # set up flags for core routines (more convoluted than strictly necessary but should # be clean to add more flags if needed) class Flags(dict): @@ -56,43 +57,59 @@ class Flags(dict): New flags are added with the :meth:`Flags.register` method which takes a new :class:`Flag` instance as an argument. 
""" - def __init__(self,*args): + + def __init__(self, *args): """For **developers**: Initialize Flags registry with a *list* of :class:`Flag` instances.""" - super(Flags,self).__init__([(flag.name,flag) for flag in args]) - def get_flag(self,name): - return super(Flags,self).__getitem__(name) + super(Flags, self).__init__([(flag.name, flag) for flag in args]) + + def get_flag(self, name): + return super(Flags, self).__getitem__(name) + def doc(self): """Shows doc strings for all flags.""" return "\n\n".join([flag.__doc__ for flag in self._itervalues()]) - def register(self,flag): + + def register(self, flag): """Register a new :class:`Flag` instance with the Flags registry.""" - super(Flags,self).__setitem__(flag.name,flag) - def update(self,*flags): + super(Flags, self).__setitem__(flag.name, flag) + + def update(self, *flags): """Update Flags registry with a list of :class:`Flag` instances.""" - super(Flags,self).update([(flag.name,flag) for flag in flags]) - def setdefault(self,k,d=None): + super(Flags, self).update([(flag.name, flag) for flag in flags]) + + def setdefault(self, k, d=None): raise NotImplementedError - def __getitem__(self,name): + + def __getitem__(self, name): return self.get_flag(name).get() - def __setitem__(self,name,value): + + def __setitem__(self, name, value): self.get_flag(name).set(value) + def _itervalues(self): - return six.itervalues(super(Flags,self)) + return six.itervalues(super(Flags, self)) + def _items(self): - return super(Flags,self).items() + return super(Flags, self).items() + def itervalues(self): for flag in self._itervalues(): yield flag.value + def iteritems(self): for flag in self._itervalues(): - yield flag.name,flag.value + yield flag.name, flag.value + def values(self): return [flag.value for flag in self._itervalues()] + def items(self): - return [(flag.name,flag.value) for flag in self._itervalues()] + return [(flag.name, flag.value) for flag in self._itervalues()] + def __repr__(self): return str(self.items()) + class FlagsDynamicDocs(Flags): # docs are generated on the fly for interactive use; but because # this does not work well with the sphinx documentation system @@ -105,12 +122,14 @@ def __doc__(self): class IdentityMapping(dict): - def __getitem__(self,key): + def __getitem__(self, key): return key + class Flag(object): """A Flag, essentially a variable that knows its default and legal values.""" - def __init__(self,name,default,mapping=None,doc=None): + + def __init__(self, name, default, mapping=None, doc=None): """Create a new flag which will be registered with Flags. 
Usage :: @@ -141,19 +160,27 @@ def __init__(self,name,default,mapping=None,doc=None): self.default = default # {v1:v1,v2:v1,v3:v3, ...} mapping of allowed values to canonical ones self.mapping = mapping or IdentityMapping() - self._doctemplate = "**%(name)s** = *%(value)r*\n" + (doc or "*undocumented flag*") + self._doctemplate = "**%(name)s** = *%(value)r*\n" + ( + doc or "*undocumented flag*" + ) + def get(self): return self.value - def set(self,value): + + def set(self, value): if value is not None: try: self.value = self.mapping[value] except KeyError: - raise ValueError("flag must be None or one of "+str(self.mapping.keys())) + raise ValueError( + "flag must be None or one of " + str(self.mapping.keys()) + ) return self.get() + def prop(self): """Use this for ``property(**flag.prop())``""" - return {'fget':self.get, 'fset':self.set, 'doc':self.__doc__} + return {"fget": self.get, "fset": self.set, "doc": self.__doc__} + def __repr__(self): return """Flag('{name!s}',{value!r})""".format(**self.__dict__) @@ -162,16 +189,19 @@ class _Flag(Flag): @property def __doc__(self): # generate dynamic docs with current values - return self._doctemplate % self.__dict__ + return self._doctemplate % self.__dict__ + _flags = [ - _Flag('capture_output', - False, - {True: True, - False: False, - 'file': 'file', - }, - """ + _Flag( + "capture_output", + False, + { + True: True, + False: False, + "file": "file", + }, + """ Select if Gromacs command output is *always* captured. >>> flags['%(name)s'] = %(value)r @@ -202,24 +232,27 @@ def __doc__(self): output one would see on the screen. The default is %(default)r. - """ - ), - _Flag('capture_output_filename', - 'gromacs_captured_output.txt', - doc=""" + """, + ), + _Flag( + "capture_output_filename", + "gromacs_captured_output.txt", + doc=""" Name of the file that captures output if ``flags['capture_output'] = "file"`` >>> flags['%(name)s'] = %(value)r This is an *experimental* feature. The default is %(default)r. - """), - ] + """, + ), +] #: Global flag registry for :mod:`gromacs.environment`. #: Can be accessed like a dictionary and appears to the casual user as such. flags = FlagsDynamicDocs(*_flags) del _flags + # only for sphinx docs class flagsDocs(object): __doc__ = flags.doc() diff --git a/gromacs/exceptions.py b/gromacs/exceptions.py index fa85be0b..27ddf81a 100644 --- a/gromacs/exceptions.py +++ b/gromacs/exceptions.py @@ -5,6 +5,7 @@ # exceptions and warnings + class GromacsError(EnvironmentError): """Error raised when a gromacs tool fails. @@ -12,6 +13,7 @@ class GromacsError(EnvironmentError): # TODO: return status code and possibly error message """ + class MissingDataError(Exception): """Error raised when prerequisite data are not available. @@ -20,37 +22,54 @@ class MissingDataError(Exception): to be run first. 
""" + class ParseError(Exception): """Error raised when parsing of a file failed.""" + class GromacsFailureWarning(Warning): """Warning about failure of a Gromacs tool.""" + class GromacsImportWarning(ImportWarning): """Warns about problems with using a gromacs tool.""" + class GromacsValueWarning(Warning): """Warns about problems with the value of an option or variable.""" + class AutoCorrectionWarning(Warning): """Warns about cases when the code is choosing new values automatically.""" + class BadParameterWarning(Warning): """Warns if some parameters or variables are unlikely to be appropriate or correct.""" + class MissingDataWarning(Warning): """Warns when prerequisite data/files are not available.""" + class UsageWarning(Warning): """Warns if usage is unexpected/documentation ambiguous.""" + class LowAccuracyWarning(Warning): """Warns that results may possibly have low accuracy.""" + import warnings + # These warnings should always be displayed because other parameters # can have changed, eg during interactive use. -for w in (AutoCorrectionWarning, BadParameterWarning, UsageWarning, - GromacsFailureWarning, GromacsValueWarning, LowAccuracyWarning): - warnings.simplefilter('always', category=w) +for w in ( + AutoCorrectionWarning, + BadParameterWarning, + UsageWarning, + GromacsFailureWarning, + GromacsValueWarning, + LowAccuracyWarning, +): + warnings.simplefilter("always", category=w) del w diff --git a/gromacs/fileformats/__init__.py b/gromacs/fileformats/__init__.py index 1671c0d9..e4806b2a 100644 --- a/gromacs/fileformats/__init__.py +++ b/gromacs/fileformats/__init__.py @@ -5,6 +5,7 @@ # file formats from __future__ import absolute_import + __all__ = ["XVG", "MDP", "NDX", "uniqueNDX", "XPM", "TOP"] from .xvg import XVG @@ -12,5 +13,3 @@ from .ndx import NDX, uniqueNDX from .top import TOP, SystemToGroTop from .xpm import XPM - - diff --git a/gromacs/fileformats/blocks.py b/gromacs/fileformats/blocks.py index 0d01a72b..c52ba1f8 100644 --- a/gromacs/fileformats/blocks.py +++ b/gromacs/fileformats/blocks.py @@ -57,32 +57,33 @@ import logging + class System(object): """Top-level class containing molecule topology. - Contains all the parameter types (AtomTypes, BondTypes, ... ) - and molecules. + Contains all the parameter types (AtomTypes, BondTypes, ... ) + and molecules. """ - logger = logging.getLogger('gromacs.formats.BLOCKS') + + logger = logging.getLogger("gromacs.formats.BLOCKS") def __init__(self): self.molecules = tuple([]) - self.atomtypes = [] - self.bondtypes = [] - self.nonbond_params = [] - self.angletypes = [] - self.dihedraltypes = [] - self.impropertypes = [] - self.cmaptypes = [] + self.atomtypes = [] + self.bondtypes = [] + self.nonbond_params = [] + self.angletypes = [] + self.dihedraltypes = [] + self.impropertypes = [] + self.cmaptypes = [] self.interactiontypes = [] - self.pairtypes = [] - self.constrainttypes = [] - self.forcefield= None - - self.information = {} # like 'atomtypes': self.atomtypes + self.pairtypes = [] + self.constrainttypes = [] + self.forcefield = None + self.information = {} # like 'atomtypes': self.atomtypes class Molecule(object): @@ -117,23 +118,24 @@ class Molecule(object): """ + def __init__(self): - self.chains = [] - self.atoms = [] - self.residues = [] + self.chains = [] + self.atoms = [] + self.residues = [] - self.bonds = [] - self.angles = [] + self.bonds = [] + self.angles = [] self.dihedrals = [] self.impropers = [] - self.cmaps = [] - self.pairs = [] + self.cmaps = [] + self.pairs = [] self.exclusion_numb = None # 0, 1, 2, .. 
self.virtual_sites3 = [] self.exclusions = [] - self.settles = [] - self.constraints= [] + self.settles = [] + self.constraints = [] self.information = {} # like 'atoms': self.atoms @@ -141,14 +143,12 @@ def __init__(self): self._anumb_to_atom = {} - def anumb_to_atom(self, anumb): - '''Returns the atom object corresponding to an atom number''' + """Returns the atom object corresponding to an atom number""" assert isinstance(anumb, int), "anumb must be integer" - if not self._anumb_to_atom: # empty dictionary - + if not self._anumb_to_atom: # empty dictionary if self.atoms: for atom in self.atoms: self._anumb_to_atom[atom.number] = atom @@ -164,20 +164,19 @@ def anumb_to_atom(self, anumb): self.logger("no such atom number ({0:d}) in the molecule".format(anumb)) return False - def renumber_atoms(self): """Reset the molecule's atoms :attr:`number` to be 1-indexed""" if self.atoms: - # reset the mapping self._anumb_to_atom = {} - for i,atom in enumerate(self.atoms): - atom.number = i+1 # starting from 1 + for i, atom in enumerate(self.atoms): + atom.number = i + 1 # starting from 1 else: self.logger("the number of atoms is zero - no renumbering") + class Atom(object): """Class that represents an Atom @@ -224,19 +223,18 @@ class Atom(object): """ def __init__(self): + self.coords = [] # a list of coordinates (x,y,z) of models + self.altlocs = [] # a list of (altloc_name, (x,y,z), occup, bfactor) - self.coords = [] # a list of coordinates (x,y,z) of models - self.altlocs= [] # a list of (altloc_name, (x,y,z), occup, bfactor) - - self.name = None + self.name = None self.atomtype = None - self.number = None - self.resname = None - self.resnumb = None - self.charge = None + self.number = None + self.resname = None + self.resnumb = None + self.charge = None def get_atomtype(self): - if hasattr(self, 'atomtype'): + if hasattr(self, "atomtype"): return self.atomtype else: self.logger("atom {0} doesn't have atomtype".format(self)) @@ -258,90 +256,92 @@ class Param(object): """ def __init__(self, format): - assert format in ('charmm', 'gromacs') + assert format in ("charmm", "gromacs") self.format = format self.comment = None - self.line = None + self.line = None self.disabled = False self.charmm = None self.gromacs = None def convert(self, reqformat): - assert reqformat in ('charmm', 'gromacs') + assert reqformat in ("charmm", "gromacs") if reqformat == self.format: - if reqformat == 'charmm': + if reqformat == "charmm": return self.charmm - elif reqformat == 'gromacs': + elif reqformat == "gromacs": return self.gromacs else: raise NotImplementedError - - if isinstance(self, AtomType): - if reqformat == 'gromacs' and self.format == 'charmm': - self.gromacs['param']['lje'] = abs(self.charmm['param']['lje']) * 4.184 - self.gromacs['param']['ljl'] = self.charmm['param']['ljl'] * 2 * 0.1 / (2**(1.0/6.0)) - - if self.charmm['param']['lje14'] is not None: - self.gromacs['param']['lje14'] = abs(self.charmm['param']['lje14']) * 4.184 - self.gromacs['param']['ljl14'] = self.charmm['param']['ljl14'] * 2 * 0.1 / (2**(1.0/6.0)) + if reqformat == "gromacs" and self.format == "charmm": + self.gromacs["param"]["lje"] = abs(self.charmm["param"]["lje"]) * 4.184 + self.gromacs["param"]["ljl"] = ( + self.charmm["param"]["ljl"] * 2 * 0.1 / (2 ** (1.0 / 6.0)) + ) + + if self.charmm["param"]["lje14"] is not None: + self.gromacs["param"]["lje14"] = ( + abs(self.charmm["param"]["lje14"]) * 4.184 + ) + self.gromacs["param"]["ljl14"] = ( + self.charmm["param"]["ljl14"] * 2 * 0.1 / (2 ** (1.0 / 6.0)) + ) else: - 
self.gromacs['param']['lje14'] = None - self.gromacs['param']['ljl14'] = None + self.gromacs["param"]["lje14"] = None + self.gromacs["param"]["ljl14"] = None else: raise NotImplementedError - - elif isinstance(self, BondType): - if reqformat == 'gromacs' and self.format == 'charmm': - self.gromacs['param']['kb'] = self.charmm['param']['kb'] * 2 * 4.184 * (1.0 / 0.01) # nm^2 - self.gromacs['param']['b0'] = self.charmm['param']['b0'] * 0.1 - self.gromacs['func'] = 1 + if reqformat == "gromacs" and self.format == "charmm": + self.gromacs["param"]["kb"] = ( + self.charmm["param"]["kb"] * 2 * 4.184 * (1.0 / 0.01) + ) # nm^2 + self.gromacs["param"]["b0"] = self.charmm["param"]["b0"] * 0.1 + self.gromacs["func"] = 1 else: raise NotImplementedError - - elif isinstance(self, AngleType): - if reqformat == 'gromacs' and self.format == 'charmm': - self.gromacs['param']['ktetha'] = self.charmm['param']['ktetha'] * 2 * 4.184 - self.gromacs['param']['tetha0'] = self.charmm['param']['tetha0'] - self.gromacs['param']['kub'] = self.charmm['param']['kub'] * 2 * 4.184 * 10 * 10 - self.gromacs['param']['s0'] = self.charmm['param']['s0'] * 0.1 - self.gromacs['func'] = 5 + if reqformat == "gromacs" and self.format == "charmm": + self.gromacs["param"]["ktetha"] = ( + self.charmm["param"]["ktetha"] * 2 * 4.184 + ) + self.gromacs["param"]["tetha0"] = self.charmm["param"]["tetha0"] + self.gromacs["param"]["kub"] = ( + self.charmm["param"]["kub"] * 2 * 4.184 * 10 * 10 + ) + self.gromacs["param"]["s0"] = self.charmm["param"]["s0"] * 0.1 + self.gromacs["func"] = 5 else: raise NotImplementedError - - elif isinstance(self, DihedralType): - if reqformat == 'gromacs' and self.format == 'charmm': - for dih in self.charmm['param']: + if reqformat == "gromacs" and self.format == "charmm": + for dih in self.charmm["param"]: convdih = {} - convdih['kchi'] = dih['kchi'] * 4.184 - convdih['n'] = dih['n'] - convdih['delta'] = dih['delta'] - self.gromacs['param'].append(convdih) - self.gromacs['func'] = 9 + convdih["kchi"] = dih["kchi"] * 4.184 + convdih["n"] = dih["n"] + convdih["delta"] = dih["delta"] + self.gromacs["param"].append(convdih) + self.gromacs["func"] = 9 else: raise NotImplementedError - - elif isinstance(self, ImproperType): - if reqformat == 'gromacs' and self.format == 'charmm': - for imp in self.charmm['param']: + if reqformat == "gromacs" and self.format == "charmm": + for imp in self.charmm["param"]: convimp = {} - convimp['kpsi'] = imp['kpsi'] * 2 * 4.184 - convimp['psi0'] = imp['psi0'] - if imp.get('n', False): - convimp['n'] = imp['n'] - self.gromacs['param'].append(convimp) - self.gromacs['func'] = 2 + convimp["kpsi"] = imp["kpsi"] * 2 * 4.184 + convimp["psi0"] = imp["psi0"] + if imp.get("n", False): + convimp["n"] = imp["n"] + self.gromacs["param"].append(convimp) + self.gromacs["func"] = 2 # self.gromacs['param']['kpsi'] = self.charmm['param']['kpsi'] * 2 * 4.184 # self.gromacs['param']['psi0'] = self.charmm['param']['psi0'] @@ -349,32 +349,36 @@ def convert(self, reqformat): else: raise NotImplementedError - - elif isinstance(self, CMapType): - if reqformat == 'gromacs' and self.format == 'charmm': - self.gromacs['param']= [n*4.184 for n in self.charmm['param']] - self.gromacs['func'] = 1 + if reqformat == "gromacs" and self.format == "charmm": + self.gromacs["param"] = [n * 4.184 for n in self.charmm["param"]] + self.gromacs["func"] = 1 else: raise NotImplementedError - - elif isinstance(self, InteractionType): - if reqformat == 'gromacs' and self.format == 'charmm': - if self.charmm['param']['lje'] is 
not None: - self.gromacs['param']['lje'] = abs(self.charmm['param']['lje']) * 4.184 - self.gromacs['param']['ljl'] = self.charmm['param']['ljl'] * 0.1 / (2**(1.0/6.0)) # no *2 + if reqformat == "gromacs" and self.format == "charmm": + if self.charmm["param"]["lje"] is not None: + self.gromacs["param"]["lje"] = ( + abs(self.charmm["param"]["lje"]) * 4.184 + ) + self.gromacs["param"]["ljl"] = ( + self.charmm["param"]["ljl"] * 0.1 / (2 ** (1.0 / 6.0)) + ) # no *2 else: - self.gromacs['param']['lje'] = None - self.gromacs['param']['ljl'] = None - - if self.charmm['param']['lje14'] is not None: - self.gromacs['param']['lje14'] = abs(self.charmm['param']['lje14']) * 4.184 - self.gromacs['param']['ljl14'] = self.charmm['param']['ljl14'] * 0.1 / (2**(1.0/6.0)) + self.gromacs["param"]["lje"] = None + self.gromacs["param"]["ljl"] = None + + if self.charmm["param"]["lje14"] is not None: + self.gromacs["param"]["lje14"] = ( + abs(self.charmm["param"]["lje14"]) * 4.184 + ) + self.gromacs["param"]["ljl14"] = ( + self.charmm["param"]["ljl14"] * 0.1 / (2 ** (1.0 / 6.0)) + ) else: - self.gromacs['param']['lje14'] = None - self.gromacs['param']['ljl14'] = None + self.gromacs["param"]["lje14"] = None + self.gromacs["param"]["ljl14"] = None else: raise NotImplementedError @@ -384,36 +388,40 @@ def convert(self, reqformat): class AtomType(Param): def __init__(self, format): + super(AtomType, self).__init__(format) - super(AtomType,self).__init__(format) - - self.atype = None - self.atnum = None - self.mass = None + self.atype = None + self.atnum = None + self.mass = None self.charge = None self.bond_type = None - self.charmm = {'param': {'lje':None, 'ljl':None, 'lje14':None, 'ljl14':None} } - self.gromacs= {'param': {'lje':None, 'ljl':None, 'lje14':None, 'ljl14':None} } + self.charmm = { + "param": {"lje": None, "ljl": None, "lje14": None, "ljl14": None} + } + self.gromacs = { + "param": {"lje": None, "ljl": None, "lje14": None, "ljl14": None} + } def __eq__(self, other): - return \ - self.atype == other.atype and \ - self.atnum == other.atnum and \ - self.mass == other.mass and \ - self.charge == other.charge and \ - self.bond_type == other.bond_type and \ - self.charmm == other.charmm + return ( + self.atype == other.atype + and self.atnum == other.atnum + and self.mass == other.mass + and self.charge == other.charge + and self.bond_type == other.bond_type + and self.charmm == other.charmm + ) def __repr__(self): - return '<{0!s} {1!s} m={2:g} q={3:g} (gromacs:{4!s})>'.format( - self.__class__.__name__, self.atype, self.mass, self.charge, self.gromacs) + return "<{0!s} {1!s} m={2:g} q={3:g} (gromacs:{4!s})>".format( + self.__class__.__name__, self.atype, self.mass, self.charge, self.gromacs + ) class BondType(Param): def __init__(self, format): - - super(BondType,self).__init__(format) + super(BondType, self).__init__(format) self.atom1 = None self.atom2 = None @@ -421,21 +429,21 @@ def __init__(self, format): self.atype1 = None self.atype2 = None - self.charmm = {'param': {'kb':None, 'b0':None} } - self.gromacs= {'param': {'kb':None, 'b0':None}, 'func':None} + self.charmm = {"param": {"kb": None, "b0": None}} + self.gromacs = {"param": {"kb": None, "b0": None}, "func": None} def __eq__(self, other): - return \ - self.atype1 == other.atype1 and \ - self.atype2 == other.atype2 and \ - self.gromacs == other.gromacs and \ - self.charmm == other.charmm + return ( + self.atype1 == other.atype1 + and self.atype2 == other.atype2 + and self.gromacs == other.gromacs + and self.charmm == other.charmm + ) class 
AngleType(Param): def __init__(self, format): - - super(AngleType,self).__init__(format) + super(AngleType, self).__init__(format) self.atom1 = None self.atom2 = None @@ -445,21 +453,27 @@ def __init__(self, format): self.atype2 = None self.atype3 = None - self.charmm = {'param':{'ktetha':None, 'tetha0':None, 'kub':None, 's0':None} } - self.gromacs= {'param':{'ktetha':None, 'tetha0':None, 'kub':None, 's0':None}, 'func':None} + self.charmm = { + "param": {"ktetha": None, "tetha0": None, "kub": None, "s0": None} + } + self.gromacs = { + "param": {"ktetha": None, "tetha0": None, "kub": None, "s0": None}, + "func": None, + } def __eq__(self, other): - return \ - self.atype1 == other.atype1 and \ - self.atype2 == other.atype2 and \ - self.atype3 == other.atype3 and \ - self.gromacs == other.gromacs and \ - self.charmm == other.charmm + return ( + self.atype1 == other.atype1 + and self.atype2 == other.atype2 + and self.atype3 == other.atype3 + and self.gromacs == other.gromacs + and self.charmm == other.charmm + ) + class DihedralType(Param): def __init__(self, format): - - super(DihedralType,self).__init__(format) + super(DihedralType, self).__init__(format) self.atom1 = None self.atom2 = None @@ -471,44 +485,46 @@ def __init__(self, format): self.atype3 = None self.atype4 = None - self.charmm = {'param':[]} # {kchi, n, delta} - self.gromacs= {'param':[]} + self.charmm = {"param": []} # {kchi, n, delta} + self.gromacs = {"param": []} def __eq__(self, other): - return \ - self.atype1 == other.atype1 and \ - self.atype2 == other.atype2 and \ - self.atype3 == other.atype3 and \ - self.atype4 == other.atype4 and \ - self.gromacs == other.gromacs and \ - self.charmm == other.charmm + return ( + self.atype1 == other.atype1 + and self.atype2 == other.atype2 + and self.atype3 == other.atype3 + and self.atype4 == other.atype4 + and self.gromacs == other.gromacs + and self.charmm == other.charmm + ) + class ImproperType(Param): def __init__(self, format): - - super(ImproperType,self).__init__(format) + super(ImproperType, self).__init__(format) self.atype1 = None self.atype2 = None self.atype3 = None self.atype4 = None - self.charmm = {'param':[]} - self.gromacs= {'param':[], 'func': None} # {'kpsi': None, 'psi0':None} + self.charmm = {"param": []} + self.gromacs = {"param": [], "func": None} # {'kpsi': None, 'psi0':None} def __eq__(self, other): - return \ - self.atype1 == other.atype1 and \ - self.atype2 == other.atype2 and \ - self.atype3 == other.atype3 and \ - self.atype4 == other.atype4 and \ - self.gromacs == other.gromacs and \ - self.charmm == other.charmm + return ( + self.atype1 == other.atype1 + and self.atype2 == other.atype2 + and self.atype3 == other.atype3 + and self.atype4 == other.atype4 + and self.gromacs == other.gromacs + and self.charmm == other.charmm + ) + class CMapType(Param): def __init__(self, format): - - super(CMapType,self).__init__(format) + super(CMapType, self).__init__(format) self.atom1 = None self.atom2 = None @@ -528,62 +544,70 @@ def __init__(self, format): self.atype7 = None self.atype8 = None - self.charmm = {'param': []} - self.gromacs= {'param': []} + self.charmm = {"param": []} + self.gromacs = {"param": []} def __eq__(self, other): - return \ - self.atype1 == other.atype1 and \ - self.atype2 == other.atype2 and \ - self.atype3 == other.atype3 and \ - self.atype4 == other.atype4 and \ - self.atype5 == other.atype5 and \ - self.atype6 == other.atype6 and \ - self.atype7 == other.atype7 and \ - self.atype8 == other.atype8 and \ - self.gromacs == other.gromacs and 
\ - self.charmm == other.charmm + return ( + self.atype1 == other.atype1 + and self.atype2 == other.atype2 + and self.atype3 == other.atype3 + and self.atype4 == other.atype4 + and self.atype5 == other.atype5 + and self.atype6 == other.atype6 + and self.atype7 == other.atype7 + and self.atype8 == other.atype8 + and self.gromacs == other.gromacs + and self.charmm == other.charmm + ) + class InteractionType(Param): def __init__(self, format): + super(InteractionType, self).__init__(format) - super(InteractionType,self).__init__(format) - - self.atom1 = None - self.atom2 = None + self.atom1 = None + self.atom2 = None self.atype1 = None self.atype2 = None - self.charmm = {'param': {'lje':None, 'ljl':None, 'lje14':None, 'ljl14':None} } - self.gromacs= {'param': {'lje':None, 'ljl':None, 'lje14':None, 'ljl14':None}, 'func':None } + self.charmm = { + "param": {"lje": None, "ljl": None, "lje14": None, "ljl14": None} + } + self.gromacs = { + "param": {"lje": None, "ljl": None, "lje14": None, "ljl14": None}, + "func": None, + } def __eq__(self, other): - return \ - self.atype1 == other.atype1 and \ - self.atype2 == other.atype2 and \ - self.gromacs == other.gromacs and \ - self.charmm == other.charmm + return ( + self.atype1 == other.atype1 + and self.atype2 == other.atype2 + and self.gromacs == other.gromacs + and self.charmm == other.charmm + ) def __repr__(self): - return '<{0!s} {1!s} {2!s} (gromacs:{3!s})>'.format( - self.__class__.__name__, self.atype1, self.atype2, self.gromacs) + return "<{0!s} {1!s} {2!s} (gromacs:{3!s})>".format( + self.__class__.__name__, self.atype1, self.atype2, self.gromacs + ) class SettleType(Param): def __init__(self, format): - assert format in ('gromacs',) - super(SettleType,self).__init__(format) + assert format in ("gromacs",) + super(SettleType, self).__init__(format) self.atom = None - self.dOH = None - self.dHH = None + self.dOH = None + self.dHH = None class ConstraintType(Param): def __init__(self, format): - assert format in ('gromacs',) - super(ConstraintType,self).__init__(format) + assert format in ("gromacs",) + super(ConstraintType, self).__init__(format) self.atom1 = None self.atom2 = None @@ -591,45 +615,47 @@ def __init__(self, format): self.atype1 = None self.atype2 = None - self.gromacs= {'param': {'b0':None}, 'func':None} + self.gromacs = {"param": {"b0": None}, "func": None} def __eq__(self, other): - return \ - self.atype1 == other.atype1 and \ - self.atype2 == other.atype2 and \ - self.gromacs == other.gromacs and \ - self.charmm == other.charmm + return ( + self.atype1 == other.atype1 + and self.atype2 == other.atype2 + and self.gromacs == other.gromacs + and self.charmm == other.charmm + ) class NonbondedParamType(Param): def __init__(self, format): - assert format in ('gromacs',) - super(NonbondedParamType,self).__init__(format) + assert format in ("gromacs",) + super(NonbondedParamType, self).__init__(format) self.atype1 = None self.atype2 = None - self.gromacs= {'param': {'eps':None, 'sig':None}, 'func':None} + self.gromacs = {"param": {"eps": None, "sig": None}, "func": None} def __eq__(self, other): - return \ - self.atype1 == other.atype1 and \ - self.atype2 == other.atype2 and \ - self.gromacs == other.gromacs and \ - self.charmm == other.charmm + return ( + self.atype1 == other.atype1 + and self.atype2 == other.atype2 + and self.gromacs == other.gromacs + and self.charmm == other.charmm + ) class VirtualSites3Type(Param): def __init__(self, format): - assert format in ('gromacs',) - super(VirtualSites3Type,self).__init__(format) + 
assert format in ("gromacs",) + super(VirtualSites3Type, self).__init__(format) self.atom1 = None self.atom2 = None self.atom3 = None self.atom4 = None - self.gromacs= {'param': {'a':None, 'b': None}, 'func':None} + self.gromacs = {"param": {"a": None, "b": None}, "func": None} class Exclusion(object): @@ -640,6 +666,7 @@ class Exclusion(object): Does not inherit from :class:`Param` unlike other classes in :mod:`blocks` """ + def __init__(self): - self.main_atom = None + self.main_atom = None self.other_atoms = [] diff --git a/gromacs/fileformats/convert.py b/gromacs/fileformats/convert.py index f6c62748..3b1dc543 100644 --- a/gromacs/fileformats/convert.py +++ b/gromacs/fileformats/convert.py @@ -42,6 +42,7 @@ import re + def to_unicode(obj): """Convert obj to unicode (if it can be be converted). @@ -61,6 +62,7 @@ def to_unicode(obj): pass return obj + class Autoconverter(object): """Automatically convert an input value to a special python object. @@ -102,7 +104,7 @@ class Autoconverter(object): """ - def __init__(self, mode="fancy", mapping=None, active=True, sep=False, **kwargs): + def __init__(self, mode="fancy", mapping=None, active=True, sep=False, **kwargs): """Initialize the converter. :Arguments: @@ -137,35 +139,54 @@ def __init__(self, mode="fancy", mapping=None, active=True, sep=False, **kwargs removed *encoding* keyword argument """ - self._convertors = {'unicode': to_unicode, - 'simple': besttype, - 'singlet': self._convert_singlet, - 'fancy': self._convert_fancy, - } + self._convertors = { + "unicode": to_unicode, + "simple": besttype, + "singlet": self._convert_singlet, + "fancy": self._convert_fancy, + } self.convert = None # convertor function; set when self.active <-- True. if mapping is None: - mapping = {'---': None, '':None, - 'True':True, 'x': True, 'X':True, 'yes':True, 'Present':True, 'present':True, - 'False':False, 'no': False, '-':False, 'None':False, 'none':False, } + mapping = { + "---": None, + "": None, + "True": True, + "x": True, + "X": True, + "yes": True, + "Present": True, + "present": True, + "False": False, + "no": False, + "-": False, + "None": False, + "none": False, + } self.mapping = mapping self.mode = mode self.__active = None - self.active = kwargs.pop('autoconvert', active) # 'autoconvert' is a "strong" alias of 'active' + self.active = kwargs.pop( + "autoconvert", active + ) # 'autoconvert' is a "strong" alias of 'active' if sep is True: - sep = None # split on *all* white space, sep=' ' splits single spaces! + sep = None # split on *all* white space, sep=' ' splits single spaces! self.sep = sep def active(): doc = """Toggle the state of the Autoconverter. ``True`` uses the mode, ``False`` does nothing""" + def fget(self): return self.__active + def fset(self, x): self.__active = x if self.__active: self.convert = self._convertors[self.mode] else: - self.convert = lambda x: x # do nothing + self.convert = lambda x: x # do nothing + return locals() + active = property(**active()) def _convert_singlet(self, s): @@ -182,12 +203,13 @@ def _convert_fancy(self, field): else: x = tuple([self._convert_singlet(s) for s in field.split(self.sep)]) if len(x) == 0: - x = '' + x = "" elif len(x) == 1: x = x[0] - #print "%r --> %r" % (field, x) + # print "%r --> %r" % (field, x) return x + def besttype(x): """Convert string x to the most useful type, i.e. int, float or unicode string. 
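The conversion helpers are easiest to see with a few concrete values. A minimal sketch, assuming that the "fancy" converter consults the default mapping set up in __init__ and falls back to besttype() for anything unmapped::

    from gromacs.fileformats.convert import Autoconverter, besttype

    besttype("42")        # -> 42 (int)
    besttype("3.14")      # -> 3.14 (float)
    besttype("'quoted'")  # -> 'quoted' (surrounding quotes stripped, kept as text)

    conv = Autoconverter(mode="fancy", sep=True)  # sep=True: split fields on whitespace
    conv.convert("yes")        # -> True   (via the default mapping)
    conv.convert("---")        # -> None
    conv.convert("1 2.5 no")   # -> (1, 2.5, False)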
@@ -209,41 +231,49 @@ def besttype(x):
     m = re.match(r"""['"](?P<value>.*)["']$""", x)
     if m is None:
         # not a quoted string, try different types
-        for converter in int, float, to_unicode: # try them in increasing order of lenience
+        for converter in (
+            int,
+            float,
+            to_unicode,
+        ):  # try them in increasing order of lenience
             try:
                 return converter(x)
             except ValueError:
                 pass
     else:
         # quoted string
-        x = to_unicode(m.group('value'))
+        x = to_unicode(m.group("value"))
     return x
 
 
 def to_int64(a):
     """Return view of the recarray with all int32 cast to int64."""
+
     # build new dtype and replace i4 --> i8
     def promote_i4(typestr):
-        if typestr[1:] == 'i4':
-            typestr = typestr[0]+'i8'
+        if typestr[1:] == "i4":
+            typestr = typestr[0] + "i8"
         return typestr
 
-    dtype = [(name, promote_i4(typestr)) for name,typestr in a.dtype.descr]
+    dtype = [(name, promote_i4(typestr)) for name, typestr in a.dtype.descr]
     return a.astype(dtype)
 
+
 def pyify(typestr):
-    if typestr[1] in 'iu':
+    if typestr[1] in "iu":
         return int
-    elif typestr[1] == 'f':
+    elif typestr[1] == "f":
         return float
-    elif typestr[1] == 'S':
+    elif typestr[1] == "S":
         return str
     return lambda x: x
 
+
 def to_pytypes(a):
-    dtype = [(name, pyify(typestr)) for name,typestr in a.dtype.descr]
+    dtype = [(name, pyify(typestr)) for name, typestr in a.dtype.descr]
     return a.astype(dtype)
 
+
 def irecarray_to_py(a):
     """Slow conversion of a recarray into a list of records with python types.
 
@@ -251,7 +281,9 @@ def irecarray_to_py(a):
     :Returns: iterator so that one can handle big input arrays
     """
-    pytypes = [pyify(typestr) for name,typestr in a.dtype.descr]
+    pytypes = [pyify(typestr) for name, typestr in a.dtype.descr]
+
     def convert_record(r):
-        return tuple([converter(value) for converter, value in zip(pytypes,r)])
+        return tuple([converter(value) for converter, value in zip(pytypes, r)])
+
     return (convert_record(r) for r in a)
diff --git a/gromacs/fileformats/mdp.py b/gromacs/fileformats/mdp.py
index 3a65ca35..d1a07277 100644
--- a/gromacs/fileformats/mdp.py
+++ b/gromacs/fileformats/mdp.py
@@ -33,6 +33,7 @@
 
 import logging
 
+
 class MDP(odict, utilities.FileUtils):
     """Class that represents a Gromacs mdp run input file.
 
@@ -53,16 +54,20 @@ class MDP(odict, utilities.FileUtils):
     :func:`gromacs.cbook.edit_mdp` (which works like a poor replacement for
     sed).
     """
+
     default_extension = "mdp"
-    logger = logging.getLogger('gromacs.formats.MDP')
+    logger = logging.getLogger("gromacs.formats.MDP")
 
-    COMMENT = re.compile("""\s*;\s*(?P<value>.*)""")   # eat initial ws
+    COMMENT = re.compile("""\s*;\s*(?P<value>.*)""")  # eat initial ws
     # see regex in cbook.edit_mdp()
-    PARAMETER = re.compile("""
+    PARAMETER = re.compile(
+        """
                             \s*(?P<parameter>[^=]+?)\s*=\s*  # parameter (ws-stripped), before '='
                             (?P<value>[^;]*)            # value (stop before comment=;)
                             (?P<comment>\s*;.*)?        # optional comment
-                            """, re.VERBOSE)
+                            """,
+        re.VERBOSE,
+    )
 
     def __init__(self, filename=None, autoconvert=True, **kwargs):
         """Initialize mdp structure.
@@ -78,7 +83,9 @@ def __init__(self, filename=None, autoconvert=True, **kwargs):
            does not work for keys that are not legal python variable names such
            as anything that includes a minus '-' sign or starts with a number).
         """
-        super(MDP, self).__init__(**kwargs)  # can use kwargs to set dict! (but no sanity checks!)
+        super(MDP, self).__init__(
+            **kwargs
+        )  # can use kwargs to set dict! (but no sanity checks!)
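# A short sketch of the typical MDP round trip with the read()/write() methods
# defined below; "em.mdp" is a hypothetical input file, and it is assumed that
# the constructor reads the file when given a filename (otherwise call read()
# explicitly).

from gromacs.fileformats import MDP

mdp = MDP("em.mdp")       # parameters, comments and blank lines become keys
mdp["nsteps"] = 50000     # edit or add parameters like dictionary entries
mdp["integrator"] = "md"
mdp.write("md.mdp")       # dumped in the recorded order to a new file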
self.autoconvert = autoconvert @@ -98,6 +105,7 @@ def read(self, filename=None): def BLANK(i): return "B{0:04d}".format(i) + def COMMENT(i): return "C{0:04d}".format(i) @@ -108,27 +116,28 @@ def COMMENT(i): line = line.strip() if len(line) == 0: iblank += 1 - data[BLANK(iblank)] = '' + data[BLANK(iblank)] = "" continue m = self.COMMENT.match(line) if m: icomment += 1 - data[COMMENT(icomment)] = m.group('value') + data[COMMENT(icomment)] = m.group("value") continue # parameter m = self.PARAMETER.match(line) if m: # check for comments after parameter?? -- currently discarded - parameter = m.group('parameter') - value = self._transform(m.group('value')) + parameter = m.group("parameter") + value = self._transform(m.group("value")) data[parameter] = value else: - errmsg = '{filename!r}: unknown line in mdp file, {line!r}'.format(**vars()) + errmsg = "{filename!r}: unknown line in mdp file, {line!r}".format( + **vars() + ) self.logger.error(errmsg) raise ParseError(errmsg) - super(MDP,self).update(data) - + super(MDP, self).update(data) def write(self, filename=None, skipempty=False): """Write mdp file to *filename*. @@ -145,16 +154,16 @@ def write(self, filename=None, skipempty=False): *filename* supplied. """ - with open(self.filename(filename, ext='mdp'), 'w') as mdp: - for k,v in self.items(): - if k[0] == 'B': # blank line + with open(self.filename(filename, ext="mdp"), "w") as mdp: + for k, v in self.items(): + if k[0] == "B": # blank line mdp.write("\n") - elif k[0] == 'C': # comment + elif k[0] == "C": # comment mdp.write("; {v!s}\n".format(**vars())) - else: # parameter = value - if skipempty and (v == '' or v is None): + else: # parameter = value + if skipempty and (v == "" or v is None): continue - if isinstance(v, six.string_types) or not hasattr(v, '__iter__'): + if isinstance(v, six.string_types) or not hasattr(v, "__iter__"): mdp.write("{k!s} = {v!s}\n".format(**vars())) else: - mdp.write("{} = {}\n".format(k,' '.join(map(str, v)))) + mdp.write("{} = {}\n".format(k, " ".join(map(str, v)))) diff --git a/gromacs/fileformats/ndx.py b/gromacs/fileformats/ndx.py index 287d3d4a..69150e27 100644 --- a/gromacs/fileformats/ndx.py +++ b/gromacs/fileformats/ndx.py @@ -40,6 +40,7 @@ import logging + class NDX(odict, utilities.FileUtils): """Gromacs index file. @@ -81,6 +82,7 @@ class NDX(odict, utilities.FileUtils): ndx.write() """ + default_extension = "ndx" # match: [ index_groupname ] @@ -89,10 +91,12 @@ class NDX(odict, utilities.FileUtils): #: standard ndx file format: 15 columns ncol = 15 #: standard ndx file format: '%6d' - format = '%6d' + format = "%6d" def __init__(self, filename=None, **kwargs): - super(NDX, self).__init__(**kwargs) # can use kwargs to set dict! (but no sanity checks!) + super(NDX, self).__init__( + **kwargs + ) # can use kwargs to set dict! (but no sanity checks!) 
if filename is not None: self._init_filename(filename) @@ -111,26 +115,34 @@ def read(self, filename=None): continue m = self.SECTION.match(line) if m: - current_section = m.group('name') + current_section = m.group("name") data[current_section] = [] # can fail if name not legal python key continue if current_section is not None: data[current_section].extend(map(int, line.split())) - super(NDX,self).update(odict([(name, self._transform(atomnumbers)) - for name, atomnumbers in data.items()])) + super(NDX, self).update( + odict( + [ + (name, self._transform(atomnumbers)) + for name, atomnumbers in data.items() + ] + ) + ) def write(self, filename=None, ncol=ncol, format=format): """Write index file to *filename* (or overwrite the file that the index was read from)""" - with open(self.filename(filename, ext='ndx'), 'w') as ndx: + with open(self.filename(filename, ext="ndx"), "w") as ndx: for name in self: atomnumbers = self._getarray(name) # allows overriding - ndx.write('[ {0!s} ]\n'.format(name)) + ndx.write("[ {0!s} ]\n".format(name)) for k in range(0, len(atomnumbers), ncol): - line = atomnumbers[k:k+ncol].astype(int) # nice formatting in ncol-blocks + line = atomnumbers[k : k + ncol].astype( + int + ) # nice formatting in ncol-blocks n = len(line) - ndx.write((" ".join(n*[format])+'\n') % tuple(line)) - ndx.write('\n') + ndx.write((" ".join(n * [format]) + "\n") % tuple(line)) + ndx.write("\n") def get(self, name): """Return index array for index group *name*.""" @@ -161,8 +173,10 @@ def ndxlist(self): Format: [ {'name': group_name, 'natoms': number_atoms, 'nr': # group_number}, ....] """ - return [{'name': name, 'natoms': len(atomnumbers), 'nr': nr+1} for - nr,(name,atomnumbers) in enumerate(self.items())] + return [ + {"name": name, "natoms": len(atomnumbers), "nr": nr + 1} + for nr, (name, atomnumbers) in enumerate(self.items()) + ] def _getarray(self, name): """Helper getter that is used in write(). @@ -181,14 +195,16 @@ def _transform(self, v): def __setitem__(self, k, v): super(NDX, self).__setitem__(k, self._transform(v)) - def setdefault(*args,**kwargs): + def setdefault(*args, **kwargs): raise NotImplementedError class IndexSet(set): """set which defines '+' as union (OR) and '-' as intersection (AND).""" + def __add__(self, x): return self.union(x) + def __sub__(self, x): return self.intersection(x) @@ -231,8 +247,7 @@ def _transform(self, v): return IndexSet(v) def _getarray(self, k): - return numpy.sort(numpy.fromiter(self[k],dtype=int,count=len(self[k]))) - + return numpy.sort(numpy.fromiter(self[k], dtype=int, count=len(self[k]))) # or use list of these? diff --git a/gromacs/fileformats/top.py b/gromacs/fileformats/top.py index cdcc590a..7fd0f879 100644 --- a/gromacs/fileformats/top.py +++ b/gromacs/fileformats/top.py @@ -59,6 +59,7 @@ from . import blocks + class TOP(blocks.System): """Class to make a TOP object from a GROMACS processed.top file @@ -70,8 +71,9 @@ class TOP(blocks.System): are supported - the usual topol.top files are not supported (yet!) """ + default_extension = "top" - logger = logging.getLogger('gromacs.fileformats.TOP') + logger = logging.getLogger("gromacs.fileformats.TOP") def __init__(self, fname): """Initialize the TOP structure. 
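Because TOP only accepts stand-alone (pre-processed) topologies, a typical workflow first lets grompp expand all #include statements. A minimal sketch with hypothetical file names::

    import gromacs
    from gromacs.fileformats import TOP

    # write a fully expanded topology alongside the run input file (grompp -pp)
    gromacs.grompp(f="md.mdp", c="conf.gro", p="topol.top",
                   pp="processed.top", o="md.tpr")

    top = TOP("processed.top")
    print(top)                         # summary table of parameter/molecule counts
    print(list(top.dict_molname_mol))  # molecule type names found in the topology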
@@ -85,12 +87,16 @@ def __init__(self, fname): self.fname = fname self.defaults = { - 'nbfunc': None, 'comb-rule':None, 'gen-pairs':None, 'fudgeLJ':None, 'fudgeQQ':None, + "nbfunc": None, + "comb-rule": None, + "gen-pairs": None, + "fudgeLJ": None, + "fudgeQQ": None, } - self.dict_molname_mol = odict() # contains molname:mol - self.found_sections = [] - self.forcefield = 'gromacs' + self.dict_molname_mol = odict() # contains molname:mol + self.found_sections = [] + self.forcefield = "gromacs" self.molecules = [] self._parse(fname) @@ -106,48 +112,63 @@ def __repr__(self): moltypenames.sort() data = [] - data.append('\n') - - main_items = set(['atomtypes', 'pairtypes', 'bondtypes', 'angletypes', 'dihedraltypes']) - other_items = ['{0:s} ({1:d})'.format(m, len(self.information[m])) for m in list(self.information.keys()) if m not in main_items] - other_items = ' '.join(other_items) + data.append("\n") + + main_items = set( + ["atomtypes", "pairtypes", "bondtypes", "angletypes", "dihedraltypes"] + ) + other_items = [ + "{0:s} ({1:d})".format(m, len(self.information[m])) + for m in list(self.information.keys()) + if m not in main_items + ] + other_items = " ".join(other_items) nattype = len(self.atomtypes) nprtype = len(self.pairtypes) - nbndtype= len(self.bondtypes) - nangtype= len(self.angletypes) - ndihtype= len(self.dihedraltypes) - nimptype= len(self.impropertypes) - data.append('{0:>20s} {1:>7s} {2:>7s} {3:>7s} {4:>7s} {5:>7s} {6:>7s}'.format('Param types:', 'atom', 'pair', 'bond', 'ang', 'dih', 'imp')) - msg = '{0:20s} {1:7d} {2:7d} {3:7d} {4:7d} {5:7d} {6:7d} {7:s}'.format('', nattype, nprtype, nbndtype, nangtype, ndihtype, nimptype, other_items) - data.append('=' * 69) + nbndtype = len(self.bondtypes) + nangtype = len(self.angletypes) + ndihtype = len(self.dihedraltypes) + nimptype = len(self.impropertypes) + data.append( + "{0:>20s} {1:>7s} {2:>7s} {3:>7s} {4:>7s} {5:>7s} {6:>7s}".format( + "Param types:", "atom", "pair", "bond", "ang", "dih", "imp" + ) + ) + msg = "{0:20s} {1:7d} {2:7d} {3:7d} {4:7d} {5:7d} {6:7d} {7:s}".format( + "", nattype, nprtype, nbndtype, nangtype, ndihtype, nimptype, other_items + ) + data.append("=" * 69) data.append(msg) - data.append('\n') - - - main_items = set(['atoms', 'pairs', 'bonds', 'angles', 'dihedrals']) - data.append('{0:>20s} {1:>7s} {2:>7s} {3:>7s} {4:>7s} {5:>7s} {6:>7s}'.format('Params:', 'atom', 'pair', 'bond', 'ang', 'dih', 'imp')) - data.append('=' * 69) + data.append("\n") + + main_items = set(["atoms", "pairs", "bonds", "angles", "dihedrals"]) + data.append( + "{0:>20s} {1:>7s} {2:>7s} {3:>7s} {4:>7s} {5:>7s} {6:>7s}".format( + "Params:", "atom", "pair", "bond", "ang", "dih", "imp" + ) + ) + data.append("=" * 69) for mname in moltypenames: mol = self.dict_molname_mol[mname] - other_items = ['{0:s} ({1:d})'.format(m, len(mol.information[m])) for m in list(mol.information.keys()) if m not in main_items] - other_items = ' '.join(other_items) + other_items = [ + "{0:s} ({1:d})".format(m, len(mol.information[m])) + for m in list(mol.information.keys()) + if m not in main_items + ] + other_items = " ".join(other_items) natoms = len(mol.atoms) npairs = len(mol.pairs) nbonds = len(mol.bonds) - nangles= len(mol.angles) - ndih = len(mol.dihedrals) - nimp = len(mol.impropers) - msg = '{0:20s} {1:7d} {2:7d} {3:7d} {4:7d} {5:7d} {6:7d} {7:s}'.format(mol.name, natoms, npairs, nbonds, nangles, ndih, nimp, other_items) + nangles = len(mol.angles) + ndih = len(mol.dihedrals) + nimp = len(mol.impropers) + msg = "{0:20s} {1:7d} {2:7d} {3:7d} {4:7d} {5:7d} 
{6:7d} {7:s}".format( + mol.name, natoms, npairs, nbonds, nangles, ndih, nimp, other_items + ) data.append(msg) - - - - return '\n'.join(data) - - - + return "\n".join(data) def _parse(self, fname): """Parse a processed.top GROMACS topology file @@ -172,97 +193,104 @@ def _parse(self, fname): :Returns: None """ + def _find_section(line): - return line.strip('[').strip(']').strip() + return line.strip("[").strip("]").strip() def _add_info(sys_or_mol, section, container): # like (mol, 'atomtypes', mol.atomtypes) if sys_or_mol.information.get(section, False) is False: sys_or_mol.information[section] = container - mol = None # to hold the current mol - curr_sec = None + mol = None # to hold the current mol + curr_sec = None cmap_lines = [] with open(fname) as f: for i_line, line in enumerate(f): - # trimming - if ';' in line: - line = line[0:line.index(';')] + if ";" in line: + line = line[0 : line.index(";")] line = line.strip() - if line == '': + if line == "": continue - if line[0] == '*': + if line[0] == "*": continue # the topology must be stand-alone (i.e. no includes) - if line.startswith('#include'): + if line.startswith("#include"): msg = 'The topology file has "#include" statements.' - msg+= ' You must provide a processed topology file that grompp creates.' + msg += " You must provide a processed topology file that grompp creates." raise ValueError(msg) # find sections - if line[0] == '[': + if line[0] == "[": curr_sec = _find_section(line) self.found_sections.append(curr_sec) continue fields = line.split() - if curr_sec == 'defaults': - ''' + if curr_sec == "defaults": + """ # ; nbfunc comb-rule gen-pairs fudgeLJ fudgeQQ #1 2 yes 0.5 0.8333 - ''' - assert len(fields) in [2, 5] - self.defaults['nbfunc'] = int(fields[0]) - self.defaults['comb-rule'] = int(fields[1]) + """ + assert len(fields) in [2, 5] + self.defaults["nbfunc"] = int(fields[0]) + self.defaults["comb-rule"] = int(fields[1]) if len(fields) == 5: + self.defaults["gen-pairs"] = fields[2] + self.defaults["fudgeLJ"] = float(fields[3]) + self.defaults["fudgeQQ"] = float(fields[4]) - self.defaults['gen-pairs'] = fields[2] - self.defaults['fudgeLJ'] = float(fields[3]) - self.defaults['fudgeQQ'] = float(fields[4]) - - elif curr_sec == 'atomtypes': - ''' + elif curr_sec == "atomtypes": + """ # ;name at.num mass charge ptype sigma epsilon # ;name bond_type at.num mass charge ptype sigma epsilon # ;name mass charge ptype c6 c12 - ''' - if len(fields) not in (6,7,8): - self.logger.warning('skipping atomtype line with neither 7 or 8 fields: \n {0:s}'.format(line)) + """ + if len(fields) not in (6, 7, 8): + self.logger.warning( + "skipping atomtype line with neither 7 or 8 fields: \n {0:s}".format( + line + ) + ) continue - #shift = 0 if len(fields) == 7 else 1 + # shift = 0 if len(fields) == 7 else 1 shift = len(fields) - 7 - at = blocks.AtomType('gromacs') + at = blocks.AtomType("gromacs") at.atype = fields[0] - if shift == 1: at.bond_type = fields[1] + if shift == 1: + at.bond_type = fields[1] - at.mass = float(fields[2+shift]) - at.charge= float(fields[3+shift]) + at.mass = float(fields[2 + shift]) + at.charge = float(fields[3 + shift]) - particletype = fields[4+shift] - assert particletype in ('A', 'S', 'V', 'D') - if particletype not in ('A',): - self.logger.warning('warning: non-atom particletype: "{0:s}"'.format(line)) + particletype = fields[4 + shift] + assert particletype in ("A", "S", "V", "D") + if particletype not in ("A",): + self.logger.warning( + 'warning: non-atom particletype: "{0:s}"'.format(line) + ) - sig = 
float(fields[5+shift]) - eps = float(fields[6+shift]) + sig = float(fields[5 + shift]) + eps = float(fields[6 + shift]) - at.gromacs= {'param': {'lje':eps, 'ljl':sig, 'lje14':None, 'ljl14':None} } + at.gromacs = { + "param": {"lje": eps, "ljl": sig, "lje14": None, "ljl14": None} + } self.atomtypes.append(at) _add_info(self, curr_sec, self.atomtypes) - # extend system.molecules - elif curr_sec == 'moleculetype': + elif curr_sec == "moleculetype": assert len(fields) == 2 mol = blocks.Molecule() @@ -272,9 +300,8 @@ def _add_info(sys_or_mol, section, container): self.dict_molname_mol[mol.name] = mol - - elif curr_sec == 'atoms': - ''' + elif curr_sec == "atoms": + """ #id at_type res_nr residu_name at_name cg_nr charge mass typeB chargeB massB # 1 OC 1 OH O1 1 -1.32 @@ -284,24 +311,24 @@ def _add_info(sys_or_mol, section, container): ; id at type res nr residu name at name cg nr charge 1 OT 1 SOL OW 1 -0.834 - ''' + """ aserial = int(fields[0]) - atype = fields[1] + atype = fields[1] resnumb = int(fields[2]) resname = fields[3] - aname = fields[4] - cgnr = int(fields[5]) - charge = float(fields[6]) + aname = fields[4] + cgnr = int(fields[5]) + charge = float(fields[6]) rest = fields[7:] - atom = blocks.Atom() - atom.name = aname - atom.atomtype= atype - atom.number = aserial + atom = blocks.Atom() + atom.name = aname + atom.atomtype = atype + atom.number = aserial atom.resname = resname atom.resnumb = resnumb - atom.charge = charge + atom.charge = charge if rest: mass = float(rest[0]) @@ -311,36 +338,44 @@ def _add_info(sys_or_mol, section, container): _add_info(mol, curr_sec, mol.atoms) - elif curr_sec in ('pairtypes', 'pairs', 'pairs_nb'): - ''' + elif curr_sec in ("pairtypes", "pairs", "pairs_nb"): + """ section #at fu #param --------------------------------- pairs 2 1 V,W pairs 2 2 fudgeQQ, qi, qj, V, W pairs_nb 2 1 qi, qj, V, W - ''' + """ ai, aj = fields[:2] - fu = int(fields[2]) - assert fu in (1,2) + fu = int(fields[2]) + assert fu in (1, 2) - pair = blocks.InteractionType('gromacs') + pair = blocks.InteractionType("gromacs") if fu == 1: - if curr_sec=='pairtypes': + if curr_sec == "pairtypes": pair.atype1 = ai pair.atype2 = aj v, w = list(map(float, fields[3:5])) - pair.gromacs = {'param': {'lje':None, 'ljl':None, 'lje14':w, 'ljl14':v}, 'func':fu } + pair.gromacs = { + "param": { + "lje": None, + "ljl": None, + "lje14": w, + "ljl14": v, + }, + "func": fu, + } self.pairtypes.append(pair) _add_info(self, curr_sec, self.pairtypes) - elif curr_sec == 'pairs': - ai, aj = list( map(int, [ai,aj]) ) - pair.atom1 = mol.atoms[ai-1] - pair.atom2 = mol.atoms[aj-1] - pair.gromacs['func'] = fu + elif curr_sec == "pairs": + ai, aj = list(map(int, [ai, aj])) + pair.atom1 = mol.atoms[ai - 1] + pair.atom2 = mol.atoms[aj - 1] + pair.gromacs["func"] = fu mol.pairs.append(pair) _add_info(mol, curr_sec, mol.pairs) @@ -349,34 +384,38 @@ def _add_info(sys_or_mol, section, container): raise ValueError else: - raise NotImplementedError('{0:s} with functiontype {1:d} is not supported'.format(curr_sec,fu)) - - elif curr_sec == 'nonbond_params': - ''' + raise NotImplementedError( + "{0:s} with functiontype {1:d} is not supported".format( + curr_sec, fu + ) + ) + + elif curr_sec == "nonbond_params": + """ ; typei typej f.type sigma epsilon ; f.type=1 means LJ (not buckingham) ; sigma&eps since mixing-rule = 2 - ''' + """ assert len(fields) == 5 ai, aj = fields[:2] - fu = int(fields[2]) + fu = int(fields[2]) assert fu == 1 - sig = float(fields[3]) - eps = float(fields[4]) + sig = float(fields[3]) + eps = 
float(fields[4]) - nonbond_param = blocks.NonbondedParamType('gromacs') + nonbond_param = blocks.NonbondedParamType("gromacs") nonbond_param.atype1 = ai nonbond_param.atype2 = aj - nonbond_param.gromacs['func'] = fu - nonbond_param.gromacs['param'] = {'eps': eps, 'sig': sig} + nonbond_param.gromacs["func"] = fu + nonbond_param.gromacs["param"] = {"eps": eps, "sig": sig} self.nonbond_params.append(nonbond_param) _add_info(self, curr_sec, self.nonbond_params) - elif curr_sec in ('bondtypes', 'bonds'): - ''' + elif curr_sec in ("bondtypes", "bonds"): + """ section #at fu #param ---------------------------------- bonds 2 1 2 @@ -389,37 +428,42 @@ def _add_info(sys_or_mol, section, container): bonds 2 8 ?? bonds 2 9 ?? bonds 2 10 4 - ''' + """ ai, aj = fields[:2] - fu = int(fields[2]) - assert fu in (1,2,3,4,5,6,7,8,9,10) + fu = int(fields[2]) + assert fu in (1, 2, 3, 4, 5, 6, 7, 8, 9, 10) if fu != 1: - raise NotImplementedError('function {0:d} is not yet supported'.format(fu)) + raise NotImplementedError( + "function {0:d} is not yet supported".format(fu) + ) - bond = blocks.BondType('gromacs') + bond = blocks.BondType("gromacs") if fu == 1: - if curr_sec == 'bondtypes': + if curr_sec == "bondtypes": bond.atype1 = ai bond.atype2 = aj b0, kb = list(map(float, fields[3:5])) - bond.gromacs = {'param':{'kb':kb, 'b0':b0}, 'func':fu} + bond.gromacs = {"param": {"kb": kb, "b0": b0}, "func": fu} self.bondtypes.append(bond) _add_info(self, curr_sec, self.bondtypes) - elif curr_sec == 'bonds': + elif curr_sec == "bonds": ai, aj = list(map(int, [ai, aj])) - bond.atom1 = mol.atoms[ai-1] - bond.atom2 = mol.atoms[aj-1] - bond.gromacs['func'] = fu + bond.atom1 = mol.atoms[ai - 1] + bond.atom2 = mol.atoms[aj - 1] + bond.gromacs["func"] = fu if len(fields) > 3: b0, kb = list(map(float, fields[3:5])) - bond.gromacs = {'param':{'kb':kb, 'b0':b0}, 'func':fu} + bond.gromacs = { + "param": {"kb": kb, "b0": b0}, + "func": fu, + } mol.bonds.append(bond) _add_info(mol, curr_sec, mol.bonds) @@ -427,8 +471,8 @@ def _add_info(sys_or_mol, section, container): else: raise NotImplementedError - elif curr_sec in ('angletypes', 'angles'): - ''' + elif curr_sec in ("angletypes", "angles"): + """ section #at fu #param ---------------------------------- angles 3 1 2 @@ -438,34 +482,44 @@ def _add_info(sys_or_mol, section, container): angles 3 5 4 angles 3 6 6 angles 3 8 ?? 
- ''' + """ - ai, aj , ak = fields[:3] - fu = int(fields[3]) - assert fu in (1,2,3,4,5,6,8) # no 7 + ai, aj, ak = fields[:3] + fu = int(fields[3]) + assert fu in (1, 2, 3, 4, 5, 6, 8) # no 7 - if fu not in (1,2,5): - raise NotImplementedError('function {0:d} is not yet supported'.format(fu)) + if fu not in (1, 2, 5): + raise NotImplementedError( + "function {0:d} is not yet supported".format(fu) + ) - ang = blocks.AngleType('gromacs') + ang = blocks.AngleType("gromacs") if fu == 1: - if curr_sec == 'angletypes': + if curr_sec == "angletypes": ang.atype1 = ai ang.atype2 = aj ang.atype3 = ak tetha0, ktetha = list(map(float, fields[4:6])) - ang.gromacs = {'param':{'ktetha':ktetha, 'tetha0':tetha0, 'kub':None, 's0':None}, 'func':fu} + ang.gromacs = { + "param": { + "ktetha": ktetha, + "tetha0": tetha0, + "kub": None, + "s0": None, + }, + "func": fu, + } self.angletypes.append(ang) _add_info(self, curr_sec, self.angletypes) - elif curr_sec == 'angles': + elif curr_sec == "angles": ai, aj, ak = list(map(int, [ai, aj, ak])) - ang.atom1 = mol.atoms[ai-1] - ang.atom2 = mol.atoms[aj-1] - ang.atom3 = mol.atoms[ak-1] - ang.gromacs['func'] = fu + ang.atom1 = mol.atoms[ai - 1] + ang.atom2 = mol.atoms[aj - 1] + ang.atom3 = mol.atoms[ak - 1] + ang.gromacs["func"] = fu mol.angles.append(ang) _add_info(mol, curr_sec, mol.angles) @@ -474,40 +528,56 @@ def _add_info(sys_or_mol, section, container): raise ValueError elif fu == 2: - if curr_sec == 'angletypes': + if curr_sec == "angletypes": raise NotImplementedError() - elif curr_sec == 'angles': + elif curr_sec == "angles": ai, aj, ak = list(map(int, [ai, aj, ak])) - ang.atom1 = mol.atoms[ai-1] - ang.atom2 = mol.atoms[aj-1] - ang.atom3 = mol.atoms[ak-1] - ang.gromacs['func'] = fu + ang.atom1 = mol.atoms[ai - 1] + ang.atom2 = mol.atoms[aj - 1] + ang.atom3 = mol.atoms[ak - 1] + ang.gromacs["func"] = fu tetha0, ktetha = list(map(float, fields[4:6])) - ang.gromacs = {'param':{'ktetha':ktetha, 'tetha0':tetha0, 'kub':None, 's0':None}, 'func':fu} + ang.gromacs = { + "param": { + "ktetha": ktetha, + "tetha0": tetha0, + "kub": None, + "s0": None, + }, + "func": fu, + } mol.angles.append(ang) _add_info(mol, curr_sec, mol.angles) elif fu == 5: - if curr_sec == 'angletypes': + if curr_sec == "angletypes": ang.atype1 = ai ang.atype2 = aj ang.atype3 = ak tetha0, ktetha, s0, kub = list(map(float, fields[4:8])) - ang.gromacs = {'param':{'ktetha':ktetha, 'tetha0':tetha0, 'kub':kub, 's0':s0}, 'func':fu} + ang.gromacs = { + "param": { + "ktetha": ktetha, + "tetha0": tetha0, + "kub": kub, + "s0": s0, + }, + "func": fu, + } self.angletypes.append(ang) _add_info(self, curr_sec, self.angletypes) - elif curr_sec == 'angles': + elif curr_sec == "angles": ai, aj, ak = list(map(int, [ai, aj, ak])) - ang.atom1 = mol.atoms[ai-1] - ang.atom2 = mol.atoms[aj-1] - ang.atom3 = mol.atoms[ak-1] - ang.gromacs['func'] = fu + ang.atom1 = mol.atoms[ai - 1] + ang.atom2 = mol.atoms[aj - 1] + ang.atom3 = mol.atoms[ak - 1] + ang.gromacs["func"] = fu mol.angles.append(ang) _add_info(mol, curr_sec, mol.angles) @@ -518,9 +588,8 @@ def _add_info(sys_or_mol, section, container): else: raise NotImplementedError - - elif curr_sec in ('dihedraltypes', 'dihedrals'): - ''' + elif curr_sec in ("dihedraltypes", "dihedrals"): + """ section #at fu #param ---------------------------------- dihedrals 4 1 3 @@ -530,25 +599,27 @@ def _add_info(sys_or_mol, section, container): dihedrals 4 5 4 dihedrals 4 8 ?? 
dihedrals 4 9 3 - ''' + """ - if curr_sec == 'dihedraltypes' and len(fields) == 6: + if curr_sec == "dihedraltypes" and len(fields) == 6: # in oplsaa - quartz parameters - fields.insert(2, 'X') - fields.insert(0, 'X') + fields.insert(2, "X") + fields.insert(0, "X") ai, aj, ak, am = fields[:4] fu = int(fields[4]) - assert fu in (1,2,3,4,5,8,9) + assert fu in (1, 2, 3, 4, 5, 8, 9) - if fu not in (1,2,3,4,9): - raise NotImplementedError('dihedral function {0:d} is not yet supported'.format(fu)) + if fu not in (1, 2, 3, 4, 9): + raise NotImplementedError( + "dihedral function {0:d} is not yet supported".format(fu) + ) - dih = blocks.DihedralType('gromacs') - imp = blocks.ImproperType('gromacs') + dih = blocks.DihedralType("gromacs") + imp = blocks.ImproperType("gromacs") # proper dihedrals - if fu in (1,3,9): - if curr_sec == 'dihedraltypes': + if fu in (1, 3, 9): + if curr_sec == "dihedraltypes": dih.atype1 = ai dih.atype2 = aj dih.atype3 = ak @@ -558,37 +629,45 @@ def _add_info(sys_or_mol, section, container): if fu == 1: delta, kchi, n = list(map(float, fields[5:8])) - dih.gromacs['param'].append({'kchi':kchi, 'n':n, 'delta':delta}) + dih.gromacs["param"].append( + {"kchi": kchi, "n": n, "delta": delta} + ) elif fu == 3: c0, c1, c2, c3, c4, c5 = list(map(float, fields[5:11])) m = dict(c0=c0, c1=c1, c2=c2, c3=c3, c4=c4, c5=c5) - dih.gromacs['param'].append(m) + dih.gromacs["param"].append(m) elif fu == 4: delta, kchi, n = list(map(float, fields[5:8])) - dih.gromacs['param'].append({'kchi':kchi, 'n':int(n), 'delta':delta}) + dih.gromacs["param"].append( + {"kchi": kchi, "n": int(n), "delta": delta} + ) elif fu == 9: delta, kchi, n = list(map(float, fields[5:8])) - dih.gromacs['param'].append({'kchi':kchi, 'n':int(n), 'delta':delta}) + dih.gromacs["param"].append( + {"kchi": kchi, "n": int(n), "delta": delta} + ) else: raise ValueError - dih.gromacs['func'] = fu + dih.gromacs["func"] = fu self.dihedraltypes.append(dih) _add_info(self, curr_sec, self.dihedraltypes) - elif curr_sec == 'dihedrals': + elif curr_sec == "dihedrals": ai, aj, ak, am = list(map(int, fields[:4])) - dih.atom1 = mol.atoms[ai-1] - dih.atom2 = mol.atoms[aj-1] - dih.atom3 = mol.atoms[ak-1] - dih.atom4 = mol.atoms[am-1] - dih.gromacs['func'] = fu + dih.atom1 = mol.atoms[ai - 1] + dih.atom2 = mol.atoms[aj - 1] + dih.atom3 = mol.atoms[ak - 1] + dih.atom4 = mol.atoms[am - 1] + dih.gromacs["func"] = fu dih.line = i_line + 1 if fu == 1: delta, kchi, n = list(map(float, fields[5:8])) - dih.gromacs['param'].append({'kchi':kchi, 'n': int(n), 'delta':delta}) + dih.gromacs["param"].append( + {"kchi": kchi, "n": int(n), "delta": delta} + ) elif fu == 3: pass elif fu == 4: @@ -596,7 +675,9 @@ def _add_info(sys_or_mol, section, container): elif fu == 9: if len(fields[5:8]) == 3: delta, kchi, n = list(map(float, fields[5:8])) - dih.gromacs['param'].append({'kchi':kchi, 'n':int(n), 'delta':delta}) + dih.gromacs["param"].append( + {"kchi": kchi, "n": int(n), "delta": delta} + ) else: raise ValueError @@ -606,8 +687,8 @@ def _add_info(sys_or_mol, section, container): else: raise ValueError # impropers - elif fu in (2,4): - if curr_sec == 'dihedraltypes': + elif fu in (2, 4): + if curr_sec == "dihedraltypes": imp.atype1 = ai imp.atype2 = aj imp.atype3 = ak @@ -616,25 +697,29 @@ def _add_info(sys_or_mol, section, container): imp.line = i_line + 1 if fu == 2: - psi0 , kpsi = list(map(float, fields[5:7])) - imp.gromacs['param'].append({'kpsi':kpsi, 'psi0': psi0}) + psi0, kpsi = list(map(float, fields[5:7])) + imp.gromacs["param"].append( + 
{"kpsi": kpsi, "psi0": psi0} + ) elif fu == 4: - psi0 , kpsi, n = list(map(float, fields[5:8])) - imp.gromacs['param'].append({'kpsi':kpsi, 'psi0': psi0, 'n': int(n)}) + psi0, kpsi, n = list(map(float, fields[5:8])) + imp.gromacs["param"].append( + {"kpsi": kpsi, "psi0": psi0, "n": int(n)} + ) else: raise ValueError - imp.gromacs['func'] = fu + imp.gromacs["func"] = fu self.impropertypes.append(imp) _add_info(self, curr_sec, self.impropertypes) - elif curr_sec == 'dihedrals': + elif curr_sec == "dihedrals": ai, aj, ak, am = list(map(int, fields[:4])) - imp.atom1 = mol.atoms[ai-1] - imp.atom2 = mol.atoms[aj-1] - imp.atom3 = mol.atoms[ak-1] - imp.atom4 = mol.atoms[am-1] - imp.gromacs['func'] = fu + imp.atom1 = mol.atoms[ai - 1] + imp.atom2 = mol.atoms[aj - 1] + imp.atom3 = mol.atoms[ak - 1] + imp.atom4 = mol.atoms[am - 1] + imp.gromacs["func"] = fu imp.line = i_line + 1 @@ -643,8 +728,10 @@ def _add_info(sys_or_mol, section, container): elif fu == 4: # in-line override of dihedral parameters if len(fields[5:8]) == 3: - psi0 , kpsi, n = list(map(float, fields[5:8])) - imp.gromacs['param'].append({'kpsi':kpsi, 'psi0': psi0, 'n': int(n)}) + psi0, kpsi, n = list(map(float, fields[5:8])) + imp.gromacs["param"].append( + {"kpsi": kpsi, "psi0": psi0, "n": int(n)} + ) else: raise ValueError @@ -657,41 +744,38 @@ def _add_info(sys_or_mol, section, container): else: raise NotImplementedError - - elif curr_sec in ('cmaptypes', 'cmap'): - - cmap = blocks.CMapType('gromacs') - if curr_sec == 'cmaptypes': + elif curr_sec in ("cmaptypes", "cmap"): + cmap = blocks.CMapType("gromacs") + if curr_sec == "cmaptypes": cmap_lines.append(line) _add_info(self, curr_sec, self.cmaptypes) else: ai, aj, ak, am, an = list(map(int, fields[:5])) fu = int(fields[5]) assert fu == 1 - cmap.atom1 = mol.atoms[ai-1] - cmap.atom2 = mol.atoms[aj-1] - cmap.atom3 = mol.atoms[ak-1] - cmap.atom4 = mol.atoms[am-1] - cmap.atom8 = mol.atoms[an-1] - cmap.gromacs['func'] = fu + cmap.atom1 = mol.atoms[ai - 1] + cmap.atom2 = mol.atoms[aj - 1] + cmap.atom3 = mol.atoms[ak - 1] + cmap.atom4 = mol.atoms[am - 1] + cmap.atom8 = mol.atoms[an - 1] + cmap.gromacs["func"] = fu mol.cmaps.append(cmap) _add_info(mol, curr_sec, mol.cmaps) - - elif curr_sec == 'settles': - ''' + elif curr_sec == "settles": + """ section #at fu #param ---------------------------------- - ''' + """ assert len(fields) == 4 ai = int(fields[0]) fu = int(fields[1]) assert fu == 1 - settle = blocks.SettleType('gromacs') - settle.atom = mol.atoms[ai-1] + settle = blocks.SettleType("gromacs") + settle.atom = mol.atoms[ai - 1] settle.dOH = float(fields[2]) settle.dHH = float(fields[3]) @@ -699,10 +783,10 @@ def _add_info(sys_or_mol, section, container): _add_info(mol, curr_sec, mol.settles) elif curr_sec == "virtual_sites3": - ''' - ; Dummy from funct a b - 4 1 2 3 1 0.131937768 0.131937768 - ''' + """ + ; Dummy from funct a b + 4 1 2 3 1 0.131937768 0.131937768 + """ assert len(fields) == 7 ai = int(fields[0]) aj = int(fields[1]) @@ -713,59 +797,57 @@ def _add_info(sys_or_mol, section, container): a = float(fields[5]) b = float(fields[6]) - vs3 = blocks.VirtualSites3Type('gromacs') + vs3 = blocks.VirtualSites3Type("gromacs") vs3.atom1 = ai vs3.atom2 = aj vs3.atom3 = ak vs3.atom4 = al - vs3.gromacs['func'] = fu - vs3.gromacs['param'] = { 'a': a, 'b':b } + vs3.gromacs["func"] = fu + vs3.gromacs["param"] = {"a": a, "b": b} mol.virtual_sites3.append(vs3) _add_info(mol, curr_sec, mol.virtual_sites3) - - elif curr_sec in ('exclusions',): + elif curr_sec in ("exclusions",): ai = 
int(fields[0]) other = list(map(int, fields[1:])) exc = blocks.Exclusion() - exc.main_atom = mol.atoms[ai-1] - exc.other_atoms= [mol.atoms[k-1] for k in other] + exc.main_atom = mol.atoms[ai - 1] + exc.other_atoms = [mol.atoms[k - 1] for k in other] mol.exclusions.append(exc) _add_info(mol, curr_sec, mol.exclusions) - - elif curr_sec in ('constrainttypes', 'constraints'): - ''' + elif curr_sec in ("constrainttypes", "constraints"): + """ section #at fu #param ---------------------------------- constraints 2 1 1 constraints 2 2 1 - ''' + """ ai, aj = fields[:2] fu = int(fields[2]) - assert fu in (1,2) + assert fu in (1, 2) - cons = blocks.ConstraintType('gromacs') + cons = blocks.ConstraintType("gromacs") # TODO: what's different between 1 and 2 if fu in [1, 2]: - if curr_sec == 'constrainttypes': + if curr_sec == "constrainttypes": cons.atype1 = ai cons.atype2 = aj b0 = float(fields[3]) - cons.gromacs = {'param':{'b0':b0}, 'func': fu} + cons.gromacs = {"param": {"b0": b0}, "func": fu} self.constrainttypes.append(cons) _add_info(self, curr_sec, self.constrainttypes) - elif curr_sec == 'constraints': + elif curr_sec == "constraints": ai, aj = list(map(int, fields[:2])) - cons.atom1 = mol.atoms[ai-1] - cons.atom2 = mol.atoms[aj-1] - cons.gromacs['func'] = fu + cons.atom1 = mol.atoms[ai - 1] + cons.atom2 = mol.atoms[aj - 1] + cons.gromacs["func"] = fu mol.constraints.append(cons) _add_info(mol, curr_sec, mol.constraints) @@ -775,27 +857,27 @@ def _add_info(sys_or_mol, section, container): else: raise ValueError - elif curr_sec in ('position_restraints', - 'distance_restraints', - 'dihedral_restraints', - 'orientation_restraints', - 'angle_restraints', - 'angle_restraints_z'): + elif curr_sec in ( + "position_restraints", + "distance_restraints", + "dihedral_restraints", + "orientation_restraints", + "angle_restraints", + "angle_restraints_z", + ): pass - - elif curr_sec in ('implicit_genborn_params',): - ''' + elif curr_sec in ("implicit_genborn_params",): + """ attype sar st pi gbr hct - ''' + """ pass - elif curr_sec == 'system': - #assert len(fields) == 1 + elif curr_sec == "system": + # assert len(fields) == 1 self.name = fields[0] - - elif curr_sec == 'molecules': + elif curr_sec == "molecules": assert len(fields) == 2 mname, nmol = fields[0], int(fields[1]) @@ -804,71 +886,82 @@ def _add_info(sys_or_mol, section, container): self.molecules.append(self.dict_molname_mol[mname]) else: - raise NotImplementedError('Unknown section in topology: {0}'.format(curr_sec)) + raise NotImplementedError( + "Unknown section in topology: {0}".format(curr_sec) + ) # process cmap_lines curr_cons = None for line in cmap_lines: - # cmaptype opening line if len(line.split()) == 8: - cons = blocks.CMapType('gromacs') - - atype1, atype2, atype3, atype4, atype8, func, sizeX, sizeY = line.replace("\\","").split() + cons = blocks.CMapType("gromacs") + + ( + atype1, + atype2, + atype3, + atype4, + atype8, + func, + sizeX, + sizeY, + ) = line.replace("\\", "").split() func, sizeX, sizeY = int(func), int(sizeX), int(sizeY) cons.atype1 = atype1 cons.atype2 = atype2 cons.atype3 = atype3 cons.atype4 = atype4 cons.atype8 = atype8 - cons.gromacs = {'param':[], 'func': func} + cons.gromacs = {"param": [], "func": func} curr_cons = cons # cmap body elif len(line.split()) == 10: - cmap_param = map(float, line.replace("\\","").split()) - cons.gromacs['param'] += cmap_param + cmap_param = map(float, line.replace("\\", "").split()) + cons.gromacs["param"] += cmap_param # cmaptype cloning line elif len(line.split()) == 6: - 
cmap_param = map(float, line.replace("\\","").split()) - cons.gromacs['param'] += cmap_param + cmap_param = map(float, line.replace("\\", "").split()) + cons.gromacs["param"] += cmap_param self.cmaptypes.append(curr_cons) else: raise ValueError + class SystemToGroTop(object): """Converter class - represent TOP objects as GROMACS topology file.""" + formats = { - 'atomtypes' : '{:<7s} {:3s} {:>7} {} {:3s} {} {}\n', - 'atoms' : '{:6d} {:>10s} {:6d} {:6s} {:6s} {:6d} {} {}\n', - 'atoms_nomass' : '{:6d} {:>10s} {:6d} {:6s} {:6s} {:6d} {}\n', - 'nonbond_params' : '{:20s} {:20s} {:1d} {} {}\n', - 'bondtypes' : '{:5s} {:5s} {:1d} {} {}\n', - 'bonds' : '{:3d} {:3d} {:1d}\n', - 'bonds_ext' : '{:3d} {:3d} {:1d} {} {}\n', - 'settles' : '{:3d} {:3d} {} {}\n', - 'virtual_sites3' : '{:3d} {:3d} {:3d} {:3d} {:1d} {} {}\n', - 'exclusions' : '{:3d} {}\n', - 'pairtypes' : '{:6s} {:6s} {:d} {:.13g} {:.13g}\n', - 'pairs' : '{:3d} {:3d} {:1d}\n', - 'angletypes_1' : '{:>8s} {:>8s} {:>8s} {:1d} {} {}\n', - 'angletypes_5' : '{:>8s} {:>8s} {:>8s} {:1d} {} {} {} {}\n', - 'constrainttypes': '{:6s} {:6s} {:1d} {}\n', - 'angles' : '{:3d} {:3d} {:3d} {:1d}\n', - 'angles_ext' : '{:3d} {:3d} {:3d} {:1d} {} {}\n', - 'dihedraltypes' : '{:6s} {:6s} {:6s} {:6s} {:1d} {} {} {:1d}\n', - 'dihedrals' : '{:3d} {:3d} {:3d} {:3d} {:1d}\n', - 'dihedrals_ext' : '{:3d} {:3d} {:3d} {:3d} {:1d} {} {} {:1d}\n', - 'impropertypes_2' : '{:6s} {:6s} {:6s} {:6s} {:1d} {} {} \n', - 'impropertypes_4' : '{:6s} {:6s} {:6s} {:6s} {:1d} {} {} {:2d}\n', - 'impropers' : '{:3d} {:3d} {:3d} {:3d} {:1d}\n', - 'impropers_2' : '{:3d} {:3d} {:3d} {:3d} {:1d} {} {} \n', - 'impropers_4' : '{:3d} {:3d} {:3d} {:3d} {:1d} {} {} {:2d}\n', + "atomtypes": "{:<7s} {:3s} {:>7} {} {:3s} {} {}\n", + "atoms": "{:6d} {:>10s} {:6d} {:6s} {:6s} {:6d} {} {}\n", + "atoms_nomass": "{:6d} {:>10s} {:6d} {:6s} {:6s} {:6d} {}\n", + "nonbond_params": "{:20s} {:20s} {:1d} {} {}\n", + "bondtypes": "{:5s} {:5s} {:1d} {} {}\n", + "bonds": "{:3d} {:3d} {:1d}\n", + "bonds_ext": "{:3d} {:3d} {:1d} {} {}\n", + "settles": "{:3d} {:3d} {} {}\n", + "virtual_sites3": "{:3d} {:3d} {:3d} {:3d} {:1d} {} {}\n", + "exclusions": "{:3d} {}\n", + "pairtypes": "{:6s} {:6s} {:d} {:.13g} {:.13g}\n", + "pairs": "{:3d} {:3d} {:1d}\n", + "angletypes_1": "{:>8s} {:>8s} {:>8s} {:1d} {} {}\n", + "angletypes_5": "{:>8s} {:>8s} {:>8s} {:1d} {} {} {} {}\n", + "constrainttypes": "{:6s} {:6s} {:1d} {}\n", + "angles": "{:3d} {:3d} {:3d} {:1d}\n", + "angles_ext": "{:3d} {:3d} {:3d} {:1d} {} {}\n", + "dihedraltypes": "{:6s} {:6s} {:6s} {:6s} {:1d} {} {} {:1d}\n", + "dihedrals": "{:3d} {:3d} {:3d} {:3d} {:1d}\n", + "dihedrals_ext": "{:3d} {:3d} {:3d} {:3d} {:1d} {} {} {:1d}\n", + "impropertypes_2": "{:6s} {:6s} {:6s} {:6s} {:1d} {} {} \n", + "impropertypes_4": "{:6s} {:6s} {:6s} {:6s} {:1d} {} {} {:2d}\n", + "impropers": "{:3d} {:3d} {:3d} {:3d} {:1d}\n", + "impropers_2": "{:3d} {:3d} {:3d} {:3d} {:1d} {} {} \n", + "impropers_4": "{:3d} {:3d} {:3d} {:3d} {:1d} {} {} {:2d}\n", } - toptemplate = """ [ defaults ] *DEFAULTS* @@ -930,92 +1023,113 @@ def __init__(self, system, outfile="output.top", multiple_output=False): *multiple_output* if True, write moleculetypes to separate files, named mol_MOLNAME.itp (default: False) """ - self.logger = logging.getLogger('gromacs.fileformats.SystemToGroTop') + self.logger = logging.getLogger("gromacs.fileformats.SystemToGroTop") self.logger.debug(">> entering SystemToGroTop") - self.system = system + self.system = system self.outfile = outfile self.multiple_output = multiple_output 
self.assemble_topology() self.logger.debug("<< leaving SystemToGroTop") - @staticmethod def _redefine_atomtypes(mol): for i, atom in enumerate(mol.atoms): - atom.atomtype = 'at{0:03d}'.format(i+1) + atom.atomtype = "at{0:03d}".format(i + 1) def assemble_topology(self): """Call the various member self._make_* functions to convert the topology object into a string""" self.logger.debug("starting to assemble topology...") - top = '' + top = "" self.logger.debug("making atom/pair/bond/angle/dihedral/improper types") top += self.toptemplate - top = top.replace('*DEFAULTS*', ''.join( self._make_defaults(self.system)) ) - top = top.replace('*ATOMTYPES*', ''.join( self._make_atomtypes(self.system)) ) - top = top.replace('*NONBOND_PARAM*', ''.join( self._make_nonbond_param(self.system)) ) - top = top.replace('*PAIRTYPES*', ''.join( self._make_pairtypes(self.system)) ) - top = top.replace('*BONDTYPES*', ''.join( self._make_bondtypes(self.system)) ) - top = top.replace('*CONSTRAINTTYPES*',''.join( self._make_constrainttypes(self.system))) - top = top.replace('*ANGLETYPES*', ''.join( self._make_angletypes(self.system))) - top = top.replace('*DIHEDRALTYPES*', ''.join( self._make_dihedraltypes(self.system)) ) - top = top.replace('*IMPROPERTYPES*', ''.join( self._make_impropertypes(self.system)) ) - top = top.replace('*CMAPTYPES*', ''.join( self._make_cmaptypes(self.system)) ) - - for i,(molname,m) in enumerate(self.system.dict_molname_mol.items()): - + top = top.replace("*DEFAULTS*", "".join(self._make_defaults(self.system))) + top = top.replace("*ATOMTYPES*", "".join(self._make_atomtypes(self.system))) + top = top.replace( + "*NONBOND_PARAM*", "".join(self._make_nonbond_param(self.system)) + ) + top = top.replace("*PAIRTYPES*", "".join(self._make_pairtypes(self.system))) + top = top.replace("*BONDTYPES*", "".join(self._make_bondtypes(self.system))) + top = top.replace( + "*CONSTRAINTTYPES*", "".join(self._make_constrainttypes(self.system)) + ) + top = top.replace("*ANGLETYPES*", "".join(self._make_angletypes(self.system))) + top = top.replace( + "*DIHEDRALTYPES*", "".join(self._make_dihedraltypes(self.system)) + ) + top = top.replace( + "*IMPROPERTYPES*", "".join(self._make_impropertypes(self.system)) + ) + top = top.replace("*CMAPTYPES*", "".join(self._make_cmaptypes(self.system))) + + for i, (molname, m) in enumerate(self.system.dict_molname_mol.items()): itp = self.itptemplate - itp = itp.replace('*MOLECULETYPE*', ''.join( self._make_moleculetype(m, molname, m.exclusion_numb)) ) - itp = itp.replace('*ATOMS*', ''.join( self._make_atoms(m)) ) - itp = itp.replace('*BONDS*', ''.join( self._make_bonds(m)) ) - itp = itp.replace('*PAIRS*', ''.join( self._make_pairs(m)) ) - itp = itp.replace('*SETTLES*', ''.join( self._make_settles(m)) ) - itp = itp.replace('*VIRTUAL_SITES3*',''.join( self._make_virtual_sites3(m)) ) - itp = itp.replace('*EXCLUSIONS*', ''.join( self._make_exclusions(m)) ) - itp = itp.replace('*ANGLES*', ''.join( self._make_angles(m)) ) - itp = itp.replace('*DIHEDRALS*', ''.join( self._make_dihedrals(m)) ) - itp = itp.replace('*IMPROPERS*', ''.join( self._make_impropers(m)) ) - itp = itp.replace('*CMAPS*', ''.join( self._make_cmaps(m)) ) + itp = itp.replace( + "*MOLECULETYPE*", + "".join(self._make_moleculetype(m, molname, m.exclusion_numb)), + ) + itp = itp.replace("*ATOMS*", "".join(self._make_atoms(m))) + itp = itp.replace("*BONDS*", "".join(self._make_bonds(m))) + itp = itp.replace("*PAIRS*", "".join(self._make_pairs(m))) + itp = itp.replace("*SETTLES*", "".join(self._make_settles(m))) + 
itp = itp.replace("*VIRTUAL_SITES3*", "".join(self._make_virtual_sites3(m))) + itp = itp.replace("*EXCLUSIONS*", "".join(self._make_exclusions(m))) + itp = itp.replace("*ANGLES*", "".join(self._make_angles(m))) + itp = itp.replace("*DIHEDRALS*", "".join(self._make_dihedrals(m))) + itp = itp.replace("*IMPROPERS*", "".join(self._make_impropers(m))) + itp = itp.replace("*CMAPS*", "".join(self._make_cmaps(m))) if not self.multiple_output: top += itp else: outfile = "mol_{0}.itp".format(molname) - top += '#include "mol_{0}.itp" \n'.format( molname ) + top += '#include "mol_{0}.itp" \n'.format(molname) with open(outfile, "w") as f: f.writelines([itp]) - top += '\n[system] \nConvertedSystem\n\n' - top += '[molecules] \n' + top += "\n[system] \nConvertedSystem\n\n" + top += "[molecules] \n" molecules = [("", 0)] for m in self.system.molecules: - if (molecules[-1][0] != m.name): + if molecules[-1][0] != m.name: molecules.append([m.name, 0]) if molecules[-1][0] == m.name: molecules[-1][1] += 1 for molname, n in molecules[1:]: - top += '{0:s} {1:d}\n'.format(molname, n) - top += '\n' + top += "{0:s} {1:d}\n".format(molname, n) + top += "\n" - with open(self.outfile, 'w') as f: + with open(self.outfile, "w") as f: f.writelines([top]) - def _make_defaults(self,m): - if m.defaults['gen-pairs'] and m.defaults['fudgeLJ']and m.defaults['fudgeQQ']: - line = ['{0:d} {1:d} {2} {3} {4} \n'.format(m.defaults['nbfunc'], m.defaults['comb-rule'], m.defaults['gen-pairs'] , m.defaults['fudgeLJ'], m.defaults['fudgeQQ'])] + def _make_defaults(self, m): + if m.defaults["gen-pairs"] and m.defaults["fudgeLJ"] and m.defaults["fudgeQQ"]: + line = [ + "{0:d} {1:d} {2} {3} {4} \n".format( + m.defaults["nbfunc"], + m.defaults["comb-rule"], + m.defaults["gen-pairs"], + m.defaults["fudgeLJ"], + m.defaults["fudgeQQ"], + ) + ] else: - line = ['{0:d} {1:d}\n'.format(m.defaults['nbfunc'], m.defaults['comb-rule'], )] + line = [ + "{0:d} {1:d}\n".format( + m.defaults["nbfunc"], + m.defaults["comb-rule"], + ) + ] return line - - def _make_atomtypes(self,m): + def _make_atomtypes(self, m): def get_prot(at): # TODO improve this - _protons = {'C':6, 'H':1, 'N':7, 'O':8, 'S':16, 'P':15} + _protons = {"C": 6, "H": 1, "N": 7, "O": 8, "S": 16, "P": 15} if at[0] in list(_protons.keys()): return _protons[at[0]] else: @@ -1023,12 +1137,21 @@ def get_prot(at): result = [] for at in m.atomtypes: - at.convert('gromacs') + at.convert("gromacs") prot = get_prot(at.atype) - ljl = at.gromacs['param']['ljl'] - lje = at.gromacs['param']['lje'] - line = self.formats['atomtypes'].format(at.atype, at.bond_type if at.bond_type else "", at.mass, at.charge, 'A', ljl, lje) - if at.comment : line += at.comment + ljl = at.gromacs["param"]["ljl"] + lje = at.gromacs["param"]["lje"] + line = self.formats["atomtypes"].format( + at.atype, + at.bond_type if at.bond_type else "", + at.mass, + at.charge, + "A", + ljl, + lje, + ) + if at.comment: + line += at.comment result.append(line) return result @@ -1039,87 +1162,89 @@ def _make_nonbond_param(self, m): at1 = pr.atype1 at2 = pr.atype2 - #pr.convert('gromacs') - eps = pr.gromacs['param']['eps'] - sig = pr.gromacs['param']['sig'] + # pr.convert('gromacs') + eps = pr.gromacs["param"]["eps"] + sig = pr.gromacs["param"]["sig"] fu = 1 # TODO - line = self.formats['nonbond_params'].format(at1, at2, fu, sig, eps) - if pr.comment : line = line[:-1] + pr.comment + line[-1:] + line = self.formats["nonbond_params"].format(at1, at2, fu, sig, eps) + if pr.comment: + line = line[:-1] + pr.comment + line[-1:] result.append(line) 
return result - def _make_pairtypes(self,m): - + def _make_pairtypes(self, m): result = [] for pt in m.pairtypes: at1, at2 = pt.atype1, pt.atype2 - fu, l14, e14 = pt.gromacs['func'], pt.gromacs['param']['ljl14'], pt.gromacs['param']['lje14'] - line = self.formats['pairtypes'].format(at1, at2, fu, l14, e14) - if pt.comment : line = line[:-1] + pt.comment + fu, l14, e14 = ( + pt.gromacs["func"], + pt.gromacs["param"]["ljl14"], + pt.gromacs["param"]["lje14"], + ) + line = self.formats["pairtypes"].format(at1, at2, fu, l14, e14) + if pt.comment: + line = line[:-1] + pt.comment result.append(line) return result - - - def _make_bondtypes(self,m): + def _make_bondtypes(self, m): result = [] for bond in m.bondtypes: at1 = bond.atype1 at2 = bond.atype2 - bond.convert('gromacs') + bond.convert("gromacs") - kb = bond.gromacs['param']['kb'] - b0 = bond.gromacs['param']['b0'] - fu = bond.gromacs['func'] + kb = bond.gromacs["param"]["kb"] + b0 = bond.gromacs["param"]["b0"] + fu = bond.gromacs["func"] - line = self.formats['bondtypes'].format(at1, at2, fu, b0, kb) + line = self.formats["bondtypes"].format(at1, at2, fu, b0, kb) result.append(line) return result - - def _make_constrainttypes(self,m): + def _make_constrainttypes(self, m): result = [] for con in m.constrainttypes: at1 = con.atype1 at2 = con.atype2 - fu = con.gromacs['func'] - b0 = con.gromacs['param']['b0'] + fu = con.gromacs["func"] + b0 = con.gromacs["param"]["b0"] - line = self.formats['constrainttypes'].format(at1, at2, fu, b0) + line = self.formats["constrainttypes"].format(at1, at2, fu, b0) result.append(line) return result - - def _make_angletypes(self,m): + def _make_angletypes(self, m): result = [] for ang in m.angletypes: at1 = ang.atype1 at2 = ang.atype2 at3 = ang.atype3 - ang.convert('gromacs') + ang.convert("gromacs") - ktetha = ang.gromacs['param']['ktetha'] - tetha0 = ang.gromacs['param']['tetha0'] - kub = ang.gromacs['param']['kub'] - s0 = ang.gromacs['param']['s0'] + ktetha = ang.gromacs["param"]["ktetha"] + tetha0 = ang.gromacs["param"]["tetha0"] + kub = ang.gromacs["param"]["kub"] + s0 = ang.gromacs["param"]["s0"] - fu = ang.gromacs['func'] + fu = ang.gromacs["func"] - angletypes = 'angletypes_{0:d}'.format(fu) - line = self.formats[angletypes].format(at1, at2, at3, fu, tetha0, ktetha, s0, kub) + angletypes = "angletypes_{0:d}".format(fu) + line = self.formats[angletypes].format( + at1, at2, at3, fu, tetha0, ktetha, s0, kub + ) result.append(line) return result - - def _make_dihedraltypes(self,m): + def _make_dihedraltypes(self, m): result = [] for dih in m.dihedraltypes: at1 = dih.atype1 @@ -1127,24 +1252,28 @@ def _make_dihedraltypes(self,m): at3 = dih.atype3 at4 = dih.atype4 - dih.convert('gromacs') - fu = dih.gromacs['func'] + dih.convert("gromacs") + fu = dih.gromacs["func"] - for dpar in dih.gromacs['param']: - kchi = dpar['kchi'] - n = dpar['n'] - delta= dpar['delta'] + for dpar in dih.gromacs["param"]: + kchi = dpar["kchi"] + n = dpar["n"] + delta = dpar["delta"] if not dih.disabled: - line = self.formats['dihedraltypes'].format(at1, at2, at3, at4, fu, delta, kchi, n) + line = self.formats["dihedraltypes"].format( + at1, at2, at3, at4, fu, delta, kchi, n + ) else: - line = self.formats['dihedraltypes'].format(at1, at2, at3, at4, fu, delta, kchi, n) + line = self.formats["dihedraltypes"].format( + at1, at2, at3, at4, fu, delta, kchi, n + ) line = dih.comment + line result.append(line) return result - def _make_impropertypes(self,m): + def _make_impropertypes(self, m): result = [] for imp in m.impropertypes: at1 = 
imp.atype1 @@ -1152,21 +1281,25 @@ def _make_impropertypes(self,m): at3 = imp.atype3 at4 = imp.atype4 - imp.convert('gromacs') - fu = imp.gromacs['func'] - - for ipar in imp.gromacs['param']: + imp.convert("gromacs") + fu = imp.gromacs["func"] - kpsi = ipar['kpsi'] - psi0 = ipar['psi0'] + for ipar in imp.gromacs["param"]: + kpsi = ipar["kpsi"] + psi0 = ipar["psi0"] if fu == 2: - line = self.formats['impropertypes_2'].format(at1, at2, at3, at4, fu, psi0, kpsi) + line = self.formats["impropertypes_2"].format( + at1, at2, at3, at4, fu, psi0, kpsi + ) if fu == 4: - n = ipar['n'] - line = self.formats['impropertypes_4'].format(at1, at2, at3, at4, fu, psi0, kpsi, n) + n = ipar["n"] + line = self.formats["impropertypes_4"].format( + at1, at2, at3, at4, fu, psi0, kpsi, n + ) - if imp.disabled: line = imp.comment + line + if imp.disabled: + line = imp.comment + line result.append(line) return result @@ -1178,174 +1311,250 @@ def _make_cmaptypes(self, m): at2 = cmap.atype2 at3 = cmap.atype3 at4 = cmap.atype4 - #at5 = cmap.atype5 - #at6 = cmap.atype6 - #at7 = cmap.atype7 + # at5 = cmap.atype5 + # at6 = cmap.atype6 + # at7 = cmap.atype7 at8 = cmap.atype8 - cmap.convert('gromacs') + cmap.convert("gromacs") - fu = cmap.gromacs['func'] - line = '{0:s} {1:s} {2:s} {3:s} {4:s} {5:d} 24 24'.format(at1, at2, at3, at4, at8, fu) - for i,c in enumerate(cmap.gromacs['param']): - if i%10 == 0: - line += '\\\n' + fu = cmap.gromacs["func"] + line = "{0:s} {1:s} {2:s} {3:s} {4:s} {5:d} 24 24".format( + at1, at2, at3, at4, at8, fu + ) + for i, c in enumerate(cmap.gromacs["param"]): + if i % 10 == 0: + line += "\\\n" else: - line += ' ' - line += '{0:12.8f}'.format(c) + line += " " + line += "{0:12.8f}".format(c) - line += '\n\n' + line += "\n\n" result.append(line) return result - def _make_moleculetype(self,m,molname,nrexcl): - return ['; Name \t\t nrexcl \n {0} {1} \n'.format(molname,nrexcl)] + def _make_moleculetype(self, m, molname, nrexcl): + return ["; Name \t\t nrexcl \n {0} {1} \n".format(molname, nrexcl)] - def _make_atoms(self,m): + def _make_atoms(self, m): result = [] - #i = 1 + # i = 1 for atom in m.atoms: numb = cgnr = atom.number atype = atom.get_atomtype() - assert atype!= False - assert hasattr(atom, 'charge') #and hasattr(atom, 'mass') - - if hasattr(atom, 'mass'): - line = self.formats['atoms'].format( - numb, atype, atom.resnumb, atom.resname, atom.name, cgnr, atom.charge, atom.mass) + assert atype != False + assert hasattr(atom, "charge") # and hasattr(atom, 'mass') + + if hasattr(atom, "mass"): + line = self.formats["atoms"].format( + numb, + atype, + atom.resnumb, + atom.resname, + atom.name, + cgnr, + atom.charge, + atom.mass, + ) else: - line = self.formats['atoms_nomass'].format( - numb, atype, atom.resnumb, atom.resname, atom.name, cgnr, atom.charge) + line = self.formats["atoms_nomass"].format( + numb, + atype, + atom.resnumb, + atom.resname, + atom.name, + cgnr, + atom.charge, + ) result.append(line) - result.insert(0,'; {0:5d} atoms\n'.format(len(result))) + result.insert(0, "; {0:5d} atoms\n".format(len(result))) return result - def _make_pairs(self,m): - + def _make_pairs(self, m): result = [] for pr in m.pairs: fu = 1 p1 = pr.atom1.number p4 = pr.atom2.number - line = self.formats['pairs'].format(p1, p4, fu) + line = self.formats["pairs"].format(p1, p4, fu) result.append(line) - result.insert(0,'; {0:5d} pairs\n'.format(len(result))) + result.insert(0, "; {0:5d} pairs\n".format(len(result))) return result - - def _make_bonds(self,m): + def _make_bonds(self, m): result = [] for bond in 
m.bonds: fu = bond.gromacs["func"] - if bond.gromacs["param"]["kb"] and bond.gromacs["param"]["b0"]: kb, b0 = bond.gromacs["param"]["kb"], bond.gromacs["param"]["b0"] - line = self.formats['bonds_ext'].format(bond.atom1.number, bond.atom2.number, fu, b0, kb) + line = self.formats["bonds_ext"].format( + bond.atom1.number, bond.atom2.number, fu, b0, kb + ) else: - line = self.formats['bonds'].format(bond.atom1.number, bond.atom2.number, fu) + line = self.formats["bonds"].format( + bond.atom1.number, bond.atom2.number, fu + ) result.append(line) - result.insert(0,'; {0:5d} bonds\n'.format(len(result))) + result.insert(0, "; {0:5d} bonds\n".format(len(result))) return result - def _make_angles(self,m): + def _make_angles(self, m): result = [] for ang in m.angles: fu = ang.gromacs["func"] if ang.gromacs["param"]["ktetha"] and ang.gromacs["param"]["tetha0"]: - ktetha, tetha0 = ang.gromacs["param"]["ktetha"] , ang.gromacs["param"]["tetha0"] - line = self.formats['angles_ext'].format(ang.atom1.number, ang.atom2.number, ang.atom3.number, fu, tetha0, ktetha) + ktetha, tetha0 = ( + ang.gromacs["param"]["ktetha"], + ang.gromacs["param"]["tetha0"], + ) + line = self.formats["angles_ext"].format( + ang.atom1.number, + ang.atom2.number, + ang.atom3.number, + fu, + tetha0, + ktetha, + ) else: - line = self.formats['angles'].format(ang.atom1.number, ang.atom2.number, ang.atom3.number, fu) + line = self.formats["angles"].format( + ang.atom1.number, ang.atom2.number, ang.atom3.number, fu + ) result.append(line) - result.insert(0,'; {0:5d} angles\n'.format(len(result))) + result.insert(0, "; {0:5d} angles\n".format(len(result))) return result - def _make_settles(self,m): + def _make_settles(self, m): result = [] for st in m.settles: - line = self.formats['settles'].format(st.atom.number, 1, st.dOH, st.dHH) + line = self.formats["settles"].format(st.atom.number, 1, st.dOH, st.dHH) result.append(line) - result.insert(0,'; {0:5d} settles\n'.format(len(result))) + result.insert(0, "; {0:5d} settles\n".format(len(result))) return result - - def _make_virtual_sites3(self,m): + def _make_virtual_sites3(self, m): result = [] for vs in m.virtual_sites3: fu = 1 - line = self.formats['virtual_sites3'].format(vs.atom1, vs.atom2, vs.atom3, vs.atom4, fu, vs.gromacs['param']['a'], vs.gromacs['param']['b']) + line = self.formats["virtual_sites3"].format( + vs.atom1, + vs.atom2, + vs.atom3, + vs.atom4, + fu, + vs.gromacs["param"]["a"], + vs.gromacs["param"]["b"], + ) result.append(line) - result.insert(0,'; {0:5d} virtual_sites3\n'.format(len(result))) + result.insert(0, "; {0:5d} virtual_sites3\n".format(len(result))) return result - - def _make_exclusions(self,m): + def _make_exclusions(self, m): result = [] for excl in m.exclusions: other_atoms = [" {:3d}".format(at.number) for at in excl.other_atoms] - line = self.formats['exclusions'].format(excl.main_atom.number, "".join(other_atoms)) + line = self.formats["exclusions"].format( + excl.main_atom.number, "".join(other_atoms) + ) result.append(line) - result.insert(0,'; {0:5d} exclusions\n'.format(len(result))) + result.insert(0, "; {0:5d} exclusions\n".format(len(result))) return result - def _make_dihedrals(self,m): + def _make_dihedrals(self, m): result = [] for dih in m.dihedrals: fu = dih.gromacs["func"] - if not dih.gromacs['param']: - line = self.formats['dihedrals'].format( - dih.atom1.number, dih.atom2.number, dih.atom3.number, dih.atom4.number, fu) + if not dih.gromacs["param"]: + line = self.formats["dihedrals"].format( + dih.atom1.number, + dih.atom2.number, + 
dih.atom3.number, + dih.atom4.number, + fu, + ) result.append(line) - for dpar in dih.gromacs['param']: - kchi = dpar['kchi'] - n = dpar['n'] - delta= dpar['delta'] - - line = self.formats['dihedrals_ext'].format(dih.atom1.number, dih.atom2.number, dih.atom3.number, dih.atom4.number, fu, delta, kchi, n) - if dih.comment: line = dih.comment + line + for dpar in dih.gromacs["param"]: + kchi = dpar["kchi"] + n = dpar["n"] + delta = dpar["delta"] + + line = self.formats["dihedrals_ext"].format( + dih.atom1.number, + dih.atom2.number, + dih.atom3.number, + dih.atom4.number, + fu, + delta, + kchi, + n, + ) + if dih.comment: + line = dih.comment + line result.append(line) - result.insert(0,'; {0:5d} dihedrals\n'.format(len(result))) + result.insert(0, "; {0:5d} dihedrals\n".format(len(result))) return result - def _make_impropers(self,m): + def _make_impropers(self, m): result = [] for imp in m.impropers: - fu = imp.gromacs['func'] - - if not imp.gromacs['param']: - line = self.formats['impropers'].format( - imp.atom1.number, imp.atom2.number, imp.atom3.number, imp.atom4.number, fu) + fu = imp.gromacs["func"] + + if not imp.gromacs["param"]: + line = self.formats["impropers"].format( + imp.atom1.number, + imp.atom2.number, + imp.atom3.number, + imp.atom4.number, + fu, + ) result.append(line) - for ipar in imp.gromacs['param']: - kpsi = ipar['kpsi'] - psi0 = ipar['psi0'] + for ipar in imp.gromacs["param"]: + kpsi = ipar["kpsi"] + psi0 = ipar["psi0"] if fu == 2: - line = self.formats['impropers_2'].format(imp.atom1.number, imp.atom2.number, imp.atom3.number, imp.atom4.number, fu, psi0, kpsi) + line = self.formats["impropers_2"].format( + imp.atom1.number, + imp.atom2.number, + imp.atom3.number, + imp.atom4.number, + fu, + psi0, + kpsi, + ) if fu == 4: - n = ipar['n'] - line = self.formats['impropers_4'].format(imp.atom1.number, imp.atom2.number, imp.atom3.number, imp.atom4.number, fu, psi0, kpsi, n) - - if imp.comment: line = imp.comment + line + n = ipar["n"] + line = self.formats["impropers_4"].format( + imp.atom1.number, + imp.atom2.number, + imp.atom3.number, + imp.atom4.number, + fu, + psi0, + kpsi, + n, + ) + + if imp.comment: + line = imp.comment + line result.append(line) - result.insert(0,'; {0:5d} impropers\n'.format(len(result))) + result.insert(0, "; {0:5d} impropers\n".format(len(result))) return result def _make_cmaps(self, m): @@ -1353,10 +1562,15 @@ def _make_cmaps(self, m): for cmap in m.cmaps: fu = 1 - line = '{0:5d} {1:5d} {2:5d} {3:5d} {4:5d} {5:d}\n'.format( - cmap.atom1.number, cmap.atom2.number, cmap.atom3.number, cmap.atom4.number, - cmap.atom8.number, fu) + line = "{0:5d} {1:5d} {2:5d} {3:5d} {4:5d} {5:d}\n".format( + cmap.atom1.number, + cmap.atom2.number, + cmap.atom3.number, + cmap.atom4.number, + cmap.atom8.number, + fu, + ) result.append(line) - result.insert(0,'; {0:5d} cmaps\n'.format(len(result))) + result.insert(0, "; {0:5d} cmaps\n".format(len(result))) return result diff --git a/gromacs/fileformats/xpm.py b/gromacs/fileformats/xpm.py index 2a0ba6fc..fd203b84 100644 --- a/gromacs/fileformats/xpm.py +++ b/gromacs/fileformats/xpm.py @@ -85,6 +85,7 @@ import logging + class XPM(utilities.FileUtils): """Class to make a Gromacs XPM matrix available as a NumPy :class:`numpy.ndarray`. @@ -100,8 +101,9 @@ class XPM(utilities.FileUtils): file) to match the order of the rows. 
""" + default_extension = "xpm" - logger = logging.getLogger('gromacs.formats.XPM') + logger = logging.getLogger("gromacs.formats.XPM") #: compiled regular expression to parse the colors in the xpm file:: #: #: static char *gromacs_xpm[] = { @@ -116,7 +118,8 @@ class XPM(utilities.FileUtils): #: 0x20 (space) to 0x7E (~). #: #: .. _`printable ASCII character`: http://www.danshort.com/ASCIImap/indexhex.htm - COLOUR = re.compile("""\ + COLOUR = re.compile( + """\ ^.*" # start with quotation mark (?P[\x20-\x7E])# printable ASCII symbol used in the actual pixmap: 'space' to '~' \s+ # white-space separated @@ -127,7 +130,9 @@ class XPM(utilities.FileUtils): " # start new string (?P.*) # description/value as free form string " # ... terminated by quotes - """, re.VERBOSE) + """, + re.VERBOSE, + ) def __init__(self, filename=None, **kwargs): """Initialize xpm structure. @@ -146,13 +151,14 @@ def __init__(self, filename=None, **kwargs): self.autoconvert = kwargs.pop("autoconvert", True) self.reverse = kwargs.pop("reverse", True) self.__array = None - super(XPM, self).__init__(**kwargs) # can use kwargs to set dict! (but no sanity checks!) + super(XPM, self).__init__( + **kwargs + ) # can use kwargs to set dict! (but no sanity checks!) if filename is not None: self._init_filename(filename) self.read(filename) - def to_df(self): import pandas as pd @@ -160,10 +166,10 @@ def to_df(self): data = numpy.vstack((self.xvalues, self.array.T)).T # Column names are resids - df = pd.DataFrame(data, columns=["Time"]+ list(self.yvalues)) + df = pd.DataFrame(data, columns=["Time"] + list(self.yvalues)) # Converts Time to a numeric type - df['Time'] = pd.to_numeric(df['Time']) + df["Time"] = pd.to_numeric(df["Time"]) return df @property @@ -199,7 +205,6 @@ def parse(self): # at the end of the line contains the corresponding value colors = dict([self.col(xpm.readline()) for i in range(nc)]) - if self.autoconvert: autoconverter = Autoconverter(mode="singlet") for symbol, value in colors.items(): @@ -211,10 +216,17 @@ def parse(self): self.logger.debug("Guessed array type: %s", dtype.name) # pre-allocate array - data = numpy.zeros((int(nx/nb), ny), dtype=dtype) - - self.logger.debug("dimensions: NX=%d NY=%d strideX=%d (NC=%d) --> (%d, %d)", - nx, ny, nb, nc, nx/nb, ny) + data = numpy.zeros((int(nx / nb), ny), dtype=dtype) + + self.logger.debug( + "dimensions: NX=%d NY=%d strideX=%d (NC=%d) --> (%d, %d)", + nx, + ny, + nb, + nc, + nx / nb, + ny, + ) iy = 0 xval = [] @@ -225,9 +237,9 @@ def parse(self): # lines '/* x-axis:' ... and '/* y-axis:' contain the # values of x and y coordinates s = self.uncomment(line).strip() - if s.startswith('x-axis:'): + if s.startswith("x-axis:"): xval.extend([autoconverter.convert(x) for x in s[7:].split()]) - elif s.startswith('y-axis:'): + elif s.startswith("y-axis:"): yval.extend([autoconverter.convert(y) for y in s[7:].split()]) continue s = self.unquote(line) @@ -240,9 +252,14 @@ def parse(self): # However, without a test case I am not eager to change it right away so in # case some faulty behavior is discovered with the XPM reader then this comment # might be helpful. 
--- Oliver 2014-10-25 - data[:, iy] = [colors[s[k:k+nb]] for k in range(0,nx,nb)] - self.logger.debug("read row %d with %d columns: '%s....%s'", - iy, data.shape[0], s[:4], s[-4:]) + data[:, iy] = [colors[s[k : k + nb]] for k in range(0, nx, nb)] + self.logger.debug( + "read row %d with %d columns: '%s....%s'", + iy, + data.shape[0], + s[:4], + s[-4:], + ) iy += 1 # for next row self.xvalues = numpy.array(xval) @@ -257,22 +274,22 @@ def parse(self): @staticmethod def unquote(s): """Return string *s* with quotes ``"`` removed.""" - return s[1+s.find('"'):s.rfind('"')] + return s[1 + s.find('"') : s.rfind('"')] @staticmethod def uncomment(s): """Return string *s* with C-style comments ``/*`` ... ``*/`` removed.""" - return s[2+s.find('/*'):s.rfind('*/')] - + return s[2 + s.find("/*") : s.rfind("*/")] def col(self, c): """Parse colour specification""" m = self.COLOUR.search(c) if not m: self.logger.fatal("Cannot parse colour specification %r.", c) - raise ParseError("XPM reader: Cannot parse colour specification {0!r}.".format(c)) - value = m.group('value') - color = m.group('symbol') + raise ParseError( + "XPM reader: Cannot parse colour specification {0!r}.".format(c) + ) + value = m.group("value") + color = m.group("symbol") self.logger.debug("%s: %s %s\n", c.strip(), color, value) return color, value - diff --git a/gromacs/fileformats/xvg.py b/gromacs/fileformats/xvg.py index 34355874..9b4cfefa 100644 --- a/gromacs/fileformats/xvg.py +++ b/gromacs/fileformats/xvg.py @@ -204,14 +204,16 @@ import numkit.timeseries -from gromacs.exceptions import (ParseError, MissingDataError, - MissingDataWarning, AutoCorrectionWarning) +from gromacs.exceptions import ( + ParseError, + MissingDataError, + MissingDataWarning, + AutoCorrectionWarning, +) import gromacs.utilities as utilities import gromacs.collections - - class XVG(utilities.FileUtils): """Class that represents the numerical data in a grace xvg file. @@ -261,18 +263,32 @@ class XVG(utilities.FileUtils): # logger: for pickling to work, this *must* be class-level and # cannot be done in __init__() (because we cannot pickle self.logger) - logger = logging.getLogger('gromacs.formats.XVG') + logger = logging.getLogger("gromacs.formats.XVG") #: If :attr:`XVG.savedata` is ``False`` then any attributes in #: :attr:`XVG.__pickle_excluded` are *not* pickled as they are but simply #: pickled with the default value. - __pickle_excluded = {'__array': None} # note class name un-mangling in __getstate__()! + __pickle_excluded = { + "__array": None + } # note class name un-mangling in __getstate__()! #: Default color cycle for :meth:`XVG.plot_coarsened`: #: ``['black', 'red', 'blue', 'orange', 'magenta', 'cyan', 'yellow', 'brown', 'green']`` - default_color_cycle = ['black', 'red', 'blue', 'orange', 'magenta', 'cyan', 'yellow', 'brown', 'green'] - - def __init__(self, filename=None, names=None, array=None, permissive=False, **kwargs): + default_color_cycle = [ + "black", + "red", + "blue", + "orange", + "magenta", + "cyan", + "yellow", + "brown", + "green", + ] + + def __init__( + self, filename=None, names=None, array=None, permissive=False, **kwargs + ): """Initialize the class from a xvg file. 
:Arguments: @@ -304,25 +320,27 @@ def __init__(self, filename=None, names=None, array=None, permissive=False, **kw dictionary of metadata, which is not touched by the class """ - self.__array = None # cache for array (BIG) (used by XVG.array) - self.__cache = {} # cache for computed results - self.savedata = kwargs.pop('savedata', False) + self.__array = None # cache for array (BIG) (used by XVG.array) + self.__cache = {} # cache for computed results + self.savedata = kwargs.pop("savedata", False) if filename is not None: - self._init_filename(filename) # note: reading data from file is delayed until required + self._init_filename( + filename + ) # note: reading data from file is delayed until required if names is None: self.names = [] else: try: - self.names = names.split(',') + self.names = names.split(",") except AttributeError: self.names = names - self.metadata = kwargs.pop('metadata', {}) # reserved for user data + self.metadata = kwargs.pop("metadata", {}) # reserved for user data self.permissive = permissive - self.stride = kwargs.pop('stride', 1) - self.corrupted_lineno = None # must parse() first before this makes sense + self.stride = kwargs.pop("stride", 1) + self.corrupted_lineno = None # must parse() first before this makes sense # default number of data points for calculating correlation times via FFT - self.ncorrel = kwargs.pop('ncorrel', 25000) - self.__correlkwargs = {} # see set_correlparameters() + self.ncorrel = kwargs.pop("ncorrel", 25000) + self.__correlkwargs = {} # see set_correlparameters() if array is not None: self.set(array) @@ -338,13 +356,17 @@ def write(self, filename=None): .. Note:: Only plain files working at the moment, not compressed. """ self._init_filename(filename) - with utilities.openany(self.real_filename, 'w') as xvg: - xvg.write("# xmgrace compatible NXY data file\n" - "# Written by gromacs.formats.XVG()\n") + with utilities.openany(self.real_filename, "w") as xvg: + xvg.write( + "# xmgrace compatible NXY data file\n" + "# Written by gromacs.formats.XVG()\n" + ) xvg.write("# :columns: {0!r}\n".format(self.names)) for xyy in self.array.T: - xyy.tofile(xvg, sep=" ", format="%-8s") # quick and dirty ascii output...--no compression! - xvg.write('\n') + xyy.tofile( + xvg, sep=" ", format="%-8s" + ) # quick and dirty ascii output...--no compression! + xvg.write("\n") @property def array(self): @@ -398,8 +420,13 @@ def _tcorrel(self, nstep=100, **kwargs): .. SeeAlso:: :func:`numkit.timeseries.tcorrel` """ - t = self.array[0,::nstep] - r = gromacs.collections.Collection([numkit.timeseries.tcorrel(t, Y, nstep=1, **kwargs) for Y in self.array[1:,::nstep]]) + t = self.array[0, ::nstep] + r = gromacs.collections.Collection( + [ + numkit.timeseries.tcorrel(t, Y, nstep=1, **kwargs) + for Y in self.array[1:, ::nstep] + ] + ) return r def set_correlparameters(self, **kwargs): @@ -423,21 +450,23 @@ def set_correlparameters(self, **kwargs): .. SeeAlso: :attr:`XVG.error` for details and references. 
""" - self.ncorrel = kwargs.pop('ncorrel', self.ncorrel) or 25000 - nstep = kwargs.pop('nstep', None) + self.ncorrel = kwargs.pop("ncorrel", self.ncorrel) or 25000 + nstep = kwargs.pop("nstep", None) if nstep is None: # good step size leads to ~25,000 data points - nstep = len(self.array[0])/float(self.ncorrel) + nstep = len(self.array[0]) / float(self.ncorrel) nstep = int(numpy.ceil(nstep)) # catch small data sets - kwargs['nstep'] = nstep - self.__correlkwargs.update(kwargs) # only contains legal kw for numkit.timeseries.tcorrel or force + kwargs["nstep"] = nstep + self.__correlkwargs.update( + kwargs + ) # only contains legal kw for numkit.timeseries.tcorrel or force return self.__correlkwargs def _correlprop(self, key, **kwargs): kwargs = self.set_correlparameters(**kwargs) - if not self.__cache.get('tcorrel', None) or kwargs.pop('force', False): - self.__cache['tcorrel'] = self._tcorrel(**kwargs) - return numpy.array(self.__cache['tcorrel'].get(key).tolist()) + if not self.__cache.get("tcorrel", None) or kwargs.pop("force", False): + self.__cache["tcorrel"] = self._tcorrel(**kwargs) + return numpy.array(self.__cache["tcorrel"].get(key).tolist()) @property def error(self): @@ -457,7 +486,7 @@ def error(self): .. _p526: http://books.google.co.uk/books?id=XmyO2oRUg0cC&pg=PA526 """ - return self._correlprop('sigma') + return self._correlprop("sigma") @property def tc(self): @@ -465,7 +494,7 @@ def tc(self): See :meth:`XVG.error` for details. """ - return self._correlprop('tc') + return self._correlprop("tc") def parse(self, stride=None): """Read and cache the file as a numpy array. @@ -478,49 +507,70 @@ def parse(self, stride=None): if stride is None: stride = self.stride self.corrupted_lineno = [] - irow = 0 # count rows of data + irow = 0 # count rows of data # cannot use numpy.loadtxt() because xvg can have two types of 'comment' lines with utilities.openany(self.real_filename) as xvg: rows = [] ncol = None - for lineno,line in enumerate(xvg): + for lineno, line in enumerate(xvg): line = line.strip() if len(line) == 0: continue if "label" in line and "xaxis" in line: - self.xaxis = line.split('"')[-2] + self.xaxis = line.split('"')[-2] if "label" in line and "yaxis" in line: - self.yaxis = line.split('"')[-2] + self.yaxis = line.split('"')[-2] if line.startswith("@ legend"): - if not "legend" in self.metadata: self.metadata["legend"] = [] - self.metadata["legend"].append(line.split("legend ")[-1]) + if not "legend" in self.metadata: + self.metadata["legend"] = [] + self.metadata["legend"].append(line.split("legend ")[-1]) if line.startswith("@ s") and "subtitle" not in line: - name = line.split("legend ")[-1].replace('"','').strip() - self.names.append(name) - if line.startswith(('#', '@')) : - continue - if line.startswith('&'): - raise NotImplementedError('{0!s}: Multi-data not supported, only simple NXY format.'.format(self.real_filename)) + name = line.split("legend ")[-1].replace('"', "").strip() + self.names.append(name) + if line.startswith(("#", "@")): + continue + if line.startswith("&"): + raise NotImplementedError( + "{0!s}: Multi-data not supported, only simple NXY format.".format( + self.real_filename + ) + ) # parse line as floats try: row = [float(el) for el in line.split()] except: if self.permissive: - self.logger.warning("%s: SKIPPING unparsable line %d: %r", - self.real_filename, lineno+1, line) - self.corrupted_lineno.append(lineno+1) + self.logger.warning( + "%s: SKIPPING unparsable line %d: %r", + self.real_filename, + lineno + 1, + line, + ) + 
self.corrupted_lineno.append(lineno + 1) continue - self.logger.error("%s: Cannot parse line %d: %r", - self.real_filename, lineno+1, line) + self.logger.error( + "%s: Cannot parse line %d: %r", + self.real_filename, + lineno + 1, + line, + ) raise # check for same number of columns as in previous step if ncol is not None and len(row) != ncol: if self.permissive: - self.logger.warning("%s: SKIPPING line %d with wrong number of columns: %r", - self.real_filename, lineno+1, line) - self.corrupted_lineno.append(lineno+1) + self.logger.warning( + "%s: SKIPPING line %d with wrong number of columns: %r", + self.real_filename, + lineno + 1, + line, + ) + self.corrupted_lineno.append(lineno + 1) continue - errmsg = "{0!s}: Wrong number of columns in line {1:d}: {2!r}".format(self.real_filename, lineno+1, line) + errmsg = ( + "{0!s}: Wrong number of columns in line {1:d}: {2!r}".format( + self.real_filename, lineno + 1, line + ) + ) self.logger.error(errmsg) raise IOError(errno.ENODATA, errmsg, self.real_filename) # finally: a good line @@ -529,17 +579,25 @@ def parse(self, stride=None): rows.append(row) irow += 1 try: - self.__array = numpy.array(rows).transpose() # cache result + self.__array = numpy.array(rows).transpose() # cache result except: - self.logger.error("%s: Failed reading XVG file, possibly data corrupted. " - "Check the last line of the file...", self.real_filename) + self.logger.error( + "%s: Failed reading XVG file, possibly data corrupted. " + "Check the last line of the file...", + self.real_filename, + ) raise finally: - del rows # try to clean up as well as possible as it can be massively big + del rows # try to clean up as well as possible as it can be massively big def to_df(self): import pandas as _pd - return _pd.DataFrame(self.array.T, columns=[self.xaxis] + (self.names if len(self.names) else [self.yaxis]) , dtype=float) + + return _pd.DataFrame( + self.array.T, + columns=[self.xaxis] + (self.names if len(self.names) else [self.yaxis]), + dtype=float, + ) def set(self, a): """Set the *array* data from *a* (i.e. completely replace). 
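For orientation, a minimal usage sketch of the reformatted ``parse()``/``to_df()`` pair above (the file name ``energy.xvg`` is a placeholder, and pandas must be installed for ``to_df()``):

    from gromacs.formats import XVG

    xvg = XVG("energy.xvg", permissive=True)  # skip corrupted lines instead of failing
    df = xvg.to_df()                          # columns: x-axis label plus legend names
    print(df.head())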
@@ -551,7 +609,11 @@ def set(self, a): def _get_colors(self, color, columns): try: cmap = matplotlib.cm.get_cmap(color) - colors = cmap(matplotlib.colors.Normalize()(numpy.arange(len(columns[1:]), dtype=float))) + colors = cmap( + matplotlib.colors.Normalize()( + numpy.arange(len(columns[1:]), dtype=float) + ) + ) except (TypeError, ValueError): colors = cycle(utilities.asiterable(color)) return colors @@ -598,11 +660,13 @@ def plot(self, **kwargs): *ax* axes instance """ - columns = kwargs.pop('columns', Ellipsis) # slice for everything - maxpoints = kwargs.pop('maxpoints', self.maxpoints_default) - transform = kwargs.pop('transform', lambda x: x) # default is identity transformation - method = kwargs.pop('method', "mean") - ax = kwargs.pop('ax', None) + columns = kwargs.pop("columns", Ellipsis) # slice for everything + maxpoints = kwargs.pop("maxpoints", self.maxpoints_default) + transform = kwargs.pop( + "transform", lambda x: x + ) # default is identity transformation + method = kwargs.pop("method", "mean") + ax = kwargs.pop("ax", None) if columns is Ellipsis or columns is None: columns = numpy.arange(self.array.shape[0]) @@ -615,27 +679,34 @@ def plot(self, **kwargs): a = numpy.ravel(self.array) X = numpy.arange(len(a)) a = numpy.vstack((X, a)) - columns = [0] + [c+1 for c in columns] + columns = [0] + [c + 1 for c in columns] else: a = self.array - colors = self._get_colors(kwargs.pop('color', self.default_color_cycle), columns) + colors = self._get_colors( + kwargs.pop("color", self.default_color_cycle), columns + ) if ax is None: ax = plt.gca() # (decimate/smooth o slice o transform)(array) - a = self.decimate(method, numpy.asarray(transform(a))[columns], maxpoints=maxpoints) + a = self.decimate( + method, numpy.asarray(transform(a))[columns], maxpoints=maxpoints + ) # now deal with infs, nans etc AFTER all transformations (needed for plotting across inf/nan) ma = numpy.ma.MaskedArray(a, mask=numpy.logical_not(numpy.isfinite(a))) # finally plot (each column separately to catch empty sets) - for column, color in zip(range(1,len(columns)), colors): + for column, color in zip(range(1, len(columns)), colors): if len(ma[column]) == 0: - warnings.warn("No data to plot for column {column:d}".format(**vars()), category=MissingDataWarning) - kwargs['color'] = color - ax.plot(ma[0], ma[column], **kwargs) # plot all other columns in parallel + warnings.warn( + "No data to plot for column {column:d}".format(**vars()), + category=MissingDataWarning, + ) + kwargs["color"] = color + ax.plot(ma[0], ma[column], **kwargs) # plot all other columns in parallel return ax def plot_coarsened(self, **kwargs): @@ -666,24 +737,28 @@ def plot_coarsened(self, **kwargs): .. SeeAlso:: :meth:`XVG.plot`, :meth:`XVG.errorbar` and :meth:`XVG.decimate` """ - ax = kwargs.pop('ax', None) - columns = kwargs.pop('columns', Ellipsis) # slice for everything + ax = kwargs.pop("ax", None) + columns = kwargs.pop("columns", Ellipsis) # slice for everything if columns is Ellipsis or columns is None: columns = numpy.arange(self.array.shape[0]) if len(columns) < 2: - raise MissingDataError("plot_coarsened() assumes that there is at least one column " - "of data for the abscissa and one or more for the ordinate.") + raise MissingDataError( + "plot_coarsened() assumes that there is at least one column " + "of data for the abscissa and one or more for the ordinate." 
+ ) - colors = self._get_colors(kwargs.pop('color', self.default_color_cycle), columns) + colors = self._get_colors( + kwargs.pop("color", self.default_color_cycle), columns + ) if ax is None: ax = plt.gca() t = columns[0] - kwargs['demean'] = True - kwargs['ax'] = ax + kwargs["demean"] = True + kwargs["ax"] = ax for column, color in zip(columns[1:], colors): - kwargs['color'] = color + kwargs["color"] = color self.errorbar(columns=[t, column, column], **kwargs) return ax @@ -725,32 +800,40 @@ def errorbar(self, **kwargs): :meth:`XVG.plot` lists keywords common to both methods. """ - ax = kwargs.pop('ax', None) - color = kwargs.pop('color', 'black') - filled = kwargs.pop('filled', True) - fill_alpha = kwargs.pop('fill_alpha', 0.2) - - kwargs.setdefault('capsize', 0) - kwargs.setdefault('elinewidth', 1) - kwargs.setdefault('ecolor', color) - kwargs.setdefault('alpha', 0.3) - kwargs.setdefault('fmt', None) - - columns = kwargs.pop('columns', Ellipsis) # slice for everything - maxpoints = kwargs.pop('maxpoints', self.maxpoints_default) - transform = kwargs.pop('transform', lambda x: x) # default is identity transformation - method = kwargs.pop('method', "mean") + ax = kwargs.pop("ax", None) + color = kwargs.pop("color", "black") + filled = kwargs.pop("filled", True) + fill_alpha = kwargs.pop("fill_alpha", 0.2) + + kwargs.setdefault("capsize", 0) + kwargs.setdefault("elinewidth", 1) + kwargs.setdefault("ecolor", color) + kwargs.setdefault("alpha", 0.3) + kwargs.setdefault("fmt", None) + + columns = kwargs.pop("columns", Ellipsis) # slice for everything + maxpoints = kwargs.pop("maxpoints", self.maxpoints_default) + transform = kwargs.pop( + "transform", lambda x: x + ) # default is identity transformation + method = kwargs.pop("method", "mean") if method != "mean": raise NotImplementedError("For errors only method == 'mean' is supported.") - error_method = kwargs.pop('error_method', "percentile") # can also use 'rms' and 'error' - percentile = numpy.abs(kwargs.pop('percentile', 95.)) - demean = kwargs.pop('demean', False) + error_method = kwargs.pop( + "error_method", "percentile" + ) # can also use 'rms' and 'error' + percentile = numpy.abs(kwargs.pop("percentile", 95.0)) + demean = kwargs.pop("demean", False) # order: (decimate/smooth o slice o transform)(array) try: data = numpy.asarray(transform(self.array))[columns] except IndexError: - raise MissingDataError("columns {0!r} are not suitable to index the transformed array, possibly not eneough data".format(columns)) + raise MissingDataError( + "columns {0!r} are not suitable to index the transformed array, possibly not eneough data".format( + columns + ) + ) if data.shape[-1] == 0: raise MissingDataError("There is no data to be plotted.") a = numpy.zeros((data.shape[0], maxpoints), dtype=numpy.float64) @@ -765,30 +848,46 @@ def errorbar(self, **kwargs): lower_per = percentile # demean generally does not make sense with the percentiles (but for analysing # the regularised data itself we use this as a flag --- see below!) 
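            # percentile error bars: the upper bound is written into a[2:], while the
            # lower bound is kept separately in ``lower`` for the plotting code below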
- upper = a[2:] = self.decimate("percentile", error_data, maxpoints=maxpoints, - per=upper_per, demean=False)[1:] - lower = self.decimate("percentile", error_data, maxpoints=maxpoints, - per=lower_per, demean=False)[1:] + upper = a[2:] = self.decimate( + "percentile", + error_data, + maxpoints=maxpoints, + per=upper_per, + demean=False, + )[1:] + lower = self.decimate( + "percentile", + error_data, + maxpoints=maxpoints, + per=lower_per, + demean=False, + )[1:] else: - a[2:] = self.decimate(error_method, error_data, maxpoints=maxpoints, demean=demean)[1:] + a[2:] = self.decimate( + error_method, error_data, maxpoints=maxpoints, demean=demean + )[1:] lower = None # now deal with infs, nans etc AFTER all transformations (needed for plotting across inf/nan) ma = numpy.ma.MaskedArray(a, mask=numpy.logical_not(numpy.isfinite(a))) if lower is not None: - mlower = numpy.ma.MaskedArray(lower, mask=numpy.logical_not(numpy.isfinite(lower))) + mlower = numpy.ma.MaskedArray( + lower, mask=numpy.logical_not(numpy.isfinite(lower)) + ) # finally plot - X = ma[0] # abscissa set separately + X = ma[0] # abscissa set separately Y = ma[1] try: - kwargs['yerr'] = ma[3] - kwargs['xerr'] = ma[2] + kwargs["yerr"] = ma[3] + kwargs["xerr"] = ma[2] except IndexError: try: - kwargs['yerr'] = ma[2] + kwargs["yerr"] = ma[2] except IndexError: - raise TypeError("Either too few columns selected or data does not have a error column") + raise TypeError( + "Either too few columns selected or data does not have a error column" + ) if ax is None: ax = plt.gca() @@ -799,26 +898,26 @@ def errorbar(self, **kwargs): if demean: # signal that we are looking at percentiles of an observable and not error y1 = mlower[-1] - y2 = kwargs['yerr'] + y2 = kwargs["yerr"] else: # percentiles of real errors (>0) y1 = Y - mlower[-1] - y2 = Y + kwargs['yerr'] + y2 = Y + kwargs["yerr"] else: - y1 = Y - kwargs['yerr'] - y2 = Y + kwargs['yerr'] + y1 = Y - kwargs["yerr"] + y2 = Y + kwargs["yerr"] ax.fill_between(X, y1, y2, color=color, alpha=fill_alpha) else: if error_method == "percentile": # errorbars extend to different lengths; if demean: - kwargs['yerr'] = numpy.vstack((mlower[-1], kwargs['yerr'])) + kwargs["yerr"] = numpy.vstack((mlower[-1], kwargs["yerr"])) else: - kwargs['yerr'] = numpy.vstack((Y - mlower[-1], Y + kwargs['yerr'])) + kwargs["yerr"] = numpy.vstack((Y - mlower[-1], Y + kwargs["yerr"])) try: # xerr only makes sense when the data is a real # error so we don't even bother with demean=? - kwargs['xerr'] = numpy.vstack((X - mlower[0], X + kwargs['xerr'])) + kwargs["xerr"] = numpy.vstack((X - mlower[0], X + kwargs["xerr"])) except (KeyError, IndexError): pass ax.errorbar(X, Y, **kwargs) @@ -826,7 +925,7 @@ def errorbar(self, **kwargs): # clean up args for plot for kw in "yerr", "xerr", "capsize", "ecolor", "elinewidth", "fmt": kwargs.pop(kw, None) - kwargs['alpha'] = 1.0 + kwargs["alpha"] = 1.0 ax.plot(X, Y, color=color, **kwargs) @@ -872,21 +971,22 @@ def decimate(self, method, a, maxpoints=10000, **kwargs): with ``M' == M`` (except when ``M == 1``, see above) and ``N' <= N`` (``N'`` is *maxpoints*). 
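        Example (sketch; assumes ``xvg.array`` already holds data with the
        abscissa in the first row)::

           coarse = xvg.decimate("mean", xvg.array, maxpoints=1000)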
""" - methods = {'mean': self.decimate_mean, - 'min': self.decimate_min, - 'max': self.decimate_max, - 'smooth': self.decimate_smooth, - 'rms': self.decimate_rms, - 'percentile': self.decimate_percentile, - 'error': self.decimate_error, # undocumented, not working well - 'circmean': self.decimate_circmean, - } + methods = { + "mean": self.decimate_mean, + "min": self.decimate_min, + "max": self.decimate_max, + "smooth": self.decimate_smooth, + "rms": self.decimate_rms, + "percentile": self.decimate_percentile, + "error": self.decimate_error, # undocumented, not working well + "circmean": self.decimate_circmean, + } if len(a.shape) == 1: # add first column as index # (probably should do this in class/init anyway...) X = numpy.arange(len(a)) a = numpy.vstack([X, a]) - ny = a.shape[-1] # assume 1D or 2D array with last dimension varying fastest + ny = a.shape[-1] # assume 1D or 2D array with last dimension varying fastest if maxpoints is None or ny <= maxpoints: return a return methods[method](a, maxpoints, **kwargs) @@ -909,7 +1009,9 @@ def decimate_mean(self, a, maxpoints, **kwargs): Assumes that the first column is time. """ - return self._decimate(numkit.timeseries.mean_histogrammed_function, a, maxpoints, **kwargs) + return self._decimate( + numkit.timeseries.mean_histogrammed_function, a, maxpoints, **kwargs + ) def decimate_circmean(self, a, maxpoints, **kwargs): """Return data *a* circmean-decimated on *maxpoints*. @@ -944,13 +1046,17 @@ def decimate_circmean(self, a, maxpoints, **kwargs): """ a_rad = numpy.vstack((a[0], numpy.deg2rad(a[1:]))) - b = self._decimate(numkit.timeseries.circmean_histogrammed_function, a_rad, maxpoints, **kwargs) + b = self._decimate( + numkit.timeseries.circmean_histogrammed_function, a_rad, maxpoints, **kwargs + ) y_ma, x_ma = break_array(b[1], threshold=numpy.pi, other=b[0]) v = [y_ma] for y in b[2:]: v.append(break_array(y, threshold=numpy.pi)[0]) if v[-1].shape != v[0].shape: - raise ValueError("y dimensions have different breaks: you MUST deal with them separately") + raise ValueError( + "y dimensions have different breaks: you MUST deal with them separately" + ) return numpy.vstack((x_ma, numpy.rad2deg(v))) def decimate_min(self, a, maxpoints, **kwargs): @@ -971,7 +1077,9 @@ def decimate_min(self, a, maxpoints, **kwargs): Assumes that the first column is time. """ - return self._decimate(numkit.timeseries.min_histogrammed_function, a, maxpoints, **kwargs) + return self._decimate( + numkit.timeseries.min_histogrammed_function, a, maxpoints, **kwargs + ) def decimate_max(self, a, maxpoints, **kwargs): """Return data *a* max-decimated on *maxpoints*. @@ -991,7 +1099,9 @@ def decimate_max(self, a, maxpoints, **kwargs): Assumes that the first column is time. """ - return self._decimate(numkit.timeseries.max_histogrammed_function, a, maxpoints, **kwargs) + return self._decimate( + numkit.timeseries.max_histogrammed_function, a, maxpoints, **kwargs + ) def decimate_rms(self, a, maxpoints, **kwargs): """Return data *a* rms-decimated on *maxpoints*. @@ -1011,7 +1121,9 @@ def decimate_rms(self, a, maxpoints, **kwargs): Assumes that the first column is time. """ - return self._decimate(numkit.timeseries.rms_histogrammed_function, a, maxpoints, **kwargs) + return self._decimate( + numkit.timeseries.rms_histogrammed_function, a, maxpoints, **kwargs + ) def decimate_percentile(self, a, maxpoints, **kwargs): """Return data *a* percentile-decimated on *maxpoints*. @@ -1038,7 +1150,9 @@ def decimate_percentile(self, a, maxpoints, **kwargs): .. 
SeeAlso:: :func:`numkit.timeseries.regularized_function` with :func:`scipy.stats.scoreatpercentile` """ - return self._decimate(numkit.timeseries.percentile_histogrammed_function, a, maxpoints, **kwargs) + return self._decimate( + numkit.timeseries.percentile_histogrammed_function, a, maxpoints, **kwargs + ) def decimate_error(self, a, maxpoints, **kwargs): """Return data *a* error-decimated on *maxpoints*. @@ -1063,13 +1177,16 @@ def decimate_error(self, a, maxpoints, **kwargs): datapoints to compute a good autocorrelation function. """ - warnings.warn("Using undocumented decimate_error() is highly EXPERIMENTAL", - category=gromacs.exceptions.LowAccuracyWarning) - return self._decimate(numkit.timeseries.error_histogrammed_function, a, - maxpoints, **kwargs) + warnings.warn( + "Using undocumented decimate_error() is highly EXPERIMENTAL", + category=gromacs.exceptions.LowAccuracyWarning, + ) + return self._decimate( + numkit.timeseries.error_histogrammed_function, a, maxpoints, **kwargs + ) def _decimate(self, func, a, maxpoints, **kwargs): - ny = a.shape[-1] # assume 2D array with last dimension varying fastest + ny = a.shape[-1] # assume 2D array with last dimension varying fastest out = numpy.zeros((a.shape[0], maxpoints), dtype=float) t = a[0] @@ -1077,18 +1194,21 @@ def _decimate(self, func, a, maxpoints, **kwargs): # compute regularised data for each column separately out[i], out[0] = func(t, a[i], bins=maxpoints, **kwargs) - if maxpoints == self.maxpoints_default: # only warn if user did not set maxpoints + if ( + maxpoints == self.maxpoints_default + ): # only warn if user did not set maxpoints try: - funcname = func.func_name # Python 2 + funcname = func.func_name # Python 2 except AttributeError: - funcname = func.__name__ # Python 3 - warnings.warn("Plot had %d datapoints > maxpoints = %d; decimated to %d regularly " - "spaced points from the histogrammed data with %s()." - % (ny, maxpoints, maxpoints, funcname), - category=AutoCorrectionWarning) + funcname = func.__name__ # Python 3 + warnings.warn( + "Plot had %d datapoints > maxpoints = %d; decimated to %d regularly " + "spaced points from the histogrammed data with %s()." + % (ny, maxpoints, maxpoints, funcname), + category=AutoCorrectionWarning, + ) return out - def decimate_smooth(self, a, maxpoints, window="flat"): """Return smoothed data *a* decimated on approximately *maxpoints* points. @@ -1110,7 +1230,7 @@ def decimate_smooth(self, a, maxpoints, window="flat"): TODO: - Allow treating the 1st column as data """ - ny = a.shape[-1] # assume 1D or 2D array with last dimension varying fastest + ny = a.shape[-1] # assume 1D or 2D array with last dimension varying fastest # reduce size by averaging oover stepsize steps and then just # picking every stepsize data points. (primitive --- can # leave out bits at the end or end up with almost twice of @@ -1121,16 +1241,20 @@ def decimate_smooth(self, a, maxpoints, window="flat"): out = numpy.empty_like(a) # smoothed - out[0,:] = a[0] + out[0, :] = a[0] for i in range(1, a.shape[0]): # process columns because smooth() only handles 1D arrays :-p - out[i,:] = numkit.timeseries.smooth(a[i], stepsize, window=window) - - if maxpoints == self.maxpoints_default: # only warn if user did not set maxpoints - warnings.warn("Plot had %d datapoints > maxpoints = %d; decimated to %d regularly " - "spaced points with smoothing (%r) over %d steps." 
- % (ny, maxpoints, ny/stepsize, window, stepsize), - category=AutoCorrectionWarning) + out[i, :] = numkit.timeseries.smooth(a[i], stepsize, window=window) + + if ( + maxpoints == self.maxpoints_default + ): # only warn if user did not set maxpoints + warnings.warn( + "Plot had %d datapoints > maxpoints = %d; decimated to %d regularly " + "spaced points with smoothing (%r) over %d steps." + % (ny, maxpoints, ny / stepsize, window, stepsize), + category=AutoCorrectionWarning, + ) return out[..., ::stepsize] def __getstate__(self): @@ -1144,12 +1268,14 @@ def __getstate__(self): d = self.__dict__ else: # do not pickle the big array cache - mangleprefix = '_'+self.__class__.__name__ + mangleprefix = "_" + self.__class__.__name__ + def demangle(k): """_XVG__array --> __array""" if k.startswith(mangleprefix): - k = k.replace(mangleprefix,'') + k = k.replace(mangleprefix, "") return k + d = {} for k in self.__dict__: d[k] = self.__pickle_excluded.get(demangle(k), self.__dict__[k]) @@ -1157,11 +1283,11 @@ def demangle(k): def __setstate__(self, d): # compatibility with older (pre 0.1.13) pickled instances - if not 'savedata' in d: + if not "savedata" in d: wmsg = "Reading pre 0.1.13 pickle file: setting savedata=False" warnings.warn(wmsg, category=DeprecationWarning) self.logger.warning(wmsg) - d['savedata'] = False # new default + d["savedata"] = False # new default self.__dict__.update(d) @@ -1189,18 +1315,18 @@ def break_array(a, threshold=numpy.pi, other=None): breaks += 1 # is this needed?? -- no, but leave it here as a reminder - #f2 = numpy.diff(a, 2) - #up = (f2[breaks - 1] >= 0) # >0: up, <0: down + # f2 = numpy.diff(a, 2) + # up = (f2[breaks - 1] >= 0) # >0: up, <0: down # sort into up and down breaks: - #breaks_up = breaks[up] - #breaks_down = breaks[~up] + # breaks_up = breaks[up] + # breaks_down = breaks[~up] # new array b including insertions for all the breaks m = len(breaks) b = numpy.empty((len(a) + m)) # calculate new indices for breaks in b, taking previous insertions into account b_breaks = breaks + numpy.arange(m) - mask = numpy.zeros_like(b, dtype=bool) + mask = numpy.zeros_like(b, dtype=bool) mask[b_breaks] = True b[~mask] = a b[mask] = numpy.NAN @@ -1214,6 +1340,3 @@ def break_array(a, threshold=numpy.pi, other=None): ma_c = None return numpy.ma.array(b, mask=mask), ma_c - - - diff --git a/gromacs/formats.py b/gromacs/formats.py index 431ee22e..d534bb3c 100644 --- a/gromacs/formats.py +++ b/gromacs/formats.py @@ -52,5 +52,3 @@ __all__ = ["XVG", "MDP", "NDX", "uniqueNDX", "XPM", "TOP"] from .fileformats import XVG, MDP, NDX, uniqueNDX, XPM, TOP - - diff --git a/gromacs/log.py b/gromacs/log.py index 5c30f959..0337aa9f 100644 --- a/gromacs/log.py +++ b/gromacs/log.py @@ -64,16 +64,17 @@ import logging -def create(logger_name, logfile='gromacs.log'): + +def create(logger_name, logfile="gromacs.log"): """Create a top level logger. - The file logger logs everything (including DEBUG). - The console logger only logs INFO and above. Logging to a file and the console. - + See http://docs.python.org/library/logging.html?#logging-to-multiple-destinations - + The top level logger of the library is named 'gromacs'. Note that we are configuring this logger with console output. 
If the root logger also does this then we will get two output lines to the @@ -86,7 +87,9 @@ def create(logger_name, logfile='gromacs.log'): logger.setLevel(logging.DEBUG) logfile = logging.FileHandler(logfile) - logfile_formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s') + logfile_formatter = logging.Formatter( + "%(asctime)s %(name)-12s %(levelname)-8s %(message)s" + ) logfile.setFormatter(logfile_formatter) logger.addHandler(logfile) @@ -94,15 +97,16 @@ def create(logger_name, logfile='gromacs.log'): console = logging.StreamHandler() console.setLevel(logging.INFO) # set a format which is simpler for console use - formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s') + formatter = logging.Formatter("%(name)-12s: %(levelname)-8s %(message)s") console.setFormatter(formatter) logger.addHandler(console) return logger + def clear_handlers(logger): - """clean out handlers in the library top level logger + """clean out handlers in the library top level logger (only important for reload/debug cycles...) """ @@ -118,7 +122,6 @@ class NullHandler(logging.Handler): logging.getLogger("gromacs").addHandler(h) del h """ + def emit(self, record): pass - - diff --git a/gromacs/qsub.py b/gromacs/qsub.py index 9a5a9b0b..8f722b98 100644 --- a/gromacs/qsub.py +++ b/gromacs/qsub.py @@ -205,13 +205,16 @@ from .exceptions import AutoCorrectionWarning import logging -logger = logging.getLogger('gromacs.qsub') + +logger = logging.getLogger("gromacs.qsub") class QueuingSystem(object): """Class that represents minimum information about a batch submission system.""" - def __init__(self, name, suffix, qsub_prefix, array_variable=None, array_option=None): + def __init__( + self, name, suffix, qsub_prefix, array_variable=None, array_option=None + ): """Define a queuing system's functionality :Arguments: @@ -238,7 +241,7 @@ def __init__(self, name, suffix, qsub_prefix, array_variable=None, array_option= def flag(self, *args): """Return string for qsub flag *args* prefixed with appropriate inscript prefix.""" - return " ".join((self.qsub_prefix,)+args) + return " ".join((self.qsub_prefix,) + args) def has_arrays(self): """True if known how to do job arrays.""" @@ -246,7 +249,7 @@ def has_arrays(self): def array_flag(self, directories): """Return string to embed the array launching option in the script.""" - return self.flag(self.array_option % (1,len(directories))) + return self.flag(self.array_option % (1, len(directories))) def array(self, directories): """Return multiline string for simple array jobs over *directories*. @@ -255,46 +258,61 @@ def array(self, directories): be ``bash`` (and *not* ``csh`` or ``sh``). 
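        Example (sketch, using the pre-defined SGE entry from the
        :data:`queuing_systems` list further below)::

           sge = queuing_systems[0]
           print(sge.array(["sim1", "sim2", "sim3"]))  # bash snippet for a 3-task array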
""" if not self.has_arrays(): - raise NotImplementedError('Not known how make array jobs for ' - 'queuing system %(name)s' % vars(self)) - hrule = '#'+60*'-' + raise NotImplementedError( + "Not known how make array jobs for " + "queuing system %(name)s" % vars(self) + ) + hrule = "#" + 60 * "-" lines = [ - '', + "", hrule, - '# job array:', + "# job array:", self.array_flag(directories), hrule, - '# directories for job tasks', - 'declare -a jobdirs'] - for i,dirname in enumerate(asiterable(directories)): - idx = i+1 # job array indices are 1-based - lines.append('jobdirs[{idx:d}]={dirname!r}'.format(**vars())) - lines.extend([ - '# Switch to the current tasks directory:', + "# directories for job tasks", + "declare -a jobdirs", + ] + for i, dirname in enumerate(asiterable(directories)): + idx = i + 1 # job array indices are 1-based + lines.append("jobdirs[{idx:d}]={dirname!r}".format(**vars())) + lines.extend( + [ + "# Switch to the current tasks directory:", 'wdir="${{jobdirs[${{{array_variable!s}}}]}}"'.format(**vars(self)), 'cd "$wdir" || { echo "ERROR: failed to enter $wdir."; exit 1; }', hrule, - '' - ]) + "", + ] + ) return "\n".join(lines) def isMine(self, scriptname): """Primitive queuing system detection; only looks at suffix at the moment.""" suffix = os.path.splitext(scriptname)[1].lower() - if suffix.startswith('.'): + if suffix.startswith("."): suffix = suffix[1:] return self.suffix == suffix def __repr__(self): - return "<"+self.name+" QueuingSystem instance>" + return "<" + self.name + " QueuingSystem instance>" + #: Pre-defined queuing systems (SGE, PBS). Add your own here. queuing_systems = [ - QueuingSystem('Sun Gridengine', 'sge', '#$', array_variable='SGE_TASK_ID', array_option='-t %d-%d'), - QueuingSystem('PBS', 'pbs', '#PBS', array_variable='PBS_ARRAY_INDEX', array_option='-J %d-%d'), - QueuingSystem('LoadLeveler', 'll', '#@'), # no idea how to do arrays in LL - QueuingSystem('Slurm', 'slu', '#SBATCH'), # will add array settings - ] + QueuingSystem( + "Sun Gridengine", + "sge", + "#$", + array_variable="SGE_TASK_ID", + array_option="-t %d-%d", + ), + QueuingSystem( + "PBS", "pbs", "#PBS", array_variable="PBS_ARRAY_INDEX", array_option="-J %d-%d" + ), + QueuingSystem("LoadLeveler", "ll", "#@"), # no idea how to do arrays in LL + QueuingSystem("Slurm", "slu", "#SBATCH"), # will add array settings +] + def detect_queuing_system(scriptfile): """Return the queuing system for which *scriptfile* was written.""" @@ -303,9 +321,20 @@ def detect_queuing_system(scriptfile): return qs return None -def generate_submit_scripts(templates, prefix=None, deffnm='md', jobname='MD', budget=None, - mdrun_opts=None, walltime=1.0, jobarray_string=None, startdir=None, - npme=None, **kwargs): + +def generate_submit_scripts( + templates, + prefix=None, + deffnm="md", + jobname="MD", + budget=None, + mdrun_opts=None, + walltime=1.0, + jobarray_string=None, + startdir=None, + npme=None, + **kwargs +): """Write scripts for queuing systems. 
@@ -347,16 +376,20 @@ def generate_submit_scripts(templates, prefix=None, deffnm='md', jobname='MD', b :Returns: list of generated run scripts """ if not jobname[0].isalpha(): - jobname = 'MD_'+jobname - wmsg = "To make the jobname legal it must start with a letter: changed to {0!r}".format(jobname) + jobname = "MD_" + jobname + wmsg = "To make the jobname legal it must start with a letter: changed to {0!r}".format( + jobname + ) logger.warning(wmsg) warnings.warn(wmsg, category=AutoCorrectionWarning) if prefix is None: prefix = "" if mdrun_opts is not None: - mdrun_opts = '"'+str(mdrun_opts)+'"' # TODO: could test if quotes already present + mdrun_opts = ( + '"' + str(mdrun_opts) + '"' + ) # TODO: could test if quotes already present - dirname = kwargs.pop('dirname', os.path.curdir) + dirname = kwargs.pop("dirname", os.path.curdir) wt = Timedelta(hours=walltime) walltime = wt.strftime("%h:%M:%S") @@ -364,38 +397,66 @@ def generate_submit_scripts(templates, prefix=None, deffnm='md', jobname='MD', b def write_script(template): submitscript = os.path.join(dirname, prefix + os.path.basename(template)) - logger.info("Setting up queuing system script {submitscript!r}...".format(**vars())) + logger.info( + "Setting up queuing system script {submitscript!r}...".format(**vars()) + ) # These substitution rules are documented for the user in the module doc string qsystem = detect_queuing_system(template) - if qsystem is not None and (qsystem.name == 'Slurm'): - cbook.edit_txt(template, - [('^ *DEFFNM=','(?<==)(.*)', deffnm), - ('^#.*(-J)', '((?<=-J\s))\s*\w+', jobname), - ('^#.*(-A|account_no)', '((?<=-A\s)|(?<=account_no\s))\s*\w+', budget), - ('^#.*(-t)', '(?<=-t\s)(\d+:\d+:\d+)', walltime), - ('^ *WALL_HOURS=', '(?<==)(.*)', wall_hours), - ('^ *STARTDIR=', '(?<==)(.*)', startdir), - ('^ *NPME=', '(?<==)(.*)', npme), - ('^ *MDRUN_OPTS=', '(?<==)("")', mdrun_opts), # only replace literal "" - ('^# JOB_ARRAY_PLACEHOLDER', '^.*$', jobarray_string), - ], - newname=submitscript) + if qsystem is not None and (qsystem.name == "Slurm"): + cbook.edit_txt( + template, + [ + ("^ *DEFFNM=", "(?<==)(.*)", deffnm), + ("^#.*(-J)", "((?<=-J\s))\s*\w+", jobname), + ( + "^#.*(-A|account_no)", + "((?<=-A\s)|(?<=account_no\s))\s*\w+", + budget, + ), + ("^#.*(-t)", "(?<=-t\s)(\d+:\d+:\d+)", walltime), + ("^ *WALL_HOURS=", "(?<==)(.*)", wall_hours), + ("^ *STARTDIR=", "(?<==)(.*)", startdir), + ("^ *NPME=", "(?<==)(.*)", npme), + ( + "^ *MDRUN_OPTS=", + '(?<==)("")', + mdrun_opts, + ), # only replace literal "" + ("^# JOB_ARRAY_PLACEHOLDER", "^.*$", jobarray_string), + ], + newname=submitscript, + ) ext = os.path.splitext(submitscript)[1] else: - cbook.edit_txt(template, - [('^ *DEFFNM=','(?<==)(.*)', deffnm), - ('^#.*(-N|job_name)', '((?<=-N\s)|(?<=job_name\s))\s*\w+', jobname), - ('^#.*(-A|account_no)', '((?<=-A\s)|(?<=account_no\s))\s*\w+', budget), - ('^#.*(-l walltime|wall_clock_limit)', '(?<==)(\d+:\d+:\d+)', walltime), - ('^ *WALL_HOURS=', '(?<==)(.*)', wall_hours), - ('^ *STARTDIR=', '(?<==)(.*)', startdir), - ('^ *NPME=', '(?<==)(.*)', npme), - ('^ *MDRUN_OPTS=', '(?<==)("")', mdrun_opts), # only replace literal "" - ('^# JOB_ARRAY_PLACEHOLDER', '^.*$', jobarray_string), - ], - newname=submitscript) + cbook.edit_txt( + template, + [ + ("^ *DEFFNM=", "(?<==)(.*)", deffnm), + ("^#.*(-N|job_name)", "((?<=-N\s)|(?<=job_name\s))\s*\w+", jobname), + ( + "^#.*(-A|account_no)", + "((?<=-A\s)|(?<=account_no\s))\s*\w+", + budget, + ), + ( + "^#.*(-l walltime|wall_clock_limit)", + "(?<==)(\d+:\d+:\d+)", + walltime, + ), 
+ ("^ *WALL_HOURS=", "(?<==)(.*)", wall_hours), + ("^ *STARTDIR=", "(?<==)(.*)", startdir), + ("^ *NPME=", "(?<==)(.*)", npme), + ( + "^ *MDRUN_OPTS=", + '(?<==)("")', + mdrun_opts, + ), # only replace literal "" + ("^# JOB_ARRAY_PLACEHOLDER", "^.*$", jobarray_string), + ], + newname=submitscript, + ) ext = os.path.splitext(submitscript)[1] - if ext in ('.sh', '.csh', '.bash'): + if ext in (".sh", ".csh", ".bash"): os.chmod(submitscript, 0o755) return submitscript @@ -433,21 +494,34 @@ def generate_submit_array(templates, directories, **kwargs): *kwargs* See :func:`gromacs.setup.generate_submit_script` for details. """ - dirname = kwargs.setdefault('dirname', os.path.curdir) + dirname = kwargs.setdefault("dirname", os.path.curdir) reldirs = [relpath(p, start=dirname) for p in asiterable(directories)] - missing = [p for p in (os.path.join(dirname, subdir) for subdir in reldirs) - if not os.path.exists(p)] + missing = [ + p + for p in (os.path.join(dirname, subdir) for subdir in reldirs) + if not os.path.exists(p) + ] if len(missing) > 0: - logger.debug("template=%(template)r: dirname=%(dirname)r reldirs=%(reldirs)r", vars()) - logger.error("Some directories are not accessible from the array script: " - "%(missing)r", vars()) + logger.debug( + "template=%(template)r: dirname=%(dirname)r reldirs=%(reldirs)r", vars() + ) + logger.error( + "Some directories are not accessible from the array script: " "%(missing)r", + vars(), + ) + def write_script(template): qsystem = detect_queuing_system(template) if qsystem is None or not qsystem.has_arrays(): - logger.warning("Not known how to make a job array for %(template)r; skipping...", vars()) + logger.warning( + "Not known how to make a job array for %(template)r; skipping...", + vars(), + ) return None - kwargs['jobarray_string'] = qsystem.array(reldirs) - return generate_submit_scripts(template, **kwargs)[0] # returns list of length 1 + kwargs["jobarray_string"] = qsystem.array(reldirs) + return generate_submit_scripts(template, **kwargs)[ + 0 + ] # returns list of length 1 # must use config.get_templates() because we need to access the file for detecting return [write_script(template) for template in config.get_templates(templates)] diff --git a/gromacs/run.py b/gromacs/run.py index 8ff970d5..701141c1 100644 --- a/gromacs/run.py +++ b/gromacs/run.py @@ -97,6 +97,7 @@ class MDrunnerMPI(gromacs.run.MDrunner): """ from __future__ import absolute_import, with_statement + __docformat__ = "restructuredtext en" import warnings @@ -106,15 +107,17 @@ class MDrunnerMPI(gromacs.run.MDrunner): # logging import logging -logger = logging.getLogger('gromacs.run') + +logger = logging.getLogger("gromacs.run") # gromacs modules import gromacs -from . exceptions import GromacsError, AutoCorrectionWarning +from .exceptions import GromacsError, AutoCorrectionWarning from . import core from . 
import utilities + def find_gromacs_command(commands): """Return *driver* and *name* of the first command that can be found on :envvar:`PATH`""" @@ -132,7 +135,9 @@ def find_gromacs_command(commands): if utilities.which(executable): break else: - raise OSError(errno.ENOENT, "No Gromacs executable found in", ", ".join(commands)) + raise OSError( + errno.ENOENT, "No Gromacs executable found in", ", ".join(commands) + ) return driver, name @@ -204,24 +209,29 @@ def __init__(self, dirname=os.path.curdir, **kwargs): self.driver, self.name = find_gromacs_command(self.mdrun) # use a GromacsCommand class for handling arguments - cls = type('MDRUN', (core.GromacsCommand,), - {'command_name': self.name, - 'driver': self.driver, - '__doc__': "MDRUN command {0} {1}".format(self.driver, self.name) - }) - - kwargs['failure'] = 'raise' # failure mode of class + cls = type( + "MDRUN", + (core.GromacsCommand,), + { + "command_name": self.name, + "driver": self.driver, + "__doc__": "MDRUN command {0} {1}".format(self.driver, self.name), + }, + ) + + kwargs["failure"] = "raise" # failure mode of class self.MDRUN = cls(**kwargs) # might fail for mpi binaries? .. -h? # analyze command line to deduce logfile name - logname = kwargs.get('g') # explicit - if logname in (True, None): # implicit - logname = 'md' # mdrun default - deffnm = kwargs.get('deffnm') + logname = kwargs.get("g") # explicit + if logname in (True, None): # implicit + logname = "md" # mdrun default + deffnm = kwargs.get("deffnm") if deffnm is not None: logname = deffnm self.logname = os.path.realpath( - os.path.join(self.dirname, self.filename(logname, ext='log'))) + os.path.join(self.dirname, self.filename(logname, ext="log")) + ) def commandline(self, **mpiargs): """Returns simple command line to invoke mdrun. @@ -250,10 +260,12 @@ def mpicommand(self, *args, **kwargs): complicated cases.) 
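A sketch of driving the ``MDrunner`` machinery above (the directory name and mdrun options are illustrative, and a GROMACS ``mdrun``/``gmx`` binary must be on the ``PATH``):

    from gromacs.run import MDrunner

    runner = MDrunner(dirname="MD", deffnm="md", v=True)  # wraps the first mdrun found
    print(runner.commandline())                           # argument list that run() would execute
    # rc = runner.run()                                   # uncomment to actually launch mdrun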
""" if self.mpiexec is None: - raise NotImplementedError("Override mpiexec to enable the simple OpenMP launcher") + raise NotImplementedError( + "Override mpiexec to enable the simple OpenMP launcher" + ) # example implementation - ncores = kwargs.pop('ncores', 8) - return [self.mpiexec, '-n', str(ncores)] + ncores = kwargs.pop("ncores", 8) + return [self.mpiexec, "-n", str(ncores)] def prehook(self, **kwargs): """Called directly before launching the process.""" @@ -286,22 +298,24 @@ def run(self, pre=None, post=None, mdrunargs=None, **mpiargs): try: self.MDRUN.gmxargs.update(mdrunargs) except (ValueError, TypeError): - msg = "mdrunargs must be a dict of mdrun options, not {0}".format(mdrunargs) + msg = "mdrunargs must be a dict of mdrun options, not {0}".format( + mdrunargs + ) logger.error(msg) raise cmd = self.commandline(**mpiargs) with utilities.in_dir(self.dirname, create=False): - try: - self.prehook(**pre) - logger.info(" ".join(cmd)) - rc = subprocess.call(cmd) - except: - logger.exception("Failed MD run for unknown reasons.") - raise - finally: - self.posthook(**post) + try: + self.prehook(**pre) + logger.info(" ".join(cmd)) + rc = subprocess.call(cmd) + except: + logger.exception("Failed MD run for unknown reasons.") + raise + finally: + self.posthook(**post) if rc == 0: logger.info("MDrun completed ok, returncode = {0:d}".format(rc)) else: @@ -323,7 +337,7 @@ def run_check(self, **kwargs): - ``True`` if run completed successfully - ``False`` otherwise """ - rc = None # set to something in case we ever want to look at it later (and bomb in the try block) + rc = None # set to something in case we ever want to look at it later (and bomb in the try block) try: rc = self.run(**kwargs) except: @@ -342,37 +356,43 @@ def check_success(self): """ return check_mdrun_success(self.logname) + class MDrunnerDoublePrecision(MDrunner): """Manage running :program:`mdrun_d`.""" + mdrun = ("mdrun_d", "gmx_d mdrun") + class MDrunnerOpenMP(MDrunner): """Manage running :program:`mdrun` as an OpenMP_ multiprocessor job. .. _OpenMP: http://openmp.org/wp/ """ + mdrun = ("mdrun_openmp", "gmx_openmp mdrun") mpiexec = "mpiexec" + class MDrunnerMpich2Smpd(MDrunner): """Manage running :program:`mdrun` as mpich2_ multiprocessor job with the SMPD mechanism. .. 
_mpich2: http://www.mcs.anl.gov/research/projects/mpich2/ """ + mdrun = "mdrun_mpich2" mpiexec = "mpiexec" def prehook(self, **kwargs): """Launch local smpd.""" - cmd = ['smpd', '-s'] - logger.info("Starting smpd: "+" ".join(cmd)) + cmd = ["smpd", "-s"] + logger.info("Starting smpd: " + " ".join(cmd)) rc = subprocess.call(cmd) return rc def posthook(self, **kwargs): """Shut down smpd""" - cmd = ['smpd', '-shutdown'] - logger.info("Shutting down smpd: "+" ".join(cmd)) + cmd = ["smpd", "-shutdown"] + logger.info("Shutting down smpd: " + " ".join(cmd)) rc = subprocess.call(cmd) return rc @@ -393,10 +413,10 @@ def check_mdrun_success(logfile): """ if not os.path.exists(logfile): return None - with open(logfile, 'rb') as log: + with open(logfile, "rb") as log: log.seek(-1024, 2) for line in log: - line = line.decode('ASCII') + line = line.decode("ASCII") if line.startswith("Finished mdrun on"): return True return False @@ -417,8 +437,10 @@ def get_double_or_single_prec_mdrun(): return gromacs.mdrun_d except (AttributeError, GromacsError, OSError): # fall back to mdrun if no double precision binary - wmsg = "No 'mdrun_d' binary found so trying 'mdrun' instead.\n"\ + wmsg = ( + "No 'mdrun_d' binary found so trying 'mdrun' instead.\n" "(Note that energy minimization runs better with mdrun_d.)" + ) logger.warning(wmsg) warnings.warn(wmsg, category=AutoCorrectionWarning) return gromacs.mdrun diff --git a/gromacs/scaling.py b/gromacs/scaling.py index 96f1305e..a4802221 100644 --- a/gromacs/scaling.py +++ b/gromacs/scaling.py @@ -33,234 +33,294 @@ logger = logging.getLogger("gromacs.scaling") + def scale_dihedrals(mol, dihedrals, scale, banned_lines=None): - """Scale dihedral angles""" - - if banned_lines is None: - banned_lines = [] - new_dihedrals = [] - for dh in mol.dihedrals: - atypes = dh.atom1.get_atomtype(), dh.atom2.get_atomtype(), dh.atom3.get_atomtype(), dh.atom4.get_atomtype() - atypes = [a.replace("_", "").replace("=","") for a in atypes] - - # special-case: this is a [ dihedral ] override in molecule block, continue and don't match - if dh.gromacs['param'] != []: - for p in dh.gromacs['param']: - p['kch'] *= scale - new_dihedrals.append(dh) - continue - - for iswitch in range(32): - if (iswitch%2==0 ): - a1=atypes[0]; a2=atypes[1]; a3=atypes[2]; a4=atypes[3] - else: - a1=atypes[3]; a2=atypes[2]; a3=atypes[1]; a4=atypes[0] - - if((iswitch//2)%2==1): a1="X"; - if((iswitch//4)%2==1): a2="X"; - if((iswitch//8)%2==1): a3="X"; - if((iswitch//16)%2==1): a4="X"; - key = "{0}-{1}-{2}-{3}-{4}".format(a1, a2, a3, a4, dh.gromacs['func']) - if (key in dihedrals): - for i, dt in enumerate(dihedrals[key]): - dhA = copy.deepcopy(dh) - param = copy.deepcopy(dt.gromacs['param']) - # Only check the first dihedral in a list - if not dihedrals[key][0].line in banned_lines: - for p in param: p['kchi'] *= scale - dhA.gromacs['param'] = param - #if key == "CT3-C-NH1-CT1-9": print i, dt, key - if i == 0: - dhA.comment = "; banned lines {0} found={1}\n".format(" ".join( - map(str, banned_lines)), 1 if dt.line in banned_lines else 0) - dhA.comment += "; parameters for types {}-{}-{}-{}-9 at LINE({})\n".format( - dhA.atom1.atomtype, dhA.atom2.atomtype, dhA.atom3.atomtype, - dhA.atom4.atomtype, dt.line).replace("_","") - name = "{}-{}-{}-{}-9".format(dhA.atom1.atomtype, dhA.atom2.atomtype, - dhA.atom3.atomtype, dhA.atom4.atomtype).replace("_","") - #if name == "CL-CTL2-CTL2-HAL2-9": print dihedrals[key], key - new_dihedrals.append(dhA) - break - - - mol.dihedrals = new_dihedrals - #assert(len(mol.dihedrals) == 
new_dihedrals) - return mol + """Scale dihedral angles""" + + if banned_lines is None: + banned_lines = [] + new_dihedrals = [] + for dh in mol.dihedrals: + atypes = ( + dh.atom1.get_atomtype(), + dh.atom2.get_atomtype(), + dh.atom3.get_atomtype(), + dh.atom4.get_atomtype(), + ) + atypes = [a.replace("_", "").replace("=", "") for a in atypes] + + # special-case: this is a [ dihedral ] override in molecule block, continue and don't match + if dh.gromacs["param"] != []: + for p in dh.gromacs["param"]: + p["kch"] *= scale + new_dihedrals.append(dh) + continue + + for iswitch in range(32): + if iswitch % 2 == 0: + a1 = atypes[0] + a2 = atypes[1] + a3 = atypes[2] + a4 = atypes[3] + else: + a1 = atypes[3] + a2 = atypes[2] + a3 = atypes[1] + a4 = atypes[0] + + if (iswitch // 2) % 2 == 1: + a1 = "X" + if (iswitch // 4) % 2 == 1: + a2 = "X" + if (iswitch // 8) % 2 == 1: + a3 = "X" + if (iswitch // 16) % 2 == 1: + a4 = "X" + key = "{0}-{1}-{2}-{3}-{4}".format(a1, a2, a3, a4, dh.gromacs["func"]) + if key in dihedrals: + for i, dt in enumerate(dihedrals[key]): + dhA = copy.deepcopy(dh) + param = copy.deepcopy(dt.gromacs["param"]) + # Only check the first dihedral in a list + if not dihedrals[key][0].line in banned_lines: + for p in param: + p["kchi"] *= scale + dhA.gromacs["param"] = param + # if key == "CT3-C-NH1-CT1-9": print i, dt, key + if i == 0: + dhA.comment = "; banned lines {0} found={1}\n".format( + " ".join(map(str, banned_lines)), + 1 if dt.line in banned_lines else 0, + ) + dhA.comment += ( + "; parameters for types {}-{}-{}-{}-9 at LINE({})\n".format( + dhA.atom1.atomtype, + dhA.atom2.atomtype, + dhA.atom3.atomtype, + dhA.atom4.atomtype, + dt.line, + ).replace("_", "") + ) + name = "{}-{}-{}-{}-9".format( + dhA.atom1.atomtype, + dhA.atom2.atomtype, + dhA.atom3.atomtype, + dhA.atom4.atomtype, + ).replace("_", "") + # if name == "CL-CTL2-CTL2-HAL2-9": print dihedrals[key], key + new_dihedrals.append(dhA) + break + + mol.dihedrals = new_dihedrals + # assert(len(mol.dihedrals) == new_dihedrals) + return mol + def scale_impropers(mol, impropers, scale, banned_lines=None): - """Scale improper dihedrals""" - if banned_lines is None: - banned_lines = [] - new_impropers = [] - for im in mol.impropers: - atypes = (im.atom1.get_atomtype(), im.atom2.get_atomtype(), - im.atom3.get_atomtype(), im.atom4.get_atomtype()) - atypes = [a.replace("_", "").replace("=", "") for a in atypes] - - # special-case: this is a [ dihedral ] override in molecule block, continue and don't match - if im.gromacs['param'] != []: - for p in im.gromacs['param']: - p['kpsi'] *= scale - new_impropers.append(im) - continue - - for iswitch in range(32): - if (iswitch%2==0): - a1=atypes[0]; a2=atypes[1]; a3=atypes[2]; a4=atypes[3]; - else: - a1=atypes[3]; a2=atypes[2]; a3=atypes[1]; a4=atypes[0]; - if((iswitch//2)%2==1): a1="X"; - if((iswitch//4)%2==1): a2="X"; - if((iswitch//8)%2==1): a3="X"; - if((iswitch//16)%2==1): a4="X"; - key = "{0}-{1}-{2}-{3}-{4}".format(a1, a2, a3, a4, im.gromacs['func']) - if (key in impropers): - for i, imt in enumerate(impropers[key]): - imA = copy.deepcopy(im) - param = copy.deepcopy(imt.gromacs['param']) - # Only check the first dihedral in a list - if not impropers[key][0].line in banned_lines: - for p in param: p['kpsi'] *= scale - imA.gromacs['param'] = param - if i == 0: - imA.comment = "; banned lines {0} found={1}\n ; parameters for types {2}-{3}-{4}-{5}-9 at LINE({6})\n".format( - " ".join(map(str, banned_lines)), - 1 if imt.line in banned_lines else 0, - imt.atype1, imt.atype2, imt.atype3, 
imt.atype4, imt.line) - new_impropers.append(imA) - break - #assert(len(mol.impropers) == new_impropers) - mol.impropers = new_impropers - return mol - - -def partial_tempering(topfile="processed.top", outfile="scaled.top", banned_lines='', - scale_lipids=1.0, scale_protein=1.0): - """Set up topology for partial tempering (REST2) replica exchange. - - - .. versionchanged:: 0.7.0 - Use keyword arguments instead of an `args` Namespace object. - """ - - banned_lines = map(int, banned_lines.split()) - top = TOP(topfile) - groups = [("_", float(scale_protein)), ("=", float(scale_lipids))] - - # - # CMAPTYPES - # - cmaptypes = [] - for ct in top.cmaptypes: - cmaptypes.append(ct) - for gr, scale in groups: - ctA = copy.deepcopy(ct) - ctA.atype1 += gr - ctA.atype2 += gr - ctA.atype3 += gr - ctA.atype4 += gr - ctA.atype8 += gr - ctA.gromacs['param'] = [ v*scale for v in ct.gromacs['param'] ] - cmaptypes.append(ctA) - logger.debug("cmaptypes was {0}, is {1}".format(len(top.cmaptypes), len(cmaptypes))) - top.cmaptypes = cmaptypes - - - # - # ATOMTYPES - # - atomtypes = [] - for at in top.atomtypes: - atomtypes.append(at) - for gr, scale in groups: - atA = copy.deepcopy(at) - atA.atnum = atA.atype - atA.atype += gr - atA.gromacs['param']['lje'] *= scale - atomtypes.append(atA) - top.atomtypes = atomtypes - - # - # PAIRTYPES - # - pairtypes = [] - for pt in top.pairtypes: - pairtypes.append(pt) - for gr, scale in groups: - ptA = copy.deepcopy(pt) - ptA.atype1 += gr - ptA.atype2 += gr - ptA.gromacs['param']['lje14'] *= scale - - pairtypes.append(ptA) - top.pairtypes = pairtypes - - # - # BONDTYPES - # - bondtypes = [] - for bt in top.bondtypes: - bondtypes.append(bt) - for gr, scale in groups: - btA = copy.deepcopy(bt) - btA.atype1 += gr - btA.atype2 += gr - bondtypes.append(btA) - top.bondtypes = bondtypes - - - # - # ANGLETYPES - # - angletypes = [] - for at in top.angletypes: - angletypes.append(at) - for gr, scale in groups: - atA = copy.deepcopy(at) - atA.atype1 += gr - atA.atype2 += gr - atA.atype3 += gr - angletypes.append(atA) - top.angletypes = angletypes - - # - # Build dihedral dictionary - # - dihedraltypes = {} - for dt in top.dihedraltypes: - dt.disabled = True - dt.comment = "; type={0!s}-{1!s}-{2!s}-{3!s}-9\n; LINE({4:d}) ".format( - dt.atype1, dt.atype2, dt.atype3, dt.atype4, dt.line) - dt.comment = dt.comment.replace("_","") - - #if "X-CTL2-CTL2-X-9" in dt.comment: print dt - name = "{0}-{1}-{2}-{3}-{4}".format(dt.atype1, dt.atype2, dt.atype3, dt.atype4, dt.gromacs['func']) - if not name in dihedraltypes: - dihedraltypes[name] = [] - dihedraltypes[name].append(dt) - logger.debug("Build dihedraltypes dictionary with {0} entries".format(len(dihedraltypes))) - - # - # Build improper dictionary - # - impropertypes = {} - for it in top.impropertypes: - it.disabled = True - it.comment = "; LINE({0:d}) ".format(it.line) - name = "{0}-{1}-{2}-{3}-{4}".format( - it.atype1, it.atype2, it.atype3, it.atype4, it.gromacs['func']) - if not name in impropertypes: - impropertypes[name] = [] - impropertypes[name].append(it) - logger.debug("Build impropertypes dictionary with {0} entries".format(len(impropertypes))) - - for molname_mol in top.dict_molname_mol: - if not 'Protein' in molname_mol: - continue - mol = top.dict_molname_mol[molname_mol] - for at in mol.atoms: - at.charge *= math.sqrt(scale_protein) - mol = scale_dihedrals(mol, dihedraltypes, scale_protein, banned_lines) - mol = scale_impropers(mol, impropertypes, 1.0, banned_lines) - - top.write(outfile) + """Scale improper dihedrals""" + if 
banned_lines is None: + banned_lines = [] + new_impropers = [] + for im in mol.impropers: + atypes = ( + im.atom1.get_atomtype(), + im.atom2.get_atomtype(), + im.atom3.get_atomtype(), + im.atom4.get_atomtype(), + ) + atypes = [a.replace("_", "").replace("=", "") for a in atypes] + + # special-case: this is a [ dihedral ] override in molecule block, continue and don't match + if im.gromacs["param"] != []: + for p in im.gromacs["param"]: + p["kpsi"] *= scale + new_impropers.append(im) + continue + + for iswitch in range(32): + if iswitch % 2 == 0: + a1 = atypes[0] + a2 = atypes[1] + a3 = atypes[2] + a4 = atypes[3] + else: + a1 = atypes[3] + a2 = atypes[2] + a3 = atypes[1] + a4 = atypes[0] + if (iswitch // 2) % 2 == 1: + a1 = "X" + if (iswitch // 4) % 2 == 1: + a2 = "X" + if (iswitch // 8) % 2 == 1: + a3 = "X" + if (iswitch // 16) % 2 == 1: + a4 = "X" + key = "{0}-{1}-{2}-{3}-{4}".format(a1, a2, a3, a4, im.gromacs["func"]) + if key in impropers: + for i, imt in enumerate(impropers[key]): + imA = copy.deepcopy(im) + param = copy.deepcopy(imt.gromacs["param"]) + # Only check the first dihedral in a list + if not impropers[key][0].line in banned_lines: + for p in param: + p["kpsi"] *= scale + imA.gromacs["param"] = param + if i == 0: + imA.comment = "; banned lines {0} found={1}\n ; parameters for types {2}-{3}-{4}-{5}-9 at LINE({6})\n".format( + " ".join(map(str, banned_lines)), + 1 if imt.line in banned_lines else 0, + imt.atype1, + imt.atype2, + imt.atype3, + imt.atype4, + imt.line, + ) + new_impropers.append(imA) + break + # assert(len(mol.impropers) == new_impropers) + mol.impropers = new_impropers + return mol + + +def partial_tempering( + topfile="processed.top", + outfile="scaled.top", + banned_lines="", + scale_lipids=1.0, + scale_protein=1.0, +): + """Set up topology for partial tempering (REST2) replica exchange. + + + .. versionchanged:: 0.7.0 + Use keyword arguments instead of an `args` Namespace object. 
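    Example (sketch; the topology file names are placeholders)::

       partial_tempering(topfile="processed.top", outfile="scaled_protein.top",
                         scale_protein=0.8, scale_lipids=1.0)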
+ """ + + banned_lines = map(int, banned_lines.split()) + top = TOP(topfile) + groups = [("_", float(scale_protein)), ("=", float(scale_lipids))] + + # + # CMAPTYPES + # + cmaptypes = [] + for ct in top.cmaptypes: + cmaptypes.append(ct) + for gr, scale in groups: + ctA = copy.deepcopy(ct) + ctA.atype1 += gr + ctA.atype2 += gr + ctA.atype3 += gr + ctA.atype4 += gr + ctA.atype8 += gr + ctA.gromacs["param"] = [v * scale for v in ct.gromacs["param"]] + cmaptypes.append(ctA) + logger.debug("cmaptypes was {0}, is {1}".format(len(top.cmaptypes), len(cmaptypes))) + top.cmaptypes = cmaptypes + + # + # ATOMTYPES + # + atomtypes = [] + for at in top.atomtypes: + atomtypes.append(at) + for gr, scale in groups: + atA = copy.deepcopy(at) + atA.atnum = atA.atype + atA.atype += gr + atA.gromacs["param"]["lje"] *= scale + atomtypes.append(atA) + top.atomtypes = atomtypes + + # + # PAIRTYPES + # + pairtypes = [] + for pt in top.pairtypes: + pairtypes.append(pt) + for gr, scale in groups: + ptA = copy.deepcopy(pt) + ptA.atype1 += gr + ptA.atype2 += gr + ptA.gromacs["param"]["lje14"] *= scale + + pairtypes.append(ptA) + top.pairtypes = pairtypes + + # + # BONDTYPES + # + bondtypes = [] + for bt in top.bondtypes: + bondtypes.append(bt) + for gr, scale in groups: + btA = copy.deepcopy(bt) + btA.atype1 += gr + btA.atype2 += gr + bondtypes.append(btA) + top.bondtypes = bondtypes + + # + # ANGLETYPES + # + angletypes = [] + for at in top.angletypes: + angletypes.append(at) + for gr, scale in groups: + atA = copy.deepcopy(at) + atA.atype1 += gr + atA.atype2 += gr + atA.atype3 += gr + angletypes.append(atA) + top.angletypes = angletypes + + # + # Build dihedral dictionary + # + dihedraltypes = {} + for dt in top.dihedraltypes: + dt.disabled = True + dt.comment = "; type={0!s}-{1!s}-{2!s}-{3!s}-9\n; LINE({4:d}) ".format( + dt.atype1, dt.atype2, dt.atype3, dt.atype4, dt.line + ) + dt.comment = dt.comment.replace("_", "") + + # if "X-CTL2-CTL2-X-9" in dt.comment: print dt + name = "{0}-{1}-{2}-{3}-{4}".format( + dt.atype1, dt.atype2, dt.atype3, dt.atype4, dt.gromacs["func"] + ) + if not name in dihedraltypes: + dihedraltypes[name] = [] + dihedraltypes[name].append(dt) + logger.debug( + "Build dihedraltypes dictionary with {0} entries".format(len(dihedraltypes)) + ) + + # + # Build improper dictionary + # + impropertypes = {} + for it in top.impropertypes: + it.disabled = True + it.comment = "; LINE({0:d}) ".format(it.line) + name = "{0}-{1}-{2}-{3}-{4}".format( + it.atype1, it.atype2, it.atype3, it.atype4, it.gromacs["func"] + ) + if not name in impropertypes: + impropertypes[name] = [] + impropertypes[name].append(it) + logger.debug( + "Build impropertypes dictionary with {0} entries".format(len(impropertypes)) + ) + + for molname_mol in top.dict_molname_mol: + if not "Protein" in molname_mol: + continue + mol = top.dict_molname_mol[molname_mol] + for at in mol.atoms: + at.charge *= math.sqrt(scale_protein) + mol = scale_dihedrals(mol, dihedraltypes, scale_protein, banned_lines) + mol = scale_impropers(mol, impropertypes, 1.0, banned_lines) + + top.write(outfile) diff --git a/gromacs/setup.py b/gromacs/setup.py index 6efed2a2..2beecbc1 100644 --- a/gromacs/setup.py +++ b/gromacs/setup.py @@ -126,14 +126,21 @@ import warnings import logging -logger = logging.getLogger('gromacs.setup') + +logger = logging.getLogger("gromacs.setup") import gromacs from . 
import config -from .exceptions import (GromacsError, GromacsFailureWarning, GromacsValueWarning, - AutoCorrectionWarning, BadParameterWarning, UsageWarning, - MissingDataError) +from .exceptions import ( + GromacsError, + GromacsFailureWarning, + GromacsValueWarning, + AutoCorrectionWarning, + BadParameterWarning, + UsageWarning, + MissingDataError, +) from . import run from . import cbook from . import qsub @@ -160,19 +167,26 @@ """ -trj_compact_main = gromacs.tools.Trjconv(ur='compact', center=True, boxcenter='tric', pbc='mol', - input=('__main__', 'system')) +trj_compact_main = gromacs.tools.Trjconv( + ur="compact", center=True, boxcenter="tric", pbc="mol", input=("__main__", "system") +) # trj_compact_main.__doc__ += "Returns a compact representation of the system centered on the __main__ group" # TODO: # - should be part of a class so that we can store the topology etc !!! # and also store mainselection -def topology(struct=None, protein='protein', - top='system.top', dirname='top', - posres="posres.itp", - ff="oplsaa", water="tip4p", - **pdb2gmx_args): + +def topology( + struct=None, + protein="protein", + top="system.top", + dirname="top", + posres="posres.itp", + ff="oplsaa", + water="tip4p", + **pdb2gmx_args +): """Build Gromacs topology files from pdb. :Keywords: @@ -202,23 +216,37 @@ def topology(struct=None, protein='protein', structure = realpath(struct) - new_struct = protein + '.pdb' + new_struct = protein + ".pdb" if posres is None: - posres = protein + '_posres.itp' - - pdb2gmx_args.update({'f': structure, 'o': new_struct, 'p': top, 'i': posres, - 'ff': ff, 'water': water}) + posres = protein + "_posres.itp" + + pdb2gmx_args.update( + { + "f": structure, + "o": new_struct, + "p": top, + "i": posres, + "ff": ff, + "water": water, + } + ) with in_dir(dirname): - logger.info("[{dirname!s}] Building topology {top!r} from struct = {struct!r}".format(**vars())) + logger.info( + "[{dirname!s}] Building topology {top!r} from struct = {struct!r}".format( + **vars() + ) + ) # perhaps parse output from pdb2gmx 4.5.x to get the names of the chain itp files? gromacs.pdb2gmx(**pdb2gmx_args) - return { \ - 'top': realpath(dirname, top), \ - 'struct': realpath(dirname, new_struct), \ - 'posres' : realpath(dirname, posres) } + return { + "top": realpath(dirname, top), + "struct": realpath(dirname, new_struct), + "posres": realpath(dirname, posres), + } -def make_main_index(struct, selection='"Protein"', ndx='main.ndx', oldndx=None): + +def make_main_index(struct, selection='"Protein"', ndx="main.ndx", oldndx=None): """Make index file with the special groups. This routine adds the group __main__ and the group __environment__ @@ -258,8 +286,9 @@ def make_main_index(struct, selection='"Protein"', ndx='main.ndx', oldndx=None): # pass 1: select # get a list of groups # need the first "" to get make_ndx to spit out the group list. - _,out,_ = gromacs.make_ndx(f=struct, n=oldndx, o=ndx, stdout=False, - input=("", "q")) + _, out, _ = gromacs.make_ndx( + f=struct, n=oldndx, o=ndx, stdout=False, input=("", "q") + ) groups = cbook.parse_ndxlist(out) # find the matching groups, @@ -267,15 +296,17 @@ def make_main_index(struct, selection='"Protein"', ndx='main.ndx', oldndx=None): # groups, which caused the previous approach to fail big time. # this is a work around the make_ndx bug. # striping the "" allows compatibility with existing make_ndx selection commands. 
- selection = selection.strip("\"") + selection = selection.strip('"') - selected_groups = [g for g in groups if g['name'].lower() == selection.lower()] + selected_groups = [g for g in groups if g["name"].lower() == selection.lower()] if len(selected_groups) > 1: logging.warn("make_ndx created duplicated groups, performing work around") if len(selected_groups) <= 0: - msg = "no groups found for selection {0}, available groups are {1}".format(selection, groups) + msg = "no groups found for selection {0}, available groups are {1}".format( + selection, groups + ) logging.error(msg) raise ValueError(msg) @@ -283,27 +314,33 @@ def make_main_index(struct, selection='"Protein"', ndx='main.ndx', oldndx=None): # index of last group last = len(groups) - 1 - assert last == groups[-1]['nr'] + assert last == groups[-1]["nr"] group = selected_groups[0] # pass 2: # 1) last group is __main__ # 2) __environment__ is everything else (eg SOL, ions, ...) - _,out,_ = gromacs.make_ndx(f=struct, n=ndx, o=ndx, - stdout=False, - # make copy selected group, this now has index last + 1 - input=("{0}".format(group['nr']), - # rename this to __main__ - "name {0} __main__".format(last+1), - # make a complement to this group, it get index last + 2 - "! \"__main__\"", - # rename this to __environment__ - "name {0} __environment__".format(last+2), - # list the groups - "", - # quit - "q")) + _, out, _ = gromacs.make_ndx( + f=struct, + n=ndx, + o=ndx, + stdout=False, + # make copy selected group, this now has index last + 1 + input=( + "{0}".format(group["nr"]), + # rename this to __main__ + "name {0} __main__".format(last + 1), + # make a complement to this group, it get index last + 2 + '! "__main__"', + # rename this to __environment__ + "name {0} __environment__".format(last + 2), + # list the groups + "", + # quit + "q", + ), + ) return cbook.parse_ndxlist(out) @@ -312,7 +349,8 @@ def make_main_index(struct, selection='"Protein"', ndx='main.ndx', oldndx=None): vdw_lipid_resnames = ["POPC", "POPE", "POPG", "DOPC", "DPPC", "DLPC", "DMPC", "DPPG"] #: Increased atom radii for lipid atoms; these are simply the standard values from #: ``GMXLIB/vdwradii.dat`` increased by 0.1 nm (C) or 0.05 nm (N, O, H). -vdw_lipid_atom_radii = {'C': 0.25, 'N': 0.16, 'O': 0.155, 'H': 0.09} +vdw_lipid_atom_radii = {"C": 0.25, "N": 0.16, "O": 0.155, "H": 0.09} + def get_lipid_vdwradii(outdir=os.path.curdir, libdir=None): """Find vdwradii.dat and add special entries for lipids. @@ -323,75 +361,100 @@ def get_lipid_vdwradii(outdir=os.path.curdir, libdir=None): vdwradii_dat = os.path.join(outdir, "vdwradii.dat") if libdir is not None: - filename = os.path.join(libdir, 'vdwradii.dat') # canonical name + filename = os.path.join(libdir, "vdwradii.dat") # canonical name if not os.path.exists(filename): - msg = 'No VDW database file found in {filename!r}.'.format(**vars()) + msg = "No VDW database file found in {filename!r}.".format(**vars()) logger.exception(msg) raise OSError(msg, errno.ENOENT) else: try: - filename = os.path.join(os.environ['GMXLIB'], 'vdwradii.dat') + filename = os.path.join(os.environ["GMXLIB"], "vdwradii.dat") except KeyError: try: - filename = os.path.join(os.environ['GMXDATA'], 'top', 'vdwradii.dat') + filename = os.path.join(os.environ["GMXDATA"], "top", "vdwradii.dat") except KeyError: msg = "Cannot find vdwradii.dat. Set GMXLIB (point to 'top') or GMXDATA ('share/gromacs')." 
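Similarly, a sketch of how the make_main_index() helper above is typically driven (the tpr file name is a placeholder; __main__ and __environment__ are the groups the function creates)::

    from gromacs.setup import make_main_index

    groups = make_main_index("solvate/ionized.tpr",
                             selection='"Protein"', ndx="main.ndx")
    # cbook.parse_ndxlist() output: one dict per index group
    for g in groups:
        print(g["nr"], g["name"], g["natoms"])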
logger.exception(msg) raise OSError(msg, errno.ENOENT) if not os.path.exists(filename): - msg = "Cannot find {filename!r}; something is wrong with the Gromacs installation.".format(**vars()) + msg = "Cannot find {filename!r}; something is wrong with the Gromacs installation.".format( + **vars() + ) logger.exception(msg, errno.ENOENT) raise OSError(msg) # make sure to catch 3 and 4 letter resnames patterns = vdw_lipid_resnames + list({x[:3] for x in vdw_lipid_resnames}) # TODO: should do a tempfile... - with open(vdwradii_dat, 'w') as outfile: + with open(vdwradii_dat, "w") as outfile: # write lipid stuff before general - outfile.write('; Special larger vdw radii for solvating lipid membranes\n') + outfile.write("; Special larger vdw radii for solvating lipid membranes\n") for resname in patterns: - for atom,radius in vdw_lipid_atom_radii.items(): - outfile.write('{resname:4!s} {atom:<5!s} {radius:5.3f}\n'.format(**vars())) - with open(filename, 'r') as infile: + for atom, radius in vdw_lipid_atom_radii.items(): + outfile.write( + "{resname:4!s} {atom:<5!s} {radius:5.3f}\n".format(**vars()) + ) + with open(filename, "r") as infile: for line in infile: outfile.write(line) - logger.debug('Created lipid vdW radii file {vdwradii_dat!r}.'.format(**vars())) + logger.debug("Created lipid vdW radii file {vdwradii_dat!r}.".format(**vars())) return realpath(vdwradii_dat) -def solvate_sol(struct='top/protein.pdb', top='top/system.top', - distance=0.9, boxtype='dodecahedron', - water='tip4p', solvent_name='SOL', with_membrane=False, - dirname='solvate', - **kwargs): + +def solvate_sol( + struct="top/protein.pdb", + top="top/system.top", + distance=0.9, + boxtype="dodecahedron", + water="tip4p", + solvent_name="SOL", + with_membrane=False, + dirname="solvate", + **kwargs +): structure = realpath(struct) topology = realpath(top) # arguments for editconf that we honour - editconf_keywords = ["box", "bt", "angles", "c", "center", "aligncenter", - "align", "translate", "rotate", "princ"] - editconf_kwargs = dict((k,kwargs.pop(k,None)) for k in editconf_keywords) + editconf_keywords = [ + "box", + "bt", + "angles", + "c", + "center", + "aligncenter", + "align", + "translate", + "rotate", + "princ", + ] + editconf_kwargs = dict((k, kwargs.pop(k, None)) for k in editconf_keywords) editconf_boxtypes = ["triclinic", "cubic", "dodecahedron", "octahedron", None] # needed for topology scrubbing - scrubber_kwargs = {'marker': kwargs.pop('marker',None)} + scrubber_kwargs = {"marker": kwargs.pop("marker", None)} # sanity checks and argument dependencies - bt = editconf_kwargs.pop('bt') - boxtype = bt if bt else boxtype # bt takes precedence over boxtype + bt = editconf_kwargs.pop("bt") + boxtype = bt if bt else boxtype # bt takes precedence over boxtype if not boxtype in editconf_boxtypes: - msg = "Unsupported boxtype {boxtype!r}: Only {boxtypes!r} are possible.".format(**vars()) + msg = "Unsupported boxtype {boxtype!r}: Only {boxtypes!r} are possible.".format( + **vars() + ) logger.error(msg) raise ValueError(msg) - if editconf_kwargs['box']: - distance = None # if box is set then user knows what she is doing... - - if water.lower() in ('spc', 'spce'): - water = 'spc216' - elif water.lower() == 'tip3p': - water = 'spc216' - logger.warning("TIP3P water model selected: using SPC equilibrated box " - "for initial solvation because it is a reasonable starting point " - "for any 3-point model. EQUILIBRATE THOROUGHLY!") + if editconf_kwargs["box"]: + distance = None # if box is set then user knows what she is doing... 
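get_lipid_vdwradii() is what solvate_sol(..., with_membrane=True) calls internally; a sketch of stand-alone use, assuming GMXLIB or GMXDATA points at a GROMACS installation::

    from gromacs.setup import get_lipid_vdwradii

    # intended to write ./vdwradii.dat (the enlarged lipid radii first, then
    # the distribution file found via GMXLIB/GMXDATA) and return its real path
    vdwradii_dat = get_lipid_vdwradii(outdir=".")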
+ + if water.lower() in ("spc", "spce"): + water = "spc216" + elif water.lower() == "tip3p": + water = "spc216" + logger.warning( + "TIP3P water model selected: using SPC equilibrated box " + "for initial solvation because it is a reasonable starting point " + "for any 3-point model. EQUILIBRATE THOROUGHLY!" + ) # clean topology (if user added the marker; the default marker is # ; Gromacs auto-generated entries follow: @@ -402,74 +465,110 @@ def solvate_sol(struct='top/protein.pdb', top='top/system.top', if boxtype is None: hasBox = False ext = os.path.splitext(structure)[1] - if ext == '.gro': + if ext == ".gro": hasBox = True - elif ext == '.pdb': + elif ext == ".pdb": with open(structure) as struct: for line in struct: - if line.startswith('CRYST'): + if line.startswith("CRYST"): hasBox = True break if not hasBox: - msg = "No box data in the input structure {structure!r} and boxtype is set to None".format(**vars()) + msg = "No box data in the input structure {structure!r} and boxtype is set to None".format( + **vars() + ) logger.exception(msg) raise MissingDataError(msg) - distance = boxtype = None # ensures that editconf just converts - editconf_kwargs.update({'f': structure, 'o': 'boxed.gro', - 'bt': boxtype, 'd': distance}) + distance = boxtype = None # ensures that editconf just converts + editconf_kwargs.update( + {"f": structure, "o": "boxed.gro", "bt": boxtype, "d": distance} + ) gromacs.editconf(**editconf_kwargs) if with_membrane: vdwradii_dat = get_lipid_vdwradii() # need to clean up afterwards - logger.info("Using special vdW radii for lipids {0!r}".format(vdw_lipid_resnames)) + logger.info( + "Using special vdW radii for lipids {0!r}".format(vdw_lipid_resnames) + ) try: - gromacs.genbox(p=topology, cp='boxed.gro', cs=water, o='solvated.gro') + gromacs.genbox(p=topology, cp="boxed.gro", cs=water, o="solvated.gro") except: if with_membrane: # remove so that it's not picked up accidentally utilities.unlink_f(vdwradii_dat) raise logger.info("Solvated system with %s", water) - return {'struct': realpath(dirname, 'solvated.gro'),} - -def solvate_ion(struct='solvated.gro', top='top/system.top', - concentration=0, cation='NA', anion='CL', - solvent_name='SOL', ndx='main.ndx', - mainselection='"Protein"', dirname='solvate', - **kwargs): + return { + "struct": realpath(dirname, "solvated.gro"), + } + + +def solvate_ion( + struct="solvated.gro", + top="top/system.top", + concentration=0, + cation="NA", + anion="CL", + solvent_name="SOL", + ndx="main.ndx", + mainselection='"Protein"', + dirname="solvate", + **kwargs +): structure = realpath(struct) topology = realpath(top) # By default, grompp should not choke on a few warnings because at # this stage the user cannot do much about it (can be set to any # value but is kept undocumented...) - grompp_maxwarn = kwargs.pop('maxwarn',10) + grompp_maxwarn = kwargs.pop("maxwarn", 10) # handle additional include directories (kwargs are also modified!) 
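A sketch of calling solvate_sol() on its own, using the module defaults shown above (paths are placeholders)::

    from gromacs.setup import solvate_sol

    sol = solvate_sol(struct="top/protein.pdb", top="top/system.top",
                      distance=0.9, boxtype="dodecahedron", water="tip4p",
                      dirname="solvate")
    print(sol["struct"])    # .../solvate/solvated.gro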
mdp_kwargs = cbook.add_mdp_includes(topology, kwargs) with in_dir(dirname): - with open('none.mdp','w') as mdp: - mdp.write('; empty mdp file\ninclude = {include!s}\nrcoulomb = 1\nrvdw = 1\nrlist = 1\n'.format(**mdp_kwargs)) - qtotgmx = cbook.grompp_qtot(f='none.mdp', o='topol.tpr', c=structure, - p=topology, stdout=False, maxwarn=grompp_maxwarn) + with open("none.mdp", "w") as mdp: + mdp.write( + "; empty mdp file\ninclude = {include!s}\nrcoulomb = 1\nrvdw = 1\nrlist = 1\n".format( + **mdp_kwargs + ) + ) + qtotgmx = cbook.grompp_qtot( + f="none.mdp", + o="topol.tpr", + c=structure, + p=topology, + stdout=False, + maxwarn=grompp_maxwarn, + ) qtot = round(qtotgmx) - logger.info("[{dirname!s}] After solvation: total charge qtot = {qtotgmx!r} = {qtot!r}".format(**vars())) + logger.info( + "[{dirname!s}] After solvation: total charge qtot = {qtotgmx!r} = {qtot!r}".format( + **vars() + ) + ) if concentration != 0: - logger.info("[{dirname!s}] Adding ions for c = {concentration:f} M...".format(**vars())) + logger.info( + "[{dirname!s}] Adding ions for c = {concentration:f} M...".format( + **vars() + ) + ) # target concentration of free ions c ==> # N = N_water * c/c_water # add ions for concentration to the counter ions (counter ions are less free) # # get number of waters (count OW ... works for SPC*, TIP*P water models) - rc,output,junk = gromacs.make_ndx(f='topol.tpr', o='ow.ndx', - input=('keep 0', 'del 0', 'a OW*', 'name 0 OW', '', 'q'), - stdout=False) + rc, output, junk = gromacs.make_ndx( + f="topol.tpr", + o="ow.ndx", + input=("keep 0", "del 0", "a OW*", "name 0 OW", "", "q"), + stdout=False, + ) groups = cbook.parse_ndxlist(output) - gdict = {g['name']: g for g in groups} # overkill... - N_water = gdict['OW']['natoms'] # ... but dict lookup is nice - N_ions = int(N_water * concentration/CONC_WATER) # number of monovalents + gdict = {g["name"]: g for g in groups} # overkill... + N_water = gdict["OW"]["natoms"] # ... but dict lookup is nice + N_ions = int(N_water * concentration / CONC_WATER) # number of monovalents else: N_ions = 0 @@ -481,64 +580,96 @@ def solvate_ion(struct='solvated.gro', top='top/system.top', n_cation = int(abs(qtot)) n_cation += N_ions - n_anion += N_ions + n_anion += N_ions if n_cation != 0 or n_anion != 0: # sanity check: assert qtot + n_cation - n_anion < 1e-6 - logger.info("[{dirname!s}] Adding n_cation = {n_cation:d} and n_anion = {n_anion:d} ions...".format(**vars())) - gromacs.genion(s='topol.tpr', o='ionized.gro', p=topology, - pname=cation, nname=anion, np=n_cation, nn=n_anion, - input=solvent_name) + logger.info( + "[{dirname!s}] Adding n_cation = {n_cation:d} and n_anion = {n_anion:d} ions...".format( + **vars() + ) + ) + gromacs.genion( + s="topol.tpr", + o="ionized.gro", + p=topology, + pname=cation, + nname=anion, + np=n_cation, + nn=n_anion, + input=solvent_name, + ) else: # fake ionized file ... 
makes it easier to continue without too much fuzz try: - os.unlink('ionized.gro') + os.unlink("ionized.gro") except OSError as err: if err.errno != errno.ENOENT: raise - os.symlink('solvated.gro', 'ionized.gro') + os.symlink("solvated.gro", "ionized.gro") - qtot = cbook.grompp_qtot(f='none.mdp', o='ionized.tpr', c='ionized.gro', - p=topology, stdout=False, maxwarn=grompp_maxwarn) + qtot = cbook.grompp_qtot( + f="none.mdp", + o="ionized.tpr", + c="ionized.gro", + p=topology, + stdout=False, + maxwarn=grompp_maxwarn, + ) if abs(qtot) > 1e-4: - wmsg = "System has non-zero total charge qtot = {qtot:g} e.".format(**vars()) + wmsg = "System has non-zero total charge qtot = {qtot:g} e.".format( + **vars() + ) warnings.warn(wmsg, category=BadParameterWarning) logger.warning(wmsg) # make main index try: - make_main_index('ionized.tpr', selection=mainselection, ndx=ndx) + make_main_index("ionized.tpr", selection=mainselection, ndx=ndx) except GromacsError as err: # or should I rather fail here? - wmsg = "Failed to make main index file %r ... maybe set mainselection='...'.\n"\ - "The error message was:\n%s\n" % (ndx, str(err)) + wmsg = ( + "Failed to make main index file %r ... maybe set mainselection='...'.\n" + "The error message was:\n%s\n" % (ndx, str(err)) + ) logger.warning(wmsg) warnings.warn(wmsg, category=GromacsFailureWarning) try: - trj_compact_main(f='ionized.gro', s='ionized.tpr', o='compact.pdb', n=ndx) + trj_compact_main(f="ionized.gro", s="ionized.tpr", o="compact.pdb", n=ndx) except GromacsError as err: - wmsg = "Failed to make compact pdb for visualization... pressing on regardless. "\ - "The error message was:\n%s\n" % str(err) + wmsg = ( + "Failed to make compact pdb for visualization... pressing on regardless. " + "The error message was:\n%s\n" % str(err) + ) logger.warning(wmsg) warnings.warn(wmsg, category=GromacsFailureWarning) - return {'qtot': qtot, - 'struct': realpath(dirname, 'ionized.gro'), - 'ndx': realpath(dirname, ndx), # not sure why this is propagated-is it used? - 'mainselection': mainselection, - } - - - -def solvate(struct='top/protein.pdb', top='top/system.top', - distance=0.9, boxtype='dodecahedron', - concentration=0, cation='NA', anion='CL', - water='tip4p', solvent_name='SOL', with_membrane=False, - ndx = 'main.ndx', mainselection = '"Protein"', - dirname='solvate', - **kwargs): + return { + "qtot": qtot, + "struct": realpath(dirname, "ionized.gro"), + "ndx": realpath(dirname, ndx), # not sure why this is propagated-is it used? + "mainselection": mainselection, + } + + +def solvate( + struct="top/protein.pdb", + top="top/system.top", + distance=0.9, + boxtype="dodecahedron", + concentration=0, + cation="NA", + anion="CL", + water="tip4p", + solvent_name="SOL", + with_membrane=False, + ndx="main.ndx", + mainselection='"Protein"', + dirname="solvate", + **kwargs +): """Put protein into box, add water, add counter-ions. Currently this really only supports solutes in water. If you need @@ -613,33 +744,55 @@ def solvate(struct='top/protein.pdb', top='top/system.top', changed in the mdp file. 
""" - sol = solvate_sol(struct=struct, top=top, - distance=distance, boxtype=boxtype, - water=water, solvent_name=solvent_name, - with_membrane=with_membrane, - dirname=dirname, **kwargs) - - ion = solvate_ion(struct=sol['struct'], top=top, - concentration=concentration, cation=cation, anion=anion, - solvent_name=solvent_name, ndx=ndx, - mainselection=mainselection, dirname=dirname, - **kwargs) + sol = solvate_sol( + struct=struct, + top=top, + distance=distance, + boxtype=boxtype, + water=water, + solvent_name=solvent_name, + with_membrane=with_membrane, + dirname=dirname, + **kwargs + ) + + ion = solvate_ion( + struct=sol["struct"], + top=top, + concentration=concentration, + cation=cation, + anion=anion, + solvent_name=solvent_name, + ndx=ndx, + mainselection=mainselection, + dirname=dirname, + **kwargs + ) return ion def check_mdpargs(d): """Check if any arguments remain in dict *d*.""" if len(d) > 0: - wmsg = "Unprocessed mdp option are interpreted as options for grompp:\n"+str(d) + wmsg = "Unprocessed mdp option are interpreted as options for grompp:\n" + str( + d + ) logger.warning(wmsg) warnings.warn(wmsg, category=UsageWarning) return len(d) == 0 -def energy_minimize(dirname='em', mdp=config.templates['em.mdp'], - struct='solvate/ionized.gro', top='top/system.top', - output='em.pdb', deffnm="em", - mdrunner=None, mdrun_args=None, - **kwargs): + +def energy_minimize( + dirname="em", + mdp=config.templates["em.mdp"], + struct="solvate/ionized.gro", + top="top/system.top", + output="em.pdb", + deffnm="em", + mdrunner=None, + mdrun_args=None, + **kwargs +): """Energy minimize the system. This sets up the system (creates run input files) and also runs @@ -691,25 +844,29 @@ def energy_minimize(dirname='em', mdp=config.templates['em.mdp'], mdrun_args = {} if mdrun_args is None else mdrun_args # write the processed topology to the default output - kwargs.setdefault('pp', 'processed.top') + kwargs.setdefault("pp", "processed.top") # filter some kwargs that might come through when feeding output # from previous stages such as solvate(); necessary because *all* # **kwargs must be *either* substitutions in the mdp file *or* valid # command line parameters for ``grompp``. - kwargs.pop('ndx', None) + kwargs.pop("ndx", None) # mainselection is not used but only passed through; right now we # set it to the default that is being used in all argument lists # but that is not pretty. TODO. - mainselection = kwargs.pop('mainselection', '"Protein"') + mainselection = kwargs.pop("mainselection", '"Protein"') # only interesting when passed from solvate() - qtot = kwargs.pop('qtot', 0) + qtot = kwargs.pop("qtot", 0) # mdp is now the *output* MDP that will be generated from mdp_template - mdp = deffnm+'.mdp' - tpr = deffnm+'.tpr' + mdp = deffnm + ".mdp" + tpr = deffnm + ".tpr" - logger.info("[{dirname!s}] Energy minimization of struct={struct!r}, top={top!r}, mdp={mdp!r} ...".format(**vars())) + logger.info( + "[{dirname!s}] Energy minimization of struct={struct!r}, top={top!r}, mdp={mdp!r} ...".format( + **vars() + ) + ) cbook.add_mdp_includes(topology, kwargs) @@ -717,14 +874,18 @@ def energy_minimize(dirname='em', mdp=config.templates['em.mdp'], # At the moment this is purely user-reported and really only here because # it might get fed into the function when using the keyword-expansion pipeline # usage paradigm. 
- wmsg = "Total charge was reported as qtot = {qtot:g} <> 0; probably a problem.".format(**vars()) + wmsg = "Total charge was reported as qtot = {qtot:g} <> 0; probably a problem.".format( + **vars() + ) logger.warning(wmsg) warnings.warn(wmsg, category=BadParameterWarning) with in_dir(dirname): unprocessed = cbook.edit_mdp(mdp_template, new_mdp=mdp, **kwargs) check_mdpargs(unprocessed) - gromacs.grompp(f=mdp, o=tpr, c=structure, r=structure, p=topology, **unprocessed) + gromacs.grompp( + f=mdp, o=tpr, c=structure, r=structure, p=topology, **unprocessed + ) mdrun_args.update(v=True, stepout=10, deffnm=deffnm, c=output) if mdrunner is None: mdrun = run.get_double_or_single_prec_mdrun() @@ -741,8 +902,12 @@ def energy_minimize(dirname='em', mdp=config.templates['em.mdp'], try: mdrunner.run(mdrunargs=mdrun_args) except AttributeError: - logger.error("mdrunner: Provide a gromacs.run.MDrunner class or instance or a callback with a run() method") - raise TypeError("mdrunner: Provide a gromacs.run.MDrunner class or instance or a callback with a run() method") + logger.error( + "mdrunner: Provide a gromacs.run.MDrunner class or instance or a callback with a run() method" + ) + raise TypeError( + "mdrunner: Provide a gromacs.run.MDrunner class or instance or a callback with a run() method" + ) # em.gro --> gives 'Bad box in file em.gro' warning --- why?? # --> use em.pdb instead. @@ -752,11 +917,15 @@ def energy_minimize(dirname='em', mdp=config.templates['em.mdp'], raise GromacsError(errmsg) final_struct = realpath(output) - logger.info("[{dirname!s}] energy minimized structure {final_struct!r}".format(**vars())) - return {'struct': final_struct, - 'top': topology, - 'mainselection': mainselection, - } + logger.info( + "[{dirname!s}] energy minimized structure {final_struct!r}".format(**vars()) + ) + return { + "struct": final_struct, + "top": topology, + "mainselection": mainselection, + } + def em_schedule(**kwargs): """Run multiple energy minimizations one after each other. @@ -797,173 +966,232 @@ def em_schedule(**kwargs): per-minimizer basis. 
""" - mdrunner = kwargs.pop('mdrunner', None) - integrators = kwargs.pop('integrators', ['l-bfgs', 'steep']) - kwargs.pop('integrator', None) # clean input; we set intgerator from integrators - nsteps = kwargs.pop('nsteps', [100, 1000]) + mdrunner = kwargs.pop("mdrunner", None) + integrators = kwargs.pop("integrators", ["l-bfgs", "steep"]) + kwargs.pop("integrator", None) # clean input; we set intgerator from integrators + nsteps = kwargs.pop("nsteps", [100, 1000]) - outputs = ['em{0:03d}_{1!s}.pdb'.format(i, integrator) for i,integrator in enumerate(integrators)] - outputs[-1] = kwargs.pop('output', 'em.pdb') + outputs = [ + "em{0:03d}_{1!s}.pdb".format(i, integrator) + for i, integrator in enumerate(integrators) + ] + outputs[-1] = kwargs.pop("output", "em.pdb") - files = {'struct': kwargs.pop('struct', None)} # fake output from energy_minimize() + files = {"struct": kwargs.pop("struct", None)} # fake output from energy_minimize() for i, integrator in enumerate(integrators): - struct = files['struct'] - logger.info("[em %d] energy minimize with %s for maximum %d steps", i, integrator, nsteps[i]) - kwargs.update({'struct':struct, 'output':outputs[i], - 'integrator':integrator, 'nsteps': nsteps[i]}) - if not integrator == 'l-bfgs': - kwargs['mdrunner'] = mdrunner + struct = files["struct"] + logger.info( + "[em %d] energy minimize with %s for maximum %d steps", + i, + integrator, + nsteps[i], + ) + kwargs.update( + { + "struct": struct, + "output": outputs[i], + "integrator": integrator, + "nsteps": nsteps[i], + } + ) + if not integrator == "l-bfgs": + kwargs["mdrunner"] = mdrunner else: - kwargs['mdrunner'] = None - logger.warning("[em %d] Not using mdrunner for L-BFGS because it cannot " - "do parallel runs.", i) + kwargs["mdrunner"] = None + logger.warning( + "[em %d] Not using mdrunner for L-BFGS because it cannot " + "do parallel runs.", + i, + ) files = energy_minimize(**kwargs) return files -def _setup_MD(dirname, - deffnm='md', mdp=config.templates['md_OPLSAA.mdp'], - struct=None, - top='top/system.top', ndx=None, - mainselection='"Protein"', - qscript=config.qscript_template, qname=None, startdir=None, mdrun_opts="", budget=None, walltime=1/3., - dt=0.002, runtime=1e3, **mdp_kwargs): +def _setup_MD( + dirname, + deffnm="md", + mdp=config.templates["md_OPLSAA.mdp"], + struct=None, + top="top/system.top", + ndx=None, + mainselection='"Protein"', + qscript=config.qscript_template, + qname=None, + startdir=None, + mdrun_opts="", + budget=None, + walltime=1 / 3.0, + dt=0.002, + runtime=1e3, + **mdp_kwargs +): """Generic function to set up a ``mdrun`` MD simulation. See the user functions for usage. """ if struct is None: - raise ValueError('struct must be set to a input structure') + raise ValueError("struct must be set to a input structure") structure = realpath(struct) topology = realpath(top) try: index = realpath(ndx) except AttributeError: # (that's what realpath(None) throws...) 
- index = None # None is handled fine below + index = None # None is handled fine below - qname = mdp_kwargs.pop('sgename', qname) # compatibility for old scripts - qscript = mdp_kwargs.pop('sge', qscript) # compatibility for old scripts + qname = mdp_kwargs.pop("sgename", qname) # compatibility for old scripts + qscript = mdp_kwargs.pop("sge", qscript) # compatibility for old scripts qscript_template = config.get_template(qscript) mdp_template = config.get_template(mdp) - nsteps = int(float(runtime)/float(dt)) + nsteps = int(float(runtime) / float(dt)) - mdp = deffnm + '.mdp' - tpr = deffnm + '.tpr' - mainindex = deffnm + '.ndx' - final_structure = deffnm + '.gro' # guess... really depends on templates,could also be DEFFNM.pdb + mdp = deffnm + ".mdp" + tpr = deffnm + ".tpr" + mainindex = deffnm + ".ndx" + final_structure = ( + deffnm + ".gro" + ) # guess... really depends on templates,could also be DEFFNM.pdb # write the processed topology to the default output - mdp_parameters = {'nsteps':nsteps, 'dt':dt, 'pp': 'processed.top'} + mdp_parameters = {"nsteps": nsteps, "dt": dt, "pp": "processed.top"} mdp_parameters.update(mdp_kwargs) cbook.add_mdp_includes(topology, mdp_parameters) logger.info("[%(dirname)s] input mdp = %(mdp_template)r", vars()) with in_dir(dirname): - if not (mdp_parameters.get('Tcoupl','').lower() == 'no' or mainselection is None): - logger.info("[{dirname!s}] Automatic adjustment of T-coupling groups".format(**vars())) + if not ( + mdp_parameters.get("Tcoupl", "").lower() == "no" or mainselection is None + ): + logger.info( + "[{dirname!s}] Automatic adjustment of T-coupling groups".format( + **vars() + ) + ) # make index file in almost all cases; with mainselection == None the user # takes FULL control and also has to provide the template or index - groups = make_main_index(structure, selection=mainselection, - oldndx=index, ndx=mainindex) - natoms = {g['name']: float(g['natoms']) for g in groups} - tc_group_names = ('__main__', '__environment__') # defined in make_main_index() + groups = make_main_index( + structure, selection=mainselection, oldndx=index, ndx=mainindex + ) + natoms = {g["name"]: float(g["natoms"]) for g in groups} + tc_group_names = ( + "__main__", + "__environment__", + ) # defined in make_main_index() try: - x = natoms['__main__']/natoms['__environment__'] + x = natoms["__main__"] / natoms["__environment__"] except KeyError: - x = 0 # force using SYSTEM in code below - wmsg = "Missing __main__ and/or __environment__ index group.\n" \ - "This probably means that you have an atypical system. You can " \ - "set mainselection=None and provide your own mdp and index files " \ - "in order to set up temperature coupling.\n" \ - "If no T-coupling is required then set Tcoupl='no'.\n" \ - "For now we will just couple everything to 'System'." + x = 0 # force using SYSTEM in code below + wmsg = ( + "Missing __main__ and/or __environment__ index group.\n" + "This probably means that you have an atypical system. You can " + "set mainselection=None and provide your own mdp and index files " + "in order to set up temperature coupling.\n" + "If no T-coupling is required then set Tcoupl='no'.\n" + "For now we will just couple everything to 'System'." 
+ ) logger.warning(wmsg) warnings.warn(wmsg, category=AutoCorrectionWarning) if x < 0.1: # couple everything together - tau_t = firstof(mdp_parameters.pop('tau_t', 0.1)) - ref_t = firstof(mdp_parameters.pop('ref_t', 300)) + tau_t = firstof(mdp_parameters.pop("tau_t", 0.1)) + ref_t = firstof(mdp_parameters.pop("ref_t", 300)) # combine all in one T-coupling group - mdp_parameters['tc-grps'] = 'System' - mdp_parameters['tau_t'] = tau_t # this overrides the commandline! - mdp_parameters['ref_t'] = ref_t # this overrides the commandline! - mdp_parameters['gen-temp'] = mdp_parameters.pop('gen_temp', ref_t) - wmsg = "Size of __main__ is only %.1f%% of __environment__ so " \ - "we use 'System' for T-coupling and ref_t = %g K and " \ - "tau_t = %g 1/ps (can be changed in mdp_parameters).\n" \ - % (x * 100, ref_t, tau_t) + mdp_parameters["tc-grps"] = "System" + mdp_parameters["tau_t"] = tau_t # this overrides the commandline! + mdp_parameters["ref_t"] = ref_t # this overrides the commandline! + mdp_parameters["gen-temp"] = mdp_parameters.pop("gen_temp", ref_t) + wmsg = ( + "Size of __main__ is only %.1f%% of __environment__ so " + "we use 'System' for T-coupling and ref_t = %g K and " + "tau_t = %g 1/ps (can be changed in mdp_parameters).\n" + % (x * 100, ref_t, tau_t) + ) logger.warning(wmsg) warnings.warn(wmsg, category=AutoCorrectionWarning) else: # couple protein and bath separately n_tc_groups = len(tc_group_names) - tau_t = asiterable(mdp_parameters.pop('tau_t', 0.1)) - ref_t = asiterable(mdp_parameters.pop('ref_t', 300)) + tau_t = asiterable(mdp_parameters.pop("tau_t", 0.1)) + ref_t = asiterable(mdp_parameters.pop("ref_t", 300)) if len(tau_t) != n_tc_groups: tau_t = n_tc_groups * [tau_t[0]] - wmsg = "%d coupling constants should have been supplied for tau_t. "\ + wmsg = ( + "%d coupling constants should have been supplied for tau_t. " "Using %f 1/ps for all of them." % (n_tc_groups, tau_t[0]) + ) logger.warning(wmsg) warnings.warn(wmsg, category=AutoCorrectionWarning) if len(ref_t) != n_tc_groups: ref_t = n_tc_groups * [ref_t[0]] - wmsg = "%d temperatures should have been supplied for ref_t. "\ + wmsg = ( + "%d temperatures should have been supplied for ref_t. " "Using %g K for all of them." 
% (n_tc_groups, ref_t[0]) + ) logger.warning(wmsg) warnings.warn(wmsg, category=AutoCorrectionWarning) - mdp_parameters['tc-grps'] = tc_group_names - mdp_parameters['tau_t'] = tau_t - mdp_parameters['ref_t'] = ref_t - mdp_parameters['gen-temp'] = mdp_parameters.pop('gen_temp', ref_t[0]) + mdp_parameters["tc-grps"] = tc_group_names + mdp_parameters["tau_t"] = tau_t + mdp_parameters["ref_t"] = ref_t + mdp_parameters["gen-temp"] = mdp_parameters.pop("gen_temp", ref_t[0]) index = realpath(mainindex) - if mdp_parameters.get('Tcoupl','').lower() == 'no': + if mdp_parameters.get("Tcoupl", "").lower() == "no": logger.info("Tcoupl == no: disabling all temperature coupling mdp options") - mdp_parameters['tc-grps'] = "" - mdp_parameters['tau_t'] = "" - mdp_parameters['ref_t'] = "" - mdp_parameters['gen-temp'] = "" - if mdp_parameters.get('Pcoupl','').lower() == 'no': + mdp_parameters["tc-grps"] = "" + mdp_parameters["tau_t"] = "" + mdp_parameters["ref_t"] = "" + mdp_parameters["gen-temp"] = "" + if mdp_parameters.get("Pcoupl", "").lower() == "no": logger.info("Pcoupl == no: disabling all pressure coupling mdp options") - mdp_parameters['tau_p'] = "" - mdp_parameters['ref_p'] = "" - mdp_parameters['compressibility'] = "" + mdp_parameters["tau_p"] = "" + mdp_parameters["ref_p"] = "" + mdp_parameters["compressibility"] = "" unprocessed = cbook.edit_mdp(mdp_template, new_mdp=mdp, **mdp_parameters) check_mdpargs(unprocessed) gromacs.grompp(f=mdp, p=topology, c=structure, n=index, o=tpr, **unprocessed) runscripts = qsub.generate_submit_scripts( - qscript_template, deffnm=deffnm, jobname=qname, budget=budget, - startdir=startdir, mdrun_opts=mdrun_opts, walltime=walltime) + qscript_template, + deffnm=deffnm, + jobname=qname, + budget=budget, + startdir=startdir, + mdrun_opts=mdrun_opts, + walltime=walltime, + ) logger.info("[%(dirname)s] output mdp = %(mdp)r", vars()) logger.info("[%(dirname)s] output ndx = %(ndx)r", vars()) logger.info("[%(dirname)s] output tpr = %(tpr)r", vars()) logger.info("[%(dirname)s] output runscripts = %(runscripts)r", vars()) - logger.info("[%(dirname)s] All files set up for a run time of %(runtime)g ps " - "(dt=%(dt)g, nsteps=%(nsteps)g)" % vars()) - - kwargs = {'struct': realpath(os.path.join(dirname, final_structure)), # guess - 'top': topology, - 'ndx': index, # possibly mainindex - 'qscript': runscripts, - 'mainselection': mainselection, - 'deffnm': deffnm, # return deffnm (tpr = deffnm.tpr!) - } - kwargs.update(mdp_kwargs) # return extra mdp args so that one can use them for prod run + logger.info( + "[%(dirname)s] All files set up for a run time of %(runtime)g ps " + "(dt=%(dt)g, nsteps=%(nsteps)g)" % vars() + ) + + kwargs = { + "struct": realpath(os.path.join(dirname, final_structure)), # guess + "top": topology, + "ndx": index, # possibly mainindex + "qscript": runscripts, + "mainselection": mainselection, + "deffnm": deffnm, # return deffnm (tpr = deffnm.tpr!) + } + kwargs.update( + mdp_kwargs + ) # return extra mdp args so that one can use them for prod run return kwargs -def MD_restrained(dirname='MD_POSRES', **kwargs): +def MD_restrained(dirname="MD_POSRES", **kwargs): """Set up MD with position restraints. Additional itp files should be in the same directory as the top file. @@ -1039,30 +1267,33 @@ def MD_restrained(dirname='MD_POSRES', **kwargs): .. 
_`pressure coupling`: http://manual.gromacs.org/online/mdp_opt.html#pc """ - logger.info("[{dirname!s}] Setting up MD with position restraints...".format(**vars())) - kwargs.setdefault('struct', 'em/em.pdb') - kwargs.setdefault('qname', 'PR_GMX') - kwargs.setdefault('define', '-DPOSRES') + logger.info( + "[{dirname!s}] Setting up MD with position restraints...".format(**vars()) + ) + kwargs.setdefault("struct", "em/em.pdb") + kwargs.setdefault("qname", "PR_GMX") + kwargs.setdefault("define", "-DPOSRES") # reduce size of output files - kwargs.setdefault('nstxout', '50000') # trr pos - kwargs.setdefault('nstvout', '50000') # trr veloc - kwargs.setdefault('nstfout', '0') # trr forces - kwargs.setdefault('nstlog', '500') # log file - kwargs.setdefault('nstenergy', '2500') # edr energy - kwargs.setdefault('nstxtcout', '5000') # xtc pos + kwargs.setdefault("nstxout", "50000") # trr pos + kwargs.setdefault("nstvout", "50000") # trr veloc + kwargs.setdefault("nstfout", "0") # trr forces + kwargs.setdefault("nstlog", "500") # log file + kwargs.setdefault("nstenergy", "2500") # edr energy + kwargs.setdefault("nstxtcout", "5000") # xtc pos # try to get good pressure equilibration - kwargs.setdefault('refcoord_scaling', 'com') - kwargs.setdefault('Pcoupl', "Berendsen") + kwargs.setdefault("refcoord_scaling", "com") + kwargs.setdefault("Pcoupl", "Berendsen") - new_kwargs = _setup_MD(dirname, **kwargs) + new_kwargs = _setup_MD(dirname, **kwargs) # clean up output kwargs - new_kwargs.pop('define', None) # but make sure that -DPOSRES does not stay... - new_kwargs.pop('refcoord_scaling', None) - new_kwargs.pop('Pcoupl', None) + new_kwargs.pop("define", None) # but make sure that -DPOSRES does not stay... + new_kwargs.pop("refcoord_scaling", None) + new_kwargs.pop("Pcoupl", None) return new_kwargs -def MD(dirname='MD', **kwargs): + +def MD(dirname="MD", **kwargs): """Set up equilibrium MD. Additional itp files should be in the same directory as the top file. 
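Taken together, the setup functions are written for a keyword-expansion pipeline; a sketch under the module defaults (file names and run lengths are placeholders, runtime is in ps)::

    from gromacs import setup

    top = setup.topology(struct="protein.pdb", ff="oplsaa", water="tip4p")
    sol = setup.solvate(struct=top["struct"], top=top["top"], concentration=0.15)
    em  = setup.energy_minimize(**sol)        # consumes 'struct', 'qtot', ...
    pr  = setup.MD_restrained(dirname="MD_POSRES", runtime=100, **em)
    md  = setup.MD(dirname="MD", struct="MD_POSRES/md.gro", runtime=1000)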
@@ -1118,10 +1349,9 @@ def MD(dirname='MD', **kwargs): """ logger.info("[{dirname!s}] Setting up MD...".format(**vars())) - kwargs.setdefault('struct', 'MD_POSRES/md.gro') - kwargs.setdefault('qname', 'MD_GMX') + kwargs.setdefault("struct", "MD_POSRES/md.gro") + kwargs.setdefault("qname", "MD_GMX") return _setup_MD(dirname, **kwargs) - # TODO: autorun (qv MI lipids/setup.pl) diff --git a/gromacs/tools.py b/gromacs/tools.py index 4db83438..ce897dee 100644 --- a/gromacs/tools.py +++ b/gromacs/tools.py @@ -131,52 +131,128 @@ logger = logging.getLogger("gromacs.tools") -V4TOOLS = ("g_cluster", "g_dyndom", "g_mdmat", "g_principal", "g_select", - "g_wham", "mdrun", "do_dssp", "g_clustsize", "g_enemat", "g_membed", - "g_protonate", "g_sgangle", "g_wheel", "mdrun_d", "editconf", - "g_confrms", "g_energy", "g_mindist", "g_rama", "g_sham", "g_x2top", - "mk_angndx", "eneconv", "g_covar", "g_filter", "g_morph", "g_rdf", - "g_sigeps", "genbox", "pdb2gmx", "g_anadock", "g_current", - "g_gyrate", "g_msd", "g_sorient", "genconf", "g_anaeig", "g_density", - "g_h2order", "g_nmeig", "g_rms", "g_spatial", "genion", "tpbconv", - "g_analyze", "g_densmap", "g_hbond", "g_nmens", "g_rmsdist", - "g_spol", "genrestr", "trjcat", "g_angle", "g_dielectric", "g_helix", - "g_nmtraj", "g_rmsf", "g_tcaf", "gmxcheck", "trjconv", "g_bar", - "g_dih", "g_helixorient", "g_order", "g_rotacf", "g_traj", "gmxdump", - "trjorder", "g_bond", "g_dipoles", "g_kinetics", "g_pme_error", - "g_rotmat", "g_tune_pme", "grompp", "g_bundle", "g_disre", "g_lie", - "g_polystat", "g_saltbr", "g_vanhove", "make_edi", "xpm2ps", "g_chi", - "g_dist", "g_luck", "g_potential", "g_sas", "g_velacc", "make_ndx") +V4TOOLS = ( + "g_cluster", + "g_dyndom", + "g_mdmat", + "g_principal", + "g_select", + "g_wham", + "mdrun", + "do_dssp", + "g_clustsize", + "g_enemat", + "g_membed", + "g_protonate", + "g_sgangle", + "g_wheel", + "mdrun_d", + "editconf", + "g_confrms", + "g_energy", + "g_mindist", + "g_rama", + "g_sham", + "g_x2top", + "mk_angndx", + "eneconv", + "g_covar", + "g_filter", + "g_morph", + "g_rdf", + "g_sigeps", + "genbox", + "pdb2gmx", + "g_anadock", + "g_current", + "g_gyrate", + "g_msd", + "g_sorient", + "genconf", + "g_anaeig", + "g_density", + "g_h2order", + "g_nmeig", + "g_rms", + "g_spatial", + "genion", + "tpbconv", + "g_analyze", + "g_densmap", + "g_hbond", + "g_nmens", + "g_rmsdist", + "g_spol", + "genrestr", + "trjcat", + "g_angle", + "g_dielectric", + "g_helix", + "g_nmtraj", + "g_rmsf", + "g_tcaf", + "gmxcheck", + "trjconv", + "g_bar", + "g_dih", + "g_helixorient", + "g_order", + "g_rotacf", + "g_traj", + "gmxdump", + "trjorder", + "g_bond", + "g_dipoles", + "g_kinetics", + "g_pme_error", + "g_rotmat", + "g_tune_pme", + "grompp", + "g_bundle", + "g_disre", + "g_lie", + "g_polystat", + "g_saltbr", + "g_vanhove", + "make_edi", + "xpm2ps", + "g_chi", + "g_dist", + "g_luck", + "g_potential", + "g_sas", + "g_velacc", + "make_ndx", +) #: dict of names in Gromacs 5 that correspond to an equivalent tool in #: in Gromacs 4. The names are literal Gromacs names. 
NAMES5TO4 = { # same name in both versions - 'grompp': 'grompp', - 'eneconv': 'eneconv', - 'editconf': 'editconf', - 'pdb2gmx': 'pdb2gmx', - 'trjcat': 'trjcat', - 'trjconv': 'trjconv', - 'trjorder': 'trjorder', - 'xpm2ps': 'xpm2ps', - 'mdrun': 'mdrun', - 'make_ndx': 'make_ndx', - 'make_edi': 'make_edi', - 'genrestr': 'genrestr', - 'genion': 'genion', - 'genconf': 'genconf', - 'do_dssp': 'do_dssp', - + "grompp": "grompp", + "eneconv": "eneconv", + "editconf": "editconf", + "pdb2gmx": "pdb2gmx", + "trjcat": "trjcat", + "trjconv": "trjconv", + "trjorder": "trjorder", + "xpm2ps": "xpm2ps", + "mdrun": "mdrun", + "make_ndx": "make_ndx", + "make_edi": "make_edi", + "genrestr": "genrestr", + "genion": "genion", + "genconf": "genconf", + "do_dssp": "do_dssp", # changed names - 'convert-tpr': 'tpbconv', - 'dump': 'gmxdump', - 'check': 'gmxcheck', - 'solvate': 'genbox', - 'distance': 'g_dist', - 'sasa': 'g_sas', - 'gangle': 'g_sgangle' + "convert-tpr": "tpbconv", + "dump": "gmxdump", + "check": "gmxcheck", + "solvate": "genbox", + "distance": "g_dist", + "sasa": "g_sas", + "gangle": "g_sgangle", } @@ -185,7 +261,7 @@ class GromacsToolLoadingError(Exception): class GromacsCommandMultiIndex(GromacsCommand): - """ Command class that accept multiple index files. + """Command class that accept multiple index files. It works combining multiple index files into a single temporary one so that tools that do not (yet) support multi index files as input can be @@ -193,6 +269,7 @@ class GromacsCommandMultiIndex(GromacsCommand): It creates a new file only if multiple index files are supplied. """ + def __init__(self, **kwargs): kwargs = self._fake_multi_ndx(**kwargs) super(GromacsCommandMultiIndex, self).__init__(**kwargs) @@ -202,35 +279,38 @@ def run(self, *args, **kwargs): return super(GromacsCommandMultiIndex, self).run(*args, **kwargs) def _fake_multi_ndx(self, **kwargs): - ndx = kwargs.get('n') - if not (ndx is None or isinstance(ndx, six.string_types)) and \ - len(ndx) > 1 and 's' in kwargs: - ndx.append(kwargs.get('s')) - kwargs['n'] = merge_ndx(*ndx) + ndx = kwargs.get("n") + if ( + not (ndx is None or isinstance(ndx, six.string_types)) + and len(ndx) > 1 + and "s" in kwargs + ): + ndx.append(kwargs.get("s")) + kwargs["n"] = merge_ndx(*ndx) return kwargs def tool_factory(clsname, name, driver, base=GromacsCommand): - """ Factory for GromacsCommand derived types. """ + """Factory for GromacsCommand derived types.""" clsdict = { - 'command_name': name, - 'driver': driver, - '__doc__': property(base._get_gmx_docs) + "command_name": name, + "driver": driver, + "__doc__": property(base._get_gmx_docs), } return type(clsname, (base,), clsdict) def make_valid_identifier(name): - """ Turns tool names into valid identifiers. + """Turns tool names into valid identifiers. :param name: tool name :return: valid identifier """ - return name.replace('-', '_').capitalize() + return name.replace("-", "_").capitalize() def find_executables(path): - """ Find executables in a path. + """Find executables in a path. Searches executables in a directory excluding some know commands unusable with GromacsWrapper. 
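To illustrate what make_valid_identifier() and tool_factory() above produce, a sketch assuming a gmx driver binary is available::

    from gromacs.tools import make_valid_identifier, tool_factory

    name = "convert-tpr"
    clsname = make_valid_identifier(name)      # -> 'Convert_tpr'
    ConvertTpr = tool_factory(clsname, name, "gmx")
    # used like any other wrapped command, e.g.
    #   rc, out, err = ConvertTpr()(s="md.tpr", o="md_2ns.tpr", extend=1000)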
@@ -241,15 +321,25 @@ def find_executables(path): execs = [] for exe in os.listdir(path): fullexe = os.path.join(path, exe) - if (os.access(fullexe, os.X_OK) and not os.path.isdir(fullexe) and - exe not in ['GMXRC', 'GMXRC.bash', 'GMXRC.csh', 'GMXRC.zsh', - 'demux.pl', 'xplor2gmx.pl']): + if ( + os.access(fullexe, os.X_OK) + and not os.path.isdir(fullexe) + and exe + not in [ + "GMXRC", + "GMXRC.bash", + "GMXRC.csh", + "GMXRC.zsh", + "demux.pl", + "xplor2gmx.pl", + ] + ): execs.append(exe) return execs def load_v5_tools(): - """ Load Gromacs 2018/2016/5.x tools automatically using some heuristic. + """Load Gromacs 2018/2016/5.x tools automatically using some heuristic. Tries to load tools (1) using the driver from configured groups (2) and falls back to automatic detection from ``GMXBIN`` (3) then to rough guesses. @@ -262,35 +352,38 @@ def load_v5_tools(): drivers = config.get_tool_names() - if len(drivers) == 0 and 'GMXBIN' in os.environ: - drivers = find_executables(os.environ['GMXBIN']) + if len(drivers) == 0 and "GMXBIN" in os.environ: + drivers = find_executables(os.environ["GMXBIN"]) if len(drivers) == 0 or len(drivers) > 4: - drivers = ['gmx', 'gmx_d', 'gmx_mpi', 'gmx_mpi_d'] + drivers = ["gmx", "gmx_d", "gmx_mpi", "gmx_mpi_d"] - append = config.cfg.getboolean('Gromacs', 'append_suffix', fallback=True) + append = config.cfg.getboolean("Gromacs", "append_suffix", fallback=True) tools = {} for driver in drivers: - suffix = driver.partition('_')[2] + suffix = driver.partition("_")[2] try: - out = subprocess.check_output([driver, '-quiet', 'help', - 'commands']) + out = subprocess.check_output([driver, "-quiet", "help", "commands"]) for line in out.splitlines()[5:-1]: - line = str(line.decode('ascii')) # Python 3: byte string -> str, Python 2: normal string + line = str( + line.decode("ascii") + ) # Python 3: byte string -> str, Python 2: normal string if len(line) > 4: - if (line[4] != ' ') and (' ' in line[4:]): - name = line[4:line.index(' ', 4)] + if (line[4] != " ") and (" " in line[4:]): + name = line[4 : line.index(" ", 4)] fancy = make_valid_identifier(name) if suffix and append: - fancy = '{0!s}_{1!s}'.format(fancy, suffix) + fancy = "{0!s}_{1!s}".format(fancy, suffix) tools[fancy] = tool_factory(fancy, name, driver) except (subprocess.CalledProcessError, OSError): pass if not tools: - errmsg = "Failed to load 2018/2016/5.x tools (tried drivers: {})".format(drivers) + errmsg = "Failed to load 2018/2016/5.x tools (tried drivers: {})".format( + drivers + ) logger.debug(errmsg) raise GromacsToolLoadingError(errmsg) logger.debug("Loaded {0} v5 tools successfully!".format(len(tools))) @@ -298,7 +391,7 @@ def load_v5_tools(): def load_v4_tools(): - """ Load Gromacs 4.x tools automatically using some heuristic. + """Load Gromacs 4.x tools automatically using some heuristic. Tries to load tools (1) in configured tool groups (2) and fails back to automatic detection from ``GMXBIN`` (3) then to a prefilled list. 
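Once load_v5_tools() has populated the registry, the generated classes are used like the hand-written wrappers earlier in this patch; a sketch (file names are placeholders, and suffixed variants such as Trjconv_d only exist for the drivers that were actually found)::

    import gromacs.tools

    trjconv = gromacs.tools.Trjconv(ur="compact", center=True, pbc="mol",
                                    input=("Protein", "System"))
    rc, out, err = trjconv(f="md.xtc", s="md.tpr", o="compact.xtc",
                           stdout=False)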
@@ -311,8 +404,8 @@ def load_v4_tools(): names = config.get_tool_names() - if len(names) == 0 and 'GMXBIN' in os.environ: - names = find_executables(os.environ['GMXBIN']) + if len(names) == 0 and "GMXBIN" in os.environ: + names = find_executables(os.environ["GMXBIN"]) if len(names) == 0 or len(names) > len(V4TOOLS) * 4: names = list(V4TOOLS) @@ -333,7 +426,7 @@ def load_v4_tools(): def merge_ndx(*args): - """ Takes one or more index files and optionally one structure file and + """Takes one or more index files and optionally one structure file and returns a path for a new merged index file. :param args: index files and zero or one structure file @@ -342,34 +435,40 @@ def merge_ndx(*args): ndxs = [] struct = None for fname in args: - if fname.endswith('.ndx'): + if fname.endswith(".ndx"): ndxs.append(fname) else: if struct is not None: raise ValueError("only one structure file supported") struct = fname - fd, multi_ndx = tempfile.mkstemp(suffix='.ndx', prefix='multi_') + fd, multi_ndx = tempfile.mkstemp(suffix=".ndx", prefix="multi_") os.close(fd) atexit.register(os.unlink, multi_ndx) if struct: - make_ndx = registry['Make_ndx'](f=struct, n=ndxs, o=multi_ndx) + make_ndx = registry["Make_ndx"](f=struct, n=ndxs, o=multi_ndx) else: - make_ndx = registry['Make_ndx'](n=ndxs, o=multi_ndx) + make_ndx = registry["Make_ndx"](n=ndxs, o=multi_ndx) - _, _, _ = make_ndx(input=['q'], stdout=False, stderr=False) + _, _, _ = make_ndx(input=["q"], stdout=False, stderr=False) return multi_ndx # Load tools -if config.MAJOR_RELEASE in ('5', '2016', '2018'): - logger.debug("Trying to load configured Gromacs major release {0}".format( - config.MAJOR_RELEASE)) +if config.MAJOR_RELEASE in ("5", "2016", "2018"): + logger.debug( + "Trying to load configured Gromacs major release {0}".format( + config.MAJOR_RELEASE + ) + ) registry = load_v5_tools() -elif config.MAJOR_RELEASE == '4': - logger.debug("Trying to load configured Gromacs major release {0}".format( - config.MAJOR_RELEASE)) +elif config.MAJOR_RELEASE == "4": + logger.debug( + "Trying to load configured Gromacs major release {0}".format( + config.MAJOR_RELEASE + ) + ) registry = load_v4_tools() else: logger.debug("No major release configured: trying 2018/2016/5.x -> 4.x") @@ -403,19 +502,20 @@ def merge_ndx(*args): break else: # the common case of just adding the 'g_' - registry['G_{0!s}'.format(fancy.lower())] = registry[fancy] - + registry["G_{0!s}".format(fancy.lower())] = registry[fancy] # Patching up commands that may be useful to accept multiple index files -for name4, name5 in [('G_mindist', 'Mindist'), ('G_dist', 'Distance')]: +for name4, name5 in [("G_mindist", "Mindist"), ("G_dist", "Distance")]: if name4 in registry: cmd = registry[name4] - registry[name4] = tool_factory(name4, cmd.command_name, cmd.driver, - GromacsCommandMultiIndex) + registry[name4] = tool_factory( + name4, cmd.command_name, cmd.driver, GromacsCommandMultiIndex + ) if name5 in registry: registry[name5] = registry[name4] + # create a release "virtual command" (issue #161) class Release(object): """Release string of the currently loaded Gromacs version. @@ -449,13 +549,15 @@ class Release(object): .. 
versionadded:: 0.8.0 """ - gromacs_version = re.compile("^G[rR][oO][mM][aA][cC][sS] version:" - "\s*(VERSION)?\s*(?P.+)$") + + gromacs_version = re.compile( + "^G[rR][oO][mM][aA][cC][sS] version:" "\s*(VERSION)?\s*(?P.+)$" + ) def __init__(self): self.release = None try: - grompp = registry['Grompp']() + grompp = registry["Grompp"]() rc, out, err = grompp(version=True, stdout=False, stderr=False) output_lines = out.splitlines() + err.splitlines() except KeyError: @@ -465,7 +567,7 @@ def __init__(self): line = line.strip() m = self.gromacs_version.match(line) if m: - self.release = m.group('version') + self.release = m.group("version") break def __call__(self): @@ -497,9 +599,9 @@ def __str__(self): for name in six.iterkeys(registry): __doc__ += ".. class:: {0!s}\n :noindex:\n".format(name) -registry['Release'] = Release +registry["Release"] = Release # Finally add command classes to module's scope globals().update(registry) -__all__ = ['GromacsCommandMultiIndex', 'merge_ndx'] +__all__ = ["GromacsCommandMultiIndex", "merge_ndx"] __all__.extend(list(registry.keys())) diff --git a/gromacs/utilities.py b/gromacs/utilities.py index 389527ea..097760a9 100644 --- a/gromacs/utilities.py +++ b/gromacs/utilities.py @@ -110,18 +110,21 @@ class is derived from it. import numpy import logging -logger = logging.getLogger('gromacs.utilities') + +logger = logging.getLogger("gromacs.utilities") from .exceptions import AutoCorrectionWarning class AttributeDict(dict): """A dictionary with pythonic access to keys as attributes --- useful for interactive work.""" + def __getattribute__(self, x): try: - return super(AttributeDict,self).__getattribute__(x) + return super(AttributeDict, self).__getattribute__(x) except AttributeError: return self[x] + def __setattr__(self, name, value): try: super(AttributeDict, self).__setitem__(name, value) @@ -143,7 +146,7 @@ def autoconvert(s): """ if type(s) is not str: return s - for converter in int, float, str: # try them in increasing order of lenience + for converter in int, float, str: # try them in increasing order of lenience try: s = [converter(i) for i in s.split()] if len(s) == 1: @@ -154,8 +157,9 @@ def autoconvert(s): pass raise ValueError("Failed to autoconvert {0!r}".format(s)) + @contextmanager -def openany(datasource, mode='rt', reset=True): +def openany(datasource, mode="rt", reset=True): """Context manager for :func:`anyopen`. Open the `datasource` and close it when the context of the :keyword:`with` @@ -219,14 +223,15 @@ def openany(datasource, mode='rt', reset=True): # We are on python 2 and bz2.open is not available def bz2_open(filename, mode): """Open and uncompress a BZ2 file""" - mode = mode.replace('t', '').replace('b', '') + mode = mode.replace("t", "").replace("b", "") return bz2.BZ2File(filename, mode) + else: # We are on python 3 so we can use bz2.open bz2_open = bz2.open -def anyopen(datasource, mode='rt', reset=True): +def anyopen(datasource, mode="rt", reset=True): """Open datasource (gzipped, bzipped, uncompressed) and return a stream. `datasource` can be a filename or a stream (see :func:`isstream`). By @@ -257,9 +262,9 @@ def anyopen(datasource, mode='rt', reset=True): :func:`openany` to be used with the :keyword:`with` statement. 
""" - handlers = {'bz2': bz2_open, 'gz': gzip.open, '': open} + handlers = {"bz2": bz2_open, "gz": gzip.open, "": open} - if mode.startswith('r'): + if mode.startswith("r"): if isstream(datasource): stream = datasource try: @@ -273,20 +278,26 @@ def anyopen(datasource, mode='rt', reset=True): try: stream.seek(0) except (AttributeError, IOError): - warnings.warn("Stream {0}: not guaranteed to be at the beginning." - "".format(filename), - category=StreamWarning) + warnings.warn( + "Stream {0}: not guaranteed to be at the beginning." + "".format(filename), + category=StreamWarning, + ) else: stream = None filename = datasource - for ext in ('bz2', 'gz', ''): # file == '' should be last + for ext in ("bz2", "gz", ""): # file == '' should be last openfunc = handlers[ext] stream = _get_stream(datasource, openfunc, mode=mode) if stream is not None: break if stream is None: - raise IOError(errno.EIO, "Cannot open file or stream in mode={mode!r}.".format(**vars()), repr(filename)) - elif mode.startswith('w') or mode.startswith('a'): # append 'a' not tested... + raise IOError( + errno.EIO, + "Cannot open file or stream in mode={mode!r}.".format(**vars()), + repr(filename), + ) + elif mode.startswith("w") or mode.startswith("a"): # append 'a' not tested... if isstream(datasource): stream = datasource try: @@ -297,16 +308,24 @@ def anyopen(datasource, mode='rt', reset=True): stream = None filename = datasource name, ext = os.path.splitext(filename) - if ext.startswith('.'): + if ext.startswith("."): ext = ext[1:] - if not ext in ('bz2', 'gz'): - ext = '' # anything else but bz2 or gz is just a normal file + if not ext in ("bz2", "gz"): + ext = "" # anything else but bz2 or gz is just a normal file openfunc = handlers[ext] stream = openfunc(datasource, mode=mode) if stream is None: - raise IOError(errno.EIO, "Cannot open file or stream in mode={mode!r}.".format(**vars()), repr(filename)) + raise IOError( + errno.EIO, + "Cannot open file or stream in mode={mode!r}.".format(**vars()), + repr(filename), + ) else: - raise NotImplementedError("Sorry, mode={mode!r} is not implemented for {datasource!r}".format(**vars())) + raise NotImplementedError( + "Sorry, mode={mode!r} is not implemented for {datasource!r}".format( + **vars() + ) + ) try: stream.name = filename except (AttributeError, TypeError): @@ -314,7 +333,7 @@ def anyopen(datasource, mode='rt', reset=True): return stream -def _get_stream(filename, openfunction=open, mode='r'): +def _get_stream(filename, openfunction=open, mode="r"): """Return open stream if *filename* can be opened with *openfunction* or else ``None``.""" try: stream = openfunction(filename, mode=mode) @@ -322,10 +341,10 @@ def _get_stream(filename, openfunction=open, mode='r'): # An exception might be raised due to two reasons, first the openfunction is unable to open the file, in this # case we have to ignore the error and return None. Second is when openfunction can't open the file because # either the file isn't there or the permissions don't allow access. - if errno.errorcode[err.errno] in ['ENOENT', 'EACCES']: + if errno.errorcode[err.errno] in ["ENOENT", "EACCES"]: six.reraise(*sys.exc_info()) return None - if mode.startswith('r'): + if mode.startswith("r"): # additional check for reading (eg can we uncompress) --- is this needed? try: stream.readline() @@ -340,6 +359,7 @@ def _get_stream(filename, openfunction=open, mode='r'): stream = openfunction(filename, mode=mode) return stream + def hasmethod(obj, m): """Return ``True`` if object *obj* contains the method *m*. 
@@ -347,6 +367,7 @@ def hasmethod(obj, m): """ return hasattr(obj, m) and callable(getattr(obj, m)) + def isstream(obj): """Detect if `obj` is a stream. @@ -375,7 +396,8 @@ def isstream(obj): signature_methods = ("close",) alternative_methods = ( ("read", "readline", "readlines"), - ("write", "writeline", "writelines")) + ("write", "writeline", "writelines"), + ) # Must have ALL the signature methods for m in signature_methods: @@ -384,18 +406,38 @@ def isstream(obj): # Must have at least one complete set of alternative_methods alternative_results = [ numpy.all([hasmethod(obj, m) for m in alternatives]) - for alternatives in alternative_methods] + for alternatives in alternative_methods + ] return numpy.any(alternative_results) + # TODO: make it work for non-default charge state amino acids. #: translation table for 1-letter codes --> 3-letter codes #: .. Note: This does not work for HISB and non-default charge state aa! -amino_acid_codes = {'A':'ALA', 'C':'CYS', 'D':'ASP', 'E':'GLU', - 'F':'PHE', 'G':'GLY', 'H':'HIS', 'I':'ILE', - 'K':'LYS', 'L':'LEU', 'M':'MET', 'N':'ASN', - 'P':'PRO', 'Q':'GLN', 'R':'ARG', 'S':'SER', - 'T':'THR', 'V':'VAL', 'W':'TRP', 'Y':'TYR'} -inverse_aa_codes = {three: one for one,three in amino_acid_codes.items()} +amino_acid_codes = { + "A": "ALA", + "C": "CYS", + "D": "ASP", + "E": "GLU", + "F": "PHE", + "G": "GLY", + "H": "HIS", + "I": "ILE", + "K": "LYS", + "L": "LEU", + "M": "MET", + "N": "ASN", + "P": "PRO", + "Q": "GLN", + "R": "ARG", + "S": "SER", + "T": "THR", + "V": "VAL", + "W": "TRP", + "Y": "TYR", +} +inverse_aa_codes = {three: one for one, three in amino_acid_codes.items()} + def convert_aa_code(x): """Converts between 3-letter and 1-letter amino acid codes.""" @@ -404,8 +446,10 @@ def convert_aa_code(x): elif len(x) == 3: return inverse_aa_codes[x.upper()] else: - raise ValueError("Can only convert 1-letter or 3-letter amino acid codes, " - "not %r" % x) + raise ValueError( + "Can only convert 1-letter or 3-letter amino acid codes, " "not %r" % x + ) + @contextmanager def in_dir(directory, create=True): @@ -426,14 +470,19 @@ def in_dir(directory, create=True): if create and err.errno == errno.ENOENT: os.makedirs(directory) os.chdir(directory) - logger.info("Working in {directory!r} (newly created)...".format(**vars())) + logger.info( + "Working in {directory!r} (newly created)...".format(**vars()) + ) else: - logger.exception("Failed to start working in {directory!r}.".format(**vars())) + logger.exception( + "Failed to start working in {directory!r}.".format(**vars()) + ) raise yield os.getcwd() finally: os.chdir(startdir) + def realpath(*args): """Join all args and return the real path, rooted at /. @@ -443,8 +492,8 @@ def realpath(*args): """ if None in args: return None - return os.path.realpath( - os.path.expandvars(os.path.expanduser(os.path.join(*args)))) + return os.path.realpath(os.path.expandvars(os.path.expanduser(os.path.join(*args)))) + def find_first(filename, suffices=None): """Find first *filename* with a suffix from *suffices*. @@ -460,7 +509,7 @@ def find_first(filename, suffices=None): """ # struct is not reliable as it depends on qscript so now we just try everything... 
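Two small sketches for the helpers above: convert_aa_code() maps between 1- and 3-letter residue codes, and in_dir() temporarily changes into (and optionally creates) a working directory::

    from gromacs.utilities import convert_aa_code, in_dir

    convert_aa_code("A")      # -> 'ALA'
    convert_aa_code("ALA")    # -> 'A'

    with in_dir("analysis/rmsd", create=True):
        ...                   # files written here end up under analysis/rmsd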
- root,extension = os.path.splitext(filename) + root, extension = os.path.splitext(filename) if suffices is None: suffices = [] else: @@ -472,14 +521,18 @@ def find_first(filename, suffices=None): return fn return None + def withextsep(extensions): """Return list in which each element is guaranteed to start with :data:`os.path.extsep`.""" + def dottify(x): if x.startswith(os.path.extsep): return x return os.path.extsep + x + return [dottify(x) for x in asiterable(extensions)] + def find_files(directory, pattern): """Find files recursively under *directory*, matching *pattern* (generator). @@ -494,6 +547,7 @@ def find_files(directory, pattern): filename = os.path.join(root, basename) yield filename + def which(program): """Determine full path of executable *program* on :envvar:`PATH`. @@ -517,6 +571,7 @@ def is_exe(fpath): return exe_file return None + class FileUtils(object): """Mixin class to provide additional file-related capabilities.""" @@ -538,11 +593,13 @@ def _init_filename(self, filename=None, ext=None): """ extension = ext or self.default_extension - filename = self.filename(filename, ext=extension, use_my_ext=True, set_default=True) + filename = self.filename( + filename, ext=extension, use_my_ext=True, set_default=True + ) #: Current full path of the object for reading and writing I/O. self.real_filename = os.path.realpath(filename) - def filename(self,filename=None,ext=None,set_default=False,use_my_ext=False): + def filename(self, filename=None, ext=None, set_default=False, use_my_ext=False): """Supply a file name for the class object. Typical uses:: @@ -568,16 +625,18 @@ def filename(self,filename=None,ext=None,set_default=False,use_my_ext=False): An empty string as *ext* = "" will suppress appending an extension. """ if filename is None: - if not hasattr(self,'_filename'): - self._filename = None # add attribute to class + if not hasattr(self, "_filename"): + self._filename = None # add attribute to class if self._filename: filename = self._filename else: - raise ValueError("A file name is required because no default file name was defined.") + raise ValueError( + "A file name is required because no default file name was defined." + ) my_ext = None else: filename, my_ext = os.path.splitext(filename) - if set_default: # replaces existing default file name + if set_default: # replaces existing default file name self._filename = filename if my_ext and use_my_ext: ext = my_ext @@ -588,7 +647,7 @@ def filename(self,filename=None,ext=None,set_default=False,use_my_ext=False): filename = filename + os.extsep + ext return filename - def check_file_exists(self, filename, resolve='exception', force=None): + def check_file_exists(self, filename, resolve="exception", force=None): """If a file exists then continue with the action specified in ``resolve``. 
``resolve`` must be one of @@ -612,27 +671,31 @@ def check_file_exists(self, filename, resolve='exception', force=None): ``None`` ignored, do whatever *resolve* says """ + def _warn(x): msg = "File {0!r} already exists.".format(x) logger.warning(msg) warnings.warn(msg) return True + def _raise(x): msg = "File {0!r} already exists.".format(x) logger.error(msg) raise IOError(errno.EEXIST, x, msg) - solutions = {'ignore': lambda x: False, # file exists, but we pretend that it doesn't - 'indicate': lambda x: True, # yes, file exists - 'warn': _warn, - 'warning': _warn, - 'exception': _raise, - 'raise': _raise, - } + + solutions = { + "ignore": lambda x: False, # file exists, but we pretend that it doesn't + "indicate": lambda x: True, # yes, file exists + "warn": _warn, + "warning": _warn, + "exception": _raise, + "raise": _raise, + } if force is True: - resolve = 'ignore' + resolve = "ignore" elif force is False: - resolve = 'exception' + resolve = "exception" if not os.path.isfile(filename): return False @@ -647,13 +710,13 @@ def infix_filename(self, name, default, infix, ext=None): ext = oldext if ext.startswith(os.extsep): ext = ext[1:] - name = self.filename(p+infix, ext=ext) + name = self.filename(p + infix, ext=ext) return name def __repr__(self): fmt = "{0!s}(filename=%r)".format(self.__class__.__name__) try: - fn = self.filename() + fn = self.filename() except ValueError: fn = None return fmt % fn @@ -662,21 +725,23 @@ def __repr__(self): def iterable(obj): """Returns ``True`` if *obj* can be iterated over and is *not* a string.""" if isinstance(obj, string_types): - return False # avoid iterating over characters of a string - if hasattr(obj, 'next'): - return True # any iterator will do + return False # avoid iterating over characters of a string + if hasattr(obj, "next"): + return True # any iterator will do try: - len(obj) # anything else that might work + len(obj) # anything else that might work except TypeError: return False return True + def asiterable(obj): """Returns obj so that it can be iterated over; a string is *not* treated as iterable""" if not iterable(obj): obj = [obj] return obj + def firstof(obj): """Returns the first entry of a sequence or the obj. @@ -684,8 +749,10 @@ def firstof(obj): """ return asiterable(obj)[0] + # In utilities so that it can be safely used in tools, cbook, ... + def unlink_f(path): """Unlink path but do not complain if file does not exist.""" try: @@ -694,20 +761,23 @@ def unlink_f(path): if err.errno != errno.ENOENT: raise + def unlink_gmx(*args): """Unlink (remove) Gromacs file(s) and all corresponding backups.""" for path in args: unlink_f(path) unlink_gmx_backups(*args) + def unlink_gmx_backups(*args): """Unlink (rm) all backup files corresponding to the listed files.""" for path in args: dirname, filename = os.path.split(path) - fbaks = glob.glob(os.path.join(dirname, '#'+filename+'.*#')) + fbaks = glob.glob(os.path.join(dirname, "#" + filename + ".*#")) for bak in fbaks: unlink_f(bak) + def mkdir_p(path): """Create a directory *path* with subdirs but do not complain if it exists. @@ -719,6 +789,7 @@ def mkdir_p(path): if err.errno != errno.EEXIST: raise + def cat(f=None, o=None): """Concatenate files *f*=[...] 
and write to *o*"""
     # need f, o to be compatible with trjcat and eneconv
@@ -727,10 +798,12 @@
         target = o
     infiles = asiterable(f)
     logger.debug("cat {0!s} > {1!s} ".format(" ".join(infiles), target))
-    with open(target, 'w') as out:
-        rc = subprocess.call(['cat'] + infiles, stdout=out)
+    with open(target, "w") as out:
+        rc = subprocess.call(["cat"] + infiles, stdout=out)
     if rc != 0:
-        msg = "failed with return code {0:d}: cat {1!r} > {2!r} ".format(rc, " ".join(infiles), target)
+        msg = "failed with return code {0:d}: cat {1!r} > {2!r} ".format(
+            rc, " ".join(infiles), target
+        )
         logger.exception(msg)
         raise OSError(errno.EIO, msg, target)
 
@@ -744,15 +817,18 @@ def activate_subplot(numPlot):
     """
     # see http://www.mail-archive.com/matplotlib-users@lists.sourceforge.net/msg07156.html
     from pylab import gcf, axes
+
     numPlot -= 1  # index is 0-based, plots are 1-based
     return axes(gcf().get_axes()[numPlot])
 
+
 def remove_legend(ax=None):
     """Remove legend for axes or gca.
 
     See http://osdir.com/ml/python.matplotlib.general/2005-07/msg00285.html
     """
     from pylab import gca, draw
+
     if ax is None:
         ax = gca()
     ax.legend_ = None
@@ -777,17 +853,17 @@ def dhours(self):
     @property
     def dminutes(self):
         """Minutes component of the timedelta."""
-        return self.seconds // 60 - 60*self.dhours
+        return self.seconds // 60 - 60 * self.dhours
 
     @property
     def dseconds(self):
         """Seconds component of the timedelta."""
-        return self.seconds - 3600*self.dhours - 60*self.dminutes
+        return self.seconds - 3600 * self.dhours - 60 * self.dminutes
 
     @property
     def ashours(self):
         """Timedelta in (fractional) hours."""
-        return 24*self.days + self.seconds / 3600.0
+        return 24 * self.days + self.seconds / 3600.0
 
     def strftime(self, fmt="%d:%H:%M:%S"):
         """Primitive string formatter.
@@ -806,10 +882,10 @@ def strftime(self, fmt="%d:%H:%M:%S"):
         substitutions = {
             "%d": str(self.days),
             "%H": "{0:02d}".format(self.dhours),
-            "%h": str(24*self.days + self.dhours),
+            "%h": str(24 * self.days + self.dhours),
             "%M": "{0:02d}".format(self.dminutes),
             "%S": "{0:02d}".format(self.dseconds),
-            }
+        }
         s = fmt
         for search, replacement in substitutions.items():
             s = s.replace(search, replacement)
@@ -818,6 +894,7 @@ def strftime(self, fmt="%d:%H:%M:%S"):
 
 
 NUMBERED_PDB = re.compile(r"(?P<PREFIX>.*\D)(?P<NUMBER>\d+)\.(?P<SUFFIX>pdb)")
 
+
 def number_pdbs(*args, **kwargs):
     """Rename pdbs x1.pdb ... x345.pdb --> x0001.pdb ...
x0345.pdb @@ -826,16 +903,16 @@ def number_pdbs(*args, **kwargs): - *format*: format string including keyword *num* ["%(num)04d"] """ - format = kwargs.pop('format', "%(num)04d") - name_format = "%(prefix)s" + format +".%(suffix)s" + format = kwargs.pop("format", "%(num)04d") + name_format = "%(prefix)s" + format + ".%(suffix)s" for f in itertools.chain.from_iterable(map(glob.glob, args)): m = NUMBERED_PDB.search(f) if m is None: continue - num = int(m.group('NUMBER')) - prefix = m.group('PREFIX') - suffix = m.group('SUFFIX') + num = int(m.group("NUMBER")) + prefix = m.group("PREFIX") + suffix = m.group("SUFFIX") newname = name_format % vars() logger.info("Renaming {f!r} --> {newname!r}".format(**vars())) try: diff --git a/scripts/gw-forcefield.py b/scripts/gw-forcefield.py index e7ec40d6..9b482f9c 100755 --- a/scripts/gw-forcefield.py +++ b/scripts/gw-forcefield.py @@ -7,75 +7,116 @@ def scale_angles(mol, angles): - new_angles = {} - for dh in mol.angles: - atypes = dh.atom1.get_atomtype(), dh.atom2.get_atomtype(), dh.atom3.get_atomtype() - atypes = [a.replace("_", "").replace("=","") for a in atypes] - for iswitch in range(16): - if (iswitch%2==0 ): - a1=atypes[0]; a2=atypes[1]; a3=atypes[2] - else: - a1=atypes[2]; a2=atypes[1]; a3=atypes[0] - - if((iswitch//2)%2==1): a1="X"; - if((iswitch//4)%2==1): a2="X"; - if((iswitch//8)%2==1): a3="X"; - key = "{0}-{1}-{2}-{3}".format(a1, a2, a3, dh.gromacs['func']) - if (key in angles): - for i, at in enumerate(angles[key]): - #new_angles.append(at) - new_angles[key] = at - break - return new_angles.values() + new_angles = {} + for dh in mol.angles: + atypes = ( + dh.atom1.get_atomtype(), + dh.atom2.get_atomtype(), + dh.atom3.get_atomtype(), + ) + atypes = [a.replace("_", "").replace("=", "") for a in atypes] + for iswitch in range(16): + if iswitch % 2 == 0: + a1 = atypes[0] + a2 = atypes[1] + a3 = atypes[2] + else: + a1 = atypes[2] + a2 = atypes[1] + a3 = atypes[0] + + if (iswitch // 2) % 2 == 1: + a1 = "X" + if (iswitch // 4) % 2 == 1: + a2 = "X" + if (iswitch // 8) % 2 == 1: + a3 = "X" + key = "{0}-{1}-{2}-{3}".format(a1, a2, a3, dh.gromacs["func"]) + if key in angles: + for i, at in enumerate(angles[key]): + # new_angles.append(at) + new_angles[key] = at + break + return new_angles.values() def scale_dihedrals(mol, dihedrals): - new_dihedrals = {} - for dh in mol.dihedrals: - atypes = dh.atom1.get_atomtype(), dh.atom2.get_atomtype(), dh.atom3.get_atomtype(), dh.atom4.get_atomtype() - atypes = [a.replace("_", "").replace("=","") for a in atypes] - for iswitch in range(32): - if (iswitch%2==0 ): - a1=atypes[0]; a2=atypes[1]; a3=atypes[2]; a4=atypes[3] - else: - a1=atypes[3]; a2=atypes[2]; a3=atypes[1]; a4=atypes[0] - - if((iswitch//2)%2==1): a1="X"; - if((iswitch//4)%2==1): a2="X"; - if((iswitch//8)%2==1): a3="X"; - if((iswitch//16)%2==1): a4="X"; - key = "{0}-{1}-{2}-{3}-{4}".format(a1, a2, a3, a4, dh.gromacs['func']) - if (key in dihedrals): - for i, dt in enumerate(dihedrals[key]): - #new_dihedrals.append(dt) - new_dihedrals[key] = dt - break - - print(new_dihedrals) - return new_dihedrals.values() + new_dihedrals = {} + for dh in mol.dihedrals: + atypes = ( + dh.atom1.get_atomtype(), + dh.atom2.get_atomtype(), + dh.atom3.get_atomtype(), + dh.atom4.get_atomtype(), + ) + atypes = [a.replace("_", "").replace("=", "") for a in atypes] + for iswitch in range(32): + if iswitch % 2 == 0: + a1 = atypes[0] + a2 = atypes[1] + a3 = atypes[2] + a4 = atypes[3] + else: + a1 = atypes[3] + a2 = atypes[2] + a3 = atypes[1] + a4 = atypes[0] + + if (iswitch // 
2) % 2 == 1: + a1 = "X" + if (iswitch // 4) % 2 == 1: + a2 = "X" + if (iswitch // 8) % 2 == 1: + a3 = "X" + if (iswitch // 16) % 2 == 1: + a4 = "X" + key = "{0}-{1}-{2}-{3}-{4}".format(a1, a2, a3, a4, dh.gromacs["func"]) + if key in dihedrals: + for i, dt in enumerate(dihedrals[key]): + # new_dihedrals.append(dt) + new_dihedrals[key] = dt + break + + print(new_dihedrals) + return new_dihedrals.values() def scale_impropers(mol, impropers): - new_impropers = {} - for im in mol.impropers: - atypes = im.atom1.get_atomtype(), im.atom2.get_atomtype(), im.atom3.get_atomtype(), im.atom4.get_atomtype() - atypes = [a.replace("_", "").replace("=","") for a in atypes] - for iswitch in range(32): - if (iswitch%2==0 ): - a1=atypes[0]; a2=atypes[1]; a3=atypes[2]; a4=atypes[3]; - else: - a1=atypes[3]; a2=atypes[2]; a3=atypes[1]; a4=atypes[0]; - if((iswitch/2)%2==1): a1="X"; - if((iswitch/4)%2==1): a2="X"; - if((iswitch/8)%2==1): a3="X"; - if((iswitch/16)%2==1): a4="X"; - key = "{0}-{1}-{2}-{3}-{4}".format(a1, a2, a3, a4, im.gromacs['func']) - if (key in impropers): - for i, imt in enumerate(impropers[key]): - new_impropers[key] = imt - break - print(new_impropers) - return new_impropers.values() + new_impropers = {} + for im in mol.impropers: + atypes = ( + im.atom1.get_atomtype(), + im.atom2.get_atomtype(), + im.atom3.get_atomtype(), + im.atom4.get_atomtype(), + ) + atypes = [a.replace("_", "").replace("=", "") for a in atypes] + for iswitch in range(32): + if iswitch % 2 == 0: + a1 = atypes[0] + a2 = atypes[1] + a3 = atypes[2] + a4 = atypes[3] + else: + a1 = atypes[3] + a2 = atypes[2] + a3 = atypes[1] + a4 = atypes[0] + if (iswitch / 2) % 2 == 1: + a1 = "X" + if (iswitch / 4) % 2 == 1: + a2 = "X" + if (iswitch / 8) % 2 == 1: + a3 = "X" + if (iswitch / 16) % 2 == 1: + a4 = "X" + key = "{0}-{1}-{2}-{3}-{4}".format(a1, a2, a3, a4, im.gromacs["func"]) + if key in impropers: + for i, imt in enumerate(impropers[key]): + new_impropers[key] = imt + break + print(new_impropers) + return new_impropers.values() parser = argparse.ArgumentParser() @@ -97,7 +138,9 @@ def scale_impropers(mol, impropers): # BONDTYPES # bondtypes = {tuple(sorted((b.atom1.atomtype, b.atom2.atomtype))) for b in mol.bonds} -bondtypes_dictionary = {tuple(sorted((bt.atype1, bt.atype2))): bt for bt in top.bondtypes} +bondtypes_dictionary = { + tuple(sorted((bt.atype1, bt.atype2))): bt for bt in top.bondtypes +} top.bondtypes = [bondtypes_dictionary[bt] for bt in bondtypes] # @@ -105,18 +148,22 @@ def scale_impropers(mol, impropers): # angletypes = {} for at in top.angletypes: - name = "{0}-{1}-{2}-{3}".format(at.atype1, at.atype2, at.atype3, at.gromacs['func']) - if not name in angletypes: angletypes[name] = [] - angletypes[name].append(at) + name = "{0}-{1}-{2}-{3}".format(at.atype1, at.atype2, at.atype3, at.gromacs["func"]) + if not name in angletypes: + angletypes[name] = [] + angletypes[name].append(at) # # Build dihedral dictionary # dihedraltypes = {} for dt in top.dihedraltypes: - name = "{0}-{1}-{2}-{3}-{4}".format(dt.atype1, dt.atype2, dt.atype3, dt.atype4, dt.gromacs['func']) - if not name in dihedraltypes: dihedraltypes[name] = [] - dihedraltypes[name].append(dt) + name = "{0}-{1}-{2}-{3}-{4}".format( + dt.atype1, dt.atype2, dt.atype3, dt.atype4, dt.gromacs["func"] + ) + if not name in dihedraltypes: + dihedraltypes[name] = [] + dihedraltypes[name].append(dt) print("Build dihedraltypes dictionary with {0} entries".format(len(dihedraltypes))) # @@ -124,9 +171,12 @@ def scale_impropers(mol, impropers): # impropertypes = {} for it in 
top.impropertypes: - name = "{0}-{1}-{2}-{3}-{4}".format(it.atype1, it.atype2, it.atype3, it.atype4, it.gromacs['func']) - if not name in impropertypes: impropertypes[name] = [] - impropertypes[name].append(it) + name = "{0}-{1}-{2}-{3}-{4}".format( + it.atype1, it.atype2, it.atype3, it.atype4, it.gromacs["func"] + ) + if not name in impropertypes: + impropertypes[name] = [] + impropertypes[name].append(it) print("Build impropertypes dictionary with {0} entries".format(len(impropertypes))) top.angletypes = scale_angles(mol, angletypes) @@ -137,12 +187,15 @@ def scale_impropers(mol, impropers): top.cmaptypes = [] atomtypes = {at.atype for at in top.atomtypes} -pairtypes = [pt for pt in top.pairtypes if (pt.atype1 in atomtypes) and (pt.atype2 in atomtypes)] +pairtypes = [ + pt for pt in top.pairtypes if (pt.atype1 in atomtypes) and (pt.atype2 in atomtypes) +] top.pairtypes = pairtypes # Remove non-default moleculetypes for k in top.dict_molname_mol.keys(): - if k in [molname]: continue - del top.dict_molname_mol[k] + if k in [molname]: + continue + del top.dict_molname_mol[k] top.write(args.output) diff --git a/scripts/gw-join_parts.py b/scripts/gw-join_parts.py index 8657d85f..5cf4da98 100755 --- a/scripts/gw-join_parts.py +++ b/scripts/gw-join_parts.py @@ -28,17 +28,22 @@ import os.path import logging -logger = logging.getLogger("gromacs.app") +logger = logging.getLogger("gromacs.app") if __name__ == "__main__": from optparse import OptionParser parser = OptionParser(usage=usage) - parser.add_option("-B", "--basedir", dest="basedir", default=os.curdir, - metavar="DIR", - help="find trajectories in DIR [%default]") + parser.add_option( + "-B", + "--basedir", + dest="basedir", + default=os.curdir, + metavar="DIR", + help="find trajectories in DIR [%default]", + ) opts, args = parser.parse_args() fulldir = "full" @@ -55,13 +60,24 @@ logger.info("Processing PREFIX=%(prefix)r...", vars()) try: - gromacs.cbook.cat(prefix=prefix, dirname=opts.basedir, - partsdir=partsdir, fulldir=fulldir, - resolve_multi="guess") + gromacs.cbook.cat( + prefix=prefix, + dirname=opts.basedir, + partsdir=partsdir, + fulldir=fulldir, + resolve_multi="guess", + ) except: logger.fatal("Something went wrong during joining (see below)") - logger.fatal("To recover, manually move the processed parts from %r back to %r", partsdir_path, opts.basedir) - logger.fatal("It is also recommended to delete %(fulldir_path)r and start from the beginning", vars()) + logger.fatal( + "To recover, manually move the processed parts from %r back to %r", + partsdir_path, + opts.basedir, + ) + logger.fatal( + "It is also recommended to delete %(fulldir_path)r and start from the beginning", + vars(), + ) logger.exception("See stacktrace for details") raise diff --git a/scripts/gw-merge_topologies.py b/scripts/gw-merge_topologies.py index ea38c4e9..119dbef7 100755 --- a/scripts/gw-merge_topologies.py +++ b/scripts/gw-merge_topologies.py @@ -68,92 +68,146 @@ def get_no_atoms(molecule): - atoms = molecule.sections['header'].sections['moleculetype'].sections['atoms'].data.atomnr - # The first atom has to be '1' - assert np.min(atoms) == 1 - # Check if atomnumbering is contigous - assert not (range(np.min(atoms), np.max(atoms)+1) - atoms).all() - return len(atoms) - + atoms = ( + molecule.sections["header"] + .sections["moleculetype"] + .sections["atoms"] + .data.atomnr + ) + # The first atom has to be '1' + assert np.min(atoms) == 1 + # Check if atomnumbering is contigous + assert not (range(np.min(atoms), np.max(atoms) + 1) - atoms).all() + 
return len(atoms) + def add_comment(molecule, blocks, comment): - for block in blocks: - molecule.sections['header'].sections['moleculetype'].sections[block].data.comment[0] += comment + for block in blocks: + molecule.sections["header"].sections["moleculetype"].sections[ + block + ].data.comment[0] += comment + def merge_topologies(topol_list, topolout, blocks, name="MOL", dirty=True): - molecules = [] - for topol in topol_list: - molecule = gromacs.fileformats.itp.ITP(topol) - molecules.append(molecule) - - no_atoms = 0 - # - # * Shift all the atom numbering in all sections - # - for i, molecule in enumerate(molecules): - if dirty: - add_comment(molecule, blocks, " [from {0!s}]".format(topol_list[i])) - - # Adjust everything to the first molecule (i == 0), shift the atoms of the next molecules by this amount - if not i: - no_atoms = get_no_atoms(molecule) - continue - no_atoms0 = get_no_atoms(molecule) - # JD The setting could be generalized using :meth:`getattr()` but here, I've decided - # to keep some locality/redundancy, in case of a major blunder on my side. - molecule = molecule.sections['header'].sections['moleculetype'] - - molecule.sections['atoms'].data.atomnr += no_atoms - - molecule.sections['bonds'].data.ai += no_atoms - molecule.sections['bonds'].data.aj += no_atoms - - molecule.sections['angles'].data.ai += no_atoms - molecule.sections['angles'].data.aj += no_atoms - molecule.sections['angles'].data.ak += no_atoms - - molecule.sections['dihedrals'].data.ai += no_atoms - molecule.sections['dihedrals'].data.aj += no_atoms - molecule.sections['dihedrals'].data.ak += no_atoms - molecule.sections['dihedrals'].data.al += no_atoms - - molecule.sections['pairs'].data.ai += no_atoms - molecule.sections['pairs'].data.aj += no_atoms - - no_atoms += no_atoms0 - # - # * Merege all the topology sections.data - # - atoms = np.concatenate([m.sections['header'].sections['moleculetype'].sections['atoms'].data for m in molecules]) - bonds = np.concatenate([m.sections['header'].sections['moleculetype'].sections['bonds'].data for m in molecules]) - angle = np.concatenate([m.sections['header'].sections['moleculetype'].sections['angles'].data for m in molecules]) - dihed = np.concatenate([m.sections['header'].sections['moleculetype'].sections['dihedrals'].data for m in molecules]) - pairs = np.concatenate([m.sections['header'].sections['moleculetype'].sections['pairs'].data for m in molecules]) - - mol = molecules[0] - molecule = mol.sections['header'].sections['moleculetype'] - - molecule.sections['atoms'].set_data(atoms) - molecule.sections['bonds'].set_data(bonds) - molecule.sections['angles'].set_data(angle) - molecule.sections['dihedrals'].set_data(dihed) - molecule.sections['pairs'].set_data(pairs) - - molecule.data['name'] = name - - mol.write(topolout) + molecules = [] + for topol in topol_list: + molecule = gromacs.fileformats.itp.ITP(topol) + molecules.append(molecule) + + no_atoms = 0 + # + # * Shift all the atom numbering in all sections + # + for i, molecule in enumerate(molecules): + if dirty: + add_comment(molecule, blocks, " [from {0!s}]".format(topol_list[i])) + + # Adjust everything to the first molecule (i == 0), shift the atoms of the next molecules by this amount + if not i: + no_atoms = get_no_atoms(molecule) + continue + no_atoms0 = get_no_atoms(molecule) + # JD The setting could be generalized using :meth:`getattr()` but here, I've decided + # to keep some locality/redundancy, in case of a major blunder on my side. 
+ molecule = molecule.sections["header"].sections["moleculetype"] + + molecule.sections["atoms"].data.atomnr += no_atoms + + molecule.sections["bonds"].data.ai += no_atoms + molecule.sections["bonds"].data.aj += no_atoms + + molecule.sections["angles"].data.ai += no_atoms + molecule.sections["angles"].data.aj += no_atoms + molecule.sections["angles"].data.ak += no_atoms + + molecule.sections["dihedrals"].data.ai += no_atoms + molecule.sections["dihedrals"].data.aj += no_atoms + molecule.sections["dihedrals"].data.ak += no_atoms + molecule.sections["dihedrals"].data.al += no_atoms + + molecule.sections["pairs"].data.ai += no_atoms + molecule.sections["pairs"].data.aj += no_atoms + + no_atoms += no_atoms0 + # + # * Merege all the topology sections.data + # + atoms = np.concatenate( + [ + m.sections["header"].sections["moleculetype"].sections["atoms"].data + for m in molecules + ] + ) + bonds = np.concatenate( + [ + m.sections["header"].sections["moleculetype"].sections["bonds"].data + for m in molecules + ] + ) + angle = np.concatenate( + [ + m.sections["header"].sections["moleculetype"].sections["angles"].data + for m in molecules + ] + ) + dihed = np.concatenate( + [ + m.sections["header"].sections["moleculetype"].sections["dihedrals"].data + for m in molecules + ] + ) + pairs = np.concatenate( + [ + m.sections["header"].sections["moleculetype"].sections["pairs"].data + for m in molecules + ] + ) + + mol = molecules[0] + molecule = mol.sections["header"].sections["moleculetype"] + + molecule.sections["atoms"].set_data(atoms) + molecule.sections["bonds"].set_data(bonds) + molecule.sections["angles"].set_data(angle) + molecule.sections["dihedrals"].set_data(dihed) + molecule.sections["pairs"].set_data(pairs) + + molecule.data["name"] = name + + mol.write(topolout) -def main(): - parser = argparse.ArgumentParser(description=desc, formatter_class=RawTextHelpFormatter) - parser.add_argument('-n', dest='name', metavar="MOL", default="MOL", help="Molecule name") - parser.add_argument('-d', dest='dirty',metavar="dirty", type=bool, default=True, help="Comment first atoms/terms with the input file they come from") - parser.add_argument('-p', dest='topol', metavar="topol.itp",default=["topol.itp"], nargs='*') - parser.add_argument('-b', dest='blocks', metavar="block", default=['atoms', 'bonds', 'angles', 'dihedrals', 'pairs'], nargs='*') - parser.add_argument('-po', dest='topolout', metavar="topolout.itp", default="topolout.itp") - args = parser.parse_args() - - merge_topologies(args.topol, args.topolout, args.blocks, args.name, args.dirty) +def main(): + parser = argparse.ArgumentParser( + description=desc, formatter_class=RawTextHelpFormatter + ) + parser.add_argument( + "-n", dest="name", metavar="MOL", default="MOL", help="Molecule name" + ) + parser.add_argument( + "-d", + dest="dirty", + metavar="dirty", + type=bool, + default=True, + help="Comment first atoms/terms with the input file they come from", + ) + parser.add_argument( + "-p", dest="topol", metavar="topol.itp", default=["topol.itp"], nargs="*" + ) + parser.add_argument( + "-b", + dest="blocks", + metavar="block", + default=["atoms", "bonds", "angles", "dihedrals", "pairs"], + nargs="*", + ) + parser.add_argument( + "-po", dest="topolout", metavar="topolout.itp", default="topolout.itp" + ) + args = parser.parse_args() + + merge_topologies(args.topol, args.topolout, args.blocks, args.name, args.dirty) if __name__ == "__main__": diff --git a/scripts/gw-partial_tempering.py b/scripts/gw-partial_tempering.py index 7396dd24..9e4b7d8e 100755 
--- a/scripts/gw-partial_tempering.py +++ b/scripts/gw-partial_tempering.py @@ -1,4 +1,3 @@ - # GromacsWrapper: gw-partial_tempering.py # Copyright (c) 2009 Oliver Beckstein # Released under the GNU Public License 3 (or higher, your choice) @@ -7,20 +6,38 @@ from gromacs.scaling import partial_tempering import argparse -description=""" +description = """ Modify gromacs processed topology (processed.top, generated by `grompp -pp` command) for running with solute tempering replica exchange (REST2). """ + def parse_args(): - parser = argparse.ArgumentParser(description=description) - parser.add_argument("--scale_protein", type=float, help="scale protein interactions by this scaling factor (0-1)") - parser.add_argument("--scale_lipids", type=float, help="scale lipid interactions by this scaling factor (0-1)") - parser.add_argument("--scale_protein_lipids", type=float, help="scale lipid-lipid interactions by this scaling factor (0-1)") - parser.add_argument("input", help="input topology (processed.top)") - parser.add_argument("output", help="output topology") - parser.add_argument("--banned_lines", default="", help="line numbers of dihedrals/impropers that one wishes to exclude from scaling") - return parser.parse_args() + parser = argparse.ArgumentParser(description=description) + parser.add_argument( + "--scale_protein", + type=float, + help="scale protein interactions by this scaling factor (0-1)", + ) + parser.add_argument( + "--scale_lipids", + type=float, + help="scale lipid interactions by this scaling factor (0-1)", + ) + parser.add_argument( + "--scale_protein_lipids", + type=float, + help="scale lipid-lipid interactions by this scaling factor (0-1)", + ) + parser.add_argument("input", help="input topology (processed.top)") + parser.add_argument("output", help="output topology") + parser.add_argument( + "--banned_lines", + default="", + help="line numbers of dihedrals/impropers that one wishes to exclude from scaling", + ) + return parser.parse_args() + args = parse_args() -partial_tempering(args) \ No newline at end of file +partial_tempering(args) diff --git a/setup.py b/setup.py index 175dd7b5..e036acdb 100644 --- a/setup.py +++ b/setup.py @@ -13,59 +13,67 @@ long_description = readme.read() -setup(name="GromacsWrapper", - version=versioneer.get_version(), - cmdclass=versioneer.get_cmdclass(), - description="A Python wrapper around the Gromacs tools.", - long_description=long_description, - long_description_content_type="text/x-rst", - author="Oliver Beckstein", - author_email="orbeckst@gmail.com", - license="GPLv3", - url="https://github.com/Becksteinlab/GromacsWrapper", - download_url="https://github.com/Becksteinlab/GromacsWrapper/downloads", - keywords="science Gromacs analysis 'molecular dynamics'", - classifiers=[ - 'Development Status :: 4 - Beta', - 'Environment :: Console', - 'Intended Audience :: Science/Research', - 'License :: OSI Approved :: GNU General Public License (GPL)', - 'License :: OSI Approved :: BSD License', - 'Operating System :: POSIX', - 'Operating System :: MacOS :: MacOS X', - 'Operating System :: Microsoft :: Windows ', - 'Programming Language :: Python', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - 'Programming Language :: Python :: 3.10', - 'Programming Language :: Python :: 3.11', - 'Topic :: 
Scientific/Engineering :: Bio-Informatics', - 'Topic :: Scientific/Engineering :: Chemistry', - 'Topic :: Software Development :: Libraries :: Python Modules', - ], - packages=find_packages( - exclude=['scripts', 'tests', 'tests.*', 'extras', 'doc/examples']), - scripts=[ - 'scripts/gw-join_parts.py', - 'scripts/gw-merge_topologies.py', - 'scripts/gw-forcefield.py', - 'scripts/gw-partial_tempering.py', - ], - package_data={'gromacs': ['templates/*.sge', 'templates/*.pbs', # template files - 'templates/*.ll', 'templates/*.sh', - 'templates/*.mdp', 'templates/*.cfg' - ], - }, - install_requires=['numpy>=1.0', - 'six', # towards py 3 compatibility - 'numkit', # numerical helpers - 'matplotlib', - ], - tests_require=['pytest', 'numpy>=1.0', 'pandas>=0.17'], - zip_safe=True, - ) +setup( + name="GromacsWrapper", + version=versioneer.get_version(), + cmdclass=versioneer.get_cmdclass(), + description="A Python wrapper around the Gromacs tools.", + long_description=long_description, + long_description_content_type="text/x-rst", + author="Oliver Beckstein", + author_email="orbeckst@gmail.com", + license="GPLv3", + url="https://github.com/Becksteinlab/GromacsWrapper", + download_url="https://github.com/Becksteinlab/GromacsWrapper/downloads", + keywords="science Gromacs analysis 'molecular dynamics'", + classifiers=[ + "Development Status :: 4 - Beta", + "Environment :: Console", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: GNU General Public License (GPL)", + "License :: OSI Approved :: BSD License", + "Operating System :: POSIX", + "Operating System :: MacOS :: MacOS X", + "Operating System :: Microsoft :: Windows ", + "Programming Language :: Python", + "Programming Language :: Python :: 2", + "Programming Language :: Python :: 2.7", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Topic :: Scientific/Engineering :: Bio-Informatics", + "Topic :: Scientific/Engineering :: Chemistry", + "Topic :: Software Development :: Libraries :: Python Modules", + ], + packages=find_packages( + exclude=["scripts", "tests", "tests.*", "extras", "doc/examples"] + ), + scripts=[ + "scripts/gw-join_parts.py", + "scripts/gw-merge_topologies.py", + "scripts/gw-forcefield.py", + "scripts/gw-partial_tempering.py", + ], + package_data={ + "gromacs": [ + "templates/*.sge", + "templates/*.pbs", # template files + "templates/*.ll", + "templates/*.sh", + "templates/*.mdp", + "templates/*.cfg", + ], + }, + install_requires=[ + "numpy>=1.0", + "six", # towards py 3 compatibility + "numkit", # numerical helpers + "matplotlib", + ], + tests_require=["pytest", "numpy>=1.0", "pandas>=0.17"], + zip_safe=True, +) diff --git a/tests/__init__.py b/tests/__init__.py index a8a6da4f..8515afc1 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -8,4 +8,5 @@ # # The following is left because (1) harmless and (2) documentation import matplotlib -matplotlib.use('agg') + +matplotlib.use("agg") diff --git a/tests/conftest.py b/tests/conftest.py index 1d701346..37a06690 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -23,26 +23,25 @@ def pytest_addoption(parser): """Add options to control Gromacs performance settings""" - group = parser.getgroup('gmx') + group = parser.getgroup("gmx") group.addoption( - "--low-performance", - action="store_true", - 
dest='low_performance', - help='Instruct Gromacs to run in low performance ' - 'mode (as defined in tests)', + "--low-performance", + action="store_true", + dest="low_performance", + help="Instruct Gromacs to run in low performance " "mode (as defined in tests)", ) group.addoption( - '--no-append-suffix', - action='store_false', - dest='append_suffix', - help='modify config file to set append_suffix to "no"' + "--no-append-suffix", + action="store_false", + dest="append_suffix", + help='modify config file to set append_suffix to "no"', ) group.addoption( - '--link-gmx-mpi', - action='store_true', - dest='link_gmx_mpi', - help='link the gmx executable to the home directory as gmx_mpi and ' - 'add this path as a "tools" in the config file' + "--link-gmx-mpi", + action="store_true", + dest="link_gmx_mpi", + help="link the gmx executable to the home directory as gmx_mpi and " + 'add this path as a "tools" in the config file', ) @@ -52,10 +51,10 @@ def low_performance(request): def gmx_mpi_linked(link): - gmx_exe = distutils.spawn.find_executable('gmx') - gmx_mpi = Path('~/gmx_mpi').expanduser() + gmx_exe = distutils.spawn.find_executable("gmx") + gmx_mpi = Path("~/gmx_mpi").expanduser() if not link: - return '' + return "" else: gmx_mpi.symlink_to(gmx_exe) return str(gmx_mpi.expanduser()) @@ -63,32 +62,32 @@ def gmx_mpi_linked(link): @pytest.fixture def modified_config(request): - link_gmx_mpi = request.config.getoption('link_gmx_mpi') - tools = str(Path('~/gmx_mpi').expanduser()) if link_gmx_mpi else '' - append_suffix = 'yes' if request.config.getoption('append_suffix') else 'no' + link_gmx_mpi = request.config.getoption("link_gmx_mpi") + tools = str(Path("~/gmx_mpi").expanduser()) if link_gmx_mpi else "" + append_suffix = "yes" if request.config.getoption("append_suffix") else "no" return tools, append_suffix, Path -path_config = Path('~/.gromacswrapper.cfg').expanduser() +path_config = Path("~/.gromacswrapper.cfg").expanduser() gw_config = ConfigParser() if path_config.exists(): gw_config.read(str(path_config.resolve())) config_existed = True else: - gw_config.read('gromacs/templates/gromacswrapper.cfg') + gw_config.read("gromacs/templates/gromacswrapper.cfg") config_existed = False -config_backup = path_config.with_suffix('.bak') +config_backup = path_config.with_suffix(".bak") def pytest_configure(config): - link_gmx_mpi = config.getoption('link_gmx_mpi') - append_suffix = 'yes' if config.getoption('append_suffix') else 'no' + link_gmx_mpi = config.getoption("link_gmx_mpi") + append_suffix = "yes" if config.getoption("append_suffix") else "no" if config_existed: shutil.copy(str(path_config), str(config_backup)) tools = gmx_mpi_linked(link_gmx_mpi) - gw_config.set('Gromacs', 'tools', tools) - gw_config.set('Gromacs', 'append_suffix', append_suffix) - with open(str(path_config), 'w') as config_file: + gw_config.set("Gromacs", "tools", tools) + gw_config.set("Gromacs", "append_suffix", append_suffix) + with open(str(path_config), "w") as config_file: gw_config.write(config_file) @@ -98,5 +97,5 @@ def pytest_unconfigure(config): else: os.remove(str(path_config)) if config.option.link_gmx_mpi: - gmx_mpi = Path('~/gmx_mpi').expanduser() + gmx_mpi = Path("~/gmx_mpi").expanduser() gmx_mpi.unlink() diff --git a/tests/datafiles.py b/tests/datafiles.py index 3aa65d12..0642e3a8 100644 --- a/tests/datafiles.py +++ b/tests/datafiles.py @@ -19,7 +19,6 @@ import os.path from pkg_resources import resource_filename + def datafile(name): return resource_filename(__name__, os.path.join("data", name)) - - 
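For reviewers who want to see the two most heavily reformatted helpers from the gromacs/utilities.py hunks above in action, here is a minimal usage sketch. It is not part of the patch: it relies only on the signatures visible in the hunks (anyopen(datasource, mode="rt", reset=True) returning an open stream, and the in_dir(directory, create=True) context manager); the directory and file names are placeholder examples.

# Sketch only -- placeholder names, not taken from the patch.
from gromacs import utilities

with utilities.in_dir("scratch", create=True):
    # chdir into ./scratch (created on demand); the original cwd is restored on exit
    with utilities.anyopen("notes.txt.gz", mode="wt") as out:
        # extension-based dispatch: ".gz" -> gzip.open, ".bz2" -> bz2, anything else -> open()
        out.write("hello\n")
    with utilities.anyopen("notes.txt.gz", mode="rt") as infile:
        print(infile.readline())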
diff --git a/tests/fileformats/test_convert.py b/tests/fileformats/test_convert.py index 1e4ec6b9..31a5a3b8 100644 --- a/tests/fileformats/test_convert.py +++ b/tests/fileformats/test_convert.py @@ -13,57 +13,58 @@ @pytest.mark.parametrize( - 's,expected', - [(100, 100), - ("Jabberwock", u"Jabberwock"), - (u"Ångström", u"Ångström"), - ] + "s,expected", + [ + (100, 100), + ("Jabberwock", "Jabberwock"), + ("Ångström", "Ångström"), + ], ) def test_to_unicode(s, expected): output = convert.to_unicode(s) assert output == expected + class TestAutoconverter(object): def _convert(self, s, **kwargs): ac = convert.Autoconverter(**kwargs) assert ac.active is True return ac.convert(s) - @pytest.mark.parametrize( - "s,expected", [ - ('foo bar 22 boing ---', ('foo', 'bar', 22, 'boing', None)), - ('1 2 3 4', (1, 2, 3, 4)), - ('1 2 3 4', (1, 2, 3, 4)), - ('True x X yes Present', (True, True, True, True, True)), - ('False no - None none', (False, False, False, False, False)) + "s,expected", + [ + ("foo bar 22 boing ---", ("foo", "bar", 22, "boing", None)), + ("1 2 3 4", (1, 2, 3, 4)), + ("1 2 3 4", (1, 2, 3, 4)), + ("True x X yes Present", (True, True, True, True, True)), + ("False no - None none", (False, False, False, False, False)), ], ) - @pytest.mark.parametrize('sep', (True, None)) + @pytest.mark.parametrize("sep", (True, None)) def test_convert_default(self, s, expected, sep): output = self._convert(s, sep=sep) assert_equal(output, expected) @pytest.mark.parametrize( - "s,expected", [ - ('1,2,3,4', (1, 2, 3, 4)), - ('1 2,3,4', ('1 2', 3, 4)), - ] + "s,expected", + [ + ("1,2,3,4", (1, 2, 3, 4)), + ("1 2,3,4", ("1 2", 3, 4)), + ], ) - def test_convert_default_sep(self, s, expected, sep=','): + def test_convert_default_sep(self, s, expected, sep=","): output = self._convert(s, sep=sep) assert_equal(output, expected) - @pytest.mark.parametrize( - "s,expected", [ - ('2.71213 3.14', (2.71213, 3.14)), - ('1000 -234 987654', (1000, -234, 987654)), - ] + "s,expected", + [ + ("2.71213 3.14", (2.71213, 3.14)), + ("1000 -234 987654", (1000, -234, 987654)), + ], ) - @pytest.mark.parametrize('sep', (True, None)) + @pytest.mark.parametrize("sep", (True, None)) def test_convert_numbers(self, s, expected, sep): output = self._convert(s, sep=sep) assert_almost_equal(output, expected) - - diff --git a/tests/fileformats/test_mdp.py b/tests/fileformats/test_mdp.py index dafc117f..afd76ba7 100644 --- a/tests/fileformats/test_mdp.py +++ b/tests/fileformats/test_mdp.py @@ -9,16 +9,18 @@ from ..datafiles import datafile + @pytest.fixture( - params=['original', 'written', 'no_autoconvert'], + params=["original", "written", "no_autoconvert"], ) def CUSTOM_EM_MDP(request, tmpdir): - autoconvert = not request.param == 'no_autoconvert' - mdp = gromacs.fileformats.mdp.MDP(datafile('custom_em.mdp'), - autoconvert=autoconvert) - if request.param == 'written': + autoconvert = not request.param == "no_autoconvert" + mdp = gromacs.fileformats.mdp.MDP( + datafile("custom_em.mdp"), autoconvert=autoconvert + ) + if request.param == "written": # to check that written mdp has same data - out = str(tmpdir.join('out.mdp')) + out = str(tmpdir.join("out.mdp")) mdp.write(out) mdp = gromacs.fileformats.mdp.MDP(out) return mdp, autoconvert @@ -29,80 +31,85 @@ def test_values(self, CUSTOM_EM_MDP): mdp, autoconvert = CUSTOM_EM_MDP if autoconvert: + def conv(val): return val + else: + def conv(val): return str(val) if autoconvert: - assert_equal(mdp['include'], ['-I.', '-I..', '-I../top']) - assert_equal(mdp['define'], '-DPOSRES') + 
assert_equal(mdp["include"], ["-I.", "-I..", "-I../top"]) + assert_equal(mdp["define"], "-DPOSRES") else: - assert mdp['include'] == '-I. -I.. -I../top' - assert mdp['define'] == '-DPOSRES' - assert mdp['integrator'] == 'cg' - assert mdp['emtol'] == conv(500) - assert mdp['emstep'] == conv(0.01) - assert mdp['nsteps'] == conv(1000) - assert mdp['nstcgsteep'] == conv(100) - assert mdp['constraints'] == 'none' - assert mdp['nstcomm'] == conv(1) - assert mdp['cutoff-scheme'] == 'Verlet' - assert mdp['vdwtype'] == 'cutoff' - assert mdp['coulombtype'] == 'PME' - assert mdp['ns_type'] == 'grid' - assert mdp['rlist'] == conv(1.0) - assert mdp['rcoulomb'] == conv(1.0) - assert mdp['rvdw'] == conv(1.0) - assert mdp['rvdw-switch'] == conv(0.8) - assert mdp['Tcoupl'] == 'no' - assert mdp['Pcoupl'] == 'no' - assert mdp['gen_vel'] == 'no' - assert mdp['nstxout'] == conv(0) + assert mdp["include"] == "-I. -I.. -I../top" + assert mdp["define"] == "-DPOSRES" + assert mdp["integrator"] == "cg" + assert mdp["emtol"] == conv(500) + assert mdp["emstep"] == conv(0.01) + assert mdp["nsteps"] == conv(1000) + assert mdp["nstcgsteep"] == conv(100) + assert mdp["constraints"] == "none" + assert mdp["nstcomm"] == conv(1) + assert mdp["cutoff-scheme"] == "Verlet" + assert mdp["vdwtype"] == "cutoff" + assert mdp["coulombtype"] == "PME" + assert mdp["ns_type"] == "grid" + assert mdp["rlist"] == conv(1.0) + assert mdp["rcoulomb"] == conv(1.0) + assert mdp["rvdw"] == conv(1.0) + assert mdp["rvdw-switch"] == conv(0.8) + assert mdp["Tcoupl"] == "no" + assert mdp["Pcoupl"] == "no" + assert mdp["gen_vel"] == "no" + assert mdp["nstxout"] == conv(0) def test_comments(self, CUSTOM_EM_MDP): mdp, _ = CUSTOM_EM_MDP - assert mdp['C0001'] == 'custom EM' - assert sum(1 for k in mdp if k.startswith('C0')) == 1 + assert mdp["C0001"] == "custom EM" + assert sum(1 for k in mdp if k.startswith("C0")) == 1 def test_blank_lines(self, CUSTOM_EM_MDP): mdp, _ = CUSTOM_EM_MDP - assert sum(1 for k in mdp if k.startswith('B0')) == 6 + assert sum(1 for k in mdp if k.startswith("B0")) == 6 def test_no_filename(self, tmpdir): mdp = gromacs.fileformats.mdp.MDP() - mdp['define'] = ['-DPOSRES', '-DTHIS'] - mdp['vdwtype'] = 'cutoff' - mdp['nsteps'] = 1234 + mdp["define"] = ["-DPOSRES", "-DTHIS"] + mdp["vdwtype"] = "cutoff" + mdp["nsteps"] = 1234 - out = str(tmpdir.join('out.mdp')) + out = str(tmpdir.join("out.mdp")) mdp.write(out) back = gromacs.fileformats.mdp.MDP(out) - assert_equal(back['define'], ['-DPOSRES', '-DTHIS']) - assert back['vdwtype'] == 'cutoff' - assert back['nsteps'] == 1234 + assert_equal(back["define"], ["-DPOSRES", "-DTHIS"]) + assert back["vdwtype"] == "cutoff" + assert back["nsteps"] == 1234 + @pytest.fixture def NONSENSE_MDP(tmpdir): - outfile = str(tmpdir.join('nonsense.mdp')) + outfile = str(tmpdir.join("nonsense.mdp")) - with open(datafile('custom_em.mdp'), 'r') as infile: + with open(datafile("custom_em.mdp"), "r") as infile: data = infile.read() - data += 'errors: plenty\n' + data += "errors: plenty\n" - with open(outfile, 'w') as out: + with open(outfile, "w") as out: out.write(data) return outfile def test_bad_mdp(NONSENSE_MDP): - with pytest.raises(gromacs.ParseError, - match="unknown line in mdp file, 'errors: plenty'"): + with pytest.raises( + gromacs.ParseError, match="unknown line in mdp file, 'errors: plenty'" + ): gromacs.fileformats.mdp.MDP(NONSENSE_MDP) diff --git a/tests/fileformats/test_ndx.py b/tests/fileformats/test_ndx.py index 333b615d..2b82811a 100644 --- a/tests/fileformats/test_ndx.py +++ 
b/tests/fileformats/test_ndx.py @@ -10,20 +10,18 @@ from ..datafiles import datafile -@pytest.fixture( - params=['original', 'nofilename', 'written'] -) +@pytest.fixture(params=["original", "nofilename", "written"]) def SIMPLE_NDX(request, tmpdir): - ndx = gromacs.fileformats.ndx.NDX(datafile('simple.ndx')) + ndx = gromacs.fileformats.ndx.NDX(datafile("simple.ndx")) - if request.param == 'written': - out = str(tmpdir.join('out.ndx')) + if request.param == "written": + out = str(tmpdir.join("out.ndx")) ndx.write(out) ndx = gromacs.fileformats.ndx.NDX(out) - elif request.param == 'nofilename': + elif request.param == "nofilename": ndx = gromacs.fileformats.ndx.NDX() - ndx.read(datafile('simple.ndx')) + ndx.read(datafile("simple.ndx")) return ndx @@ -31,18 +29,18 @@ def SIMPLE_NDX(request, tmpdir): def test_read(SIMPLE_NDX): ndx = SIMPLE_NDX - assert_equal(ndx['Oxygen'], [1, 4, 7]) - assert_equal(ndx['Hydrogen'], [2, 3, 5, 6, 8, 9]) + assert_equal(ndx["Oxygen"], [1, 4, 7]) + assert_equal(ndx["Hydrogen"], [2, 3, 5, 6, 8, 9]) def test_get(SIMPLE_NDX): - assert_equal(SIMPLE_NDX.get('Oxygen'), [1, 4, 7]) + assert_equal(SIMPLE_NDX.get("Oxygen"), [1, 4, 7]) def test_set(SIMPLE_NDX): - SIMPLE_NDX['Nitrogen'] = [10, 11, 12] + SIMPLE_NDX["Nitrogen"] = [10, 11, 12] - assert_equal(SIMPLE_NDX['Nitrogen'], [10, 11, 12]) + assert_equal(SIMPLE_NDX["Nitrogen"], [10, 11, 12]) def test_size(SIMPLE_NDX): @@ -50,8 +48,8 @@ def test_size(SIMPLE_NDX): def test_sizes(SIMPLE_NDX): - assert SIMPLE_NDX.sizes == {'Oxygen': 3, 'Hydrogen': 6} + assert SIMPLE_NDX.sizes == {"Oxygen": 3, "Hydrogen": 6} def test_groups(SIMPLE_NDX): - assert list(SIMPLE_NDX.groups) == ['Oxygen', 'Hydrogen'] + assert list(SIMPLE_NDX.groups) == ["Oxygen", "Hydrogen"] diff --git a/tests/fileformats/test_xpm.py b/tests/fileformats/test_xpm.py index 1db24a0c..dc5d1ba5 100644 --- a/tests/fileformats/test_xpm.py +++ b/tests/fileformats/test_xpm.py @@ -10,24 +10,28 @@ from ..datafiles import datafile + @pytest.fixture def ssfile(): return datafile("fileformats/ss.xpm.bz2") + @pytest.fixture def xpm(ssfile): return XPM(filename=ssfile) + @pytest.fixture def xpm_df(xpm): return xpm.to_df() + class TestXPM(object): def _run_tests(self, x): assert_equal(x.array.shape, (500, 769)) assert_equal(x.xvalues, np.arange(0, 500)) assert_equal(x.yvalues, np.arange(1, 770)) - assert_equal((x.array == 'A-Helix').sum(), 292829) + assert_equal((x.array == "A-Helix").sum(), 292829) def test_constructor(self, xpm): self._run_tests(xpm) @@ -44,6 +48,6 @@ def test_to_pd(self, xpm_df): assert_equal(xpm_df.shape, (500, 770)) def test_to_pd_types(self, xpm_df): - time = xpm_df['Time'] + time = xpm_df["Time"] assert len(time) == 500 assert time.dtype == np.dtype("int64") # true for this file diff --git a/tests/fileformats/test_xvg.py b/tests/fileformats/test_xvg.py index 60688e97..ec2ad17e 100644 --- a/tests/fileformats/test_xvg.py +++ b/tests/fileformats/test_xvg.py @@ -9,37 +9,38 @@ import gromacs.fileformats.xvg from gromacs.formats import XVG + @pytest.fixture(scope="module") def data(): data = np.random.normal(loc=2.1, scale=0.5, size=(6, 1000)) data[0] = 0.1 * np.arange(data.shape[1]) return data + @pytest.fixture(scope="module") -def correldata(omega=2*np.pi/5): +def correldata(omega=2 * np.pi / 5): t = np.linspace(-100, 100, 10000) - Y1 = 1 * np.sin(omega*t) - 4 - Y2 = -0.5 * np.sin(0.3*omega*t) + Y1 = 1 * np.sin(omega * t) - 4 + Y2 = -0.5 * np.sin(0.3 * omega * t) DY = np.random.normal(scale=2, size=(2, len(t))) return np.vstack([t, Y1 + DY[0], Y2 + DY[1]]) + class 
TestXVG_array(object): @pytest.fixture def xvg(self, data): return XVG(array=data.copy(), names="t,a,b,c,d,e") def test_names(self, xvg): - assert_equal(xvg.names, ['t', 'a', 'b', 'c', 'd', 'e']) + assert_equal(xvg.names, ["t", "a", "b", "c", "d", "e"]) def test_array(self, xvg, data): assert_equal(xvg.array.shape, data.shape) assert_almost_equal(xvg.array, data) - @pytest.mark.parametrize("name", - ("mean", "max", "min", "std")) + @pytest.mark.parametrize("name", ("mean", "max", "min", "std")) def test_props(self, xvg, data, name): - assert_almost_equal(getattr(xvg, name), - getattr(data[1:], name)(axis=1)) + assert_almost_equal(getattr(xvg, name), getattr(data[1:], name)(axis=1)) def test_write_read(self, xvg, tmpdir): fname = "random.xvg" @@ -61,9 +62,10 @@ def test_correl(self, correldata): assert_equal(sigma.shape, (2,)) assert_equal(tc.shape, (2,)) - @pytest.mark.parametrize('method', ('mean', 'circmean', 'min', 'max', - 'rms', 'percentile', 'smooth', - 'error')) + @pytest.mark.parametrize( + "method", + ("mean", "circmean", "min", "max", "rms", "percentile", "smooth", "error"), + ) def test_decimate(self, correldata, method, maxpoints=100): xvg = XVG(array=correldata) data = xvg.array @@ -87,10 +89,10 @@ def test_break_array(): angles = np.pi * np.array([-1.9, -1, -1, -0.5, 0, 0.9, 1.5, 2, -2, -1.4]) expected = np.pi * np.array([-1.9, -1, -1, -0.5, 0, 0.9, 1.5, 2, np.NAN, -2, -1.4]) other = np.ones_like(angles) - ma, mother = gromacs.fileformats.xvg.break_array(angles, - threshold=np.pi, other=other) + ma, mother = gromacs.fileformats.xvg.break_array( + angles, threshold=np.pi, other=other + ) assert isinstance(ma, np.ma.core.MaskedArray) assert isinstance(mother, np.ma.core.MaskedArray) assert_almost_equal(ma, expected) assert len(ma) == len(mother) - diff --git a/tests/fileformats/top/test_amber03star.py b/tests/fileformats/top/test_amber03star.py index 238deabc..7e52d86f 100644 --- a/tests/fileformats/top/test_amber03star.py +++ b/tests/fileformats/top/test_amber03star.py @@ -12,9 +12,25 @@ from .top import TopologyTest from ...datafiles import datafile -@pytest.mark.xfail(gromacs.release().startswith("2022"), - reason="issue https://github.com/Becksteinlab/GromacsWrapper/issues/236") + +@pytest.mark.xfail( + gromacs.release().startswith("2022"), + reason="issue https://github.com/Becksteinlab/GromacsWrapper/issues/236", +) class TestAmber03star(TopologyTest): - processed = datafile('fileformats/top/amber03star/processed.top') - conf = datafile('fileformats/top/amber03star/conf.gro') - molecules = ['Protein', 'SOL', 'IB+', 'CA', 'CL', 'NA', 'MG', 'K', 'RB', 'CS', 'LI', 'ZN'] + processed = datafile("fileformats/top/amber03star/processed.top") + conf = datafile("fileformats/top/amber03star/conf.gro") + molecules = [ + "Protein", + "SOL", + "IB+", + "CA", + "CL", + "NA", + "MG", + "K", + "RB", + "CS", + "LI", + "ZN", + ] diff --git a/tests/fileformats/top/test_amber03w.py b/tests/fileformats/top/test_amber03w.py index 36ce4286..93effe6f 100644 --- a/tests/fileformats/top/test_amber03w.py +++ b/tests/fileformats/top/test_amber03w.py @@ -12,9 +12,25 @@ from .top import TopologyTest from ...datafiles import datafile -@pytest.mark.xfail(gromacs.release().startswith("2022"), - reason="issue https://github.com/Becksteinlab/GromacsWrapper/issues/236") + +@pytest.mark.xfail( + gromacs.release().startswith("2022"), + reason="issue https://github.com/Becksteinlab/GromacsWrapper/issues/236", +) class TestAmber03w(TopologyTest): - processed = datafile('fileformats/top/amber03w/processed.top') - 
conf = datafile('fileformats/top/amber03w/conf.gro') - molecules = ['Protein_chain_A', 'SOL', 'IB+', 'CA', 'CL', 'NA', 'MG', 'K', 'RB', 'CS', 'LI', 'ZN'] + processed = datafile("fileformats/top/amber03w/processed.top") + conf = datafile("fileformats/top/amber03w/conf.gro") + molecules = [ + "Protein_chain_A", + "SOL", + "IB+", + "CA", + "CL", + "NA", + "MG", + "K", + "RB", + "CS", + "LI", + "ZN", + ] diff --git a/tests/fileformats/top/test_charmm22.py b/tests/fileformats/top/test_charmm22.py index fe4e79e6..0108d0cd 100644 --- a/tests/fileformats/top/test_charmm22.py +++ b/tests/fileformats/top/test_charmm22.py @@ -10,7 +10,8 @@ from .top import TopologyTest from ...datafiles import datafile + class TestCharmm22st(TopologyTest): - processed = datafile('fileformats/top/charmm22st/processed.top') - conf = datafile('fileformats/top/charmm22st/conf.gro') - molecules = ['SOL', 'Protein', 'Ion', 'Cal', 'Ces', 'CL', 'K', 'NA', 'ZN'] + processed = datafile("fileformats/top/charmm22st/processed.top") + conf = datafile("fileformats/top/charmm22st/conf.gro") + molecules = ["SOL", "Protein", "Ion", "Cal", "Ces", "CL", "K", "NA", "ZN"] diff --git a/tests/fileformats/top/top.py b/tests/fileformats/top/top.py index afeed389..4eef987e 100644 --- a/tests/fileformats/top/top.py +++ b/tests/fileformats/top/top.py @@ -20,181 +20,223 @@ from ...datafiles import datafile + def errmsg_helper(attr, attr1, attr2): - msg = ["attr = " + str(attr)] - for pair in zip(attr1, attr2): - if pair[0] == pair[1]: - continue - msg.append(str(pair)) - return "\n".join(msg) + msg = ["attr = " + str(attr)] + for pair in zip(attr1, attr2): + if pair[0] == pair[1]: + continue + msg.append(str(pair)) + return "\n".join(msg) + def grompp(f, c, p, prefix="topol", **kwargs): - s = prefix + '.tpr' - po = prefix + '.mdp' + s = prefix + ".tpr" + po = prefix + ".mdp" + + rc, output, junk = gromacs.grompp( + f=f, p=p, c=c, o=s, po=po, stdout=False, stderr=False, **kwargs + ) + assert rc == 0, "grompp -o {0} -po {1} -f {2} -c {3} -p {4} failed".format( + s, po, f, c, p + ) + return s - rc, output, junk = gromacs.grompp(f=f, p=p, c=c, o=s, po=po, stdout=False, stderr=False, - **kwargs) - assert rc == 0, \ - "grompp -o {0} -po {1} -f {2} -c {3} -p {4} failed".format(s, po, f, c, p) - return s def mdrun(s, prefix, nt=0): - o = prefix + '.trr' - rc, output, junk = gromacs.mdrun(v=True, s=s, o=o, stdout=False, stderr=False, nt=nt) - assert rc == 0, "mdrun failed" - return o + o = prefix + ".trr" + rc, output, junk = gromacs.mdrun( + v=True, s=s, o=o, stdout=False, stderr=False, nt=nt + ) + assert rc == 0, "mdrun failed" + return o -def rerun_energy(s, o, prefix, nt=0): - e = prefix + '.edr' - rc, output, junk = gromacs.mdrun(v=True, s=s, rerun=o, e=e, stdout=False, stderr=False, nt=nt) - assert rc == 0, "mdrun failed" - xvg = prefix + '.xvg' - rc, output, junk = gromacs.g_energy(f=e, o=xvg, - input=("Proper-Dih.", "Improper-Dih.", "CMAP-Dih.", - "LJ-14", "Coulomb-14", "LJ-(SR)", "Coulomb-(SR)", - "Coul.-recip.", "Potential"), stdout=False, stderr=False) - assert rc == 0, "g_energy failed" +def rerun_energy(s, o, prefix, nt=0): + e = prefix + ".edr" + rc, output, junk = gromacs.mdrun( + v=True, s=s, rerun=o, e=e, stdout=False, stderr=False, nt=nt + ) + assert rc == 0, "mdrun failed" + + xvg = prefix + ".xvg" + rc, output, junk = gromacs.g_energy( + f=e, + o=xvg, + input=( + "Proper-Dih.", + "Improper-Dih.", + "CMAP-Dih.", + "LJ-14", + "Coulomb-14", + "LJ-(SR)", + "Coulomb-(SR)", + "Coul.-recip.", + "Potential", + ), + stdout=False, + 
stderr=False, + ) + assert rc == 0, "g_energy failed" + + return XVG(xvg).to_df() - return XVG(xvg).to_df() class TopologyTest(object): - mdp = datafile('fileformats/top/grompp.mdp') - - def test_basic(self): - path = self.processed - top = TOP(path) - assert list(top.dict_molname_mol.keys()) == self.molecules - - def test_equal(self): - """ - Load the same topology twice and check if __eq__ comparisions work - """ - path = self.processed - - top1 = TOP(path) - top2 = TOP(path) - - attrs1 = [section for section in top1.found_sections if "types" in section] - attrs2 = [section for section in top1.found_sections if "types" in section] - - assert attrs1 == attrs2, "sections are not equal" - - for attr in attrs1: - assert getattr(top1, attr) == getattr(top2, attr), \ - "{0} not identical".format(attr) - - # def test_parameter_types(self): - # """Test if all the parameter types are the same across two topologies - # """ - # pass - - # def test_parameters(self): - # """Test if per-molecule parameters and parameter type assignments are identical - # """ - # pass - - # def test_molecule_parameters(self): - # """Called by `test_parameters()` for each molecule in the system - # """ - - def test_read(self, tmpdir): - path = self.processed - try: - top1 = TOP(path) - except Exception as err: - raise AssertionError("Failed to read {0}. Raised:\n{1}".format( - path, str(err))) - attrs1 = [section for section in top1.found_sections if "types" in section] - assert attrs1 - - def test_read_write(self, tmpdir): - """Read a topology, write it out, and read in the output again. - Writing the topology out should make no change to the topology. - """ - path = self.processed - with tmpdir.as_cwd(): - filename1 = 'processed_1.top' - filename2 = 'processed_2.top' - - top1 = TOP(path) - top1.write(filename1) - - # make life harder, write out again - top2 = TOP(filename1) - top2.write(filename2) - - top2 = TOP(filename2) - - attrs1 = [section for section in top1.found_sections if "types" in section] - attrs2 = [section for section in top1.found_sections if "types" in section] - - assert attrs1 == attrs2 - - #attrs1 = ['atomtypes', 'pairtypes', 'bondtypes', 'constrainttypes', 'angletypes', 'dihedraltypes', 'dihedraltypes', 'cmaptypes'] - #attrs1 = ['atomtypes',] - - for attr in attrs1: - attr1 = getattr(top1, attr) - attr2 = getattr(top2, attr) - assert attr1 == attr2, errmsg_helper(attr, attr1, attr2) - - def test_grompp(self, tmpdir): - """Check if grompp can be run successfully at all""" - f = self.mdp - c = self.conf - p = self.processed - with tmpdir.as_cwd(): - o = 'topol.tpr' - po = 'mdout.mdp' - rc, output, junk = gromacs.grompp(f=f, p=p, c=c, o=o, po=po, stdout=False, stderr=False) - assert rc == 0, "grompp -f {0} -o {1} ... 
failed to run".format(f, o) - - def test_mdrun(self, tmpdir, low_performance): - """Check if grompp can be run successfully at all""" - # set low_performance with - # - # pytest --low-performance gromacs/tests - # - f = self.mdp - c = self.conf - processed = self.processed - - nt = 2 if low_performance else 0 - - with tmpdir.as_cwd(): - tpr = grompp(f, c, processed, prefix="reference") - reference_trr = mdrun(tpr, prefix="reference", nt=nt) - df1 = rerun_energy(tpr, reference_trr, prefix="reference", nt=nt) - - scaled = "scaled.top" - kwargs = dict(banned_lines='', topfile=processed, outfile=scaled, - scale_lipids=1.0, scale_protein=1.0) - scaling.partial_tempering(**kwargs) - - assert os.path.exists(scaled), "failed to produce {0}".format(scaled) - - tpr = grompp(f, c, scaled, prefix="scaled") - df2 = rerun_energy(tpr, reference_trr, prefix="scaled", nt=nt) - - assert_frame_equal(df1, df2, check_names=True, check_like=True) - - scaled = "scaled.top" - kwargs = dict(banned_lines='', topfile=processed, - outfile=scaled, scale_lipids=1.0, scale_protein=0.5) - scaling.partial_tempering(**kwargs) - tpr = grompp(f, c, scaled, prefix="scaled", maxwarn=1) - df3 = rerun_energy(tpr, reference_trr, prefix="scaled", nt=nt) - # print(df1, df1.columns) - # print(df3, df3.columns) - unscaled_terms = ['Time (ps)', 'Improper Dih.'] - scaled_terms = ['Proper Dih.'] - - assert_frame_equal(df1[unscaled_terms], - df3[unscaled_terms], - check_names=True, check_like=True) - assert_frame_equal(df1[scaled_terms], - 2*df3[scaled_terms], - check_names=True, check_like=True) - - + mdp = datafile("fileformats/top/grompp.mdp") + + def test_basic(self): + path = self.processed + top = TOP(path) + assert list(top.dict_molname_mol.keys()) == self.molecules + + def test_equal(self): + """ + Load the same topology twice and check if __eq__ comparisions work + """ + path = self.processed + + top1 = TOP(path) + top2 = TOP(path) + + attrs1 = [section for section in top1.found_sections if "types" in section] + attrs2 = [section for section in top1.found_sections if "types" in section] + + assert attrs1 == attrs2, "sections are not equal" + + for attr in attrs1: + assert getattr(top1, attr) == getattr( + top2, attr + ), "{0} not identical".format(attr) + + # def test_parameter_types(self): + # """Test if all the parameter types are the same across two topologies + # """ + # pass + + # def test_parameters(self): + # """Test if per-molecule parameters and parameter type assignments are identical + # """ + # pass + + # def test_molecule_parameters(self): + # """Called by `test_parameters()` for each molecule in the system + # """ + + def test_read(self, tmpdir): + path = self.processed + try: + top1 = TOP(path) + except Exception as err: + raise AssertionError( + "Failed to read {0}. Raised:\n{1}".format(path, str(err)) + ) + attrs1 = [section for section in top1.found_sections if "types" in section] + assert attrs1 + + def test_read_write(self, tmpdir): + """Read a topology, write it out, and read in the output again. + Writing the topology out should make no change to the topology. 
+ """ + path = self.processed + with tmpdir.as_cwd(): + filename1 = "processed_1.top" + filename2 = "processed_2.top" + + top1 = TOP(path) + top1.write(filename1) + + # make life harder, write out again + top2 = TOP(filename1) + top2.write(filename2) + + top2 = TOP(filename2) + + attrs1 = [section for section in top1.found_sections if "types" in section] + attrs2 = [section for section in top1.found_sections if "types" in section] + + assert attrs1 == attrs2 + + # attrs1 = ['atomtypes', 'pairtypes', 'bondtypes', 'constrainttypes', 'angletypes', 'dihedraltypes', 'dihedraltypes', 'cmaptypes'] + # attrs1 = ['atomtypes',] + + for attr in attrs1: + attr1 = getattr(top1, attr) + attr2 = getattr(top2, attr) + assert attr1 == attr2, errmsg_helper(attr, attr1, attr2) + + def test_grompp(self, tmpdir): + """Check if grompp can be run successfully at all""" + f = self.mdp + c = self.conf + p = self.processed + with tmpdir.as_cwd(): + o = "topol.tpr" + po = "mdout.mdp" + rc, output, junk = gromacs.grompp( + f=f, p=p, c=c, o=o, po=po, stdout=False, stderr=False + ) + assert rc == 0, "grompp -f {0} -o {1} ... failed to run".format(f, o) + + def test_mdrun(self, tmpdir, low_performance): + """Check if grompp can be run successfully at all""" + # set low_performance with + # + # pytest --low-performance gromacs/tests + # + f = self.mdp + c = self.conf + processed = self.processed + + nt = 2 if low_performance else 0 + + with tmpdir.as_cwd(): + tpr = grompp(f, c, processed, prefix="reference") + reference_trr = mdrun(tpr, prefix="reference", nt=nt) + df1 = rerun_energy(tpr, reference_trr, prefix="reference", nt=nt) + + scaled = "scaled.top" + kwargs = dict( + banned_lines="", + topfile=processed, + outfile=scaled, + scale_lipids=1.0, + scale_protein=1.0, + ) + scaling.partial_tempering(**kwargs) + + assert os.path.exists(scaled), "failed to produce {0}".format(scaled) + + tpr = grompp(f, c, scaled, prefix="scaled") + df2 = rerun_energy(tpr, reference_trr, prefix="scaled", nt=nt) + + assert_frame_equal(df1, df2, check_names=True, check_like=True) + + scaled = "scaled.top" + kwargs = dict( + banned_lines="", + topfile=processed, + outfile=scaled, + scale_lipids=1.0, + scale_protein=0.5, + ) + scaling.partial_tempering(**kwargs) + tpr = grompp(f, c, scaled, prefix="scaled", maxwarn=1) + df3 = rerun_energy(tpr, reference_trr, prefix="scaled", nt=nt) + # print(df1, df1.columns) + # print(df3, df3.columns) + unscaled_terms = ["Time (ps)", "Improper Dih."] + scaled_terms = ["Proper Dih."] + + assert_frame_equal( + df1[unscaled_terms], + df3[unscaled_terms], + check_names=True, + check_like=True, + ) + assert_frame_equal( + df1[scaled_terms], + 2 * df3[scaled_terms], + check_names=True, + check_like=True, + ) diff --git a/tests/test_cbook.py b/tests/test_cbook.py index 984aa1be..8c46692c 100644 --- a/tests/test_cbook.py +++ b/tests/test_cbook.py @@ -25,37 +25,62 @@ def simulation(tmpdir): f = gromacs.setup.topology(struct=pdb, ff="oplsaa", water="tip4p") yield f -@pytest.mark.xfail(gromacs.release.startswith("2020.6"), - reason="pdb2gmx 2020.6 fails to build the TIP4P waters") + +@pytest.mark.xfail( + gromacs.release.startswith("2020.6"), + reason="pdb2gmx 2020.6 fails to build the TIP4P waters", +) def test_grompp_qtot(tmpdir, simulation): with tmpdir.mkdir("qtot").as_cwd(): - with open('none.mdp', 'w') as mdp: - mdp.write('; empty mdp file\nrcoulomb = 1\nrvdw = 1\nrlist = 1\n') - qtot = cbook.grompp_qtot(f="none.mdp", c=simulation['struct'], p=simulation['top'], - stdout=False, maxwarn=10) - 
assert_almost_equal(qtot, -4, decimal=5, - err_msg="grompp_qtot() failed to compute total charge correctly") - -@pytest.mark.xfail(gromacs.release.startswith("2020.6"), - reason="pdb2gmx 2020.6 fails to build the TIP4P waters") + with open("none.mdp", "w") as mdp: + mdp.write("; empty mdp file\nrcoulomb = 1\nrvdw = 1\nrlist = 1\n") + qtot = cbook.grompp_qtot( + f="none.mdp", + c=simulation["struct"], + p=simulation["top"], + stdout=False, + maxwarn=10, + ) + assert_almost_equal( + qtot, + -4, + decimal=5, + err_msg="grompp_qtot() failed to compute total charge correctly", + ) + + +@pytest.mark.xfail( + gromacs.release.startswith("2020.6"), + reason="pdb2gmx 2020.6 fails to build the TIP4P waters", +) def test_portable_topology(tmpdir, simulation): with tmpdir.mkdir("processed").as_cwd(): - pptopol = cbook.create_portable_topology(simulation['top'], simulation['struct']) + pptopol = cbook.create_portable_topology( + simulation["top"], simulation["struct"] + ) # correct filename assert os.path.split(pptopol)[-1].startswith("pp_") - lines = open(pptopol).readlines() + lines = open(pptopol).readlines() subsections_only = [line.strip() for line in lines if line.startswith("[")] assert re.match(r";\s+File 'system.top' was generated", lines[1]) assert re.match(r";\s+This is a standalone topology file", lines[6]) - for subsection in ("[ defaults ]", "[ atomtypes ]", "[ bondtypes ]", - "[ constrainttypes ]", "[ angletypes ]", "[ dihedraltypes ]", - "[ moleculetype ]", "[ atoms ]", "[ bonds ]", "[ pairs ]", - "[ angles ]", "[ dihedrals ]", - "[ system ]", "[ molecules ]"): + for subsection in ( + "[ defaults ]", + "[ atomtypes ]", + "[ bondtypes ]", + "[ constrainttypes ]", + "[ angletypes ]", + "[ dihedraltypes ]", + "[ moleculetype ]", + "[ atoms ]", + "[ bonds ]", + "[ pairs ]", + "[ angles ]", + "[ dihedrals ]", + "[ system ]", + "[ molecules ]", + ): assert subsection in subsections_only - - - diff --git a/tests/test_collections.py b/tests/test_collections.py index b6ee6f83..9877a7d4 100644 --- a/tests/test_collections.py +++ b/tests/test_collections.py @@ -13,23 +13,25 @@ import gromacs -@pytest.fixture(scope="function", - params=[ - [0, "foo", None, 2.7e-2, "foo"], - (0, "foo", None, 2.7e-2, "foo"), - set([0, "foo", None, 2.7e-2]), - ['ant', 'boar', 'ape', 'gnu'], - [['ant', 'spider'], ['boar', 'ape', 'gnu']] - ]) +@pytest.fixture( + scope="function", + params=[ + [0, "foo", None, 2.7e-2, "foo"], + (0, "foo", None, 2.7e-2, "foo"), + set([0, "foo", None, 2.7e-2]), + ["ant", "boar", "ape", "gnu"], + [["ant", "spider"], ["boar", "ape", "gnu"]], + ], +) def things(request): stuff = request.param return stuff, gromacs.collections.Collection(stuff) -@pytest.fixture(scope="function", - params=[ - ['ant', 'boar', 'ape', 'gnu'], - [u'åmeise', u'Beißfliege', u'Ürmelchen'] - ]) + +@pytest.fixture( + scope="function", + params=[["ant", "boar", "ape", "gnu"], ["åmeise", "Beißfliege", "Ürmelchen"]], +) def textthings(request): stuff = request.param return stuff, gromacs.collections.Collection(stuff) @@ -56,18 +58,15 @@ def test_save_load(self, things, tmpdir): assert newcollection.tolist() == list(seq) assert newcollection == collection - @pytest.mark.parametrize('method,args', - [ - ('startswith', (u'å',)), - ('upper', ()), - ('capitalize', ()) - ]) + @pytest.mark.parametrize( + "method,args", [("startswith", ("å",)), ("upper", ()), ("capitalize", ())] + ) def test_method_pass_through(self, textthings, method, args): seq, collection = textthings results = getattr(collection, method)(*args) assert results 
== [getattr(elem, method)(*args) for elem in seq] - @pytest.mark.parametrize('attribute', ['__doc__']) + @pytest.mark.parametrize("attribute", ["__doc__"]) def test_attribute_pass_through(self, textthings, attribute): _, collection = textthings results = getattr(collection, attribute) diff --git a/tests/test_config.py b/tests/test_config.py index 9ebb33bb..10b42686 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -24,11 +24,11 @@ @pytest.fixture def GMXRC(): # Try using GMXRC in config file: - GMXRC = gromacs.config.cfg.get('Gromacs', 'gmxrc') + GMXRC = gromacs.config.cfg.get("Gromacs", "gmxrc") if GMXRC: return GMXRC # get GMXRC from installed Gromacs conda package - for gmxexe in ('gmx', 'gmx_d', 'gmx_mpi', 'gmx_mpi_d', 'grompp', 'mdrun'): + for gmxexe in ("gmx", "gmx_d", "gmx_mpi", "gmx_mpi_d", "grompp", "mdrun"): path = gromacs.utilities.which(gmxexe) if path is not None: break @@ -36,7 +36,7 @@ def GMXRC(): raise RuntimeError("Cannot find Gromacs installation") bindir = os.path.dirname(path) - GMXRC = os.path.join(bindir, 'GMXRC') + GMXRC = os.path.join(bindir, "GMXRC") if not os.path.exists(GMXRC): raise IOError(errno.ENOENT, "Could not find Gromacs setup file", GMXRC) return GMXRC @@ -55,8 +55,14 @@ def temp_environ(): def test_set_gmxrc_environment(GMXRC): # not threadsafe: function modifies the global process environment - gmx_envvars = ('GMXBIN', 'GMXLDLIB', 'GMXMAN', 'GMXDATA', - 'GMXPREFIX', 'GROMACS_DIR') + gmx_envvars = ( + "GMXBIN", + "GMXLDLIB", + "GMXMAN", + "GMXDATA", + "GMXPREFIX", + "GROMACS_DIR", + ) with temp_environ() as environ: # clean environment so that we can detect changes @@ -70,8 +76,7 @@ def test_set_gmxrc_environment(GMXRC): gromacs.config.set_gmxrc_environment(GMXRC) newvars = set(environ) - set(before) for envvar in gmx_envvars: - assert envvar in newvars, \ - "GMX environment variable was not added correctly" + assert envvar in newvars, "GMX environment variable was not added correctly" def test_check_setup(): @@ -82,16 +87,16 @@ def test_check_setup(): def test_get_configuration(): cfg = gromacs.config.get_configuration() # could test more variables - assert cfg.getpath('DEFAULT', 'configdir') - assert isinstance(cfg.getboolean('Gromacs', 'append_suffix'), bool) + assert cfg.getpath("DEFAULT", "configdir") + assert isinstance(cfg.getboolean("Gromacs", "append_suffix"), bool) def test_modified_config(modified_config): tools, append_suffix, Path = modified_config - if tools != '': - assert Path('~/gmx_mpi').expanduser().exists() - assert gromacs.config.cfg.get('Gromacs', 'tools') == tools - assert gromacs.config.cfg.get('Gromacs', 'append_suffix') == append_suffix + if tools != "": + assert Path("~/gmx_mpi").expanduser().exists() + assert gromacs.config.cfg.get("Gromacs", "tools") == tools + assert gromacs.config.cfg.get("Gromacs", "append_suffix") == append_suffix def test_get_boolean(): @@ -99,19 +104,18 @@ def test_get_boolean(): # ConfigParser.getboolean code. # These tests should be unnecessary for the Python 3 version of the code. 
cfg = gromacs.config.cfg - assert isinstance(cfg.getboolean('Gromacs', 'append_suffix'), bool) - assert isinstance(cfg.getboolean('Gromacs', 'append_suffix', - fallback=True), bool) + assert isinstance(cfg.getboolean("Gromacs", "append_suffix"), bool) + assert isinstance(cfg.getboolean("Gromacs", "append_suffix", fallback=True), bool) with pytest.raises(ValueError): - cfg.getboolean('DEFAULT', 'configdir') + cfg.getboolean("DEFAULT", "configdir") with pytest.raises(ValueError): - cfg.getboolean('DEFAULT', 'configdir', fallback=True) + cfg.getboolean("DEFAULT", "configdir", fallback=True) with pytest.raises(NoOptionError): - cfg.getboolean('Gromacs', 'bool') + cfg.getboolean("Gromacs", "bool") with pytest.raises(NoSectionError): - cfg.getboolean('Not a section', 'bool') - cfg.set('Gromacs', 'bool', '') - cfg.remove_option('Gromacs', 'bool') - assert cfg.getboolean('Gromacs', 'bool', fallback=True) is True + cfg.getboolean("Not a section", "bool") + cfg.set("Gromacs", "bool", "") + cfg.remove_option("Gromacs", "bool") + assert cfg.getboolean("Gromacs", "bool", fallback=True) is True with pytest.raises(NoOptionError): - cfg.getboolean('Gromacs', 'bool') + cfg.getboolean("Gromacs", "bool") diff --git a/tests/test_core.py b/tests/test_core.py index 3971edd5..967bd4f5 100644 --- a/tests/test_core.py +++ b/tests/test_core.py @@ -13,14 +13,13 @@ import gromacs + # use 'ls' as command and only use common BSD/GNU options @pytest.fixture -def command(command="ls", - args=('-a', '-1'), - options={'l': True, 'v': True}): - Command_cls = type(command.capitalize(), - (gromacs.core.Command,), - {'command_name': command}) +def command(command="ls", args=("-a", "-1"), options={"l": True, "v": True}): + Command_cls = type( + command.capitalize(), (gromacs.core.Command,), {"command_name": command} + ) return Command_cls(*args, **options) @@ -32,7 +31,7 @@ def test_run_default(self, command): assert not err def test_run_with_args(self, command): - rc, out, err = command('-d', os.path.curdir, b=True, F=True ) + rc, out, err = command("-d", os.path.curdir, b=True, F=True) assert rc == 0 assert not out assert not err @@ -44,32 +43,38 @@ def test_run_capture_stdout(self, command): assert not err def test_run_capture_stderr(self, command): - rc, out, err = command('/this_does_not_exist_Foo_Bar', stderr=False) + rc, out, err = command("/this_does_not_exist_Foo_Bar", stderr=False) assert rc > 0 assert not out assert err - @pytest.mark.parametrize('inp', - ("not_used", - ("not", "used"), - ("unicode", u"Ångström", u"Planck_constant_over_two_π__ℏ"), - )) + @pytest.mark.parametrize( + "inp", + ( + "not_used", + ("not", "used"), + ("unicode", "Ångström", "Planck_constant_over_two_π__ℏ"), + ), + ) def test_run_with_input(self, command, inp): rc, out, err = command(stdout=False, stderr=False, input=inp) assert rc == 0 assert out assert not err - @pytest.mark.parametrize('inp', - ("not_used", - ("not", "used"), - ("unicode", u"Ångström", u"Planck_constant_over_two_π__ℏ"), - )) + @pytest.mark.parametrize( + "inp", + ( + "not_used", + ("not", "used"), + ("unicode", "Ångström", "Planck_constant_over_two_π__ℏ"), + ), + ) def test_Popen_with_input(self, command, inp): po = command.Popen(stdout=False, stderr=False, input=inp) - inp_string = u"\n".join(gromacs.utilities.asiterable(inp)) + u"\n" + inp_string = "\n".join(gromacs.utilities.asiterable(inp)) + "\n" if six.PY2: - assert po.input == inp_string.encode('utf-8') + assert po.input == inp_string.encode("utf-8") else: assert po.input == inp_string diff --git 
a/tests/test_log.py b/tests/test_log.py index 720ed281..5bb5b421 100644 --- a/tests/test_log.py +++ b/tests/test_log.py @@ -8,12 +8,14 @@ # This is almost certainly not thread/parallel safe. + @pytest.fixture def logger_with_file(tmp_path): - logfile = tmp_path / 'gmx.log' + logfile = tmp_path / "gmx.log" logger = gromacs.log.create("GMX", logfile=str(logfile)) return logfile, logger + def test_create(logger_with_file): logfile, logger = logger_with_file logger.info("Jabberwock") @@ -22,6 +24,7 @@ def test_create(logger_with_file): assert "Jabberwock" in txt assert "Cheshire Cat" in txt + def test_clear_handlers(logger_with_file): logfile, logger = logger_with_file gromacs.log.clear_handlers(logger) @@ -29,12 +32,14 @@ def test_clear_handlers(logger_with_file): txt = logfile.read_text() assert "Dodo" not in txt + def test_NullHandler(): h = gromacs.log.NullHandler() logger = logging.getLogger("GMX") logger.addHandler(h) logger.warning("screaming in silence") - assert True # not sure what to test here + assert True # not sure what to test here + @pytest.fixture def gromacs_logger(tmp_path): @@ -45,18 +50,23 @@ def gromacs_logger(tmp_path): gromacs.stop_logging() return logfile + def _assert_msg_in_log(logfile, msg): output = logfile.read_text() assert msg in output + def test_start_logger(gromacs_logger): - _assert_msg_in_log(gromacs_logger, - "GromacsWrapper {} STARTED".format(gromacs.__version__)) + _assert_msg_in_log( + gromacs_logger, "GromacsWrapper {} STARTED".format(gromacs.__version__) + ) + def test_using_logger(gromacs_logger): - _assert_msg_in_log(gromacs_logger, - "Running a test for logging") + _assert_msg_in_log(gromacs_logger, "Running a test for logging") + def test_stop_logger(gromacs_logger): - _assert_msg_in_log(gromacs_logger, - "GromacsWrapper {} STOPPED".format(gromacs.__version__)) + _assert_msg_in_log( + gromacs_logger, "GromacsWrapper {} STOPPED".format(gromacs.__version__) + ) diff --git a/tests/test_qsub.py b/tests/test_qsub.py index b1c84644..29f71d42 100644 --- a/tests/test_qsub.py +++ b/tests/test_qsub.py @@ -3,7 +3,8 @@ import gromacs.qsub -def test_queuing_systems(known=("Sun Gridengine", "PBS", "LoadLeveler", 'Slurm')): + +def test_queuing_systems(known=("Sun Gridengine", "PBS", "LoadLeveler", "Slurm")): assert len(gromacs.qsub.queuing_systems) == len(known) for qs in gromacs.qsub.queuing_systems: assert qs.name in known @@ -15,11 +16,16 @@ def test_queuing_systems(known=("Sun Gridengine", "PBS", "LoadLeveler", 'Slurm') except NotImplementedError: pass -@pytest.mark.parametrize("scriptfile,name", [ - ("foo.sge", "Sun Gridengine"), - ("foo.pbs", "PBS"), - ("foo.ll", "LoadLeveler"), - ("foo.slu", "Slurm")]) + +@pytest.mark.parametrize( + "scriptfile,name", + [ + ("foo.sge", "Sun Gridengine"), + ("foo.pbs", "PBS"), + ("foo.ll", "LoadLeveler"), + ("foo.slu", "Slurm"), + ], +) def test_detect_queuing_system(scriptfile, name): qs = gromacs.qsub.detect_queuing_system(scriptfile) assert qs.name == name diff --git a/tests/test_run.py b/tests/test_run.py index 985de84f..d4693fc3 100644 --- a/tests/test_run.py +++ b/tests/test_run.py @@ -11,6 +11,7 @@ import gromacs.run + class Test_check_mdrun_success(object): @staticmethod def test_no_logfile(): @@ -18,42 +19,52 @@ def test_no_logfile(): @staticmethod def test_success_Gromacs4(): - assert gromacs.run.check_mdrun_success(datafile('gromacs4_success.log')) is True + assert gromacs.run.check_mdrun_success(datafile("gromacs4_success.log")) is True @staticmethod def test_incomplete_Gromacs4(): - assert 
gromacs.run.check_mdrun_success(datafile('gromacs4_incomplete.log')) is False + assert ( + gromacs.run.check_mdrun_success(datafile("gromacs4_incomplete.log")) + is False + ) @staticmethod def test_success_Gromacs5(): - assert gromacs.run.check_mdrun_success(datafile('gromacs5_success.log')) is True + assert gromacs.run.check_mdrun_success(datafile("gromacs5_success.log")) is True @staticmethod def test_incomplete_Gromacs5(): - assert gromacs.run.check_mdrun_success(datafile('gromacs5_incomplete.log')) is False + assert ( + gromacs.run.check_mdrun_success(datafile("gromacs5_incomplete.log")) + is False + ) + # The following tests need an existing Gromacs environment. They should run # with either Gromacs 4 or Gromacs 5 + def test_MDRunner(): try: mdrun = gromacs.run.MDrunner() except OSError: raise RuntimeError("This test requires a Gromacs environment.") - rc = mdrun.run(mdrunargs={'version': True}) + rc = mdrun.run(mdrunargs={"version": True}) assert rc == 0, "mdrun failed to run through MDrunner" + class Test_find_gromacs_command(object): # Gromacs 4 or Gromacs 5 (in this order) commands = ["grompp", "gmx grompp"] def test_find(self): driver, name = gromacs.run.find_gromacs_command(self.commands) - assert driver in (None, "gmx"), \ - "find_gromacs_command() did not identify a driver" - assert name == self.commands[0], \ - "find_gromacs_command() did not find a command" + assert driver in ( + None, + "gmx", + ), "find_gromacs_command() did not identify a driver" + assert name == self.commands[0], "find_gromacs_command() did not find a command" @staticmethod def test_raises_ValueError(): @@ -64,5 +75,7 @@ def test_raises_ValueError(): def test_get_double_or_single_prec_mdrun(): # tests only ship with single prec mdrun mdrun = gromacs.run.get_double_or_single_prec_mdrun() - assert mdrun.command_name in ("mdrun", "mdrun_d"), \ - "gromacs.run.get_double_or_single_prec_mdrun() could not find any mdrun" + assert mdrun.command_name in ( + "mdrun", + "mdrun_d", + ), "gromacs.run.get_double_or_single_prec_mdrun() could not find any mdrun" diff --git a/tests/test_setup.py b/tests/test_setup.py index 801f58a7..6c657ef9 100644 --- a/tests/test_setup.py +++ b/tests/test_setup.py @@ -15,8 +15,11 @@ from .datafiles import datafile -@pytest.mark.xfail(gromacs.release.startswith("2020.6"), - reason="pdb2gmx 2020.6 fails to build the TIP4P waters") + +@pytest.mark.xfail( + gromacs.release.startswith("2020.6"), + reason="pdb2gmx 2020.6 fails to build the TIP4P waters", +) def test_trj_compact_main(tmpdir): pdb = datafile("1ake_A.pdb") top = tmpdir.mkdir("top") @@ -25,91 +28,99 @@ def test_trj_compact_main(tmpdir): outfile = "compact.pdb" with top.as_cwd(): f = gromacs.setup.topology(struct=pdb, ff="oplsaa", water="tip4p") - with open(mdpfile, 'w') as mdp: - mdp.write('; empty mdp file\nrcoulomb = 1\nrvdw = 1\nrlist = 1\n') + with open(mdpfile, "w") as mdp: + mdp.write("; empty mdp file\nrcoulomb = 1\nrvdw = 1\nrlist = 1\n") gromacs.grompp(f=mdpfile, o=tprfile, c=f["struct"], p=f["top"]) - gromacs.setup.trj_compact_main(s=tprfile, f=f["struct"], o=outfile, - input=("protein", "system")) + gromacs.setup.trj_compact_main( + s=tprfile, f=f["struct"], o=outfile, input=("protein", "system") + ) assert os.path.exists(outfile) + @pytest.fixture(scope="session") def topology(tmpdir_factory, struct=datafile("1ake_A_protein.pdb")): # note: use protein-only input 1ake_A_protein.pdb because solvation fails # if crystal waters are included (in 1ake_A.pdb) - TMPDIR = tmpdir_factory.mktemp('1ake') + TMPDIR = 
tmpdir_factory.mktemp("1ake") with TMPDIR.as_cwd(): topol_args = gromacs.setup.topology(struct=struct, ff="oplsaa", water="tip4p") return TMPDIR, topol_args + @pytest.fixture(scope="session") def solvate(topology): TMPDIR, topol_args = topology with TMPDIR.as_cwd(): - solvate_args = gromacs.setup.solvate(concentration=0.15, - water="tip4p", - **topol_args) + solvate_args = gromacs.setup.solvate( + concentration=0.15, water="tip4p", **topol_args + ) return TMPDIR, solvate_args + @pytest.fixture(scope="session") def energy_minimize(solvate, low_performance): # run short energy minimization with cheapest minimizer TMPDIR, solvate_args = solvate nt = 2 if low_performance else 0 with TMPDIR.as_cwd(): - em_args = gromacs.setup.energy_minimize(mdrun_args={'nt': nt}, - integrator="steep", - emtol=5000, - maxwarn=1, - **solvate_args) + em_args = gromacs.setup.energy_minimize( + mdrun_args={"nt": nt}, + integrator="steep", + emtol=5000, + maxwarn=1, + **solvate_args + ) return TMPDIR, em_args def test_topology(topology): TMPDIR, topol_args = topology - top = topol_args['top'] - struct = topol_args['struct'] - posres = topol_args['posres'] + top = topol_args["top"] + struct = topol_args["struct"] + posres = topol_args["posres"] assert os.path.exists(top) assert os.path.exists(struct) assert os.path.exists(posres) # add more tests for content of files! + def test_solvate(solvate): TMPDIR, solvate_args = solvate - assert_almost_equal(solvate_args['qtot'], 0.0) - assert os.path.exists(solvate_args['struct']) - assert os.path.exists(solvate_args['ndx']) - assert solvate_args['mainselection'] == '"Protein"' + assert_almost_equal(solvate_args["qtot"], 0.0) + assert os.path.exists(solvate_args["struct"]) + assert os.path.exists(solvate_args["ndx"]) + assert solvate_args["mainselection"] == '"Protein"' # add more tests for content of files! + def test_energy_minimize(energy_minimize): TMPDIR, em_args = energy_minimize - assert os.path.exists(em_args['struct']) - assert os.path.exists(em_args['top']) - assert em_args['mainselection'] == '"Protein"' + assert os.path.exists(em_args["struct"]) + assert os.path.exists(em_args["top"]) + assert em_args["mainselection"] == '"Protein"' # add more tests for content of files! -def test_energy_minimize_custom_mdp(solvate, low_performance, - mdp=datafile("custom_em.mdp")): + +def test_energy_minimize_custom_mdp( + solvate, low_performance, mdp=datafile("custom_em.mdp") +): TMPDIR, solvate_args = solvate nt = 2 if low_performance else 0 with TMPDIR.as_cwd(): try: - em_args = gromacs.setup.energy_minimize(mdrun_args={'nt': nt}, - mdp=mdp, - emtol=5000, - **solvate_args) + em_args = gromacs.setup.energy_minimize( + mdrun_args={"nt": nt}, mdp=mdp, emtol=5000, **solvate_args + ) except gromacs.exceptions.GromacsError as err: # sometimes the em does not converge at all, e.g. 
5.02988e+04 on atom 3277; # (happens on Travis Linux with Gromacs 4.6.5 but not locally or on Travis OSX) so we # re-run with a ridiculous tolerance so that we can at least test that the whole # function can run to completion - em_args = gromacs.setup.energy_minimize(mdrun_args={'nt': nt}, - mdp=mdp, - emtol=6e4, - **solvate_args) - assert os.path.exists(em_args['struct']) - assert os.path.exists(em_args['top']) - assert em_args['mainselection'] == '"Protein"' + em_args = gromacs.setup.energy_minimize( + mdrun_args={"nt": nt}, mdp=mdp, emtol=6e4, **solvate_args + ) + assert os.path.exists(em_args["struct"]) + assert os.path.exists(em_args["top"]) + assert em_args["mainselection"] == '"Protein"' # add more tests for content of files! diff --git a/tests/test_tools.py b/tests/test_tools.py index 58a28858..2add3965 100644 --- a/tests/test_tools.py +++ b/tests/test_tools.py @@ -14,8 +14,7 @@ aliased_tool_names = list(gromacs.tools.NAMES5TO4.values()) -@pytest.fixture(scope="function", - params=set(common_tool_names + aliased_tool_names)) +@pytest.fixture(scope="function", params=set(common_tool_names + aliased_tool_names)) def gromacs_tool(request): return getattr(gromacs, request.param) @@ -24,7 +23,8 @@ def test_tools_help(gromacs_tool): rc, out, err = gromacs_tool(h=True, stdout=False, stderr=False) assert rc == 0, "Gromacs command {0} failed".format(gromacs_tool.command_name) assert out + err, "Gromacs command {0} produced no output for -h".format( - gromacs_tool.command_name) + gromacs_tool.command_name + ) def test_failure_raises(): @@ -48,9 +48,10 @@ def test_failure_ignore(): except Exception as err: raise AssertionError("Should have ignored exception {}".format(err)) + class TestRelease(object): # add tested releases here - major_releases = ('4', '5', '2016', '2018', '2019', '2020', '2021', '2022') + major_releases = ("4", "5", "2016", "2018", "2019", "2020", "2021", "2022") def test_release(self): assert gromacs.release().startswith(self.major_releases) diff --git a/tests/test_utilities.py b/tests/test_utilities.py index f15f336e..d8707bef 100644 --- a/tests/test_utilities.py +++ b/tests/test_utilities.py @@ -15,21 +15,24 @@ import gromacs.utilities + @pytest.fixture def string_buffer(): return StringIO() + def test_which(name="cat"): path = gromacs.utilities.which(name) assert os.path.basename(path) == name -@pytest.mark.parametrize('path', ["~/whatever", "$HOME/whatever"]) +@pytest.mark.parametrize("path", ["~/whatever", "$HOME/whatever"]) def test_realpath(path): abspath = gromacs.utilities.realpath(path) assert abspath.startswith(os.path.sep) assert abspath.startswith(os.path.expanduser("~")) + class TestAttributeDict(object): def setup(self): self.d = gromacs.utilities.AttributeDict(foo="bar", baz="boing") @@ -38,16 +41,16 @@ def test_attribute_get(self): assert self.d.foo == "bar" def test_dict_get(self): - assert self.d['foo'] == "bar" - assert self.d.get('foo') == "bar" + assert self.d["foo"] == "bar" + assert self.d.get("foo") == "bar" def test_attribute_set(self): self.d.gargl = "blaster" assert self.d.gargl == "blaster" def test_dict_set(self): - self.d['gargl'] = "blaster" - assert self.d['gargl'] == "blaster" + self.d["gargl"] = "blaster" + assert self.d["gargl"] == "blaster" def test_pickle(self): try: @@ -62,30 +65,54 @@ def test_pickle(self): assert set(d.values()) == set(self.d.values()) -@pytest.fixture(params=[(42, int),("42", int), ([42],int) , - (2.7, float), ("2.7", float), ([2.7],float), - ("jabberwock", str),(["foo","bar"], str), - ("42 42", np.integer),("2.7 
2.7",float),("foo bar",str)], - ids=["int -> int", "str -> int","int list -> int list", - "float -> float", "str -> float", "float list -> float list", - "str -> str","str list -> str list", - "str -> int list","str -> float list","str -> str list"]) +@pytest.fixture( + params=[ + (42, int), + ("42", int), + ([42], int), + (2.7, float), + ("2.7", float), + ([2.7], float), + ("jabberwock", str), + (["foo", "bar"], str), + ("42 42", np.integer), + ("2.7 2.7", float), + ("foo bar", str), + ], + ids=[ + "int -> int", + "str -> int", + "int list -> int list", + "float -> float", + "str -> float", + "float list -> float list", + "str -> str", + "str list -> str list", + "str -> int list", + "str -> float list", + "str -> str list", + ], +) def conversions(request): value, target_type = request.param x = gromacs.utilities.autoconvert(value) return x, value, target_type + def test_autoconvert(conversions): x, value, target_type = conversions if isinstance(x, Iterable): x = x[0] - assert isinstance(x, target_type), \ - "Failed to convert '{0}' to type {1}".format(value, target_type) + assert isinstance(x, target_type), "Failed to convert '{0}' to type {1}".format( + value, target_type + ) + @pytest.fixture(params=["", "gz", "bz2"]) def openanyfilename(request): return "lifeofbrian.txt." + request.param + class TestOpenAny(object): quote = """There shall, in that time, be rumours of things going astray, erm, and there shall be a great confusion as to where things really are, @@ -98,8 +125,8 @@ class TestOpenAny(object): def test_file(self, tmpdir, openanyfilename): filename = openanyfilename name, ext = os.path.splitext(filename) - if ext in ['.gz', '.bz2']: - quote = self.quote.encode('utf8') + if ext in [".gz", ".bz2"]: + quote = self.quote.encode("utf8") else: quote = self.quote @@ -133,7 +160,7 @@ def test_stream_read(self, string_buffer): @pytest.fixture def pdb_files(tmpdir): for i in [1, 15, 300]: - tmpdir.join('myfile{}.pdb'.format(i)).write('foo\nbar\n') + tmpdir.join("myfile{}.pdb".format(i)).write("foo\nbar\n") currdir = os.getcwd() try: @@ -143,57 +170,60 @@ def pdb_files(tmpdir): os.chdir(currdir) -@pytest.mark.parametrize('args', [ - ('myfile*.pdb',), - ('myfile1.pdb', 'myfile15.pdb', 'myfile300.pdb'), - ('myfile1.pdb', 'myfile*.pdb'), - ('myfile*.pdb', 'myotherfiles*.pdb'), -]) +@pytest.mark.parametrize( + "args", + [ + ("myfile*.pdb",), + ("myfile1.pdb", "myfile15.pdb", "myfile300.pdb"), + ("myfile1.pdb", "myfile*.pdb"), + ("myfile*.pdb", "myotherfiles*.pdb"), + ], +) def test_number_pdbs(pdb_files, args): gromacs.utilities.number_pdbs(*args) - assert os.path.exists('myfile0001.pdb') - assert os.path.exists('myfile0015.pdb') - assert os.path.exists('myfile0300.pdb') + assert os.path.exists("myfile0001.pdb") + assert os.path.exists("myfile0015.pdb") + assert os.path.exists("myfile0300.pdb") def test_cat(tmpdir): # assert cat.noise == miaow # assert not cat.noise == woof - f1 = tmpdir.join('file1.txt') - f1.write('foo\n') - f2 = tmpdir.join('file2.txt') - f2.write('bar\n') - out = tmpdir.join('out.txt') + f1 = tmpdir.join("file1.txt") + f1.write("foo\n") + f2 = tmpdir.join("file2.txt") + f2.write("bar\n") + out = tmpdir.join("out.txt") gromacs.utilities.cat([str(f1), str(f2)], str(out)) - assert out.read() == 'foo\nbar\n' + assert out.read() == "foo\nbar\n" def test_cat_fail(tmpdir): with pytest.raises(OSError): - gromacs.utilities.cat(['not_here.txt'], str(tmpdir.join('new.txt'))) + gromacs.utilities.cat(["not_here.txt"], str(tmpdir.join("new.txt"))) def test_cat_None_out(tmpdir): - 
gromacs.utilities.cat(['some_file.txt'], None) + gromacs.utilities.cat(["some_file.txt"], None) def test_cat_None_in(tmpdir): - gromacs.utilities.cat(None, str(tmpdir.join('out.txt'))) + gromacs.utilities.cat(None, str(tmpdir.join("out.txt"))) - assert not os.path.exists(str(tmpdir.join('out.txt'))) + assert not os.path.exists(str(tmpdir.join("out.txt"))) @pytest.fixture def unlink_files(tmpdir): - tmpdir.join('hello.mdp').write('foo\n') - tmpdir.join('#hello.mdp.1#').write('foo\n') - tmpdir.join('#hello.mdp.2#').write('foo\n') + tmpdir.join("hello.mdp").write("foo\n") + tmpdir.join("#hello.mdp.1#").write("foo\n") + tmpdir.join("#hello.mdp.2#").write("foo\n") - tmpdir.join('out.gro').write('bar\n') - tmpdir.join('#out.gro.1#').write('bar\n') - tmpdir.join('#out.gro.2#').write('bar\n') + tmpdir.join("out.gro").write("bar\n") + tmpdir.join("#out.gro.1#").write("bar\n") + tmpdir.join("#out.gro.2#").write("bar\n") currdir = os.getcwd() try: @@ -204,58 +234,70 @@ def unlink_files(tmpdir): def test_unlink(unlink_files): - assert os.path.exists('out.gro') + assert os.path.exists("out.gro") - gromacs.utilities.unlink_f('out.gro') + gromacs.utilities.unlink_f("out.gro") - assert not os.path.exists('out.gro') + assert not os.path.exists("out.gro") def test_unlink_nonexistant(unlink_files): - assert not os.path.exists('out.xtc') - gromacs.utilities.unlink_f('out.xtc') + assert not os.path.exists("out.xtc") + gromacs.utilities.unlink_f("out.xtc") def test_unlink_gmx_backups(unlink_files): - gromacs.utilities.unlink_gmx_backups('hello.mdp') + gromacs.utilities.unlink_gmx_backups("hello.mdp") - assert os.path.exists('hello.mdp') - assert not os.path.exists('#hello.mdp.1#') - assert not os.path.exists('#hello.mdp.2#') - assert os.path.exists('out.gro') - assert os.path.exists('#out.gro.1#') + assert os.path.exists("hello.mdp") + assert not os.path.exists("#hello.mdp.1#") + assert not os.path.exists("#hello.mdp.2#") + assert os.path.exists("out.gro") + assert os.path.exists("#out.gro.1#") def test_unlink_gmx(unlink_files): - gromacs.utilities.unlink_gmx('hello.mdp') - assert not os.path.exists('hello.mdp') - assert not os.path.exists('#hello.mdp.1#') - assert not os.path.exists('#hello.mdp.2#') - assert os.path.exists('out.gro') - assert os.path.exists('#out.gro.1#') - - -@pytest.mark.parametrize('iterable,expected', [ - ('this', 'this'), - (['this', 'that'], 'this'), - ([1, 2, 3], 1), - (np.arange(4), 0), -]) + gromacs.utilities.unlink_gmx("hello.mdp") + assert not os.path.exists("hello.mdp") + assert not os.path.exists("#hello.mdp.1#") + assert not os.path.exists("#hello.mdp.2#") + assert os.path.exists("out.gro") + assert os.path.exists("#out.gro.1#") + + +@pytest.mark.parametrize( + "iterable,expected", + [ + ("this", "this"), + (["this", "that"], "this"), + ([1, 2, 3], 1), + (np.arange(4), 0), + ], +) def test_firstof(iterable, expected): assert gromacs.utilities.firstof(iterable) == expected -@pytest.mark.parametrize('val,ref', [ - ('a', 'ALA'), ('A', 'ALA'), - ('ala', 'A'), ('ALA', 'A'), ('Ala', 'A'), - ('Q', 'GLN'), ('q', 'GLN'), - ('GLN', 'Q'), ('gln', 'Q'), ('Gln', 'Q'), -]) +@pytest.mark.parametrize( + "val,ref", + [ + ("a", "ALA"), + ("A", "ALA"), + ("ala", "A"), + ("ALA", "A"), + ("Ala", "A"), + ("Q", "GLN"), + ("q", "GLN"), + ("GLN", "Q"), + ("gln", "Q"), + ("Gln", "Q"), + ], +) def test_conv_aa_code(val, ref): assert gromacs.utilities.convert_aa_code(val) == ref -@pytest.mark.parametrize('val', ['ALAA', '']) +@pytest.mark.parametrize("val", ["ALAA", ""]) def test_conv_aa_code_VE(val): with 
pytest.raises(ValueError): gromacs.utilities.convert_aa_code(val) @@ -264,34 +306,39 @@ def test_conv_aa_code_VE(val): @pytest.fixture def fileutil(): class MyFileUtil(gromacs.utilities.FileUtils): - default_extension = '.test' + default_extension = ".test" def __init__(self): - self._init_filename(filename='simple.test') + self._init_filename(filename="simple.test") return MyFileUtil() -@pytest.mark.parametrize('filename,ext,ref', [ - (None, None, 'simple'), - ('other', None, 'other'), - (None, 'pdf', 'simple.pdf'), - ('other', 'pdf', 'other.pdf'), -]) +@pytest.mark.parametrize( + "filename,ext,ref", + [ + (None, None, "simple"), + ("other", None, "other"), + (None, "pdf", "simple.pdf"), + ("other", "pdf", "other.pdf"), + ], +) def test_FileUtils_filename(fileutil, filename, ext, ref): assert fileutil.filename(filename=filename, ext=ext) == ref + def test_FileUtils_filename_VE(fileutil): del fileutil._filename with pytest.raises(ValueError): fileutil.filename() + @pytest.fixture def fileutil_withfiles(fileutil, tmpdir): curr = os.getcwd() - tmpdir.join('exists.txt').write('hello\n') + tmpdir.join("exists.txt").write("hello\n") try: os.chdir(str(tmpdir)) @@ -300,29 +347,28 @@ def fileutil_withfiles(fileutil, tmpdir): os.chdir(curr) -@pytest.mark.parametrize('fn', ['exists.txt', 'nonexistant.txt']) +@pytest.mark.parametrize("fn", ["exists.txt", "nonexistant.txt"]) def test_check_file_exists_ignore(fileutil_withfiles, fn): - assert fileutil_withfiles.check_file_exists(fn, resolve='ignore') is False + assert fileutil_withfiles.check_file_exists(fn, resolve="ignore") is False -@pytest.mark.parametrize('fn', ['exists.txt', 'nonexistant.txt']) +@pytest.mark.parametrize("fn", ["exists.txt", "nonexistant.txt"]) def test_check_file_exists_force(fileutil_withfiles, fn): assert fileutil_withfiles.check_file_exists(fn, force=True) is False -@pytest.mark.parametrize('fn,ref', [('exists.txt', True), - ('nonexistant.txt', False)]) +@pytest.mark.parametrize("fn,ref", [("exists.txt", True), ("nonexistant.txt", False)]) def test_check_file_exists_indicate(fileutil_withfiles, fn, ref): - assert fileutil_withfiles.check_file_exists(fn, resolve='indicate') is ref + assert fileutil_withfiles.check_file_exists(fn, resolve="indicate") is ref -@pytest.mark.parametrize('resolve', ['warn', 'warning']) +@pytest.mark.parametrize("resolve", ["warn", "warning"]) def test_check_file_exists_warn(fileutil_withfiles, resolve): with pytest.warns(UserWarning): - fileutil_withfiles.check_file_exists('exists.txt', resolve=resolve) + fileutil_withfiles.check_file_exists("exists.txt", resolve=resolve) -@pytest.mark.parametrize('resolve', ['exception', 'raise']) +@pytest.mark.parametrize("resolve", ["exception", "raise"]) def test_check_file_exists_raise(fileutil_withfiles, resolve): with pytest.raises(IOError): - fileutil_withfiles.check_file_exists('exists.txt', resolve=resolve) + fileutil_withfiles.check_file_exists("exists.txt", resolve=resolve) diff --git a/tests/test_version.py b/tests/test_version.py index b9ef0e97..bfa7e067 100644 --- a/tests/test_version.py +++ b/tests/test_version.py @@ -7,6 +7,7 @@ import gromacs + def test_version(): release = gromacs.__version__ assert isinstance(release, str) diff --git a/versioneer.py b/versioneer.py index 64fea1c8..2b545405 100644 --- a/versioneer.py +++ b/versioneer.py @@ -1,4 +1,3 @@ - # Version: 0.18 """The Versioneer - like a rocketeer, but for versions. 
@@ -277,6 +276,7 @@ """ from __future__ import print_function + try: import configparser except ImportError: @@ -308,11 +308,13 @@ def get_root(): setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): - err = ("Versioneer was unable to run the project root directory. " - "Versioneer requires setup.py to be executed from " - "its immediate directory (like 'python setup.py COMMAND'), " - "or in a way that lets it use sys.argv[0] to find the root " - "(like 'python path/to/setup.py COMMAND').") + err = ( + "Versioneer was unable to run the project root directory. " + "Versioneer requires setup.py to be executed from " + "its immediate directory (like 'python setup.py COMMAND'), " + "or in a way that lets it use sys.argv[0] to find the root " + "(like 'python path/to/setup.py COMMAND')." + ) raise VersioneerBadRootError(err) try: # Certain runtime workflows (setup.py install/develop in a setuptools @@ -325,8 +327,10 @@ def get_root(): me_dir = os.path.normcase(os.path.splitext(me)[0]) vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) if me_dir != vsr_dir: - print("Warning: build in %s is using versioneer.py from %s" - % (os.path.dirname(me), versioneer_py)) + print( + "Warning: build in %s is using versioneer.py from %s" + % (os.path.dirname(me), versioneer_py) + ) except NameError: pass return root @@ -348,6 +352,7 @@ def get(parser, name): if parser.has_option("versioneer", name): return parser.get("versioneer", name) return None + cfg = VersioneerConfig() cfg.VCS = VCS cfg.style = get(parser, "style") or "" @@ -372,17 +377,18 @@ class NotThisMethod(Exception): def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" + def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f + return decorate -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): +def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None @@ -390,10 +396,13 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) + p = subprocess.Popen( + [c] + args, + cwd=cwd, + env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr else None), + ) break except EnvironmentError: e = sys.exc_info()[1] @@ -418,7 +427,9 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, return stdout, p.returncode -LONG_VERSION_PY['git'] = ''' +LONG_VERSION_PY[ + "git" +] = ''' # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build @@ -993,7 +1004,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. 
TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d @@ -1002,7 +1013,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) + tags = set([r for r in refs if re.search(r"\d", r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: @@ -1010,19 +1021,26 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): - r = ref[len(tag_prefix):] + r = ref[len(tag_prefix) :] if verbose: print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} + return { + "version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": None, + "date": date, + } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} + return { + "version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": "no suitable tags", + "date": None, + } @register_vcs_handler("git", "pieces_from_vcs") @@ -1037,8 +1055,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) + out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) @@ -1046,10 +1063,19 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%s*" % tag_prefix], - cwd=root) + describe_out, rc = run_command( + GITS, + [ + "describe", + "--tags", + "--dirty", + "--always", + "--long", + "--match", + "%s*" % tag_prefix, + ], + cwd=root, + ) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") @@ -1072,17 +1098,16 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: - git_describe = git_describe[:git_describe.rindex("-dirty")] + git_describe = git_describe[: git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? 
- pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) + pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out return pieces # tag @@ -1091,10 +1116,12 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) + pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % ( + full_tag, + tag_prefix, + ) return pieces - pieces["closest-tag"] = full_tag[len(tag_prefix):] + pieces["closest-tag"] = full_tag[len(tag_prefix) :] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) @@ -1105,13 +1132,13 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): else: # HEX: no tags pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) + count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], - cwd=root)[0].strip() + date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[ + 0 + ].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces @@ -1167,16 +1194,22 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} + return { + "version": dirname[len(parentdir_prefix) :], + "full-revisionid": None, + "dirty": False, + "error": None, + "date": None, + } else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: - print("Tried directories %s but none started with prefix %s" % - (str(rootdirs), parentdir_prefix)) + print( + "Tried directories %s but none started with prefix %s" + % (str(rootdirs), parentdir_prefix) + ) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @@ -1205,11 +1238,13 @@ def versions_from_file(filename): contents = f.read() except EnvironmentError: raise NotThisMethod("unable to read _version.py") - mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", - contents, re.M | re.S) + mo = re.search( + r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S + ) if not mo: - mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", - contents, re.M | re.S) + mo = re.search( + r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S + ) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1)) @@ -1218,8 +1253,7 @@ def versions_from_file(filename): def write_to_version_file(filename, versions): """Write the given version number to the given _version.py file.""" os.unlink(filename) - contents = json.dumps(versions, sort_keys=True, - indent=1, separators=(",", ": ")) + contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) @@ -1251,8 +1285,7 @@ def render_pep440(pieces): rendered += ".dirty" else: # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) + rendered = "0+untagged.%d.g%s" % 
(pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered @@ -1366,11 +1399,13 @@ def render_git_describe_long(pieces): def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} + return { + "version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None, + } if not style or style == "default": style = "pep440" # the default @@ -1390,9 +1425,13 @@ def render(pieces, style): else: raise ValueError("unknown style '%s'" % style) - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} + return { + "version": rendered, + "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], + "error": None, + "date": pieces.get("date"), + } class VersioneerBadRootError(Exception): @@ -1415,8 +1454,9 @@ def get_versions(verbose=False): handlers = HANDLERS.get(cfg.VCS) assert handlers, "unrecognized VCS '%s'" % cfg.VCS verbose = verbose or cfg.verbose - assert cfg.versionfile_source is not None, \ - "please set versioneer.versionfile_source" + assert ( + cfg.versionfile_source is not None + ), "please set versioneer.versionfile_source" assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" versionfile_abs = os.path.join(root, cfg.versionfile_source) @@ -1470,9 +1510,13 @@ def get_versions(verbose=False): if verbose: print("unable to compute version") - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, "error": "unable to compute version", - "date": None} + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", + "date": None, + } def get_version(): @@ -1521,6 +1565,7 @@ def run(self): print(" date: %s" % vers.get("date")) if vers["error"]: print(" error: %s" % vers["error"]) + cmds["version"] = cmd_version # we override "build_py" in both distutils and setuptools @@ -1553,14 +1598,15 @@ def run(self): # now locate _version.py in the new build/ directory and replace # it with an updated value if cfg.versionfile_build: - target_versionfile = os.path.join(self.build_lib, - cfg.versionfile_build) + target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) + cmds["build_py"] = cmd_build_py if "cx_Freeze" in sys.modules: # cx_freeze enabled? from cx_Freeze.dist import build_exe as _build_exe + # nczeczulin reports that py2exe won't like the pep440-style string # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. # setup(console=[{ @@ -1581,17 +1627,21 @@ def run(self): os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % - {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) + f.write( + LONG + % { + "DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + } + ) + cmds["build_exe"] = cmd_build_exe del cmds["build_py"] - if 'py2exe' in sys.modules: # py2exe enabled? + if "py2exe" in sys.modules: # py2exe enabled? 
try: from py2exe.distutils_buildexe import py2exe as _py2exe # py3 except ImportError: @@ -1610,13 +1660,17 @@ def run(self): os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % - {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) + f.write( + LONG + % { + "DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + } + ) + cmds["py2exe"] = cmd_py2exe # we override different "sdist" commands for both environments @@ -1643,8 +1697,10 @@ def make_release_tree(self, base_dir, files): # updated value target_versionfile = os.path.join(base_dir, cfg.versionfile_source) print("UPDATING %s" % target_versionfile) - write_to_version_file(target_versionfile, - self._versioneer_generated_versions) + write_to_version_file( + target_versionfile, self._versioneer_generated_versions + ) + cmds["sdist"] = cmd_sdist return cmds @@ -1699,11 +1755,13 @@ def do_setup(): root = get_root() try: cfg = get_config_from_root(root) - except (EnvironmentError, configparser.NoSectionError, - configparser.NoOptionError) as e: + except ( + EnvironmentError, + configparser.NoSectionError, + configparser.NoOptionError, + ) as e: if isinstance(e, (EnvironmentError, configparser.NoSectionError)): - print("Adding sample versioneer config to setup.cfg", - file=sys.stderr) + print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: f.write(SAMPLE_CONFIG) print(CONFIG_ERROR, file=sys.stderr) @@ -1712,15 +1770,18 @@ def do_setup(): print(" creating %s" % cfg.versionfile_source) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] - f.write(LONG % {"DOLLAR": "$", - "STYLE": cfg.style, - "TAG_PREFIX": cfg.tag_prefix, - "PARENTDIR_PREFIX": cfg.parentdir_prefix, - "VERSIONFILE_SOURCE": cfg.versionfile_source, - }) - - ipy = os.path.join(os.path.dirname(cfg.versionfile_source), - "__init__.py") + f.write( + LONG + % { + "DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + } + ) + + ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") if os.path.exists(ipy): try: with open(ipy, "r") as f: @@ -1762,8 +1823,10 @@ def do_setup(): else: print(" 'versioneer.py' already in MANIFEST.in") if cfg.versionfile_source not in simple_includes: - print(" appending versionfile_source ('%s') to MANIFEST.in" % - cfg.versionfile_source) + print( + " appending versionfile_source ('%s') to MANIFEST.in" + % cfg.versionfile_source + ) with open(manifest_in, "a") as f: f.write("include %s\n" % cfg.versionfile_source) else: From fc8cce931d57caaf357c51159dae3e584fee4c5f Mon Sep 17 00:00:00 2001 From: Oliver Beckstein Date: Fri, 25 Aug 2023 18:40:19 -0400 Subject: [PATCH 2/6] updated docs for black - update CHANGES - update documentation and README (with badge) --- CHANGES | 7 +++++++ README.rst | 14 +++++++++----- doc/sphinx/source/installation.txt | 7 ++++++- 3 files changed, 22 insertions(+), 6 deletions(-) diff --git a/CHANGES b/CHANGES index 325e3061..298547e3 100644 --- a/CHANGES +++ b/CHANGES @@ -2,6 +2,13 @@ CHANGELOG for GromacsWrapper ============================== +2023-??-?? 
0.8.5 +orbeckst + +* use black for uniformly formatted code (#246) +* minor doc/installation updates + + 2023-03-13 0.8.4 orbeckst diff --git a/README.rst b/README.rst index c5c82667..f3c50266 100644 --- a/README.rst +++ b/README.rst @@ -7,7 +7,7 @@ README: GromacsWrapper ======================== -|build| |cov| |docs| |zenodo| |PRsWelcome| |anaconda| +|build| |cov| |docs| |zenodo| |black| |PRsWelcome| |anaconda| A primitive Python wrapper around the Gromacs_ tools. The library is tested with GROMACS 4.6.5, 2018.x, 2019.x, 2020.x, 2021.x, 2022.x (and 5.x @@ -50,6 +50,9 @@ running simulations with sensible parameters. .. |anaconda| image:: https://anaconda.org/conda-forge/gromacswrapper/badges/version.svg :target: https://anaconda.org/conda-forge/gromacswrapper :alt: Anaconda.org package +.. |black| image:: https://img.shields.io/badge/code%20style-black-000000.svg + :target: https://github.com/psf/black + :alt: black @@ -136,15 +139,16 @@ wonderful ways. Please report issues through the `Issue Tracker`_. To use the *development code base*: checkout the ``main`` branch:: - git clone https://github.com/Becksteinlab/GromacsWrapper.git - cd GromacsWrapper + git clone https://github.com/Becksteinlab/GromacsWrapper.git and install :: - python setup.py install - + pip install GromacsWrapper/ +Code contributions are welcome. We use `black`_ for uniform code +formatting so please install black_ and run it on your code. +.. _`black`: https://github.com/psf/black Download and Availability ========================= diff --git a/doc/sphinx/source/installation.txt b/doc/sphinx/source/installation.txt index 8844dcfc..83aed698 100644 --- a/doc/sphinx/source/installation.txt +++ b/doc/sphinx/source/installation.txt @@ -77,7 +77,7 @@ or install from the unpacked source:: tar -zxvf GromacsWrapper-0.8.3.tar.gz cd GromacsWrapper-0.8.3 - python setup.py install + pip install . @@ -94,6 +94,11 @@ and checkout the *main* branch:: git clone https://github.com/Becksteinlab/GromacsWrapper.git cd GromacsWrapper +Code contributions are welcome. We use `black`_ for uniform code +formatting so please install black_ and run it on your code. + +.. 
_`black`: https://github.com/psf/black + Requirements ============ From b358058a7b39bdc5235561adc558542dd81fc5a6 Mon Sep 17 00:00:00 2001 From: Oliver Beckstein Date: Fri, 25 Aug 2023 18:41:06 -0400 Subject: [PATCH 3/6] add black linter GitHub action workflow --- .github/workflows/black.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 .github/workflows/black.yaml diff --git a/.github/workflows/black.yaml b/.github/workflows/black.yaml new file mode 100644 index 00000000..8e1278e3 --- /dev/null +++ b/.github/workflows/black.yaml @@ -0,0 +1,10 @@ +name: Lint (black) + +on: [push, pull_request] + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: psf/black@stable \ No newline at end of file From 0de56b004dac74ae0221592293823b83c82a3c9a Mon Sep 17 00:00:00 2001 From: Oliver Beckstein Date: Fri, 15 Sep 2023 10:16:38 -0400 Subject: [PATCH 4/6] blackened merged files --- tests/fileformats/top/test_amber03star.py | 9 +++++---- tests/fileformats/top/test_amber03w.py | 9 +++++---- tests/fileformats/top/top.py | 6 +++--- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/tests/fileformats/top/test_amber03star.py b/tests/fileformats/top/test_amber03star.py index 26020a6f..254c62fe 100644 --- a/tests/fileformats/top/test_amber03star.py +++ b/tests/fileformats/top/test_amber03star.py @@ -31,8 +31,9 @@ class TestAmber03star(TopologyTest): "ZN", ] - @pytest.mark.xfail(gromacs.release().startswith(("2022", "2023")), - reason="issue #236 https://github.com/Becksteinlab/GromacsWrapper/issues/236") + @pytest.mark.xfail( + gromacs.release().startswith(("2022", "2023")), + reason="issue #236 https://github.com/Becksteinlab/GromacsWrapper/issues/236", + ) def test_mdrun(self, tmpdir, low_performance): - super(TestAmber03star, self).test_mdrun(tmpdir, low_performance) - + super(TestAmber03star, self).test_mdrun(tmpdir, low_performance) diff --git a/tests/fileformats/top/test_amber03w.py b/tests/fileformats/top/test_amber03w.py index 964bdfeb..3581cc96 100644 --- a/tests/fileformats/top/test_amber03w.py +++ b/tests/fileformats/top/test_amber03w.py @@ -31,8 +31,9 @@ class TestAmber03w(TopologyTest): "ZN", ] - @pytest.mark.xfail(gromacs.release().startswith(("2022", "2023")), - reason="issue #236 https://github.com/Becksteinlab/GromacsWrapper/issues/236") + @pytest.mark.xfail( + gromacs.release().startswith(("2022", "2023")), + reason="issue #236 https://github.com/Becksteinlab/GromacsWrapper/issues/236", + ) def test_mdrun(self, tmpdir, low_performance): - super(TestAmber03w, self).test_mdrun(tmpdir, low_performance) - + super(TestAmber03w, self).test_mdrun(tmpdir, low_performance) diff --git a/tests/fileformats/top/top.py b/tests/fileformats/top/top.py index 2b128912..a447ec57 100644 --- a/tests/fileformats/top/top.py +++ b/tests/fileformats/top/top.py @@ -12,10 +12,10 @@ from numpy.testing import assert_array_equal try: - from pandas.testing import assert_frame_equal + from pandas.testing import assert_frame_equal except ImportError: - # old versions of pandas - from pandas.util.testing import assert_frame_equal + # old versions of pandas + from pandas.util.testing import assert_frame_equal import pytest From 825de45a2e3f42028cecc9d9046891c871cdea4c Mon Sep 17 00:00:00 2001 From: Oliver Beckstein Date: Fri, 15 Sep 2023 22:19:27 -0400 Subject: [PATCH 5/6] explicitly mark unicode strings for Python 2.7 black only supports Python 3.x and removed unicode strings: this commit puts these essential string prefixes back (otherwise Python 2.7 tests 
will not pass) --- tests/fileformats/test_convert.py | 2 +- tests/test_core.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/fileformats/test_convert.py b/tests/fileformats/test_convert.py index 31a5a3b8..182211e5 100644 --- a/tests/fileformats/test_convert.py +++ b/tests/fileformats/test_convert.py @@ -17,7 +17,7 @@ [ (100, 100), ("Jabberwock", "Jabberwock"), - ("Ångström", "Ångström"), + (u"Ångström", u"Ångström"), ], ) def test_to_unicode(s, expected): diff --git a/tests/test_core.py b/tests/test_core.py index 967bd4f5..f24ec54b 100644 --- a/tests/test_core.py +++ b/tests/test_core.py @@ -53,7 +53,7 @@ def test_run_capture_stderr(self, command): ( "not_used", ("not", "used"), - ("unicode", "Ångström", "Planck_constant_over_two_π__ℏ"), + ("unicode", u"Ångström", u"Planck_constant_over_two_π__ℏ"), ), ) def test_run_with_input(self, command, inp): @@ -67,7 +67,7 @@ def test_run_with_input(self, command, inp): ( "not_used", ("not", "used"), - ("unicode", "Ångström", "Planck_constant_over_two_π__ℏ"), + ("unicode", u"Ångström", u"Planck_constant_over_two_π__ℏ"), ), ) def test_Popen_with_input(self, command, inp): From e96eae088ae6b5654bd4eae5ddc76cba2d8736ae Mon Sep 17 00:00:00 2001 From: Oliver Beckstein Date: Fri, 15 Sep 2023 22:41:32 -0400 Subject: [PATCH 6/6] black configuration - ignore files with specific Python 2.7 code to protect it from black - ignore the black commit for git blame --- .git-blame-ignore-revs | 1 + pyproject.toml | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 .git-blame-ignore-revs create mode 100644 pyproject.toml diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 00000000..f1fb75d3 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1 @@ +0de56b004dac74ae0221592293823b83c82a3c9a diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..d8e8dfbf --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,2 @@ +[tool.black] +extend-exclude = "tests/(test_core|fileformats/test_convert)\\.py" \ No newline at end of file
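
The ``extend-exclude`` value in the new ``pyproject.toml`` is a regular expression
that black matches against file paths, so it should cover exactly the two files that
keep their Python 2.7 ``u"..."`` literals (``tests/test_core.py`` and
``tests/fileformats/test_convert.py``) and nothing else. Below is a minimal,
standalone sanity check of that pattern, not part of the patch series itself; it
simply applies ``re.search`` to a few illustrative repository paths and assumes
nothing about black's internals beyond the pattern being used as a regex::

    import re

    # Pattern as written in pyproject.toml; the TOML escape "\\." becomes "\." here.
    pattern = re.compile(r"tests/(test_core|fileformats/test_convert)\.py")

    # Files black should skip versus files it should still reformat.
    candidates = [
        "tests/test_core.py",                 # expected: excluded
        "tests/fileformats/test_convert.py",  # expected: excluded
        "tests/test_utilities.py",            # expected: reformatted
        "gromacs/core.py",                    # expected: reformatted
    ]

    for path in candidates:
        status = "excluded" if pattern.search(path) else "reformatted"
        print("%-40s %s" % (path, status))

Running the snippet prints "excluded" only for the two protected test files, which
matches the intent stated in the commit messages of patches 5 and 6.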