diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md
deleted file mode 100644
index c76ae2462be..00000000000
--- a/.github/ISSUE_TEMPLATE/bug-report.md
+++ /dev/null
@@ -1,31 +0,0 @@
----
-name: Bug Report
-about: Give as much information as you can, and be sure to search existing
- Issues first.
-title: ''
-labels: bug
-assignees: ''
-
----
-
-**Describe the bug**
-
-
-**Release version(s) and/or repository branch(es) affected?**
-
-
-**Steps to reproduce the bug**
-
-
-**Expected behavior**
-
-
-**Screenshots**
-
-
-**Additional context**
-
-
-**Pull requests welcome!**
-This is an Open Source project - please consider contributing a bug fix
-yourself (please read `CONTRIBUTING.md` before starting any work though).
diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md
deleted file mode 100644
index 47f9ba534f5..00000000000
--- a/.github/ISSUE_TEMPLATE/feature-request.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-name: Feature Request
-about: Give as much information as you can, and be sure to search existing
- Issues first.
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Describe exactly what you would like to see in an upcoming release**
-
-
-**Additional context**
-
-
-**Pull requests welcome!**
-
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
deleted file mode 100644
index 4159478a9da..00000000000
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
-**Check List**
-
-- [ ] I have read `CONTRIBUTING.md` and added my name as a Code Contributor.
-- [ ] Contains logically grouped changes (else tidy your branch by rebase).
-- [ ] Does not contain off-topic changes (use other PRs for other changes).
-- [ ] Applied any dependency changes to both `setup.cfg` and `conda-environment.yml`.
-- [ ] Tests are included (or explain why tests are not needed).
-- [ ] `CHANGES.md` entry included if this is a change that can affect users
-- [ ] [Cylc-Doc](https://github.com/cylc/cylc-doc) pull request opened if required at cylc/cylc-doc/pull/XXXX.
-- [ ] If this is a bug fix, PR should be raised against the relevant `?.?.x` branch.
diff --git a/.github/workflows/2_auto_publish_release.yml b/.github/workflows/2_auto_publish_release.yml
index ad9afbc56e0..b5ddafb9d15 100644
--- a/.github/workflows/2_auto_publish_release.yml
+++ b/.github/workflows/2_auto_publish_release.yml
@@ -39,7 +39,7 @@ jobs:
uses: cylc/release-actions/build-python-package@v1
- name: Publish distribution to PyPI
- uses: pypa/gh-action-pypi-publish@v1.6.4
+ uses: pypa/gh-action-pypi-publish@v1.8.5
with:
user: __token__ # uses the API token feature of PyPI - least permissions possible
password: ${{ secrets.PYPI_TOKEN }}
diff --git a/.github/workflows/test_fast.yml b/.github/workflows/test_fast.yml
index 1bbf67b699d..78e1add6d3a 100644
--- a/.github/workflows/test_fast.yml
+++ b/.github/workflows/test_fast.yml
@@ -35,20 +35,6 @@ jobs:
with:
python-version: ${{ matrix.python-version }}
- - name: Brew Install
- if: startsWith(matrix.os, 'macos')
- run: |
- # speed up install (https://docs.brew.sh/Manpage#environment)
- export HOMEBREW_NO_AUTO_UPDATE=1 HOMEBREW_NO_INSTALL_CLEANUP=1
- echo "[command]brew update"
- brew update
- echo "[command]brew install ..."
- brew install bash coreutils
- # add GNU coreutils and sed to the user PATH
- # (see instructions in brew install output)
- echo "$(brew --prefix)/opt/coreutils/libexec/gnubin" \
- >> "${GITHUB_PATH}"
-
- name: Apt-Get Install
if: startsWith(matrix.os, 'ubuntu')
run: |
@@ -93,7 +79,7 @@ jobs:
run: |
pytest tests/integration
- - name: Upload artifact
+ - name: Upload failed tests artifact
if: failure()
uses: actions/upload-artifact@v3
with:
@@ -105,15 +91,35 @@ jobs:
coverage xml
coverage report
+ - name: Upload coverage artifact
+ uses: actions/upload-artifact@v3
+ with:
+ name: coverage_${{ matrix.os }}_py-${{ matrix.python-version }}
+ path: coverage.xml
+ retention-days: 7
+
+ - name: Linkcheck
+ if: startsWith(matrix.python-version, '3.10')
+ run: pytest -m linkcheck --dist=load tests/unit
+
+ codecov:
+ needs: test
+ runs-on: ubuntu-latest
+ timeout-minutes: 2
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+
+ - name: Download coverage artifacts
+ uses: actions/download-artifact@v3
+
- name: Codecov upload
uses: codecov/codecov-action@v3
with:
- name: '${{ github.workflow }} ${{ matrix.os }} py-${{ matrix.python-version }}'
+ name: ${{ github.workflow }}
flags: fast-tests
fail_ci_if_error: true
verbose: true
- token: ${{ secrets.CODECOV_TOKEN }} # Token not required for public repos, but might reduce chance of random 404 error?
-
- - name: Linkcheck
- if: startsWith(matrix.python-version, '3.10')
- run: pytest -m linkcheck --dist=load tests/unit
+ # Token not required for public repos, but avoids upload failure due
+ # to rate-limiting (but not for PRs opened from forks)
+ token: ${{ secrets.CODECOV_TOKEN }}
diff --git a/.github/workflows/test_functional.yml b/.github/workflows/test_functional.yml
index 21967c85f76..3514713b7e9 100644
--- a/.github/workflows/test_functional.yml
+++ b/.github/workflows/test_functional.yml
@@ -248,14 +248,13 @@ jobs:
-exec echo '====== {} ======' \; -exec cat '{}' \;
- name: Set artifact upload name
- if: failure() && steps.test.outcome == 'failure'
id: uploadname
run: |
# artifact name cannot contain '/' characters
CID="$(sed 's|/|-|g' <<< "${{ matrix.name || matrix.chunk }}")"
echo "uploadname=$CID" >> $GITHUB_OUTPUT
- - name: Upload artifact
+ - name: Upload failed tests artifact
if: failure() && steps.test.outcome == 'failure'
uses: actions/upload-artifact@v3
with:
@@ -294,11 +293,31 @@ jobs:
coverage xml
coverage report
+ - name: Upload coverage artifact
+ uses: actions/upload-artifact@v3
+ with:
+ name: coverage_${{ steps.uploadname.outputs.uploadname }}
+ path: coverage.xml
+ retention-days: 7
+
+ codecov:
+ needs: test
+ runs-on: ubuntu-latest
+ timeout-minutes: 2
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+
+ - name: Download coverage artifacts
+ uses: actions/download-artifact@v3
+
- name: Codecov upload
uses: codecov/codecov-action@v3
with:
- name: '${{ github.workflow }} ${{ matrix.name }} ${{ matrix.chunk }}'
+ name: ${{ github.workflow }}
flags: functional-tests
fail_ci_if_error: true
verbose: true
- token: ${{ secrets.CODECOV_TOKEN }} # Token not required for public repos, but might reduce chance of random 404 error?
+ # Token not required for public repos, but avoids upload failure due
+ # to rate-limiting (but not for PRs opened from forks)
+ token: ${{ secrets.CODECOV_TOKEN }}
diff --git a/CHANGES.md b/CHANGES.md
index 5410f3c2376..cf9346d61b0 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -9,6 +9,27 @@ creating a new release entry be sure to copy & paste the span tag with the
`actions:bind` attribute, which is used by a regex to find the text to be
updated. Only the first match gets replaced, so it's fine to leave the old
ones in. -->
+-------------------------------------------------------------------------------
+## __cylc-8.2.0 (Upcoming)__
+
+### Enhancements
+
+[#5461](https://github.com/cylc/cylc-flow/pull/5461) - Preserve colour
+formatting when starting workflows in distributed mode using `run hosts`.
+
+[#5291](https://github.com/cylc/cylc-flow/pull/5291) - Re-implement old-style
+clock triggers as `wall_clock` xtriggers.
+
+[#5439](https://github.com/cylc/cylc-flow/pull/5439) - Small CLI short option changes:
+Add the `-n` short option for `--workflow-name` to `cylc vip`; rename the `-n`
+short option for `--no-detach` to `-N`; add `-r` as a short option for
+`--run-name`.
+
+### Fixes
+
+[#5458](https://github.com/cylc/cylc-flow/pull/5458) - Fix a small bug
+causing option parsing to fail with Cylc Reinstall.
+
-------------------------------------------------------------------------------
## __cylc-8.1.3 (Upcoming)__
@@ -27,10 +48,20 @@ Fixes a possible scheduler traceback observed with remote task polling.
absence of `job name length maximum` in PBS platform settings would cause
Cylc to crash when preparing the job script.
+[#5343](https://github.com/cylc/cylc-flow/pull/5343) - Fix a bug causing
+platform names to be checked as if they were hosts.
+
[#5359](https://github.com/cylc/cylc-flow/pull/5359) - Fix bug where viewing
a workflow's log in the GUI or using `cylc cat-log` would prevent `cylc clean`
from working.
+-------------------------------------------------------------------------------
+## __cylc-8.2.0 (Coming Soon)__
+
+### Fixes
+[#5328](https://github.com/cylc/cylc-flow/pull/5328) -
+Efficiency improvements to reduce task management overheads on the Scheduler.
+
-------------------------------------------------------------------------------
## __cylc-8.1.2 (Released 2023-02-20)__
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 28c7dd5a96c..33a9163ba42 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,28 +1,37 @@
# Cylc: How to Contribute
-## Report Bugs
+Thanks for your interest in the Cylc project!
-Report bugs by opening an issue at [Cylc Issues on
-Github](https://github.com/cylc/cylc-flow/issues). Give the Cylc version
-affected by the bug (you should test the latest release if possible) and a
-recipe to reproduce the problem.
+Contributions are welcome; please open an issue to discuss changes before
+raising a pull request.
-## Request Enhancements
+You can also get in touch via:
+
+* The developers chat: [![chat](https://img.shields.io/matrix/cylc-general:matrix.org)](https://matrix.to/#/#cylc-general:matrix.org)
+* The forum: [![forum](https://img.shields.io/discourse/https/cylc.discourse.group/posts.svg)](https://cylc.discourse.group/)
+
+
+## New Contributors
+
+Please read the [CLA](#contributor-licence-agreement-and-certificate-of-origin).
+
+Please add your name to the
+[Code Contributors](#code-contributors) section of this file as part of your
+first Pull Request (for each Cylc repository you contribute to).
-Request enhancements by opening an issue at [Cylc Issues @
-Github](https://github.com/cylc/cylc-flow/issues). Describe your use case in
-detail.
## Contribute Code
-All contributions to Cylc are made via Pull Requests against the *master*
-branch of [cylc/cylc-flow](https://github.com/cylc/cylc-flow). Non-trivial
-developments must be discussed and agreed in advance in a [Cylc
-Issue](https://github.com/cylc/cylc-flow/issues) as the team may not be able to
-consider large changes that appear out of the blue. New contributors should
-add their details to the [Code Contributors](#code-contributors) section of
-this file as part of their first Pull Request, and reviewers are responsible
-for checking this before merging the new branch.
+**Enhancements** are made on the `master` branch.
+
+**Bugfixes** are made on the branch of the same name as the issue's milestone.
+E.g. if the issue is on the `8.0.x` milestone, branch off `8.0.x` to
+develop your bugfix, then raise the pull request against the `8.0.x` branch.
+We will later merge the `8.0.x` branch into `master`.
+
+Feel free to ask questions on the issue or developers chat if unsure about
+anything.
+
## Code Contributors
@@ -78,6 +87,7 @@ requests_).
(All contributors are identifiable with email addresses in the git version
control logs or otherwise.)
+
## Contributor Licence Agreement and Certificate of Origin
By making a contribution to this project, I certify that:
diff --git a/README.md b/README.md
index dd88c391caf..b3976ff265d 100644
--- a/README.md
+++ b/README.md
@@ -53,6 +53,12 @@ cylc play example
cylc tui example
```
+### The Cylc Ecosystem
+
+- [cylc-flow](https://github.com/cylc/cylc-flow) - The core Cylc Scheduler for defining and running workflows.
+- [cylc-uiserver](https://github.com/cylc/cylc-uiserver) - The web-based Cylc graphical user interface for monitoring and controlling workflows.
+- [cylc-rose](https://github.com/cylc/cylc-rose) - Provides integration with [Rose](http://metomi.github.io/rose/).
+
### Migrating From Cylc 7
[Migration Guide](https://cylc.github.io/cylc-doc/stable/html/7-to-8/index.html)
diff --git a/conda-environment.yml b/conda-environment.yml
index 2a33dada664..20f9ae11f8d 100644
--- a/conda-environment.yml
+++ b/conda-environment.yml
@@ -2,7 +2,6 @@ name: cylc-dev
channels:
- conda-forge
dependencies:
- - aiofiles >=0.7.0,<0.8.0
- ansimarkup >=1.0.0
- async-timeout>=3.0.0
- colorama >=0.4,<1.0
@@ -15,7 +14,7 @@ dependencies:
- protobuf >=4.21.2,<4.22.0
- psutil >=5.6.0
- python
- - pyzmq >=22,<23
+ - pyzmq >=22
- setuptools >=49, <67
- urwid >=2,<3
# Add # [py<3.11] for tomli once Python 3.11 Released
diff --git a/cylc/flow/__init__.py b/cylc/flow/__init__.py
index d5c6af6bcbf..600ad50c87f 100644
--- a/cylc/flow/__init__.py
+++ b/cylc/flow/__init__.py
@@ -46,7 +46,7 @@ def environ_init():
environ_init()
-__version__ = '8.1.3.dev'
+__version__ = '8.2.0.dev'
def iter_entry_points(entry_point_name):
diff --git a/cylc/flow/async_util.py b/cylc/flow/async_util.py
index aa62b39acc7..73826ffe3ce 100644
--- a/cylc/flow/async_util.py
+++ b/cylc/flow/async_util.py
@@ -16,12 +16,11 @@
"""Utilities for use with asynchronous code."""
import asyncio
+from functools import partial, wraps
import os
from pathlib import Path
from typing import List, Union
-from aiofiles.os import wrap # type: ignore[attr-defined]
-
from cylc.flow import LOG
@@ -60,12 +59,12 @@ class _AsyncPipe:
"""
def __init__(
- self,
- func,
- args=None,
- kwargs=None,
- filter_stop=True,
- preserve_order=True
+ self,
+ func,
+ args=None,
+ kwargs=None,
+ filter_stop=True,
+ preserve_order=True
):
self.func = func
self.args = args or ()
@@ -393,9 +392,6 @@ def _pipe(func):
return _pipe
-async_listdir = wrap(os.listdir)
-
-
async def scandir(path: Union[Path, str]) -> List[Path]:
"""Asynchronous directory listing (performs os.listdir in an executor)."""
return [
@@ -449,3 +445,23 @@ async def unordered_map(coroutine, iterator):
)
for task in done:
yield task._args, task.result()
+
+
+def make_async(fcn):
+ """Make a synchronous function async by running it in an executor.
+
+ The default asyncio executor is the ThreadPoolExecutor, so this is
+ essentially syntactic sugar for running the wrapped function in a thread.
+ """
+ @wraps(fcn)
+ async def _fcn(*args, executor=None, **kwargs):
+ nonlocal fcn
+ return await asyncio.get_event_loop().run_in_executor(
+ executor,
+ partial(fcn, *args, **kwargs),
+ )
+
+ return _fcn
+
+
+async_listdir = make_async(os.listdir)
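A minimal usage sketch of the replacement helper (the `main` coroutine and
`blocking_stat` wrapper below are illustrative, not part of this diff):

```python
import asyncio
import os

from cylc.flow.async_util import async_listdir, make_async

# wrap any blocking call; it runs in the default ThreadPoolExecutor
blocking_stat = make_async(os.stat)

async def main():
    entries = await async_listdir('.')  # replaces the aiofiles-wrapped listdir
    info = await blocking_stat('.')     # same pattern for any sync function
    print(len(entries), oct(info.st_mode))

asyncio.run(main())
```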
diff --git a/cylc/flow/broadcast_mgr.py b/cylc/flow/broadcast_mgr.py
index 534fd85fa60..81504055052 100644
--- a/cylc/flow/broadcast_mgr.py
+++ b/cylc/flow/broadcast_mgr.py
@@ -18,6 +18,7 @@
import re
from copy import deepcopy
from threading import RLock
+from typing import Optional, TYPE_CHECKING
from cylc.flow import LOG
from cylc.flow.broadcast_report import (
@@ -27,12 +28,15 @@
get_broadcast_bad_options_report,
)
from cylc.flow.cfgspec.workflow import SPEC
-from cylc.flow.id import Tokens
from cylc.flow.cycling.loader import get_point, standardise_point_string
from cylc.flow.exceptions import PointParsingError
from cylc.flow.parsec.util import listjoin
from cylc.flow.parsec.validate import BroadcastConfigValidator
+if TYPE_CHECKING:
+ from cylc.flow.id import Tokens
+
+
ALL_CYCLE_POINTS_STRS = ["*", "all-cycle-points", "all-cycles"]
@@ -156,28 +160,21 @@ def expire_broadcast(self, cutoff=None, **kwargs):
return (None, {"expire": [cutoff]})
return self.clear_broadcast(point_strings=point_strings, **kwargs)
- def get_broadcast(self, task_id=None):
+ def get_broadcast(self, tokens: 'Optional[Tokens]' = None) -> dict:
"""Retrieve all broadcast variables that target a given task ID."""
- if task_id == "None":
- task_id = None
- if not task_id:
+ if tokens is None or tokens == 'None':
# all broadcasts requested
return self.broadcasts
- try:
- tokens = Tokens(task_id, relative=True)
- name = tokens['task']
- point_string = tokens['cycle']
- except ValueError:
- raise Exception("Can't split task_id %s" % task_id)
-
- ret = {}
+ ret: dict = {}
# The order is:
# all:root -> all:FAM -> ... -> all:task
# -> tag:root -> tag:FAM -> ... -> tag:task
- for cycle in ALL_CYCLE_POINTS_STRS + [point_string]:
+ for cycle in ALL_CYCLE_POINTS_STRS + [tokens['cycle']]:
if cycle not in self.broadcasts:
continue
- for namespace in reversed(self.linearized_ancestors[name]):
+ for namespace in reversed(
+ self.linearized_ancestors[tokens['task']]
+ ):
if namespace in self.broadcasts[cycle]:
addict(ret, self.broadcasts[cycle][namespace])
return ret
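For illustration, a sketch of the new call pattern (`broadcast_mgr` is an
assumed existing BroadcastMgr instance; `1/foo` is an illustrative relative
task ID):

```python
from cylc.flow.id import Tokens

# all broadcasts:
all_settings = broadcast_mgr.get_broadcast()

# broadcasts targeting task "foo" at cycle point "1", merged in the
# documented order (all:root -> ... -> 1:foo):
task_settings = broadcast_mgr.get_broadcast(Tokens('1/foo', relative=True))
```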
diff --git a/cylc/flow/cfgspec/globalcfg.py b/cylc/flow/cfgspec/globalcfg.py
index 669e4ec6464..ea353484fe4 100644
--- a/cylc/flow/cfgspec/globalcfg.py
+++ b/cylc/flow/cfgspec/globalcfg.py
@@ -1399,6 +1399,17 @@ def default_for(
desc=f'''
{LOG_RETR_SETTINGS['retrieve job logs command']}
+ .. note::
+ The default command (``rsync -a``) means that the retrieved
+ files (and the directories above including ``job/log``) get
+ the same permissions as on the remote host. This can cause
+ problems if the remote host uses different permissions to
+ the scheduler host (e.g. no world read access). To avoid
+ this problem you can set the command to
+ ``rsync -a --no-p --no-g --chmod=ugo=rwX`` which means the
+ retrieved files get the default permissions used on the
+ scheduler host.
+
.. versionchanged:: 8.0.0
{REPLACES}``global.rc[hosts][]retrieve job logs
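A sketch of how to check the effective value of this setting (assuming the
built-in `localhost` platform and the `[platforms]` layout of global.cylc):

```python
from cylc.flow.cfgspec.glbl_cfg import glbl_cfg

cmd = glbl_cfg().get(['platforms', 'localhost', 'retrieve job logs command'])
print(cmd)  # 'rsync -a' unless overridden in global.cylc
```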
diff --git a/cylc/flow/cfgspec/workflow.py b/cylc/flow/cfgspec/workflow.py
index dc7e4feb625..9a94f1783d5 100644
--- a/cylc/flow/cfgspec/workflow.py
+++ b/cylc/flow/cfgspec/workflow.py
@@ -720,10 +720,11 @@ def get_script_common_text(this: str, example: Optional[str] = None):
.. deprecated:: 8.0.0
- Please read :ref:`Section External Triggers` before
- using the older clock triggers described in this section.
+ These are now auto-upgraded to the newer wall_clock xtriggers
+ (see :ref:`Section External Triggers`). The old way of defining
+ clock-triggers will be removed in an upcoming Cylc version.
- Clock-trigger tasks (see :ref:`ClockTriggerTasks`) wait on a wall
+ Clock-triggered tasks (see :ref:`ClockTriggerTasks`) wait on a wall
clock time specified as an offset from their own cycle point.
Example:
diff --git a/cylc/flow/clean.py b/cylc/flow/clean.py
new file mode 100644
index 00000000000..3658a35f835
--- /dev/null
+++ b/cylc/flow/clean.py
@@ -0,0 +1,495 @@
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+"""Functionality for workflow removal."""
+
+import glob
+import os
+import sqlite3
+from collections import deque
+from contextlib import suppress
+from functools import partial
+from pathlib import Path
+from random import shuffle
+from subprocess import (
+ DEVNULL,
+ PIPE,
+ Popen,
+)
+from time import sleep
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Container,
+ Deque,
+ Dict,
+ Iterable,
+ List,
+ NamedTuple,
+ Optional,
+ Set,
+ Union,
+)
+
+from cylc.flow import LOG
+from cylc.flow.cfgspec.glbl_cfg import glbl_cfg
+from cylc.flow.exceptions import (
+ CylcError,
+ InputError,
+ PlatformError,
+ PlatformLookupError,
+ ServiceFileError,
+)
+from cylc.flow.pathutil import (
+ get_workflow_run_dir,
+ parse_rm_dirs,
+ remove_dir_and_target,
+ remove_dir_or_file,
+ remove_empty_parents,
+)
+from cylc.flow.platforms import (
+ get_host_from_platform,
+ get_install_target_to_platforms_map,
+ get_localhost_install_target,
+)
+from cylc.flow.remote import construct_ssh_cmd
+from cylc.flow.rundb import CylcWorkflowDAO
+from cylc.flow.workflow_files import (
+ WorkflowFiles,
+ detect_old_contact_file,
+ get_symlink_dirs,
+ get_workflow_srv_dir,
+ infer_latest_run,
+ validate_workflow_name,
+)
+
+if TYPE_CHECKING:
+ from optparse import Values
+
+
+class RemoteCleanQueueTuple(NamedTuple):
+ proc: 'Popen[str]'
+ install_target: str
+ platforms: List[Dict[str, Any]]
+
+
+async def get_contained_workflows(partial_id) -> List[str]:
+ """Return the sorted names of any workflows in a directory.
+
+ Args:
+ partial_id: ID of the directory (relative to ~/cylc-run) to scan
+ for contained workflows.
+ """
+ from cylc.flow.network.scan import scan
+ run_dir = Path(get_workflow_run_dir(partial_id))
+ # Note: increased scan depth for safety
+ scan_depth = glbl_cfg().get(['install', 'max depth']) + 1
+ return sorted(
+ [i['name'] async for i in scan(scan_dir=run_dir, max_depth=scan_depth)]
+ )
+
+
+def _clean_check(opts: 'Values', reg: str, run_dir: Path) -> None:
+ """Check whether a workflow can be cleaned.
+
+ Args:
+ reg: Workflow name.
+ run_dir: Path to the workflow run dir on the filesystem.
+ """
+ validate_workflow_name(reg)
+ reg = os.path.normpath(reg)
+ # Thing to clean must be a dir or broken symlink:
+ if not run_dir.is_dir() and not run_dir.is_symlink():
+ raise FileNotFoundError(f"No directory to clean at {run_dir}")
+ db_path = (
+ run_dir / WorkflowFiles.Service.DIRNAME / WorkflowFiles.Service.DB
+ )
+ if opts.local_only and not db_path.is_file():
+ # Will reach here if this is cylc clean re-invoked on remote host
+ # (workflow DB only exists on scheduler host); don't need to worry
+ # about contact file.
+ return
+ try:
+ detect_old_contact_file(reg)
+ except ServiceFileError as exc:
+ raise ServiceFileError(
+ f"Cannot clean running workflow {reg}.\n\n{exc}"
+ )
+
+
+def init_clean(id_: str, opts: 'Values') -> None:
+ """Initiate the process of removing a stopped workflow from the local
+ scheduler filesystem and remote hosts.
+
+ Args:
+ id_: Workflow ID.
+ opts: CLI options object for cylc clean.
+
+ """
+ local_run_dir = Path(get_workflow_run_dir(id_))
+ with suppress(InputError):
+ local_run_dir, id_ = infer_latest_run(
+ local_run_dir, implicit_runN=False, warn_runN=False
+ )
+ try:
+ _clean_check(opts, id_, local_run_dir)
+ except FileNotFoundError as exc:
+ LOG.info(exc)
+ return
+
+ # Parse --rm option to make sure it's valid
+ rm_dirs = parse_rm_dirs(opts.rm_dirs) if opts.rm_dirs else None
+
+ if not opts.local_only:
+ platform_names = None
+ db_file = Path(get_workflow_srv_dir(id_), 'db')
+ if not db_file.is_file():
+ # no DB -> do nothing
+ if opts.remote_only:
+ raise ServiceFileError(
+ f"No workflow database for {id_} - cannot perform "
+ "remote clean"
+ )
+ LOG.info(
+ f"No workflow database for {id_} - will only clean locally"
+ )
+ else:
+ # DB present -> load platforms
+ try:
+ platform_names = get_platforms_from_db(local_run_dir)
+ except ServiceFileError as exc:
+ raise ServiceFileError(f"Cannot clean {id_} - {exc}")
+ except sqlite3.OperationalError as exc:
+ # something went wrong with the query
+ # e.g. the table/field we need isn't there
+ LOG.warning(
+ 'This database is either corrupted or not compatible with'
+ ' this version of "cylc clean".'
+ '\nTry using the version of Cylc the workflow was last run'
+ ' with to remove it.'
+ '\nOtherwise please delete the database file.'
+ )
+ raise ServiceFileError(f"Cannot clean {id_} - {exc}")
+
+ if platform_names and platform_names != {'localhost'}:
+ remote_clean(
+ id_, platform_names, opts.rm_dirs, opts.remote_timeout
+ )
+
+ if not opts.remote_only:
+ # Must be after remote clean
+ clean(id_, local_run_dir, rm_dirs)
+
+
+def clean(id_: str, run_dir: Path, rm_dirs: Optional[Set[str]] = None) -> None:
+ """Remove a stopped workflow from the local filesystem only.
+
+ Deletes the workflow run directory and any symlink dirs, or just the
+ specified sub dirs if rm_dirs is specified.
+
+ Note: if the run dir has already been manually deleted, it will not be
+ possible to clean any symlink dirs.
+
+ Args:
+ id_: Workflow ID.
+ run_dir: Absolute path of the workflow's run dir.
+ rm_dirs: Set of sub dirs to remove instead of the whole run dir.
+
+ """
+ symlink_dirs = get_symlink_dirs(id_, run_dir)
+ if rm_dirs is not None:
+ # Targeted clean
+ for pattern in rm_dirs:
+ _clean_using_glob(run_dir, pattern, symlink_dirs)
+ else:
+ # Wholesale clean
+ LOG.debug(f"Cleaning {run_dir}")
+ for symlink in symlink_dirs:
+ # Remove <symlink_dir>/cylc-run/<id>/<symlink>
+ remove_dir_and_target(run_dir / symlink)
+ if '' not in symlink_dirs:
+ # if run dir isn't a symlink dir and hasn't been deleted yet
+ remove_dir_and_target(run_dir)
+
+ # Tidy up if necessary
+ # Remove `runN` symlink if it's now broken
+ runN = run_dir.parent / WorkflowFiles.RUN_N
+ if (
+ runN.is_symlink() and
+ not run_dir.exists() and
+ os.readlink(str(runN)) == run_dir.name
+ ):
+ runN.unlink()
+ # Remove _cylc-install if it's the only thing left
+ cylc_install_dir = run_dir.parent / WorkflowFiles.Install.DIRNAME
+ for entry in run_dir.parent.iterdir():
+ if entry == cylc_install_dir:
+ continue
+ break
+ else: # no break
+ if cylc_install_dir.is_dir():
+ remove_dir_or_file(cylc_install_dir)
+ # Remove any empty parents of run dir up to ~/cylc-run/
+ remove_empty_parents(run_dir, id_)
+ for symlink, target in symlink_dirs.items():
+ # Remove empty parents of symlink target up to /cylc-run/
+ remove_empty_parents(target, Path(id_, symlink))
+
+
+def glob_in_run_dir(
+ run_dir: Union[Path, str], pattern: str, symlink_dirs: Container[Path]
+) -> List[Path]:
+ """Execute a (recursive) glob search in the given run directory.
+
+ Returns list of any absolute paths that match the pattern. However:
+ * Does not follow symlinks (apart from the specified symlink dirs).
+ * Also does not return matching subpaths of matching directories (because
+ that would be redundant).
+
+ Args:
+ run_dir: Absolute path of the workflow run dir.
+ pattern: The glob pattern.
+ symlink_dirs: Absolute paths to the workflow's symlink dirs.
+ """
+ # Note: use os.path.join, not pathlib, to preserve trailing slash if
+ # present in pattern
+ pattern = os.path.join(glob.escape(str(run_dir)), pattern)
+ # Note: don't use pathlib.Path.glob() because when you give it an exact
+ # filename instead of pattern, it doesn't return broken symlinks
+ matches = sorted(Path(i) for i in glob.iglob(pattern, recursive=True))
+ # sort guarantees parents come before their children
+ if len(matches) == 1 and not os.path.lexists(matches[0]):
+ # https://bugs.python.org/issue35201
+ return []
+ results: List[Path] = []
+ subpath_excludes: Set[Path] = set()
+ for path in matches:
+ for rel_ancestor in reversed(path.relative_to(run_dir).parents):
+ ancestor = run_dir / rel_ancestor
+ if ancestor in subpath_excludes:
+ break
+ if ancestor.is_symlink() and ancestor not in symlink_dirs:
+ # Do not follow non-standard symlinks
+ subpath_excludes.add(ancestor)
+ break
+ if not symlink_dirs and (ancestor in results):
+ # We can be sure all subpaths of this ancestor are redundant
+ subpath_excludes.add(ancestor)
+ break
+ if ancestor == path.parent: # noqa: SIM102
+ # Final iteration over ancestors
+ if ancestor in matches and path not in symlink_dirs:
+ # Redundant (but don't exclude subpaths in case any of the
+ # subpaths are std symlink dirs)
+ break
+ else: # No break
+ results.append(path)
+ return results
+
+
+def _clean_using_glob(
+ run_dir: Path, pattern: str, symlink_dirs: Iterable[str]
+) -> None:
+ """Delete the files/dirs in the run dir that match the pattern.
+
+ Does not follow symlinks (apart from the standard symlink dirs).
+
+ Args:
+ run_dir: Absolute path of workflow run dir.
+ pattern: The glob pattern.
+ symlink_dirs: Paths of the workflow's symlink dirs relative to
+ the run dir.
+ """
+ abs_symlink_dirs = tuple(sorted(
+ (run_dir / d for d in symlink_dirs),
+ reverse=True # ordered by deepest to shallowest
+ ))
+ matches = glob_in_run_dir(run_dir, pattern, abs_symlink_dirs)
+ if not matches:
+ LOG.info(f"No files matching '{pattern}' in {run_dir}")
+ return
+ # First clean any matching symlink dirs
+ for path in abs_symlink_dirs:
+ if path in matches:
+ remove_dir_and_target(path)
+ if path == run_dir:
+ # We have deleted the run dir
+ return
+ matches.remove(path)
+ # Now clean the rest
+ for path in matches:
+ remove_dir_or_file(path)
+
+
+def remote_clean(
+ reg: str,
+ platform_names: Iterable[str],
+ rm_dirs: Optional[List[str]] = None,
+ timeout: str = '120'
+) -> None:
+ """Run subprocesses to clean workflows on remote install targets
+ (skip localhost), given a set of platform names to look up.
+
+ Args:
+ reg: Workflow name.
+ platform_names: List of platform names to look up in the global
+ config, in order to determine the install targets to clean on.
+ rm_dirs: Sub dirs to remove instead of the whole run dir.
+ timeout: Number of seconds to wait before cancelling.
+ """
+ try:
+ install_targets_map = (
+ get_install_target_to_platforms_map(platform_names))
+ except PlatformLookupError as exc:
+ raise PlatformLookupError(
+ f"Cannot clean {reg} on remote platforms as the workflow database "
+ f"is out of date/inconsistent with the global config - {exc}")
+ queue: Deque[RemoteCleanQueueTuple] = deque()
+ remote_clean_cmd = partial(
+ _remote_clean_cmd, reg=reg, rm_dirs=rm_dirs, timeout=timeout
+ )
+ for target, platforms in install_targets_map.items():
+ if target == get_localhost_install_target():
+ continue
+ shuffle(platforms)
+ LOG.info(
+ f"Cleaning {reg} on install target: "
+ f"{platforms[0]['install target']}"
+ )
+ # Issue ssh command:
+ queue.append(
+ RemoteCleanQueueTuple(
+ remote_clean_cmd(platform=platforms[0]), target, platforms
+ )
+ )
+ failed_targets: Dict[str, PlatformError] = {}
+ # Handle subproc pool results almost concurrently:
+ while queue:
+ item = queue.popleft()
+ ret_code = item.proc.poll()
+ if ret_code is None: # proc still running
+ queue.append(item)
+ continue
+ out, err = item.proc.communicate()
+ if out:
+ LOG.info(f"[{item.install_target}]\n{out}")
+ if ret_code:
+ this_platform = item.platforms.pop(0)
+ excp = PlatformError(
+ PlatformError.MSG_TIDY,
+ this_platform['name'],
+ cmd=item.proc.args,
+ ret_code=ret_code,
+ out=out,
+ err=err,
+ )
+ if ret_code == 255 and item.platforms:
+ # SSH error; try again using the next platform for this
+ # install target
+ LOG.debug(excp)
+ queue.append(
+ item._replace(
+ proc=remote_clean_cmd(platform=item.platforms[0])
+ )
+ )
+ else: # Exhausted list of platforms
+ failed_targets[item.install_target] = excp
+ elif err:
+ # Only show stderr from remote host in debug mode if ret code 0
+ # because stderr often contains useless stuff like ssh login
+ # messages
+ LOG.debug(f"[{item.install_target}]\n{err}")
+ sleep(0.2)
+ if failed_targets:
+ for target, excp in failed_targets.items():
+ LOG.error(
+ f"Could not clean {reg} on install target: {target}\n{excp}"
+ )
+ raise CylcError(f"Remote clean failed for {reg}")
+
+
+def _remote_clean_cmd(
+ reg: str,
+ platform: Dict[str, Any],
+ rm_dirs: Optional[List[str]],
+ timeout: str
+) -> 'Popen[str]':
+ """Remove a stopped workflow on a remote host.
+
+ Call "cylc clean --local-only" over ssh and return the subprocess.
+
+ Args:
+ reg: Workflow name.
+ platform: Config for the platform on which to remove the workflow.
+ rm_dirs: Sub dirs to remove instead of the whole run dir.
+ timeout: Number of seconds to wait before cancelling the command.
+
+ Raises:
+ NoHostsError: If the platform is not contactable.
+
+ """
+ LOG.debug(
+ f"Cleaning {reg} on install target: {platform['install target']} "
+ f"(using platform: {platform['name']})"
+ )
+ cmd = ['clean', '--local-only', reg]
+ if rm_dirs is not None:
+ for item in rm_dirs:
+ cmd.extend(['--rm', item])
+ cmd = construct_ssh_cmd(
+ cmd,
+ platform,
+ get_host_from_platform(platform),
+ timeout=timeout,
+ set_verbosity=True,
+ )
+ LOG.debug(" ".join(cmd))
+ return Popen( # nosec
+ cmd,
+ stdin=DEVNULL,
+ stdout=PIPE,
+ stderr=PIPE,
+ text=True,
+ )
+ # * command constructed by internal interface
+
+
+def get_platforms_from_db(run_dir: Path) -> Set[str]:
+ """Return the set of names of platforms (that jobs ran on) from the DB.
+
+ Warning:
+ This does NOT upgrade the workflow database!
+
+ We could upgrade the DB for backward compatibility, but we haven't
+ got any upgraders for this table yet so there's no point.
+
+ Note that upgrading the DB here would not help with forward
+ compatibility. We can't apply upgraders which don't exist yet.
+
+ Args:
+ run_dir: The workflow run directory.
+
+ Raises:
+ sqlite3.OperationalError: in the event the table/field required for
+ cleaning is not present.
+
+ """
+ with CylcWorkflowDAO(
+ run_dir / WorkflowFiles.Service.DIRNAME / WorkflowFiles.Service.DB
+ ) as pri_dao:
+ platform_names = pri_dao.select_task_job_platforms()
+
+ return platform_names
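A sketch of driving the new module programmatically, mirroring what
`cylc clean` does; the option attributes below are the minimal set that
init_clean() reads, and 'my_flow/run1' is an illustrative workflow ID:

```python
from optparse import Values

from cylc.flow.clean import init_clean

opts = Values({
    'local_only': False,      # also clean on remote install targets
    'remote_only': False,     # ...and on the scheduler filesystem
    'rm_dirs': None,          # None => remove the whole run dir
    'remote_timeout': '120',  # seconds before a remote clean is cancelled
})
init_clean('my_flow/run1', opts)
```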
diff --git a/cylc/flow/config.py b/cylc/flow/config.py
index a549c9ed771..818fa6c62a5 100644
--- a/cylc/flow/config.py
+++ b/cylc/flow/config.py
@@ -113,7 +113,6 @@
from optparse import Values
from cylc.flow.cycling import IntervalBase, PointBase, SequenceBase
-
RE_CLOCK_OFFSET = re.compile(
rf'''
^
@@ -125,6 +124,7 @@
''',
re.X,
)
+
RE_EXT_TRIGGER = re.compile(
r'''
^
@@ -260,7 +260,6 @@ def __init__(
'SequenceBase', Set[Tuple[str, str, bool, bool]]
] = {}
self.taskdefs: Dict[str, TaskDef] = {}
- self.clock_offsets = {}
self.expiration_offsets = {}
self.ext_triggers = {} # Old external triggers (client/server)
self.xtrigger_mgr = xtrigger_mgr
@@ -501,14 +500,10 @@ def __init__(
# (sub-family)
continue
result.append(member + extn)
- if s_type == 'clock-trigger':
- self.clock_offsets[member] = offset_interval
if s_type == 'clock-expire':
self.expiration_offsets[member] = offset_interval
if s_type == 'external-trigger':
self.ext_triggers[member] = ext_trigger_msg
- elif s_type == 'clock-trigger':
- self.clock_offsets[name] = offset_interval
elif s_type == 'clock-expire':
self.expiration_offsets[name] = offset_interval
elif s_type == 'external-trigger':
@@ -555,6 +550,8 @@ def __init__(
raise WorkflowConfigError(
"external triggers must be used only once.")
+ self.upgrade_clock_triggers()
+
self.leaves = self.get_task_name_list()
for ancestors in self.runtime['first-parent ancestors'].values():
try:
@@ -1746,7 +1743,6 @@ def generate_triggers(self, lexpression, left_nodes, right, seq,
)
for label in xtrig_labels:
-
try:
xtrig = self.cfg['scheduling']['xtriggers'][label]
except KeyError:
@@ -2284,8 +2280,6 @@ def _get_taskdef(self, name: str) -> TaskDef:
# TODO - put all taskd.foo items in a single config dict
- if name in self.clock_offsets:
- taskd.clocktrigger_offset = self.clock_offsets[name]
if name in self.expiration_offsets:
taskd.expiration_offset = self.expiration_offsets[name]
if name in self.ext_triggers:
@@ -2436,3 +2430,34 @@ def check_for_owner(tasks: Dict) -> None:
for task, _ in list(owners.items())[:5]:
msg += f'\n * {task}'
raise WorkflowConfigError(msg)
+
+ def upgrade_clock_triggers(self):
+ """Convert old-style clock triggers to clock xtriggers.
+
+ [[special tasks]]
+ clock-trigger = foo(PT1D)
+
+ becomes:
+
+ [[xtriggers]]
+ _cylc_wall_clock_foo = wall_clock(PT1D)
+
+ Not done by parsec upgrade because the graph has to be parsed first.
+ """
+ for item in self.cfg['scheduling']['special tasks']['clock-trigger']:
+ match = RE_CLOCK_OFFSET.match(item)
+ # (Already validated during "special tasks" parsing above.)
+ task_name, offset = match.groups()
+ # Derive an xtrigger label.
+ label = '_'.join(('_cylc', 'wall_clock', task_name))
+ # Define the xtrigger function.
+ xtrig = SubFuncContext(label, 'wall_clock', [], {})
+ xtrig.func_kwargs["offset"] = offset
+ if self.xtrigger_mgr is None:
+ XtriggerManager.validate_xtrigger(label, xtrig, self.fdir)
+ else:
+ self.xtrigger_mgr.add_trig(label, xtrig, self.fdir)
+ # Add it to the task, for each sequence that the task appears in.
+ taskdef = self.get_taskdef(task_name)
+ for seq in taskdef.sequences:
+ taskdef.add_xtrig_label(label, seq)
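A sketch of the parse step the upgrader relies on; RE_CLOCK_OFFSET is the
module-level pattern shown earlier in this file, and 'foo(PT1D)' is an
illustrative clock-trigger item:

```python
from cylc.flow.config import RE_CLOCK_OFFSET

match = RE_CLOCK_OFFSET.match('foo(PT1D)')
task_name, offset = match.groups()  # -> ('foo', 'PT1D')
label = '_'.join(('_cylc', 'wall_clock', task_name))  # '_cylc_wall_clock_foo'
```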
diff --git a/cylc/flow/data_messages.proto b/cylc/flow/data_messages.proto
index 346f0f0159b..10623314ae6 100644
--- a/cylc/flow/data_messages.proto
+++ b/cylc/flow/data_messages.proto
@@ -194,12 +194,6 @@ message PbOutput {
optional double time = 4;
}
-message PbClockTrigger {
- optional double time = 1;
- optional string time_string = 2;
- optional bool satisfied = 3;
-}
-
message PbTrigger {
optional string id = 1;
optional string label = 2;
@@ -226,7 +220,6 @@ message PbTaskProxy {
repeated string edges = 18;
repeated string ancestors = 19;
optional string flow_nums = 20;
- optional PbClockTrigger clock_trigger = 22;
map external_triggers = 23;
map xtriggers = 24;
optional bool is_queued = 25;
diff --git a/cylc/flow/data_messages_pb2.py b/cylc/flow/data_messages_pb2.py
index ece523ed3b0..f4963ac11f6 100644
--- a/cylc/flow/data_messages_pb2.py
+++ b/cylc/flow/data_messages_pb2.py
@@ -14,7 +14,7 @@
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13\x64\x61ta_messages.proto\"\x96\x01\n\x06PbMeta\x12\x12\n\x05title\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0b\x64\x65scription\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x10\n\x03URL\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x19\n\x0cuser_defined\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_titleB\x0e\n\x0c_descriptionB\x06\n\x04_URLB\x0f\n\r_user_defined\"\xaa\x01\n\nPbTimeZone\x12\x12\n\x05hours\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x14\n\x07minutes\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x19\n\x0cstring_basic\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1c\n\x0fstring_extended\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_hoursB\n\n\x08_minutesB\x0f\n\r_string_basicB\x12\n\x10_string_extended\"\'\n\x0fPbTaskProxyRefs\x12\x14\n\x0ctask_proxies\x18\x01 \x03(\t\"\xf2\x0b\n\nPbWorkflow\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06status\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x11\n\x04host\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x11\n\x04port\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x12\n\x05owner\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\r\n\x05tasks\x18\x08 \x03(\t\x12\x10\n\x08\x66\x61milies\x18\t \x03(\t\x12\x1c\n\x05\x65\x64ges\x18\n \x01(\x0b\x32\x08.PbEdgesH\x07\x88\x01\x01\x12\x18\n\x0b\x61pi_version\x18\x0b \x01(\x05H\x08\x88\x01\x01\x12\x19\n\x0c\x63ylc_version\x18\x0c \x01(\tH\t\x88\x01\x01\x12\x19\n\x0clast_updated\x18\r \x01(\x01H\n\x88\x01\x01\x12\x1a\n\x04meta\x18\x0e \x01(\x0b\x32\x07.PbMetaH\x0b\x88\x01\x01\x12&\n\x19newest_active_cycle_point\x18\x10 \x01(\tH\x0c\x88\x01\x01\x12&\n\x19oldest_active_cycle_point\x18\x11 \x01(\tH\r\x88\x01\x01\x12\x15\n\x08reloaded\x18\x12 \x01(\x08H\x0e\x88\x01\x01\x12\x15\n\x08run_mode\x18\x13 \x01(\tH\x0f\x88\x01\x01\x12\x19\n\x0c\x63ycling_mode\x18\x14 \x01(\tH\x10\x88\x01\x01\x12\x32\n\x0cstate_totals\x18\x15 \x03(\x0b\x32\x1c.PbWorkflow.StateTotalsEntry\x12\x1d\n\x10workflow_log_dir\x18\x16 \x01(\tH\x11\x88\x01\x01\x12(\n\x0etime_zone_info\x18\x17 \x01(\x0b\x32\x0b.PbTimeZoneH\x12\x88\x01\x01\x12\x17\n\ntree_depth\x18\x18 \x01(\x05H\x13\x88\x01\x01\x12\x15\n\rjob_log_names\x18\x19 \x03(\t\x12\x14\n\x0cns_def_order\x18\x1a \x03(\t\x12\x0e\n\x06states\x18\x1b \x03(\t\x12\x14\n\x0ctask_proxies\x18\x1c \x03(\t\x12\x16\n\x0e\x66\x61mily_proxies\x18\x1d \x03(\t\x12\x17\n\nstatus_msg\x18\x1e \x01(\tH\x14\x88\x01\x01\x12\x1a\n\ris_held_total\x18\x1f \x01(\x05H\x15\x88\x01\x01\x12\x0c\n\x04jobs\x18 \x03(\t\x12\x15\n\x08pub_port\x18! 
\x01(\x05H\x16\x88\x01\x01\x12\x17\n\nbroadcasts\x18\" \x01(\tH\x17\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18# \x01(\x05H\x18\x88\x01\x01\x12=\n\x12latest_state_tasks\x18$ \x03(\x0b\x32!.PbWorkflow.LatestStateTasksEntry\x12\x13\n\x06pruned\x18% \x01(\x08H\x19\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18& \x01(\x05H\x1a\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1aI\n\x15LatestStateTasksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1f\n\x05value\x18\x02 \x01(\x0b\x32\x10.PbTaskProxyRefs:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\t\n\x07_statusB\x07\n\x05_hostB\x07\n\x05_portB\x08\n\x06_ownerB\x08\n\x06_edgesB\x0e\n\x0c_api_versionB\x0f\n\r_cylc_versionB\x0f\n\r_last_updatedB\x07\n\x05_metaB\x1c\n\x1a_newest_active_cycle_pointB\x1c\n\x1a_oldest_active_cycle_pointB\x0b\n\t_reloadedB\x0b\n\t_run_modeB\x0f\n\r_cycling_modeB\x13\n\x11_workflow_log_dirB\x11\n\x0f_time_zone_infoB\r\n\x0b_tree_depthB\r\n\x0b_status_msgB\x10\n\x0e_is_held_totalB\x0b\n\t_pub_portB\r\n\x0b_broadcastsB\x12\n\x10_is_queued_totalB\t\n\x07_prunedB\x14\n\x12_is_runahead_total\"\xb9\x06\n\tPbRuntime\x12\x15\n\x08platform\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06script\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0binit_script\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x17\n\nenv_script\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\nerr_script\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x18\n\x0b\x65xit_script\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x17\n\npre_script\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x18\n\x0bpost_script\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x19\n\x0cwork_sub_dir\x18\t \x01(\tH\x08\x88\x01\x01\x12(\n\x1b\x65xecution_polling_intervals\x18\n \x01(\tH\t\x88\x01\x01\x12#\n\x16\x65xecution_retry_delays\x18\x0b \x01(\tH\n\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12)\n\x1csubmission_polling_intervals\x18\r \x01(\tH\x0c\x88\x01\x01\x12$\n\x17submission_retry_delays\x18\x0e \x01(\tH\r\x88\x01\x01\x12\x17\n\ndirectives\x18\x0f \x01(\tH\x0e\x88\x01\x01\x12\x18\n\x0b\x65nvironment\x18\x10 \x01(\tH\x0f\x88\x01\x01\x12\x14\n\x07outputs\x18\x11 \x01(\tH\x10\x88\x01\x01\x42\x0b\n\t_platformB\t\n\x07_scriptB\x0e\n\x0c_init_scriptB\r\n\x0b_env_scriptB\r\n\x0b_err_scriptB\x0e\n\x0c_exit_scriptB\r\n\x0b_pre_scriptB\x0e\n\x0c_post_scriptB\x0f\n\r_work_sub_dirB\x1e\n\x1c_execution_polling_intervalsB\x19\n\x17_execution_retry_delaysB\x17\n\x15_execution_time_limitB\x1f\n\x1d_submission_polling_intervalsB\x1a\n\x18_submission_retry_delaysB\r\n\x0b_directivesB\x0e\n\x0c_environmentB\n\n\x08_outputs\"\xab\x05\n\x05PbJob\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nsubmit_num\x18\x03 \x01(\x05H\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\ntask_proxy\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x1b\n\x0esubmitted_time\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x19\n\x0cstarted_time\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x1a\n\rfinished_time\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x06job_id\x18\t \x01(\tH\x08\x88\x01\x01\x12\x1c\n\x0fjob_runner_name\x18\n \x01(\tH\t\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0e \x01(\x02H\n\x88\x01\x01\x12\x15\n\x08platform\x18\x0f \x01(\tH\x0b\x88\x01\x01\x12\x18\n\x0bjob_log_dir\x18\x11 \x01(\tH\x0c\x88\x01\x01\x12\x12\n\nextra_logs\x18\x1d \x03(\t\x12\x11\n\x04name\x18\x1e \x01(\tH\r\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x1f 
\x01(\tH\x0e\x88\x01\x01\x12\x10\n\x08messages\x18 \x03(\t\x12 \n\x07runtime\x18! \x01(\x0b\x32\n.PbRuntimeH\x0f\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\r\n\x0b_submit_numB\x08\n\x06_stateB\r\n\x0b_task_proxyB\x11\n\x0f_submitted_timeB\x0f\n\r_started_timeB\x10\n\x0e_finished_timeB\t\n\x07_job_idB\x12\n\x10_job_runner_nameB\x17\n\x15_execution_time_limitB\x0b\n\t_platformB\x0e\n\x0c_job_log_dirB\x07\n\x05_nameB\x0e\n\x0c_cycle_pointB\n\n\x08_runtime\"\xe2\x02\n\x06PbTask\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x1e\n\x11mean_elapsed_time\x18\x05 \x01(\x02H\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x0f\n\x07proxies\x18\x07 \x03(\t\x12\x11\n\tnamespace\x18\x08 \x03(\t\x12\x0f\n\x07parents\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x06\x88\x01\x01\x12 \n\x07runtime\x18\x0b \x01(\x0b\x32\n.PbRuntimeH\x07\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x14\n\x12_mean_elapsed_timeB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_runtime\"\xd8\x01\n\nPbPollTask\x12\x18\n\x0blocal_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08workflow\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x19\n\x0cremote_proxy\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\treq_state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x19\n\x0cgraph_string\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x0e\n\x0c_local_proxyB\x0b\n\t_workflowB\x0f\n\r_remote_proxyB\x0c\n\n_req_stateB\x0f\n\r_graph_string\"\xcb\x01\n\x0bPbCondition\x12\x17\n\ntask_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nexpr_alias\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\treq_state\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x14\n\x07message\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\r\n\x0b_task_proxyB\r\n\x0b_expr_aliasB\x0c\n\n_req_stateB\x0c\n\n_satisfiedB\n\n\x08_message\"\x96\x01\n\x0ePbPrerequisite\x12\x17\n\nexpression\x18\x01 \x01(\tH\x00\x88\x01\x01\x12 \n\nconditions\x18\x02 \x03(\x0b\x32\x0c.PbCondition\x12\x14\n\x0c\x63ycle_points\x18\x03 \x03(\t\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x01\x88\x01\x01\x42\r\n\x0b_expressionB\x0c\n\n_satisfied\"\x8c\x01\n\x08PbOutput\x12\x12\n\x05label\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x14\n\x07message\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\tsatisfied\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x11\n\x04time\x18\x04 \x01(\x01H\x03\x88\x01\x01\x42\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"|\n\x0ePbClockTrigger\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x18\n\x0btime_string\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\tsatisfied\x18\x03 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0e\n\x0c_time_stringB\x0c\n\n_satisfied\"\xa5\x01\n\tPbTrigger\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05label\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07message\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x11\n\x04time\x18\x05 \x01(\x01H\x04\x88\x01\x01\x42\x05\n\x03_idB\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\xa6\x08\n\x0bPbTaskProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04task\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x05 
\x01(\tH\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x18\n\x0bjob_submits\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12*\n\x07outputs\x18\t \x03(\x0b\x32\x19.PbTaskProxy.OutputsEntry\x12\x11\n\tnamespace\x18\x0b \x03(\t\x12&\n\rprerequisites\x18\x0c \x03(\x0b\x32\x0f.PbPrerequisite\x12\x0c\n\x04jobs\x18\r \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\x0f \x01(\tH\x07\x88\x01\x01\x12\x11\n\x04name\x18\x10 \x01(\tH\x08\x88\x01\x01\x12\x14\n\x07is_held\x18\x11 \x01(\x08H\t\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x12 \x03(\t\x12\x11\n\tancestors\x18\x13 \x03(\t\x12\x16\n\tflow_nums\x18\x14 \x01(\tH\n\x88\x01\x01\x12+\n\rclock_trigger\x18\x16 \x01(\x0b\x32\x0f.PbClockTriggerH\x0b\x88\x01\x01\x12=\n\x11\x65xternal_triggers\x18\x17 \x03(\x0b\x32\".PbTaskProxy.ExternalTriggersEntry\x12.\n\txtriggers\x18\x18 \x03(\x0b\x32\x1b.PbTaskProxy.XtriggersEntry\x12\x16\n\tis_queued\x18\x19 \x01(\x08H\x0c\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x1a \x01(\x08H\r\x88\x01\x01\x12\x16\n\tflow_wait\x18\x1b \x01(\x08H\x0e\x88\x01\x01\x12 \n\x07runtime\x18\x1c \x01(\x0b\x32\n.PbRuntimeH\x0f\x88\x01\x01\x1a\x39\n\x0cOutputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x18\n\x05value\x18\x02 \x01(\x0b\x32\t.PbOutput:\x02\x38\x01\x1a\x43\n\x15\x45xternalTriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x1a<\n\x0eXtriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_taskB\x08\n\x06_stateB\x0e\n\x0c_cycle_pointB\x08\n\x06_depthB\x0e\n\x0c_job_submitsB\x0f\n\r_first_parentB\x07\n\x05_nameB\n\n\x08_is_heldB\x0c\n\n_flow_numsB\x10\n\x0e_clock_triggerB\x0c\n\n_is_queuedB\x0e\n\x0c_is_runaheadB\x0c\n\n_flow_waitB\n\n\x08_runtime\"\xc8\x02\n\x08PbFamily\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x05 \x01(\x05H\x04\x88\x01\x01\x12\x0f\n\x07proxies\x18\x06 \x03(\t\x12\x0f\n\x07parents\x18\x07 \x03(\t\x12\x13\n\x0b\x63hild_tasks\x18\x08 \x03(\t\x12\x16\n\x0e\x63hild_families\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x05\x88\x01\x01\x12 \n\x07runtime\x18\x0b \x01(\x0b\x32\n.PbRuntimeH\x06\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_runtime\"\x84\x06\n\rPbFamilyProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x11\n\x04name\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x13\n\x06\x66\x61mily\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05state\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12\x19\n\x0c\x66irst_parent\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x0b\x63hild_tasks\x18\n \x03(\t\x12\x16\n\x0e\x63hild_families\x18\x0b \x03(\t\x12\x14\n\x07is_held\x18\x0c \x01(\x08H\x08\x88\x01\x01\x12\x11\n\tancestors\x18\r \x03(\t\x12\x0e\n\x06states\x18\x0e \x03(\t\x12\x35\n\x0cstate_totals\x18\x0f \x03(\x0b\x32\x1f.PbFamilyProxy.StateTotalsEntry\x12\x1a\n\ris_held_total\x18\x10 \x01(\x05H\t\x88\x01\x01\x12\x16\n\tis_queued\x18\x11 \x01(\x08H\n\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18\x12 \x01(\x05H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x13 
\x01(\x08H\x0c\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18\x14 \x01(\x05H\r\x88\x01\x01\x12 \n\x07runtime\x18\x15 \x01(\x0b\x32\n.PbRuntimeH\x0e\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x0e\n\x0c_cycle_pointB\x07\n\x05_nameB\t\n\x07_familyB\x08\n\x06_stateB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_is_heldB\x10\n\x0e_is_held_totalB\x0c\n\n_is_queuedB\x12\n\x10_is_queued_totalB\x0e\n\x0c_is_runaheadB\x14\n\x12_is_runahead_totalB\n\n\x08_runtime\"\xbc\x01\n\x06PbEdge\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x13\n\x06source\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06target\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x14\n\x07suicide\x18\x05 \x01(\x08H\x04\x88\x01\x01\x12\x11\n\x04\x63ond\x18\x06 \x01(\x08H\x05\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\t\n\x07_sourceB\t\n\x07_targetB\n\n\x08_suicideB\x07\n\x05_cond\"{\n\x07PbEdges\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x02 \x03(\t\x12+\n\x16workflow_polling_tasks\x18\x03 \x03(\x0b\x32\x0b.PbPollTask\x12\x0e\n\x06leaves\x18\x04 \x03(\t\x12\x0c\n\x04\x66\x65\x65t\x18\x05 \x03(\tB\x05\n\x03_id\"\xf2\x01\n\x10PbEntireWorkflow\x12\"\n\x08workflow\x18\x01 \x01(\x0b\x32\x0b.PbWorkflowH\x00\x88\x01\x01\x12\x16\n\x05tasks\x18\x02 \x03(\x0b\x32\x07.PbTask\x12\"\n\x0ctask_proxies\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x14\n\x04jobs\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x1b\n\x08\x66\x61milies\x18\x05 \x03(\x0b\x32\t.PbFamily\x12&\n\x0e\x66\x61mily_proxies\x18\x06 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x16\n\x05\x65\x64ges\x18\x07 \x03(\x0b\x32\x07.PbEdgeB\x0b\n\t_workflow\"\xaf\x01\n\x07\x45\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbEdge\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbEdge\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xb3\x01\n\x07\x46\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x18\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\t.PbFamily\x12\x1a\n\x07updated\x18\x04 \x03(\x0b\x32\t.PbFamily\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xbe\x01\n\x08\x46PDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1d\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x1f\n\x07updated\x18\x04 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xad\x01\n\x07JDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x15\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x06.PbJob\x12\x17\n\x07updated\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xaf\x01\n\x07TDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 
\x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbTask\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbTask\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xba\x01\n\x08TPDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1b\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x1d\n\x07updated\x18\x04 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xc3\x01\n\x07WDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1f\n\x05\x61\x64\x64\x65\x64\x18\x02 \x01(\x0b\x32\x0b.PbWorkflowH\x01\x88\x01\x01\x12!\n\x07updated\x18\x03 \x01(\x0b\x32\x0b.PbWorkflowH\x02\x88\x01\x01\x12\x15\n\x08reloaded\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x13\n\x06pruned\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x07\n\x05_timeB\x08\n\x06_addedB\n\n\x08_updatedB\x0b\n\t_reloadedB\t\n\x07_pruned\"\xd1\x01\n\tAllDeltas\x12\x1a\n\x08\x66\x61milies\x18\x01 \x01(\x0b\x32\x08.FDeltas\x12!\n\x0e\x66\x61mily_proxies\x18\x02 \x01(\x0b\x32\t.FPDeltas\x12\x16\n\x04jobs\x18\x03 \x01(\x0b\x32\x08.JDeltas\x12\x17\n\x05tasks\x18\x04 \x01(\x0b\x32\x08.TDeltas\x12\x1f\n\x0ctask_proxies\x18\x05 \x01(\x0b\x32\t.TPDeltas\x12\x17\n\x05\x65\x64ges\x18\x06 \x01(\x0b\x32\x08.EDeltas\x12\x1a\n\x08workflow\x18\x07 \x01(\x0b\x32\x08.WDeltasb\x06proto3')
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13\x64\x61ta_messages.proto\"\x96\x01\n\x06PbMeta\x12\x12\n\x05title\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0b\x64\x65scription\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x10\n\x03URL\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x19\n\x0cuser_defined\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_titleB\x0e\n\x0c_descriptionB\x06\n\x04_URLB\x0f\n\r_user_defined\"\xaa\x01\n\nPbTimeZone\x12\x12\n\x05hours\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x14\n\x07minutes\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x19\n\x0cstring_basic\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1c\n\x0fstring_extended\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_hoursB\n\n\x08_minutesB\x0f\n\r_string_basicB\x12\n\x10_string_extended\"\'\n\x0fPbTaskProxyRefs\x12\x14\n\x0ctask_proxies\x18\x01 \x03(\t\"\xf2\x0b\n\nPbWorkflow\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06status\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x11\n\x04host\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x11\n\x04port\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x12\n\x05owner\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\r\n\x05tasks\x18\x08 \x03(\t\x12\x10\n\x08\x66\x61milies\x18\t \x03(\t\x12\x1c\n\x05\x65\x64ges\x18\n \x01(\x0b\x32\x08.PbEdgesH\x07\x88\x01\x01\x12\x18\n\x0b\x61pi_version\x18\x0b \x01(\x05H\x08\x88\x01\x01\x12\x19\n\x0c\x63ylc_version\x18\x0c \x01(\tH\t\x88\x01\x01\x12\x19\n\x0clast_updated\x18\r \x01(\x01H\n\x88\x01\x01\x12\x1a\n\x04meta\x18\x0e \x01(\x0b\x32\x07.PbMetaH\x0b\x88\x01\x01\x12&\n\x19newest_active_cycle_point\x18\x10 \x01(\tH\x0c\x88\x01\x01\x12&\n\x19oldest_active_cycle_point\x18\x11 \x01(\tH\r\x88\x01\x01\x12\x15\n\x08reloaded\x18\x12 \x01(\x08H\x0e\x88\x01\x01\x12\x15\n\x08run_mode\x18\x13 \x01(\tH\x0f\x88\x01\x01\x12\x19\n\x0c\x63ycling_mode\x18\x14 \x01(\tH\x10\x88\x01\x01\x12\x32\n\x0cstate_totals\x18\x15 \x03(\x0b\x32\x1c.PbWorkflow.StateTotalsEntry\x12\x1d\n\x10workflow_log_dir\x18\x16 \x01(\tH\x11\x88\x01\x01\x12(\n\x0etime_zone_info\x18\x17 \x01(\x0b\x32\x0b.PbTimeZoneH\x12\x88\x01\x01\x12\x17\n\ntree_depth\x18\x18 \x01(\x05H\x13\x88\x01\x01\x12\x15\n\rjob_log_names\x18\x19 \x03(\t\x12\x14\n\x0cns_def_order\x18\x1a \x03(\t\x12\x0e\n\x06states\x18\x1b \x03(\t\x12\x14\n\x0ctask_proxies\x18\x1c \x03(\t\x12\x16\n\x0e\x66\x61mily_proxies\x18\x1d \x03(\t\x12\x17\n\nstatus_msg\x18\x1e \x01(\tH\x14\x88\x01\x01\x12\x1a\n\ris_held_total\x18\x1f \x01(\x05H\x15\x88\x01\x01\x12\x0c\n\x04jobs\x18 \x03(\t\x12\x15\n\x08pub_port\x18! 
\x01(\x05H\x16\x88\x01\x01\x12\x17\n\nbroadcasts\x18\" \x01(\tH\x17\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18# \x01(\x05H\x18\x88\x01\x01\x12=\n\x12latest_state_tasks\x18$ \x03(\x0b\x32!.PbWorkflow.LatestStateTasksEntry\x12\x13\n\x06pruned\x18% \x01(\x08H\x19\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18& \x01(\x05H\x1a\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1aI\n\x15LatestStateTasksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1f\n\x05value\x18\x02 \x01(\x0b\x32\x10.PbTaskProxyRefs:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\t\n\x07_statusB\x07\n\x05_hostB\x07\n\x05_portB\x08\n\x06_ownerB\x08\n\x06_edgesB\x0e\n\x0c_api_versionB\x0f\n\r_cylc_versionB\x0f\n\r_last_updatedB\x07\n\x05_metaB\x1c\n\x1a_newest_active_cycle_pointB\x1c\n\x1a_oldest_active_cycle_pointB\x0b\n\t_reloadedB\x0b\n\t_run_modeB\x0f\n\r_cycling_modeB\x13\n\x11_workflow_log_dirB\x11\n\x0f_time_zone_infoB\r\n\x0b_tree_depthB\r\n\x0b_status_msgB\x10\n\x0e_is_held_totalB\x0b\n\t_pub_portB\r\n\x0b_broadcastsB\x12\n\x10_is_queued_totalB\t\n\x07_prunedB\x14\n\x12_is_runahead_total\"\xb9\x06\n\tPbRuntime\x12\x15\n\x08platform\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06script\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0binit_script\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x17\n\nenv_script\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\nerr_script\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x18\n\x0b\x65xit_script\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x17\n\npre_script\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x18\n\x0bpost_script\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x19\n\x0cwork_sub_dir\x18\t \x01(\tH\x08\x88\x01\x01\x12(\n\x1b\x65xecution_polling_intervals\x18\n \x01(\tH\t\x88\x01\x01\x12#\n\x16\x65xecution_retry_delays\x18\x0b \x01(\tH\n\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12)\n\x1csubmission_polling_intervals\x18\r \x01(\tH\x0c\x88\x01\x01\x12$\n\x17submission_retry_delays\x18\x0e \x01(\tH\r\x88\x01\x01\x12\x17\n\ndirectives\x18\x0f \x01(\tH\x0e\x88\x01\x01\x12\x18\n\x0b\x65nvironment\x18\x10 \x01(\tH\x0f\x88\x01\x01\x12\x14\n\x07outputs\x18\x11 \x01(\tH\x10\x88\x01\x01\x42\x0b\n\t_platformB\t\n\x07_scriptB\x0e\n\x0c_init_scriptB\r\n\x0b_env_scriptB\r\n\x0b_err_scriptB\x0e\n\x0c_exit_scriptB\r\n\x0b_pre_scriptB\x0e\n\x0c_post_scriptB\x0f\n\r_work_sub_dirB\x1e\n\x1c_execution_polling_intervalsB\x19\n\x17_execution_retry_delaysB\x17\n\x15_execution_time_limitB\x1f\n\x1d_submission_polling_intervalsB\x1a\n\x18_submission_retry_delaysB\r\n\x0b_directivesB\x0e\n\x0c_environmentB\n\n\x08_outputs\"\xab\x05\n\x05PbJob\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nsubmit_num\x18\x03 \x01(\x05H\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\ntask_proxy\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x1b\n\x0esubmitted_time\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x19\n\x0cstarted_time\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x1a\n\rfinished_time\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x06job_id\x18\t \x01(\tH\x08\x88\x01\x01\x12\x1c\n\x0fjob_runner_name\x18\n \x01(\tH\t\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0e \x01(\x02H\n\x88\x01\x01\x12\x15\n\x08platform\x18\x0f \x01(\tH\x0b\x88\x01\x01\x12\x18\n\x0bjob_log_dir\x18\x11 \x01(\tH\x0c\x88\x01\x01\x12\x12\n\nextra_logs\x18\x1d \x03(\t\x12\x11\n\x04name\x18\x1e \x01(\tH\r\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x1f 
\x01(\tH\x0e\x88\x01\x01\x12\x10\n\x08messages\x18 \x03(\t\x12 \n\x07runtime\x18! \x01(\x0b\x32\n.PbRuntimeH\x0f\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\r\n\x0b_submit_numB\x08\n\x06_stateB\r\n\x0b_task_proxyB\x11\n\x0f_submitted_timeB\x0f\n\r_started_timeB\x10\n\x0e_finished_timeB\t\n\x07_job_idB\x12\n\x10_job_runner_nameB\x17\n\x15_execution_time_limitB\x0b\n\t_platformB\x0e\n\x0c_job_log_dirB\x07\n\x05_nameB\x0e\n\x0c_cycle_pointB\n\n\x08_runtime\"\xe2\x02\n\x06PbTask\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x1e\n\x11mean_elapsed_time\x18\x05 \x01(\x02H\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x0f\n\x07proxies\x18\x07 \x03(\t\x12\x11\n\tnamespace\x18\x08 \x03(\t\x12\x0f\n\x07parents\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x06\x88\x01\x01\x12 \n\x07runtime\x18\x0b \x01(\x0b\x32\n.PbRuntimeH\x07\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x14\n\x12_mean_elapsed_timeB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_runtime\"\xd8\x01\n\nPbPollTask\x12\x18\n\x0blocal_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08workflow\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x19\n\x0cremote_proxy\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\treq_state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x19\n\x0cgraph_string\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x0e\n\x0c_local_proxyB\x0b\n\t_workflowB\x0f\n\r_remote_proxyB\x0c\n\n_req_stateB\x0f\n\r_graph_string\"\xcb\x01\n\x0bPbCondition\x12\x17\n\ntask_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nexpr_alias\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\treq_state\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x14\n\x07message\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\r\n\x0b_task_proxyB\r\n\x0b_expr_aliasB\x0c\n\n_req_stateB\x0c\n\n_satisfiedB\n\n\x08_message\"\x96\x01\n\x0ePbPrerequisite\x12\x17\n\nexpression\x18\x01 \x01(\tH\x00\x88\x01\x01\x12 \n\nconditions\x18\x02 \x03(\x0b\x32\x0c.PbCondition\x12\x14\n\x0c\x63ycle_points\x18\x03 \x03(\t\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x01\x88\x01\x01\x42\r\n\x0b_expressionB\x0c\n\n_satisfied\"\x8c\x01\n\x08PbOutput\x12\x12\n\x05label\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x14\n\x07message\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\tsatisfied\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x11\n\x04time\x18\x04 \x01(\x01H\x03\x88\x01\x01\x42\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\xa5\x01\n\tPbTrigger\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05label\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07message\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x11\n\x04time\x18\x05 \x01(\x01H\x04\x88\x01\x01\x42\x05\n\x03_idB\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\xe7\x07\n\x0bPbTaskProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04task\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x18\n\x0bjob_submits\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12*\n\x07outputs\x18\t \x03(\x0b\x32\x19.PbTaskProxy.OutputsEntry\x12\x11\n\tnamespace\x18\x0b \x03(\t\x12&\n\rprerequisites\x18\x0c 
\x03(\x0b\x32\x0f.PbPrerequisite\x12\x0c\n\x04jobs\x18\r \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\x0f \x01(\tH\x07\x88\x01\x01\x12\x11\n\x04name\x18\x10 \x01(\tH\x08\x88\x01\x01\x12\x14\n\x07is_held\x18\x11 \x01(\x08H\t\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x12 \x03(\t\x12\x11\n\tancestors\x18\x13 \x03(\t\x12\x16\n\tflow_nums\x18\x14 \x01(\tH\n\x88\x01\x01\x12=\n\x11\x65xternal_triggers\x18\x17 \x03(\x0b\x32\".PbTaskProxy.ExternalTriggersEntry\x12.\n\txtriggers\x18\x18 \x03(\x0b\x32\x1b.PbTaskProxy.XtriggersEntry\x12\x16\n\tis_queued\x18\x19 \x01(\x08H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x1a \x01(\x08H\x0c\x88\x01\x01\x12\x16\n\tflow_wait\x18\x1b \x01(\x08H\r\x88\x01\x01\x12 \n\x07runtime\x18\x1c \x01(\x0b\x32\n.PbRuntimeH\x0e\x88\x01\x01\x1a\x39\n\x0cOutputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x18\n\x05value\x18\x02 \x01(\x0b\x32\t.PbOutput:\x02\x38\x01\x1a\x43\n\x15\x45xternalTriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x1a<\n\x0eXtriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_taskB\x08\n\x06_stateB\x0e\n\x0c_cycle_pointB\x08\n\x06_depthB\x0e\n\x0c_job_submitsB\x0f\n\r_first_parentB\x07\n\x05_nameB\n\n\x08_is_heldB\x0c\n\n_flow_numsB\x0c\n\n_is_queuedB\x0e\n\x0c_is_runaheadB\x0c\n\n_flow_waitB\n\n\x08_runtime\"\xc8\x02\n\x08PbFamily\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x05 \x01(\x05H\x04\x88\x01\x01\x12\x0f\n\x07proxies\x18\x06 \x03(\t\x12\x0f\n\x07parents\x18\x07 \x03(\t\x12\x13\n\x0b\x63hild_tasks\x18\x08 \x03(\t\x12\x16\n\x0e\x63hild_families\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x05\x88\x01\x01\x12 \n\x07runtime\x18\x0b \x01(\x0b\x32\n.PbRuntimeH\x06\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_runtime\"\x84\x06\n\rPbFamilyProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x11\n\x04name\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x13\n\x06\x66\x61mily\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05state\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12\x19\n\x0c\x66irst_parent\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x0b\x63hild_tasks\x18\n \x03(\t\x12\x16\n\x0e\x63hild_families\x18\x0b \x03(\t\x12\x14\n\x07is_held\x18\x0c \x01(\x08H\x08\x88\x01\x01\x12\x11\n\tancestors\x18\r \x03(\t\x12\x0e\n\x06states\x18\x0e \x03(\t\x12\x35\n\x0cstate_totals\x18\x0f \x03(\x0b\x32\x1f.PbFamilyProxy.StateTotalsEntry\x12\x1a\n\ris_held_total\x18\x10 \x01(\x05H\t\x88\x01\x01\x12\x16\n\tis_queued\x18\x11 \x01(\x08H\n\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18\x12 \x01(\x05H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x13 \x01(\x08H\x0c\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18\x14 \x01(\x05H\r\x88\x01\x01\x12 \n\x07runtime\x18\x15 \x01(\x0b\x32\n.PbRuntimeH\x0e\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\x05:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x0e\n\x0c_cycle_pointB\x07\n\x05_nameB\t\n\x07_familyB\x08\n\x06_stateB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_is_heldB\x10\n\x0e_is_held_totalB\x0c\n\n_is_queuedB\x12\n\x10_is_queued_totalB\x0e\n\x0c_is_runaheadB\x14\n\x12_is_runahead_totalB\n\n\x08_runtime\"\xbc\x01\n\x06PbEdge\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x13\n\x06source\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06target\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x14\n\x07suicide\x18\x05 \x01(\x08H\x04\x88\x01\x01\x12\x11\n\x04\x63ond\x18\x06 \x01(\x08H\x05\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\t\n\x07_sourceB\t\n\x07_targetB\n\n\x08_suicideB\x07\n\x05_cond\"{\n\x07PbEdges\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x02 \x03(\t\x12+\n\x16workflow_polling_tasks\x18\x03 \x03(\x0b\x32\x0b.PbPollTask\x12\x0e\n\x06leaves\x18\x04 \x03(\t\x12\x0c\n\x04\x66\x65\x65t\x18\x05 \x03(\tB\x05\n\x03_id\"\xf2\x01\n\x10PbEntireWorkflow\x12\"\n\x08workflow\x18\x01 \x01(\x0b\x32\x0b.PbWorkflowH\x00\x88\x01\x01\x12\x16\n\x05tasks\x18\x02 \x03(\x0b\x32\x07.PbTask\x12\"\n\x0ctask_proxies\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x14\n\x04jobs\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x1b\n\x08\x66\x61milies\x18\x05 \x03(\x0b\x32\t.PbFamily\x12&\n\x0e\x66\x61mily_proxies\x18\x06 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x16\n\x05\x65\x64ges\x18\x07 \x03(\x0b\x32\x07.PbEdgeB\x0b\n\t_workflow\"\xaf\x01\n\x07\x45\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbEdge\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbEdge\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xb3\x01\n\x07\x46\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x18\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\t.PbFamily\x12\x1a\n\x07updated\x18\x04 \x03(\x0b\x32\t.PbFamily\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xbe\x01\n\x08\x46PDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1d\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x1f\n\x07updated\x18\x04 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xad\x01\n\x07JDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x15\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x06.PbJob\x12\x17\n\x07updated\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xaf\x01\n\x07TDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbTask\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbTask\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 
\x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xba\x01\n\x08TPDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1b\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x1d\n\x07updated\x18\x04 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xc3\x01\n\x07WDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1f\n\x05\x61\x64\x64\x65\x64\x18\x02 \x01(\x0b\x32\x0b.PbWorkflowH\x01\x88\x01\x01\x12!\n\x07updated\x18\x03 \x01(\x0b\x32\x0b.PbWorkflowH\x02\x88\x01\x01\x12\x15\n\x08reloaded\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x13\n\x06pruned\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x07\n\x05_timeB\x08\n\x06_addedB\n\n\x08_updatedB\x0b\n\t_reloadedB\t\n\x07_pruned\"\xd1\x01\n\tAllDeltas\x12\x1a\n\x08\x66\x61milies\x18\x01 \x01(\x0b\x32\x08.FDeltas\x12!\n\x0e\x66\x61mily_proxies\x18\x02 \x01(\x0b\x32\t.FPDeltas\x12\x16\n\x04jobs\x18\x03 \x01(\x0b\x32\x08.JDeltas\x12\x17\n\x05tasks\x18\x04 \x01(\x0b\x32\x08.TDeltas\x12\x1f\n\x0ctask_proxies\x18\x05 \x01(\x0b\x32\t.TPDeltas\x12\x17\n\x05\x65\x64ges\x18\x06 \x01(\x0b\x32\x08.EDeltas\x12\x1a\n\x08workflow\x18\x07 \x01(\x0b\x32\x08.WDeltasb\x06proto3')
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'data_messages_pb2', globals())
@@ -59,44 +59,42 @@
_PBPREREQUISITE._serialized_end=4362
_PBOUTPUT._serialized_start=4365
_PBOUTPUT._serialized_end=4505
- _PBCLOCKTRIGGER._serialized_start=4507
- _PBCLOCKTRIGGER._serialized_end=4631
- _PBTRIGGER._serialized_start=4634
- _PBTRIGGER._serialized_end=4799
- _PBTASKPROXY._serialized_start=4802
- _PBTASKPROXY._serialized_end=5864
- _PBTASKPROXY_OUTPUTSENTRY._serialized_start=5472
- _PBTASKPROXY_OUTPUTSENTRY._serialized_end=5529
- _PBTASKPROXY_EXTERNALTRIGGERSENTRY._serialized_start=5531
- _PBTASKPROXY_EXTERNALTRIGGERSENTRY._serialized_end=5598
- _PBTASKPROXY_XTRIGGERSENTRY._serialized_start=5600
- _PBTASKPROXY_XTRIGGERSENTRY._serialized_end=5660
- _PBFAMILY._serialized_start=5867
- _PBFAMILY._serialized_end=6195
- _PBFAMILYPROXY._serialized_start=6198
- _PBFAMILYPROXY._serialized_end=6970
+ _PBTRIGGER._serialized_start=4508
+ _PBTRIGGER._serialized_end=4673
+ _PBTASKPROXY._serialized_start=4676
+ _PBTASKPROXY._serialized_end=5675
+ _PBTASKPROXY_OUTPUTSENTRY._serialized_start=5301
+ _PBTASKPROXY_OUTPUTSENTRY._serialized_end=5358
+ _PBTASKPROXY_EXTERNALTRIGGERSENTRY._serialized_start=5360
+ _PBTASKPROXY_EXTERNALTRIGGERSENTRY._serialized_end=5427
+ _PBTASKPROXY_XTRIGGERSENTRY._serialized_start=5429
+ _PBTASKPROXY_XTRIGGERSENTRY._serialized_end=5489
+ _PBFAMILY._serialized_start=5678
+ _PBFAMILY._serialized_end=6006
+ _PBFAMILYPROXY._serialized_start=6009
+ _PBFAMILYPROXY._serialized_end=6781
_PBFAMILYPROXY_STATETOTALSENTRY._serialized_start=1382
_PBFAMILYPROXY_STATETOTALSENTRY._serialized_end=1432
- _PBEDGE._serialized_start=6973
- _PBEDGE._serialized_end=7161
- _PBEDGES._serialized_start=7163
- _PBEDGES._serialized_end=7286
- _PBENTIREWORKFLOW._serialized_start=7289
- _PBENTIREWORKFLOW._serialized_end=7531
- _EDELTAS._serialized_start=7534
- _EDELTAS._serialized_end=7709
- _FDELTAS._serialized_start=7712
- _FDELTAS._serialized_end=7891
- _FPDELTAS._serialized_start=7894
- _FPDELTAS._serialized_end=8084
- _JDELTAS._serialized_start=8087
- _JDELTAS._serialized_end=8260
- _TDELTAS._serialized_start=8263
- _TDELTAS._serialized_end=8438
- _TPDELTAS._serialized_start=8441
- _TPDELTAS._serialized_end=8627
- _WDELTAS._serialized_start=8630
- _WDELTAS._serialized_end=8825
- _ALLDELTAS._serialized_start=8828
- _ALLDELTAS._serialized_end=9037
+ _PBEDGE._serialized_start=6784
+ _PBEDGE._serialized_end=6972
+ _PBEDGES._serialized_start=6974
+ _PBEDGES._serialized_end=7097
+ _PBENTIREWORKFLOW._serialized_start=7100
+ _PBENTIREWORKFLOW._serialized_end=7342
+ _EDELTAS._serialized_start=7345
+ _EDELTAS._serialized_end=7520
+ _FDELTAS._serialized_start=7523
+ _FDELTAS._serialized_end=7702
+ _FPDELTAS._serialized_start=7705
+ _FPDELTAS._serialized_end=7895
+ _JDELTAS._serialized_start=7898
+ _JDELTAS._serialized_end=8071
+ _TDELTAS._serialized_start=8074
+ _TDELTAS._serialized_end=8249
+ _TPDELTAS._serialized_start=8252
+ _TPDELTAS._serialized_end=8438
+ _WDELTAS._serialized_start=8441
+ _WDELTAS._serialized_end=8636
+ _ALLDELTAS._serialized_start=8639
+ _ALLDELTAS._serialized_end=8848
# @@protoc_insertion_point(module_scope)
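As this module is generated, the offset changes above should come from rerunning protoc after removing PbClockTrigger from the .proto source, not from hand edits. A quick sanity check on the regenerated module (a sketch, assuming it is importable as cylc.flow.data_messages_pb2):

    from cylc.flow.data_messages_pb2 import DESCRIPTOR

    # the clock-trigger message is gone; the generic trigger remains
    assert 'PbClockTrigger' not in DESCRIPTOR.message_types_by_name
    assert 'PbTrigger' in DESCRIPTOR.message_types_by_name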
diff --git a/cylc/flow/data_store_mgr.py b/cylc/flow/data_store_mgr.py
index 0d5eb64bf37..2fb5369d76e 100644
--- a/cylc/flow/data_store_mgr.py
+++ b/cylc/flow/data_store_mgr.py
@@ -61,7 +61,13 @@
from copy import deepcopy
import json
from time import time
-from typing import Union, Tuple, TYPE_CHECKING
+from typing import (
+ Any,
+ Optional,
+ TYPE_CHECKING,
+ Tuple,
+ Union,
+)
import zlib
from cylc.flow import __version__ as CYLC_VERSION, LOG
@@ -99,8 +105,7 @@
from cylc.flow.wallclock import (
TIME_ZONE_LOCAL_INFO,
TIME_ZONE_UTC_INFO,
- get_utc_mode,
- get_time_string_from_unix_time as time2str
+ get_utc_mode
)
if TYPE_CHECKING:
@@ -686,17 +691,17 @@ def generate_definition_elements(self):
self.parents = parents
def increment_graph_window(
- self,
- source_tokens,
- point,
- flow_nums,
- edge_distance=0,
- active_id=None,
- descendant=False,
- is_parent=False,
- is_manual_submit=False,
- itask=None
- ):
+ self,
+ source_tokens: Tokens,
+ point,
+ flow_nums,
+ edge_distance=0,
+ active_id: Optional[str] = None,
+ descendant=False,
+ is_parent=False,
+ is_manual_submit=False,
+ itask=None
+ ) -> None:
"""Generate graph window about active task proxy to n-edge-distance.
A recursive function, that creates a node then moves to children and
@@ -721,7 +726,6 @@ def increment_graph_window(
Active/Other task proxy, passed in with pool invocation.
Returns:
-
None
"""
@@ -770,6 +774,8 @@ def increment_graph_window(
edge_distance += 1
# Don't expand window about orphan task.
+ child_tokens: Tokens
+ parent_tokens: Tokens
if not is_orphan:
tdef = self.schd.config.taskdefs[source_tokens['task']]
if graph_children is None:
@@ -861,7 +867,12 @@ def increment_graph_window(
getattr(self.updated[WORKFLOW], EDGES).edges.extend(
self.n_window_edges[active_id])
- def generate_edge(self, parent_tokens, child_tokens, active_id):
+ def generate_edge(
+ self,
+ parent_tokens: Tokens,
+ child_tokens: Tokens,
+ active_id: str,
+ ) -> None:
"""Construct edge of child and parent task proxy node."""
# Initiate edge element.
e_id = self.edge_id(parent_tokens, child_tokens)
@@ -919,12 +930,12 @@ def add_pool_node(self, name, point):
def generate_ghost_task(
self,
- tokens,
+ tokens: Tokens,
point,
flow_nums,
is_parent=False,
itask=None
- ):
+ ) -> Tuple[bool, Optional[dict]]:
"""Create task-point element populated with static data.
Args:
@@ -937,8 +948,7 @@ def generate_ghost_task(
Update task-node from corresponding task proxy object.
Returns:
-
- (True/False, Dict/None)
+ (is_orphan, graph_children)
        Orphan tasks (which have no children) return (True, None).
@@ -962,6 +972,7 @@ def generate_ghost_task(
if itask is None:
itask = TaskProxy(
+ self.id_,
self.schd.config.get_taskdef(name),
point,
flow_nums,
@@ -1141,7 +1152,7 @@ def generate_ghost_family(self, fp_id, child_fam=None, child_task=None):
fp_delta.runtime.CopyFrom(
runtime_from_config(
self._apply_broadcasts_to_runtime(
- tokens.relative_id,
+ tokens,
self.schd.config.cfg['runtime'][fam.name]
)
)
@@ -1270,12 +1281,6 @@ def _process_internal_task_proxy(self, itask, tproxy):
output.satisfied = satisfied
output.time = update_time
- if itask.tdef.clocktrigger_offset is not None:
- tproxy.clock_trigger.satisfied = itask.is_waiting_clock_done()
- tproxy.clock_trigger.time = itask.clock_trigger_time
- tproxy.clock_trigger.time_string = time2str(
- itask.clock_trigger_time)
-
for trig, satisfied in itask.state.external_triggers.items():
ext_trig = tproxy.external_triggers[trig]
ext_trig.id = trig
@@ -1300,26 +1305,26 @@ def _process_internal_task_proxy(self, itask, tproxy):
tproxy.runtime.CopyFrom(
runtime_from_config(
self._apply_broadcasts_to_runtime(
- itask.identity,
+ itask.tokens,
itask.tdef.rtconfig
)
)
)
- def _apply_broadcasts_to_runtime(self, relative_id, rtconfig):
+ def _apply_broadcasts_to_runtime(self, tokens, rtconfig):
# Handle broadcasts
- overrides = self.schd.broadcast_mgr.get_broadcast(relative_id)
+ overrides = self.schd.broadcast_mgr.get_broadcast(tokens)
if overrides:
rtconfig = pdeepcopy(rtconfig)
poverride(rtconfig, overrides, prepend=True)
return rtconfig
- def insert_job(self, name, point_string, status, job_conf):
+ def insert_job(self, name, cycle_point, status, job_conf):
"""Insert job into data-store.
Args:
name (str): Corresponding task name.
- point_string (str): Cycle point string
+ cycle_point (str|PointBase): Cycle point string
            job_conf (dict):
Dictionary of job configuration used to generate
the job script.
@@ -1331,17 +1336,16 @@ def insert_job(self, name, point_string, status, job_conf):
"""
sub_num = job_conf['submit_num']
- tp_id, tproxy = self.store_node_fetcher(name, point_string)
+ tp_tokens = self.id_.duplicate(
+ cycle=str(cycle_point),
+ task=name,
+ )
+ tp_id, tproxy = self.store_node_fetcher(tp_tokens)
if not tproxy:
return
update_time = time()
- tp_tokens = Tokens(tp_id)
j_tokens = tp_tokens.duplicate(job=str(sub_num))
- j_id, job = self.store_node_fetcher(
- j_tokens['task'],
- j_tokens['cycle'],
- j_tokens['job'],
- )
+ j_id, job = self.store_node_fetcher(j_tokens)
if job:
# Job already exists (i.e. post-submission submit failure)
return
@@ -1364,7 +1368,7 @@ def insert_job(self, name, point_string, status, job_conf):
# Not all fields are populated with some submit-failures,
# so use task cfg as base.
j_cfg = pdeepcopy(self._apply_broadcasts_to_runtime(
- tp_tokens.relative_id,
+ tp_tokens,
self.schd.config.cfg['runtime'][tproxy.name]
))
for key, val in job_conf.items():
@@ -1406,11 +1410,13 @@ def insert_db_job(self, row_idx, row):
job_id,
platform_name
) = row
-
- tp_id, tproxy = self.store_node_fetcher(name, point_string)
+ tp_tokens = self.id_.duplicate(
+ cycle=point_string,
+ task=name,
+ )
+ tp_id, tproxy = self.store_node_fetcher(tp_tokens)
if not tproxy:
return
- tp_tokens = Tokens(tp_id)
j_tokens = tp_tokens.duplicate(job=str(submit_num))
j_id = j_tokens.id
@@ -1874,7 +1880,7 @@ def _generate_broadcast_node_deltas(self, node_data, node_type):
tokens = Tokens(node_id)
new_runtime = runtime_from_config(
self._apply_broadcasts_to_runtime(
- tokens.relative_id,
+ tokens,
cfg['runtime'][node.name]
)
)
@@ -1901,7 +1907,7 @@ def delta_task_state(self, itask):
objects from the workflow task pool.
"""
- tp_id, tproxy = self.store_node_fetcher(itask.tdef.name, itask.point)
+ tp_id, tproxy = self.store_node_fetcher(itask.tokens)
if not tproxy:
return
update_time = time()
@@ -1935,7 +1941,7 @@ def delta_task_state(self, itask):
def delta_task_held(
self,
itask: Union[TaskProxy, Tuple[str, 'PointBase', bool]]
- ):
+ ) -> None:
"""Create delta for change in task proxy held state.
Args:
@@ -1945,13 +1951,16 @@ def delta_task_held(
"""
if isinstance(itask, TaskProxy):
- name = itask.tdef.name
- cycle = itask.point
+ tokens = itask.tokens
is_held = itask.state.is_held
else:
name, cycle, is_held = itask
+ tokens = self.id_.duplicate(
+ task=name,
+ cycle=str(cycle),
+ )
- tp_id, tproxy = self.store_node_fetcher(name, cycle)
+ tp_id, tproxy = self.store_node_fetcher(tokens)
if not tproxy:
return
tp_delta = self.updated[TASK_PROXIES].setdefault(
@@ -1961,7 +1970,7 @@ def delta_task_held(
self.state_update_families.add(tproxy.first_parent)
self.updates_pending = True
- def delta_task_queued(self, itask):
+ def delta_task_queued(self, itask: TaskProxy) -> None:
"""Create delta for change in task proxy queued state.
Args:
@@ -1970,7 +1979,7 @@ def delta_task_queued(self, itask):
objects from the workflow task pool.
"""
- tp_id, tproxy = self.store_node_fetcher(itask.tdef.name, itask.point)
+ tp_id, tproxy = self.store_node_fetcher(itask.tokens)
if not tproxy:
return
tp_delta = self.updated[TASK_PROXIES].setdefault(
@@ -1980,7 +1989,7 @@ def delta_task_queued(self, itask):
self.state_update_families.add(tproxy.first_parent)
self.updates_pending = True
- def delta_task_runahead(self, itask):
+ def delta_task_runahead(self, itask: TaskProxy) -> None:
"""Create delta for change in task proxy runahead state.
Args:
@@ -1989,7 +1998,7 @@ def delta_task_runahead(self, itask):
objects from the workflow task pool.
"""
- tp_id, tproxy = self.store_node_fetcher(itask.tdef.name, itask.point)
+ tp_id, tproxy = self.store_node_fetcher(itask.tokens)
if not tproxy:
return
tp_delta = self.updated[TASK_PROXIES].setdefault(
@@ -1999,7 +2008,11 @@ def delta_task_runahead(self, itask):
self.state_update_families.add(tproxy.first_parent)
self.updates_pending = True
- def delta_task_output(self, itask, message):
+ def delta_task_output(
+ self,
+ itask: TaskProxy,
+ message: str,
+ ) -> None:
"""Create delta for change in task proxy output.
Args:
@@ -2008,7 +2021,7 @@ def delta_task_output(self, itask, message):
objects from the workflow task pool.
"""
- tp_id, tproxy = self.store_node_fetcher(itask.tdef.name, itask.point)
+ tp_id, tproxy = self.store_node_fetcher(itask.tokens)
if not tproxy:
return
item = itask.state.outputs.get_item(message)
@@ -2027,7 +2040,7 @@ def delta_task_output(self, itask, message):
output.time = update_time
self.updates_pending = True
- def delta_task_outputs(self, itask):
+ def delta_task_outputs(self, itask: TaskProxy) -> None:
"""Create delta for change in all task proxy outputs.
Args:
@@ -2036,7 +2049,7 @@ def delta_task_outputs(self, itask):
objects from the workflow task pool.
"""
- tp_id, tproxy = self.store_node_fetcher(itask.tdef.name, itask.point)
+ tp_id, tproxy = self.store_node_fetcher(itask.tokens)
if not tproxy:
return
update_time = time()
@@ -2051,7 +2064,7 @@ def delta_task_outputs(self, itask):
self.updates_pending = True
- def delta_task_prerequisite(self, itask):
+ def delta_task_prerequisite(self, itask: TaskProxy) -> None:
"""Create delta for change in task proxy prerequisite.
Args:
@@ -2060,7 +2073,7 @@ def delta_task_prerequisite(self, itask):
objects from the workflow task pool.
"""
- tp_id, tproxy = self.store_node_fetcher(itask.tdef.name, itask.point)
+ tp_id, tproxy = self.store_node_fetcher(itask.tokens)
if not tproxy:
return
update_time = time()
@@ -2078,37 +2091,13 @@ def delta_task_prerequisite(self, itask):
tp_delta.prerequisites.extend(prereq_list)
self.updates_pending = True
- def delta_task_clock_trigger(self, itask, check_items):
- """Create delta for change in task proxy prereqs.
-
- Args:
- itask (cylc.flow.task_proxy.TaskProxy):
- Update task-node from corresponding task proxy
- objects from the workflow task pool.
- check_items (tuple):
- Collection of prerequisites checked to determine if
- task is ready to run.
-
- """
- tp_id, tproxy = self.store_node_fetcher(itask.tdef.name, itask.point)
- if not tproxy:
- return
- if len(check_items) == 1:
- return
- _, clock, _ = check_items
- # update task instance
- if (
- tproxy.HasField('clock_trigger')
- and tproxy.clock_trigger.satisfied is not clock
- ):
- update_time = time()
- tp_delta = self.updated[TASK_PROXIES].setdefault(
- tp_id, PbTaskProxy(id=tp_id))
- tp_delta.stamp = f'{tp_id}@{update_time}'
- tp_delta.clock_trigger.satisfied = clock
- self.updates_pending = True
-
- def delta_task_ext_trigger(self, itask, trig, message, satisfied):
+ def delta_task_ext_trigger(
+ self,
+ itask: TaskProxy,
+ trig: str,
+ message: str,
+ satisfied: bool,
+ ) -> None:
"""Create delta for change in task proxy external_trigger.
Args:
@@ -2119,7 +2108,7 @@ def delta_task_ext_trigger(self, itask, trig, message, satisfied):
message (str): Trigger message.
"""
- tp_id, tproxy = self.store_node_fetcher(itask.tdef.name, itask.point)
+ tp_id, tproxy = self.store_node_fetcher(itask.tokens)
if not tproxy:
return
# update task instance
@@ -2158,14 +2147,9 @@ def delta_task_xtrigger(self, sig, satisfied):
# -----------
# Job Deltas
# -----------
- def delta_job_msg(self, job_d, msg):
+ def delta_job_msg(self, tokens: Tokens, msg: str) -> None:
"""Add message to job."""
- tokens = Tokens(job_d, relative=True)
- j_id, job = self.store_node_fetcher(
- tokens['task'],
- tokens['cycle'],
- tokens['job'],
- )
+ j_id, job = self.store_node_fetcher(tokens)
if not job:
return
j_delta = self.updated[JOBS].setdefault(
@@ -2181,14 +2165,14 @@ def delta_job_msg(self, job_d, msg):
j_delta.messages.append(msg)
self.updates_pending = True
- def delta_job_attr(self, job_d, attr_key, attr_val):
+ def delta_job_attr(
+ self,
+ tokens: Tokens,
+ attr_key: str,
+ attr_val: Any,
+ ) -> None:
"""Set job attribute."""
- tokens = Tokens(job_d, relative=True)
- j_id, job = self.store_node_fetcher(
- tokens['task'],
- tokens['cycle'],
- tokens['job'],
- )
+ j_id, job = self.store_node_fetcher(tokens)
if not job:
return
j_delta = PbJob(stamp=f'{j_id}@{time()}')
@@ -2199,14 +2183,13 @@ def delta_job_attr(self, job_d, attr_key, attr_val):
).MergeFrom(j_delta)
self.updates_pending = True
- def delta_job_state(self, job_d, status):
+ def delta_job_state(
+ self,
+ tokens: Tokens,
+ status: str,
+ ) -> None:
"""Set job state."""
- tokens = Tokens(job_d, relative=True)
- j_id, job = self.store_node_fetcher(
- tokens['task'],
- tokens['cycle'],
- tokens['job'],
- )
+ j_id, job = self.store_node_fetcher(tokens)
if not job or status not in JOB_STATUS_SET:
return
j_delta = PbJob(
@@ -2219,17 +2202,17 @@ def delta_job_state(self, job_d, status):
).MergeFrom(j_delta)
self.updates_pending = True
- def delta_job_time(self, job_d, event_key, time_str=None):
+ def delta_job_time(
+ self,
+ tokens: Tokens,
+ event_key: str,
+ time_str: Optional[str] = None,
+ ) -> None:
"""Set an event time in job pool object.
Set values of both event_key + '_time' and event_key + '_time_string'.
"""
- tokens = Tokens(job_d, relative=True)
- j_id, job = self.store_node_fetcher(
- tokens['task'],
- tokens['cycle'],
- tokens['job'],
- )
+ j_id, job = self.store_node_fetcher(tokens)
if not job:
return
j_delta = PbJob(stamp=f'{j_id}@{time()}')
@@ -2241,24 +2224,13 @@ def delta_job_time(self, job_d, event_key, time_str=None):
).MergeFrom(j_delta)
self.updates_pending = True
- def store_node_fetcher(
- self, name, point=None, sub_num=None, node_type=TASK_PROXIES):
+ def store_node_fetcher(self, tokens: Tokens) -> Tuple[str, Any]:
"""Check that task proxy is in or being added to the store"""
- if point is None:
- node_id = self.definition_id(name)
- node_type = TASKS
- elif sub_num is None:
- node_id = self.id_.duplicate(
- cycle=str(point),
- task=name,
- ).id
- else:
- node_id = self.id_.duplicate(
- cycle=str(point),
- task=name,
- job=str(sub_num),
- ).id
- node_type = JOBS
+ node_type = {
+ 'task': TASK_PROXIES,
+ 'job': JOBS,
+ }[tokens.lowest_token]
+ node_id = tokens.id
if node_id in self.added[node_type]:
return (node_id, self.added[node_type][node_id])
elif node_id in self.data[self.workflow_id][node_type]:
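With the new single-argument form, the node type is inferred from the lowest token present; a sketch mirroring the mapping above (ids invented, `mgr` a DataStoreMgr):

    tp_tokens = mgr.id_.duplicate(cycle='20230101T0000Z', task='foo')
    tp_id, tproxy = mgr.store_node_fetcher(tp_tokens)    # -> TASK_PROXIES
    j_id, job = mgr.store_node_fetcher(
        tp_tokens.duplicate(job='01'))                   # -> JOBS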
diff --git a/cylc/flow/hostuserutil.py b/cylc/flow/hostuserutil.py
index 782a193746a..34e033b2d7b 100644
--- a/cylc/flow/hostuserutil.py
+++ b/cylc/flow/hostuserutil.py
@@ -197,8 +197,8 @@ def is_remote_host(self, name):
"""
if name not in self._remote_hosts:
- if not name or name.split(".")[0].startswith("localhost"):
- # e.g. localhost.localdomain
+ if not name or name.startswith("localhost"):
+ # e.g. localhost4.localdomain4
self._remote_hosts[name] = False
else:
try:
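The simplified check treats any hostname beginning with 'localhost' as local, without first splitting on '.'; for illustration (hostnames invented):

    for name in ('localhost', 'localhost4.localdomain4'):
        assert name.startswith('localhost')   # classed as not remote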
diff --git a/cylc/flow/id.py b/cylc/flow/id.py
index 9c3a56e3082..8f0f57ed94d 100644
--- a/cylc/flow/id.py
+++ b/cylc/flow/id.py
@@ -152,12 +152,16 @@ def __repr__(self):
        return f'<id: {self.id}>'
def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ return False
return all(
self[key] == other[key]
for key in self._KEYS
)
def __ne__(self, other):
+ if not isinstance(other, self.__class__):
+ return True
return any(
self[key] != other[key]
for key in self._KEYS
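Without the isinstance guards, comparing a Tokens object against e.g. a plain string raised TypeError from `other[key]`; now cross-type comparisons simply report inequality. A sketch using the universal ID syntax (ids invented):

    from cylc.flow.id import Tokens

    assert Tokens('~me/flow//1/a') != '~me/flow//1/a'   # no longer raises
    assert Tokens('~me/flow//1/a') == Tokens('~me/flow//1/a')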
diff --git a/cylc/flow/install.py b/cylc/flow/install.py
new file mode 100644
index 00000000000..b736e8aeef3
--- /dev/null
+++ b/cylc/flow/install.py
@@ -0,0 +1,631 @@
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+"""Functionality for (local) workflow installation."""
+
+import logging
+import os
+import re
+import shlex
+from contextlib import suppress
+from pathlib import Path
+from subprocess import (
+ PIPE,
+ Popen,
+)
+from typing import (
+ Any,
+ Dict,
+ List,
+ Optional,
+ Tuple,
+ Union,
+)
+
+from cylc.flow import LOG
+from cylc.flow.cfgspec.glbl_cfg import glbl_cfg
+from cylc.flow.exceptions import (
+ InputError,
+ WorkflowFilesError,
+)
+from cylc.flow.loggingutil import (
+ CylcLogFormatter,
+ close_log,
+ get_next_log_number,
+ get_sorted_logs_by_time,
+)
+from cylc.flow.pathutil import (
+ expand_path,
+ get_cylc_run_dir,
+ get_next_rundir_number,
+ get_workflow_run_dir,
+ make_localhost_symlinks,
+)
+from cylc.flow.remote import (
+ DEFAULT_RSYNC_OPTS,
+)
+from cylc.flow.util import cli_format
+from cylc.flow.workflow_files import (
+ WorkflowFiles,
+ abort_if_flow_file_in_path,
+ check_deprecation,
+ check_flow_file,
+ get_cylc_run_abs_path,
+ is_valid_run_dir,
+ check_reserved_dir_names,
+ validate_workflow_name,
+)
+
+
+NESTED_DIRS_MSG = (
+ "Nested {dir_type} directories not allowed - cannot install workflow"
+ " in '{dest}' as '{existing}' is already a valid {dir_type} directory."
+)
+
+
+def _get_logger(rund, log_name, open_file=True):
+ """Get log and create and open if necessary.
+
+ Args:
+ rund:
+ The workflow run directory of the associated workflow.
+ log_name:
+ The name of the log to open.
+ open_file:
+ Open the appropriate log file and add it as a file handler to
+            the logger, i.e. start writing the log to a file if not
+            already doing so.
+
+ """
+ logger = logging.getLogger(log_name)
+    if logger.getEffectiveLevel() != logging.INFO:
+ logger.setLevel(logging.INFO)
+ if open_file and not logger.hasHandlers():
+ _open_install_log(rund, logger)
+ return logger
+
+
+def _open_install_log(rund, logger):
+ """Open Cylc log handlers for install/reinstall."""
+ rund = Path(rund).expanduser()
+    # strip the 'cylc-' prefix from the logger name,
+    # e.g. 'cylc-install' -> 'install'
+    log_type = logger.name[logger.name.startswith('cylc-') and len('cylc-'):]
+ log_dir = Path(
+ rund, WorkflowFiles.LogDir.DIRNAME, WorkflowFiles.LogDir.INSTALL)
+ log_files = get_sorted_logs_by_time(log_dir, '*.log')
+ log_num = get_next_log_number(log_files[-1]) if log_files else 1
+ log_path = Path(log_dir, f"{log_num:02d}-{log_type}.log")
+ log_parent_dir = log_path.parent
+ log_parent_dir.mkdir(exist_ok=True, parents=True)
+ handler = logging.FileHandler(log_path)
+ handler.setFormatter(CylcLogFormatter())
+ logger.addHandler(handler)
+
+
+def get_rsync_rund_cmd(src, dst, reinstall=False, dry_run=False):
+ """Create and return the rsync command used for cylc install/re-install.
+
+ Args:
+ src (str):
+ file path location of source directory
+ dst (str):
+ file path location of destination directory
+ reinstall (bool):
+ indicate reinstall (--delete option added)
+        dry_run (bool):
+            indicate dry-run: rsync transfers nothing, but reports what a
+            real run would change
+
+ Return:
+ list: command to use for rsync.
+
+ """
+ rsync_cmd = shlex.split(
+ glbl_cfg().get(['platforms', 'localhost', 'rsync command'])
+ )
+ rsync_cmd += DEFAULT_RSYNC_OPTS
+ if dry_run:
+ rsync_cmd.append("--dry-run")
+ if reinstall:
+ rsync_cmd.append('--delete')
+
+ exclusions = [
+ '.git',
+ '.svn',
+ '.cylcignore',
+ 'opt/rose-suite-cylc-install.conf',
+ WorkflowFiles.LogDir.DIRNAME,
+ WorkflowFiles.WORK_DIR,
+ WorkflowFiles.SHARE_DIR,
+ WorkflowFiles.Install.DIRNAME,
+ WorkflowFiles.Service.DIRNAME
+ ]
+
+ # This is a hack to make sure that changes to rose-suite.conf
+ # are considered when re-installing.
+ # It should be removed after https://github.com/cylc/cylc-rose/issues/149
+ if not dry_run:
+ exclusions.append('rose-suite.conf')
+
+ for exclude in exclusions:
+ if (
+ Path(src).joinpath(exclude).exists() or
+ Path(dst).joinpath(exclude).exists()
+ ):
+ # Note '/' is the rsync "anchor" to the top level:
+ rsync_cmd.append(f"--exclude=/{exclude}")
+ cylcignore_file = Path(src).joinpath('.cylcignore')
+ if cylcignore_file.exists():
+ rsync_cmd.append(f"--exclude-from={cylcignore_file}")
+ rsync_cmd.append(f"{src}/")
+ rsync_cmd.append(f"{dst}/")
+
+ return rsync_cmd
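For example (paths invented; the base command and default options come from the global config and DEFAULT_RSYNC_OPTS):

    cmd = get_rsync_rund_cmd(
        '/home/me/src/flow',
        '/home/me/cylc-run/flow/run1',
        reinstall=True,
        dry_run=True,
    )
    # e.g. ['rsync', <default opts>, '--dry-run', '--delete', ...,
    #       '/home/me/src/flow/', '/home/me/cylc-run/flow/run1/']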
+
+
+def reinstall_workflow(
+ source: Path,
+ named_run: str,
+ rundir: Path,
+ dry_run: bool = False
+) -> str:
+ """Reinstall workflow.
+
+ Args:
+ source: source directory
+ named_run: name of the run e.g. my-flow/run1
+ rundir: run directory
+ dry_run: if True, will not execute the file transfer but report what
+ would be changed.
+
+ Raises:
+ WorkflowFilesError:
+ If rsync returns non-zero.
+
+ Returns:
+ Stdout from the rsync command.
+
+ """
+ validate_source_dir(source, named_run)
+ check_nested_dirs(rundir)
+ reinstall_log = _get_logger(
+ rundir,
+ 'cylc-reinstall',
+ open_file=not dry_run, # don't open the log file for --dry-run
+ )
+ reinstall_log.info(
+ f'Reinstalling "{named_run}", from "{source}" to "{rundir}"'
+ )
+ rsync_cmd = get_rsync_rund_cmd(
+ source,
+ rundir,
+ reinstall=True,
+ dry_run=dry_run,
+ )
+
+    # Add '+++' to --out-format to mark lines passed through the formatter.
+ rsync_cmd.append('--out-format=+++%o %n%L+++')
+
+ # Run rsync command:
+ reinstall_log.info(cli_format(rsync_cmd))
+ LOG.debug(cli_format(rsync_cmd))
+ proc = Popen(rsync_cmd, stdout=PIPE, stderr=PIPE, text=True) # nosec
+ # * command is constructed via internal interface
+ stdout, stderr = proc.communicate()
+
+ # Strip unwanted output.
+ stdout = ('\n'.join(re.findall(r'\+\+\+(.*)\+\+\+', stdout))).strip()
+ stderr = stderr.strip()
+
+ if proc.returncode != 0:
+ raise WorkflowFilesError(
+ f'An error occurred reinstalling from {source} to {rundir}'
+ f'\n{stderr}'
+ )
+
+ check_flow_file(rundir)
+ reinstall_log.info(f'REINSTALLED {named_run} from {source}')
+ print(
+ f'REINSTALL{"ED" if not dry_run else ""} {named_run} from {source}'
+ )
+ close_log(reinstall_log)
+ return stdout
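A hypothetical dry-run invocation (paths and run name invented):

    stdout = reinstall_workflow(
        source=Path('/home/me/src/flow'),
        named_run='flow/run1',
        rundir=Path('/home/me/cylc-run/flow/run1'),
        dry_run=True,   # report what would change, transfer nothing
    )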
+
+
+def install_workflow(
+ source: Path,
+ workflow_name: Optional[str] = None,
+ run_name: Optional[str] = None,
+ no_run_name: bool = False,
+ cli_symlink_dirs: Optional[Dict[str, Dict[str, Any]]] = None
+) -> Tuple[Path, Path, str, str]:
+ """Install a workflow, or renew its installation.
+
+ Install workflow into new run directory.
+ Create symlink to workflow source location, creating any symlinks for run,
+ work, log, share, share/cycle directories.
+
+ Args:
+ source: absolute path to workflow source directory.
+ workflow_name: workflow name, default basename($PWD).
+        run_name: name of the run, overrides run1, run2, run3 etc.
+            If specified, cylc install will not create runN symlink.
+        no_run_name: Flag as True to install workflow into
+            ~/cylc-run/<workflow_name>
+ cli_symlink_dirs: Symlink dirs, if entered on the cli.
+
+ Return:
+ source: absolute path to source directory.
+ rundir: absolute path to run directory, where the workflow has been
+ installed into.
+ workflow_name: installed workflow name (which may be computed here).
+ named_run: Name of the run.
+
+ Raise:
+ WorkflowFilesError:
+ No flow.cylc file found in source location.
+ Illegal name (can look like a relative path, but not absolute).
+ Another workflow already has this name.
+ Trying to install a workflow that is nested inside of another.
+ """
+ abort_if_flow_file_in_path(source)
+ source = Path(expand_path(source)).resolve()
+ if not workflow_name:
+ workflow_name = get_source_workflow_name(source)
+ validate_workflow_name(workflow_name, check_reserved_names=True)
+ if run_name is not None:
+ if len(Path(run_name).parts) != 1:
+ raise WorkflowFilesError(
+ f'Run name cannot be a path. (You used {run_name})'
+ )
+ check_reserved_dir_names(run_name)
+ validate_source_dir(source, workflow_name)
+ run_path_base = Path(get_workflow_run_dir(workflow_name))
+ relink, run_num, rundir = get_run_dir_info(
+ run_path_base, run_name, no_run_name
+ )
+ max_scan_depth = glbl_cfg().get(['install', 'max depth'])
+ workflow_id = rundir.relative_to(get_cylc_run_dir())
+ if len(workflow_id.parts) > max_scan_depth:
+ raise WorkflowFilesError(
+ f"Cannot install: workflow ID '{workflow_id}' would exceed "
+ f"global.cylc[install]max depth = {max_scan_depth}"
+ )
+ check_nested_dirs(rundir, run_path_base)
+ if rundir.exists():
+ raise WorkflowFilesError(
+ f"'{rundir}' already exists\n"
+ "To reinstall, use `cylc reinstall`"
+ )
+ symlinks_created = {}
+ named_run = workflow_name
+ if run_name:
+ named_run = os.path.join(named_run, run_name)
+ elif run_num:
+ named_run = os.path.join(named_run, f'run{run_num}')
+ symlinks_created = make_localhost_symlinks(
+ rundir, named_run, symlink_conf=cli_symlink_dirs)
+ install_log = _get_logger(rundir, 'cylc-install')
+ if symlinks_created:
+ for target, symlink in symlinks_created.items():
+ install_log.info(f"Symlink created: {symlink} -> {target}")
+ try:
+ rundir.mkdir(exist_ok=True, parents=True)
+ except FileExistsError:
+ # This occurs when the file exists but is _not_ a directory.
+ raise WorkflowFilesError(
+ f"Cannot install as there is an existing file at {rundir}."
+ )
+ if relink:
+ link_runN(rundir)
+ rsync_cmd = get_rsync_rund_cmd(source, rundir)
+ proc = Popen(rsync_cmd, stdout=PIPE, stderr=PIPE, text=True) # nosec
+ # * command is constructed via internal interface
+ stdout, stderr = proc.communicate()
+ install_log.info(
+ f"Copying files from {source} to {rundir}"
+ f"\n{stdout}"
+ )
+ if proc.returncode != 0:
+ install_log.warning(
+ f"An error occurred when copying files from {source} to {rundir}")
+ install_log.warning(f" Warning: {stderr}")
+ cylc_install = Path(rundir.parent, WorkflowFiles.Install.DIRNAME)
+ check_deprecation(check_flow_file(rundir))
+ if no_run_name:
+ cylc_install = Path(rundir, WorkflowFiles.Install.DIRNAME)
+ source_link = cylc_install.joinpath(WorkflowFiles.Install.SOURCE)
+ # check source link matches the source symlink from workflow dir.
+ cylc_install.mkdir(parents=True, exist_ok=True)
+ if not source_link.exists():
+ if source_link.is_symlink():
+ # Condition represents a broken symlink.
+ raise WorkflowFilesError(
+ f'Symlink broken: {source_link} -> {source_link.resolve()}.'
+ )
+ install_log.info(f"Creating symlink from {source_link}")
+ source_link.symlink_to(source.resolve())
+ else:
+ if source_link.resolve() != source.resolve():
+ raise WorkflowFilesError(
+ f"Failed to install from {source.resolve()}: "
+ f"previous installations were from {source_link.resolve()}"
+ )
+ install_log.info(
+ f'Symlink from "{source_link}" to "{source}" in place.')
+ install_log.info(f'INSTALLED {named_run} from {source}')
+ print(f'INSTALLED {named_run} from {source}')
+ close_log(install_log)
+ return source, rundir, workflow_name, named_run
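Typical use, assuming a hypothetical source path (the four return values are described in the docstring):

    source, rundir, workflow_name, named_run = install_workflow(
        Path('/home/me/src/flow')
    )
    # a first numbered-run install in this sketch would give
    # named_run == 'flow/run1'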
+
+
+def get_run_dir_info(
+ run_path_base: Path, run_name: Optional[str], no_run_name: bool
+) -> Tuple[bool, Optional[int], Path]:
+ """Get (numbered, named or unnamed) run directory info for current install.
+
+ Args:
+ run_path_base: The workflow directory absolute path.
+ run_name: Name of the run.
+ no_run_name: Flag as True to indicate no run name - workflow installed
+            into ~/cylc-run/<workflow_name>.
+
+ Returns:
+ relink: True if runN symlink needs updating.
+ run_num: Run number of the current install, if using numbered runs.
+ rundir: Run directory absolute path.
+ """
+ relink = False
+ run_num = None
+ if no_run_name:
+ rundir = run_path_base
+ elif run_name:
+ rundir = run_path_base.joinpath(run_name)
+ if run_path_base.exists() and detect_flow_exists(run_path_base, True):
+ raise WorkflowFilesError(
+ f"--run-name option not allowed as '{run_path_base}' contains "
+ "installed numbered runs."
+ )
+ else:
+ run_num = get_next_rundir_number(run_path_base)
+ rundir = Path(run_path_base, f'run{run_num}')
+ if run_path_base.exists() and detect_flow_exists(run_path_base, False):
+ raise WorkflowFilesError(
+ f"Path: \"{run_path_base}\" contains an installed"
+ " workflow. Use --run-name to create a new run."
+ )
+ unlink_runN(run_path_base)
+ relink = True
+ return relink, run_num, rundir
+
+
+def get_source_dirs() -> List[str]:
+ return glbl_cfg().get(['install', 'source dirs'])
+
+
+def search_install_source_dirs(workflow_name: Union[Path, str]) -> Path:
+ """Return the path of a workflow source dir if it is present in the
+ 'global.cylc[install]source dirs' search path."""
+ abort_if_flow_file_in_path(Path(workflow_name))
+ search_path: List[str] = get_source_dirs()
+ if not search_path:
+ raise WorkflowFilesError(
+ "Cannot find workflow as 'global.cylc[install]source dirs' "
+ "does not contain any paths")
+ for path in search_path:
+ try:
+ return check_flow_file(Path(path, workflow_name)).parent
+ except WorkflowFilesError:
+ continue
+ raise WorkflowFilesError(
+ f"Could not find workflow '{workflow_name}' in: "
+ f"{', '.join(search_path)}")
+
+
+def get_source_workflow_name(source: Path) -> str:
+ """Return workflow name relative to configured source dirs if possible,
+ else the basename of the given path.
+ Note the source path provided should be fully expanded (user and env vars)
+ and normalised.
+ """
+ for dir_ in get_source_dirs():
+ try:
+ return str(source.relative_to(Path(expand_path(dir_)).resolve()))
+ except ValueError:
+ continue
+ return source.name
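For instance, if 'global.cylc[install]source dirs' were ['~/cylc-src'] (an assumption for illustration):

    # Path('~/cylc-src/nwp/fcst') (expanded) -> 'nwp/fcst'
    # Path('/tmp/elsewhere/fcst')            -> 'fcst' (basename fallback)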
+
+
+def unlink_runN(path: Union[Path, str]) -> bool:
+ """Remove symlink runN if it exists.
+
+ Args:
+ path: Absolute path to workflow dir containing runN.
+ """
+ try:
+ Path(expand_path(path, WorkflowFiles.RUN_N)).unlink()
+ except OSError:
+ return False
+ return True
+
+
+def link_runN(latest_run: Union[Path, str]):
+ """Create symlink runN, pointing at the latest run"""
+ latest_run = Path(latest_run)
+ run_n = Path(latest_run.parent, WorkflowFiles.RUN_N)
+ with suppress(OSError):
+ run_n.symlink_to(latest_run.name)
+
+
+def validate_source_dir(
+ source: Union[Path, str], workflow_name: str
+) -> None:
+ """Ensure the source directory is valid:
+ - has flow file
+ - does not contain reserved dir names
+
+ Args:
+ source: Path to source directory
+ Raises:
+ WorkflowFilesError:
+ If log, share, work or _cylc-install directories exist in the
+ source directory.
+ """
+ # Source dir must not contain reserved run dir names (as file or dir).
+ for dir_ in WorkflowFiles.RESERVED_DIRNAMES:
+ if Path(source, dir_).exists():
+ raise WorkflowFilesError(
+ f"{workflow_name} installation failed "
+ f"- {dir_} exists in source directory."
+ )
+ check_flow_file(source)
+
+
+def parse_cli_sym_dirs(symlink_dirs: str) -> Dict[str, Dict[str, Any]]:
+ """Converts command line entered symlink dirs to a dictionary.
+
+ Args:
+ symlink_dirs: As entered by user on cli,
+ e.g. "log=$DIR, share=$DIR2".
+
+ Raises:
+ WorkflowFilesError: If directory to be symlinked is not in permitted
+ dirs: run, log, share, work, share/cycle
+
+ Returns:
+ dict: In the same form as would be returned by global config.
+ e.g. {'localhost': {'log': '$DIR',
+ 'share': '$DIR2'
+ }
+ }
+ """
+    # Ensure the same nested dict format as returned by the global config
+ symdict: Dict[str, Dict[str, Any]] = {'localhost': {'run': None}}
+ if symlink_dirs == "":
+ return symdict
+ symlist = symlink_dirs.strip(',').split(',')
+ possible_symlink_dirs = set(WorkflowFiles.SYMLINK_DIRS.union(
+ {WorkflowFiles.RUN_DIR})
+ )
+ possible_symlink_dirs.remove('')
+ for pair in symlist:
+ try:
+ key, val = pair.split("=")
+ key = key.strip()
+ except ValueError:
+ raise InputError(
+ 'There is an error in --symlink-dirs option:'
+ f' {pair}. Try entering option in the form '
+ '--symlink-dirs=\'log=$DIR, share=$DIR2, ...\''
+ )
+ if key not in possible_symlink_dirs:
+ dirs = ', '.join(possible_symlink_dirs)
+ raise InputError(
+ f"{key} not a valid entry for --symlink-dirs. "
+ f"Configurable symlink dirs are: {dirs}"
+ )
+ symdict['localhost'][key] = val.strip() or None
+
+ return symdict
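A round-trip of the documented form:

    parse_cli_sym_dirs('log=$DIR, share=$DIR2')
    # -> {'localhost': {'run': None, 'log': '$DIR', 'share': '$DIR2'}}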
+
+
+def detect_flow_exists(
+ run_path_base: Union[Path, str], numbered: bool
+) -> bool:
+ """Returns True if installed flow already exists.
+
+ Args:
+ run_path_base: Absolute path of workflow directory,
+            i.e. ~/cylc-run/<workflow_name>
+ numbered: If True, will detect if numbered runs exist. If False, will
+ detect if non-numbered runs exist, i.e. runs installed
+ by --run-name.
+ """
+ for entry in Path(run_path_base).iterdir():
+ is_numbered = bool(re.search(r'^run\d+$', entry.name))
+ if (
+ entry.is_dir()
+ and entry.name not in {
+ WorkflowFiles.Install.DIRNAME, WorkflowFiles.RUN_N
+ }
+ and Path(entry, WorkflowFiles.FLOW_FILE).exists()
+ and is_numbered == numbered
+ ):
+ return True
+ return False
+
+
+def check_nested_dirs(
+ run_dir: Path,
+ install_dir: Optional[Path] = None
+) -> None:
+ """Disallow nested dirs:
+
+ - Nested installed run dirs
+ - Nested installed workflow dirs
+
+ Args:
+ run_dir: Absolute workflow run directory path.
+ install_dir: Absolute workflow install directory path
+ (contains _cylc-install). If None, will not check for nested
+ install dirs.
+
+ Raises:
+        WorkflowFilesError if the run dir is nested inside another run dir,
+            or if install dirs are nested.
+ """
+ if install_dir is not None:
+ install_dir = Path(os.path.normpath(install_dir))
+ # Check parents:
+ for parent_dir in run_dir.parents:
+ # Stop searching at ~/cylc-run
+ if parent_dir == Path(get_cylc_run_dir()):
+ break
+ # check for run directories:
+ if is_valid_run_dir(parent_dir):
+ raise WorkflowFilesError(
+ NESTED_DIRS_MSG.format(
+ dir_type='run',
+ dest=run_dir,
+ existing=get_cylc_run_abs_path(parent_dir)
+ )
+ )
+ # Check for install directories:
+ if (
+ install_dir
+ and parent_dir in install_dir.parents
+ and (parent_dir / WorkflowFiles.Install.DIRNAME).is_dir()
+ ):
+ raise WorkflowFilesError(
+ NESTED_DIRS_MSG.format(
+ dir_type='install',
+ dest=run_dir,
+ existing=get_cylc_run_abs_path(parent_dir)
+ )
+ )
+
+ if install_dir:
+ # Search child tree for install directories:
+ for depth in range(glbl_cfg().get(['install', 'max depth'])):
+ search_pattern = f'*/{"*/" * depth}{WorkflowFiles.Install.DIRNAME}'
+ for result in install_dir.glob(search_pattern):
+ raise WorkflowFilesError(
+ NESTED_DIRS_MSG.format(
+ dir_type='install',
+ dest=run_dir,
+ existing=get_cylc_run_abs_path(result.parent)
+ )
+ )
diff --git a/cylc/flow/main_loop/__init__.py b/cylc/flow/main_loop/__init__.py
index 23a18c9c077..2350153842c 100644
--- a/cylc/flow/main_loop/__init__.py
+++ b/cylc/flow/main_loop/__init__.py
@@ -127,13 +127,12 @@ async def my_startup_coroutine(schd, state):
^^^^^^^^^^
.. _coroutines: https://docs.python.org/3/library/asyncio-task.html#coroutines
-.. _aiofiles: https://github.com/Tinche/aiofiles
Plugins provide asynchronous functions (`coroutines`_) which Cylc will
then run inside the scheduler.
Coroutines should be fast running (read as gentle on the scheduler)
-and perform IO asynchronously e.g. by using `aiofiles`_.
+and perform IO asynchronously.
Coroutines shouldn't meddle with the state of the scheduler and should be
parallel-safe with other plugins.
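For context, a minimal plugin coroutine of the kind described here (the decorator follows the cylc.flow.main_loop plugin conventions; the body is illustrative only):

    from cylc.flow import main_loop

    @main_loop.periodic
    async def counter(scheduler, state):
        # fast, non-blocking bookkeeping only
        state['calls'] = state.get('calls', 0) + 1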
diff --git a/cylc/flow/network/__init__.py b/cylc/flow/network/__init__.py
index 935c27f8075..448b35fb65d 100644
--- a/cylc/flow/network/__init__.py
+++ b/cylc/flow/network/__init__.py
@@ -22,6 +22,7 @@
import zmq
import zmq.asyncio
+import zmq.auth
from cylc.flow import LOG
from cylc.flow.exceptions import (
diff --git a/cylc/flow/network/client.py b/cylc/flow/network/client.py
index 96e78373183..9539fd55d6e 100644
--- a/cylc/flow/network/client.py
+++ b/cylc/flow/network/client.py
@@ -305,7 +305,9 @@ async def async_request(
if msg['command'] in PB_METHOD_MAP:
response = {'data': res}
else:
- response = decode_(res.decode())
+ response = decode_(
+ res.decode() if isinstance(res, bytes) else res
+ )
LOG.debug('zmq:recv %s', response)
try:
@@ -316,8 +318,8 @@ async def async_request(
{'message': f'Received invalid response: {response}'},
)
raise ClientError(
- error.get('message'),
- error.get('traceback'),
+ error.get('message'), # type: ignore
+ error.get('traceback'), # type: ignore
)
def get_header(self) -> dict:
diff --git a/cylc/flow/network/schema.py b/cylc/flow/network/schema.py
index 116edf40ab0..8fee9decfb3 100644
--- a/cylc/flow/network/schema.py
+++ b/cylc/flow/network/schema.py
@@ -869,14 +869,6 @@ class Meta:
time = Float()
-class ClockTrigger(ObjectType):
- class Meta:
- description = """Task clock-trigger"""
- time = Float()
- time_string = String()
- satisfied = Boolean()
-
-
class XTrigger(ObjectType):
class Meta:
description = """Task trigger"""
@@ -919,7 +911,6 @@ class Meta:
limit=Int(default_value=0),
satisfied=Boolean(),
resolver=resolve_mapping_to_list)
- clock_trigger = Field(ClockTrigger)
external_triggers = graphene.List(
XTrigger,
description="""Task external trigger prerequisites.""",
diff --git a/cylc/flow/option_parsers.py b/cylc/flow/option_parsers.py
index 51fdfa2a8d6..d8037e0dddc 100644
--- a/cylc/flow/option_parsers.py
+++ b/cylc/flow/option_parsers.py
@@ -104,7 +104,7 @@ def __sub__(self, other):
def _in_list(self, others):
"""CLI arguments for this option found in any of a list of
other options."""
- return any([self & other for other in others])
+ return any(self & other for other in others)
def _update_sources(self, other):
"""Update the sources from this and 1 other OptionSettings object"""
diff --git a/cylc/flow/platforms.py b/cylc/flow/platforms.py
index 6f1b4b1be03..04cd427a74b 100644
--- a/cylc/flow/platforms.py
+++ b/cylc/flow/platforms.py
@@ -487,10 +487,10 @@ def generic_items_match(
# Get a set of items actually set in both platform and task_section.
shared_items = set(task_section).intersection(set(platform_spec))
# If any set items do not match, we can't use this platform.
- if not all([
+ if not all(
platform_spec[item] == task_section[item]
for item in shared_items
- ]):
+ ):
return False
return True
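As with the option_parsers.py change above, passing a generator expression lets all()/any() short-circuit instead of materialising a full list first, e.g. (where `match` and `items` stand for arbitrary code):

    any(match(item) for item in items)   # stops at the first truthy result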
diff --git a/cylc/flow/remote.py b/cylc/flow/remote.py
index 226af937b31..62cd77fd9af 100644
--- a/cylc/flow/remote.py
+++ b/cylc/flow/remote.py
@@ -78,7 +78,7 @@ def run_cmd(
manage=False,
text=True,
):
- """Run a given cylc command on another account and/or host.
+ """Run a given cylc command on another host.
Arguments:
command (list):
@@ -107,6 +107,9 @@ def run_cmd(
* Else True if the remote command is executed successfully, or
if unsuccessful and capture_status=True the remote command exit code.
* Otherwise exit with an error message.
+
+ Exits with code 1 in the event of certain command errors.
+
"""
# CODACY ISSUE:
# subprocess call - check for execution of untrusted input.
@@ -385,6 +388,8 @@ def remote_cylc_cmd(
Raises:
NoHostsError: If the platform is not contactable.
+ Exits with code 1 in the event of certain command errors.
+
"""
if not host:
# no host selected => perform host selection from platform config
@@ -426,6 +431,8 @@ def cylc_server_cmd(cmd, host=None, **kwargs):
Raises:
NoHostsError: If the platform is not contactable.
+ Exits with code 1 in the event of certain command errors.
+
"""
return remote_cylc_cmd(
cmd,
diff --git a/cylc/flow/scheduler.py b/cylc/flow/scheduler.py
index 9acac8bd283..740e56f43c8 100644
--- a/cylc/flow/scheduler.py
+++ b/cylc/flow/scheduler.py
@@ -273,10 +273,11 @@ def __init__(self, reg: str, options: Values) -> None:
self.workflow_name = get_workflow_name_from_id(self.workflow)
self.owner = get_user()
self.host = get_host()
- self.id = Tokens(
+ self.tokens = Tokens(
user=self.owner,
workflow=self.workflow,
- ).id
+ )
+ self.id = self.tokens.id
self.uuid_str = str(uuid4())
self.options = options
self.template_vars = load_template_vars(
@@ -462,6 +463,7 @@ async def configure(self):
get_workflow_test_log_path(self.workflow)))
self.pool = TaskPool(
+ self.tokens,
self.config,
self.workflow_db_mgr,
self.task_events_mgr,
@@ -653,8 +655,18 @@ async def run_scheduler(self) -> None:
LOG.exception(exc)
raise
- except (KeyboardInterrupt, asyncio.CancelledError, Exception) as exc:
- # Includes SchedulerError
+ except (KeyboardInterrupt, asyncio.CancelledError) as exc:
+ await self.handle_exception(exc)
+
+ except Exception as exc: # Includes SchedulerError
+ with suppress(Exception):
+ LOG.critical(
+ 'An uncaught error caused Cylc to shut down.'
+ '\nIf you think this was an issue in Cylc,'
+ ' please report the following traceback to the developers.'
+ '\nhttps://github.com/cylc/cylc-flow/issues/new'
+ '?assignees=&labels=bug&template=bug.md&title=;'
+ )
await self.handle_exception(exc)
else:
@@ -1601,13 +1613,6 @@ async def main_loop(self) -> None:
):
self.pool.queue_task(itask)
- # Old-style clock-trigger tasks:
- if (
- itask.tdef.clocktrigger_offset is not None
- and all(itask.is_ready_to_run())
- ):
- self.pool.queue_task(itask)
-
if housekeep_xtriggers:
# (Could do this periodically?)
self.xtrigger_mgr.housekeep(self.pool.get_tasks())
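The scheduler now keeps the structured `Tokens` object rather than only its rendered `id` string, and (as the `TaskPool`/`TaskProxy` hunks below show) task and job tokens are derived from it with `duplicate()`. A rough sketch of the relationship (the rendered ID strings in the comments are illustrative, not guaranteed formats):

```python
from cylc.flow.id import Tokens

scheduler_tokens = Tokens(user='alice', workflow='my-flow/run1')
print(scheduler_tokens.id)  # e.g. '~alice/my-flow/run1'

# task proxies extend the scheduler's tokens...
task_tokens = scheduler_tokens.duplicate(cycle='1', task='foo')
# ...and job-level deltas extend the task's tokens
job_tokens = task_tokens.duplicate(job='01')
print(job_tokens.relative_id)  # e.g. '1/foo/01'
```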
diff --git a/cylc/flow/scheduler_cli.py b/cylc/flow/scheduler_cli.py
index d97ca81c459..63ebd231b95 100644
--- a/cylc/flow/scheduler_cli.py
+++ b/cylc/flow/scheduler_cli.py
@@ -134,7 +134,7 @@
PLAY_OPTIONS = [
OptionSettings(
- ["-n", "--no-detach", "--non-daemon"],
+ ["-N", "--no-detach", "--non-daemon"],
help="Do not daemonize the scheduler (infers --format=plain)",
action='store_true', dest="no_detach", sources={'play'}),
OptionSettings(
@@ -386,7 +386,7 @@ def scheduler_cli(options: 'Values', workflow_id_raw: str) -> None:
_upgrade_database(db_file)
# re-execute on another host if required
- _distribute(options.host, workflow_id_raw, workflow_id)
+ _distribute(options.host, workflow_id_raw, workflow_id, options.color)
# print the start message
_print_startup_message(options)
@@ -557,7 +557,7 @@ def _print_startup_message(options):
LOG.warning(SUITERC_DEPR_MSG)
-def _distribute(host, workflow_id_raw, workflow_id):
+def _distribute(host, workflow_id_raw, workflow_id, color):
"""Re-invoke this command on a different host if requested.
Args:
@@ -589,6 +589,15 @@ def _distribute(host, workflow_id_raw, workflow_id):
# Prevent recursive host selection
cmd.append("--host=localhost")
+ # Preserve CLI colour
+ if is_terminal() and color != 'never':
+ # the detached process doesn't pass the is_terminal test
+ # so we have to explicitly tell Cylc to use color
+ cmd.append('--color=always')
+ else:
+ # otherwise set --color=never to make testing easier
+ cmd.append('--color=never')
+
# Re-invoke the command
# NOTE: has the potential to raise NoHostsError, however, this will
# most likely have been raised during host-selection
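A standalone sketch of the colour rule added to `_distribute` (using `sys.stdout.isatty()` as a stand-in for Cylc's `is_terminal()`): the detached child process is never attached to a tty, so the parent must decide on its behalf.

```python
import sys

def color_flag(color_opt: str) -> str:
    """Choose the --color flag to pass to the re-invoked command."""
    if sys.stdout.isatty() and color_opt != 'never':
        return '--color=always'   # child is detached, force colour on
    return '--color=never'        # predictable output (easier to test)
```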
diff --git a/cylc/flow/scripts/clean.py b/cylc/flow/scripts/clean.py
index 317ed77b192..51ddbe5627b 100644
--- a/cylc/flow/scripts/clean.py
+++ b/cylc/flow/scripts/clean.py
@@ -64,6 +64,7 @@
from typing import TYPE_CHECKING, Iterable, List, Tuple
from cylc.flow import LOG
+from cylc.flow.clean import init_clean, get_contained_workflows
from cylc.flow.exceptions import CylcError, InputError
import cylc.flow.flags
from cylc.flow.id_cli import parse_ids_async
@@ -74,7 +75,6 @@
Options,
)
from cylc.flow.terminal import cli_function, is_terminal
-from cylc.flow.workflow_files import init_clean, get_contained_workflows
if TYPE_CHECKING:
from optparse import Values
diff --git a/cylc/flow/scripts/install.py b/cylc/flow/scripts/install.py
index 574c2c0e3a7..6b05a1cd6a1 100755
--- a/cylc/flow/scripts/install.py
+++ b/cylc/flow/scripts/install.py
@@ -105,7 +105,7 @@
expand_path,
get_workflow_run_dir
)
-from cylc.flow.workflow_files import (
+from cylc.flow.install import (
install_workflow,
parse_cli_sym_dirs,
search_install_source_dirs
@@ -137,7 +137,7 @@
sources={'install'},
),
OptionSettings(
- ["--run-name"],
+ ["--run-name", "-r"],
help=(
"Give the run a custom name instead of automatically"
" numbering it."),
diff --git a/cylc/flow/scripts/lint.py b/cylc/flow/scripts/lint.py
index fdcd8a67273..12c58525340 100755
--- a/cylc/flow/scripts/lint.py
+++ b/cylc/flow/scripts/lint.py
@@ -273,7 +273,7 @@ def get_pyproject_toml(dir_):
raise CylcError(f'pyproject.toml did not load: {exc}')
if any(
- [i in loadeddata for i in ['cylc-lint', 'cylclint', 'cylc_lint']]
+ i in loadeddata for i in ['cylc-lint', 'cylclint', 'cylc_lint']
):
for key in keys:
tomldata[key] = loadeddata.get('cylc-lint').get(key, [])
diff --git a/cylc/flow/scripts/message.py b/cylc/flow/scripts/message.py
index 9e9e1f258ef..b2fb01f431c 100755
--- a/cylc/flow/scripts/message.py
+++ b/cylc/flow/scripts/message.py
@@ -59,6 +59,18 @@
The default message severity is INFO. The --severity=SEVERITY option can be
used to set the default severity level for all unprefixed messages.
+Increased severity will make messages more visible in workflow logs, using
+colour and format changes. DEBUG messages will not be shown in logs by default.
+
+The severity levels are those of the Python Logging Library
+https://docs.python.org/3/library/logging.html#logging-levels:
+
+- CRITICAL
+- ERROR
+- WARNING
+- INFO
+- DEBUG
+
Note:
To abort a job script with a custom error message, use cylc__job_abort:
cylc__job_abort 'message...'
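The help text above lists the standard Python logging levels. A hypothetical sketch of how a severity-prefixed message maps onto them (illustrative only, not the actual Cylc parser):

```python
import logging

LEVELS = {'CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'}

def parse_message(msg, default=logging.INFO):
    """Split 'WARNING: text' into (logging.WARNING, 'text')."""
    severity, sep, rest = msg.partition(':')
    if sep and severity in LEVELS:
        return getattr(logging, severity), rest.strip()
    return default, msg

print(parse_message('WARNING: disk space low'))  # (30, 'disk space low')
print(parse_message('job started'))              # (20, 'job started')
```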
diff --git a/cylc/flow/scripts/reinstall.py b/cylc/flow/scripts/reinstall.py
index 5b0b482109c..c3b48b23c74 100644
--- a/cylc/flow/scripts/reinstall.py
+++ b/cylc/flow/scripts/reinstall.py
@@ -80,18 +80,19 @@
ServiceFileError,
WorkflowFilesError,
)
+from cylc.flow.install import (
+ reinstall_workflow,
+)
from cylc.flow.id_cli import parse_id
from cylc.flow.option_parsers import (
CylcOptionParser as COP,
OptionSettings,
- Options,
WORKFLOW_ID_ARG_DOC,
)
from cylc.flow.pathutil import get_workflow_run_dir
from cylc.flow.workflow_files import (
get_workflow_source_dir,
load_contact_file,
- reinstall_workflow,
)
from cylc.flow.terminal import cli_function, DIM, is_terminal
@@ -112,25 +113,36 @@
)
]
+REINSTALL_OPTIONS = [
+ OptionSettings(
+ ["--yes"],
+ help='Skip interactive prompts.',
+ action="store_true",
+ default=False,
+ dest="skip_interactive",
+ sources={'reinstall'}
+ ),
+]
+
def get_option_parser() -> COP:
parser = COP(
__doc__, comms=True, argdoc=[WORKFLOW_ID_ARG_DOC]
)
- parser.add_cylc_rose_options()
try:
# If cylc-rose plugin is available
__import__('cylc.rose')
except ImportError:
- pass
+ options = REINSTALL_OPTIONS
else:
- for option in REINSTALL_CYLC_ROSE_OPTIONS:
- parser.add_option(*option.args, **option.kwargs)
- return parser
+ parser.add_cylc_rose_options()
+ options = REINSTALL_CYLC_ROSE_OPTIONS + REINSTALL_OPTIONS
+ for option in options:
+ parser.add_option(*option.args, **option.kwargs)
-ReInstallOptions = Options(get_option_parser())
+ return parser
@cli_function(get_option_parser)
@@ -146,6 +158,7 @@ def main(
def reinstall_cli(
opts: 'Values',
args: Optional[str] = None,
+ print_reload_tip: bool = True,
) -> bool:
"""Implement cylc reinstall.
@@ -175,7 +188,8 @@ def reinstall_cli(
usr: str = ''
try:
- if is_terminal(): # interactive mode - perform dry-run and prompt
+ if is_terminal() and not opts.skip_interactive:
+ # interactive mode - perform dry-run and prompt
# dry-mode reinstall
if not reinstall(
opts,
@@ -212,7 +226,8 @@ def reinstall_cli(
# reinstall for real
reinstall(opts, workflow_id, source, run_dir, dry_run=False)
print(cparse('Successfully reinstalled.'))
- display_cylc_reload_tip(workflow_id)
+ if print_reload_tip:
+ display_cylc_reload_tip(workflow_id)
return True
else:
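The new `--yes` flag feeds the prompt gate shown above. A minimal sketch of the intended semantics, approximating `is_terminal()` with a plain tty check:

```python
import sys

def should_prompt(skip_interactive: bool) -> bool:
    """Dry-run and prompt only in an interactive session without --yes."""
    return sys.stdout.isatty() and not skip_interactive
```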
diff --git a/cylc/flow/scripts/show.py b/cylc/flow/scripts/show.py
index 70656dc3f0c..067df97894e 100755
--- a/cylc/flow/scripts/show.py
+++ b/cylc/flow/scripts/show.py
@@ -117,10 +117,6 @@
message
satisfied
}
- clockTrigger {
- timeString
- satisfied
- }
externalTriggers {
id
label
@@ -327,19 +323,11 @@ async def prereqs_and_outputs_query(
info = f'{task_id} {output["label"]}'
print_msg_state(info, output['satisfied'])
if (
- t_proxy['clockTrigger']['timeString']
- or t_proxy['externalTriggers']
+ t_proxy['externalTriggers']
or t_proxy['xtriggers']
):
ansiprint(
"other: ('-': not satisfied)")
- if t_proxy['clockTrigger']['timeString']:
- state = t_proxy['clockTrigger']['satisfied']
- time_str = t_proxy['clockTrigger']['timeString']
- print_msg_state(
- 'Clock trigger time reached',
- state)
- print(f' o Triggers at ... {time_str}')
for ext_trig in t_proxy['externalTriggers']:
state = ext_trig['satisfied']
print_msg_state(
diff --git a/cylc/flow/scripts/validate.py b/cylc/flow/scripts/validate.py
index 0db83d1bdfa..2c1f7468aff 100755
--- a/cylc/flow/scripts/validate.py
+++ b/cylc/flow/scripts/validate.py
@@ -39,6 +39,7 @@
TriggerExpressionError
)
import cylc.flow.flags
+from cylc.flow.id import Tokens
from cylc.flow.id_cli import parse_id_async
from cylc.flow.loggingutil import disable_timestamps
from cylc.flow.option_parsers import (
@@ -167,7 +168,11 @@ async def wrapped_main(
print('Instantiating tasks to check trigger expressions')
for name, taskdef in cfg.taskdefs.items():
try:
- itask = TaskProxy(taskdef, cfg.start_point)
+ itask = TaskProxy(
+ Tokens(workflow_id),
+ taskdef,
+ cfg.start_point,
+ )
except TaskProxySequenceBoundsError:
            # Should already have failed above
mesg = 'Task out of bounds for %s: %s\n' % (cfg.start_point, name)
diff --git a/cylc/flow/scripts/validate_reinstall.py b/cylc/flow/scripts/validate_reinstall.py
index 5341b2167c2..0fb63b73ef9 100644
--- a/cylc/flow/scripts/validate_reinstall.py
+++ b/cylc/flow/scripts/validate_reinstall.py
@@ -61,7 +61,9 @@
_main as cylc_validate
)
from cylc.flow.scripts.reinstall import (
- REINSTALL_CYLC_ROSE_OPTIONS, reinstall_cli as cylc_reinstall
+ REINSTALL_CYLC_ROSE_OPTIONS,
+ REINSTALL_OPTIONS,
+ reinstall_cli as cylc_reinstall,
)
from cylc.flow.scripts.reload import (
reload_cli as cylc_reload
@@ -73,6 +75,7 @@
CYLC_ROSE_OPTIONS = COP.get_cylc_rose_options()
VR_OPTIONS = combine_options(
VALIDATE_OPTIONS,
+ REINSTALL_OPTIONS,
REINSTALL_CYLC_ROSE_OPTIONS,
PLAY_OPTIONS,
CYLC_ROSE_OPTIONS,
@@ -162,7 +165,7 @@ def vro_cli(parser: COP, options: 'Values', workflow_id: str):
cylc_validate(parser, options, workflow_id)
log_subcommand('reinstall', workflow_id)
- reinstall_ok = cylc_reinstall(options, workflow_id)
+ reinstall_ok = cylc_reinstall(options, workflow_id, print_reload_tip=False)
if not reinstall_ok:
LOG.warning(
'No changes to source: No reinstall or'
diff --git a/cylc/flow/task_events_mgr.py b/cylc/flow/task_events_mgr.py
index ea2c7c1be7d..bade854875e 100644
--- a/cylc/flow/task_events_mgr.py
+++ b/cylc/flow/task_events_mgr.py
@@ -430,7 +430,7 @@ def check_job_time(self, itask, now):
def _get_remote_conf(self, itask, key):
"""Get deprecated "[remote]" items that default to platforms."""
- overrides = self.broadcast_mgr.get_broadcast(itask.identity)
+ overrides = self.broadcast_mgr.get_broadcast(itask.tokens)
SKEY = 'remote'
if SKEY not in overrides:
overrides[SKEY] = {}
@@ -442,7 +442,7 @@ def _get_remote_conf(self, itask, key):
def _get_workflow_platforms_conf(self, itask, key):
"""Return top level [runtime] items that default to platforms."""
- overrides = self.broadcast_mgr.get_broadcast(itask.identity)
+ overrides = self.broadcast_mgr.get_broadcast(itask.tokens)
return (
overrides.get(key) or
itask.tdef.rtconfig[key] or
@@ -594,7 +594,7 @@ def process_message(
else:
new_msg = message
self.data_store_mgr.delta_job_msg(
- itask.tokens.duplicate(job=str(submit_num)).relative_id,
+ itask.tokens.duplicate(job=str(submit_num)),
new_msg
)
@@ -675,11 +675,11 @@ def process_message(
# ... but either way update the job ID in the job proxy (it only
# comes in via the submission message).
if itask.tdef.run_mode != 'simulation':
- job_d = itask.tokens.duplicate(
+ job_tokens = itask.tokens.duplicate(
job=str(itask.submit_num)
- ).relative_id
+ )
self.data_store_mgr.delta_job_attr(
- job_d, 'job_id', itask.summary['submit_method_id'])
+ job_tokens, 'job_id', itask.summary['submit_method_id'])
elif message.startswith(FAIL_MESSAGE_PREFIX):
# Task received signal.
@@ -923,7 +923,7 @@ def _event_email_callback(self, proc_ctx, schd):
def _get_events_conf(self, itask, key, default=None):
"""Return an events setting from workflow then global configuration."""
for getter in [
- self.broadcast_mgr.get_broadcast(itask.identity).get("events"),
+ self.broadcast_mgr.get_broadcast(itask.tokens).get("events"),
itask.tdef.rtconfig["mail"],
itask.tdef.rtconfig["events"],
glbl_cfg().get(["scheduler", "mail"]),
@@ -1101,9 +1101,9 @@ def _process_message_failed(self, itask, event_time, message):
if event_time is None:
event_time = get_current_time_string()
itask.set_summary_time('finished', event_time)
- job_d = itask.tokens.duplicate(job=str(itask.submit_num)).relative_id
- self.data_store_mgr.delta_job_time(job_d, 'finished', event_time)
- self.data_store_mgr.delta_job_state(job_d, TASK_STATUS_FAILED)
+ job_tokens = itask.tokens.duplicate(job=str(itask.submit_num))
+ self.data_store_mgr.delta_job_time(job_tokens, 'finished', event_time)
+ self.data_store_mgr.delta_job_state(job_tokens, TASK_STATUS_FAILED)
self.workflow_db_mgr.put_update_task_jobs(itask, {
"run_status": 1,
"time_run_exit": event_time,
@@ -1133,9 +1133,9 @@ def _process_message_started(self, itask, event_time):
if itask.job_vacated:
itask.job_vacated = False
LOG.warning(f"[{itask}] Vacated job restarted")
- job_d = itask.tokens.duplicate(job=str(itask.submit_num)).relative_id
- self.data_store_mgr.delta_job_time(job_d, 'started', event_time)
- self.data_store_mgr.delta_job_state(job_d, TASK_STATUS_RUNNING)
+ job_tokens = itask.tokens.duplicate(job=str(itask.submit_num))
+ self.data_store_mgr.delta_job_time(job_tokens, 'started', event_time)
+ self.data_store_mgr.delta_job_state(job_tokens, TASK_STATUS_RUNNING)
itask.set_summary_time('started', event_time)
self.workflow_db_mgr.put_update_task_jobs(itask, {
"time_run": itask.summary['started_time_string']})
@@ -1151,9 +1151,10 @@ def _process_message_started(self, itask, event_time):
def _process_message_succeeded(self, itask, event_time):
"""Helper for process_message, handle a succeeded message."""
- job_d = itask.tokens.duplicate(job=str(itask.submit_num)).relative_id
- self.data_store_mgr.delta_job_time(job_d, 'finished', event_time)
- self.data_store_mgr.delta_job_state(job_d, TASK_STATUS_SUCCEEDED)
+
+ job_tokens = itask.tokens.duplicate(job=str(itask.submit_num))
+ self.data_store_mgr.delta_job_time(job_tokens, 'finished', event_time)
+ self.data_store_mgr.delta_job_state(job_tokens, TASK_STATUS_SUCCEEDED)
itask.set_summary_time('finished', event_time)
self.workflow_db_mgr.put_update_task_jobs(itask, {
"run_status": 0,
@@ -1206,9 +1207,12 @@ def _process_message_submit_failed(self, itask, event_time, submit_num):
self.setup_event_handlers(itask, self.EVENT_SUBMIT_RETRY, msg)
# Register newly submit-failed job with the database and datastore.
- job_d = itask.tokens.duplicate(job=str(itask.submit_num)).relative_id
+ job_tokens = itask.tokens.duplicate(job=str(itask.submit_num))
self._insert_task_job(itask, event_time, self.JOB_SUBMIT_FAIL_FLAG)
- self.data_store_mgr.delta_job_state(job_d, TASK_STATUS_SUBMIT_FAILED)
+ self.data_store_mgr.delta_job_state(
+ job_tokens,
+ TASK_STATUS_SUBMIT_FAILED
+ )
self._reset_job_timers(itask)
@@ -1257,13 +1261,24 @@ def _process_message_submitted(
# Register the newly submitted job with the database and datastore.
# Do after itask has changed state
self._insert_task_job(itask, event_time, self.JOB_SUBMIT_SUCCESS_FLAG)
- job_d = itask.tokens.duplicate(job=str(itask.submit_num)).relative_id
- self.data_store_mgr.delta_job_time(job_d, 'submitted', event_time)
+ job_tokens = itask.tokens.duplicate(job=str(itask.submit_num))
+ self.data_store_mgr.delta_job_time(
+ job_tokens,
+ 'submitted',
+ event_time,
+ )
if itask.tdef.run_mode == 'simulation':
# Simulate job started as well.
- self.data_store_mgr.delta_job_time(job_d, 'started', event_time)
+ self.data_store_mgr.delta_job_time(
+ job_tokens,
+ 'started',
+ event_time,
+ )
else:
- self.data_store_mgr.delta_job_state(job_d, TASK_STATUS_SUBMITTED)
+ self.data_store_mgr.delta_job_state(
+ job_tokens,
+ TASK_STATUS_SUBMITTED,
+ )
def _insert_task_job(
self,
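The recurring change in this file is the calling convention: the data-store delta methods now receive the structured `Tokens` object and render whatever ID form they need internally, rather than being handed a pre-rendered `relative_id` string. A sketch of before vs after (the stub stands in for `data_store_mgr`; its real signature is assumed to match):

```python
from cylc.flow.id import Tokens

class FakeDataStore:
    """Stand-in for data_store_mgr, showing only the argument type."""
    def delta_job_msg(self, job_tokens: Tokens, msg: str) -> None:
        # the store can render an ID itself when it needs one
        print(job_tokens.relative_id, msg)

task_tokens = Tokens(user='alice', workflow='flow').duplicate(
    cycle='1', task='foo'
)
job_tokens = task_tokens.duplicate(job='01')

# before: data_store_mgr.delta_job_msg(job_tokens.relative_id, 'submitted')
FakeDataStore().delta_job_msg(job_tokens, 'submitted')  # after
```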
diff --git a/cylc/flow/task_job_mgr.py b/cylc/flow/task_job_mgr.py
index 6671cbe0c93..e68bada23b9 100644
--- a/cylc/flow/task_job_mgr.py
+++ b/cylc/flow/task_job_mgr.py
@@ -58,8 +58,6 @@
)
from cylc.flow.pathutil import get_remote_workflow_run_job_dir
from cylc.flow.platforms import (
- HOST_REC_COMMAND,
- PLATFORM_REC_COMMAND,
get_host_from_platform,
get_install_target_from_platform,
get_localhost_install_target,
@@ -370,7 +368,7 @@ def submit_task_jobs(self, workflow, itasks, curve_auth,
self.data_store_mgr.delta_job_msg(
itask.tokens.duplicate(
job=str(itask.submit_num)
- ).relative_id,
+ ),
self.REMOTE_INIT_MSG,
)
continue
@@ -399,7 +397,7 @@ def submit_task_jobs(self, workflow, itasks, curve_auth,
self.data_store_mgr.delta_job_msg(
itask.tokens.duplicate(
job=str(itask.submit_num)
- ).relative_id,
+ ),
self.REMOTE_INIT_MSG
)
continue
@@ -423,7 +421,7 @@ def submit_task_jobs(self, workflow, itasks, curve_auth,
self.data_store_mgr.delta_job_msg(
itask.tokens.duplicate(
job=str(itask.submit_num)
- ).relative_id,
+ ),
self.REMOTE_INIT_MSG,
)
continue
@@ -457,7 +455,7 @@ def submit_task_jobs(self, workflow, itasks, curve_auth,
self.data_store_mgr.delta_job_msg(
itask.tokens.duplicate(
job=str(itask.submit_num)
- ).relative_id,
+ ),
REMOTE_FILE_INSTALL_IN_PROGRESS
)
continue
@@ -706,7 +704,7 @@ def _kill_task_job_callback(self, workflow, itask, cmd_ctx, line):
self.data_store_mgr.delta_job_msg(
itask.tokens.duplicate(
job=str(itask.submit_num)
- ).relative_id,
+ ),
log_msg
)
LOG.log(log_lvl, f"[{itask}] {log_msg}")
@@ -811,17 +809,17 @@ def _poll_task_job_callback(self, workflow, itask, cmd_ctx, line):
ctx.out = line
ctx.ret_code = 0
# See cylc.flow.job_runner_mgr.JobPollContext
- job_d = itask.tokens.duplicate(job=str(itask.submit_num)).relative_id
+ job_tokens = itask.tokens.duplicate(job=str(itask.submit_num))
try:
job_log_dir, context = line.split('|')[1:3]
items = json.loads(context)
jp_ctx = JobPollContext(job_log_dir, **items)
except TypeError:
- self.data_store_mgr.delta_job_msg(job_d, self.POLL_FAIL)
+ self.data_store_mgr.delta_job_msg(job_tokens, self.POLL_FAIL)
ctx.cmd = cmd_ctx.cmd # print original command on failure
return
except ValueError:
- self.data_store_mgr.delta_job_msg(job_d, self.POLL_FAIL)
+ self.data_store_mgr.delta_job_msg(job_tokens, self.POLL_FAIL)
ctx.cmd = cmd_ctx.cmd # print original command on failure
return
finally:
@@ -1084,7 +1082,7 @@ def _prep_submit_task_job(
# Handle broadcasts
overrides = self.task_events_mgr.broadcast_mgr.get_broadcast(
- itask.identity
+ itask.tokens
)
if overrides:
rtconfig = pdeepcopy(itask.tdef.rtconfig)
@@ -1120,12 +1118,12 @@ def _prep_submit_task_job(
host_n, platform_name = None, None
try:
if rtconfig['remote']['host'] is not None:
- host_n = self.task_remote_mgr.subshell_eval(
- rtconfig['remote']['host'], HOST_REC_COMMAND
+ host_n = self.task_remote_mgr.eval_host(
+ rtconfig['remote']['host']
)
else:
- platform_name = self.task_remote_mgr.subshell_eval(
- rtconfig['platform'], PLATFORM_REC_COMMAND
+ platform_name = self.task_remote_mgr.eval_platform(
+ rtconfig['platform']
)
except PlatformError as exc:
itask.waiting_on_job_prep = False
diff --git a/cylc/flow/task_pool.py b/cylc/flow/task_pool.py
index b5f7adf09a4..a93999847ef 100644
--- a/cylc/flow/task_pool.py
+++ b/cylc/flow/task_pool.py
@@ -87,13 +87,14 @@ class TaskPool:
def __init__(
self,
+ tokens: 'Tokens',
config: 'WorkflowConfig',
workflow_db_mgr: 'WorkflowDatabaseManager',
task_events_mgr: 'TaskEventsManager',
data_store_mgr: 'DataStoreMgr',
flow_mgr: 'FlowMgr'
) -> None:
-
+ self.tokens = tokens
self.config: 'WorkflowConfig' = config
self.stop_point = config.stop_point or config.final_point
self.workflow_db_mgr: 'WorkflowDatabaseManager' = workflow_db_mgr
@@ -437,6 +438,7 @@ def load_db_task_pool_for_restart(self, row_idx, row):
outputs_str) = row
try:
itask = TaskProxy(
+ self.tokens,
self.config.get_taskdef(name),
get_point(cycle),
deserialise(flow_nums),
@@ -906,8 +908,12 @@ def reload_taskdefs(self) -> None:
)
else:
new_task = TaskProxy(
+ self.tokens,
self.config.get_taskdef(itask.tdef.name),
- itask.point, itask.flow_nums, itask.state.status)
+ itask.point,
+ itask.flow_nums,
+ itask.state.status,
+ )
itask.copy_to_reload_successor(new_task)
self._swap_out(new_task)
LOG.info(f"[{itask}] reloaded task definition")
@@ -937,11 +943,6 @@ def reload_taskdefs(self) -> None:
# Already queued
continue
ready_check_items = itask.is_ready_to_run()
- # Use this periodic checking point for data-store delta
- # creation, some items aren't event driven (i.e. clock).
- if itask.tdef.clocktrigger_offset is not None:
- self.data_store_mgr.delta_task_clock_trigger(
- itask, ready_check_items)
if all(ready_check_items) and not itask.state.is_runahead:
self.queue_task(itask)
@@ -1457,12 +1458,13 @@ def spawn_task(
return None
itask = TaskProxy(
+ self.tokens,
taskdef,
point,
flow_nums,
submit_num=submit_num,
is_manual_submit=is_manual_submit,
- flow_wait=flow_wait
+ flow_wait=flow_wait,
)
if (name, point) in self.tasks_to_hold:
LOG.info(f"[{itask}] holding (as requested earlier)")
@@ -1538,7 +1540,12 @@ def force_spawn_children(
n_warnings, task_items = self.match_taskdefs(items)
for (_, point), taskdef in sorted(task_items.items()):
# This the parent task:
- itask = TaskProxy(taskdef, point, flow_nums=flow_nums)
+ itask = TaskProxy(
+ self.tokens,
+ taskdef,
+ point,
+ flow_nums=flow_nums,
+ )
# Spawn children of selected outputs.
for trig, out, _ in itask.state.outputs.get_all():
if trig in outputs:
diff --git a/cylc/flow/task_proxy.py b/cylc/flow/task_proxy.py
index 696794f44db..e56dd635bf9 100644
--- a/cylc/flow/task_proxy.py
+++ b/cylc/flow/task_proxy.py
@@ -19,7 +19,6 @@
from collections import Counter
from copy import copy
from fnmatch import fnmatchcase
-from time import time
from typing import (
Any, Callable, Dict, List, Set, Tuple, Optional, TYPE_CHECKING
)
@@ -51,7 +50,7 @@ class TaskProxy:
Attributes:
.clock_trigger_time:
Clock trigger time in seconds since epoch.
- (Used for both old-style clock triggers and wall_clock xtrigger).
+ (Used for wall_clock xtrigger).
.expire_time:
Time in seconds since epoch when this task is considered expired.
.identity:
@@ -185,6 +184,7 @@ class TaskProxy:
def __init__(
self,
+ scheduler_tokens: 'Tokens',
tdef: 'TaskDef',
start_point: 'PointBase',
flow_nums: Optional[Set[int]] = None,
@@ -209,8 +209,7 @@ def __init__(
self.flow_nums = copy(flow_nums)
self.flow_wait = flow_wait
self.point = start_point
- self.tokens = Tokens(
- # TODO: make these absolute?
+ self.tokens = scheduler_tokens.duplicate(
cycle=str(self.point),
task=self.tdef.name,
)
@@ -364,7 +363,7 @@ def is_ready_to_run(self) -> Tuple[bool, ...]:
"""Is this task ready to run?
Takes account of all dependence: on other tasks, xtriggers, and
- old-style ext- and clock-triggers. Or, manual triggering.
+ old-style ext-triggers. Or, manual triggering.
"""
if self.is_manual_submit:
@@ -378,7 +377,6 @@ def is_ready_to_run(self) -> Tuple[bool, ...]:
return (self.try_timers[self.state.status].is_delay_done(),)
return (
self.state(TASK_STATUS_WAITING),
- self.is_waiting_clock_done(),
self.is_waiting_prereqs_done()
)
@@ -393,18 +391,6 @@ def set_summary_time(self, event_key, time_str=None):
self.summary[event_key + '_time'] = float(str2time(time_str))
self.summary[event_key + '_time_string'] = time_str
- def is_waiting_clock_done(self):
- """Is this task done waiting for its old-style clock trigger time?
-
- Return True if there is no clock trigger or when clock trigger is done.
- """
- if self.tdef.clocktrigger_offset is None:
- return True
- return (
- time() >
- self.get_clock_trigger_time(str(self.tdef.clocktrigger_offset))
- )
-
def is_task_prereqs_not_done(self):
"""Are some task prerequisites not satisfied?"""
return (not all(pre.is_satisfied()
diff --git a/cylc/flow/task_remote_mgr.py b/cylc/flow/task_remote_mgr.py
index 59b6e04cf42..c38c2ce1e4c 100644
--- a/cylc/flow/task_remote_mgr.py
+++ b/cylc/flow/task_remote_mgr.py
@@ -32,12 +32,14 @@
from subprocess import Popen, PIPE, DEVNULL
import tarfile
from time import sleep, time
-from typing import Any, Deque, Dict, TYPE_CHECKING, List, NamedTuple, Tuple
+from typing import (
+ Any, Deque, Dict, TYPE_CHECKING, List,
+ NamedTuple, Optional, Tuple
+)
from cylc.flow import LOG
from cylc.flow.exceptions import PlatformError
import cylc.flow.flags
-from cylc.flow.hostuserutil import is_remote_host
from cylc.flow.network.client_factory import CommsMeth
from cylc.flow.pathutil import (
get_dirs_to_symlink,
@@ -46,6 +48,8 @@
get_workflow_run_dir,
)
from cylc.flow.platforms import (
+ HOST_REC_COMMAND,
+ PLATFORM_REC_COMMAND,
NoHostsError,
PlatformLookupError,
get_host_from_platform,
@@ -67,6 +71,7 @@
)
from cylc.flow.loggingutil import get_next_log_number, get_sorted_logs_by_time
+from cylc.flow.hostuserutil import is_remote_host
if TYPE_CHECKING:
from zmq.auth.thread import ThreadAuthenticator
@@ -106,39 +111,31 @@ def __init__(self, workflow, proc_pool, bad_hosts):
self.is_reload = False
self.is_restart = False
- def subshell_eval(self, command, command_pattern, host_check=True):
- """Evaluate a task platform from a subshell string.
-
- At Cylc 7, from a host string.
+ def _subshell_eval(
+ self, eval_str: str, command_pattern: re.Pattern
+ ) -> Optional[str]:
+ """Evaluate a platform or host from a possible subshell string.
Arguments:
- command (str):
- An explicit host name, a command in back-tick or $(command)
- format, or an environment variable holding a hostname.
- command_pattern (re.Pattern):
+ eval_str:
+ An explicit host/platform name, a command, or an environment
+            variable holding a host/platform name.
+ command_pattern:
A compiled regex pattern designed to match subshell strings.
- host_check (bool):
- A flag to enable remote testing. If True, and if the command
- is running locally, then it will return 'localhost'.
- Return (str):
+ Return:
- None if evaluation of command is still taking place.
- - If command is not defined or the evaluated name is equivalent
- to 'localhost', _and_ host_check is set to True then
- 'localhost'
- - Otherwise, return the evaluated host name on success.
+ - 'localhost' if string is empty/not defined.
+ - Otherwise, return the evaluated host/platform name on success.
Raise PlatformError on error.
"""
- # BACK COMPAT: references to "host"
- # remove at:
- # Cylc8.x
- if not command:
+ if not eval_str:
return 'localhost'
# Host selection command: $(command) or `command`
- match = command_pattern.match(command)
+ match = command_pattern.match(eval_str)
if match:
cmd_str = match.groups()[1]
if cmd_str in self.remote_command_map:
@@ -146,34 +143,51 @@ def subshell_eval(self, command, command_pattern, host_check=True):
value = self.remote_command_map[cmd_str]
if isinstance(value, PlatformError):
raise value # command failed
- elif value is None:
- return # command not yet ready
- else:
- command = value # command succeeded
+ if value is None:
+ return None # command not yet ready
+ eval_str = value # command succeeded
else:
# Command not launched (or already reset)
self.proc_pool.put_command(
SubProcContext(
'remote-host-select',
['bash', '-c', cmd_str],
- env=dict(os.environ)),
+ env=dict(os.environ)
+ ),
callback=self._subshell_eval_callback,
callback_args=[cmd_str]
)
self.remote_command_map[cmd_str] = None
- return self.remote_command_map[cmd_str]
+ return None
# Environment variable substitution
- command = os.path.expandvars(command)
- # Remote?
- # TODO - Remove at Cylc 8.x as this only makes sense with host logic
- if host_check is True:
- if is_remote_host(command):
- return command
- else:
- return 'localhost'
- else:
- return command
+ return os.path.expandvars(eval_str)
+
+ # BACK COMPAT: references to "host"
+ # remove at:
+ # Cylc8.x
+ def eval_host(self, host_str: str) -> Optional[str]:
+ """Evaluate a host from a possible subshell string.
+
+ Args:
+ host_str: An explicit host name, a command in back-tick or
+ $(command) format, or an environment variable holding
+ a hostname.
+
+        Returns 'localhost' if the evaluated name is equivalent to
+        localhost (e.g. localhost4.localdomain4).
+ """
+ host = self._subshell_eval(host_str, HOST_REC_COMMAND)
+ return host if is_remote_host(host) else 'localhost'
+
+ def eval_platform(self, platform_str: str) -> Optional[str]:
+ """Evaluate a platform from a possible subshell string.
+
+ Args:
+ platform_str: An explicit platform name, a command in $(command)
+ format, or an environment variable holding a platform name.
+ """
+ return self._subshell_eval(platform_str, PLATFORM_REC_COMMAND)
def subshell_eval_reset(self):
"""Reset remote eval subshell results.
diff --git a/cylc/flow/taskdef.py b/cylc/flow/taskdef.py
index 29e0f7d0de0..e4e771ec82d 100644
--- a/cylc/flow/taskdef.py
+++ b/cylc/flow/taskdef.py
@@ -115,7 +115,7 @@ class TaskDef:
"run_mode", "rtconfig", "start_point", "initial_point", "sequences",
"used_in_offset_trigger", "max_future_prereq_offset",
"sequential", "is_coldstart",
- "workflow_polling_cfg", "clocktrigger_offset", "expiration_offset",
+ "workflow_polling_cfg", "expiration_offset",
"namespace_hierarchy", "dependencies", "outputs", "param_var",
"graph_children", "graph_parents", "has_abs_triggers",
"external_triggers", "xtrig_labels", "name", "elapsed_times"]
@@ -140,7 +140,6 @@ def __init__(self, name, rtcfg, run_mode, start_point, initial_point):
self.sequential = False
self.workflow_polling_cfg = {}
- self.clocktrigger_offset = None
self.expiration_offset = None
self.namespace_hierarchy = []
self.dependencies = {}
diff --git a/cylc/flow/tui/app.py b/cylc/flow/tui/app.py
index fd05a613443..eb4e248d49c 100644
--- a/cylc/flow/tui/app.py
+++ b/cylc/flow/tui/app.py
@@ -481,6 +481,7 @@ def create_overlay(self, widget, kwargs):
See `urwid` docs for details.
"""
+ # create the overlay
kwargs = {'width': 'pack', 'height': 'pack', **kwargs}
overlay = urwid.Overlay(
urwid.LineBox(
@@ -503,9 +504,15 @@ def create_overlay(self, widget, kwargs):
top=self.stack * 5,
**kwargs,
)
+
+ # add it into the overlay stack
self.loop.widget = overlay
self.stack += 1
+ # force urwid to render the overlay now rather than waiting until the
+ # event loop becomes idle
+ self.loop.draw_screen()
+
def close_topmost(self):
"""Remove the topmost frame or uit the app if none present."""
if self.stack <= 0:
diff --git a/cylc/flow/tui/data.py b/cylc/flow/tui/data.py
index 0a304bec46f..4fd538d8783 100644
--- a/cylc/flow/tui/data.py
+++ b/cylc/flow/tui/data.py
@@ -14,6 +14,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
+from functools import partial
from subprocess import Popen, PIPE
import sys
@@ -84,7 +85,6 @@
MUTATIONS = {
'workflow': [
'pause',
- 'resume',
'reload',
'stop',
],
@@ -130,6 +130,46 @@
}
+def cli_cmd(*cmd):
+ """Issue a CLI command.
+
+ Args:
+ cmd:
+            The command without the 'cylc' prefix.
+
+    Raises:
+ ClientError:
+            Raised on command failure, for consistency with the
+            network client alternative.
+
+ """
+ proc = Popen( # nosec (command constructed internally, no untrusted input)
+ ['cylc', *cmd],
+ stderr=PIPE,
+ stdout=PIPE,
+ text=True,
+ )
+ out, err = proc.communicate()
+ if proc.returncode != 0:
+ raise ClientError(f'Error in command {" ".join(cmd)}\n{err}')
+
+
+def _clean(workflow):
+ # for now we will exit tui when the workflow is cleaned
+ # this will change when tui supports multiple workflows
+ cli_cmd('clean', workflow)
+ sys.exit(0)
+
+
+OFFLINE_MUTATIONS = {
+ 'workflow': {
+ 'play': partial(cli_cmd, 'play'),
+ 'clean': _clean,
+ 'reinstall-reload': partial(cli_cmd, 'vr', '--yes'),
+ }
+}
+
+
def generate_mutation(mutation, arguments):
graphql_args = ', '.join([
f'${argument}: {ARGUMENT_TYPES[argument]}'
@@ -149,10 +189,16 @@ def generate_mutation(mutation, arguments):
'''
-def list_mutations(selection):
+def list_mutations(client, selection):
context = extract_context(selection)
selection_type = list(context)[-1]
- return MUTATIONS.get(selection_type, [])
+ ret = []
+ if client:
+ # add the online mutations
+ ret.extend(MUTATIONS.get(selection_type, []))
+ # add the offline mutations
+ ret.extend(OFFLINE_MUTATIONS.get(selection_type, []))
+ return sorted(ret)
def context_to_variables(context):
@@ -181,6 +227,18 @@ def context_to_variables(context):
def mutate(client, mutation, selection):
+ if mutation in OFFLINE_MUTATIONS['workflow']:
+ offline_mutate(mutation, selection)
+ elif client:
+ online_mutate(client, mutation, selection)
+ else:
+ raise Exception(
+            f'Cannot perform command {mutation} on a stopped workflow'
+ ' or invalid command.'
+ )
+
+
+def online_mutate(client, mutation, selection):
"""Issue a mutation over a network interface."""
context = extract_context(selection)
variables = context_to_variables(context)
@@ -199,39 +257,5 @@ def offline_mutate(mutation, selection):
context = extract_context(selection)
variables = context_to_variables(context)
for workflow in variables['workflow']:
- if mutation == 'play':
- cli_cmd('play', workflow)
- elif mutation == 'clean': # noqa: SIM106
- cli_cmd('clean', workflow)
- # tui only supports single-workflow display ATM so
- # clean should shut down the program
- sys.exit()
- elif mutation in list_mutations(selection): # noqa: SIM106
- # this is an "online" mutation -> ignore
- pass
- else:
- raise Exception(f'Invalid mutation: {mutation}')
-
-
-def cli_cmd(*cmd):
- """Issue a CLI command.
-
- Args:
- cmd:
- The command without the 'cylc' prefix'.
-
- Rasies:
- ClientError:
- In the event of mishap for consistency with the network
- client alternative.
-
- """
- proc = Popen( # nosec (command constructed internally, no untrusted input)
- ['cylc', *cmd],
- stderr=PIPE,
- stdout=PIPE,
- text=True,
- )
- out, err = proc.communicate()
- if proc.returncode != 0:
- raise ClientError(err)
+ # NOTE: this currently only supports workflow mutations
+ OFFLINE_MUTATIONS['workflow'][mutation](workflow)
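The `OFFLINE_MUTATIONS` table above replaces the old if/elif chain with a dispatch table of `functools.partial` objects. The pattern in isolation:

```python
from functools import partial

def cli_cmd(*cmd):
    print('would run: cylc', *cmd)

ACTIONS = {
    'play': partial(cli_cmd, 'play'),
    'reinstall-reload': partial(cli_cmd, 'vr', '--yes'),
}

ACTIONS['play']('my-flow')              # would run: cylc play my-flow
ACTIONS['reinstall-reload']('my-flow')  # would run: cylc vr --yes my-flow
```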
diff --git a/cylc/flow/tui/overlay.py b/cylc/flow/tui/overlay.py
index 539d9826cd6..cddcc8d5093 100644
--- a/cylc/flow/tui/overlay.py
+++ b/cylc/flow/tui/overlay.py
@@ -58,7 +58,6 @@
from cylc.flow.tui.data import (
list_mutations,
mutate,
- offline_mutate,
)
from cylc.flow.tui.util import (
get_task_icon
@@ -218,16 +217,14 @@ def context(app):
def _mutate(mutation, _):
nonlocal app
+ app.open_overlay(partial(progress, text='Running Command'))
try:
- if app.client:
- mutate(app.client, mutation, selection)
- else:
- offline_mutate(mutation, selection)
+ mutate(app.client, mutation, selection)
except ClientError as exc:
- # app.set_header([('workflow_error', str(exc))])
app.open_overlay(partial(error, text=str(exc)))
else:
app.close_topmost()
+ app.close_topmost()
widget = urwid.ListBox(
urwid.SimpleFocusListWalker(
@@ -245,18 +242,14 @@ def _mutate(mutation, _):
mutation,
on_press=partial(_mutate, mutation)
)
- for mutation in (
- list_mutations(selection)
- if app.client
- else ['play', 'clean']
- )
+ for mutation in list_mutations(app.client, selection)
]
)
)
return (
widget,
- {'width': 30, 'height': 15}
+ {'width': 30, 'height': 20}
)
@@ -270,3 +263,13 @@ def error(app, text=''):
]),
{'width': 50, 'height': 40}
)
+
+
+def progress(app, text='Working'):
+ """An overlay for presenting a running action."""
+ return (
+ urwid.ListBox([
+ urwid.Text(text),
+ ]),
+ {'width': 30, 'height': 10}
+ )
diff --git a/cylc/flow/workflow_files.py b/cylc/flow/workflow_files.py
index e51036da327..4997286a2a6 100644
--- a/cylc/flow/workflow_files.py
+++ b/cylc/flow/workflow_files.py
@@ -14,82 +14,58 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-"""Workflow service files management."""
+"""Files which define workflows or are created by Cylc.
+
+See also:
+* cylc.flow.install - for installing files from source dirs into the run dir.
+* cylc.flow.clean - for removing files from the run dir.
+
+"""
-from collections import deque
-from contextlib import suppress
-from enum import Enum
-from functools import partial
-import glob
-import logging
import os
-from pathlib import Path
-from random import shuffle
import re
-from subprocess import Popen, PIPE, DEVNULL, TimeoutExpired
-import shlex
import shutil
-import sqlite3
-from time import sleep
+from enum import Enum
+from pathlib import Path
+from subprocess import (
+ PIPE,
+ Popen,
+ TimeoutExpired,
+)
from typing import (
- Any, Container, Deque, Dict, Iterable, List, NamedTuple, Optional, Set,
- Tuple, TYPE_CHECKING, Union
+ Dict,
+ Optional,
+ Tuple,
+ Union,
)
-import aiofiles
-import zmq.auth
-
import cylc.flow.flags
from cylc.flow import LOG
-from cylc.flow.cfgspec.glbl_cfg import glbl_cfg
+from cylc.flow.async_util import make_async
from cylc.flow.exceptions import (
CylcError,
- PlatformError,
- PlatformLookupError,
- ServiceFileError,
InputError,
+ ServiceFileError,
WorkflowFilesError,
handle_rmtree_err,
)
-from cylc.flow.loggingutil import (
- CylcLogFormatter,
- close_log,
- get_next_log_number,
- get_sorted_logs_by_time
+from cylc.flow.hostuserutil import (
+ get_user,
+ is_remote_host,
)
from cylc.flow.pathutil import (
expand_path,
get_cylc_run_dir,
get_workflow_run_dir,
make_localhost_symlinks,
- parse_rm_dirs,
- remove_dir_and_target,
- get_next_rundir_number,
- remove_dir_or_file,
- remove_empty_parents
-)
-from cylc.flow.platforms import (
- get_host_from_platform,
- get_install_target_to_platforms_map,
- get_localhost_install_target,
-)
-from cylc.flow.hostuserutil import (
- get_user,
- is_remote_host
)
from cylc.flow.remote import (
- DEFAULT_RSYNC_OPTS,
construct_cylc_server_ssh_cmd,
- construct_ssh_cmd,
)
-from cylc.flow.rundb import CylcWorkflowDAO
from cylc.flow.terminal import parse_dirty_json
from cylc.flow.unicode_rules import WorkflowNameValidator
from cylc.flow.util import cli_format
-if TYPE_CHECKING:
- from optparse import Values
-
class KeyType(Enum):
"""Used for authentication keys - public or private"""
@@ -141,7 +117,7 @@ def __init__(self, key_type, key_owner, full_key_path=None,
if key_type == KeyType.PRIVATE:
file_extension = WorkflowFiles.Service.PRIVATE_FILE_EXTENSION
- elif key_type == KeyType.PUBLIC:
+ else:
file_extension = WorkflowFiles.Service.PUBLIC_FILE_EXTENSION
self.file_name = f"{file_name}{file_extension}"
@@ -334,12 +310,6 @@ class ContactFileFields:
"""Remote command setting for Scheduler."""
-class RemoteCleanQueueTuple(NamedTuple):
- proc: 'Popen[str]'
- install_target: str
- platforms: List[Dict[str, Any]]
-
-
REG_DELIM = "/"
NO_TITLE = "No title provided"
@@ -365,11 +335,6 @@ class RemoteCleanQueueTuple(NamedTuple):
"in {}"
)
-NESTED_DIRS_MSG = (
- "Nested {dir_type} directories not allowed - cannot install workflow"
- " in '{dest}' as '{existing}' is already a valid {dir_type} directory."
-)
-
def _is_process_running(
host: str,
@@ -624,10 +589,18 @@ def get_workflow_srv_dir(reg):
return os.path.join(run_d, WorkflowFiles.Service.DIRNAME)
-def load_contact_file(reg: str) -> Dict[str, str]:
+def load_contact_file(id_: str, run_dir=None) -> Dict[str, str]:
"""Load contact file. Return data as key=value dict."""
+ if not run_dir:
+ path = Path(get_contact_file_path(id_))
+ else:
+ path = Path(
+ run_dir,
+ WorkflowFiles.Service.DIRNAME,
+ WorkflowFiles.Service.CONTACT
+ )
try:
- with open(get_contact_file_path(reg)) as f:
+ with open(path) as f:
file_content = f.read()
except IOError:
raise ServiceFileError("Couldn't load contact file")
@@ -643,29 +616,7 @@ def load_contact_file(reg: str) -> Dict[str, str]:
return data
-async def load_contact_file_async(reg, run_dir=None):
- if not run_dir:
- path = Path(get_contact_file_path(reg))
- else:
- path = Path(
- run_dir,
- WorkflowFiles.Service.DIRNAME,
- WorkflowFiles.Service.CONTACT
- )
- try:
- async with aiofiles.open(path, mode='r') as cont:
- data = {}
- async for line in cont:
- key, value = [item.strip() for item in line.split("=", 1)]
- # BACK COMPAT: contact pre "suite" to "workflow" conversion.
- # from:
- # Cylc 8
- # remove at:
- # Cylc 8.x
- data[key.replace('SUITE', 'WORKFLOW')] = value
- return data
- except IOError:
- raise ServiceFileError("Couldn't load contact file")
+load_contact_file_async = make_async(load_contact_file)
def register(
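The hand-written `aiofiles` coroutine is replaced by `make_async(load_contact_file)`. A standalone approximation of what such a wrapper does (assumed behaviour of `cylc.flow.async_util.make_async`: run the blocking function in an executor so it can be awaited):

```python
import asyncio
from functools import partial, wraps

def make_async(fn):
    """Wrap a blocking function as an awaitable coroutine (sketch)."""
    @wraps(fn)
    async def _async_fn(*args, **kwargs):
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, partial(fn, *args, **kwargs))
    return _async_fn

def read_file(path):
    with open(path) as f:
        return f.read()

read_file_async = make_async(read_file)
# usage (inside a coroutine): text = await read_file_async(some_path)
```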
@@ -733,169 +684,6 @@ def is_installed(rund: Union[Path, str]) -> bool:
return cylc_install_dir.is_dir() or alt_cylc_install_dir.is_dir()
-async def get_contained_workflows(partial_id) -> List[str]:
- """Return the sorted names of any workflows in a directory.
-
- Args:
- path: Absolute path to the dir.
- scan_depth: How many levels deep to look inside the dir.
- """
- from cylc.flow.network.scan import scan
- run_dir = Path(get_workflow_run_dir(partial_id))
- # Note: increased scan depth for safety
- scan_depth = glbl_cfg().get(['install', 'max depth']) + 1
- return sorted(
- [i['name'] async for i in scan(scan_dir=run_dir, max_depth=scan_depth)]
- )
-
-
-def _clean_check(opts: 'Values', reg: str, run_dir: Path) -> None:
- """Check whether a workflow can be cleaned.
-
- Args:
- reg: Workflow name.
- run_dir: Path to the workflow run dir on the filesystem.
- """
- validate_workflow_name(reg)
- reg = os.path.normpath(reg)
- # Thing to clean must be a dir or broken symlink:
- if not run_dir.is_dir() and not run_dir.is_symlink():
- raise FileNotFoundError(f"No directory to clean at {run_dir}")
- db_path = (
- run_dir / WorkflowFiles.Service.DIRNAME / WorkflowFiles.Service.DB
- )
- if opts.local_only and not db_path.is_file():
- # Will reach here if this is cylc clean re-invoked on remote host
- # (workflow DB only exists on scheduler host); don't need to worry
- # about contact file.
- return
- try:
- detect_old_contact_file(reg)
- except ServiceFileError as exc:
- raise ServiceFileError(
- f"Cannot clean running workflow {reg}.\n\n{exc}"
- )
-
-
-def init_clean(id_: str, opts: 'Values') -> None:
- """Initiate the process of removing a stopped workflow from the local
- scheduler filesystem and remote hosts.
-
- Args:
- id_: Workflow ID.
- opts: CLI options object for cylc clean.
-
- """
- local_run_dir = Path(get_workflow_run_dir(id_))
- with suppress(InputError):
- local_run_dir, id_ = infer_latest_run(
- local_run_dir, implicit_runN=False, warn_runN=False
- )
- try:
- _clean_check(opts, id_, local_run_dir)
- except FileNotFoundError as exc:
- LOG.info(exc)
- return
-
- # Parse --rm option to make sure it's valid
- rm_dirs = parse_rm_dirs(opts.rm_dirs) if opts.rm_dirs else None
-
- if not opts.local_only:
- platform_names = None
- db_file = Path(get_workflow_srv_dir(id_), 'db')
- if not db_file.is_file():
- # no DB -> do nothing
- if opts.remote_only:
- raise ServiceFileError(
- f"No workflow database for {id_} - cannot perform "
- "remote clean"
- )
- LOG.info(
- f"No workflow database for {id_} - will only clean locally"
- )
- else:
- # DB present -> load platforms
- try:
- platform_names = get_platforms_from_db(local_run_dir)
- except ServiceFileError as exc:
- raise ServiceFileError(f"Cannot clean {id_} - {exc}")
- except sqlite3.OperationalError as exc:
- # something went wrong with the query
- # e.g. the table/field we need isn't there
- LOG.warning(
- 'This database is either corrupted or not compatible with'
- ' this version of "cylc clean".'
- '\nTry using the version of Cylc the workflow was last ran'
- ' with to remove it.'
- '\nOtherwise please delete the database file.'
- )
- raise ServiceFileError(f"Cannot clean {id_} - {exc}")
-
- if platform_names and platform_names != {'localhost'}:
- remote_clean(
- id_, platform_names, opts.rm_dirs, opts.remote_timeout
- )
-
- if not opts.remote_only:
- # Must be after remote clean
- clean(id_, local_run_dir, rm_dirs)
-
-
-def clean(id_: str, run_dir: Path, rm_dirs: Optional[Set[str]] = None) -> None:
- """Remove a stopped workflow from the local filesystem only.
-
- Deletes the workflow run directory and any symlink dirs, or just the
- specified sub dirs if rm_dirs is specified.
-
- Note: if the run dir has already been manually deleted, it will not be
- possible to clean any symlink dirs.
-
- Args:
- id_: Workflow ID.
- run_dir: Absolute path of the workflow's run dir.
- rm_dirs: Set of sub dirs to remove instead of the whole run dir.
-
- """
- symlink_dirs = get_symlink_dirs(id_, run_dir)
- if rm_dirs is not None:
- # Targeted clean
- for pattern in rm_dirs:
- _clean_using_glob(run_dir, pattern, symlink_dirs)
- else:
- # Wholesale clean
- LOG.debug(f"Cleaning {run_dir}")
- for symlink in symlink_dirs:
- # Remove /cylc-run//
- remove_dir_and_target(run_dir / symlink)
- if '' not in symlink_dirs:
- # if run dir isn't a symlink dir and hasn't been deleted yet
- remove_dir_and_target(run_dir)
-
- # Tidy up if necessary
- # Remove `runN` symlink if it's now broken
- runN = run_dir.parent / WorkflowFiles.RUN_N
- if (
- runN.is_symlink() and
- not run_dir.exists() and
- os.readlink(str(runN)) == run_dir.name
- ):
- runN.unlink()
- # Remove _cylc-install if it's the only thing left
- cylc_install_dir = run_dir.parent / WorkflowFiles.Install.DIRNAME
- for entry in run_dir.parent.iterdir():
- if entry == cylc_install_dir:
- continue
- break
- else: # no break
- if cylc_install_dir.is_dir():
- remove_dir_or_file(cylc_install_dir)
- # Remove any empty parents of run dir up to ~/cylc-run/
- remove_empty_parents(run_dir, id_)
- for symlink, target in symlink_dirs.items():
- # Remove empty parents of symlink target up to /cylc-run/
- remove_empty_parents(target, Path(id_, symlink))
-
-
def get_symlink_dirs(reg: str, run_dir: Union[Path, str]) -> Dict[str, Path]:
"""Return the standard symlink dirs and their targets if they exist in
the workflow run dir.
@@ -924,224 +712,6 @@ def get_symlink_dirs(reg: str, run_dir: Union[Path, str]) -> Dict[str, Path]:
return ret
-def glob_in_run_dir(
- run_dir: Union[Path, str], pattern: str, symlink_dirs: Container[Path]
-) -> List[Path]:
- """Execute a (recursive) glob search in the given run directory.
-
- Returns list of any absolute paths that match the pattern. However:
- * Does not follow symlinks (apart from the spcedified symlink dirs).
- * Also does not return matching subpaths of matching directories (because
- that would be redundant).
-
- Args:
- run_dir: Absolute path of the workflow run dir.
- pattern: The glob pattern.
- symlink_dirs: Absolute paths to the workflow's symlink dirs.
- """
- # Note: use os.path.join, not pathlib, to preserve trailing slash if
- # present in pattern
- pattern = os.path.join(glob.escape(str(run_dir)), pattern)
- # Note: don't use pathlib.Path.glob() because when you give it an exact
- # filename instead of pattern, it doesn't return broken symlinks
- matches = sorted(Path(i) for i in glob.iglob(pattern, recursive=True))
- # sort guarantees parents come before their children
- if len(matches) == 1 and not os.path.lexists(matches[0]):
- # https://bugs.python.org/issue35201
- return []
- results: List[Path] = []
- subpath_excludes: Set[Path] = set()
- for path in matches:
- for rel_ancestor in reversed(path.relative_to(run_dir).parents):
- ancestor = run_dir / rel_ancestor
- if ancestor in subpath_excludes:
- break
- if ancestor.is_symlink() and ancestor not in symlink_dirs:
- # Do not follow non-standard symlinks
- subpath_excludes.add(ancestor)
- break
- if not symlink_dirs and (ancestor in results):
- # We can be sure all subpaths of this ancestor are redundant
- subpath_excludes.add(ancestor)
- break
- if ancestor == path.parent: # noqa: SIM102
- # Final iteration over ancestors
- if ancestor in matches and path not in symlink_dirs:
- # Redundant (but don't exclude subpaths in case any of the
- # subpaths are std symlink dirs)
- break
- else: # No break
- results.append(path)
- return results
-
-
-def _clean_using_glob(
- run_dir: Path, pattern: str, symlink_dirs: Iterable[str]
-) -> None:
- """Delete the files/dirs in the run dir that match the pattern.
-
- Does not follow symlinks (apart from the standard symlink dirs).
-
- Args:
- run_dir: Absolute path of workflow run dir.
- pattern: The glob pattern.
- symlink_dirs: Paths of the workflow's symlink dirs relative to
- the run dir.
- """
- abs_symlink_dirs = tuple(sorted(
- (run_dir / d for d in symlink_dirs),
- reverse=True # ordered by deepest to shallowest
- ))
- matches = glob_in_run_dir(run_dir, pattern, abs_symlink_dirs)
- if not matches:
- LOG.info(f"No files matching '{pattern}' in {run_dir}")
- return
- # First clean any matching symlink dirs
- for path in abs_symlink_dirs:
- if path in matches:
- remove_dir_and_target(path)
- if path == run_dir:
- # We have deleted the run dir
- return
- matches.remove(path)
- # Now clean the rest
- for path in matches:
- remove_dir_or_file(path)
-
-
-def remote_clean(
- reg: str,
- platform_names: Iterable[str],
- rm_dirs: Optional[List[str]] = None,
- timeout: str = '120'
-) -> None:
- """Run subprocesses to clean workflows on remote install targets
- (skip localhost), given a set of platform names to look up.
-
- Args:
- reg: Workflow name.
- platform_names: List of platform names to look up in the global
- config, in order to determine the install targets to clean on.
- rm_dirs: Sub dirs to remove instead of the whole run dir.
- timeout: Number of seconds to wait before cancelling.
- """
- try:
- install_targets_map = (
- get_install_target_to_platforms_map(platform_names))
- except PlatformLookupError as exc:
- raise PlatformLookupError(
- f"Cannot clean {reg} on remote platforms as the workflow database "
- f"is out of date/inconsistent with the global config - {exc}")
- queue: Deque[RemoteCleanQueueTuple] = deque()
- remote_clean_cmd = partial(
- _remote_clean_cmd, reg=reg, rm_dirs=rm_dirs, timeout=timeout
- )
- for target, platforms in install_targets_map.items():
- if target == get_localhost_install_target():
- continue
- shuffle(platforms)
- LOG.info(
- f"Cleaning {reg} on install target: "
- f"{platforms[0]['install target']}"
- )
- # Issue ssh command:
- queue.append(
- RemoteCleanQueueTuple(
- remote_clean_cmd(platform=platforms[0]), target, platforms
- )
- )
- failed_targets: Dict[str, PlatformError] = {}
- # Handle subproc pool results almost concurrently:
- while queue:
- item = queue.popleft()
- ret_code = item.proc.poll()
- if ret_code is None: # proc still running
- queue.append(item)
- continue
- out, err = item.proc.communicate()
- if out:
- LOG.info(f"[{item.install_target}]\n{out}")
- if ret_code:
- this_platform = item.platforms.pop(0)
- excp = PlatformError(
- PlatformError.MSG_TIDY,
- this_platform['name'],
- cmd=item.proc.args,
- ret_code=ret_code,
- out=out,
- err=err,
- )
- if ret_code == 255 and item.platforms:
- # SSH error; try again using the next platform for this
- # install target
- LOG.debug(excp)
- queue.append(
- item._replace(
- proc=remote_clean_cmd(platform=item.platforms[0])
- )
- )
- else: # Exhausted list of platforms
- failed_targets[item.install_target] = excp
- elif err:
- # Only show stderr from remote host in debug mode if ret code 0
- # because stderr often contains useless stuff like ssh login
- # messages
- LOG.debug(f"[{item.install_target}]\n{err}")
- sleep(0.2)
- if failed_targets:
- for target, excp in failed_targets.items():
- LOG.error(
- f"Could not clean {reg} on install target: {target}\n{excp}"
- )
- raise CylcError(f"Remote clean failed for {reg}")
-
-
-def _remote_clean_cmd(
- reg: str,
- platform: Dict[str, Any],
- rm_dirs: Optional[List[str]],
- timeout: str
-) -> 'Popen[str]':
- """Remove a stopped workflow on a remote host.
-
- Call "cylc clean --local-only" over ssh and return the subprocess.
-
- Args:
- reg: Workflow name.
- platform: Config for the platform on which to remove the workflow.
- rm_dirs: Sub dirs to remove instead of the whole run dir.
- timeout: Number of seconds to wait before cancelling the command.
-
- Raises:
- NoHostsError: If the platform is not contactable.
-
- """
- LOG.debug(
- f"Cleaning {reg} on install target: {platform['install target']} "
- f"(using platform: {platform['name']})"
- )
- cmd = ['clean', '--local-only', reg]
- if rm_dirs is not None:
- for item in rm_dirs:
- cmd.extend(['--rm', item])
- cmd = construct_ssh_cmd(
- cmd,
- platform,
- get_host_from_platform(platform),
- timeout=timeout,
- set_verbosity=True,
- )
- LOG.debug(" ".join(cmd))
- return Popen( # nosec
- cmd,
- stdin=DEVNULL,
- stdout=PIPE,
- stderr=PIPE,
- text=True,
- )
- # * command constructed by internal interface
-
-
def remove_keys_on_server(keys):
"""Removes server-held authentication keys"""
# WARNING, DESTRUCTIVE. Removes old keys if they already exist.
@@ -1158,6 +728,7 @@ def create_server_keys(keys, workflow_srv_dir):
"""Create or renew authentication keys for workflow 'reg' in the .service
directory.
Generate a pair of ZMQ authentication keys"""
+ import zmq.auth
# ZMQ keys generated in .service directory.
# .service/client_public_keys will store client public keys generated on
@@ -1203,34 +774,6 @@ def get_workflow_title(reg):
return title
-def get_platforms_from_db(run_dir: Path) -> Set[str]:
- """Return the set of names of platforms (that jobs ran on) from the DB.
-
- Warning:
- This does NOT upgrade the workflow database!
-
- We could upgrade the DB for backward compatiblity, but we haven't
- got any upgraders for this table yet so there's no point.
-
- Note that upgrading the DB here would not help with forward
- compatibility. We can't apply upgraders which don't exist yet.
-
- Args:
- run_dir: The workflow run directory.
-
- Raises:
- sqlite3.OperationalError: in the event the table/field required for
- cleaning is not present.
-
- """
- with CylcWorkflowDAO(
- run_dir / WorkflowFiles.Service.DIRNAME / WorkflowFiles.Service.DB
- ) as pri_dao:
- platform_names = pri_dao.select_task_job_platforms()
-
- return platform_names
-
-
def check_deprecation(path, warn=True):
"""Warn and turn on back-compat flag if Cylc 7 suite.rc detected.
@@ -1356,69 +899,6 @@ def infer_latest_run(
return (path, reg)
-def check_nested_dirs(
- run_dir: Path,
- install_dir: Optional[Path] = None
-) -> None:
- """Disallow nested dirs:
-
- - Nested installed run dirs
- - Nested installed workflow dirs
-
- Args:
- run_dir: Absolute workflow run directory path.
- install_dir: Absolute workflow install directory path
- (contains _cylc-install). If None, will not check for nested
- install dirs.
-
- Raises:
- WorkflowFilesError if reg dir is nested inside a run dir, or an
- install dirs are nested.
- """
- if install_dir is not None:
- install_dir = Path(os.path.normpath(install_dir))
- # Check parents:
- for parent_dir in run_dir.parents:
- # Stop searching at ~/cylc-run
- if parent_dir == Path(get_cylc_run_dir()):
- break
- # check for run directories:
- if is_valid_run_dir(parent_dir):
- raise WorkflowFilesError(
- NESTED_DIRS_MSG.format(
- dir_type='run',
- dest=run_dir,
- existing=get_cylc_run_abs_path(parent_dir)
- )
- )
- # Check for install directories:
- if (
- install_dir
- and parent_dir in install_dir.parents
- and (parent_dir / WorkflowFiles.Install.DIRNAME).is_dir()
- ):
- raise WorkflowFilesError(
- NESTED_DIRS_MSG.format(
- dir_type='install',
- dest=run_dir,
- existing=get_cylc_run_abs_path(parent_dir)
- )
- )
-
- if install_dir:
- # Search child tree for install directories:
- for depth in range(glbl_cfg().get(['install', 'max depth'])):
- search_pattern = f'*/{"*/" * depth}{WorkflowFiles.Install.DIRNAME}'
- for result in install_dir.glob(search_pattern):
- raise WorkflowFilesError(
- NESTED_DIRS_MSG.format(
- dir_type='install',
- dest=run_dir,
- existing=get_cylc_run_abs_path(result.parent)
- )
- )
-
-
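To illustrate the parent walk above in isolation: Path.parents iterates from the immediate parent upwards, so the search naturally finds the deepest enclosing run dir first. A minimal sketch with a stand-in predicate (not part of Cylc):

    from pathlib import Path
    from typing import Optional

    def find_enclosing_run_dir(run_dir: Path, cylc_run: Path) -> Optional[Path]:
        """Return the deepest ancestor of run_dir that is itself a run dir."""
        def looks_like_run_dir(path: Path) -> bool:
            # Stand-in for is_valid_run_dir(); checks typical run-dir markers.
            return (path / '.service').is_dir() or (path / 'flow.cylc').is_file()

        for parent in run_dir.parents:
            if parent == cylc_run:
                break  # stop searching at ~/cylc-run
            if looks_like_run_dir(parent):
                return parent
        return None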
def is_valid_run_dir(path: Union[Path, str]) -> bool:
"""Return True if path is a valid, existing run directory, else False.
@@ -1446,358 +926,6 @@ def get_cylc_run_abs_path(path: Union[Path, str]) -> Union[Path, str]:
return get_workflow_run_dir(path)
-def _get_logger(rund, log_name, open_file=True):
- """Get log and create and open if necessary.
-
- Args:
- rund:
- The workflow run directory of the associated workflow.
- log_name:
- The name of the log to open.
- open_file:
- Open the appropriate log file and add it as a file handler to
- the logger. I.E. Start writing the log to a file if not already
- doing so.
-
- """
- logger = logging.getLogger(log_name)
- if logger.getEffectiveLevel() != logging.INFO:
- logger.setLevel(logging.INFO)
- if open_file and not logger.hasHandlers():
- _open_install_log(rund, logger)
- return logger
-
-
-def _open_install_log(rund, logger):
- """Open Cylc log handlers for install/reinstall."""
- rund = Path(rund).expanduser()
- log_type = logger.name[logger.name.startswith('cylc-') and len('cylc-'):]
- log_dir = Path(
- rund, WorkflowFiles.LogDir.DIRNAME, WorkflowFiles.LogDir.INSTALL)
- log_files = get_sorted_logs_by_time(log_dir, '*.log')
- log_num = get_next_log_number(log_files[-1]) if log_files else 1
- log_path = Path(log_dir, f"{log_num:02d}-{log_type}.log")
- log_parent_dir = log_path.parent
- log_parent_dir.mkdir(exist_ok=True, parents=True)
- handler = logging.FileHandler(log_path)
- handler.setFormatter(CylcLogFormatter())
- logger.addHandler(handler)
-
-
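The numbered log naming used above (01-install.log, 02-reinstall.log, ...) can be sketched in isolation; this assumes the next number is simply one more than the highest existing NN- prefix:

    import re
    from pathlib import Path

    def next_install_log_path(log_dir: Path, log_type: str) -> Path:
        """Pick the next NN-<log_type>.log name in a log directory (sketch)."""
        nums = []
        for log_file in log_dir.glob('*.log'):
            match = re.match(r'(\d+)-', log_file.name)
            if match:
                nums.append(int(match.group(1)))
        log_num = max(nums, default=0) + 1
        return log_dir / f'{log_num:02d}-{log_type}.log'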
-def get_rsync_rund_cmd(src, dst, reinstall=False, dry_run=False):
- """Create and return the rsync command used for cylc install/re-install.
-
- Args:
- src (str):
- file path location of source directory
- dst (str):
- file path location of destination directory
- reinstall (bool):
- indicate reinstall (--delete option added)
- dry_run (bool):
- indicate dry run; rsync will not transfer anything, but will report
- what would change if a real run were executed
-
- Return:
- list: command to use for rsync.
-
- """
- rsync_cmd = shlex.split(
- glbl_cfg().get(['platforms', 'localhost', 'rsync command'])
- )
- rsync_cmd += DEFAULT_RSYNC_OPTS
- if dry_run:
- rsync_cmd.append("--dry-run")
- if reinstall:
- rsync_cmd.append('--delete')
-
- exclusions = [
- '.git',
- '.svn',
- '.cylcignore',
- 'opt/rose-suite-cylc-install.conf',
- WorkflowFiles.LogDir.DIRNAME,
- WorkflowFiles.WORK_DIR,
- WorkflowFiles.SHARE_DIR,
- WorkflowFiles.Install.DIRNAME,
- WorkflowFiles.Service.DIRNAME
- ]
-
- # This is a hack to make sure that changes to rose-suite.conf
- # are considered when re-installing.
- # It should be removed after https://github.com/cylc/cylc-rose/issues/149
- if not dry_run:
- exclusions.append('rose-suite.conf')
-
- for exclude in exclusions:
- if (
- Path(src).joinpath(exclude).exists() or
- Path(dst).joinpath(exclude).exists()
- ):
- # Note '/' is the rsync "anchor" to the top level:
- rsync_cmd.append(f"--exclude=/{exclude}")
- cylcignore_file = Path(src).joinpath('.cylcignore')
- if cylcignore_file.exists():
- rsync_cmd.append(f"--exclude-from={cylcignore_file}")
- rsync_cmd.append(f"{src}/")
- rsync_cmd.append(f"{dst}/")
-
- return rsync_cmd
-
-
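A usage sketch for the helper above (illustrative paths; with dry_run=True rsync reports what it would transfer without copying anything):

    from subprocess import PIPE, Popen

    cmd = get_rsync_rund_cmd('/path/to/src', '/path/to/rundir', dry_run=True)
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE, text=True)
    stdout, stderr = proc.communicate()
    print(stdout)  # files rsync would have transferred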
-def reinstall_workflow(
- source: Path,
- named_run: str,
- rundir: Path,
- dry_run: bool = False
-) -> str:
- """Reinstall workflow.
-
- Args:
- source: source directory
- named_run: name of the run e.g. my-flow/run1
- rundir: run directory
- dry_run: if True, will not execute the file transfer but report what
- would be changed.
-
- Raises:
- WorkflowFilesError:
- If rsync returns non-zero.
-
- Returns:
- Stdout from the rsync command.
-
- """
- validate_source_dir(source, named_run)
- check_nested_dirs(rundir)
- reinstall_log = _get_logger(
- rundir,
- 'cylc-reinstall',
- open_file=not dry_run, # don't open the log file for --dry-run
- )
- reinstall_log.info(
- f'Reinstalling "{named_run}", from "{source}" to "{rundir}"'
- )
- rsync_cmd = get_rsync_rund_cmd(
- source,
- rundir,
- reinstall=True,
- dry_run=dry_run,
- )
-
- # Add '+++' to --out-format to mark lines passed through formatter.
- rsync_cmd.append('--out-format=+++%o %n%L+++')
-
- # Run rsync command:
- reinstall_log.info(cli_format(rsync_cmd))
- LOG.debug(cli_format(rsync_cmd))
- proc = Popen(rsync_cmd, stdout=PIPE, stderr=PIPE, text=True) # nosec
- # * command is constructed via internal interface
- stdout, stderr = proc.communicate()
-
- # Strip unwanted output.
- stdout = ('\n'.join(re.findall(r'\+\+\+(.*)\+\+\+', stdout))).strip()
- stderr = stderr.strip()
-
- if proc.returncode != 0:
- raise WorkflowFilesError(
- f'An error occurred reinstalling from {source} to {rundir}'
- f'\n{stderr}'
- )
-
- check_flow_file(rundir)
- reinstall_log.info(f'REINSTALLED {named_run} from {source}')
- print(
- f'REINSTALL{"ED" if not dry_run else ""} {named_run} from {source}'
- )
- close_log(reinstall_log)
- return stdout
-
-
-def abort_if_flow_file_in_path(source: Path) -> None:
- """Raise an exception if a flow file is found in a source path.
-
- This allows us to avoid seemingly silly warnings that "path/flow.cylc"
- is not a valid workflow ID when "path" is valid and the user was just
- (erroneously) trying to validate the config file directly, for example.
-
- """
- if source.name in {WorkflowFiles.FLOW_FILE, WorkflowFiles.SUITE_RC}:
- raise InputError(
- f"Not a valid workflow ID or source directory: {source}"
- f"\n(Note you should not include {source.name}"
- " in the workflow source path)"
- )
-
-
-def install_workflow(
- source: Path,
- workflow_name: Optional[str] = None,
- run_name: Optional[str] = None,
- no_run_name: bool = False,
- cli_symlink_dirs: Optional[Dict[str, Dict[str, Any]]] = None
-) -> Tuple[Path, Path, str, str]:
- """Install a workflow, or renew its installation.
-
- Install workflow into new run directory.
- Create symlink to workflow source location, creating any symlinks for run,
- work, log, share, share/cycle directories.
-
- Args:
- source: absolute path to workflow source directory.
- workflow_name: workflow name, default basename($PWD).
- run_name: name of the run; overrides the numbered run1, run2, run3...
- If specified, cylc install will not create the runN symlink.
- no_run_name: Flag as True to install workflow into
- ~/cylc-run/
- cli_symlink_dirs: Symlink dirs, if entered on the cli.
-
- Return:
- source: absolute path to source directory.
- rundir: absolute path to run directory, where the workflow has been
- installed into.
- workflow_name: installed workflow name (which may be computed here).
- named_run: Name of the run.
-
- Raise:
- WorkflowFilesError:
- No flow.cylc file found in source location.
- Illegal name (can look like a relative path, but not absolute).
- Another workflow already has this name.
- Trying to install a workflow that is nested inside of another.
- """
- abort_if_flow_file_in_path(source)
- source = Path(expand_path(source)).resolve()
- if not workflow_name:
- workflow_name = get_source_workflow_name(source)
- validate_workflow_name(workflow_name, check_reserved_names=True)
- if run_name is not None:
- if len(Path(run_name).parts) != 1:
- raise WorkflowFilesError(
- f'Run name cannot be a path. (You used {run_name})'
- )
- check_reserved_dir_names(run_name)
- validate_source_dir(source, workflow_name)
- run_path_base = Path(get_workflow_run_dir(workflow_name))
- relink, run_num, rundir = get_run_dir_info(
- run_path_base, run_name, no_run_name
- )
- max_scan_depth = glbl_cfg().get(['install', 'max depth'])
- workflow_id = rundir.relative_to(get_cylc_run_dir())
- if len(workflow_id.parts) > max_scan_depth:
- raise WorkflowFilesError(
- f"Cannot install: workflow ID '{workflow_id}' would exceed "
- f"global.cylc[install]max depth = {max_scan_depth}"
- )
- check_nested_dirs(rundir, run_path_base)
- if rundir.exists():
- raise WorkflowFilesError(
- f"'{rundir}' already exists\n"
- "To reinstall, use `cylc reinstall`"
- )
- symlinks_created = {}
- named_run = workflow_name
- if run_name:
- named_run = os.path.join(named_run, run_name)
- elif run_num:
- named_run = os.path.join(named_run, f'run{run_num}')
- symlinks_created = make_localhost_symlinks(
- rundir, named_run, symlink_conf=cli_symlink_dirs)
- install_log = _get_logger(rundir, 'cylc-install')
- if symlinks_created:
- for target, symlink in symlinks_created.items():
- install_log.info(f"Symlink created: {symlink} -> {target}")
- try:
- rundir.mkdir(exist_ok=True, parents=True)
- except FileExistsError:
- # This occurs when the file exists but is _not_ a directory.
- raise WorkflowFilesError(
- f"Cannot install as there is an existing file at {rundir}."
- )
- if relink:
- link_runN(rundir)
- rsync_cmd = get_rsync_rund_cmd(source, rundir)
- proc = Popen(rsync_cmd, stdout=PIPE, stderr=PIPE, text=True) # nosec
- # * command is constructed via internal interface
- stdout, stderr = proc.communicate()
- install_log.info(
- f"Copying files from {source} to {rundir}"
- f"\n{stdout}"
- )
- if proc.returncode != 0:
- install_log.warning(
- f"An error occurred when copying files from {source} to {rundir}")
- install_log.warning(f" Warning: {stderr}")
- cylc_install = Path(rundir.parent, WorkflowFiles.Install.DIRNAME)
- check_deprecation(check_flow_file(rundir))
- if no_run_name:
- cylc_install = Path(rundir, WorkflowFiles.Install.DIRNAME)
- source_link = cylc_install.joinpath(WorkflowFiles.Install.SOURCE)
- # check source link matches the source symlink from workflow dir.
- cylc_install.mkdir(parents=True, exist_ok=True)
- if not source_link.exists():
- if source_link.is_symlink():
- # Condition represents a broken symlink.
- raise WorkflowFilesError(
- f'Symlink broken: {source_link} -> {source_link.resolve()}.'
- )
- install_log.info(f"Creating symlink from {source_link}")
- source_link.symlink_to(source.resolve())
- else:
- if source_link.resolve() != source.resolve():
- raise WorkflowFilesError(
- f"Failed to install from {source.resolve()}: "
- f"previous installations were from {source_link.resolve()}"
- )
- install_log.info(
- f'Symlink from "{source_link}" to "{source}" in place.')
- install_log.info(f'INSTALLED {named_run} from {source}')
- print(f'INSTALLED {named_run} from {source}')
- close_log(install_log)
- return source, rundir, workflow_name, named_run
-
-
-def get_run_dir_info(
- run_path_base: Path, run_name: Optional[str], no_run_name: bool
-) -> Tuple[bool, Optional[int], Path]:
- """Get (numbered, named or unnamed) run directory info for current install.
-
- Args:
- run_path_base: The workflow directory absolute path.
- run_name: Name of the run.
- no_run_name: Flag as True to indicate no run name - workflow installed
- into ~/cylc-run/.
-
- Returns:
- relink: True if runN symlink needs updating.
- run_num: Run number of the current install, if using numbered runs.
- rundir: Run directory absolute path.
- """
- relink = False
- run_num = None
- if no_run_name:
- rundir = run_path_base
- elif run_name:
- rundir = run_path_base.joinpath(run_name)
- if run_path_base.exists() and detect_flow_exists(run_path_base, True):
- raise WorkflowFilesError(
- f"--run-name option not allowed as '{run_path_base}' contains "
- "installed numbered runs."
- )
- else:
- run_num = get_next_rundir_number(run_path_base)
- rundir = Path(run_path_base, f'run{run_num}')
- if run_path_base.exists() and detect_flow_exists(run_path_base, False):
- raise WorkflowFilesError(
- f"Path: \"{run_path_base}\" contains an installed"
- " workflow. Use --run-name to create a new run."
- )
- unlink_runN(run_path_base)
- relink = True
- return relink, run_num, rundir
-
-
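The numbered-run arithmetic relied on above can be sketched like this; it assumes get_next_rundir_number amounts to "highest existing runN plus one":

    import re
    from pathlib import Path

    def next_run_number(run_path_base: Path) -> int:
        """Return N for the next runN directory (sketch)."""
        if not run_path_base.is_dir():
            return 1
        nums = [
            int(entry.name[3:])
            for entry in run_path_base.iterdir()
            if re.fullmatch(r'run\d+', entry.name)
        ]
        return max(nums, default=0) + 1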
def detect_both_flow_and_suite(path: Path) -> None:
"""Detects if both suite.rc and flow.cylc are in directory.
@@ -1845,32 +973,6 @@ def is_forbidden(flow_file: Path) -> bool:
return False
-def detect_flow_exists(
- run_path_base: Union[Path, str], numbered: bool
-) -> bool:
- """Returns True if installed flow already exists.
-
- Args:
- run_path_base: Absolute path of workflow directory,
- i.e. ~/cylc-run/
- numbered: If True, will detect if numbered runs exist. If False, will
- detect if non-numbered runs exist, i.e. runs installed
- by --run-name.
- """
- for entry in Path(run_path_base).iterdir():
- is_numbered = bool(re.search(r'^run\d+$', entry.name))
- if (
- entry.is_dir()
- and entry.name not in {
- WorkflowFiles.Install.DIRNAME, WorkflowFiles.RUN_N
- }
- and Path(entry, WorkflowFiles.FLOW_FILE).exists()
- and is_numbered == numbered
- ):
- return True
- return False
-
-
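For example, given a workflow dir containing only run1/flow.cylc, the function above reports numbered runs but not named ones:

    import tempfile
    from pathlib import Path

    base = Path(tempfile.mkdtemp())
    (base / 'run1').mkdir()
    (base / 'run1' / 'flow.cylc').touch()

    assert detect_flow_exists(base, numbered=True) is True
    assert detect_flow_exists(base, numbered=False) is False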
def check_flow_file(path: Union[Path, str]) -> Path:
"""Checks the path for a suite.rc or flow.cylc file.
@@ -1894,131 +996,17 @@ def check_flow_file(path: Union[Path, str]) -> Path:
raise WorkflowFilesError(NO_FLOW_FILE_MSG.format(path))
-def validate_source_dir(
- source: Union[Path, str], workflow_name: str
-) -> None:
- """Ensure the source directory is valid:
- - has flow file
- - does not contain reserved dir names
-
- Args:
- source: Path to source directory
- workflow_name: Name of the workflow, used in error messages.
- Raises:
- WorkflowFilesError:
- If log, share, work or _cylc-install directories exist in the
- source directory.
- """
- # Source dir must not contain reserved run dir names (as file or dir).
- for dir_ in WorkflowFiles.RESERVED_DIRNAMES:
- if Path(source, dir_).exists():
- raise WorkflowFilesError(
- f"{workflow_name} installation failed "
- f"- {dir_} exists in source directory."
- )
- check_flow_file(source)
-
-
-def parse_cli_sym_dirs(symlink_dirs: str) -> Dict[str, Dict[str, Any]]:
- """Converts command line entered symlink dirs to a dictionary.
-
- Args:
- symlink_dirs: As entered by user on cli,
- e.g. "log=$DIR, share=$DIR2".
-
- Raises:
- WorkflowFilesError: If directory to be symlinked is not in permitted
- dirs: run, log, share, work, share/cycle
-
- Returns:
- dict: In the same form as would be returned by global config.
- e.g. {'localhost': {'log': '$DIR',
- 'share': '$DIR2'
- }
- }
- """
- # Ensure the same nested dict format as returned by the global config
- symdict: Dict[str, Dict[str, Any]] = {'localhost': {'run': None}}
- if symlink_dirs == "":
- return symdict
- symlist = symlink_dirs.strip(',').split(',')
- possible_symlink_dirs = set(WorkflowFiles.SYMLINK_DIRS.union(
- {WorkflowFiles.RUN_DIR})
- )
- possible_symlink_dirs.remove('')
- for pair in symlist:
- try:
- key, val = pair.split("=")
- key = key.strip()
- except ValueError:
- raise InputError(
- 'There is an error in --symlink-dirs option:'
- f' {pair}. Try entering option in the form '
- '--symlink-dirs=\'log=$DIR, share=$DIR2, ...\''
- )
- if key not in possible_symlink_dirs:
- dirs = ', '.join(possible_symlink_dirs)
- raise InputError(
- f"{key} not a valid entry for --symlink-dirs. "
- f"Configurable symlink dirs are: {dirs}"
- )
- symdict['localhost'][key] = val.strip() or None
-
- return symdict
-
-
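For example (assuming the permitted symlink dirs are run, log, share, share/cycle and work), the parser above turns a CLI string into the same nested mapping the global config would supply:

    parse_cli_sym_dirs('log=$DIR, share=$DIR2')
    # -> {'localhost': {'run': None, 'log': '$DIR', 'share': '$DIR2'}}

    parse_cli_sym_dirs('junk=$DIR')
    # -> InputError: junk not a valid entry for --symlink-dirs. ...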
-def unlink_runN(path: Union[Path, str]) -> bool:
- """Remove symlink runN if it exists.
-
- Args:
- path: Absolute path to workflow dir containing runN.
- """
- try:
- Path(expand_path(path, WorkflowFiles.RUN_N)).unlink()
- except OSError:
- return False
- return True
-
-
-def link_runN(latest_run: Union[Path, str]):
- """Create symlink runN, pointing at the latest run"""
- latest_run = Path(latest_run)
- run_n = Path(latest_run.parent, WorkflowFiles.RUN_N)
- with suppress(OSError):
- run_n.symlink_to(latest_run.name)
+def abort_if_flow_file_in_path(source: Path) -> None:
+ """Raise an exception if a flow file is found in a source path.
+ This allows us to avoid seemingly silly warnings that "path/flow.cylc"
+ is not a valid workflow ID when "path" is valid and the user was just
+ (erroneously) trying to validate the config file directly, for example.
-def search_install_source_dirs(workflow_name: Union[Path, str]) -> Path:
- """Return the path of a workflow source dir if it is present in the
- 'global.cylc[install]source dirs' search path."""
- abort_if_flow_file_in_path(Path(workflow_name))
- search_path: List[str] = get_source_dirs()
- if not search_path:
- raise WorkflowFilesError(
- "Cannot find workflow as 'global.cylc[install]source dirs' "
- "does not contain any paths")
- for path in search_path:
- try:
- return check_flow_file(Path(path, workflow_name)).parent
- except WorkflowFilesError:
- continue
- raise WorkflowFilesError(
- f"Could not find workflow '{workflow_name}' in: "
- f"{', '.join(search_path)}")
-
-
-def get_source_workflow_name(source: Path) -> str:
- """Return workflow name relative to configured source dirs if possible,
- else the basename of the given path.
- Note the source path provided should be fully expanded (user and env vars)
- and normalised.
"""
- for dir_ in get_source_dirs():
- try:
- return str(source.relative_to(Path(expand_path(dir_)).resolve()))
- except ValueError:
- continue
- return source.name
-
-
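The try/except above leans on the fact that Path.relative_to raises ValueError when the path is not under the candidate directory:

    from pathlib import Path

    src = Path('/home/me/sources/nested/flow')  # illustrative path
    print(src.relative_to('/home/me/sources'))  # nested/flow
    try:
        src.relative_to('/other/dir')
    except ValueError:
        print('not under this source dir; try the next one')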
-def get_source_dirs() -> List[str]:
- return glbl_cfg().get(['install', 'source dirs'])
+ if source.name in {WorkflowFiles.FLOW_FILE, WorkflowFiles.SUITE_RC}:
+ raise InputError(
+ f"Not a valid workflow ID or source directory: {source}"
+ f"\n(Note you should not include {source.name}"
+ " in the workflow source path)"
+ )
diff --git a/mypy.ini b/mypy.ini
index 1bff04085ac..4c35cf1d53e 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -14,4 +14,8 @@ strict_equality = True
show_error_codes = True
# Not yet mypy compliant.
-exclude= cylc/flow/etc/tutorial/.*
\ No newline at end of file
+exclude= cylc/flow/etc/tutorial/.*
+
+# Suppress the following messages:
+# By default the bodies of untyped functions are not checked, consider using --check-untyped-defs
+disable_error_code = annotation-unchecked
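For reference, the suppressed annotation-unchecked note is the one mypy emits for a variable annotation inside a function whose own signature is untyped, e.g.:

    def untyped(x):  # no parameter or return annotations
        y: int = x  # mypy note: "By default the bodies of untyped functions
                    # are not checked..." [annotation-unchecked]
        return y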
diff --git a/setup.cfg b/setup.cfg
index bdd3b5abc2b..d099dd8c245 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -60,7 +60,6 @@ packages = find_namespace:
include_package_data = True
python_requires = >=3.7
install_requires =
- aiofiles==0.7.*
ansimarkup>=1.0.0
async-timeout>=3.0.0
colorama>=0.4,<=1
@@ -71,7 +70,7 @@ install_requires =
# Constrain protobuf version for compatible Scheduler-UIS comms across hosts
protobuf>=4.21.2,<4.22.0
psutil>=5.6.0
- pyzmq==22.*
+ pyzmq>=22
# https://github.com/pypa/setuptools/issues/3802
setuptools>=49, <67
urwid==2.*
@@ -123,9 +122,7 @@ tests =
testfixtures>=6.11.0
# Type annotation stubs
# http://mypy-lang.blogspot.com/2021/05/the-upcoming-switch-to-modular-typeshed.html
- types-aiofiles>=0.7.0
types-Jinja2>=0.1.3
- types-aiofiles>=0.1.3
types-pkg_resources>=0.1.2
types-protobuf>=0.1.10
types-six>=0.1.6
diff --git a/tests/flakyfunctional/cylc-show/00-simple.t b/tests/flakyfunctional/cylc-show/00-simple.t
index 5a688090cbd..bc00175565d 100644
--- a/tests/flakyfunctional/cylc-show/00-simple.t
+++ b/tests/flakyfunctional/cylc-show/00-simple.t
@@ -127,7 +127,6 @@ cmp_json "${TEST_NAME}-taskinstance" "${TEST_NAME}-taskinstance" \
{"label": "succeeded", "message": "succeeded", "satisfied": false},
{"label": "failed", "message": "failed", "satisfied": false}
],
- "clockTrigger": {"timeString": "", "satisfied": false},
"externalTriggers": [],
"xtriggers": []
}
diff --git a/tests/functional/cylc-show/01-clock-triggered.t b/tests/functional/cylc-show/01-clock-triggered.t
deleted file mode 100644
index 13a05b3aff0..00000000000
--- a/tests/functional/cylc-show/01-clock-triggered.t
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/env bash
-# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
-# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#-------------------------------------------------------------------------------
-# Test cylc show for a clock triggered task
-. "$(dirname "$0")/test_header"
-#-------------------------------------------------------------------------------
-set_test_number 3
-#-------------------------------------------------------------------------------
-install_workflow "${TEST_NAME_BASE}" clock-triggered
-#-------------------------------------------------------------------------------
-TEST_SHOW_OUTPUT_PATH="$PWD/${TEST_NAME_BASE}-show.stdout"
-#-------------------------------------------------------------------------------
-TEST_NAME="${TEST_NAME_BASE}-validate"
-run_ok "${TEST_NAME}" cylc validate \
- --set="TEST_OUTPUT_PATH='$TEST_SHOW_OUTPUT_PATH'" "${WORKFLOW_NAME}"
-#-------------------------------------------------------------------------------
-TEST_NAME="${TEST_NAME_BASE}-run"
-workflow_run_ok "${TEST_NAME}" cylc play --reference-test --debug --no-detach \
- --set="TEST_OUTPUT_PATH='$TEST_SHOW_OUTPUT_PATH'" "${WORKFLOW_NAME}"
-#-------------------------------------------------------------------------------
-TEST_NAME=${TEST_NAME_BASE}-show
-contains_ok "${TEST_NAME}.stdout" <<__SHOW_OUTPUT__
-title: (not given)
-description: (not given)
-URL: (not given)
-state: running
-prerequisites: ('-': not satisfied)
- + 20141106T0900Z/woo succeeded
-outputs: ('-': not completed)
- - 20141106T0900Z/foo expired
- + 20141106T0900Z/foo submitted
- - 20141106T0900Z/foo submit-failed
- + 20141106T0900Z/foo started
- - 20141106T0900Z/foo succeeded
- - 20141106T0900Z/foo failed
-other: ('-': not satisfied)
- + Clock trigger time reached
- o Triggers at ... 2014-11-06T09:05:00Z
-__SHOW_OUTPUT__
-#-------------------------------------------------------------------------------
-purge
diff --git a/tests/functional/cylc-show/03-clock-triggered-non-utc-mode.t b/tests/functional/cylc-show/03-clock-triggered-non-utc-mode.t
deleted file mode 100644
index e1c9ccbb8c6..00000000000
--- a/tests/functional/cylc-show/03-clock-triggered-non-utc-mode.t
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/env bash
-# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
-# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#-------------------------------------------------------------------------------
-# Test cylc show for a clock triggered task
-. "$(dirname "$0")/test_header"
-#-------------------------------------------------------------------------------
-set_test_number 3
-#-------------------------------------------------------------------------------
-install_workflow "${TEST_NAME_BASE}" clock-triggered-non-utc-mode
-#-------------------------------------------------------------------------------
-cd "${WORKFLOW_RUN_DIR}" || exit 1
-TEST_SHOW_OUTPUT_PATH="$PWD/${TEST_NAME_BASE}-show.stdout"
-TZ_OFFSET_EXTENDED=$(date +%:z | sed "/^%/d")
-if [[ -z "${TZ_OFFSET_EXTENDED}" ]]; then
- skip 3 "'date' command doesn't support '%:z'"
- exit 0
-fi
-TZ_OFFSET_BASIC=$(date +%z | sed "/^%/d")
-if [[ "${TZ_OFFSET_EXTENDED}" == "+00:00" ]]; then
- TZ_OFFSET_EXTENDED=Z
- TZ_OFFSET_BASIC=Z
-fi
-#-------------------------------------------------------------------------------
-TEST_NAME="${TEST_NAME_BASE}-validate"
-run_ok "${TEST_NAME}" cylc validate \
- --set="TEST_SHOW_OUTPUT_PATH='$TEST_SHOW_OUTPUT_PATH'" \
- --set="TZ_OFFSET_BASIC='$TZ_OFFSET_BASIC'" "${WORKFLOW_NAME}"
-#-------------------------------------------------------------------------------
-sed "s/\$TZ_OFFSET_BASIC/$TZ_OFFSET_BASIC/g" reference-untz.log >reference.log
-TEST_NAME="${TEST_NAME_BASE}-run"
-workflow_run_ok "${TEST_NAME}" cylc play --reference-test --debug --no-detach \
- --set="TEST_SHOW_OUTPUT_PATH='$TEST_SHOW_OUTPUT_PATH'" \
- --set="TZ_OFFSET_BASIC='$TZ_OFFSET_BASIC'" "${WORKFLOW_NAME}"
-#-------------------------------------------------------------------------------
-TEST_NAME=${TEST_NAME_BASE}-show
-contains_ok "${TEST_NAME}.stdout" <<__SHOW_OUTPUT__
-title: (not given)
-description: (not given)
-URL: (not given)
-state: running
-prerequisites: ('-': not satisfied)
- + 20140808T0900$TZ_OFFSET_BASIC/woo succeeded
-outputs: ('-': not completed)
- - 20140808T0900$TZ_OFFSET_BASIC/foo expired
- + 20140808T0900$TZ_OFFSET_BASIC/foo submitted
- - 20140808T0900$TZ_OFFSET_BASIC/foo submit-failed
- + 20140808T0900$TZ_OFFSET_BASIC/foo started
- - 20140808T0900$TZ_OFFSET_BASIC/foo succeeded
- - 20140808T0900$TZ_OFFSET_BASIC/foo failed
-other: ('-': not satisfied)
- + Clock trigger time reached
- o Triggers at ... 2014-08-08T09:05:00$TZ_OFFSET_EXTENDED
-__SHOW_OUTPUT__
-#-------------------------------------------------------------------------------
-purge
diff --git a/tests/functional/cylc-show/clock-triggered/flow.cylc b/tests/functional/cylc-show/clock-triggered/flow.cylc
deleted file mode 100644
index 2c048bf3045..00000000000
--- a/tests/functional/cylc-show/clock-triggered/flow.cylc
+++ /dev/null
@@ -1,25 +0,0 @@
-#!jinja2
-
-[scheduler]
- UTC mode = True
-
-[scheduling]
- initial cycle point = 20141106T09
- final cycle point = 20141106T09
- [[special tasks]]
- clock-trigger = foo(PT5M)
- [[graph]]
- PT1H = "woo => foo & show"
-
-[runtime]
- [[woo]]
- script = true
- [[foo]]
- script = """
- cylc workflow-state "$CYLC_WORKFLOW_ID" -p "$CYLC_TASK_CYCLE_POINT" -t 'show' -S finish --interval 1
- """
- [[show]]
- script = """
- cylc workflow-state "$CYLC_WORKFLOW_ID" -p "$CYLC_TASK_CYCLE_POINT" -t 'show' -S running --interval 1
- cylc show "$CYLC_WORKFLOW_ID//20141106T0900Z/foo" >{{ TEST_OUTPUT_PATH }}
- """
diff --git a/tests/functional/cylc-show/clock-triggered/reference.log b/tests/functional/cylc-show/clock-triggered/reference.log
deleted file mode 100644
index 5fb433475fb..00000000000
--- a/tests/functional/cylc-show/clock-triggered/reference.log
+++ /dev/null
@@ -1,5 +0,0 @@
-Initial point: 20141106T0900Z
-Final point: 20141106T0900Z
-20141106T0900Z/woo -triggered off []
-20141106T0900Z/foo -triggered off ['20141106T0900Z/woo']
-20141106T0900Z/show -triggered off ['20141106T0900Z/woo']
diff --git a/tests/functional/cylc-trigger/06-already-active.t b/tests/functional/cylc-trigger/06-already-active.t
index dfc4a0da453..1cba992c9f8 100644
--- a/tests/functional/cylc-trigger/06-already-active.t
+++ b/tests/functional/cylc-trigger/06-already-active.t
@@ -27,6 +27,6 @@ install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}"
workflow_run_ok "${TEST_NAME_BASE}-run" \
- cylc play --debug -n "${WORKFLOW_NAME}"
+ cylc play --debug --no-detach "${WORKFLOW_NAME}"
purge
diff --git a/tests/functional/job-submission/03-job-nn-remote-host-with-shared-fs.t b/tests/functional/job-submission/03-job-nn-remote-host-with-shared-fs.t
index a9152e3a094..233ff43296f 100755
--- a/tests/functional/job-submission/03-job-nn-remote-host-with-shared-fs.t
+++ b/tests/functional/job-submission/03-job-nn-remote-host-with-shared-fs.t
@@ -25,7 +25,7 @@ run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}"
mkdir "${WORKFLOW_RUN_DIR}/.service"
sqlite3 "${WORKFLOW_RUN_DIR}/.service/db" <'db.sqlite3'
workflow_run_ok "${TEST_NAME_BASE}-restart" \
- cylc play --reference-test --debug --no-detach "${WORKFLOW_NAME}" --upgrade
+ cylc play --reference-test --debug --no-detach --upgrade "${WORKFLOW_NAME}"
purge
exit
diff --git a/tests/functional/optional-outputs/00-stall-on-partial.t b/tests/functional/optional-outputs/00-stall-on-partial.t
index a453f489bf5..8482611653a 100644
--- a/tests/functional/optional-outputs/00-stall-on-partial.t
+++ b/tests/functional/optional-outputs/00-stall-on-partial.t
@@ -25,7 +25,7 @@ install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}"
workflow_run_fail "${TEST_NAME_BASE}-run" \
- cylc play -n --reference-test --debug "${WORKFLOW_NAME}"
+ cylc play --no-detach --reference-test --debug "${WORKFLOW_NAME}"
LOG="${WORKFLOW_RUN_DIR}/log/scheduler/log"
grep_ok "Partially satisfied prerequisites" "${LOG}"
diff --git a/tests/functional/optional-outputs/01-stall-on-incomplete.t b/tests/functional/optional-outputs/01-stall-on-incomplete.t
index 6c69a066ae2..81c71116471 100644
--- a/tests/functional/optional-outputs/01-stall-on-incomplete.t
+++ b/tests/functional/optional-outputs/01-stall-on-incomplete.t
@@ -26,7 +26,7 @@ install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}"
workflow_run_fail "${TEST_NAME_BASE}-run" \
- cylc play -n --reference-test --debug "${WORKFLOW_NAME}"
+ cylc play --no-detach --reference-test --debug "${WORKFLOW_NAME}"
LOG="${WORKFLOW_RUN_DIR}/log/scheduler/log"
grep_ok "Incomplete tasks" "${LOG}"
diff --git a/tests/functional/optional-outputs/03-c7backcompat.t b/tests/functional/optional-outputs/03-c7backcompat.t
index cc868ac2017..2d959e8bf1a 100644
--- a/tests/functional/optional-outputs/03-c7backcompat.t
+++ b/tests/functional/optional-outputs/03-c7backcompat.t
@@ -46,7 +46,7 @@ grep_ok "${DEPR_MSG_1}" "${TEST_NAME}.stderr"
# And it should run without stalling with an incomplete task.
workflow_run_ok "${TEST_NAME_BASE}-run" \
- cylc play -n --reference-test --debug "${WORKFLOW_NAME}"
+ cylc play --no-detach --reference-test --debug "${WORKFLOW_NAME}"
purge
exit
diff --git a/tests/functional/optional-outputs/04-c7backcompat-blocked-task.t b/tests/functional/optional-outputs/04-c7backcompat-blocked-task.t
index d52b0d39346..56fcf52b58c 100644
--- a/tests/functional/optional-outputs/04-c7backcompat-blocked-task.t
+++ b/tests/functional/optional-outputs/04-c7backcompat-blocked-task.t
@@ -32,7 +32,7 @@ grep_ok "${DEPR_MSG_1}" "${TEST_NAME}.stderr"
# Should stall and abort with an unsatisfied prerequisite.
workflow_run_fail "${TEST_NAME_BASE}-run" \
- cylc play -n --reference-test --debug "${WORKFLOW_NAME}"
+ cylc play --no-detach --reference-test --debug "${WORKFLOW_NAME}"
grep_workflow_log_ok grep-1 "WARNING - Partially satisfied prerequisites"
grep_workflow_log_ok grep-2 "Workflow stalled"
diff --git a/tests/functional/optional-outputs/05-c7backcompat-blocked-task-2.t b/tests/functional/optional-outputs/05-c7backcompat-blocked-task-2.t
index 236d42c6bb6..0543eb2f599 100644
--- a/tests/functional/optional-outputs/05-c7backcompat-blocked-task-2.t
+++ b/tests/functional/optional-outputs/05-c7backcompat-blocked-task-2.t
@@ -32,7 +32,7 @@ grep_ok "${DEPR_MSG_1}" "${TEST_NAME}.stderr"
# Should stall and abort with an unsatisfied prerequisite.
workflow_run_fail "${TEST_NAME_BASE}-run" \
- cylc play -n --reference-test --debug "${WORKFLOW_NAME}"
+ cylc play --no-detach --reference-test --debug "${WORKFLOW_NAME}"
grep_workflow_log_ok grep-1 "WARNING - Partially satisfied prerequisites"
grep_workflow_log_ok grep-2 "Workflow stalled"
diff --git a/tests/functional/optional-outputs/06-c7backcompat-family.t b/tests/functional/optional-outputs/06-c7backcompat-family.t
index e17ce6589d2..06051903add 100644
--- a/tests/functional/optional-outputs/06-c7backcompat-family.t
+++ b/tests/functional/optional-outputs/06-c7backcompat-family.t
@@ -32,7 +32,7 @@ grep_ok "${DEPR_MSG_1}" "${TEST_NAME}.stderr"
# Should stall and abort with unsatisfied "stall" tasks.
workflow_run_fail "${TEST_NAME_BASE}-run" \
- cylc play -n --debug "${WORKFLOW_NAME}"
+ cylc play --no-detach --debug "${WORKFLOW_NAME}"
grep_workflow_log_ok grep-1 "Workflow stalled"
grep_workflow_log_ok grep-2 "WARNING - Partially satisfied prerequisites"
diff --git a/tests/functional/optional-outputs/07-finish-fail-c7-backcompat.t b/tests/functional/optional-outputs/07-finish-fail-c7-backcompat.t
index 5a58d7ab50b..37d13e2ee88 100644
--- a/tests/functional/optional-outputs/07-finish-fail-c7-backcompat.t
+++ b/tests/functional/optional-outputs/07-finish-fail-c7-backcompat.t
@@ -33,7 +33,7 @@ DEPR_MSG_1=$(python -c \
grep_ok "${DEPR_MSG_1}" "${TEST_NAME}.stderr"
# Stall expected at FCP (but not at runahead limit).
-workflow_run_fail "${TEST_NAME_BASE}-run" cylc play -n --debug "${WORKFLOW_NAME}"
+workflow_run_fail "${TEST_NAME_BASE}-run" cylc play --no-detach --debug "${WORKFLOW_NAME}"
grep_workflow_log_ok grep-0 "Workflow stalled"
grep_workflow_log_ok grep-1 "ERROR - Incomplete tasks:"
diff --git a/tests/functional/optional-outputs/08-finish-fail-c7-c8.t b/tests/functional/optional-outputs/08-finish-fail-c7-c8.t
index cee630499cc..e9cfbf831b1 100644
--- a/tests/functional/optional-outputs/08-finish-fail-c7-c8.t
+++ b/tests/functional/optional-outputs/08-finish-fail-c7-c8.t
@@ -36,6 +36,6 @@ DEPR_MSG="deprecated graph items were automatically upgraded" # (not back-compa
grep_ok "${DEPR_MSG}" "${TEST_NAME}.stderr"
# No stall expected.
-workflow_run_ok "${TEST_NAME_BASE}-run" cylc play -n --debug "${WORKFLOW_NAME}"
+workflow_run_ok "${TEST_NAME_BASE}-run" cylc play --no-detach --debug "${WORKFLOW_NAME}"
purge
diff --git a/tests/functional/platforms/10-do-not-host-check-platforms.t b/tests/functional/platforms/10-do-not-host-check-platforms.t
new file mode 100755
index 00000000000..c52ef7e8383
--- /dev/null
+++ b/tests/functional/platforms/10-do-not-host-check-platforms.t
@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+# Check that platform names are not treated as host names. E.g. a platform
+# name starting with "localhost" should not be treated as localhost.
+# https://github.com/cylc/cylc-flow/issues/5342
+. "$(dirname "$0")/test_header"
+
+set_test_number 2
+
+# shellcheck disable=SC2016
+create_test_global_config '' '
+[platforms]
+ [[localhost_spice]]
+ hosts = unreachable
+'
+
+make_rnd_workflow
+
+cat > "${RND_WORKFLOW_SOURCE}/flow.cylc" <<__HEREDOC__
+[scheduler]
+ [[events]]
+ stall timeout = PT0S
+[scheduling]
+ [[graph]]
+ R1 = foo
+[runtime]
+ [[foo]]
+ platform = localhost_spice
+__HEREDOC__
+
+ERR_STR='Unable to find valid host for localhost_spice'
+
+TEST_NAME="${TEST_NAME_BASE}-vip-workflow"
+run_fail "${TEST_NAME}" cylc vip "${RND_WORKFLOW_SOURCE}" --no-detach
+grep_ok "${ERR_STR}" \
+ "${TEST_NAME}.stderr" -F
+
+purge_rnd_workflow
+exit
diff --git a/tests/functional/restart/58-removed-task.t b/tests/functional/restart/58-removed-task.t
index 808273f55a9..17dc19f626e 100755
--- a/tests/functional/restart/58-removed-task.t
+++ b/tests/functional/restart/58-removed-task.t
@@ -30,14 +30,14 @@ run_ok "${TEST_NAME_BASE}-validate" cylc validate --set="INCL_B_C=True" "${WORKF
run_ok "${TEST_NAME_BASE}-validate" cylc validate --set="INCL_B_C=False" "${WORKFLOW_NAME}"
TEST_NAME="${TEST_NAME_BASE}-run"
-workflow_run_ok "${TEST_NAME}" cylc play -n "${WORKFLOW_NAME}"
+workflow_run_ok "${TEST_NAME}" cylc play --no-detach "${WORKFLOW_NAME}"
# Restart with removed tasks should not cause an error.
# It should shut down cleanly after orphaned task a and incomplete failed task
# b are polled (even though b has been removed from the graph) and a finishes
# (after checking the poll results).
TEST_NAME="${TEST_NAME_BASE}-restart"
-workflow_run_ok "${TEST_NAME}" cylc play --set="INCL_B_C=False" -n "${WORKFLOW_NAME}"
+workflow_run_ok "${TEST_NAME}" cylc play --set="INCL_B_C=False" --no-detach "${WORKFLOW_NAME}"
grep_workflow_log_ok "grep-3" "\[1/a running job:01 flows:1\] (polled)started"
grep_workflow_log_ok "grep-4" "\[1/b failed job:01 flows:1\] (polled)failed"
diff --git a/tests/functional/rnd/05-main-loop.t b/tests/functional/rnd/05-main-loop.t
index d43626da889..63a11a0aff3 100644
--- a/tests/functional/rnd/05-main-loop.t
+++ b/tests/functional/rnd/05-main-loop.t
@@ -55,7 +55,7 @@ __FLOW_CYLC__
# run a workflow with all the development main-loop plugins turned on
run_ok "${TEST_NAME_BASE}-run" \
cylc play "${WORKFLOW_NAME}" \
- -n \
+ --no-detach \
--debug \
--main-loop 'log data store' \
--main-loop 'log db' \
diff --git a/tests/functional/triggering/18-suicide-active.t b/tests/functional/triggering/18-suicide-active.t
index dbc53eecb44..d4df07b1275 100644
--- a/tests/functional/triggering/18-suicide-active.t
+++ b/tests/functional/triggering/18-suicide-active.t
@@ -27,7 +27,7 @@ install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
run_ok "${TEST_NAME_BASE}-validate" cylc validate "${WORKFLOW_NAME}"
workflow_run_ok "${TEST_NAME_BASE}-run" \
- cylc play --debug -n "${WORKFLOW_NAME}"
+ cylc play --debug --no-detach "${WORKFLOW_NAME}"
grep_workflow_log_ok "${TEST_NAME_BASE}-grep" "suiciding while active"
diff --git a/tests/integration/test_data_store_mgr.py b/tests/integration/test_data_store_mgr.py
index 68e2d8a10d7..e25330562c6 100644
--- a/tests/integration/test_data_store_mgr.py
+++ b/tests/integration/test_data_store_mgr.py
@@ -24,6 +24,7 @@
TASK_PROXIES,
WORKFLOW
)
+from cylc.flow.id import Tokens
from cylc.flow.task_state import (
TASK_STATUS_FAILED,
TASK_STATUS_SUCCEEDED,
@@ -226,10 +227,10 @@ def test_delta_job_msg(harness):
"""Test method adding messages to job element."""
schd, data = harness
j_id = ext_id(schd)
- job_d = int_id(schd)
+ tokens = Tokens(j_id)
# First update creation
    assert schd.data_store_mgr.updated[JOBS].get(j_id) is None
- schd.data_store_mgr.delta_job_msg(job_d, 'The Atomic Age')
+ schd.data_store_mgr.delta_job_msg(tokens, 'The Atomic Age')
assert schd.data_store_mgr.updated[JOBS][j_id].messages
@@ -237,7 +238,7 @@ def test_delta_job_attr(harness):
"""Test method modifying job fields to job element."""
schd, data = harness
schd.data_store_mgr.delta_job_attr(
- int_id(schd), 'job_runner_name', 'at')
+ Tokens(ext_id(schd)), 'job_runner_name', 'at')
assert schd.data_store_mgr.updated[JOBS][ext_id(schd)].messages != (
schd.data_store_mgr.added[JOBS][ext_id(schd)].job_runner_name
)
@@ -248,7 +249,7 @@ def test_delta_job_time(harness):
schd, data = harness
event_time = get_current_time_string()
schd.data_store_mgr.delta_job_time(
- int_id(schd), 'submitted', event_time)
+ Tokens(ext_id(schd)), 'submitted', event_time)
job_updated = schd.data_store_mgr.updated[JOBS][ext_id(schd)]
with pytest.raises(ValueError):
job_updated.HasField('jumped_time')
diff --git a/tests/integration/test_install.py b/tests/integration/test_install.py
index 22ff88a55ba..01df57eec97 100644
--- a/tests/integration/test_install.py
+++ b/tests/integration/test_install.py
@@ -86,7 +86,7 @@ def src_run_dirs(
src=('w1', 'w2')
)
mock_glbl_cfg(
- 'cylc.flow.workflow_files.glbl_cfg',
+ 'cylc.flow.install.glbl_cfg',
f'''
[install]
source dirs = {tmp_src_path}
diff --git a/tests/integration/test_reinstall.py b/tests/integration/test_reinstall.py
index 8b13c55749f..3dadc4b7df0 100644
--- a/tests/integration/test_reinstall.py
+++ b/tests/integration/test_reinstall.py
@@ -22,18 +22,24 @@
import pytest
-from cylc.flow.workflow_files import (
- WorkflowFiles,
- reinstall_workflow,
-)
from cylc.flow.exceptions import (
WorkflowFilesError,
)
+from cylc.flow.install import (
+ reinstall_workflow,
+)
+from cylc.flow.option_parsers import Options
from cylc.flow.scripts.reinstall import (
- ReInstallOptions,
+ get_option_parser as reinstall_gop,
reinstall_cli,
)
+from cylc.flow.terminal import cli_function
+from cylc.flow.workflow_files import (
+ WorkflowFiles,
+)
+
+ReInstallOptions = Options(reinstall_gop())
# cli opts
@@ -280,7 +286,7 @@ def raise_keyboard_interrupt():
def test_rsync_fail(one_src, one_run, mock_glbl_cfg, non_interactive):
"""It should raise an error on rsync failure."""
mock_glbl_cfg(
- 'cylc.flow.workflow_files.glbl_cfg',
+ 'cylc.flow.install.glbl_cfg',
'''
[platforms]
[[localhost]]
diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
index 3023be487ed..26c3b98c4b3 100644
--- a/tests/unit/conftest.py
+++ b/tests/unit/conftest.py
@@ -28,11 +28,13 @@
INTEGER_CYCLING_TYPE
)
from cylc.flow.data_store_mgr import DataStoreMgr
+from cylc.flow.install import (
+ link_runN,
+ unlink_runN,
+)
from cylc.flow.scheduler import Scheduler
from cylc.flow.workflow_files import (
WorkflowFiles,
- link_runN,
- unlink_runN,
)
from cylc.flow.xtrigger_mgr import XtriggerManager
diff --git a/tests/unit/test_clean.py b/tests/unit/test_clean.py
new file mode 100644
index 00000000000..1668dc36cf7
--- /dev/null
+++ b/tests/unit/test_clean.py
@@ -0,0 +1,1096 @@
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import logging
+import os
+import shutil
+from glob import iglob
+from pathlib import Path
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ List,
+ Optional,
+ Set,
+ Tuple,
+ Type,
+)
+from unittest import mock
+
+import pytest
+
+from cylc.flow import CYLC_LOG
+from cylc.flow import clean as cylc_clean
+from cylc.flow.clean import (
+ _clean_using_glob,
+ _remote_clean_cmd,
+ clean,
+ glob_in_run_dir,
+ init_clean,
+)
+from cylc.flow.exceptions import (
+ CylcError,
+ InputError,
+ PlatformError,
+ ServiceFileError,
+ WorkflowFilesError,
+)
+from cylc.flow.pathutil import parse_rm_dirs
+from cylc.flow.scripts.clean import CleanOptions
+from cylc.flow.workflow_files import (
+ WorkflowFiles,
+ get_symlink_dirs,
+)
+
+from .conftest import MonkeyMock
+from .filetree import (
+ FILETREE_1,
+ FILETREE_2,
+ FILETREE_3,
+ FILETREE_4,
+ create_filetree,
+ get_filetree_as_list,
+)
+
+NonCallableFixture = Any
+
+
+# global.cylc[install]scan depth for these tests:
+MAX_SCAN_DEPTH = 3
+
+
+@pytest.fixture
+def glbl_cfg_max_scan_depth(mock_glbl_cfg: Callable) -> None:
+ mock_glbl_cfg(
+ 'cylc.flow.workflow_files.glbl_cfg',
+ f'''
+ [install]
+ max depth = {MAX_SCAN_DEPTH}
+ '''
+ )
+
+
+@pytest.mark.parametrize(
+ 'reg, stopped, err, err_msg',
+ [
+ ('foo/..', True, WorkflowFilesError,
+ "cannot be a path that points to the cylc-run directory or above"),
+ ('foo/../..', True, WorkflowFilesError,
+ "cannot be a path that points to the cylc-run directory or above"),
+ ('foo', False, ServiceFileError, "Cannot clean running workflow"),
+ ]
+)
+def test_clean_check__fail(
+ reg: str,
+ stopped: bool,
+ err: Type[Exception],
+ err_msg: str,
+ monkeypatch: pytest.MonkeyPatch,
+ tmp_path: Path,
+) -> None:
+ """Test that _clean_check() fails appropriately.
+
+ Params:
+ reg: Workflow name.
+ stopped: Whether the workflow is stopped when _clean_check() is called.
+ err: Expected error class.
+ err_msg: Message that is expected to be in the exception.
+ """
+ def mocked_detect_old_contact_file(*a, **k):
+ if not stopped:
+ raise ServiceFileError('Mocked error')
+
+ monkeypatch.setattr(
+ 'cylc.flow.clean.detect_old_contact_file',
+ mocked_detect_old_contact_file
+ )
+
+ with pytest.raises(err) as exc:
+ cylc_clean._clean_check(CleanOptions(), reg, tmp_path)
+ assert err_msg in str(exc.value)
+
+
+@pytest.mark.parametrize(
+ 'db_platforms, opts, clean_called, remote_clean_called',
+ [
+ pytest.param(
+ ['localhost', 'localhost'], {}, True, False,
+ id="Only platform in DB is localhost"
+ ),
+ pytest.param(
+ ['horse'], {}, True, True,
+ id="Remote platform in DB"
+ ),
+ pytest.param(
+ ['horse'], {'local_only': True}, True, False,
+ id="Local clean only"
+ ),
+ pytest.param(
+ ['horse'], {'remote_only': True}, False, True,
+ id="Remote clean only"
+ )
+ ]
+)
+def test_init_clean(
+ db_platforms: List[str],
+ opts: Dict[str, Any],
+ clean_called: bool,
+ remote_clean_called: bool,
+ monkeypatch: pytest.MonkeyPatch, monkeymock: MonkeyMock,
+ tmp_run_dir: Callable
+) -> None:
+ """Test the init_clean() function logic.
+
+ Params:
+ db_platforms: Platform names that would be loaded from the database.
+ opts: Any options passed to the cylc clean CLI.
+ clean_called: If a local clean is expected to go ahead.
+ remote_clean_called: If a remote clean is expected to go ahead.
+ """
+ reg = 'foo/bar/'
+ rdir = tmp_run_dir(reg, installed=True)
+ Path(rdir, WorkflowFiles.Service.DIRNAME, WorkflowFiles.Service.DB).touch()
+ mock_clean = monkeymock('cylc.flow.clean.clean')
+ mock_remote_clean = monkeymock('cylc.flow.clean.remote_clean')
+ monkeypatch.setattr('cylc.flow.clean.get_platforms_from_db',
+ lambda x: set(db_platforms))
+
+ init_clean(reg, opts=CleanOptions(**opts))
+ assert mock_clean.called is clean_called
+ assert mock_remote_clean.called is remote_clean_called
+
+
+def test_init_clean__no_dir(
+ monkeymock: MonkeyMock, tmp_run_dir: Callable,
+ caplog: pytest.LogCaptureFixture
+) -> None:
+ """Test init_clean() when the run dir doesn't exist"""
+ caplog.set_level(logging.INFO, CYLC_LOG)
+ tmp_run_dir()
+ mock_clean = monkeymock('cylc.flow.clean.clean')
+ mock_remote_clean = monkeymock('cylc.flow.clean.remote_clean')
+
+ init_clean('foo/bar', opts=CleanOptions())
+ assert "No directory to clean" in caplog.text
+ assert mock_clean.called is False
+ assert mock_remote_clean.called is False
+
+
+def test_init_clean__no_db(
+ monkeymock: MonkeyMock, tmp_run_dir: Callable,
+ caplog: pytest.LogCaptureFixture
+) -> None:
+ """Test init_clean() when the workflow database doesn't exist"""
+ caplog.set_level(logging.INFO, CYLC_LOG)
+ tmp_run_dir('bespin')
+ mock_clean = monkeymock('cylc.flow.clean.clean')
+ mock_remote_clean = monkeymock('cylc.flow.clean.remote_clean')
+
+ init_clean('bespin', opts=CleanOptions())
+ assert (
+ "No workflow database for bespin - will only clean locally"
+ ) in caplog.text
+ assert mock_clean.called is True
+ assert mock_remote_clean.called is False
+
+
+def test_init_clean__remote_only_no_db(
+ monkeymock: MonkeyMock, tmp_run_dir: Callable
+) -> None:
+ """Test remote-only init_clean() when the workflow DB doesn't exist"""
+ tmp_run_dir('hoth')
+ mock_clean = monkeymock('cylc.flow.clean.clean')
+ mock_remote_clean = monkeymock('cylc.flow.clean.remote_clean')
+
+ with pytest.raises(ServiceFileError) as exc:
+ init_clean('hoth', opts=CleanOptions(remote_only=True))
+ assert (
+ "No workflow database for hoth - cannot perform remote clean"
+ ) in str(exc.value)
+ assert mock_clean.called is False
+ assert mock_remote_clean.called is False
+
+
+def test_init_clean__running_workflow(
+ monkeypatch: pytest.MonkeyPatch, tmp_run_dir: Callable
+) -> None:
+ """Test init_clean() fails when workflow is still running"""
+ def mock_err(*args, **kwargs):
+ raise ServiceFileError("Mocked error")
+ monkeypatch.setattr('cylc.flow.clean.detect_old_contact_file',
+ mock_err)
+ tmp_run_dir('yavin')
+
+ with pytest.raises(ServiceFileError) as exc:
+ init_clean('yavin', opts=CleanOptions())
+ assert "Cannot clean running workflow" in str(exc.value)
+
+
+@pytest.mark.parametrize(
+ 'rm_dirs, expected_clean, expected_remote_clean',
+ [(None, None, []),
+ (["r2d2:c3po"], {"r2d2", "c3po"}, ["r2d2:c3po"])]
+)
+def test_init_clean__rm_dirs(
+ rm_dirs: Optional[List[str]],
+ expected_clean: Set[str],
+ expected_remote_clean: List[str],
+ monkeymock: MonkeyMock, monkeypatch: pytest.MonkeyPatch,
+ tmp_run_dir: Callable
+) -> None:
+ """Test init_clean() with the --rm option.
+
+ Params:
+ rm_dirs: Dirs given by --rm option.
+ expected_clean: The dirs that are expected to be passed to clean().
+ expected_remote_clean: The dirs that are expected to be passed to
+ remote_clean().
+ """
+ reg = 'dagobah'
+ run_dir: Path = tmp_run_dir(reg)
+ Path(run_dir, WorkflowFiles.Service.DIRNAME, WorkflowFiles.Service.DB).touch()
+ mock_clean = monkeymock('cylc.flow.clean.clean')
+ mock_remote_clean = monkeymock('cylc.flow.clean.remote_clean')
+ platforms = {'platform_one'}
+ monkeypatch.setattr('cylc.flow.clean.get_platforms_from_db',
+ lambda x: platforms)
+ opts = CleanOptions(rm_dirs=rm_dirs) if rm_dirs else CleanOptions()
+
+ init_clean(reg, opts=opts)
+ mock_clean.assert_called_with(reg, run_dir, expected_clean)
+ mock_remote_clean.assert_called_with(
+ reg, platforms, expected_remote_clean, opts.remote_timeout)
+
+
+@pytest.mark.parametrize(
+ 'reg, symlink_dirs, rm_dirs, expected_deleted, expected_remaining',
+ [
+ pytest.param(
+ 'foo/bar',
+ {},
+ None,
+ ['cylc-run/foo'],
+ ['cylc-run'],
+ id="Basic clean"
+ ),
+ pytest.param(
+ 'foo/bar/baz',
+ {
+ 'log': 'sym-log',
+ 'share': 'sym-share',
+ 'share/cycle': 'sym-cycle',
+ 'work': 'sym-work'
+ },
+ None,
+ ['cylc-run/foo', 'sym-log/cylc-run/foo', 'sym-share/cylc-run/foo',
+ 'sym-cycle/cylc-run/foo', 'sym-work/cylc-run/foo'],
+ ['cylc-run', 'sym-log/cylc-run', 'sym-share/cylc-run',
+ 'sym-cycle/cylc-run', 'sym-work/cylc-run'],
+ id="Symlink dirs"
+ ),
+ pytest.param(
+ 'foo',
+ {
+ 'run': 'sym-run',
+ 'log': 'sym-log',
+ 'share': 'sym-share',
+ 'share/cycle': 'sym-cycle',
+ 'work': 'sym-work'
+ },
+ None,
+ ['cylc-run/foo', 'sym-run/cylc-run/foo', 'sym-log/cylc-run/foo',
+ 'sym-share/cylc-run/foo', 'sym-cycle/cylc-run/foo',
+ 'sym-work/cylc-run/foo'],
+ ['cylc-run', 'sym-run/cylc-run', 'sym-log/cylc-run',
+ 'sym-share/cylc-run', 'sym-cycle/cylc-run',
+ 'sym-work'],
+ id="Symlink dirs including run dir"
+ ),
+ pytest.param(
+ 'foo',
+ {},
+ {'log', 'share'},
+ ['cylc-run/foo/log', 'cylc-run/foo/share'],
+ ['cylc-run/foo/work'],
+ id="Targeted clean"
+ ),
+ pytest.param(
+ 'foo',
+ {'log': 'sym-log'},
+ {'log'},
+ ['cylc-run/foo/log', 'sym-log/cylc-run/foo'],
+ ['cylc-run/foo/work', 'cylc-run/foo/share/cycle',
+ 'sym-log/cylc-run'],
+ id="Targeted clean with symlink dirs"
+ ),
+ pytest.param(
+ 'foo',
+ {},
+ {'share/cy*'},
+ ['cylc-run/foo/share/cycle'],
+ ['cylc-run/foo/log', 'cylc-run/foo/work', 'cylc-run/foo/share'],
+ id="Targeted clean with glob"
+ ),
+ pytest.param(
+ 'foo',
+ {'log': 'sym-log'},
+ {'w*', 'wo*', 'l*', 'lo*'},
+ ['cylc-run/foo/work', 'cylc-run/foo/log', 'sym-log/cylc-run/foo'],
+ ['cylc-run/foo/share', 'cylc-run/foo/share/cycle'],
+ id="Targeted clean with degenerate glob"
+ ),
+ ]
+)
+def test_clean(
+ reg: str,
+ symlink_dirs: Dict[str, str],
+ rm_dirs: Optional[Set[str]],
+ expected_deleted: List[str],
+ expected_remaining: List[str],
+ tmp_path: Path, tmp_run_dir: Callable
+) -> None:
+ """Test the clean() function.
+
+ Params:
+ reg: Workflow name.
+ symlink_dirs: As you would find in the global config
+ under [symlink dirs][platform].
+ rm_dirs: As passed to clean().
+ expected_deleted: Dirs (relative paths under tmp_path) that are
+ expected to be cleaned.
+ expected_remaining: Any dirs (relative paths under tmp_path) that are
+ not expected to be cleaned.
+ """
+ # --- Setup ---
+ run_dir: Path = tmp_run_dir(reg)
+
+ if 'run' in symlink_dirs:
+ target = tmp_path / symlink_dirs['run'] / 'cylc-run' / reg
+ target.mkdir(parents=True)
+ shutil.rmtree(run_dir)
+ run_dir.symlink_to(target)
+ symlink_dirs.pop('run')
+ for symlink_name, target_name in symlink_dirs.items():
+ target = tmp_path / target_name / 'cylc-run' / reg / symlink_name
+ target.mkdir(parents=True)
+ symlink = run_dir / symlink_name
+ symlink.symlink_to(target)
+ for d_name in ('log', 'share', 'share/cycle', 'work'):
+ if d_name not in symlink_dirs:
+ (run_dir / d_name).mkdir()
+
+ for rel_path in [*expected_deleted, *expected_remaining]:
+ assert (tmp_path / rel_path).exists()
+
+ # --- The actual test ---
+ cylc_clean.clean(reg, run_dir, rm_dirs)
+ for rel_path in expected_deleted:
+ assert (tmp_path / rel_path).exists() is False
+ assert (tmp_path / rel_path).is_symlink() is False
+ for rel_path in expected_remaining:
+ assert (tmp_path / rel_path).exists()
+
+
+def test_clean__broken_symlink_run_dir(
+ tmp_path: Path, tmp_run_dir: Callable
+) -> None:
+ """Test clean() successfully remove a run dir that is a broken symlink."""
+ # Setup
+ reg = 'foo/bar'
+ run_dir: Path = tmp_run_dir(reg)
+ target = tmp_path.joinpath('rabbow/cylc-run', reg)
+ target.mkdir(parents=True)
+ shutil.rmtree(run_dir)
+ run_dir.symlink_to(target)
+ target.rmdir()
+ assert run_dir.parent.exists() is True # cylc-run/foo should exist
+ # Test
+ cylc_clean.clean(reg, run_dir)
+ assert run_dir.parent.exists() is False # cylc-run/foo should be gone
+ assert target.parent.exists() is False # rabbow/cylc-run/foo too
+
+
+def test_clean__bad_symlink_dir_wrong_type(
+ tmp_path: Path, tmp_run_dir: Callable
+) -> None:
+ """Test clean() raises error when a symlink dir actually points to a file
+ instead of a dir"""
+ reg = 'foo'
+ run_dir: Path = tmp_run_dir(reg)
+ symlink = run_dir.joinpath('log')
+ target = tmp_path.joinpath('sym-log', 'cylc-run', reg, 'meow.txt')
+ target.parent.mkdir(parents=True)
+ target.touch()
+ symlink.symlink_to(target)
+
+ with pytest.raises(WorkflowFilesError) as exc:
+ cylc_clean.clean(reg, run_dir)
+ assert "Invalid symlink at" in str(exc.value)
+ assert symlink.exists() is True
+
+
+def test_clean__bad_symlink_dir_wrong_form(
+ tmp_path: Path, tmp_run_dir: Callable
+) -> None:
+ """Test clean() raises error when a symlink dir points to an
+ unexpected dir"""
+ run_dir: Path = tmp_run_dir('foo')
+ symlink = run_dir.joinpath('log')
+ target = tmp_path.joinpath('sym-log', 'oops', 'log')
+ target.mkdir(parents=True)
+ symlink.symlink_to(target)
+
+ with pytest.raises(WorkflowFilesError) as exc:
+ cylc_clean.clean('foo', run_dir)
+ assert 'should end with "cylc-run/foo/log"' in str(exc.value)
+ assert symlink.exists() is True
+
+
+@pytest.mark.parametrize('pattern', ['thing/', 'thing/*'])
+def test_clean__rm_dir_not_file(pattern: str, tmp_run_dir: Callable):
+ """Test clean() does not remove a file when the rm_dir glob pattern would
+ match a dir only."""
+ reg = 'foo'
+ run_dir: Path = tmp_run_dir(reg)
+ a_file = run_dir.joinpath('thing')
+ a_file.touch()
+ rm_dirs = parse_rm_dirs([pattern])
+
+ cylc_clean.clean(reg, run_dir, rm_dirs)
+ assert a_file.exists()
+
+
+@pytest.fixture
+def filetree_for_testing_cylc_clean(tmp_path: Path):
+ """Fixture that creates a filetree from the given dict, and returns which
+ files are expected to be deleted and which aren't.
+
+ See tests/unit/filetree.py
+
+ Args:
+ reg: Workflow name.
+ initial_filetree: The filetree before cleaning.
+ filetree_left_behind: The filetree that is expected to be left behind
+ after cleaning, excluding the 'you-shall-not-pass/' directory,
+ which is always expected to be left behind.
+
+ Returns:
+ run_dir: Workflow run dir.
+ files_to_delete: List of files that are expected to be deleted.
+ files_not_to_delete: List of files that are not expected to be deleted.
+ """
+ def _filetree_for_testing_cylc_clean(
+ reg: str,
+ initial_filetree: Dict[str, Any],
+ filetree_left_behind: Dict[str, Any]
+ ) -> Tuple[Path, List[str], List[str]]:
+ create_filetree(initial_filetree, tmp_path, tmp_path)
+ files_not_to_delete = [
+ os.path.normpath(i) for i in
+ iglob(str(tmp_path / 'you-shall-not-pass/**'), recursive=True)
+ ]
+ files_not_to_delete.extend(
+ get_filetree_as_list(filetree_left_behind, tmp_path)
+ )
+ files_to_delete = list(
+ set(get_filetree_as_list(initial_filetree, tmp_path)).difference(
+ files_not_to_delete
+ )
+ )
+ run_dir = tmp_path / 'cylc-run' / reg
+ return run_dir, files_to_delete, files_not_to_delete
+ return _filetree_for_testing_cylc_clean
+
+
+@pytest.mark.parametrize(
+ 'pattern, initial_filetree, filetree_left_behind',
+ [
+ pytest.param(
+ '**',
+ FILETREE_1,
+ {
+ 'cylc-run': {'foo': {}},
+ 'sym': {'cylc-run': {'foo': {'bar': {}}}}
+ }
+ ),
+ pytest.param(
+ '*/**',
+ FILETREE_1,
+ {
+ 'cylc-run': {'foo': {'bar': {
+ '.service': {'db': None},
+ 'flow.cylc': None,
+ 'rincewind.txt': Path('whatever')
+ }}},
+ 'sym': {'cylc-run': {'foo': {'bar': {}}}}
+ }
+ ),
+ pytest.param(
+ '**/*.txt',
+ FILETREE_1,
+ {
+ 'cylc-run': {'foo': {'bar': {
+ '.service': {'db': None},
+ 'flow.cylc': None,
+ 'log': Path('whatever'),
+ 'mirkwood': Path('whatever')
+ }}},
+ 'sym': {'cylc-run': {'foo': {'bar': {
+ 'log': {
+ 'darmok': Path('whatever'),
+ 'bib': {}
+ }
+ }}}}
+ }
+ )
+ ]
+)
+def test__clean_using_glob(
+ pattern: str,
+ initial_filetree: Dict[str, Any],
+ filetree_left_behind: Dict[str, Any],
+ filetree_for_testing_cylc_clean: Callable
+) -> None:
+ """Test _clean_using_glob(), particularly that it does not follow and
+ delete symlinks (apart from the standard symlink dirs).
+
+ Params:
+ pattern: The glob pattern to test.
+ initial_filetree: The filetree to test against.
+        filetree_left_behind: The filetree expected to remain after
+ _clean_using_glob() is called (excluding
+ /you-shall-not-pass, which is always expected to remain).
+ """
+ # --- Setup ---
+ run_dir: Path
+ files_to_delete: List[str]
+ files_not_to_delete: List[str]
+ run_dir, files_to_delete, files_not_to_delete = (
+ filetree_for_testing_cylc_clean(
+ 'foo/bar', initial_filetree, filetree_left_behind)
+ )
+ # --- Test ---
+ _clean_using_glob(run_dir, pattern, symlink_dirs=['log'])
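+    # exists() follows symlinks; lexists() does not, so the second loop
+    # also verifies that symlinks themselves were removed, even if dangling.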
+ for file in files_not_to_delete:
+ assert os.path.exists(file) is True
+ for file in files_to_delete:
+ assert os.path.lexists(file) is False
+
+
+@pytest.mark.parametrize(
+ 'rm_dirs, initial_filetree, filetree_left_behind',
+ [
+ pytest.param(
+ {'**'},
+ FILETREE_1,
+ {
+ 'cylc-run': {},
+ 'sym': {'cylc-run': {}}
+ },
+ id="filetree1 **"
+ ),
+ pytest.param(
+ {'*/**'},
+ FILETREE_1,
+ {
+ 'cylc-run': {'foo': {'bar': {
+ '.service': {'db': None},
+ 'flow.cylc': None,
+ 'rincewind.txt': Path('whatever')
+ }}},
+ 'sym': {'cylc-run': {}}
+ },
+ id="filetree1 */**"
+ ),
+ pytest.param(
+ {'**/*.txt'},
+ FILETREE_1,
+ {
+ 'cylc-run': {'foo': {'bar': {
+ '.service': {'db': None},
+ 'flow.cylc': None,
+ 'log': Path('whatever'),
+ 'mirkwood': Path('whatever')
+ }}},
+ 'sym': {'cylc-run': {'foo': {'bar': {
+ 'log': {
+ 'darmok': Path('whatever'),
+ 'bib': {}
+ }
+ }}}}
+ },
+ id="filetree1 **/*.txt"
+ ),
+ pytest.param(
+ {'**/cycle'},
+ FILETREE_2,
+ {
+ 'cylc-run': {'foo': {'bar': Path('sym-run/cylc-run/foo/bar')}},
+ 'sym-run': {'cylc-run': {'foo': {'bar': {
+ '.service': {'db': None},
+ 'flow.cylc': None,
+ 'share': Path('sym-share/cylc-run/foo/bar/share')
+ }}}},
+ 'sym-share': {'cylc-run': {'foo': {'bar': {
+ 'share': {}
+ }}}},
+ 'sym-cycle': {'cylc-run': {}}
+ },
+ id="filetree2 **/cycle"
+ ),
+ pytest.param(
+ {'share'},
+ FILETREE_2,
+ {
+ 'cylc-run': {'foo': {'bar': Path('sym-run/cylc-run/foo/bar')}},
+ 'sym-run': {'cylc-run': {'foo': {'bar': {
+ '.service': {'db': None},
+ 'flow.cylc': None,
+ }}}},
+ 'sym-share': {'cylc-run': {}},
+ 'sym-cycle': {'cylc-run': {'foo': {'bar': {
+ 'share': {
+ 'cycle': {
+ 'macklunkey.txt': None
+ }
+ }
+ }}}}
+ },
+ id="filetree2 share"
+ ),
+ pytest.param(
+ {'**'},
+ FILETREE_2,
+ {
+ 'cylc-run': {},
+ 'sym-run': {'cylc-run': {}},
+ 'sym-share': {'cylc-run': {}},
+ 'sym-cycle': {'cylc-run': {}}
+ },
+ id="filetree2 **"
+ ),
+ pytest.param(
+ {'*'},
+ FILETREE_2,
+ {
+ 'cylc-run': {'foo': {'bar': Path('sym-run/cylc-run/foo/bar')}},
+ 'sym-run': {'cylc-run': {'foo': {'bar': {
+ '.service': {'db': None},
+ }}}},
+ 'sym-share': {'cylc-run': {}},
+ 'sym-cycle': {'cylc-run': {'foo': {'bar': {
+ 'share': {
+ 'cycle': {
+ 'macklunkey.txt': None
+ }
+ }
+ }}}}
+ },
+ id="filetree2 *"
+ ),
+ pytest.param( # Check https://bugs.python.org/issue35201 has no effect
+ {'non-exist/**'},
+ FILETREE_2,
+ FILETREE_2,
+ id="filetree2 non-exist/**"
+ ),
+ pytest.param(
+ {'**'},
+ FILETREE_3,
+ {
+ 'cylc-run': {},
+ 'sym-run': {'cylc-run': {}},
+ 'sym-cycle': {'cylc-run': {}},
+ },
+ id="filetree3 **"
+ ),
+ pytest.param(
+ {'**'},
+ FILETREE_4,
+ {
+ 'cylc-run': {},
+ 'sym-cycle': {'cylc-run': {}},
+ },
+ id="filetree4 **"
+ )
+ ],
+)
+def test_clean__targeted(
+ rm_dirs: Set[str],
+ initial_filetree: Dict[str, Any],
+ filetree_left_behind: Dict[str, Any],
+ caplog: pytest.LogCaptureFixture, tmp_run_dir: Callable,
+ filetree_for_testing_cylc_clean: Callable
+) -> None:
+ """Test clean(), particularly that it does not follow and delete symlinks
+ (apart from the standard symlink dirs).
+
+    This is similar to test__clean_using_glob(), but the filetree expected to
+    remain after cleaning is different due to the tidying up of empty dirs.
+
+ Params:
+ rm_dirs: The glob patterns to test.
+ initial_filetree: The filetree to test against.
+        filetree_left_behind: The filetree expected to remain after
+ clean() is called (excluding /you-shall-not-pass,
+ which is always expected to remain).
+ """
+ # --- Setup ---
+ caplog.set_level(logging.DEBUG, CYLC_LOG)
+ tmp_run_dir()
+ reg = 'foo/bar'
+ run_dir: Path
+ files_to_delete: List[str]
+ files_not_to_delete: List[str]
+ run_dir, files_to_delete, files_not_to_delete = (
+ filetree_for_testing_cylc_clean(
+ reg, initial_filetree, filetree_left_behind)
+ )
+ # --- Test ---
+ cylc_clean.clean(reg, run_dir, rm_dirs)
+ for file in files_not_to_delete:
+ assert os.path.exists(file) is True
+ for file in files_to_delete:
+ assert os.path.lexists(file) is False
+
+
+@pytest.mark.parametrize(
+ 'rm_dirs',
+ [
+ [".."],
+ ["foo:.."],
+ ["foo/../../meow"]
+ ]
+)
+def test_init_clean__targeted_bad(
+ rm_dirs: List[str],
+ tmp_run_dir: Callable,
+ monkeymock: MonkeyMock
+):
+ """Test init_clean() fails when abusing --rm option."""
+ tmp_run_dir('chalmers')
+ mock_clean = monkeymock('cylc.flow.clean.clean')
+ mock_remote_clean = monkeymock('cylc.flow.clean.remote_clean')
+ with pytest.raises(InputError) as exc_info:
+ init_clean('chalmers', opts=CleanOptions(rm_dirs=rm_dirs))
+ assert "cannot take paths that point to the run directory or above" in str(
+ exc_info.value
+ )
+ mock_clean.assert_not_called()
+ mock_remote_clean.assert_not_called()
+
+
+PLATFORMS = {
+ 'enterprise': {
+ 'hosts': ['kirk', 'picard'],
+ 'install target': 'picard',
+ 'name': 'enterprise'
+ },
+ 'voyager': {
+ 'hosts': ['janeway'],
+ 'install target': 'janeway',
+ 'name': 'voyager'
+ },
+ 'stargazer': {
+ 'hosts': ['picard'],
+ 'install target': 'picard',
+ 'name': 'stargazer'
+ },
+ 'exeter': {
+ 'hosts': ['localhost'],
+ 'install target': 'localhost',
+ 'name': 'exeter'
+ }
+}
+
+
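+# Each test case below hand-crafts the install-target -> platforms map that
+# get_install_target_to_platforms_map() would otherwise derive from the
+# PLATFORMS defined above.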
+@pytest.mark.parametrize(
+ ('install_targets_map', 'failed_platforms', 'expected_platforms',
+ 'exc_expected', 'expected_err_msgs'),
+ [
+ pytest.param(
+ {'localhost': [PLATFORMS['exeter']]}, None, None, False, [],
+ id="Only localhost install target - no remote clean"
+ ),
+ pytest.param(
+ {
+ 'localhost': [PLATFORMS['exeter']],
+ 'picard': [PLATFORMS['enterprise']]
+ },
+ None, ['enterprise'], False, [],
+ id="Localhost and remote install target"
+ ),
+ pytest.param(
+ {
+ 'picard': [PLATFORMS['enterprise'], PLATFORMS['stargazer']],
+ 'janeway': [PLATFORMS['voyager']]
+ },
+ None, ['enterprise', 'voyager'], False, [],
+ id="Only remote install targets"
+ ),
+ pytest.param(
+ {
+ 'picard': [PLATFORMS['enterprise'], PLATFORMS['stargazer']],
+ 'janeway': [PLATFORMS['voyager']]
+ },
+ {'enterprise': 255},
+ ['enterprise', 'stargazer', 'voyager'],
+ False,
+ [],
+ id="Install target with 1 failed, 1 successful platform"
+ ),
+ pytest.param(
+ {
+ 'picard': [PLATFORMS['enterprise'], PLATFORMS['stargazer']],
+ 'janeway': [PLATFORMS['voyager']]
+ },
+ {'enterprise': 255, 'stargazer': 255},
+ ['enterprise', 'stargazer', 'voyager'],
+ True,
+ ["Could not clean foo on install target: picard"],
+ id="Install target with all failed platforms"
+ ),
+ pytest.param(
+ {
+ 'picard': [PLATFORMS['enterprise']],
+ 'janeway': [PLATFORMS['voyager']]
+ },
+ {'enterprise': 255, 'voyager': 255},
+ ['enterprise', 'voyager'],
+ True,
+ ["Could not clean foo on install target: picard",
+ "Could not clean foo on install target: janeway"],
+ id="All install targets have all failed platforms"
+ ),
+ pytest.param(
+ {
+ 'picard': [PLATFORMS['enterprise'], PLATFORMS['stargazer']]
+ },
+ {'enterprise': 1},
+ ['enterprise'],
+ True,
+ ["Could not clean foo on install target: picard"],
+ id=("Remote clean cmd fails on a platform for non-SSH reason - "
+ "does not retry")
+ ),
+ ]
+)
+def test_remote_clean(
+ install_targets_map: Dict[str, Any],
+ failed_platforms: Optional[Dict[str, int]],
+ expected_platforms: Optional[List[str]],
+ exc_expected: bool,
+ expected_err_msgs: List[str],
+ monkeymock: MonkeyMock, monkeypatch: pytest.MonkeyPatch,
+ caplog: pytest.LogCaptureFixture, log_filter: Callable
+) -> None:
+ """Test remote_clean() logic.
+
+ Params:
+        install_targets_map: The map that would be returned by
+            platforms.get_install_target_to_platforms_map()
+        failed_platforms: If specified, the platforms on which clean will
+            be made to fail in this test case; maps platform name to the
+            remote clean cmd return code.
+        expected_platforms: If specified, all the platforms that the
+            remote clean cmd is expected to run on.
+        exc_expected: Whether a CylcError is expected to be raised.
+ expected_err_msgs: List of error messages expected to be in the log.
+ """
+ # ----- Setup -----
+ caplog.set_level(logging.DEBUG, CYLC_LOG)
+ monkeypatch.setattr(
+ 'cylc.flow.clean.get_install_target_to_platforms_map',
+ lambda x: install_targets_map)
+ # Remove randomness:
+ monkeymock('cylc.flow.clean.shuffle')
+
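+    # Stand-in for the Popen object returned by the real
+    # _remote_clean_cmd(): only the poll(), communicate() and args
+    # attributes used by remote_clean() are provided.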
+ def mocked_remote_clean_cmd_side_effect(reg, platform, rm_dirs, timeout):
+ proc_ret_code = 0
+ if failed_platforms and platform['name'] in failed_platforms:
+ proc_ret_code = failed_platforms[platform['name']]
+ return mock.Mock(
+ poll=lambda: proc_ret_code,
+ communicate=lambda: ("Mocked stdout", "Mocked stderr"),
+ args=[]
+ )
+
+ mocked_remote_clean_cmd = monkeymock(
+ 'cylc.flow.clean._remote_clean_cmd',
+ spec=_remote_clean_cmd,
+ side_effect=mocked_remote_clean_cmd_side_effect)
+ rm_dirs = ["whatever"]
+ # ----- Test -----
+ reg = 'foo'
+ platform_names = (
+ "This arg bypassed as we provide the install targets map in the test")
+ if exc_expected:
+ with pytest.raises(CylcError) as exc:
+ cylc_clean.remote_clean(
+ reg, platform_names, rm_dirs, timeout='irrelevant')
+ assert "Remote clean failed" in str(exc.value)
+ else:
+ cylc_clean.remote_clean(
+ reg, platform_names, rm_dirs, timeout='irrelevant')
+ for msg in expected_err_msgs:
+ assert log_filter(caplog, level=logging.ERROR, contains=msg)
+ if expected_platforms:
+ for p_name in expected_platforms:
+ mocked_remote_clean_cmd.assert_any_call(
+ reg, PLATFORMS[p_name], rm_dirs, 'irrelevant')
+ else:
+ mocked_remote_clean_cmd.assert_not_called()
+ if failed_platforms:
+ for p_name in failed_platforms:
+ assert f"{p_name} - {PlatformError.MSG_TIDY}" in caplog.text
+
+
+@pytest.mark.parametrize(
+ 'rm_dirs, expected_args',
+ [
+ (None, []),
+ (['holodeck', 'ten_forward'],
+ ['--rm', 'holodeck', '--rm', 'ten_forward'])
+ ]
+)
+def test_remote_clean_cmd(
+ rm_dirs: Optional[List[str]],
+ expected_args: List[str],
+ monkeymock: MonkeyMock
+) -> None:
+ """Test _remote_clean_cmd()
+
+ Params:
+ rm_dirs: Argument passed to _remote_clean_cmd().
+ expected_args: Expected CLI arguments of the cylc clean command that
+ gets constructed.
+ """
+ reg = 'jean/luc/picard'
+ platform = {
+ 'name': 'enterprise',
+ 'install target': 'mars',
+ 'hosts': ['Trill'],
+ 'selection': {'method': 'definition order'}
+ }
+ mock_construct_ssh_cmd = monkeymock(
+ 'cylc.flow.clean.construct_ssh_cmd', return_value=['blah'])
+ monkeymock('cylc.flow.clean.Popen')
+
+ cylc_clean._remote_clean_cmd(reg, platform, rm_dirs, timeout='dunno')
+ args, kwargs = mock_construct_ssh_cmd.call_args
+ constructed_cmd = args[0]
+ assert constructed_cmd == ['clean', '--local-only', reg, *expected_args]
+
+
+def test_clean_top_level(tmp_run_dir: Callable):
+ """Test that cleaning last remaining run dir inside a workflow dir removes
+ the top level dir if it's empty (excluding _cylc-install)."""
+ # Setup
+ reg = 'blue/planet/run1'
+ run_dir: Path = tmp_run_dir(reg, installed=True, named=True)
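+    # (installed=True, named=True: the fixture also creates the
+    # _cylc-install dir and runN symlink checked below)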
+ cylc_install_dir = run_dir.parent / WorkflowFiles.Install.DIRNAME
+ assert cylc_install_dir.is_dir()
+ runN_symlink = run_dir.parent / WorkflowFiles.RUN_N
+ assert runN_symlink.exists()
+ # Test
+ clean(reg, run_dir)
+ assert not run_dir.parent.parent.exists()
+ # Now check that if the top level dir is not empty, it doesn't get removed
+ run_dir: Path = tmp_run_dir(reg, installed=True, named=True)
+ jellyfish_file = (run_dir.parent / 'jellyfish.txt')
+ jellyfish_file.touch()
+ clean(reg, run_dir)
+ assert cylc_install_dir.is_dir()
+ assert jellyfish_file.exists()
+
+
+@pytest.mark.parametrize(
+ 'pattern, filetree, expected_matches',
+ [
+ pytest.param(
+ '**',
+ FILETREE_1,
+ ['cylc-run/foo/bar',
+ 'cylc-run/foo/bar/log'],
+ id="filetree1 **"
+ ),
+ pytest.param(
+ '*',
+ FILETREE_1,
+ ['cylc-run/foo/bar/flow.cylc',
+ 'cylc-run/foo/bar/log',
+ 'cylc-run/foo/bar/mirkwood',
+ 'cylc-run/foo/bar/rincewind.txt'],
+ id="filetree1 *"
+ ),
+ pytest.param(
+ '**/*.txt',
+ FILETREE_1,
+ ['cylc-run/foo/bar/log/bib/fortuna.txt',
+ 'cylc-run/foo/bar/log/temba.txt',
+ 'cylc-run/foo/bar/rincewind.txt'],
+ id="filetree1 **/*.txt"
+ ),
+ pytest.param(
+ '**',
+ FILETREE_2,
+ ['cylc-run/foo/bar',
+ 'cylc-run/foo/bar/share',
+ 'cylc-run/foo/bar/share/cycle'],
+ id="filetree2 **"
+ ),
+ pytest.param(
+ '**',
+ FILETREE_3,
+ ['cylc-run/foo/bar',
+ 'cylc-run/foo/bar/share/cycle'],
+ id="filetree3 **"
+ ),
+ pytest.param(
+ '**/s*',
+ FILETREE_3,
+ ['cylc-run/foo/bar/share',
+ 'cylc-run/foo/bar/share/cycle/sokath.txt'],
+ id="filetree3 **/s*"
+ ),
+ pytest.param(
+ '**',
+ FILETREE_4,
+ ['cylc-run/foo/bar',
+ 'cylc-run/foo/bar/share/cycle'],
+ id="filetree4 **"
+ ),
+ ]
+)
+def test_glob_in_run_dir(
+ pattern: str,
+ filetree: Dict[str, Any],
+ expected_matches: List[str],
+ tmp_path: Path, tmp_run_dir: Callable
+) -> None:
+ """Test that glob_in_run_dir() returns the minimal set of results with
+ no redundant paths.
+ """
+ # Setup
+ cylc_run_dir: Path = tmp_run_dir()
+ reg = 'foo/bar'
+ run_dir = cylc_run_dir / reg
+ create_filetree(filetree, tmp_path, tmp_path)
+ symlink_dirs = [run_dir / i for i in get_symlink_dirs(reg, run_dir)]
+ expected = [tmp_path / i for i in expected_matches]
+ # Test
+ assert glob_in_run_dir(run_dir, pattern, symlink_dirs) == expected
diff --git a/tests/unit/test_hostuserutil.py b/tests/unit/test_hostuserutil.py
index f9d9d745262..aa21632530a 100644
--- a/tests/unit/test_hostuserutil.py
+++ b/tests/unit/test_hostuserutil.py
@@ -35,10 +35,11 @@ def test_is_remote_user_on_current_user():
assert not is_remote_user(os.getenv('USER'))
-def test_is_remote_host_on_localhost():
+def test_is_remote_host_on_localhost(monkeypatch):
"""is_remote_host with localhost."""
assert not is_remote_host(None)
assert not is_remote_host('localhost')
+    assert not is_remote_host('localhost4.localhost42')
assert not is_remote_host(os.getenv('HOSTNAME'))
assert not is_remote_host(get_host())
diff --git a/tests/unit/test_id.py b/tests/unit/test_id.py
index 837a8b27b71..ba47083b02f 100644
--- a/tests/unit/test_id.py
+++ b/tests/unit/test_id.py
@@ -320,6 +320,7 @@ def test_tokens():
with pytest.raises(ValueError):
Tokens()['foo'] = 'a'
+    # test equality
assert Tokens('a') == Tokens('a')
assert Tokens('a') != Tokens('b')
assert Tokens('a', relative=True) == Tokens('a', relative=True)
@@ -327,6 +328,12 @@ def test_tokens():
assert Tokens() != Tokens('a')
assert Tokens(workflow='a') == Tokens('a')
+    # test equality with non-Tokens objects
+    assert Tokens('a') != 'a'
+    assert not Tokens('a') == 'a'
+    assert Tokens('a') != 1
+    assert not Tokens('a') == 1
+
tokens = Tokens('a//b')
tokens.update({'cycle': 'c', 'task': 'd'})
assert tokens == Tokens('a//c/d')
diff --git a/tests/unit/test_id_match.py b/tests/unit/test_id_match.py
index 33fcf87e3c4..d26e85b092d 100644
--- a/tests/unit/test_id_match.py
+++ b/tests/unit/test_id_match.py
@@ -44,6 +44,7 @@ def _task_proxy(id_, hier):
tdef = create_autospec(TaskDef, namespace_hierarchy=hier)
tdef.name = tokens['task']
return TaskProxy(
+        Tokens('~user/workflow'),
tdef,
start_point=IntegerPoint(tokens['cycle']),
status=tokens['task_sel'],
diff --git a/tests/unit/test_install.py b/tests/unit/test_install.py
new file mode 100644
index 00000000000..d617ca9a6a5
--- /dev/null
+++ b/tests/unit/test_install.py
@@ -0,0 +1,544 @@
+# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
+# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import logging
+import os
+import shutil
+from pathlib import Path
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Optional,
+ Tuple,
+ Type,
+ Union,
+)
+
+import pytest
+
+from cylc.flow import workflow_files
+from cylc.flow.exceptions import (
+ InputError,
+ WorkflowFilesError,
+)
+from cylc.flow.workflow_files import (
+ WorkflowFiles,
+ get_workflow_source_dir,
+)
+from cylc.flow.install import (
+ NESTED_DIRS_MSG,
+ check_nested_dirs,
+ get_rsync_rund_cmd,
+ get_run_dir_info,
+ get_source_workflow_name,
+ install_workflow,
+ parse_cli_sym_dirs,
+ reinstall_workflow,
+ search_install_source_dirs,
+ validate_source_dir,
+)
+
+NonCallableFixture = Any
+
+
+# global.cylc[install]max depth for these tests:
+MAX_SCAN_DEPTH = 3
+
+
+@pytest.fixture
+def glbl_cfg_max_scan_depth(mock_glbl_cfg: Callable) -> None:
+ mock_glbl_cfg(
+ 'cylc.flow.install.glbl_cfg',
+ f'''
+ [install]
+ max depth = {MAX_SCAN_DEPTH}
+ '''
+ )
+
+
+@pytest.mark.parametrize(
+ 'workflow_name, err_expected',
+ [
+ ('foo/' * (MAX_SCAN_DEPTH - 1), False),
+ ('foo/' * MAX_SCAN_DEPTH, True) # /run1 takes it beyond max depth
+ ]
+)
+def test_install_workflow__max_depth(
+ workflow_name: str,
+ err_expected: bool,
+ tmp_run_dir: Callable,
+ tmp_src_dir: Callable,
+ glbl_cfg_max_scan_depth: NonCallableFixture
+):
+ """Test that trying to install beyond max depth fails."""
+ tmp_run_dir()
+ src_dir = tmp_src_dir('bar')
+ if err_expected:
+ with pytest.raises(WorkflowFilesError) as exc_info:
+ install_workflow(src_dir, workflow_name)
+ assert "would exceed global.cylc[install]max depth" in str(
+ exc_info.value
+ )
+ else:
+ install_workflow(src_dir, workflow_name)
+
+
+@pytest.mark.parametrize(
+ 'flow_file, expected_exc',
+ [
+ (WorkflowFiles.FLOW_FILE, WorkflowFilesError),
+ (WorkflowFiles.SUITE_RC, WorkflowFilesError),
+ (None, None)
+ ]
+)
+def test_install_workflow__next_to_flow_file(
+ flow_file: Optional[str],
+ expected_exc: Optional[Type[Exception]],
+ tmp_run_dir: Callable,
+ tmp_src_dir: Callable
+):
+ """Test that you can't install into a dir that contains a workflow file."""
+ # Setup
+ cylc_run_dir: Path = tmp_run_dir()
+ workflow_dir = cylc_run_dir / 'faden'
+ workflow_dir.mkdir()
+ src_dir: Path = tmp_src_dir('faden')
+ if flow_file:
+ (workflow_dir / flow_file).touch()
+ # Test
+ if expected_exc:
+ with pytest.raises(expected_exc) as exc_info:
+ install_workflow(src_dir, 'faden')
+ assert "Nested run directories not allowed" in str(exc_info.value)
+ else:
+ install_workflow(src_dir, 'faden')
+
+
+def test_install_workflow__symlink_target_exists(
+ tmp_path: Path,
+ tmp_src_dir: Callable,
+ tmp_run_dir: Callable,
+ mock_glbl_cfg: Callable,
+):
+ """Test that you can't install workflow when run dir symlink dir target
+ already exists."""
+ reg = 'smeagol'
+ src_dir: Path = tmp_src_dir(reg)
+ tmp_run_dir()
+ sym_run = tmp_path / 'sym-run'
+ sym_log = tmp_path / 'sym-log'
+ mock_glbl_cfg(
+ 'cylc.flow.pathutil.glbl_cfg',
+ f'''
+ [install]
+ [[symlink dirs]]
+ [[[localhost]]]
+ run = {sym_run}
+ log = {sym_log}
+ '''
+ )
+ msg = "Symlink dir target already exists: .*{}"
+ # Test:
+ (sym_run / 'cylc-run' / reg / 'run1').mkdir(parents=True)
+ with pytest.raises(WorkflowFilesError, match=msg.format(sym_run)):
+ install_workflow(src_dir)
+
+ shutil.rmtree(sym_run)
+ (
+ sym_log / 'cylc-run' / reg / 'run1' / WorkflowFiles.LogDir.DIRNAME
+ ).mkdir(parents=True)
+ with pytest.raises(WorkflowFilesError, match=msg.format(sym_log)):
+ install_workflow(src_dir)
+
+
+def test_check_nested_dirs(
+ tmp_run_dir: Callable,
+ glbl_cfg_max_scan_depth: NonCallableFixture
+):
+ """Test that check_nested_dirs() raises when a parent dir is a
+ workflow directory."""
+ cylc_run_dir: Path = tmp_run_dir()
+ test_dir = cylc_run_dir.joinpath('a/' * (MAX_SCAN_DEPTH + 3))
+ # note we check beyond max scan depth (because we're checking upwards)
+ test_dir.mkdir(parents=True)
+ # Parents are not run dirs - ok:
+ check_nested_dirs(test_dir)
+ # Parent contains a run dir but that run dir is not direct ancestor
+ # of our test dir - ok:
+ tmp_run_dir('a/Z')
+ check_nested_dirs(test_dir)
+ # Now make run dir out of parent - not ok:
+ tmp_run_dir('a')
+ with pytest.raises(WorkflowFilesError) as exc:
+ check_nested_dirs(test_dir)
+ assert str(exc.value) == NESTED_DIRS_MSG.format(
+ dir_type='run', dest=test_dir, existing=(cylc_run_dir / 'a')
+ )
+
+
+@pytest.mark.parametrize(
+ 'named_run', [True, False]
+)
+@pytest.mark.parametrize(
+ 'test_install_path, existing_install_path',
+ [
+ pytest.param(
+ f'{"child/" * (MAX_SCAN_DEPTH + 3)}',
+ '',
+ id="Check parents (beyond max scan depth)"
+ ),
+ pytest.param(
+ '',
+ f'{"child/" * MAX_SCAN_DEPTH}',
+ id="Check children up to max scan depth"
+ )
+ ]
+)
+def test_check_nested_dirs_install_dirs(
+ tmp_run_dir: Callable,
+ glbl_cfg_max_scan_depth: NonCallableFixture,
+ test_install_path: str,
+ existing_install_path: str,
+ named_run: bool
+):
+ """Test that check nested dirs looks both up and down a tree for
+ WorkflowFiles.Install.DIRNAME.
+
+ Params:
+ test_install_path: Path relative to ~/cylc-run/thing where we are
+ trying to install a workflow.
+ existing_install_path: Path relative to ~/cylc-run/thing where there
+ is an existing install dir.
+        named_run: Whether the workflow we are trying to install has a
+            named/numbered run.
+ """
+ cylc_run_dir: Path = tmp_run_dir()
+ existing_install: Path = tmp_run_dir(
+ f'thing/{existing_install_path}/run1', installed=True, named=True
+ ).parent
+ test_install_dir = cylc_run_dir / 'thing' / test_install_path
+ test_run_dir = test_install_dir / 'run1' if named_run else test_install_dir
+ with pytest.raises(WorkflowFilesError) as exc:
+ check_nested_dirs(test_run_dir, test_install_dir)
+ assert str(exc.value) == NESTED_DIRS_MSG.format(
+ dir_type='install', dest=test_run_dir, existing=existing_install
+ )
+
+
+def test_get_workflow_source_dir_numbered_run(tmp_path):
+ """Test get_workflow_source_dir returns correct source for numbered run"""
+ cylc_install_dir = (
+ tmp_path /
+ "cylc-run" /
+ "flow-name" /
+ "_cylc-install")
+ cylc_install_dir.mkdir(parents=True)
+ run_dir = (tmp_path / "cylc-run" / "flow-name" / "run1")
+ run_dir.mkdir()
+ source_dir = (tmp_path / "cylc-source" / "flow-name")
+ source_dir.mkdir(parents=True)
+ assert get_workflow_source_dir(run_dir) == (None, None)
+ (cylc_install_dir / "source").symlink_to(source_dir)
+ assert get_workflow_source_dir(run_dir) == (
+ str(source_dir), cylc_install_dir / "source")
+
+
+def test_get_workflow_source_dir_named_run(tmp_path):
+ """Test get_workflow_source_dir returns correct source for named run"""
+ cylc_install_dir = (
+ tmp_path /
+ "cylc-run" /
+ "flow-name" /
+ "_cylc-install")
+ cylc_install_dir.mkdir(parents=True)
+ source_dir = (tmp_path / "cylc-source" / "flow-name")
+ source_dir.mkdir(parents=True)
+ (cylc_install_dir / "source").symlink_to(source_dir)
+ assert get_workflow_source_dir(
+ cylc_install_dir.parent) == (
+ str(source_dir),
+ cylc_install_dir / "source")
+
+
+def test_reinstall_workflow(tmp_path, capsys):
+
+ cylc_install_dir = (
+ tmp_path /
+ "cylc-run" /
+ "flow-name" /
+ "_cylc-install")
+ cylc_install_dir.mkdir(parents=True)
+ source_dir = (tmp_path / "cylc-source" / "flow-name")
+ source_dir.mkdir(parents=True)
+ (source_dir / "flow.cylc").touch()
+
+ (cylc_install_dir / "source").symlink_to(source_dir)
+ run_dir = cylc_install_dir.parent
+ reinstall_workflow(source_dir, "flow-name", run_dir)
+ assert capsys.readouterr().out == (
+ f"REINSTALLED flow-name from {source_dir}\n")
+
+
+@pytest.mark.parametrize(
+ 'filename, expected_err',
+ [('flow.cylc', None),
+ ('suite.rc', None),
+ ('fluff.txt', (WorkflowFilesError, "Could not find workflow 'baa/baa'"))]
+)
+def test_search_install_source_dirs(
+ filename: str, expected_err: Optional[Tuple[Type[Exception], str]],
+ tmp_path: Path, mock_glbl_cfg: Callable):
+ """Test search_install_source_dirs().
+
+ Params:
+ filename: A file to insert into one of the source dirs.
+ expected_err: Exception and message expected to be raised.
+ """
+ horse_dir = tmp_path / 'horse'
+ horse_dir.mkdir()
+ sheep_dir = tmp_path / 'sheep'
+ source_dir = sheep_dir / 'baa' / 'baa'
+ source_dir.mkdir(parents=True)
+ source_dir_file = source_dir / filename
+ source_dir_file.touch()
+ mock_glbl_cfg(
+ 'cylc.flow.install.glbl_cfg',
+ f'''
+ [install]
+ source dirs = {horse_dir}, {sheep_dir}
+ '''
+ )
+ if expected_err:
+ err, msg = expected_err
+ with pytest.raises(err) as exc:
+ search_install_source_dirs('baa/baa')
+ assert msg in str(exc.value)
+ else:
+ ret = search_install_source_dirs('baa/baa')
+ assert ret == source_dir
+ assert ret.is_absolute()
+
+
+def test_search_install_source_dirs_empty(mock_glbl_cfg: Callable):
+ """Test search_install_source_dirs() when no source dirs configured."""
+ mock_glbl_cfg(
+ 'cylc.flow.install.glbl_cfg',
+ '''
+ [install]
+ source dirs =
+ '''
+ )
+ with pytest.raises(WorkflowFilesError) as exc:
+ search_install_source_dirs('foo')
+ assert str(exc.value) == (
+ "Cannot find workflow as 'global.cylc[install]source dirs' "
+ "does not contain any paths")
+
+
+@pytest.mark.parametrize(
+ 'path, expected',
+ [
+ ('~/isla/nublar/dennis/nedry', 'dennis/nedry'),
+ ('~/isla/sorna/paul/kirby', 'paul/kirby'),
+ ('~/mos/eisley/owen/skywalker', 'skywalker')
+ ]
+)
+def test_get_source_workflow_name(
+ path: str,
+ expected: str,
+ mock_glbl_cfg: Callable
+):
+ mock_glbl_cfg(
+ 'cylc.flow.install.glbl_cfg',
+ '''
+ [install]
+ source dirs = ~/isla/nublar, ${HOME}/isla/sorna
+ '''
+ )
+ assert get_source_workflow_name(
+ Path(path).expanduser().resolve()) == expected
+
+
+def test_get_rsync_rund_cmd(
+ tmp_src_dir: Callable,
+ tmp_run_dir: Callable
+):
+ """Test rsync command for cylc install/reinstall excludes cylc dirs.
+ """
+ src_dir = tmp_src_dir('foo')
+ cylc_run_dir: Path = tmp_run_dir('rsync_flow', installed=True, named=False)
+ for wdir in [
+ WorkflowFiles.WORK_DIR,
+ WorkflowFiles.SHARE_DIR,
+ WorkflowFiles.LogDir.DIRNAME,
+ ]:
+ cylc_run_dir.joinpath(wdir).mkdir(exist_ok=True)
+ actual_cmd = get_rsync_rund_cmd(src_dir, cylc_run_dir)
+ assert actual_cmd == [
+ 'rsync', '-a', '--checksum', '--out-format=%o %n%L', '--no-t',
+ '--exclude=/log', '--exclude=/work', '--exclude=/share',
+ '--exclude=/_cylc-install', '--exclude=/.service',
+ f'{src_dir}/', f'{cylc_run_dir}/']
+
+
+@pytest.mark.parametrize(
+ 'args, expected_relink, expected_run_num, expected_run_dir',
+ [
+ (
+ ['{cylc_run}/numbered', None, False],
+ True, 1, '{cylc_run}/numbered/run1'
+ ),
+ (
+ ['{cylc_run}/named', 'dukat', False],
+ False, None, '{cylc_run}/named/dukat'
+ ),
+ (
+ ['{cylc_run}/unnamed', None, True],
+ False, None, '{cylc_run}/unnamed'
+ ),
+ ]
+)
+def test_get_run_dir_info(
+ args: list,
+ expected_relink: bool,
+ expected_run_num: Optional[int],
+ expected_run_dir: Union[Path, str],
+ tmp_run_dir: Callable
+):
+ """Test get_run_dir_info().
+
+ Params:
+ args: Input args to function.
+ expected_*: Expected return values.
+ """
+ # Setup
+ cylc_run_dir: Path = tmp_run_dir()
+ sub = lambda x: Path(x.format(cylc_run=cylc_run_dir)) # noqa: E731
+ args[0] = sub(args[0])
+ expected_run_dir = sub(expected_run_dir)
+ # Test
+ assert get_run_dir_info(*args) == (
+ expected_relink, expected_run_num, expected_run_dir
+ )
+ assert expected_run_dir.is_absolute()
+
+
+def test_get_run_dir_info__increment_run_num(tmp_run_dir: Callable):
+ """Test that get_run_dir_info() increments run number and unlinks runN."""
+ # Setup
+ cylc_run_dir: Path = tmp_run_dir()
+ run_dir: Path = tmp_run_dir('gowron/run1')
+ runN = run_dir.parent / WorkflowFiles.RUN_N
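+    # lexists() checks the runN symlink itself rather than its target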
+ assert os.path.lexists(runN)
+ # Test
+ assert get_run_dir_info(cylc_run_dir / 'gowron', None, False) == (
+ True, 2, cylc_run_dir / 'gowron' / 'run2'
+ )
+ assert not os.path.lexists(runN)
+
+
+def test_get_run_dir_info__fail(tmp_run_dir: Callable):
+ # Test that you can't install named runs when numbered runs exist
+ cylc_run_dir: Path = tmp_run_dir()
+ run_dir: Path = tmp_run_dir('martok/run1')
+ with pytest.raises(WorkflowFilesError) as excinfo:
+ get_run_dir_info(run_dir.parent, 'targ', False)
+ assert "contains installed numbered runs" in str(excinfo.value)
+
+ # Test that you can install numbered run in an empty dir
+ base_dir = cylc_run_dir / 'odo'
+ base_dir.mkdir()
+ get_run_dir_info(base_dir, None, False)
+ # But not when named runs exist
+ tmp_run_dir('odo/meter')
+ with pytest.raises(WorkflowFilesError) as excinfo:
+ get_run_dir_info(base_dir, None, False)
+ assert "contains an installed workflow"
+
+
+@pytest.mark.parametrize(
+ 'symlink_dirs, err_msg, expected',
+ [
+ ('log=$shortbread, share= $bourbon,share/cycle= $digestive, ',
+ "There is an error in --symlink-dirs option:",
+ None
+ ),
+ ('log=$shortbread share= $bourbon share/cycle= $digestive ',
+ "There is an error in --symlink-dirs option:"
+ " log=$shortbread share= $bourbon share/cycle= $digestive . "
+ "Try entering option in the form --symlink-dirs="
+ "'log=$DIR, share=$DIR2, ...'",
+ None
+ ),
+ ('run=$NICE, log= $Garibaldi, share/cycle=$RichTea', None,
+ {'localhost': {
+ 'run': '$NICE',
+ 'log': '$Garibaldi',
+ 'share/cycle': '$RichTea'
+ }}
+ ),
+ ('some_other_dir=$bourbon',
+ 'some_other_dir not a valid entry for --symlink-dirs',
+     {'some_other_dir': '$bourbon'}
+ ),
+ ]
+)
+def test_parse_cli_sym_dirs(
+ symlink_dirs: str,
+ err_msg: str,
+ expected: Dict[str, Dict[str, Any]]
+):
+ """Test parse_cli_sym_dirs returns dict or correctly raises errors on cli
+ symlink dir options"""
+    if err_msg is not None:
+        with pytest.raises(InputError) as exc:
+            parse_cli_sym_dirs(symlink_dirs)
+        assert err_msg in str(exc.value)
+    else:
+        actual = parse_cli_sym_dirs(symlink_dirs)
+        assert actual == expected
+
+
+def test_validate_source_dir(tmp_run_dir: Callable, tmp_src_dir: Callable):
+ cylc_run_dir: Path = tmp_run_dir()
+ src_dir: Path = tmp_src_dir('ludlow')
+ validate_source_dir(src_dir, 'ludlow')
+ # Test that src dir must have flow file
+ (src_dir / WorkflowFiles.FLOW_FILE).unlink()
+ with pytest.raises(WorkflowFilesError):
+ validate_source_dir(src_dir, 'ludlow')
+ # Test that reserved dirnames not allowed in src dir
+ src_dir = tmp_src_dir('roland')
+ (src_dir / 'log').mkdir()
+ with pytest.raises(WorkflowFilesError) as exc_info:
+ validate_source_dir(src_dir, 'roland')
+ assert "exists in source directory" in str(exc_info.value)
+ # Test that src dir is allowed to be inside ~/cylc-run
+ src_dir = cylc_run_dir / 'dieter'
+ src_dir.mkdir()
+ (src_dir / WorkflowFiles.FLOW_FILE).touch()
+ validate_source_dir(src_dir, 'dieter')
+ # Test that src dir is not allowed to be an installed dir.
+ src_dir = cylc_run_dir / 'ajay'
+ src_dir.mkdir()
+ (src_dir / WorkflowFiles.Install.DIRNAME).mkdir()
+ (src_dir / WorkflowFiles.FLOW_FILE).touch()
+ with pytest.raises(WorkflowFilesError) as exc_info:
+ validate_source_dir(src_dir, 'ajay')
+ assert "exists in source directory" in str(exc_info.value)
diff --git a/tests/unit/test_scheduler_cli.py b/tests/unit/test_scheduler_cli.py
index 34a32e8c195..a08e4d8e161 100644
--- a/tests/unit/test_scheduler_cli.py
+++ b/tests/unit/test_scheduler_cli.py
@@ -21,6 +21,7 @@
from cylc.flow.exceptions import ServiceFileError
from cylc.flow.scheduler_cli import (
+    _distribute,
_version_check,
)
@@ -222,3 +223,39 @@ def test_version_check_no_db(tmp_path):
"""It should pass if there is no DB file (e.g. on workflow first start)."""
db_file = tmp_path / 'db' # non-existent file
assert _version_check(db_file, False, False)
+
+
+@pytest.mark.parametrize(
+    'cli_colour, is_terminal, distribute_colour',
+    [
+        ('never', True, '--color=never'),
+        ('auto', True, '--color=always'),
+        ('always', True, '--color=always'),
+        ('never', False, '--color=never'),
+        ('auto', False, '--color=never'),
+        ('always', False, '--color=never'),
+    ]
+)
+def test_distribute_colour(
+    monkeymock,
+    cli_colour,
+    is_terminal,
+    distribute_colour,
+):
+ """It should start detached workflows with the correct --colour option.
+
+ The is_terminal test will fail for detached scheduler processes which means
+ that the colour formatting will be stripped for startup. This includes
+ the Cylc header logo and any warnings/errors raised during config parsing.
+
+ In order to preserver colour formatting we must set the `--colour` arg to
+ `always` when we want the detached process to start in colour mode.
+
+ See https://github.com/cylc/cylc-flow/issues/5159
+ """
+    monkeymock('cylc.flow.scheduler_cli.sys.exit')
+    _is_terminal = monkeymock('cylc.flow.scheduler_cli.is_terminal')
+    _is_terminal.return_value = is_terminal
+    _cylc_server_cmd = monkeymock('cylc.flow.scheduler_cli.cylc_server_cmd')
+    _distribute('myhost', 'foo', 'foo/run1', cli_colour)
+    assert distribute_colour in _cylc_server_cmd.call_args[0][0]
diff --git a/tests/unit/test_workflow_files.py b/tests/unit/test_workflow_files.py
index 3805bb613d6..90fe3327845 100644
--- a/tests/unit/test_workflow_files.py
+++ b/tests/unit/test_workflow_files.py
@@ -14,84 +14,48 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
-from glob import iglob
import logging
-import os
from pathlib import Path
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Optional,
+ Type,
+ Union,
+)
+
import pytest
-import re
-import shutil
-from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union
-from unittest import mock
-from cylc.flow import CYLC_LOG
from cylc.flow import workflow_files
from cylc.flow.exceptions import (
- CylcError,
- PlatformError,
- ServiceFileError,
InputError,
WorkflowFilesError,
)
-from cylc.flow.pathutil import parse_rm_dirs
-from cylc.flow.scripts.clean import CleanOptions
from cylc.flow.workflow_files import (
- NESTED_DIRS_MSG,
WorkflowFiles,
- _clean_using_glob,
- _remote_clean_cmd,
+ abort_if_flow_file_in_path,
check_flow_file,
- check_nested_dirs,
check_reserved_dir_names,
- clean,
detect_both_flow_and_suite,
- get_rsync_rund_cmd,
- get_run_dir_info,
- get_source_workflow_name,
get_symlink_dirs,
- get_workflow_source_dir,
- glob_in_run_dir,
infer_latest_run,
- init_clean,
- install_workflow,
is_forbidden,
is_installed,
- parse_cli_sym_dirs,
- reinstall_workflow,
- search_install_source_dirs,
- validate_source_dir,
validate_workflow_name,
- abort_if_flow_file_in_path
)
-from .conftest import MonkeyMock
from .filetree import (
FILETREE_1,
FILETREE_2,
FILETREE_3,
FILETREE_4,
create_filetree,
- get_filetree_as_list
)
NonCallableFixture = Any
-# global.cylc[install]scan depth for these tests:
-MAX_SCAN_DEPTH = 3
-
-
-@pytest.fixture
-def glbl_cfg_max_scan_depth(mock_glbl_cfg: Callable) -> None:
- mock_glbl_cfg(
- 'cylc.flow.workflow_files.glbl_cfg',
- f'''
- [install]
- max depth = {MAX_SCAN_DEPTH}
- '''
- )
-
-
@pytest.mark.parametrize(
'path, expected',
[('a/b/c', '/mock_cylc_dir/a/b/c'),
@@ -128,80 +92,6 @@ def test_is_valid_run_dir(is_abs_path: bool, tmp_run_dir: Callable):
assert workflow_files.is_valid_run_dir(Path(prefix, 'foo/bar')) is True
-def test_check_nested_dirs(
- tmp_run_dir: Callable,
- glbl_cfg_max_scan_depth: NonCallableFixture
-):
- """Test that check_nested_dirs() raises when a parent dir is a
- workflow directory."""
- cylc_run_dir: Path = tmp_run_dir()
- test_dir = cylc_run_dir.joinpath('a/' * (MAX_SCAN_DEPTH + 3))
- # note we check beyond max scan depth (because we're checking upwards)
- test_dir.mkdir(parents=True)
- # Parents are not run dirs - ok:
- check_nested_dirs(test_dir)
- # Parent contains a run dir but that run dir is not direct ancestor
- # of our test dir - ok:
- tmp_run_dir('a/Z')
- check_nested_dirs(test_dir)
- # Now make run dir out of parent - not ok:
- tmp_run_dir('a')
- with pytest.raises(WorkflowFilesError) as exc:
- check_nested_dirs(test_dir)
- assert str(exc.value) == NESTED_DIRS_MSG.format(
- dir_type='run', dest=test_dir, existing=(cylc_run_dir / 'a')
- )
-
-
-@pytest.mark.parametrize(
- 'named_run', [True, False]
-)
-@pytest.mark.parametrize(
- 'test_install_path, existing_install_path',
- [
- pytest.param(
- f'{"child/" * (MAX_SCAN_DEPTH + 3)}',
- '',
- id="Check parents (beyond max scan depth)"
- ),
- pytest.param(
- '',
- f'{"child/" * MAX_SCAN_DEPTH}',
- id="Check children up to max scan depth"
- )
- ]
-)
-def test_check_nested_dirs_install_dirs(
- tmp_run_dir: Callable,
- glbl_cfg_max_scan_depth: NonCallableFixture,
- test_install_path: str,
- existing_install_path: str,
- named_run: bool
-):
- """Test that check nested dirs looks both up and down a tree for
- WorkflowFiles.Install.DIRNAME.
-
- Params:
- test_install_path: Path relative to ~/cylc-run/thing where we are
- trying to install a workflow.
- existing_install_path: Path relative to ~/cylc-run/thing where there
- is an existing install dir.
- named_run: Whether the workflow we are trying to install has
- named/numbered run.
- """
- cylc_run_dir: Path = tmp_run_dir()
- existing_install: Path = tmp_run_dir(
- f'thing/{existing_install_path}/run1', installed=True, named=True
- ).parent
- test_install_dir = cylc_run_dir / 'thing' / test_install_path
- test_run_dir = test_install_dir / 'run1' if named_run else test_install_dir
- with pytest.raises(WorkflowFilesError) as exc:
- check_nested_dirs(test_run_dir, test_install_dir)
- assert str(exc.value) == NESTED_DIRS_MSG.format(
- dir_type='install', dest=test_run_dir, existing=existing_install
- )
-
-
@pytest.mark.parametrize(
'reg, expected_err, expected_msg',
[('foo/bar/', None, None),
@@ -370,396 +260,6 @@ def test_infer_latest_run__bad(
assert str(excinfo.value) == err_msg
-@pytest.mark.parametrize(
- 'reg, stopped, err, err_msg',
- [
- ('foo/..', True, WorkflowFilesError,
- "cannot be a path that points to the cylc-run directory or above"),
- ('foo/../..', True, WorkflowFilesError,
- "cannot be a path that points to the cylc-run directory or above"),
- ('foo', False, ServiceFileError, "Cannot clean running workflow"),
- ]
-)
-def test_clean_check__fail(
- reg: str,
- stopped: bool,
- err: Type[Exception],
- err_msg: str,
- monkeypatch: pytest.MonkeyPatch,
- tmp_path: Path,
-) -> None:
- """Test that _clean_check() fails appropriately.
-
- Params:
- reg: Workflow name.
- stopped: Whether the workflow is stopped when _clean_check() is called.
- err: Expected error class.
- err_msg: Message that is expected to be in the exception.
- """
- def mocked_detect_old_contact_file(*a, **k):
- if not stopped:
- raise ServiceFileError('Mocked error')
-
- monkeypatch.setattr(
- 'cylc.flow.workflow_files.detect_old_contact_file',
- mocked_detect_old_contact_file
- )
-
- with pytest.raises(err) as exc:
- workflow_files._clean_check(CleanOptions(), reg, tmp_path)
- assert err_msg in str(exc.value)
-
-
-@pytest.mark.parametrize(
- 'db_platforms, opts, clean_called, remote_clean_called',
- [
- pytest.param(
- ['localhost', 'localhost'], {}, True, False,
- id="Only platform in DB is localhost"
- ),
- pytest.param(
- ['horse'], {}, True, True,
- id="Remote platform in DB"
- ),
- pytest.param(
- ['horse'], {'local_only': True}, True, False,
- id="Local clean only"
- ),
- pytest.param(
- ['horse'], {'remote_only': True}, False, True,
- id="Remote clean only"
- )
- ]
-)
-def test_init_clean(
- db_platforms: List[str],
- opts: Dict[str, Any],
- clean_called: bool,
- remote_clean_called: bool,
- monkeypatch: pytest.MonkeyPatch, monkeymock: MonkeyMock,
- tmp_run_dir: Callable
-) -> None:
- """Test the init_clean() function logic.
-
- Params:
- db_platforms: Platform names that would be loaded from the database.
- opts: Any options passed to the cylc clean CLI.
- clean_called: If a local clean is expected to go ahead.
- remote_clean_called: If a remote clean is expected to go ahead.
- """
- reg = 'foo/bar/'
- rdir = tmp_run_dir(reg, installed=True)
- Path(rdir, WorkflowFiles.Service.DIRNAME, WorkflowFiles.Service.DB).touch()
- mock_clean = monkeymock('cylc.flow.workflow_files.clean')
- mock_remote_clean = monkeymock('cylc.flow.workflow_files.remote_clean')
- monkeypatch.setattr('cylc.flow.workflow_files.get_platforms_from_db',
- lambda x: set(db_platforms))
-
- init_clean(reg, opts=CleanOptions(**opts))
- assert mock_clean.called is clean_called
- assert mock_remote_clean.called is remote_clean_called
-
-
-def test_init_clean__no_dir(
- monkeymock: MonkeyMock, tmp_run_dir: Callable,
- caplog: pytest.LogCaptureFixture
-) -> None:
- """Test init_clean() when the run dir doesn't exist"""
- caplog.set_level(logging.INFO, CYLC_LOG)
- tmp_run_dir()
- mock_clean = monkeymock('cylc.flow.workflow_files.clean')
- mock_remote_clean = monkeymock('cylc.flow.workflow_files.remote_clean')
-
- init_clean('foo/bar', opts=CleanOptions())
- assert "No directory to clean" in caplog.text
- assert mock_clean.called is False
- assert mock_remote_clean.called is False
-
-
-def test_init_clean__no_db(
- monkeymock: MonkeyMock, tmp_run_dir: Callable,
- caplog: pytest.LogCaptureFixture
-) -> None:
- """Test init_clean() when the workflow database doesn't exist"""
- caplog.set_level(logging.INFO, CYLC_LOG)
- tmp_run_dir('bespin')
- mock_clean = monkeymock('cylc.flow.workflow_files.clean')
- mock_remote_clean = monkeymock('cylc.flow.workflow_files.remote_clean')
-
- init_clean('bespin', opts=CleanOptions())
- assert (
- "No workflow database for bespin - will only clean locally"
- ) in caplog.text
- assert mock_clean.called is True
- assert mock_remote_clean.called is False
-
-
-def test_init_clean__remote_only_no_db(
- monkeymock: MonkeyMock, tmp_run_dir: Callable
-) -> None:
- """Test remote-only init_clean() when the workflow DB doesn't exist"""
- tmp_run_dir('hoth')
- mock_clean = monkeymock('cylc.flow.workflow_files.clean')
- mock_remote_clean = monkeymock('cylc.flow.workflow_files.remote_clean')
-
- with pytest.raises(ServiceFileError) as exc:
- init_clean('hoth', opts=CleanOptions(remote_only=True))
- assert (
- "No workflow database for hoth - cannot perform remote clean"
- ) in str(exc.value)
- assert mock_clean.called is False
- assert mock_remote_clean.called is False
-
-
-def test_init_clean__running_workflow(
- monkeypatch: pytest.MonkeyPatch, tmp_run_dir: Callable
-) -> None:
- """Test init_clean() fails when workflow is still running"""
- def mock_err(*args, **kwargs):
- raise ServiceFileError("Mocked error")
- monkeypatch.setattr('cylc.flow.workflow_files.detect_old_contact_file',
- mock_err)
- tmp_run_dir('yavin')
-
- with pytest.raises(ServiceFileError) as exc:
- init_clean('yavin', opts=CleanOptions())
- assert "Cannot clean running workflow" in str(exc.value)
-
-
-@pytest.mark.parametrize(
- 'rm_dirs, expected_clean, expected_remote_clean',
- [(None, None, []),
- (["r2d2:c3po"], {"r2d2", "c3po"}, ["r2d2:c3po"])]
-)
-def test_init_clean__rm_dirs(
- rm_dirs: Optional[List[str]],
- expected_clean: Set[str],
- expected_remote_clean: List[str],
- monkeymock: MonkeyMock, monkeypatch: pytest.MonkeyPatch,
- tmp_run_dir: Callable
-) -> None:
- """Test init_clean() with the --rm option.
-
- Params:
- rm_dirs: Dirs given by --rm option.
- expected_clean: The dirs that are expected to be passed to clean().
- expected_remote_clean: The dirs that are expected to be passed to
- remote_clean().
- """
- reg = 'dagobah'
- run_dir: Path = tmp_run_dir(reg)
- Path(run_dir, WorkflowFiles.Service.DIRNAME, WorkflowFiles.Service.DB).touch()
- mock_clean = monkeymock('cylc.flow.workflow_files.clean')
- mock_remote_clean = monkeymock('cylc.flow.workflow_files.remote_clean')
- platforms = {'platform_one'}
- monkeypatch.setattr('cylc.flow.workflow_files.get_platforms_from_db',
- lambda x: platforms)
- opts = CleanOptions(rm_dirs=rm_dirs) if rm_dirs else CleanOptions()
-
- init_clean(reg, opts=opts)
- mock_clean.assert_called_with(reg, run_dir, expected_clean)
- mock_remote_clean.assert_called_with(
- reg, platforms, expected_remote_clean, opts.remote_timeout)
-
-
-@pytest.mark.parametrize(
- 'reg, symlink_dirs, rm_dirs, expected_deleted, expected_remaining',
- [
- pytest.param(
- 'foo/bar',
- {},
- None,
- ['cylc-run/foo'],
- ['cylc-run'],
- id="Basic clean"
- ),
- pytest.param(
- 'foo/bar/baz',
- {
- 'log': 'sym-log',
- 'share': 'sym-share',
- 'share/cycle': 'sym-cycle',
- 'work': 'sym-work'
- },
- None,
- ['cylc-run/foo', 'sym-log/cylc-run/foo', 'sym-share/cylc-run/foo',
- 'sym-cycle/cylc-run/foo', 'sym-work/cylc-run/foo'],
- ['cylc-run', 'sym-log/cylc-run', 'sym-share/cylc-run',
- 'sym-cycle/cylc-run', 'sym-work/cylc-run'],
- id="Symlink dirs"
- ),
- pytest.param(
- 'foo',
- {
- 'run': 'sym-run',
- 'log': 'sym-log',
- 'share': 'sym-share',
- 'share/cycle': 'sym-cycle',
- 'work': 'sym-work'
- },
- None,
- ['cylc-run/foo', 'sym-run/cylc-run/foo', 'sym-log/cylc-run/foo',
- 'sym-share/cylc-run/foo', 'sym-cycle/cylc-run/foo',
- 'sym-work/cylc-run/foo'],
- ['cylc-run', 'sym-run/cylc-run', 'sym-log/cylc-run',
- 'sym-share/cylc-run', 'sym-cycle/cylc-run',
- 'sym-work'],
- id="Symlink dirs including run dir"
- ),
- pytest.param(
- 'foo',
- {},
- {'log', 'share'},
- ['cylc-run/foo/log', 'cylc-run/foo/share'],
- ['cylc-run/foo/work'],
- id="Targeted clean"
- ),
- pytest.param(
- 'foo',
- {'log': 'sym-log'},
- {'log'},
- ['cylc-run/foo/log', 'sym-log/cylc-run/foo'],
- ['cylc-run/foo/work', 'cylc-run/foo/share/cycle',
- 'sym-log/cylc-run'],
- id="Targeted clean with symlink dirs"
- ),
- pytest.param(
- 'foo',
- {},
- {'share/cy*'},
- ['cylc-run/foo/share/cycle'],
- ['cylc-run/foo/log', 'cylc-run/foo/work', 'cylc-run/foo/share'],
- id="Targeted clean with glob"
- ),
- pytest.param(
- 'foo',
- {'log': 'sym-log'},
- {'w*', 'wo*', 'l*', 'lo*'},
- ['cylc-run/foo/work', 'cylc-run/foo/log', 'sym-log/cylc-run/foo'],
- ['cylc-run/foo/share', 'cylc-run/foo/share/cycle'],
- id="Targeted clean with degenerate glob"
- ),
- ]
-)
-def test_clean(
- reg: str,
- symlink_dirs: Dict[str, str],
- rm_dirs: Optional[Set[str]],
- expected_deleted: List[str],
- expected_remaining: List[str],
- tmp_path: Path, tmp_run_dir: Callable
-) -> None:
- """Test the clean() function.
-
- Params:
- reg: Workflow name.
- symlink_dirs: As you would find in the global config
- under [symlink dirs][platform].
- rm_dirs: As passed to clean().
- expected_deleted: Dirs (relative paths under tmp_path) that are
- expected to be cleaned.
- expected_remaining: Any dirs (relative paths under tmp_path) that are
- not expected to be cleaned.
- """
- # --- Setup ---
- run_dir: Path = tmp_run_dir(reg)
-
- if 'run' in symlink_dirs:
- target = tmp_path / symlink_dirs['run'] / 'cylc-run' / reg
- target.mkdir(parents=True)
- shutil.rmtree(run_dir)
- run_dir.symlink_to(target)
- symlink_dirs.pop('run')
- for symlink_name, target_name in symlink_dirs.items():
- target = tmp_path / target_name / 'cylc-run' / reg / symlink_name
- target.mkdir(parents=True)
- symlink = run_dir / symlink_name
- symlink.symlink_to(target)
- for d_name in ('log', 'share', 'share/cycle', 'work'):
- if d_name not in symlink_dirs:
- (run_dir / d_name).mkdir()
-
- for rel_path in [*expected_deleted, *expected_remaining]:
- assert (tmp_path / rel_path).exists()
-
- # --- The actual test ---
- workflow_files.clean(reg, run_dir, rm_dirs)
- for rel_path in expected_deleted:
- assert (tmp_path / rel_path).exists() is False
- assert (tmp_path / rel_path).is_symlink() is False
- for rel_path in expected_remaining:
- assert (tmp_path / rel_path).exists()
-
-
-def test_clean__broken_symlink_run_dir(
- tmp_path: Path, tmp_run_dir: Callable
-) -> None:
- """Test clean() successfully remove a run dir that is a broken symlink."""
- # Setup
- reg = 'foo/bar'
- run_dir: Path = tmp_run_dir(reg)
- target = tmp_path.joinpath('rabbow/cylc-run', reg)
- target.mkdir(parents=True)
- shutil.rmtree(run_dir)
- run_dir.symlink_to(target)
- target.rmdir()
- assert run_dir.parent.exists() is True # cylc-run/foo should exist
- # Test
- workflow_files.clean(reg, run_dir)
- assert run_dir.parent.exists() is False # cylc-run/foo should be gone
- assert target.parent.exists() is False # rabbow/cylc-run/foo too
-
-
-def test_clean__bad_symlink_dir_wrong_type(
- tmp_path: Path, tmp_run_dir: Callable
-) -> None:
- """Test clean() raises error when a symlink dir actually points to a file
- instead of a dir"""
- reg = 'foo'
- run_dir: Path = tmp_run_dir(reg)
- symlink = run_dir.joinpath('log')
- target = tmp_path.joinpath('sym-log', 'cylc-run', reg, 'meow.txt')
- target.parent.mkdir(parents=True)
- target.touch()
- symlink.symlink_to(target)
-
- with pytest.raises(WorkflowFilesError) as exc:
- workflow_files.clean(reg, run_dir)
- assert "Invalid symlink at" in str(exc.value)
- assert symlink.exists() is True
-
-
-def test_clean__bad_symlink_dir_wrong_form(
- tmp_path: Path, tmp_run_dir: Callable
-) -> None:
- """Test clean() raises error when a symlink dir points to an
- unexpected dir"""
- run_dir: Path = tmp_run_dir('foo')
- symlink = run_dir.joinpath('log')
- target = tmp_path.joinpath('sym-log', 'oops', 'log')
- target.mkdir(parents=True)
- symlink.symlink_to(target)
-
- with pytest.raises(WorkflowFilesError) as exc:
- workflow_files.clean('foo', run_dir)
- assert 'should end with "cylc-run/foo/log"' in str(exc.value)
- assert symlink.exists() is True
-
-
-@pytest.mark.parametrize('pattern', ['thing/', 'thing/*'])
-def test_clean__rm_dir_not_file(pattern: str, tmp_run_dir: Callable):
- """Test clean() does not remove a file when the rm_dir glob pattern would
- match a dir only."""
- reg = 'foo'
- run_dir: Path = tmp_run_dir(reg)
- a_file = run_dir.joinpath('thing')
- a_file.touch()
- rm_dirs = parse_rm_dirs([pattern])
-
- workflow_files.clean(reg, run_dir, rm_dirs)
- assert a_file.exists()
-
-
@pytest.mark.parametrize(
'filetree, expected',
[
@@ -814,760 +314,7 @@ def test_get_symlink_dirs(
assert get_symlink_dirs(reg, cylc_run_dir / reg) == expected
-@pytest.mark.parametrize(
- 'pattern, filetree, expected_matches',
- [
- pytest.param(
- '**',
- FILETREE_1,
- ['cylc-run/foo/bar',
- 'cylc-run/foo/bar/log'],
- id="filetree1 **"
- ),
- pytest.param(
- '*',
- FILETREE_1,
- ['cylc-run/foo/bar/flow.cylc',
- 'cylc-run/foo/bar/log',
- 'cylc-run/foo/bar/mirkwood',
- 'cylc-run/foo/bar/rincewind.txt'],
- id="filetree1 *"
- ),
- pytest.param(
- '**/*.txt',
- FILETREE_1,
- ['cylc-run/foo/bar/log/bib/fortuna.txt',
- 'cylc-run/foo/bar/log/temba.txt',
- 'cylc-run/foo/bar/rincewind.txt'],
- id="filetree1 **/*.txt"
- ),
- pytest.param(
- '**',
- FILETREE_2,
- ['cylc-run/foo/bar',
- 'cylc-run/foo/bar/share',
- 'cylc-run/foo/bar/share/cycle'],
- id="filetree2 **"
- ),
- pytest.param(
- '**',
- FILETREE_3,
- ['cylc-run/foo/bar',
- 'cylc-run/foo/bar/share/cycle'],
- id="filetree3 **"
- ),
- pytest.param(
- '**/s*',
- FILETREE_3,
- ['cylc-run/foo/bar/share',
- 'cylc-run/foo/bar/share/cycle/sokath.txt'],
- id="filetree3 **/s*"
- ),
- pytest.param(
- '**',
- FILETREE_4,
- ['cylc-run/foo/bar',
- 'cylc-run/foo/bar/share/cycle'],
- id="filetree4 **"
- ),
- ]
-)
-def test_glob_in_run_dir(
- pattern: str,
- filetree: Dict[str, Any],
- expected_matches: List[str],
- tmp_path: Path, tmp_run_dir: Callable
-) -> None:
- """Test that glob_in_run_dir() returns the minimal set of results with
- no redundant paths.
- """
- # Setup
- cylc_run_dir: Path = tmp_run_dir()
- reg = 'foo/bar'
- run_dir = cylc_run_dir / reg
- create_filetree(filetree, tmp_path, tmp_path)
- symlink_dirs = [run_dir / i for i in get_symlink_dirs(reg, run_dir)]
- expected = [tmp_path / i for i in expected_matches]
- # Test
- assert glob_in_run_dir(run_dir, pattern, symlink_dirs) == expected
-
-
-@pytest.fixture
-def filetree_for_testing_cylc_clean(tmp_path: Path):
- """Fixture that creates a filetree from the given dict, and returns which
- files are expected to be deleted and which aren't.
-
- See tests/unit/filetree.py
-
- Args:
- reg: Workflow name.
- initial_filetree: The filetree before cleaning.
- filetree_left_behind: The filetree that is expected to be left behind
- after cleaning, excluding the 'you-shall-not-pass/' directory,
- which is always expected to be left behind.
-
- Returns:
- run_dir: Workflow run dir.
- files_to_delete: List of files that are expected to be deleted.
- files_not_to_delete: List of files that are not expected to be deleted.
- """
- def _filetree_for_testing_cylc_clean(
- reg: str,
- initial_filetree: Dict[str, Any],
- filetree_left_behind: Dict[str, Any]
- ) -> Tuple[Path, List[str], List[str]]:
- create_filetree(initial_filetree, tmp_path, tmp_path)
- files_not_to_delete = [
- os.path.normpath(i) for i in
- iglob(str(tmp_path / 'you-shall-not-pass/**'), recursive=True)
- ]
- files_not_to_delete.extend(
- get_filetree_as_list(filetree_left_behind, tmp_path)
- )
- files_to_delete = list(
- set(get_filetree_as_list(initial_filetree, tmp_path)).difference(
- files_not_to_delete
- )
- )
- run_dir = tmp_path / 'cylc-run' / reg
- return run_dir, files_to_delete, files_not_to_delete
- return _filetree_for_testing_cylc_clean
-
-
-@pytest.mark.parametrize(
- 'pattern, initial_filetree, filetree_left_behind',
- [
- pytest.param(
- '**',
- FILETREE_1,
- {
- 'cylc-run': {'foo': {}},
- 'sym': {'cylc-run': {'foo': {'bar': {}}}}
- }
- ),
- pytest.param(
- '*/**',
- FILETREE_1,
- {
- 'cylc-run': {'foo': {'bar': {
- '.service': {'db': None},
- 'flow.cylc': None,
- 'rincewind.txt': Path('whatever')
- }}},
- 'sym': {'cylc-run': {'foo': {'bar': {}}}}
- }
- ),
- pytest.param(
- '**/*.txt',
- FILETREE_1,
- {
- 'cylc-run': {'foo': {'bar': {
- '.service': {'db': None},
- 'flow.cylc': None,
- 'log': Path('whatever'),
- 'mirkwood': Path('whatever')
- }}},
- 'sym': {'cylc-run': {'foo': {'bar': {
- 'log': {
- 'darmok': Path('whatever'),
- 'bib': {}
- }
- }}}}
- }
- )
- ]
-)
-def test__clean_using_glob(
- pattern: str,
- initial_filetree: Dict[str, Any],
- filetree_left_behind: Dict[str, Any],
- filetree_for_testing_cylc_clean: Callable
-) -> None:
- """Test _clean_using_glob(), particularly that it does not follow and
- delete symlinks (apart from the standard symlink dirs).
- Params:
- pattern: The glob pattern to test.
- initial_filetree: The filetree to test against.
- files_left_behind: The filetree expected to remain after
- _clean_using_glob() is called (excluding
- /you-shall-not-pass, which is always expected to remain).
- """
- # --- Setup ---
- run_dir: Path
- files_to_delete: List[str]
- files_not_to_delete: List[str]
- run_dir, files_to_delete, files_not_to_delete = (
- filetree_for_testing_cylc_clean(
- 'foo/bar', initial_filetree, filetree_left_behind)
- )
- # --- Test ---
- _clean_using_glob(run_dir, pattern, symlink_dirs=['log'])
- for file in files_not_to_delete:
- assert os.path.exists(file) is True
- for file in files_to_delete:
- assert os.path.lexists(file) is False
-
-
-@pytest.mark.parametrize(
- 'rm_dirs, initial_filetree, filetree_left_behind',
- [
- pytest.param(
- {'**'},
- FILETREE_1,
- {
- 'cylc-run': {},
- 'sym': {'cylc-run': {}}
- },
- id="filetree1 **"
- ),
- pytest.param(
- {'*/**'},
- FILETREE_1,
- {
- 'cylc-run': {'foo': {'bar': {
- '.service': {'db': None},
- 'flow.cylc': None,
- 'rincewind.txt': Path('whatever')
- }}},
- 'sym': {'cylc-run': {}}
- },
- id="filetree1 */**"
- ),
- pytest.param(
- {'**/*.txt'},
- FILETREE_1,
- {
- 'cylc-run': {'foo': {'bar': {
- '.service': {'db': None},
- 'flow.cylc': None,
- 'log': Path('whatever'),
- 'mirkwood': Path('whatever')
- }}},
- 'sym': {'cylc-run': {'foo': {'bar': {
- 'log': {
- 'darmok': Path('whatever'),
- 'bib': {}
- }
- }}}}
- },
- id="filetree1 **/*.txt"
- ),
- pytest.param(
- {'**/cycle'},
- FILETREE_2,
- {
- 'cylc-run': {'foo': {'bar': Path('sym-run/cylc-run/foo/bar')}},
- 'sym-run': {'cylc-run': {'foo': {'bar': {
- '.service': {'db': None},
- 'flow.cylc': None,
- 'share': Path('sym-share/cylc-run/foo/bar/share')
- }}}},
- 'sym-share': {'cylc-run': {'foo': {'bar': {
- 'share': {}
- }}}},
- 'sym-cycle': {'cylc-run': {}}
- },
- id="filetree2 **/cycle"
- ),
- pytest.param(
- {'share'},
- FILETREE_2,
- {
- 'cylc-run': {'foo': {'bar': Path('sym-run/cylc-run/foo/bar')}},
- 'sym-run': {'cylc-run': {'foo': {'bar': {
- '.service': {'db': None},
- 'flow.cylc': None,
- }}}},
- 'sym-share': {'cylc-run': {}},
- 'sym-cycle': {'cylc-run': {'foo': {'bar': {
- 'share': {
- 'cycle': {
- 'macklunkey.txt': None
- }
- }
- }}}}
- },
- id="filetree2 share"
- ),
- pytest.param(
- {'**'},
- FILETREE_2,
- {
- 'cylc-run': {},
- 'sym-run': {'cylc-run': {}},
- 'sym-share': {'cylc-run': {}},
- 'sym-cycle': {'cylc-run': {}}
- },
- id="filetree2 **"
- ),
- pytest.param(
- {'*'},
- FILETREE_2,
- {
- 'cylc-run': {'foo': {'bar': Path('sym-run/cylc-run/foo/bar')}},
- 'sym-run': {'cylc-run': {'foo': {'bar': {
- '.service': {'db': None},
- }}}},
- 'sym-share': {'cylc-run': {}},
- 'sym-cycle': {'cylc-run': {'foo': {'bar': {
- 'share': {
- 'cycle': {
- 'macklunkey.txt': None
- }
- }
- }}}}
- },
- id="filetree2 *"
- ),
- pytest.param( # Check https://bugs.python.org/issue35201 has no effect
- {'non-exist/**'},
- FILETREE_2,
- FILETREE_2,
- id="filetree2 non-exist/**"
- ),
- pytest.param(
- {'**'},
- FILETREE_3,
- {
- 'cylc-run': {},
- 'sym-run': {'cylc-run': {}},
- 'sym-cycle': {'cylc-run': {}},
- },
- id="filetree3 **"
- ),
- pytest.param(
- {'**'},
- FILETREE_4,
- {
- 'cylc-run': {},
- 'sym-cycle': {'cylc-run': {}},
- },
- id="filetree4 **"
- )
- ],
-)
-def test_clean__targeted(
- rm_dirs: Set[str],
- initial_filetree: Dict[str, Any],
- filetree_left_behind: Dict[str, Any],
- caplog: pytest.LogCaptureFixture, tmp_run_dir: Callable,
- filetree_for_testing_cylc_clean: Callable
-) -> None:
- """Test clean(), particularly that it does not follow and delete symlinks
- (apart from the standard symlink dirs).
-
- This is similar to test__clean_using_glob(), but the filetree expected to
- remain after cleaning is different due to the tidy up of empty dirs.
-
- Params:
- rm_dirs: The glob patterns to test.
- initial_filetree: The filetree to test against.
-        filetree_left_behind: The filetree expected to remain after
- clean() is called (excluding /you-shall-not-pass,
- which is always expected to remain).
- """
- # --- Setup ---
- caplog.set_level(logging.DEBUG, CYLC_LOG)
- tmp_run_dir()
- reg = 'foo/bar'
- run_dir: Path
- files_to_delete: List[str]
- files_not_to_delete: List[str]
- run_dir, files_to_delete, files_not_to_delete = (
- filetree_for_testing_cylc_clean(
- reg, initial_filetree, filetree_left_behind)
- )
- # --- Test ---
- workflow_files.clean(reg, run_dir, rm_dirs)
-    for file in files_not_to_delete:
-        assert os.path.exists(file)
-    for file in files_to_delete:
-        assert not os.path.lexists(file)
-
-
-@pytest.mark.parametrize(
- 'rm_dirs',
- [
- [".."],
- ["foo:.."],
- ["foo/../../meow"]
- ]
-)
-def test_init_clean__targeted_bad(
- rm_dirs: List[str],
- tmp_run_dir: Callable,
- monkeymock: MonkeyMock
-):
- """Test init_clean() fails when abusing --rm option."""
- tmp_run_dir('chalmers')
- mock_clean = monkeymock('cylc.flow.workflow_files.clean')
- mock_remote_clean = monkeymock('cylc.flow.workflow_files.remote_clean')
- with pytest.raises(InputError) as exc_info:
- init_clean('chalmers', opts=CleanOptions(rm_dirs=rm_dirs))
- assert "cannot take paths that point to the run directory or above" in str(
- exc_info.value
- )
- mock_clean.assert_not_called()
- mock_remote_clean.assert_not_called()
-
-
-PLATFORMS = {
- 'enterprise': {
- 'hosts': ['kirk', 'picard'],
- 'install target': 'picard',
- 'name': 'enterprise'
- },
- 'voyager': {
- 'hosts': ['janeway'],
- 'install target': 'janeway',
- 'name': 'voyager'
- },
- 'stargazer': {
- 'hosts': ['picard'],
- 'install target': 'picard',
- 'name': 'stargazer'
- },
- 'exeter': {
- 'hosts': ['localhost'],
- 'install target': 'localhost',
- 'name': 'exeter'
- }
-}
-
-
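-# Sketch of the grouping that remote_clean() works from: platforms sharing an
-# 'install target' are grouped by get_install_target_to_platforms_map(), e.g.
-#   {'picard': [PLATFORMS['enterprise'], PLATFORMS['stargazer']],
-#    'janeway': [PLATFORMS['voyager']],
-#    'localhost': [PLATFORMS['exeter']]}
-# and cleaning only needs to succeed on one platform per install target.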
-@pytest.mark.parametrize(
- ('install_targets_map', 'failed_platforms', 'expected_platforms',
- 'exc_expected', 'expected_err_msgs'),
- [
- pytest.param(
- {'localhost': [PLATFORMS['exeter']]}, None, None, False, [],
- id="Only localhost install target - no remote clean"
- ),
- pytest.param(
- {
- 'localhost': [PLATFORMS['exeter']],
- 'picard': [PLATFORMS['enterprise']]
- },
- None, ['enterprise'], False, [],
- id="Localhost and remote install target"
- ),
- pytest.param(
- {
- 'picard': [PLATFORMS['enterprise'], PLATFORMS['stargazer']],
- 'janeway': [PLATFORMS['voyager']]
- },
- None, ['enterprise', 'voyager'], False, [],
- id="Only remote install targets"
- ),
- pytest.param(
- {
- 'picard': [PLATFORMS['enterprise'], PLATFORMS['stargazer']],
- 'janeway': [PLATFORMS['voyager']]
- },
- {'enterprise': 255},
- ['enterprise', 'stargazer', 'voyager'],
- False,
- [],
- id="Install target with 1 failed, 1 successful platform"
- ),
- pytest.param(
- {
- 'picard': [PLATFORMS['enterprise'], PLATFORMS['stargazer']],
- 'janeway': [PLATFORMS['voyager']]
- },
- {'enterprise': 255, 'stargazer': 255},
- ['enterprise', 'stargazer', 'voyager'],
- True,
- ["Could not clean foo on install target: picard"],
- id="Install target with all failed platforms"
- ),
- pytest.param(
- {
- 'picard': [PLATFORMS['enterprise']],
- 'janeway': [PLATFORMS['voyager']]
- },
- {'enterprise': 255, 'voyager': 255},
- ['enterprise', 'voyager'],
- True,
- ["Could not clean foo on install target: picard",
- "Could not clean foo on install target: janeway"],
- id="All install targets have all failed platforms"
- ),
- pytest.param(
- {
- 'picard': [PLATFORMS['enterprise'], PLATFORMS['stargazer']]
- },
- {'enterprise': 1},
- ['enterprise'],
- True,
- ["Could not clean foo on install target: picard"],
- id=("Remote clean cmd fails on a platform for non-SSH reason - "
- "does not retry")
- ),
- ]
-)
-def test_remote_clean(
- install_targets_map: Dict[str, Any],
- failed_platforms: Optional[Dict[str, int]],
- expected_platforms: Optional[List[str]],
- exc_expected: bool,
- expected_err_msgs: List[str],
- monkeymock: MonkeyMock, monkeypatch: pytest.MonkeyPatch,
- caplog: pytest.LogCaptureFixture, log_filter: Callable
-) -> None:
- """Test remote_clean() logic.
-
- Params:
-        install_targets_map: The map that would be returned by
- platforms.get_install_target_to_platforms_map()
- failed_platforms: If specified, any platforms that clean will
- artificially fail on in this test case. The key is the platform
- name, the value is the remote clean cmd return code.
- expected_platforms: If specified, all the platforms that the
- remote clean cmd is expected to run on.
- exc_expected: If a CylcError is expected to be raised.
- expected_err_msgs: List of error messages expected to be in the log.
- """
- # ----- Setup -----
- caplog.set_level(logging.DEBUG, CYLC_LOG)
- monkeypatch.setattr(
- 'cylc.flow.workflow_files.get_install_target_to_platforms_map',
- lambda x: install_targets_map)
- # Remove randomness:
- monkeymock('cylc.flow.workflow_files.shuffle')
-
- def mocked_remote_clean_cmd_side_effect(reg, platform, rm_dirs, timeout):
- proc_ret_code = 0
- if failed_platforms and platform['name'] in failed_platforms:
- proc_ret_code = failed_platforms[platform['name']]
- return mock.Mock(
- poll=lambda: proc_ret_code,
- communicate=lambda: ("Mocked stdout", "Mocked stderr"),
- args=[]
- )
-
- mocked_remote_clean_cmd = monkeymock(
- 'cylc.flow.workflow_files._remote_clean_cmd',
- spec=_remote_clean_cmd,
- side_effect=mocked_remote_clean_cmd_side_effect)
- rm_dirs = ["whatever"]
- # ----- Test -----
- reg = 'foo'
- platform_names = (
- "This arg bypassed as we provide the install targets map in the test")
- if exc_expected:
- with pytest.raises(CylcError) as exc:
- workflow_files.remote_clean(
- reg, platform_names, rm_dirs, timeout='irrelevant')
- assert "Remote clean failed" in str(exc.value)
- else:
- workflow_files.remote_clean(
- reg, platform_names, rm_dirs, timeout='irrelevant')
- for msg in expected_err_msgs:
- assert log_filter(caplog, level=logging.ERROR, contains=msg)
- if expected_platforms:
- for p_name in expected_platforms:
- mocked_remote_clean_cmd.assert_any_call(
- reg, PLATFORMS[p_name], rm_dirs, 'irrelevant')
- else:
- mocked_remote_clean_cmd.assert_not_called()
- if failed_platforms:
- for p_name in failed_platforms:
- assert f"{p_name} - {PlatformError.MSG_TIDY}" in caplog.text
-
-
-@pytest.mark.parametrize(
- 'rm_dirs, expected_args',
- [
- (None, []),
- (['holodeck', 'ten_forward'],
- ['--rm', 'holodeck', '--rm', 'ten_forward'])
- ]
-)
-def test_remote_clean_cmd(
- rm_dirs: Optional[List[str]],
- expected_args: List[str],
- monkeymock: MonkeyMock
-) -> None:
- """Test _remote_clean_cmd()
-
- Params:
- rm_dirs: Argument passed to _remote_clean_cmd().
- expected_args: Expected CLI arguments of the cylc clean command that
- gets constructed.
- """
- reg = 'jean/luc/picard'
- platform = {
- 'name': 'enterprise',
- 'install target': 'mars',
- 'hosts': ['Trill'],
- 'selection': {'method': 'definition order'}
- }
- mock_construct_ssh_cmd = monkeymock(
- 'cylc.flow.workflow_files.construct_ssh_cmd', return_value=['blah'])
- monkeymock('cylc.flow.workflow_files.Popen')
-
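-    # The remote end should receive `cylc clean --local-only <reg>` plus any
-    # --rm args, wrapped in an SSH command built by construct_ssh_cmd()
-    # (see the assertion below).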
- workflow_files._remote_clean_cmd(reg, platform, rm_dirs, timeout='dunno')
- args, kwargs = mock_construct_ssh_cmd.call_args
- constructed_cmd = args[0]
- assert constructed_cmd == ['clean', '--local-only', reg, *expected_args]
-
-
-def test_clean_top_level(tmp_run_dir: Callable):
- """Test that cleaning last remaining run dir inside a workflow dir removes
- the top level dir if it's empty (excluding _cylc-install)."""
- # Setup
- reg = 'blue/planet/run1'
- run_dir: Path = tmp_run_dir(reg, installed=True, named=True)
- cylc_install_dir = run_dir.parent / WorkflowFiles.Install.DIRNAME
- assert cylc_install_dir.is_dir()
- runN_symlink = run_dir.parent / WorkflowFiles.RUN_N
- assert runN_symlink.exists()
- # Test
- clean(reg, run_dir)
- assert not run_dir.parent.parent.exists()
- # Now check that if the top level dir is not empty, it doesn't get removed
- run_dir: Path = tmp_run_dir(reg, installed=True, named=True)
- jellyfish_file = (run_dir.parent / 'jellyfish.txt')
- jellyfish_file.touch()
- clean(reg, run_dir)
- assert cylc_install_dir.is_dir()
- assert jellyfish_file.exists()
-
-
-def test_get_workflow_source_dir_numbered_run(tmp_path):
- """Test get_workflow_source_dir returns correct source for numbered run"""
- cylc_install_dir = (
- tmp_path /
- "cylc-run" /
- "flow-name" /
- "_cylc-install")
- cylc_install_dir.mkdir(parents=True)
- run_dir = (tmp_path / "cylc-run" / "flow-name" / "run1")
- run_dir.mkdir()
- source_dir = (tmp_path / "cylc-source" / "flow-name")
- source_dir.mkdir(parents=True)
- assert get_workflow_source_dir(run_dir) == (None, None)
- (cylc_install_dir / "source").symlink_to(source_dir)
- assert get_workflow_source_dir(run_dir) == (
- str(source_dir), cylc_install_dir / "source")
-
-
-def test_get_workflow_source_dir_named_run(tmp_path):
- """Test get_workflow_source_dir returns correct source for named run"""
- cylc_install_dir = (
- tmp_path /
- "cylc-run" /
- "flow-name" /
- "_cylc-install")
- cylc_install_dir.mkdir(parents=True)
- source_dir = (tmp_path / "cylc-source" / "flow-name")
- source_dir.mkdir(parents=True)
- (cylc_install_dir / "source").symlink_to(source_dir)
- assert get_workflow_source_dir(
- cylc_install_dir.parent) == (
- str(source_dir),
- cylc_install_dir / "source")
-
-
-def test_reinstall_workflow(tmp_path, capsys):
-    """Test that reinstall_workflow() reports the reinstall to stdout."""
- cylc_install_dir = (
- tmp_path /
- "cylc-run" /
- "flow-name" /
- "_cylc-install")
- cylc_install_dir.mkdir(parents=True)
- source_dir = (tmp_path / "cylc-source" / "flow-name")
- source_dir.mkdir(parents=True)
- (source_dir / "flow.cylc").touch()
-
- (cylc_install_dir / "source").symlink_to(source_dir)
- run_dir = cylc_install_dir.parent
- reinstall_workflow(source_dir, "flow-name", run_dir)
- assert capsys.readouterr().out == (
- f"REINSTALLED flow-name from {source_dir}\n")
-
-
-@pytest.mark.parametrize(
- 'filename, expected_err',
- [('flow.cylc', None),
- ('suite.rc', None),
- ('fluff.txt', (WorkflowFilesError, "Could not find workflow 'baa/baa'"))]
-)
-def test_search_install_source_dirs(
- filename: str, expected_err: Optional[Tuple[Type[Exception], str]],
- tmp_path: Path, mock_glbl_cfg: Callable):
- """Test search_install_source_dirs().
-
- Params:
- filename: A file to insert into one of the source dirs.
- expected_err: Exception and message expected to be raised.
- """
- horse_dir = tmp_path / 'horse'
- horse_dir.mkdir()
- sheep_dir = tmp_path / 'sheep'
- source_dir = sheep_dir / 'baa' / 'baa'
- source_dir.mkdir(parents=True)
- source_dir_file = source_dir / filename
- source_dir_file.touch()
- mock_glbl_cfg(
- 'cylc.flow.workflow_files.glbl_cfg',
- f'''
- [install]
- source dirs = {horse_dir}, {sheep_dir}
- '''
- )
- if expected_err:
- err, msg = expected_err
- with pytest.raises(err) as exc:
- search_install_source_dirs('baa/baa')
- assert msg in str(exc.value)
- else:
- ret = search_install_source_dirs('baa/baa')
- assert ret == source_dir
- assert ret.is_absolute()
-
-
-def test_search_install_source_dirs_empty(mock_glbl_cfg: Callable):
- """Test search_install_source_dirs() when no source dirs configured."""
- mock_glbl_cfg(
- 'cylc.flow.workflow_files.glbl_cfg',
- '''
- [install]
- source dirs =
- '''
- )
- with pytest.raises(WorkflowFilesError) as exc:
- search_install_source_dirs('foo')
- assert str(exc.value) == (
- "Cannot find workflow as 'global.cylc[install]source dirs' "
- "does not contain any paths")
-
-
-@pytest.mark.parametrize(
- 'path, expected',
- [
- ('~/isla/nublar/dennis/nedry', 'dennis/nedry'),
- ('~/isla/sorna/paul/kirby', 'paul/kirby'),
- ('~/mos/eisley/owen/skywalker', 'skywalker')
- ]
-)
-def test_get_source_workflow_name(
- path: str,
- expected: str,
- mock_glbl_cfg: Callable
-):
- mock_glbl_cfg(
- 'cylc.flow.workflow_files.glbl_cfg',
- '''
- [install]
- source dirs = ~/isla/nublar, ${HOME}/isla/sorna
- '''
- )
- assert get_source_workflow_name(
- Path(path).expanduser().resolve()) == expected
@pytest.mark.parametrize(
@@ -1736,51 +483,6 @@ def test_check_flow_file_symlink(
assert result == run_dir / expected_file
-@pytest.mark.parametrize(
- 'symlink_dirs, err_msg, expected',
- [
- ('log=$shortbread, share= $bourbon,share/cycle= $digestive, ',
- "There is an error in --symlink-dirs option:",
- None
- ),
- ('log=$shortbread share= $bourbon share/cycle= $digestive ',
- "There is an error in --symlink-dirs option:"
- " log=$shortbread share= $bourbon share/cycle= $digestive . "
- "Try entering option in the form --symlink-dirs="
- "'log=$DIR, share=$DIR2, ...'",
- None
- ),
- ('run=$NICE, log= $Garibaldi, share/cycle=$RichTea', None,
- {'localhost': {
- 'run': '$NICE',
- 'log': '$Garibaldi',
- 'share/cycle': '$RichTea'
- }}
- ),
- ('some_other_dir=$bourbon',
- 'some_other_dir not a valid entry for --symlink-dirs',
-         {'some_other_dir': '$bourbon'}
- ),
- ]
-)
-def test_parse_cli_sym_dirs(
- symlink_dirs: str,
- err_msg: str,
- expected: Dict[str, Dict[str, Any]]
-):
- """Test parse_cli_sym_dirs returns dict or correctly raises errors on cli
- symlink dir options"""
- if err_msg is not None:
- with pytest.raises(InputError) as exc:
- parse_cli_sym_dirs(symlink_dirs)
-        assert err_msg in str(exc.value)
-    else:
-        actual = parse_cli_sym_dirs(symlink_dirs)
-        assert actual == expected
-
-
@pytest.mark.parametrize(
'reg, installed, named, expected',
[('reg1/run1', True, True, True),
@@ -1794,256 +496,6 @@ def test_is_installed(tmp_run_dir: Callable, reg, installed, named, expected):
assert actual == expected
-def test_get_rsync_rund_cmd(
- tmp_src_dir: Callable,
- tmp_run_dir: Callable
-):
- """Test rsync command for cylc install/reinstall excludes cylc dirs.
- """
- src_dir = tmp_src_dir('foo')
- cylc_run_dir: Path = tmp_run_dir('rsync_flow', installed=True, named=False)
- for wdir in [
- WorkflowFiles.WORK_DIR,
- WorkflowFiles.SHARE_DIR,
- WorkflowFiles.LogDir.DIRNAME,
- ]:
- cylc_run_dir.joinpath(wdir).mkdir(exist_ok=True)
- actual_cmd = get_rsync_rund_cmd(src_dir, cylc_run_dir)
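-    # Expected flags, roughly: '-a' = archive mode; '--checksum' = compare by
-    # content rather than mod time; '--out-format=%o %n%L' = print operation,
-    # file name and any symlink target; the '--exclude' rules keep rsync away
-    # from the dirs managed by Cylc itself.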
- assert actual_cmd == [
- 'rsync', '-a', '--checksum', '--out-format=%o %n%L', '--no-t',
- '--exclude=/log', '--exclude=/work', '--exclude=/share',
- '--exclude=/_cylc-install', '--exclude=/.service',
- f'{src_dir}/', f'{cylc_run_dir}/']
-
-
-@pytest.mark.parametrize(
- 'expect, dirs',
- [
- (['run1'], ['run1', 'run2']),
- (['run1', 'run11'], ['run1', 'run11', 'run2']),
- (['run1200'], ['run1200', 'run1201']),
- (['foo'], ['foo', 'bar']),
- ]
-)
-def test_delete_runN(tmp_path, expect, dirs):
- """It deletes the runN symlink.
- """
- for dir_ in dirs:
- (tmp_path / dir_).mkdir()
- if re.findall(r'run\d*', dirs[-1]):
- (Path(tmp_path / 'runN')).symlink_to(dirs[-1])
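-    # Cleaning the run dir that runN points at should also remove the (now
-    # dangling) runN symlink: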
- clean(str(tmp_path.name) + '/' + dirs[-1], tmp_path / dirs[-1])
- assert sorted([i.stem for i in tmp_path.glob('*')]) == sorted(expect)
-
-
-def test_delete_runN_skipif_cleanedrun_not_runN(tmp_path):
- """It doesn't delete the symlink dir to be cleaned is not runN"""
- for folder in ['run1', 'run2']:
- (tmp_path / folder).mkdir()
- (tmp_path / 'runN').symlink_to(tmp_path / 'run2')
- clean(str(tmp_path.name) + '/' + 'run1', tmp_path / 'run1')
- assert sorted([i.stem for i in tmp_path.glob('*')]) == ['run2', 'runN']
-
-
-@pytest.mark.parametrize(
- 'workflow_name, err_expected',
- [
- ('foo/' * (MAX_SCAN_DEPTH - 1), False),
- ('foo/' * MAX_SCAN_DEPTH, True) # /run1 takes it beyond max depth
- ]
-)
-def test_install_workflow__max_depth(
- workflow_name: str,
- err_expected: bool,
- tmp_run_dir: Callable,
- tmp_src_dir: Callable,
- glbl_cfg_max_scan_depth: NonCallableFixture
-):
- """Test that trying to install beyond max depth fails."""
- tmp_run_dir()
- src_dir = tmp_src_dir('bar')
- if err_expected:
- with pytest.raises(WorkflowFilesError) as exc_info:
- install_workflow(src_dir, workflow_name)
- assert "would exceed global.cylc[install]max depth" in str(
- exc_info.value
- )
- else:
- install_workflow(src_dir, workflow_name)
-
-
-@pytest.mark.parametrize(
- 'flow_file, expected_exc',
- [
- (WorkflowFiles.FLOW_FILE, WorkflowFilesError),
- (WorkflowFiles.SUITE_RC, WorkflowFilesError),
- (None, None)
- ]
-)
-def test_install_workflow__next_to_flow_file(
- flow_file: Optional[str],
- expected_exc: Optional[Type[Exception]],
- tmp_run_dir: Callable,
- tmp_src_dir: Callable
-):
- """Test that you can't install into a dir that contains a workflow file."""
- # Setup
- cylc_run_dir: Path = tmp_run_dir()
- workflow_dir = cylc_run_dir / 'faden'
- workflow_dir.mkdir()
- src_dir: Path = tmp_src_dir('faden')
- if flow_file:
- (workflow_dir / flow_file).touch()
- # Test
- if expected_exc:
- with pytest.raises(expected_exc) as exc_info:
- install_workflow(src_dir, 'faden')
- assert "Nested run directories not allowed" in str(exc_info.value)
- else:
- install_workflow(src_dir, 'faden')
-
-
-def test_install_workflow__symlink_target_exists(
- tmp_path: Path,
- tmp_src_dir: Callable,
- tmp_run_dir: Callable,
- mock_glbl_cfg: Callable,
-):
- """Test that you can't install workflow when run dir symlink dir target
- already exists."""
- reg = 'smeagol'
- src_dir: Path = tmp_src_dir(reg)
- tmp_run_dir()
- sym_run = tmp_path / 'sym-run'
- sym_log = tmp_path / 'sym-log'
- mock_glbl_cfg(
- 'cylc.flow.pathutil.glbl_cfg',
- f'''
- [install]
- [[symlink dirs]]
- [[[localhost]]]
- run = {sym_run}
- log = {sym_log}
- '''
- )
- msg = "Symlink dir target already exists: .*{}"
- # Test:
- (sym_run / 'cylc-run' / reg / 'run1').mkdir(parents=True)
- with pytest.raises(WorkflowFilesError, match=msg.format(sym_run)):
- install_workflow(src_dir)
-
- shutil.rmtree(sym_run)
- (
- sym_log / 'cylc-run' / reg / 'run1' / WorkflowFiles.LogDir.DIRNAME
- ).mkdir(parents=True)
- with pytest.raises(WorkflowFilesError, match=msg.format(sym_log)):
- install_workflow(src_dir)
-
-
-def test_validate_source_dir(tmp_run_dir: Callable, tmp_src_dir: Callable):
- cylc_run_dir: Path = tmp_run_dir()
- src_dir: Path = tmp_src_dir('ludlow')
- validate_source_dir(src_dir, 'ludlow')
- # Test that src dir must have flow file
- (src_dir / WorkflowFiles.FLOW_FILE).unlink()
- with pytest.raises(WorkflowFilesError):
- validate_source_dir(src_dir, 'ludlow')
- # Test that reserved dirnames not allowed in src dir
- src_dir = tmp_src_dir('roland')
- (src_dir / 'log').mkdir()
- with pytest.raises(WorkflowFilesError) as exc_info:
- validate_source_dir(src_dir, 'roland')
- assert "exists in source directory" in str(exc_info.value)
- # Test that src dir is allowed to be inside ~/cylc-run
- src_dir = cylc_run_dir / 'dieter'
- src_dir.mkdir()
- (src_dir / WorkflowFiles.FLOW_FILE).touch()
- validate_source_dir(src_dir, 'dieter')
- # Test that src dir is not allowed to be an installed dir.
- src_dir = cylc_run_dir / 'ajay'
- src_dir.mkdir()
- (src_dir / WorkflowFiles.Install.DIRNAME).mkdir()
- (src_dir / WorkflowFiles.FLOW_FILE).touch()
- with pytest.raises(WorkflowFilesError) as exc_info:
- validate_source_dir(src_dir, 'ajay')
- assert "exists in source directory" in str(exc_info.value)
-
-
-@pytest.mark.parametrize(
- 'args, expected_relink, expected_run_num, expected_run_dir',
- [
- (
- ['{cylc_run}/numbered', None, False],
- True, 1, '{cylc_run}/numbered/run1'
- ),
- (
- ['{cylc_run}/named', 'dukat', False],
- False, None, '{cylc_run}/named/dukat'
- ),
- (
- ['{cylc_run}/unnamed', None, True],
- False, None, '{cylc_run}/unnamed'
- ),
- ]
-)
-def test_get_run_dir_info(
- args: list,
- expected_relink: bool,
- expected_run_num: Optional[int],
- expected_run_dir: Union[Path, str],
- tmp_run_dir: Callable
-):
- """Test get_run_dir_info().
-
- Params:
- args: Input args to function.
- expected_*: Expected return values.
- """
- # Setup
- cylc_run_dir: Path = tmp_run_dir()
- sub = lambda x: Path(x.format(cylc_run=cylc_run_dir)) # noqa: E731
- args[0] = sub(args[0])
- expected_run_dir = sub(expected_run_dir)
- # Test
- assert get_run_dir_info(*args) == (
- expected_relink, expected_run_num, expected_run_dir
- )
- assert expected_run_dir.is_absolute()
-
-
-def test_get_run_dir_info__increment_run_num(tmp_run_dir: Callable):
- """Test that get_run_dir_info() increments run number and unlinks runN."""
- # Setup
- cylc_run_dir: Path = tmp_run_dir()
- run_dir: Path = tmp_run_dir('gowron/run1')
- runN = run_dir.parent / WorkflowFiles.RUN_N
- assert os.path.lexists(runN)
- # Test
- assert get_run_dir_info(cylc_run_dir / 'gowron', None, False) == (
- True, 2, cylc_run_dir / 'gowron' / 'run2'
- )
- assert not os.path.lexists(runN)
-
-
-def test_get_run_dir_info__fail(tmp_run_dir: Callable):
- # Test that you can't install named runs when numbered runs exist
- cylc_run_dir: Path = tmp_run_dir()
- run_dir: Path = tmp_run_dir('martok/run1')
- with pytest.raises(WorkflowFilesError) as excinfo:
- get_run_dir_info(run_dir.parent, 'targ', False)
- assert "contains installed numbered runs" in str(excinfo.value)
-
- # Test that you can install numbered run in an empty dir
- base_dir = cylc_run_dir / 'odo'
- base_dir.mkdir()
- get_run_dir_info(base_dir, None, False)
- # But not when named runs exist
- tmp_run_dir('odo/meter')
- with pytest.raises(WorkflowFilesError) as excinfo:
- get_run_dir_info(base_dir, None, False)
- assert "contains an installed workflow"
-
-
def test_validate_abort_if_flow_file_in_path():
assert abort_if_flow_file_in_path(Path("path/to/wflow")) is None
with pytest.raises(InputError) as exc_info:
diff --git a/tests/unit/test_xtrigger_mgr.py b/tests/unit/test_xtrigger_mgr.py
index ec547364f0c..89cc0024ddf 100644
--- a/tests/unit/test_xtrigger_mgr.py
+++ b/tests/unit/test_xtrigger_mgr.py
@@ -20,6 +20,7 @@
from cylc.flow import CYLC_LOG
from cylc.flow.cycling.iso8601 import ISO8601Point, ISO8601Sequence, init
from cylc.flow.exceptions import XtriggerConfigError
+from cylc.flow.id import Tokens
from cylc.flow.subprocctx import SubFuncContext
from cylc.flow.task_proxy import TaskProxy
from cylc.flow.taskdef import TaskDef
@@ -158,7 +159,7 @@ def test_housekeeping_with_xtrigger_satisfied(xtrigger_mgr):
sequence = ISO8601Sequence('P1D', '2019')
tdef.xtrig_labels[sequence] = ["get_name"]
start_point = ISO8601Point('2019')
- itask = TaskProxy(tdef, start_point)
+ itask = TaskProxy(Tokens('~user/workflow'), tdef, start_point)
# pretend the function has been activated
xtrigger_mgr.active.append(xtrig.get_signature())
xtrigger_mgr.callback(xtrig)
@@ -205,7 +206,7 @@ def test__call_xtriggers_async(xtrigger_mgr):
init()
start_point = ISO8601Point('2019')
# create task proxy
- itask = TaskProxy(tdef, start_point)
+ itask = TaskProxy(Tokens('~user/workflow'), tdef, start_point)
# we start with no satisfied xtriggers, and nothing active
assert len(xtrigger_mgr.sat_xtrig) == 0
@@ -306,7 +307,7 @@ def test_check_xtriggers(xtrigger_mgr):
sequence = ISO8601Sequence('P1D', '2019')
tdef1.xtrig_labels[sequence] = ["get_name"]
start_point = ISO8601Point('2019')
- itask1 = TaskProxy(tdef1, start_point)
+ itask1 = TaskProxy(Tokens('~user/workflow'), tdef1, start_point)
itask1.state.xtriggers["get_name"] = False # satisfied?
# add a clock xtrigger
@@ -330,7 +331,7 @@ def test_check_xtriggers(xtrigger_mgr):
init()
start_point = ISO8601Point('20000101T0000+05')
# create task proxy
- TaskProxy(tdef2, start_point)
+ TaskProxy(Tokens('~user/workflow'), tdef2, start_point)
xtrigger_mgr.check_xtriggers(itask1, lambda foo: None)
    # won't be satisfied, as it is async, we are not calling the callback