diff --git a/changes.d/6039.feat.md b/changes.d/6039.feat.md
new file mode 100644
index 00000000000..d3a39814e2c
--- /dev/null
+++ b/changes.d/6039.feat.md
@@ -0,0 +1 @@
+Add a new task run mode "skip" which overrides workflow live mode task submission.
\ No newline at end of file
diff --git a/cylc/flow/cfgspec/workflow.py b/cylc/flow/cfgspec/workflow.py
index 934897bdbb4..dce1b0316a0 100644
--- a/cylc/flow/cfgspec/workflow.py
+++ b/cylc/flow/cfgspec/workflow.py
@@ -57,6 +57,8 @@
     fail_if_platform_and_host_conflict,
     get_platform_deprecated_settings,
     is_platform_definition_subshell)
 from cylc.flow.task_events_mgr import EventData
+from cylc.flow.task_state import RunMode
+
 # Regex to check whether a string is a command
 REC_COMMAND = re.compile(r'(`|\$\()\s*(.*)\s*([`)])$')
@@ -1334,6 +1336,27 @@ def get_script_common_text(this: str, example: Optional[str] = None):
                 "[platforms][]submission retry delays"
             )
         )
+        Conf(
+            'run mode', VDR.V_STRING,
+            options=list(RunMode.OVERRIDING_MODES.value) + [''],
+            default='',
+            desc=f'''
+                For a workflow run in live mode, run this task in the
+                mode specified here.
+
+                {RunMode.LIVE.value}:
+                    {RunMode.LIVE.describe()}
+                {RunMode.SKIP.value}:
+                    {RunMode.SKIP.describe()}
+
+                .. seealso::
+
+                    :ref:`task-run-modes`
+
+                .. versionadded:: 8.4.0
+            ''')
     with Conf('meta', desc=r'''
         Metadata for the task or task family.
@@ -1406,7 +1429,43 @@ def get_script_common_text(this: str, example: Optional[str] = None):
             determine how an event handler responds to task failure
             events.
         ''')
+    with Conf('skip', desc='''
+        Task configuration for the skip task run mode.
+
+        For a full description of skip run mode see
+        :ref:`task-run-modes.skip`.
+
+        .. versionadded:: 8.4.0
+    '''):
+        Conf(
+            'outputs',
+            VDR.V_STRING_LIST,
+            desc='''
+                Outputs to be emitted by a task in skip mode.
+
+                * By default, all required outputs will be generated,
+                  plus succeeded if success is optional.
+                * If skip-mode outputs are specified and include neither
+                  succeeded nor failed, then succeeded will be produced.
+                * The outputs submitted and started are always produced
+                  and do not need to be defined in outputs.
+
+                .. versionadded:: 8.4.0
+            '''
+        )
+        Conf(
+            'disable task event handlers',
+            VDR.V_BOOLEAN,
+            default=True,
+            desc='''
+                Task event handlers are turned off by default for
+                skip mode tasks. Changing this setting to ``False``
+                will re-enable task event handlers.
+
+                .. versionadded:: 8.4.0
+            '''
+        )
     with Conf('simulation', desc='''
         Task configuration for workflow *simulation* and *dummy* run modes.
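Taken together, the spec additions above allow a single task to be forced into skip mode while the rest of the workflow runs live. A minimal flow.cylc sketch of the intended usage, per the config spec (the task names prep and process and the custom output file_ready are hypothetical):

    [scheduling]
        [[graph]]
            R1 = "prep:file_ready => process"

    [runtime]
        [[prep]]
            # Run this task in skip mode even when the workflow runs live.
            run mode = skip
            [[[skip]]]
                # file_ready is emitted; succeeded is also produced because
                # neither succeeded nor failed is listed here.
                outputs = file_ready
            [[[outputs]]]
                file_ready = "input file staged"
        [[process]]
            script = echo run
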
diff --git a/cylc/flow/commands.py b/cylc/flow/commands.py index 173984f17e0..5e013d89c2c 100644 --- a/cylc/flow/commands.py +++ b/cylc/flow/commands.py @@ -77,8 +77,9 @@ from cylc.flow.network.schema import WorkflowStopMode from cylc.flow.parsec.exceptions import ParsecError from cylc.flow.task_id import TaskID -from cylc.flow.task_state import TASK_STATUSES_ACTIVE, TASK_STATUS_FAILED -from cylc.flow.workflow_status import RunMode, StopMode +from cylc.flow.task_state import ( + TASK_STATUSES_ACTIVE, TASK_STATUS_FAILED, RunMode) +from cylc.flow.workflow_status import StopMode from metomi.isodatetime.parsers import TimePointParser @@ -247,7 +248,7 @@ async def poll_tasks(schd: 'Scheduler', tasks: Iterable[str]): """Poll pollable tasks or a task or family if options are provided.""" validate.is_tasks(tasks) yield - if schd.get_run_mode() == RunMode.SIMULATION: + if schd.get_run_mode() == RunMode.SIMULATION.value: yield 0 itasks, _, bad_items = schd.pool.filter_task_proxies(tasks) schd.task_job_mgr.poll_task_jobs(schd.workflow, itasks) @@ -260,7 +261,7 @@ async def kill_tasks(schd: 'Scheduler', tasks: Iterable[str]): validate.is_tasks(tasks) yield itasks, _, bad_items = schd.pool.filter_task_proxies(tasks) - if schd.get_run_mode() == RunMode.SIMULATION: + if schd.get_run_mode() == RunMode.SIMULATION.value: for itask in itasks: if itask.state(*TASK_STATUSES_ACTIVE): itask.state_reset(TASK_STATUS_FAILED) diff --git a/cylc/flow/config.py b/cylc/flow/config.py index df27078ee44..9ae2a3e132c 100644 --- a/cylc/flow/config.py +++ b/cylc/flow/config.py @@ -82,7 +82,7 @@ ) from cylc.flow.print_tree import print_tree from cylc.flow.task_qualifiers import ALT_QUALIFIERS -from cylc.flow.simulation import configure_sim_modes +from cylc.flow.run_modes.nonlive import run_mode_validate_checks from cylc.flow.subprocctx import SubFuncContext from cylc.flow.task_events_mgr import ( EventData, @@ -99,6 +99,7 @@ get_trigger_completion_variable_maps, trigger_to_completion_variable, ) +from cylc.flow.task_state import RunMode from cylc.flow.task_trigger import TaskTrigger, Dependency from cylc.flow.taskdef import TaskDef from cylc.flow.unicode_rules import ( @@ -114,7 +115,6 @@ WorkflowFiles, check_deprecation, ) -from cylc.flow.workflow_status import RunMode from cylc.flow.xtrigger_mgr import XtriggerCollator if TYPE_CHECKING: @@ -513,10 +513,6 @@ def __init__( self.process_runahead_limit() - run_mode = self.run_mode() - if run_mode in {RunMode.SIMULATION, RunMode.DUMMY}: - configure_sim_modes(self.taskdefs.values(), run_mode) - self.configure_workflow_state_polling_tasks() self._check_task_event_handlers() @@ -567,6 +563,8 @@ def __init__( self.mem_log("config.py: end init config") + run_mode_validate_checks(self.taskdefs) + @staticmethod def _warn_if_queues_have_implicit_tasks( config, taskdefs, max_warning_lines diff --git a/cylc/flow/data_messages.proto b/cylc/flow/data_messages.proto index c0af5094c0d..f259a735f0a 100644 --- a/cylc/flow/data_messages.proto +++ b/cylc/flow/data_messages.proto @@ -128,6 +128,7 @@ message PbRuntime { optional string environment = 16; optional string outputs = 17; optional string completion = 18; + optional string run_mode = 19; } diff --git a/cylc/flow/data_messages_pb2.py b/cylc/flow/data_messages_pb2.py index 7fb5ae84d24..0f16888d6bd 100644 --- a/cylc/flow/data_messages_pb2.py +++ b/cylc/flow/data_messages_pb2.py @@ -14,7 +14,7 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13\x64\x61ta_messages.proto\"\x96\x01\n\x06PbMeta\x12\x12\n\x05title\x18\x01 
\x01(\tH\x00\x88\x01\x01\x12\x18\n\x0b\x64\x65scription\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x10\n\x03URL\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x19\n\x0cuser_defined\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_titleB\x0e\n\x0c_descriptionB\x06\n\x04_URLB\x0f\n\r_user_defined\"\xaa\x01\n\nPbTimeZone\x12\x12\n\x05hours\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x14\n\x07minutes\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x19\n\x0cstring_basic\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1c\n\x0fstring_extended\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_hoursB\n\n\x08_minutesB\x0f\n\r_string_basicB\x12\n\x10_string_extended\"\'\n\x0fPbTaskProxyRefs\x12\x14\n\x0ctask_proxies\x18\x01 \x03(\t\"\xd4\x0c\n\nPbWorkflow\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06status\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x11\n\x04host\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x11\n\x04port\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x12\n\x05owner\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\r\n\x05tasks\x18\x08 \x03(\t\x12\x10\n\x08\x66\x61milies\x18\t \x03(\t\x12\x1c\n\x05\x65\x64ges\x18\n \x01(\x0b\x32\x08.PbEdgesH\x07\x88\x01\x01\x12\x18\n\x0b\x61pi_version\x18\x0b \x01(\x05H\x08\x88\x01\x01\x12\x19\n\x0c\x63ylc_version\x18\x0c \x01(\tH\t\x88\x01\x01\x12\x19\n\x0clast_updated\x18\r \x01(\x01H\n\x88\x01\x01\x12\x1a\n\x04meta\x18\x0e \x01(\x0b\x32\x07.PbMetaH\x0b\x88\x01\x01\x12&\n\x19newest_active_cycle_point\x18\x10 \x01(\tH\x0c\x88\x01\x01\x12&\n\x19oldest_active_cycle_point\x18\x11 \x01(\tH\r\x88\x01\x01\x12\x15\n\x08reloaded\x18\x12 \x01(\x08H\x0e\x88\x01\x01\x12\x15\n\x08run_mode\x18\x13 \x01(\tH\x0f\x88\x01\x01\x12\x19\n\x0c\x63ycling_mode\x18\x14 \x01(\tH\x10\x88\x01\x01\x12\x32\n\x0cstate_totals\x18\x15 \x03(\x0b\x32\x1c.PbWorkflow.StateTotalsEntry\x12\x1d\n\x10workflow_log_dir\x18\x16 \x01(\tH\x11\x88\x01\x01\x12(\n\x0etime_zone_info\x18\x17 \x01(\x0b\x32\x0b.PbTimeZoneH\x12\x88\x01\x01\x12\x17\n\ntree_depth\x18\x18 \x01(\x05H\x13\x88\x01\x01\x12\x15\n\rjob_log_names\x18\x19 \x03(\t\x12\x14\n\x0cns_def_order\x18\x1a \x03(\t\x12\x0e\n\x06states\x18\x1b \x03(\t\x12\x14\n\x0ctask_proxies\x18\x1c \x03(\t\x12\x16\n\x0e\x66\x61mily_proxies\x18\x1d \x03(\t\x12\x17\n\nstatus_msg\x18\x1e \x01(\tH\x14\x88\x01\x01\x12\x1a\n\ris_held_total\x18\x1f \x01(\x05H\x15\x88\x01\x01\x12\x0c\n\x04jobs\x18 \x03(\t\x12\x15\n\x08pub_port\x18! 
\x01(\x05H\x16\x88\x01\x01\x12\x17\n\nbroadcasts\x18\" \x01(\tH\x17\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18# \x01(\x05H\x18\x88\x01\x01\x12=\n\x12latest_state_tasks\x18$ \x03(\x0b\x32!.PbWorkflow.LatestStateTasksEntry\x12\x13\n\x06pruned\x18% \x01(\x08H\x19\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18& \x01(\x05H\x1a\x88\x01\x01\x12\x1b\n\x0estates_updated\x18\' \x01(\x08H\x1b\x88\x01\x01\x12\x1c\n\x0fn_edge_distance\x18( \x01(\x05H\x1c\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1aI\n\x15LatestStateTasksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1f\n\x05value\x18\x02 \x01(\x0b\x32\x10.PbTaskProxyRefs:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\t\n\x07_statusB\x07\n\x05_hostB\x07\n\x05_portB\x08\n\x06_ownerB\x08\n\x06_edgesB\x0e\n\x0c_api_versionB\x0f\n\r_cylc_versionB\x0f\n\r_last_updatedB\x07\n\x05_metaB\x1c\n\x1a_newest_active_cycle_pointB\x1c\n\x1a_oldest_active_cycle_pointB\x0b\n\t_reloadedB\x0b\n\t_run_modeB\x0f\n\r_cycling_modeB\x13\n\x11_workflow_log_dirB\x11\n\x0f_time_zone_infoB\r\n\x0b_tree_depthB\r\n\x0b_status_msgB\x10\n\x0e_is_held_totalB\x0b\n\t_pub_portB\r\n\x0b_broadcastsB\x12\n\x10_is_queued_totalB\t\n\x07_prunedB\x14\n\x12_is_runahead_totalB\x11\n\x0f_states_updatedB\x12\n\x10_n_edge_distance\"\xe1\x06\n\tPbRuntime\x12\x15\n\x08platform\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06script\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0binit_script\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x17\n\nenv_script\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\nerr_script\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x18\n\x0b\x65xit_script\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x17\n\npre_script\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x18\n\x0bpost_script\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x19\n\x0cwork_sub_dir\x18\t \x01(\tH\x08\x88\x01\x01\x12(\n\x1b\x65xecution_polling_intervals\x18\n \x01(\tH\t\x88\x01\x01\x12#\n\x16\x65xecution_retry_delays\x18\x0b \x01(\tH\n\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12)\n\x1csubmission_polling_intervals\x18\r \x01(\tH\x0c\x88\x01\x01\x12$\n\x17submission_retry_delays\x18\x0e \x01(\tH\r\x88\x01\x01\x12\x17\n\ndirectives\x18\x0f \x01(\tH\x0e\x88\x01\x01\x12\x18\n\x0b\x65nvironment\x18\x10 \x01(\tH\x0f\x88\x01\x01\x12\x14\n\x07outputs\x18\x11 \x01(\tH\x10\x88\x01\x01\x12\x17\n\ncompletion\x18\x12 \x01(\tH\x11\x88\x01\x01\x42\x0b\n\t_platformB\t\n\x07_scriptB\x0e\n\x0c_init_scriptB\r\n\x0b_env_scriptB\r\n\x0b_err_scriptB\x0e\n\x0c_exit_scriptB\r\n\x0b_pre_scriptB\x0e\n\x0c_post_scriptB\x0f\n\r_work_sub_dirB\x1e\n\x1c_execution_polling_intervalsB\x19\n\x17_execution_retry_delaysB\x17\n\x15_execution_time_limitB\x1f\n\x1d_submission_polling_intervalsB\x1a\n\x18_submission_retry_delaysB\r\n\x0b_directivesB\x0e\n\x0c_environmentB\n\n\x08_outputsB\r\n\x0b_completion\"\x9d\x05\n\x05PbJob\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nsubmit_num\x18\x03 \x01(\x05H\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\ntask_proxy\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x1b\n\x0esubmitted_time\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x19\n\x0cstarted_time\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x1a\n\rfinished_time\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x06job_id\x18\t \x01(\tH\x08\x88\x01\x01\x12\x1c\n\x0fjob_runner_name\x18\n \x01(\tH\t\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0e \x01(\x02H\n\x88\x01\x01\x12\x15\n\x08platform\x18\x0f 
\x01(\tH\x0b\x88\x01\x01\x12\x18\n\x0bjob_log_dir\x18\x11 \x01(\tH\x0c\x88\x01\x01\x12\x11\n\x04name\x18\x1e \x01(\tH\r\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x1f \x01(\tH\x0e\x88\x01\x01\x12\x10\n\x08messages\x18 \x03(\t\x12 \n\x07runtime\x18! \x01(\x0b\x32\n.PbRuntimeH\x0f\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\r\n\x0b_submit_numB\x08\n\x06_stateB\r\n\x0b_task_proxyB\x11\n\x0f_submitted_timeB\x0f\n\r_started_timeB\x10\n\x0e_finished_timeB\t\n\x07_job_idB\x12\n\x10_job_runner_nameB\x17\n\x15_execution_time_limitB\x0b\n\t_platformB\x0e\n\x0c_job_log_dirB\x07\n\x05_nameB\x0e\n\x0c_cycle_pointB\n\n\x08_runtimeJ\x04\x08\x1d\x10\x1e\"\xe2\x02\n\x06PbTask\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x1e\n\x11mean_elapsed_time\x18\x05 \x01(\x02H\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x0f\n\x07proxies\x18\x07 \x03(\t\x12\x11\n\tnamespace\x18\x08 \x03(\t\x12\x0f\n\x07parents\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x06\x88\x01\x01\x12 \n\x07runtime\x18\x0b \x01(\x0b\x32\n.PbRuntimeH\x07\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x14\n\x12_mean_elapsed_timeB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_runtime\"\xd8\x01\n\nPbPollTask\x12\x18\n\x0blocal_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08workflow\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x19\n\x0cremote_proxy\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\treq_state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x19\n\x0cgraph_string\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x0e\n\x0c_local_proxyB\x0b\n\t_workflowB\x0f\n\r_remote_proxyB\x0c\n\n_req_stateB\x0f\n\r_graph_string\"\xcb\x01\n\x0bPbCondition\x12\x17\n\ntask_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nexpr_alias\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\treq_state\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x14\n\x07message\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\r\n\x0b_task_proxyB\r\n\x0b_expr_aliasB\x0c\n\n_req_stateB\x0c\n\n_satisfiedB\n\n\x08_message\"\x96\x01\n\x0ePbPrerequisite\x12\x17\n\nexpression\x18\x01 \x01(\tH\x00\x88\x01\x01\x12 \n\nconditions\x18\x02 \x03(\x0b\x32\x0c.PbCondition\x12\x14\n\x0c\x63ycle_points\x18\x03 \x03(\t\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x01\x88\x01\x01\x42\r\n\x0b_expressionB\x0c\n\n_satisfied\"\x8c\x01\n\x08PbOutput\x12\x12\n\x05label\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x14\n\x07message\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\tsatisfied\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x11\n\x04time\x18\x04 \x01(\x01H\x03\x88\x01\x01\x42\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\xa5\x01\n\tPbTrigger\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05label\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07message\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x11\n\x04time\x18\x05 \x01(\x01H\x04\x88\x01\x01\x42\x05\n\x03_idB\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\x91\x08\n\x0bPbTaskProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04task\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 
\x01(\x05H\x05\x88\x01\x01\x12\x18\n\x0bjob_submits\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12*\n\x07outputs\x18\t \x03(\x0b\x32\x19.PbTaskProxy.OutputsEntry\x12\x11\n\tnamespace\x18\x0b \x03(\t\x12&\n\rprerequisites\x18\x0c \x03(\x0b\x32\x0f.PbPrerequisite\x12\x0c\n\x04jobs\x18\r \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\x0f \x01(\tH\x07\x88\x01\x01\x12\x11\n\x04name\x18\x10 \x01(\tH\x08\x88\x01\x01\x12\x14\n\x07is_held\x18\x11 \x01(\x08H\t\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x12 \x03(\t\x12\x11\n\tancestors\x18\x13 \x03(\t\x12\x16\n\tflow_nums\x18\x14 \x01(\tH\n\x88\x01\x01\x12=\n\x11\x65xternal_triggers\x18\x17 \x03(\x0b\x32\".PbTaskProxy.ExternalTriggersEntry\x12.\n\txtriggers\x18\x18 \x03(\x0b\x32\x1b.PbTaskProxy.XtriggersEntry\x12\x16\n\tis_queued\x18\x19 \x01(\x08H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x1a \x01(\x08H\x0c\x88\x01\x01\x12\x16\n\tflow_wait\x18\x1b \x01(\x08H\r\x88\x01\x01\x12 \n\x07runtime\x18\x1c \x01(\x0b\x32\n.PbRuntimeH\x0e\x88\x01\x01\x12\x18\n\x0bgraph_depth\x18\x1d \x01(\x05H\x0f\x88\x01\x01\x1a\x39\n\x0cOutputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x18\n\x05value\x18\x02 \x01(\x0b\x32\t.PbOutput:\x02\x38\x01\x1a\x43\n\x15\x45xternalTriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x1a<\n\x0eXtriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_taskB\x08\n\x06_stateB\x0e\n\x0c_cycle_pointB\x08\n\x06_depthB\x0e\n\x0c_job_submitsB\x0f\n\r_first_parentB\x07\n\x05_nameB\n\n\x08_is_heldB\x0c\n\n_flow_numsB\x0c\n\n_is_queuedB\x0e\n\x0c_is_runaheadB\x0c\n\n_flow_waitB\n\n\x08_runtimeB\x0e\n\x0c_graph_depth\"\xc8\x02\n\x08PbFamily\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x05 \x01(\x05H\x04\x88\x01\x01\x12\x0f\n\x07proxies\x18\x06 \x03(\t\x12\x0f\n\x07parents\x18\x07 \x03(\t\x12\x13\n\x0b\x63hild_tasks\x18\x08 \x03(\t\x12\x16\n\x0e\x63hild_families\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x05\x88\x01\x01\x12 \n\x07runtime\x18\x0b \x01(\x0b\x32\n.PbRuntimeH\x06\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_runtime\"\xae\x06\n\rPbFamilyProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x11\n\x04name\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x13\n\x06\x66\x61mily\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05state\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12\x19\n\x0c\x66irst_parent\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x0b\x63hild_tasks\x18\n \x03(\t\x12\x16\n\x0e\x63hild_families\x18\x0b \x03(\t\x12\x14\n\x07is_held\x18\x0c \x01(\x08H\x08\x88\x01\x01\x12\x11\n\tancestors\x18\r \x03(\t\x12\x0e\n\x06states\x18\x0e \x03(\t\x12\x35\n\x0cstate_totals\x18\x0f \x03(\x0b\x32\x1f.PbFamilyProxy.StateTotalsEntry\x12\x1a\n\ris_held_total\x18\x10 \x01(\x05H\t\x88\x01\x01\x12\x16\n\tis_queued\x18\x11 \x01(\x08H\n\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18\x12 \x01(\x05H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x13 \x01(\x08H\x0c\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18\x14 \x01(\x05H\r\x88\x01\x01\x12 \n\x07runtime\x18\x15 
\x01(\x0b\x32\n.PbRuntimeH\x0e\x88\x01\x01\x12\x18\n\x0bgraph_depth\x18\x16 \x01(\x05H\x0f\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x0e\n\x0c_cycle_pointB\x07\n\x05_nameB\t\n\x07_familyB\x08\n\x06_stateB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_is_heldB\x10\n\x0e_is_held_totalB\x0c\n\n_is_queuedB\x12\n\x10_is_queued_totalB\x0e\n\x0c_is_runaheadB\x14\n\x12_is_runahead_totalB\n\n\x08_runtimeB\x0e\n\x0c_graph_depth\"\xbc\x01\n\x06PbEdge\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x13\n\x06source\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06target\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x14\n\x07suicide\x18\x05 \x01(\x08H\x04\x88\x01\x01\x12\x11\n\x04\x63ond\x18\x06 \x01(\x08H\x05\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\t\n\x07_sourceB\t\n\x07_targetB\n\n\x08_suicideB\x07\n\x05_cond\"{\n\x07PbEdges\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x02 \x03(\t\x12+\n\x16workflow_polling_tasks\x18\x03 \x03(\x0b\x32\x0b.PbPollTask\x12\x0e\n\x06leaves\x18\x04 \x03(\t\x12\x0c\n\x04\x66\x65\x65t\x18\x05 \x03(\tB\x05\n\x03_id\"\xf2\x01\n\x10PbEntireWorkflow\x12\"\n\x08workflow\x18\x01 \x01(\x0b\x32\x0b.PbWorkflowH\x00\x88\x01\x01\x12\x16\n\x05tasks\x18\x02 \x03(\x0b\x32\x07.PbTask\x12\"\n\x0ctask_proxies\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x14\n\x04jobs\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x1b\n\x08\x66\x61milies\x18\x05 \x03(\x0b\x32\t.PbFamily\x12&\n\x0e\x66\x61mily_proxies\x18\x06 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x16\n\x05\x65\x64ges\x18\x07 \x03(\x0b\x32\x07.PbEdgeB\x0b\n\t_workflow\"\xaf\x01\n\x07\x45\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbEdge\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbEdge\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xb3\x01\n\x07\x46\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x18\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\t.PbFamily\x12\x1a\n\x07updated\x18\x04 \x03(\x0b\x32\t.PbFamily\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xbe\x01\n\x08\x46PDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1d\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x1f\n\x07updated\x18\x04 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xad\x01\n\x07JDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x15\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x06.PbJob\x12\x17\n\x07updated\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xaf\x01\n\x07TDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 
\x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbTask\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbTask\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xba\x01\n\x08TPDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1b\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x1d\n\x07updated\x18\x04 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xc3\x01\n\x07WDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1f\n\x05\x61\x64\x64\x65\x64\x18\x02 \x01(\x0b\x32\x0b.PbWorkflowH\x01\x88\x01\x01\x12!\n\x07updated\x18\x03 \x01(\x0b\x32\x0b.PbWorkflowH\x02\x88\x01\x01\x12\x15\n\x08reloaded\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x13\n\x06pruned\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x07\n\x05_timeB\x08\n\x06_addedB\n\n\x08_updatedB\x0b\n\t_reloadedB\t\n\x07_pruned\"\xd1\x01\n\tAllDeltas\x12\x1a\n\x08\x66\x61milies\x18\x01 \x01(\x0b\x32\x08.FDeltas\x12!\n\x0e\x66\x61mily_proxies\x18\x02 \x01(\x0b\x32\t.FPDeltas\x12\x16\n\x04jobs\x18\x03 \x01(\x0b\x32\x08.JDeltas\x12\x17\n\x05tasks\x18\x04 \x01(\x0b\x32\x08.TDeltas\x12\x1f\n\x0ctask_proxies\x18\x05 \x01(\x0b\x32\t.TPDeltas\x12\x17\n\x05\x65\x64ges\x18\x06 \x01(\x0b\x32\x08.EDeltas\x12\x1a\n\x08workflow\x18\x07 \x01(\x0b\x32\x08.WDeltasb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13\x64\x61ta_messages.proto\"\x96\x01\n\x06PbMeta\x12\x12\n\x05title\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x18\n\x0b\x64\x65scription\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x10\n\x03URL\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x19\n\x0cuser_defined\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_titleB\x0e\n\x0c_descriptionB\x06\n\x04_URLB\x0f\n\r_user_defined\"\xaa\x01\n\nPbTimeZone\x12\x12\n\x05hours\x18\x01 \x01(\x05H\x00\x88\x01\x01\x12\x14\n\x07minutes\x18\x02 \x01(\x05H\x01\x88\x01\x01\x12\x19\n\x0cstring_basic\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1c\n\x0fstring_extended\x18\x04 \x01(\tH\x03\x88\x01\x01\x42\x08\n\x06_hoursB\n\n\x08_minutesB\x0f\n\r_string_basicB\x12\n\x10_string_extended\"\'\n\x0fPbTaskProxyRefs\x12\x14\n\x0ctask_proxies\x18\x01 \x03(\t\"\xd4\x0c\n\nPbWorkflow\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06status\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x11\n\x04host\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x11\n\x04port\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x12\n\x05owner\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\r\n\x05tasks\x18\x08 \x03(\t\x12\x10\n\x08\x66\x61milies\x18\t \x03(\t\x12\x1c\n\x05\x65\x64ges\x18\n \x01(\x0b\x32\x08.PbEdgesH\x07\x88\x01\x01\x12\x18\n\x0b\x61pi_version\x18\x0b \x01(\x05H\x08\x88\x01\x01\x12\x19\n\x0c\x63ylc_version\x18\x0c \x01(\tH\t\x88\x01\x01\x12\x19\n\x0clast_updated\x18\r \x01(\x01H\n\x88\x01\x01\x12\x1a\n\x04meta\x18\x0e \x01(\x0b\x32\x07.PbMetaH\x0b\x88\x01\x01\x12&\n\x19newest_active_cycle_point\x18\x10 \x01(\tH\x0c\x88\x01\x01\x12&\n\x19oldest_active_cycle_point\x18\x11 \x01(\tH\r\x88\x01\x01\x12\x15\n\x08reloaded\x18\x12 \x01(\x08H\x0e\x88\x01\x01\x12\x15\n\x08run_mode\x18\x13 \x01(\tH\x0f\x88\x01\x01\x12\x19\n\x0c\x63ycling_mode\x18\x14 \x01(\tH\x10\x88\x01\x01\x12\x32\n\x0cstate_totals\x18\x15 
\x03(\x0b\x32\x1c.PbWorkflow.StateTotalsEntry\x12\x1d\n\x10workflow_log_dir\x18\x16 \x01(\tH\x11\x88\x01\x01\x12(\n\x0etime_zone_info\x18\x17 \x01(\x0b\x32\x0b.PbTimeZoneH\x12\x88\x01\x01\x12\x17\n\ntree_depth\x18\x18 \x01(\x05H\x13\x88\x01\x01\x12\x15\n\rjob_log_names\x18\x19 \x03(\t\x12\x14\n\x0cns_def_order\x18\x1a \x03(\t\x12\x0e\n\x06states\x18\x1b \x03(\t\x12\x14\n\x0ctask_proxies\x18\x1c \x03(\t\x12\x16\n\x0e\x66\x61mily_proxies\x18\x1d \x03(\t\x12\x17\n\nstatus_msg\x18\x1e \x01(\tH\x14\x88\x01\x01\x12\x1a\n\ris_held_total\x18\x1f \x01(\x05H\x15\x88\x01\x01\x12\x0c\n\x04jobs\x18 \x03(\t\x12\x15\n\x08pub_port\x18! \x01(\x05H\x16\x88\x01\x01\x12\x17\n\nbroadcasts\x18\" \x01(\tH\x17\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18# \x01(\x05H\x18\x88\x01\x01\x12=\n\x12latest_state_tasks\x18$ \x03(\x0b\x32!.PbWorkflow.LatestStateTasksEntry\x12\x13\n\x06pruned\x18% \x01(\x08H\x19\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18& \x01(\x05H\x1a\x88\x01\x01\x12\x1b\n\x0estates_updated\x18\' \x01(\x08H\x1b\x88\x01\x01\x12\x1c\n\x0fn_edge_distance\x18( \x01(\x05H\x1c\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1aI\n\x15LatestStateTasksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1f\n\x05value\x18\x02 \x01(\x0b\x32\x10.PbTaskProxyRefs:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\t\n\x07_statusB\x07\n\x05_hostB\x07\n\x05_portB\x08\n\x06_ownerB\x08\n\x06_edgesB\x0e\n\x0c_api_versionB\x0f\n\r_cylc_versionB\x0f\n\r_last_updatedB\x07\n\x05_metaB\x1c\n\x1a_newest_active_cycle_pointB\x1c\n\x1a_oldest_active_cycle_pointB\x0b\n\t_reloadedB\x0b\n\t_run_modeB\x0f\n\r_cycling_modeB\x13\n\x11_workflow_log_dirB\x11\n\x0f_time_zone_infoB\r\n\x0b_tree_depthB\r\n\x0b_status_msgB\x10\n\x0e_is_held_totalB\x0b\n\t_pub_portB\r\n\x0b_broadcastsB\x12\n\x10_is_queued_totalB\t\n\x07_prunedB\x14\n\x12_is_runahead_totalB\x11\n\x0f_states_updatedB\x12\n\x10_n_edge_distance\"\x85\x07\n\tPbRuntime\x12\x15\n\x08platform\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06script\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0binit_script\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x17\n\nenv_script\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\nerr_script\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x18\n\x0b\x65xit_script\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x17\n\npre_script\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x18\n\x0bpost_script\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x19\n\x0cwork_sub_dir\x18\t \x01(\tH\x08\x88\x01\x01\x12(\n\x1b\x65xecution_polling_intervals\x18\n \x01(\tH\t\x88\x01\x01\x12#\n\x16\x65xecution_retry_delays\x18\x0b \x01(\tH\n\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0c \x01(\tH\x0b\x88\x01\x01\x12)\n\x1csubmission_polling_intervals\x18\r \x01(\tH\x0c\x88\x01\x01\x12$\n\x17submission_retry_delays\x18\x0e \x01(\tH\r\x88\x01\x01\x12\x17\n\ndirectives\x18\x0f \x01(\tH\x0e\x88\x01\x01\x12\x18\n\x0b\x65nvironment\x18\x10 \x01(\tH\x0f\x88\x01\x01\x12\x14\n\x07outputs\x18\x11 \x01(\tH\x10\x88\x01\x01\x12\x17\n\ncompletion\x18\x12 \x01(\tH\x11\x88\x01\x01\x12\x15\n\x08run_mode\x18\x13 
\x01(\tH\x12\x88\x01\x01\x42\x0b\n\t_platformB\t\n\x07_scriptB\x0e\n\x0c_init_scriptB\r\n\x0b_env_scriptB\r\n\x0b_err_scriptB\x0e\n\x0c_exit_scriptB\r\n\x0b_pre_scriptB\x0e\n\x0c_post_scriptB\x0f\n\r_work_sub_dirB\x1e\n\x1c_execution_polling_intervalsB\x19\n\x17_execution_retry_delaysB\x17\n\x15_execution_time_limitB\x1f\n\x1d_submission_polling_intervalsB\x1a\n\x18_submission_retry_delaysB\r\n\x0b_directivesB\x0e\n\x0c_environmentB\n\n\x08_outputsB\r\n\x0b_completionB\x0b\n\t_run_mode\"\x9d\x05\n\x05PbJob\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x17\n\nsubmit_num\x18\x03 \x01(\x05H\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x17\n\ntask_proxy\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x1b\n\x0esubmitted_time\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x19\n\x0cstarted_time\x18\x07 \x01(\tH\x06\x88\x01\x01\x12\x1a\n\rfinished_time\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x06job_id\x18\t \x01(\tH\x08\x88\x01\x01\x12\x1c\n\x0fjob_runner_name\x18\n \x01(\tH\t\x88\x01\x01\x12!\n\x14\x65xecution_time_limit\x18\x0e \x01(\x02H\n\x88\x01\x01\x12\x15\n\x08platform\x18\x0f \x01(\tH\x0b\x88\x01\x01\x12\x18\n\x0bjob_log_dir\x18\x11 \x01(\tH\x0c\x88\x01\x01\x12\x11\n\x04name\x18\x1e \x01(\tH\r\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x1f \x01(\tH\x0e\x88\x01\x01\x12\x10\n\x08messages\x18 \x03(\t\x12 \n\x07runtime\x18! \x01(\x0b\x32\n.PbRuntimeH\x0f\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\r\n\x0b_submit_numB\x08\n\x06_stateB\r\n\x0b_task_proxyB\x11\n\x0f_submitted_timeB\x0f\n\r_started_timeB\x10\n\x0e_finished_timeB\t\n\x07_job_idB\x12\n\x10_job_runner_nameB\x17\n\x15_execution_time_limitB\x0b\n\t_platformB\x0e\n\x0c_job_log_dirB\x07\n\x05_nameB\x0e\n\x0c_cycle_pointB\n\n\x08_runtimeJ\x04\x08\x1d\x10\x1e\"\xe2\x02\n\x06PbTask\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x1e\n\x11mean_elapsed_time\x18\x05 \x01(\x02H\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x0f\n\x07proxies\x18\x07 \x03(\t\x12\x11\n\tnamespace\x18\x08 \x03(\t\x12\x0f\n\x07parents\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x06\x88\x01\x01\x12 \n\x07runtime\x18\x0b \x01(\x0b\x32\n.PbRuntimeH\x07\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x14\n\x12_mean_elapsed_timeB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_runtime\"\xd8\x01\n\nPbPollTask\x12\x18\n\x0blocal_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x15\n\x08workflow\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x19\n\x0cremote_proxy\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\treq_state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x19\n\x0cgraph_string\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x0e\n\x0c_local_proxyB\x0b\n\t_workflowB\x0f\n\r_remote_proxyB\x0c\n\n_req_stateB\x0f\n\r_graph_string\"\xcb\x01\n\x0bPbCondition\x12\x17\n\ntask_proxy\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x17\n\nexpr_alias\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\treq_state\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x14\n\x07message\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\r\n\x0b_task_proxyB\r\n\x0b_expr_aliasB\x0c\n\n_req_stateB\x0c\n\n_satisfiedB\n\n\x08_message\"\x96\x01\n\x0ePbPrerequisite\x12\x17\n\nexpression\x18\x01 \x01(\tH\x00\x88\x01\x01\x12 \n\nconditions\x18\x02 
\x03(\x0b\x32\x0c.PbCondition\x12\x14\n\x0c\x63ycle_points\x18\x03 \x03(\t\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x01\x88\x01\x01\x42\r\n\x0b_expressionB\x0c\n\n_satisfied\"\x8c\x01\n\x08PbOutput\x12\x12\n\x05label\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x14\n\x07message\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x16\n\tsatisfied\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x11\n\x04time\x18\x04 \x01(\x01H\x03\x88\x01\x01\x42\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\xa5\x01\n\tPbTrigger\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\x05label\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x14\n\x07message\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x16\n\tsatisfied\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x11\n\x04time\x18\x05 \x01(\x01H\x04\x88\x01\x01\x42\x05\n\x03_idB\x08\n\x06_labelB\n\n\x08_messageB\x0c\n\n_satisfiedB\x07\n\x05_time\"\x91\x08\n\x0bPbTaskProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04task\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x12\n\x05state\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x06 \x01(\x05H\x05\x88\x01\x01\x12\x18\n\x0bjob_submits\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12*\n\x07outputs\x18\t \x03(\x0b\x32\x19.PbTaskProxy.OutputsEntry\x12\x11\n\tnamespace\x18\x0b \x03(\t\x12&\n\rprerequisites\x18\x0c \x03(\x0b\x32\x0f.PbPrerequisite\x12\x0c\n\x04jobs\x18\r \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\x0f \x01(\tH\x07\x88\x01\x01\x12\x11\n\x04name\x18\x10 \x01(\tH\x08\x88\x01\x01\x12\x14\n\x07is_held\x18\x11 \x01(\x08H\t\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x12 \x03(\t\x12\x11\n\tancestors\x18\x13 \x03(\t\x12\x16\n\tflow_nums\x18\x14 \x01(\tH\n\x88\x01\x01\x12=\n\x11\x65xternal_triggers\x18\x17 \x03(\x0b\x32\".PbTaskProxy.ExternalTriggersEntry\x12.\n\txtriggers\x18\x18 \x03(\x0b\x32\x1b.PbTaskProxy.XtriggersEntry\x12\x16\n\tis_queued\x18\x19 \x01(\x08H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x1a \x01(\x08H\x0c\x88\x01\x01\x12\x16\n\tflow_wait\x18\x1b \x01(\x08H\r\x88\x01\x01\x12 \n\x07runtime\x18\x1c \x01(\x0b\x32\n.PbRuntimeH\x0e\x88\x01\x01\x12\x18\n\x0bgraph_depth\x18\x1d \x01(\x05H\x0f\x88\x01\x01\x1a\x39\n\x0cOutputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x18\n\x05value\x18\x02 \x01(\x0b\x32\t.PbOutput:\x02\x38\x01\x1a\x43\n\x15\x45xternalTriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x1a<\n\x0eXtriggersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x19\n\x05value\x18\x02 \x01(\x0b\x32\n.PbTrigger:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_taskB\x08\n\x06_stateB\x0e\n\x0c_cycle_pointB\x08\n\x06_depthB\x0e\n\x0c_job_submitsB\x0f\n\r_first_parentB\x07\n\x05_nameB\n\n\x08_is_heldB\x0c\n\n_flow_numsB\x0c\n\n_is_queuedB\x0e\n\x0c_is_runaheadB\x0c\n\n_flow_waitB\n\n\x08_runtimeB\x0e\n\x0c_graph_depth\"\xc8\x02\n\x08PbFamily\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x11\n\x04name\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x1a\n\x04meta\x18\x04 \x01(\x0b\x32\x07.PbMetaH\x03\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x05 \x01(\x05H\x04\x88\x01\x01\x12\x0f\n\x07proxies\x18\x06 \x03(\t\x12\x0f\n\x07parents\x18\x07 \x03(\t\x12\x13\n\x0b\x63hild_tasks\x18\x08 \x03(\t\x12\x16\n\x0e\x63hild_families\x18\t \x03(\t\x12\x19\n\x0c\x66irst_parent\x18\n \x01(\tH\x05\x88\x01\x01\x12 \n\x07runtime\x18\x0b 
\x01(\x0b\x32\n.PbRuntimeH\x06\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x07\n\x05_nameB\x07\n\x05_metaB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_runtime\"\xae\x06\n\rPbFamilyProxy\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x18\n\x0b\x63ycle_point\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x11\n\x04name\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x13\n\x06\x66\x61mily\x18\x05 \x01(\tH\x04\x88\x01\x01\x12\x12\n\x05state\x18\x06 \x01(\tH\x05\x88\x01\x01\x12\x12\n\x05\x64\x65pth\x18\x07 \x01(\x05H\x06\x88\x01\x01\x12\x19\n\x0c\x66irst_parent\x18\x08 \x01(\tH\x07\x88\x01\x01\x12\x13\n\x0b\x63hild_tasks\x18\n \x03(\t\x12\x16\n\x0e\x63hild_families\x18\x0b \x03(\t\x12\x14\n\x07is_held\x18\x0c \x01(\x08H\x08\x88\x01\x01\x12\x11\n\tancestors\x18\r \x03(\t\x12\x0e\n\x06states\x18\x0e \x03(\t\x12\x35\n\x0cstate_totals\x18\x0f \x03(\x0b\x32\x1f.PbFamilyProxy.StateTotalsEntry\x12\x1a\n\ris_held_total\x18\x10 \x01(\x05H\t\x88\x01\x01\x12\x16\n\tis_queued\x18\x11 \x01(\x08H\n\x88\x01\x01\x12\x1c\n\x0fis_queued_total\x18\x12 \x01(\x05H\x0b\x88\x01\x01\x12\x18\n\x0bis_runahead\x18\x13 \x01(\x08H\x0c\x88\x01\x01\x12\x1e\n\x11is_runahead_total\x18\x14 \x01(\x05H\r\x88\x01\x01\x12 \n\x07runtime\x18\x15 \x01(\x0b\x32\n.PbRuntimeH\x0e\x88\x01\x01\x12\x18\n\x0bgraph_depth\x18\x16 \x01(\x05H\x0f\x88\x01\x01\x1a\x32\n\x10StateTotalsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\x0e\n\x0c_cycle_pointB\x07\n\x05_nameB\t\n\x07_familyB\x08\n\x06_stateB\x08\n\x06_depthB\x0f\n\r_first_parentB\n\n\x08_is_heldB\x10\n\x0e_is_held_totalB\x0c\n\n_is_queuedB\x12\n\x10_is_queued_totalB\x0e\n\x0c_is_runaheadB\x14\n\x12_is_runahead_totalB\n\n\x08_runtimeB\x0e\n\x0c_graph_depth\"\xbc\x01\n\x06PbEdge\x12\x12\n\x05stamp\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x0f\n\x02id\x18\x02 \x01(\tH\x01\x88\x01\x01\x12\x13\n\x06source\x18\x03 \x01(\tH\x02\x88\x01\x01\x12\x13\n\x06target\x18\x04 \x01(\tH\x03\x88\x01\x01\x12\x14\n\x07suicide\x18\x05 \x01(\x08H\x04\x88\x01\x01\x12\x11\n\x04\x63ond\x18\x06 \x01(\x08H\x05\x88\x01\x01\x42\x08\n\x06_stampB\x05\n\x03_idB\t\n\x07_sourceB\t\n\x07_targetB\n\n\x08_suicideB\x07\n\x05_cond\"{\n\x07PbEdges\x12\x0f\n\x02id\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\r\n\x05\x65\x64ges\x18\x02 \x03(\t\x12+\n\x16workflow_polling_tasks\x18\x03 \x03(\x0b\x32\x0b.PbPollTask\x12\x0e\n\x06leaves\x18\x04 \x03(\t\x12\x0c\n\x04\x66\x65\x65t\x18\x05 \x03(\tB\x05\n\x03_id\"\xf2\x01\n\x10PbEntireWorkflow\x12\"\n\x08workflow\x18\x01 \x01(\x0b\x32\x0b.PbWorkflowH\x00\x88\x01\x01\x12\x16\n\x05tasks\x18\x02 \x03(\x0b\x32\x07.PbTask\x12\"\n\x0ctask_proxies\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x14\n\x04jobs\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x1b\n\x08\x66\x61milies\x18\x05 \x03(\x0b\x32\t.PbFamily\x12&\n\x0e\x66\x61mily_proxies\x18\x06 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x16\n\x05\x65\x64ges\x18\x07 \x03(\x0b\x32\x07.PbEdgeB\x0b\n\t_workflow\"\xaf\x01\n\x07\x45\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbEdge\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbEdge\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xb3\x01\n\x07\x46\x44\x65ltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 
\x01(\x03H\x01\x88\x01\x01\x12\x18\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\t.PbFamily\x12\x1a\n\x07updated\x18\x04 \x03(\x0b\x32\t.PbFamily\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xbe\x01\n\x08\x46PDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1d\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x1f\n\x07updated\x18\x04 \x03(\x0b\x32\x0e.PbFamilyProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xad\x01\n\x07JDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x15\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x06.PbJob\x12\x17\n\x07updated\x18\x04 \x03(\x0b\x32\x06.PbJob\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xaf\x01\n\x07TDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x16\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x07.PbTask\x12\x18\n\x07updated\x18\x04 \x03(\x0b\x32\x07.PbTask\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xba\x01\n\x08TPDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x15\n\x08\x63hecksum\x18\x02 \x01(\x03H\x01\x88\x01\x01\x12\x1b\n\x05\x61\x64\x64\x65\x64\x18\x03 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x1d\n\x07updated\x18\x04 \x03(\x0b\x32\x0c.PbTaskProxy\x12\x0e\n\x06pruned\x18\x05 \x03(\t\x12\x15\n\x08reloaded\x18\x06 \x01(\x08H\x02\x88\x01\x01\x42\x07\n\x05_timeB\x0b\n\t_checksumB\x0b\n\t_reloaded\"\xc3\x01\n\x07WDeltas\x12\x11\n\x04time\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x1f\n\x05\x61\x64\x64\x65\x64\x18\x02 \x01(\x0b\x32\x0b.PbWorkflowH\x01\x88\x01\x01\x12!\n\x07updated\x18\x03 \x01(\x0b\x32\x0b.PbWorkflowH\x02\x88\x01\x01\x12\x15\n\x08reloaded\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\x13\n\x06pruned\x18\x05 \x01(\tH\x04\x88\x01\x01\x42\x07\n\x05_timeB\x08\n\x06_addedB\n\n\x08_updatedB\x0b\n\t_reloadedB\t\n\x07_pruned\"\xd1\x01\n\tAllDeltas\x12\x1a\n\x08\x66\x61milies\x18\x01 \x01(\x0b\x32\x08.FDeltas\x12!\n\x0e\x66\x61mily_proxies\x18\x02 \x01(\x0b\x32\t.FPDeltas\x12\x16\n\x04jobs\x18\x03 \x01(\x0b\x32\x08.JDeltas\x12\x17\n\x05tasks\x18\x04 \x01(\x0b\x32\x08.TDeltas\x12\x1f\n\x0ctask_proxies\x18\x05 \x01(\x0b\x32\t.TPDeltas\x12\x17\n\x05\x65\x64ges\x18\x06 \x01(\x0b\x32\x08.EDeltas\x12\x1a\n\x08workflow\x18\x07 \x01(\x0b\x32\x08.WDeltasb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -46,55 +46,55 @@ _globals['_PBWORKFLOW_LATESTSTATETASKSENTRY']._serialized_start=1493 _globals['_PBWORKFLOW_LATESTSTATETASKSENTRY']._serialized_end=1566 _globals['_PBRUNTIME']._serialized_start=2014 - _globals['_PBRUNTIME']._serialized_end=2879 - _globals['_PBJOB']._serialized_start=2882 - _globals['_PBJOB']._serialized_end=3551 - _globals['_PBTASK']._serialized_start=3554 - _globals['_PBTASK']._serialized_end=3908 - _globals['_PBPOLLTASK']._serialized_start=3911 - _globals['_PBPOLLTASK']._serialized_end=4127 - _globals['_PBCONDITION']._serialized_start=4130 - _globals['_PBCONDITION']._serialized_end=4333 - 
_globals['_PBPREREQUISITE']._serialized_start=4336 - _globals['_PBPREREQUISITE']._serialized_end=4486 - _globals['_PBOUTPUT']._serialized_start=4489 - _globals['_PBOUTPUT']._serialized_end=4629 - _globals['_PBTRIGGER']._serialized_start=4632 - _globals['_PBTRIGGER']._serialized_end=4797 - _globals['_PBTASKPROXY']._serialized_start=4800 - _globals['_PBTASKPROXY']._serialized_end=5841 - _globals['_PBTASKPROXY_OUTPUTSENTRY']._serialized_start=5451 - _globals['_PBTASKPROXY_OUTPUTSENTRY']._serialized_end=5508 - _globals['_PBTASKPROXY_EXTERNALTRIGGERSENTRY']._serialized_start=5510 - _globals['_PBTASKPROXY_EXTERNALTRIGGERSENTRY']._serialized_end=5577 - _globals['_PBTASKPROXY_XTRIGGERSENTRY']._serialized_start=5579 - _globals['_PBTASKPROXY_XTRIGGERSENTRY']._serialized_end=5639 - _globals['_PBFAMILY']._serialized_start=5844 - _globals['_PBFAMILY']._serialized_end=6172 - _globals['_PBFAMILYPROXY']._serialized_start=6175 - _globals['_PBFAMILYPROXY']._serialized_end=6989 + _globals['_PBRUNTIME']._serialized_end=2915 + _globals['_PBJOB']._serialized_start=2918 + _globals['_PBJOB']._serialized_end=3587 + _globals['_PBTASK']._serialized_start=3590 + _globals['_PBTASK']._serialized_end=3944 + _globals['_PBPOLLTASK']._serialized_start=3947 + _globals['_PBPOLLTASK']._serialized_end=4163 + _globals['_PBCONDITION']._serialized_start=4166 + _globals['_PBCONDITION']._serialized_end=4369 + _globals['_PBPREREQUISITE']._serialized_start=4372 + _globals['_PBPREREQUISITE']._serialized_end=4522 + _globals['_PBOUTPUT']._serialized_start=4525 + _globals['_PBOUTPUT']._serialized_end=4665 + _globals['_PBTRIGGER']._serialized_start=4668 + _globals['_PBTRIGGER']._serialized_end=4833 + _globals['_PBTASKPROXY']._serialized_start=4836 + _globals['_PBTASKPROXY']._serialized_end=5877 + _globals['_PBTASKPROXY_OUTPUTSENTRY']._serialized_start=5487 + _globals['_PBTASKPROXY_OUTPUTSENTRY']._serialized_end=5544 + _globals['_PBTASKPROXY_EXTERNALTRIGGERSENTRY']._serialized_start=5546 + _globals['_PBTASKPROXY_EXTERNALTRIGGERSENTRY']._serialized_end=5613 + _globals['_PBTASKPROXY_XTRIGGERSENTRY']._serialized_start=5615 + _globals['_PBTASKPROXY_XTRIGGERSENTRY']._serialized_end=5675 + _globals['_PBFAMILY']._serialized_start=5880 + _globals['_PBFAMILY']._serialized_end=6208 + _globals['_PBFAMILYPROXY']._serialized_start=6211 + _globals['_PBFAMILYPROXY']._serialized_end=7025 _globals['_PBFAMILYPROXY_STATETOTALSENTRY']._serialized_start=1441 _globals['_PBFAMILYPROXY_STATETOTALSENTRY']._serialized_end=1491 - _globals['_PBEDGE']._serialized_start=6992 - _globals['_PBEDGE']._serialized_end=7180 - _globals['_PBEDGES']._serialized_start=7182 - _globals['_PBEDGES']._serialized_end=7305 - _globals['_PBENTIREWORKFLOW']._serialized_start=7308 - _globals['_PBENTIREWORKFLOW']._serialized_end=7550 - _globals['_EDELTAS']._serialized_start=7553 - _globals['_EDELTAS']._serialized_end=7728 - _globals['_FDELTAS']._serialized_start=7731 - _globals['_FDELTAS']._serialized_end=7910 - _globals['_FPDELTAS']._serialized_start=7913 - _globals['_FPDELTAS']._serialized_end=8103 - _globals['_JDELTAS']._serialized_start=8106 - _globals['_JDELTAS']._serialized_end=8279 - _globals['_TDELTAS']._serialized_start=8282 - _globals['_TDELTAS']._serialized_end=8457 - _globals['_TPDELTAS']._serialized_start=8460 - _globals['_TPDELTAS']._serialized_end=8646 - _globals['_WDELTAS']._serialized_start=8649 - _globals['_WDELTAS']._serialized_end=8844 - _globals['_ALLDELTAS']._serialized_start=8847 - _globals['_ALLDELTAS']._serialized_end=9056 + _globals['_PBEDGE']._serialized_start=7028 
+ _globals['_PBEDGE']._serialized_end=7216 + _globals['_PBEDGES']._serialized_start=7218 + _globals['_PBEDGES']._serialized_end=7341 + _globals['_PBENTIREWORKFLOW']._serialized_start=7344 + _globals['_PBENTIREWORKFLOW']._serialized_end=7586 + _globals['_EDELTAS']._serialized_start=7589 + _globals['_EDELTAS']._serialized_end=7764 + _globals['_FDELTAS']._serialized_start=7767 + _globals['_FDELTAS']._serialized_end=7946 + _globals['_FPDELTAS']._serialized_start=7949 + _globals['_FPDELTAS']._serialized_end=8139 + _globals['_JDELTAS']._serialized_start=8142 + _globals['_JDELTAS']._serialized_end=8315 + _globals['_TDELTAS']._serialized_start=8318 + _globals['_TDELTAS']._serialized_end=8493 + _globals['_TPDELTAS']._serialized_start=8496 + _globals['_TPDELTAS']._serialized_end=8682 + _globals['_WDELTAS']._serialized_start=8685 + _globals['_WDELTAS']._serialized_end=8880 + _globals['_ALLDELTAS']._serialized_start=8883 + _globals['_ALLDELTAS']._serialized_end=9092 # @@protoc_insertion_point(module_scope) diff --git a/cylc/flow/data_messages_pb2.pyi b/cylc/flow/data_messages_pb2.pyi index 4e96c6ed2da..8c80f7f8f10 100644 --- a/cylc/flow/data_messages_pb2.pyi +++ b/cylc/flow/data_messages_pb2.pyi @@ -6,7 +6,7 @@ from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Map DESCRIPTOR: _descriptor.FileDescriptor class PbMeta(_message.Message): - __slots__ = ["title", "description", "URL", "user_defined"] + __slots__ = ("title", "description", "URL", "user_defined") TITLE_FIELD_NUMBER: _ClassVar[int] DESCRIPTION_FIELD_NUMBER: _ClassVar[int] URL_FIELD_NUMBER: _ClassVar[int] @@ -18,7 +18,7 @@ class PbMeta(_message.Message): def __init__(self, title: _Optional[str] = ..., description: _Optional[str] = ..., URL: _Optional[str] = ..., user_defined: _Optional[str] = ...) -> None: ... class PbTimeZone(_message.Message): - __slots__ = ["hours", "minutes", "string_basic", "string_extended"] + __slots__ = ("hours", "minutes", "string_basic", "string_extended") HOURS_FIELD_NUMBER: _ClassVar[int] MINUTES_FIELD_NUMBER: _ClassVar[int] STRING_BASIC_FIELD_NUMBER: _ClassVar[int] @@ -30,22 +30,22 @@ class PbTimeZone(_message.Message): def __init__(self, hours: _Optional[int] = ..., minutes: _Optional[int] = ..., string_basic: _Optional[str] = ..., string_extended: _Optional[str] = ...) -> None: ... class PbTaskProxyRefs(_message.Message): - __slots__ = ["task_proxies"] + __slots__ = ("task_proxies",) TASK_PROXIES_FIELD_NUMBER: _ClassVar[int] task_proxies: _containers.RepeatedScalarFieldContainer[str] def __init__(self, task_proxies: _Optional[_Iterable[str]] = ...) -> None: ... 
class PbWorkflow(_message.Message): - __slots__ = ["stamp", "id", "name", "status", "host", "port", "owner", "tasks", "families", "edges", "api_version", "cylc_version", "last_updated", "meta", "newest_active_cycle_point", "oldest_active_cycle_point", "reloaded", "run_mode", "cycling_mode", "state_totals", "workflow_log_dir", "time_zone_info", "tree_depth", "job_log_names", "ns_def_order", "states", "task_proxies", "family_proxies", "status_msg", "is_held_total", "jobs", "pub_port", "broadcasts", "is_queued_total", "latest_state_tasks", "pruned", "is_runahead_total", "states_updated", "n_edge_distance"] + __slots__ = ("stamp", "id", "name", "status", "host", "port", "owner", "tasks", "families", "edges", "api_version", "cylc_version", "last_updated", "meta", "newest_active_cycle_point", "oldest_active_cycle_point", "reloaded", "run_mode", "cycling_mode", "state_totals", "workflow_log_dir", "time_zone_info", "tree_depth", "job_log_names", "ns_def_order", "states", "task_proxies", "family_proxies", "status_msg", "is_held_total", "jobs", "pub_port", "broadcasts", "is_queued_total", "latest_state_tasks", "pruned", "is_runahead_total", "states_updated", "n_edge_distance") class StateTotalsEntry(_message.Message): - __slots__ = ["key", "value"] + __slots__ = ("key", "value") KEY_FIELD_NUMBER: _ClassVar[int] VALUE_FIELD_NUMBER: _ClassVar[int] key: str value: int def __init__(self, key: _Optional[str] = ..., value: _Optional[int] = ...) -> None: ... class LatestStateTasksEntry(_message.Message): - __slots__ = ["key", "value"] + __slots__ = ("key", "value") KEY_FIELD_NUMBER: _ClassVar[int] VALUE_FIELD_NUMBER: _ClassVar[int] key: str @@ -132,7 +132,7 @@ class PbWorkflow(_message.Message): def __init__(self, stamp: _Optional[str] = ..., id: _Optional[str] = ..., name: _Optional[str] = ..., status: _Optional[str] = ..., host: _Optional[str] = ..., port: _Optional[int] = ..., owner: _Optional[str] = ..., tasks: _Optional[_Iterable[str]] = ..., families: _Optional[_Iterable[str]] = ..., edges: _Optional[_Union[PbEdges, _Mapping]] = ..., api_version: _Optional[int] = ..., cylc_version: _Optional[str] = ..., last_updated: _Optional[float] = ..., meta: _Optional[_Union[PbMeta, _Mapping]] = ..., newest_active_cycle_point: _Optional[str] = ..., oldest_active_cycle_point: _Optional[str] = ..., reloaded: bool = ..., run_mode: _Optional[str] = ..., cycling_mode: _Optional[str] = ..., state_totals: _Optional[_Mapping[str, int]] = ..., workflow_log_dir: _Optional[str] = ..., time_zone_info: _Optional[_Union[PbTimeZone, _Mapping]] = ..., tree_depth: _Optional[int] = ..., job_log_names: _Optional[_Iterable[str]] = ..., ns_def_order: _Optional[_Iterable[str]] = ..., states: _Optional[_Iterable[str]] = ..., task_proxies: _Optional[_Iterable[str]] = ..., family_proxies: _Optional[_Iterable[str]] = ..., status_msg: _Optional[str] = ..., is_held_total: _Optional[int] = ..., jobs: _Optional[_Iterable[str]] = ..., pub_port: _Optional[int] = ..., broadcasts: _Optional[str] = ..., is_queued_total: _Optional[int] = ..., latest_state_tasks: _Optional[_Mapping[str, PbTaskProxyRefs]] = ..., pruned: bool = ..., is_runahead_total: _Optional[int] = ..., states_updated: bool = ..., n_edge_distance: _Optional[int] = ...) -> None: ... 
class PbRuntime(_message.Message): - __slots__ = ["platform", "script", "init_script", "env_script", "err_script", "exit_script", "pre_script", "post_script", "work_sub_dir", "execution_polling_intervals", "execution_retry_delays", "execution_time_limit", "submission_polling_intervals", "submission_retry_delays", "directives", "environment", "outputs", "completion"] + __slots__ = ("platform", "script", "init_script", "env_script", "err_script", "exit_script", "pre_script", "post_script", "work_sub_dir", "execution_polling_intervals", "execution_retry_delays", "execution_time_limit", "submission_polling_intervals", "submission_retry_delays", "directives", "environment", "outputs", "completion", "run_mode") PLATFORM_FIELD_NUMBER: _ClassVar[int] SCRIPT_FIELD_NUMBER: _ClassVar[int] INIT_SCRIPT_FIELD_NUMBER: _ClassVar[int] @@ -151,6 +151,7 @@ class PbRuntime(_message.Message): ENVIRONMENT_FIELD_NUMBER: _ClassVar[int] OUTPUTS_FIELD_NUMBER: _ClassVar[int] COMPLETION_FIELD_NUMBER: _ClassVar[int] + RUN_MODE_FIELD_NUMBER: _ClassVar[int] platform: str script: str init_script: str @@ -169,10 +170,11 @@ class PbRuntime(_message.Message): environment: str outputs: str completion: str - def __init__(self, platform: _Optional[str] = ..., script: _Optional[str] = ..., init_script: _Optional[str] = ..., env_script: _Optional[str] = ..., err_script: _Optional[str] = ..., exit_script: _Optional[str] = ..., pre_script: _Optional[str] = ..., post_script: _Optional[str] = ..., work_sub_dir: _Optional[str] = ..., execution_polling_intervals: _Optional[str] = ..., execution_retry_delays: _Optional[str] = ..., execution_time_limit: _Optional[str] = ..., submission_polling_intervals: _Optional[str] = ..., submission_retry_delays: _Optional[str] = ..., directives: _Optional[str] = ..., environment: _Optional[str] = ..., outputs: _Optional[str] = ..., completion: _Optional[str] = ...) -> None: ... + run_mode: str + def __init__(self, platform: _Optional[str] = ..., script: _Optional[str] = ..., init_script: _Optional[str] = ..., env_script: _Optional[str] = ..., err_script: _Optional[str] = ..., exit_script: _Optional[str] = ..., pre_script: _Optional[str] = ..., post_script: _Optional[str] = ..., work_sub_dir: _Optional[str] = ..., execution_polling_intervals: _Optional[str] = ..., execution_retry_delays: _Optional[str] = ..., execution_time_limit: _Optional[str] = ..., submission_polling_intervals: _Optional[str] = ..., submission_retry_delays: _Optional[str] = ..., directives: _Optional[str] = ..., environment: _Optional[str] = ..., outputs: _Optional[str] = ..., completion: _Optional[str] = ..., run_mode: _Optional[str] = ...) -> None: ... 
class PbJob(_message.Message): - __slots__ = ["stamp", "id", "submit_num", "state", "task_proxy", "submitted_time", "started_time", "finished_time", "job_id", "job_runner_name", "execution_time_limit", "platform", "job_log_dir", "name", "cycle_point", "messages", "runtime"] + __slots__ = ("stamp", "id", "submit_num", "state", "task_proxy", "submitted_time", "started_time", "finished_time", "job_id", "job_runner_name", "execution_time_limit", "platform", "job_log_dir", "name", "cycle_point", "messages", "runtime") STAMP_FIELD_NUMBER: _ClassVar[int] ID_FIELD_NUMBER: _ClassVar[int] SUBMIT_NUM_FIELD_NUMBER: _ClassVar[int] @@ -210,7 +212,7 @@ class PbJob(_message.Message): def __init__(self, stamp: _Optional[str] = ..., id: _Optional[str] = ..., submit_num: _Optional[int] = ..., state: _Optional[str] = ..., task_proxy: _Optional[str] = ..., submitted_time: _Optional[str] = ..., started_time: _Optional[str] = ..., finished_time: _Optional[str] = ..., job_id: _Optional[str] = ..., job_runner_name: _Optional[str] = ..., execution_time_limit: _Optional[float] = ..., platform: _Optional[str] = ..., job_log_dir: _Optional[str] = ..., name: _Optional[str] = ..., cycle_point: _Optional[str] = ..., messages: _Optional[_Iterable[str]] = ..., runtime: _Optional[_Union[PbRuntime, _Mapping]] = ...) -> None: ... class PbTask(_message.Message): - __slots__ = ["stamp", "id", "name", "meta", "mean_elapsed_time", "depth", "proxies", "namespace", "parents", "first_parent", "runtime"] + __slots__ = ("stamp", "id", "name", "meta", "mean_elapsed_time", "depth", "proxies", "namespace", "parents", "first_parent", "runtime") STAMP_FIELD_NUMBER: _ClassVar[int] ID_FIELD_NUMBER: _ClassVar[int] NAME_FIELD_NUMBER: _ClassVar[int] @@ -236,7 +238,7 @@ class PbTask(_message.Message): def __init__(self, stamp: _Optional[str] = ..., id: _Optional[str] = ..., name: _Optional[str] = ..., meta: _Optional[_Union[PbMeta, _Mapping]] = ..., mean_elapsed_time: _Optional[float] = ..., depth: _Optional[int] = ..., proxies: _Optional[_Iterable[str]] = ..., namespace: _Optional[_Iterable[str]] = ..., parents: _Optional[_Iterable[str]] = ..., first_parent: _Optional[str] = ..., runtime: _Optional[_Union[PbRuntime, _Mapping]] = ...) -> None: ... class PbPollTask(_message.Message): - __slots__ = ["local_proxy", "workflow", "remote_proxy", "req_state", "graph_string"] + __slots__ = ("local_proxy", "workflow", "remote_proxy", "req_state", "graph_string") LOCAL_PROXY_FIELD_NUMBER: _ClassVar[int] WORKFLOW_FIELD_NUMBER: _ClassVar[int] REMOTE_PROXY_FIELD_NUMBER: _ClassVar[int] @@ -250,7 +252,7 @@ class PbPollTask(_message.Message): def __init__(self, local_proxy: _Optional[str] = ..., workflow: _Optional[str] = ..., remote_proxy: _Optional[str] = ..., req_state: _Optional[str] = ..., graph_string: _Optional[str] = ...) -> None: ... class PbCondition(_message.Message): - __slots__ = ["task_proxy", "expr_alias", "req_state", "satisfied", "message"] + __slots__ = ("task_proxy", "expr_alias", "req_state", "satisfied", "message") TASK_PROXY_FIELD_NUMBER: _ClassVar[int] EXPR_ALIAS_FIELD_NUMBER: _ClassVar[int] REQ_STATE_FIELD_NUMBER: _ClassVar[int] @@ -264,7 +266,7 @@ class PbCondition(_message.Message): def __init__(self, task_proxy: _Optional[str] = ..., expr_alias: _Optional[str] = ..., req_state: _Optional[str] = ..., satisfied: bool = ..., message: _Optional[str] = ...) -> None: ... 
class PbPrerequisite(_message.Message): - __slots__ = ["expression", "conditions", "cycle_points", "satisfied"] + __slots__ = ("expression", "conditions", "cycle_points", "satisfied") EXPRESSION_FIELD_NUMBER: _ClassVar[int] CONDITIONS_FIELD_NUMBER: _ClassVar[int] CYCLE_POINTS_FIELD_NUMBER: _ClassVar[int] @@ -276,7 +278,7 @@ class PbPrerequisite(_message.Message): def __init__(self, expression: _Optional[str] = ..., conditions: _Optional[_Iterable[_Union[PbCondition, _Mapping]]] = ..., cycle_points: _Optional[_Iterable[str]] = ..., satisfied: bool = ...) -> None: ... class PbOutput(_message.Message): - __slots__ = ["label", "message", "satisfied", "time"] + __slots__ = ("label", "message", "satisfied", "time") LABEL_FIELD_NUMBER: _ClassVar[int] MESSAGE_FIELD_NUMBER: _ClassVar[int] SATISFIED_FIELD_NUMBER: _ClassVar[int] @@ -288,7 +290,7 @@ class PbOutput(_message.Message): def __init__(self, label: _Optional[str] = ..., message: _Optional[str] = ..., satisfied: bool = ..., time: _Optional[float] = ...) -> None: ... class PbTrigger(_message.Message): - __slots__ = ["id", "label", "message", "satisfied", "time"] + __slots__ = ("id", "label", "message", "satisfied", "time") ID_FIELD_NUMBER: _ClassVar[int] LABEL_FIELD_NUMBER: _ClassVar[int] MESSAGE_FIELD_NUMBER: _ClassVar[int] @@ -302,23 +304,23 @@ class PbTrigger(_message.Message): def __init__(self, id: _Optional[str] = ..., label: _Optional[str] = ..., message: _Optional[str] = ..., satisfied: bool = ..., time: _Optional[float] = ...) -> None: ... class PbTaskProxy(_message.Message): - __slots__ = ["stamp", "id", "task", "state", "cycle_point", "depth", "job_submits", "outputs", "namespace", "prerequisites", "jobs", "first_parent", "name", "is_held", "edges", "ancestors", "flow_nums", "external_triggers", "xtriggers", "is_queued", "is_runahead", "flow_wait", "runtime", "graph_depth"] + __slots__ = ("stamp", "id", "task", "state", "cycle_point", "depth", "job_submits", "outputs", "namespace", "prerequisites", "jobs", "first_parent", "name", "is_held", "edges", "ancestors", "flow_nums", "external_triggers", "xtriggers", "is_queued", "is_runahead", "flow_wait", "runtime", "graph_depth") class OutputsEntry(_message.Message): - __slots__ = ["key", "value"] + __slots__ = ("key", "value") KEY_FIELD_NUMBER: _ClassVar[int] VALUE_FIELD_NUMBER: _ClassVar[int] key: str value: PbOutput def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[PbOutput, _Mapping]] = ...) -> None: ... class ExternalTriggersEntry(_message.Message): - __slots__ = ["key", "value"] + __slots__ = ("key", "value") KEY_FIELD_NUMBER: _ClassVar[int] VALUE_FIELD_NUMBER: _ClassVar[int] key: str value: PbTrigger def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[PbTrigger, _Mapping]] = ...) -> None: ... 
class XtriggersEntry(_message.Message): - __slots__ = ["key", "value"] + __slots__ = ("key", "value") KEY_FIELD_NUMBER: _ClassVar[int] VALUE_FIELD_NUMBER: _ClassVar[int] key: str @@ -375,7 +377,7 @@ class PbTaskProxy(_message.Message): def __init__(self, stamp: _Optional[str] = ..., id: _Optional[str] = ..., task: _Optional[str] = ..., state: _Optional[str] = ..., cycle_point: _Optional[str] = ..., depth: _Optional[int] = ..., job_submits: _Optional[int] = ..., outputs: _Optional[_Mapping[str, PbOutput]] = ..., namespace: _Optional[_Iterable[str]] = ..., prerequisites: _Optional[_Iterable[_Union[PbPrerequisite, _Mapping]]] = ..., jobs: _Optional[_Iterable[str]] = ..., first_parent: _Optional[str] = ..., name: _Optional[str] = ..., is_held: bool = ..., edges: _Optional[_Iterable[str]] = ..., ancestors: _Optional[_Iterable[str]] = ..., flow_nums: _Optional[str] = ..., external_triggers: _Optional[_Mapping[str, PbTrigger]] = ..., xtriggers: _Optional[_Mapping[str, PbTrigger]] = ..., is_queued: bool = ..., is_runahead: bool = ..., flow_wait: bool = ..., runtime: _Optional[_Union[PbRuntime, _Mapping]] = ..., graph_depth: _Optional[int] = ...) -> None: ... class PbFamily(_message.Message): - __slots__ = ["stamp", "id", "name", "meta", "depth", "proxies", "parents", "child_tasks", "child_families", "first_parent", "runtime"] + __slots__ = ("stamp", "id", "name", "meta", "depth", "proxies", "parents", "child_tasks", "child_families", "first_parent", "runtime") STAMP_FIELD_NUMBER: _ClassVar[int] ID_FIELD_NUMBER: _ClassVar[int] NAME_FIELD_NUMBER: _ClassVar[int] @@ -401,9 +403,9 @@ class PbFamily(_message.Message): def __init__(self, stamp: _Optional[str] = ..., id: _Optional[str] = ..., name: _Optional[str] = ..., meta: _Optional[_Union[PbMeta, _Mapping]] = ..., depth: _Optional[int] = ..., proxies: _Optional[_Iterable[str]] = ..., parents: _Optional[_Iterable[str]] = ..., child_tasks: _Optional[_Iterable[str]] = ..., child_families: _Optional[_Iterable[str]] = ..., first_parent: _Optional[str] = ..., runtime: _Optional[_Union[PbRuntime, _Mapping]] = ...) -> None: ... 
class PbFamilyProxy(_message.Message): - __slots__ = ["stamp", "id", "cycle_point", "name", "family", "state", "depth", "first_parent", "child_tasks", "child_families", "is_held", "ancestors", "states", "state_totals", "is_held_total", "is_queued", "is_queued_total", "is_runahead", "is_runahead_total", "runtime", "graph_depth"] + __slots__ = ("stamp", "id", "cycle_point", "name", "family", "state", "depth", "first_parent", "child_tasks", "child_families", "is_held", "ancestors", "states", "state_totals", "is_held_total", "is_queued", "is_queued_total", "is_runahead", "is_runahead_total", "runtime", "graph_depth") class StateTotalsEntry(_message.Message): - __slots__ = ["key", "value"] + __slots__ = ("key", "value") KEY_FIELD_NUMBER: _ClassVar[int] VALUE_FIELD_NUMBER: _ClassVar[int] key: str @@ -454,7 +456,7 @@ class PbFamilyProxy(_message.Message): def __init__(self, stamp: _Optional[str] = ..., id: _Optional[str] = ..., cycle_point: _Optional[str] = ..., name: _Optional[str] = ..., family: _Optional[str] = ..., state: _Optional[str] = ..., depth: _Optional[int] = ..., first_parent: _Optional[str] = ..., child_tasks: _Optional[_Iterable[str]] = ..., child_families: _Optional[_Iterable[str]] = ..., is_held: bool = ..., ancestors: _Optional[_Iterable[str]] = ..., states: _Optional[_Iterable[str]] = ..., state_totals: _Optional[_Mapping[str, int]] = ..., is_held_total: _Optional[int] = ..., is_queued: bool = ..., is_queued_total: _Optional[int] = ..., is_runahead: bool = ..., is_runahead_total: _Optional[int] = ..., runtime: _Optional[_Union[PbRuntime, _Mapping]] = ..., graph_depth: _Optional[int] = ...) -> None: ... class PbEdge(_message.Message): - __slots__ = ["stamp", "id", "source", "target", "suicide", "cond"] + __slots__ = ("stamp", "id", "source", "target", "suicide", "cond") STAMP_FIELD_NUMBER: _ClassVar[int] ID_FIELD_NUMBER: _ClassVar[int] SOURCE_FIELD_NUMBER: _ClassVar[int] @@ -470,7 +472,7 @@ class PbEdge(_message.Message): def __init__(self, stamp: _Optional[str] = ..., id: _Optional[str] = ..., source: _Optional[str] = ..., target: _Optional[str] = ..., suicide: bool = ..., cond: bool = ...) -> None: ... class PbEdges(_message.Message): - __slots__ = ["id", "edges", "workflow_polling_tasks", "leaves", "feet"] + __slots__ = ("id", "edges", "workflow_polling_tasks", "leaves", "feet") ID_FIELD_NUMBER: _ClassVar[int] EDGES_FIELD_NUMBER: _ClassVar[int] WORKFLOW_POLLING_TASKS_FIELD_NUMBER: _ClassVar[int] @@ -484,7 +486,7 @@ class PbEdges(_message.Message): def __init__(self, id: _Optional[str] = ..., edges: _Optional[_Iterable[str]] = ..., workflow_polling_tasks: _Optional[_Iterable[_Union[PbPollTask, _Mapping]]] = ..., leaves: _Optional[_Iterable[str]] = ..., feet: _Optional[_Iterable[str]] = ...) -> None: ... 
class PbEntireWorkflow(_message.Message): - __slots__ = ["workflow", "tasks", "task_proxies", "jobs", "families", "family_proxies", "edges"] + __slots__ = ("workflow", "tasks", "task_proxies", "jobs", "families", "family_proxies", "edges") WORKFLOW_FIELD_NUMBER: _ClassVar[int] TASKS_FIELD_NUMBER: _ClassVar[int] TASK_PROXIES_FIELD_NUMBER: _ClassVar[int] @@ -502,7 +504,7 @@ class PbEntireWorkflow(_message.Message): def __init__(self, workflow: _Optional[_Union[PbWorkflow, _Mapping]] = ..., tasks: _Optional[_Iterable[_Union[PbTask, _Mapping]]] = ..., task_proxies: _Optional[_Iterable[_Union[PbTaskProxy, _Mapping]]] = ..., jobs: _Optional[_Iterable[_Union[PbJob, _Mapping]]] = ..., families: _Optional[_Iterable[_Union[PbFamily, _Mapping]]] = ..., family_proxies: _Optional[_Iterable[_Union[PbFamilyProxy, _Mapping]]] = ..., edges: _Optional[_Iterable[_Union[PbEdge, _Mapping]]] = ...) -> None: ... class EDeltas(_message.Message): - __slots__ = ["time", "checksum", "added", "updated", "pruned", "reloaded"] + __slots__ = ("time", "checksum", "added", "updated", "pruned", "reloaded") TIME_FIELD_NUMBER: _ClassVar[int] CHECKSUM_FIELD_NUMBER: _ClassVar[int] ADDED_FIELD_NUMBER: _ClassVar[int] @@ -518,7 +520,7 @@ class EDeltas(_message.Message): def __init__(self, time: _Optional[float] = ..., checksum: _Optional[int] = ..., added: _Optional[_Iterable[_Union[PbEdge, _Mapping]]] = ..., updated: _Optional[_Iterable[_Union[PbEdge, _Mapping]]] = ..., pruned: _Optional[_Iterable[str]] = ..., reloaded: bool = ...) -> None: ... class FDeltas(_message.Message): - __slots__ = ["time", "checksum", "added", "updated", "pruned", "reloaded"] + __slots__ = ("time", "checksum", "added", "updated", "pruned", "reloaded") TIME_FIELD_NUMBER: _ClassVar[int] CHECKSUM_FIELD_NUMBER: _ClassVar[int] ADDED_FIELD_NUMBER: _ClassVar[int] @@ -534,7 +536,7 @@ class FDeltas(_message.Message): def __init__(self, time: _Optional[float] = ..., checksum: _Optional[int] = ..., added: _Optional[_Iterable[_Union[PbFamily, _Mapping]]] = ..., updated: _Optional[_Iterable[_Union[PbFamily, _Mapping]]] = ..., pruned: _Optional[_Iterable[str]] = ..., reloaded: bool = ...) -> None: ... class FPDeltas(_message.Message): - __slots__ = ["time", "checksum", "added", "updated", "pruned", "reloaded"] + __slots__ = ("time", "checksum", "added", "updated", "pruned", "reloaded") TIME_FIELD_NUMBER: _ClassVar[int] CHECKSUM_FIELD_NUMBER: _ClassVar[int] ADDED_FIELD_NUMBER: _ClassVar[int] @@ -550,7 +552,7 @@ class FPDeltas(_message.Message): def __init__(self, time: _Optional[float] = ..., checksum: _Optional[int] = ..., added: _Optional[_Iterable[_Union[PbFamilyProxy, _Mapping]]] = ..., updated: _Optional[_Iterable[_Union[PbFamilyProxy, _Mapping]]] = ..., pruned: _Optional[_Iterable[str]] = ..., reloaded: bool = ...) -> None: ... class JDeltas(_message.Message): - __slots__ = ["time", "checksum", "added", "updated", "pruned", "reloaded"] + __slots__ = ("time", "checksum", "added", "updated", "pruned", "reloaded") TIME_FIELD_NUMBER: _ClassVar[int] CHECKSUM_FIELD_NUMBER: _ClassVar[int] ADDED_FIELD_NUMBER: _ClassVar[int] @@ -566,7 +568,7 @@ class JDeltas(_message.Message): def __init__(self, time: _Optional[float] = ..., checksum: _Optional[int] = ..., added: _Optional[_Iterable[_Union[PbJob, _Mapping]]] = ..., updated: _Optional[_Iterable[_Union[PbJob, _Mapping]]] = ..., pruned: _Optional[_Iterable[str]] = ..., reloaded: bool = ...) -> None: ... 
class TDeltas(_message.Message): - __slots__ = ["time", "checksum", "added", "updated", "pruned", "reloaded"] + __slots__ = ("time", "checksum", "added", "updated", "pruned", "reloaded") TIME_FIELD_NUMBER: _ClassVar[int] CHECKSUM_FIELD_NUMBER: _ClassVar[int] ADDED_FIELD_NUMBER: _ClassVar[int] @@ -582,7 +584,7 @@ class TDeltas(_message.Message): def __init__(self, time: _Optional[float] = ..., checksum: _Optional[int] = ..., added: _Optional[_Iterable[_Union[PbTask, _Mapping]]] = ..., updated: _Optional[_Iterable[_Union[PbTask, _Mapping]]] = ..., pruned: _Optional[_Iterable[str]] = ..., reloaded: bool = ...) -> None: ... class TPDeltas(_message.Message): - __slots__ = ["time", "checksum", "added", "updated", "pruned", "reloaded"] + __slots__ = ("time", "checksum", "added", "updated", "pruned", "reloaded") TIME_FIELD_NUMBER: _ClassVar[int] CHECKSUM_FIELD_NUMBER: _ClassVar[int] ADDED_FIELD_NUMBER: _ClassVar[int] @@ -598,7 +600,7 @@ class TPDeltas(_message.Message): def __init__(self, time: _Optional[float] = ..., checksum: _Optional[int] = ..., added: _Optional[_Iterable[_Union[PbTaskProxy, _Mapping]]] = ..., updated: _Optional[_Iterable[_Union[PbTaskProxy, _Mapping]]] = ..., pruned: _Optional[_Iterable[str]] = ..., reloaded: bool = ...) -> None: ... class WDeltas(_message.Message): - __slots__ = ["time", "added", "updated", "reloaded", "pruned"] + __slots__ = ("time", "added", "updated", "reloaded", "pruned") TIME_FIELD_NUMBER: _ClassVar[int] ADDED_FIELD_NUMBER: _ClassVar[int] UPDATED_FIELD_NUMBER: _ClassVar[int] @@ -612,7 +614,7 @@ class WDeltas(_message.Message): def __init__(self, time: _Optional[float] = ..., added: _Optional[_Union[PbWorkflow, _Mapping]] = ..., updated: _Optional[_Union[PbWorkflow, _Mapping]] = ..., reloaded: bool = ..., pruned: _Optional[str] = ...) -> None: ... 
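Because every scalar field in `PbRuntime` is declared `optional`, a delta message only carries the fields that were explicitly set, and merging it leaves the rest untouched. A hedged illustration of that merge behaviour (this loosely mirrors how updated elements are folded into the data store; it is not the data store code itself):

```python
# Sketch: merging a runtime delta only overwrites fields present in it.
from cylc.flow.data_messages_pb2 import PbRuntime

current = PbRuntime(platform='hpc', run_mode='live')
update = PbRuntime(run_mode='skip')  # only run_mode is set

current.MergeFrom(update)
assert current.platform == 'hpc'   # absent in update, so untouched
assert current.run_mode == 'skip'  # present in update, so overwritten
```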
class AllDeltas(_message.Message): - __slots__ = ["families", "family_proxies", "jobs", "tasks", "task_proxies", "edges", "workflow"] + __slots__ = ("families", "family_proxies", "jobs", "tasks", "task_proxies", "edges", "workflow") FAMILIES_FIELD_NUMBER: _ClassVar[int] FAMILY_PROXIES_FIELD_NUMBER: _ClassVar[int] JOBS_FIELD_NUMBER: _ClassVar[int] diff --git a/cylc/flow/data_store_mgr.py b/cylc/flow/data_store_mgr.py index 459abf77e56..efd4bae8415 100644 --- a/cylc/flow/data_store_mgr.py +++ b/cylc/flow/data_store_mgr.py @@ -259,6 +259,7 @@ def runtime_from_config(rtconfig): pre_script=rtconfig['pre-script'], post_script=rtconfig['post-script'], work_sub_dir=rtconfig['work sub-directory'], + run_mode=rtconfig['run mode'], execution_time_limit=str(rtconfig['execution time limit'] or ''), execution_polling_intervals=listjoin( rtconfig['execution polling intervals'] diff --git a/cylc/flow/network/schema.py b/cylc/flow/network/schema.py index 355f12c4981..ba8cad6890d 100644 --- a/cylc/flow/network/schema.py +++ b/cylc/flow/network/schema.py @@ -51,6 +51,7 @@ from cylc.flow.id import Tokens from cylc.flow.task_outputs import SORT_ORDERS from cylc.flow.task_state import ( + RunMode, TASK_STATUSES_ORDERED, TASK_STATUS_DESC, TASK_STATUS_WAITING, @@ -66,6 +67,7 @@ from cylc.flow.workflow_status import StopMode if TYPE_CHECKING: + from enum import Enum from graphql import ResolveInfo from graphql.type.definition import ( GraphQLNamedType, @@ -596,6 +598,29 @@ class Meta: string_extended = String() +def describe_run_mode(run_mode: Optional['Enum']) -> str: + """Returns description for a workflow/task run mode.""" + if not run_mode: + return "" + return getattr(RunMode, run_mode.value.upper()).__doc__ + + +WorkflowRunMode = graphene.Enum( + 'WorkflowRunMode', + [(m.capitalize(), m) for m in RunMode.WORKFLOW_MODES.value], + description=describe_run_mode, +) +"""The run mode for the workflow.""" + + +TaskRunMode = graphene.Enum( + 'TaskRunMode', + [(m.capitalize(), m) for m in RunMode.WORKFLOW_MODES.value], + description=describe_run_mode, +) +"""The run mode for tasks.""" + + class Workflow(ObjectType): class Meta: description = 'Global workflow info.' @@ -823,6 +848,7 @@ class Meta: directives = graphene.List(RuntimeSetting, resolver=resolve_json_dump) environment = graphene.List(RuntimeSetting, resolver=resolve_json_dump) outputs = graphene.List(RuntimeSetting, resolver=resolve_json_dump) + run_mode = TaskRunMode(default_value=TaskRunMode.Live.name) RUNTIME_FIELD_TO_CFG_MAP = { @@ -1503,9 +1529,9 @@ class RuntimeConfiguration(String): class BroadcastMode(graphene.Enum): - Set = 'put_broadcast' - Clear = 'clear_broadcast' - Expire = 'expire_broadcast' + Set = cast('Enum', 'put_broadcast') + Clear = cast('Enum', 'clear_broadcast') + Expire = cast('Enum', 'expire_broadcast') @property def description(self): @@ -1630,10 +1656,10 @@ class WorkflowStopMode(graphene.Enum): # * Graphene requires special enums. # * We only want to offer a subset of stop modes (REQUEST_* only). 
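As the comments above note, graphene needs purpose-built enums. A hedged sketch of how a run-mode enum can be built with graphene's functional API, along the lines of the `WorkflowRunMode`/`TaskRunMode` definitions in this diff (the value list is shortened for illustration):

```python
# Sketch: building a graphene Enum from run-mode strings.
import graphene

TaskRunMode = graphene.Enum(
    'TaskRunMode',
    # (EnumName, value) pairs, e.g. ('Live', 'live'):
    [(m.capitalize(), m) for m in ('live', 'skip')],
)

assert TaskRunMode.Live.value == 'live'
assert TaskRunMode.Skip.value == 'skip'
```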
- Clean = StopMode.REQUEST_CLEAN.value # type: graphene.Enum - Kill = StopMode.REQUEST_KILL.value # type: graphene.Enum - Now = StopMode.REQUEST_NOW.value # type: graphene.Enum - NowNow = StopMode.REQUEST_NOW_NOW.value # type: graphene.Enum + Clean = cast('Enum', StopMode.REQUEST_CLEAN.value) + Kill = cast('Enum', StopMode.REQUEST_KILL.value) + Now = cast('Enum', StopMode.REQUEST_NOW.value) + NowNow = cast('Enum', StopMode.REQUEST_NOW_NOW.value) @property def description(self): @@ -1690,7 +1716,7 @@ class Arguments: mode = BroadcastMode( # use the enum name as the default value # https://github.com/graphql-python/graphql-core-legacy/issues/166 - default_value=BroadcastMode.Set.name, # type: ignore + default_value=BroadcastMode.Set.name, description='What type of broadcast is this?', required=True ) diff --git a/cylc/flow/platforms.py b/cylc/flow/platforms.py index d06c84ade92..fa49e598ec2 100644 --- a/cylc/flow/platforms.py +++ b/cylc/flow/platforms.py @@ -31,6 +31,7 @@ PlatformLookupError, CylcError, NoHostsError, NoPlatformsError) from cylc.flow.cfgspec.glbl_cfg import glbl_cfg from cylc.flow.hostuserutil import is_remote_host +from cylc.flow.task_state import RunMode if TYPE_CHECKING: from cylc.flow.parsec.OrderedDict import OrderedDictWithDefaults @@ -265,6 +266,10 @@ def platform_from_name( platform_data['name'] = platform_name return platform_data + # If the "platform" is the name of a jobless run mode and is not otherwise defined, run on localhost: + if platform_name in RunMode.JOBLESS_MODES.value: + return platforms['localhost'] + raise PlatformLookupError( f"No matching platform \"{platform_name}\" found") @@ -647,7 +652,7 @@ def get_install_target_to_platforms_map( Return {install_target_1: [platform_1_dict, platform_2_dict, ...], ...} """ ret: Dict[str, List[Dict[str, Any]]] = {} - for p_name in set(platform_names): + for p_name in set(platform_names) - set(RunMode.JOBLESS_MODES.value): try: platform = platform_from_name(p_name) except PlatformLookupError as exc: @@ -656,6 +661,14 @@ def get_install_target_to_platforms_map( else: install_target = get_install_target_from_platform(platform) ret.setdefault(install_target, []).append(platform) + + # Map jobless modes to localhost. + if 'localhost' in ret: + ret['localhost'] += [ + {'name': mode} for mode in RunMode.JOBLESS_MODES.value] + else: + ret['localhost'] = [ + {'name': mode} for mode in RunMode.JOBLESS_MODES.value] return ret diff --git a/cylc/flow/prerequisite.py b/cylc/flow/prerequisite.py index 486c7e84ab3..04ea4596c09 100644 --- a/cylc/flow/prerequisite.py +++ b/cylc/flow/prerequisite.py @@ -72,6 +72,8 @@ def coerce(tuple_: AnyPrereqMessage) -> 'PrereqMessage': SatisfiedState = Literal[ 'satisfied naturally', 'satisfied from database', + 'satisfied by skip mode', + 'satisfied by simulation mode', 'force satisfied', False ] @@ -101,6 +103,12 @@ class Prerequisite: SATISFIED_TEMPLATE = 'bool(self._satisfied[("%s", "%s", "%s")])' MESSAGE_TEMPLATE = r'%s/%s %s' + DEP_STATE_SATISFIED: SatisfiedState = 'satisfied naturally' + DEP_STATE_SATISFIED_BY = 'satisfied by {} mode' + DEP_STATE_OVERRIDDEN = 'force satisfied' + DEP_STATE_UNSATISFIED = False + SATISFIED_MODE_RE = re.compile(r'satisfied by .* mode') + def __init__(self, point: 'PointBase'): # The cycle point to which this prerequisite belongs.
# cylc.flow.cycling.PointBase @@ -253,13 +261,22 @@ def _eval_satisfied(self) -> bool: ) from None return res - def satisfy_me(self, outputs: Iterable['Tokens']) -> 'Set[Tokens]': + def satisfy_me( + self, outputs: Iterable['Tokens'], + mode: Literal['live', 'skip', 'simulation', 'dummy'] = 'live' + ) -> 'Set[Tokens]': """Attempt to satisfy me with given outputs. Updates cache with the result. Return outputs that match. """ + satisfied_message: SatisfiedState + if mode != 'live': + satisfied_message = self.DEP_STATE_SATISFIED_BY.format( + mode) # type: ignore + else: + satisfied_message = self.DEP_STATE_SATISFIED valid = set() for output in outputs: prereq = PrereqMessage( @@ -268,7 +285,7 @@ def satisfy_me(self, outputs: Iterable['Tokens']) -> 'Set[Tokens]': if prereq not in self._satisfied: continue valid.add(output) - self[prereq] = 'satisfied naturally' + self[prereq] = satisfied_message return valid def api_dump(self) -> Optional[PbPrerequisite]: diff --git a/cylc/flow/run_modes/dummy.py b/cylc/flow/run_modes/dummy.py new file mode 100644 index 00000000000..91935ee5c3b --- /dev/null +++ b/cylc/flow/run_modes/dummy.py @@ -0,0 +1,125 @@ +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +"""Utilities supporting dummy mode. + +Dummy mode shares settings with simulation mode. +""" + +from typing import TYPE_CHECKING, Any, Dict, Tuple + +from cylc.flow.run_modes.simulation import ( + ModeSettings, + disable_platforms, + get_simulated_run_len, + parse_fail_cycle_points +) +from cylc.flow.task_state import RunMode +from cylc.flow.platforms import get_platform + + +if TYPE_CHECKING: + from cylc.flow.task_job_mgr import TaskJobManager + from cylc.flow.task_proxy import TaskProxy + from typing_extensions import Literal + + +CLEAR_THESE_SCRIPTS = [ + 'init-script', + 'env-script', + 'pre-script', + 'post-script', + 'err-script', + 'exit-script', +] + + +def submit_task_job( + task_job_mgr: 'TaskJobManager', + itask: 'TaskProxy', + rtconfig: Dict[str, Any], + workflow: str, + now: Tuple[float, str] +) -> 'Literal[False]': + """Submit a task in dummy mode. + + Returns: + False, indicating that TaskJobManager needs to continue running the + live mode path.
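The mode argument added to `Prerequisite.satisfy_me` above feeds a mode-aware satisfaction label. A self-contained sketch of that selection logic:

```python
# Sketch of the label selection in Prerequisite.satisfy_me: live-mode
# outputs stay 'satisfied naturally'; other modes record their origin.
DEP_STATE_SATISFIED = 'satisfied naturally'
DEP_STATE_SATISFIED_BY = 'satisfied by {} mode'

def satisfied_label(mode: str = 'live') -> str:
    if mode != 'live':
        return DEP_STATE_SATISFIED_BY.format(mode)
    return DEP_STATE_SATISFIED

assert satisfied_label() == 'satisfied naturally'
assert satisfied_label('skip') == 'satisfied by skip mode'
```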
+ """ + configure_dummy_mode( + rtconfig, itask.tdef.rtconfig['simulation']['fail cycle points']) + + itask.summary['started_time'] = now[0] + task_job_mgr._set_retry_timers(itask, rtconfig) + + itask.mode_settings = ModeSettings( + itask, + task_job_mgr.workflow_db_mgr, + rtconfig + ) + + itask.platform = get_platform() + itask.platform['name'] = RunMode.DUMMY.value + itask.summary['job_runner_name'] = RunMode.DUMMY.value + itask.summary[task_job_mgr.KEY_EXECUTE_TIME_LIMIT] = ( + itask.mode_settings.simulated_run_length) + itask.jobs.append( + task_job_mgr.get_simulation_job_conf(itask, workflow)) + task_job_mgr.workflow_db_mgr.put_insert_task_jobs( + itask, { + 'time_submit': now[1], + 'try_num': itask.get_try_num(), + } + ) + + return False + + +def configure_dummy_mode(rtc: Dict[str, Any], fallback: str) -> None: + """Adjust task defs for dummy mode. + """ + rtc['submission retry delays'] = [1] + # Generate dummy scripting. + + for script in CLEAR_THESE_SCRIPTS: + rtc[script] = '' + + rtc['script'] = build_dummy_script( + rtc, get_simulated_run_len(rtc)) + disable_platforms(rtc) + # Disable environment, in case it depends on env-script. + rtc['environment'] = {} + rtc["simulation"][ + "fail cycle points" + ] = parse_fail_cycle_points( + rtc["simulation"]["fail cycle points"], fallback + ) + + +def build_dummy_script(rtc: Dict[str, Any], sleep_sec: int) -> str: + """Create fake scripting for dummy mode script. + """ + script = "sleep %d" % sleep_sec + # Dummy message outputs. + for msg in rtc['outputs'].values(): + script += "\ncylc message '%s'" % msg + if rtc['simulation']['fail try 1 only']: + arg1 = "true" + else: + arg1 = "false" + arg2 = " ".join(rtc['simulation']['fail cycle points']) + script += "\ncylc__job__dummy_result %s %s || exit 1" % (arg1, arg2) + return script diff --git a/cylc/flow/run_modes/nonlive.py b/cylc/flow/run_modes/nonlive.py new file mode 100644 index 00000000000..5bea9f70be5 --- /dev/null +++ b/cylc/flow/run_modes/nonlive.py @@ -0,0 +1,55 @@ +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +"""Utilities supporting all nonlive modes +""" +from typing import TYPE_CHECKING, Dict, List + +from cylc.flow import LOG +from cylc.flow.run_modes.skip import check_task_skip_config +from cylc.flow.task_state import RunMode + +if TYPE_CHECKING: + from cylc.flow.taskdef import TaskDef + + +def run_mode_validate_checks(taskdefs: 'Dict[str, TaskDef]') -> None: + """Warn user if any tasks have "run mode" set to skip. 
+ """ + warn_nonlive: Dict[str, List[str]] = { + RunMode.SKIP.value: [], + } + + # Run through taskdefs looking for those with nonlive modes + for taskdef in taskdefs.values(): + # Add to list of tasks to be run in non-live modes: + if ( + taskdef.rtconfig.get('run mode', None) + in { + RunMode.SIMULATION.value, + RunMode.SKIP.value, + RunMode.DUMMY.value + } + ): + warn_nonlive[taskdef.rtconfig['run mode']].append(taskdef.name) + + # Run any mode specific validation checks: + check_task_skip_config(taskdef) + + if any(warn_nonlive.values()): + message = 'The following tasks are set to run in skip mode:' + for taskname in warn_nonlive[RunMode.SKIP.value]: + message += f'\n * {taskname}' + LOG.warning(message) diff --git a/cylc/flow/simulation.py b/cylc/flow/run_modes/simulation.py similarity index 62% rename from cylc/flow/simulation.py rename to cylc/flow/run_modes/simulation.py index 8ec4d279cb9..122277bcf4c 100644 --- a/cylc/flow/simulation.py +++ b/cylc/flow/run_modes/simulation.py @@ -13,40 +13,100 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . -"""Utilities supporting simulation and skip modes +"""Utilities supporting simulation mode """ from dataclasses import dataclass -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union +from logging import INFO +from typing import ( + TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union) from time import time from metomi.isodatetime.parsers import DurationParser from cylc.flow import LOG +from cylc.flow.cycling import PointBase from cylc.flow.cycling.loader import get_point from cylc.flow.exceptions import PointParsingError from cylc.flow.platforms import FORBIDDEN_WITH_PLATFORM +from cylc.flow.task_outputs import TASK_OUTPUT_SUBMITTED from cylc.flow.task_state import ( TASK_STATUS_RUNNING, TASK_STATUS_FAILED, TASK_STATUS_SUCCEEDED, ) from cylc.flow.wallclock import get_unix_time_from_time_string -from cylc.flow.workflow_status import RunMode +from cylc.flow.task_state import RunMode if TYPE_CHECKING: from cylc.flow.task_events_mgr import TaskEventsManager + from cylc.flow.task_job_mgr import TaskJobManager from cylc.flow.task_proxy import TaskProxy from cylc.flow.workflow_db_mgr import WorkflowDatabaseManager - from cylc.flow.cycling import PointBase + from typing_extensions import Literal + + +def submit_task_job( + task_job_mgr: 'TaskJobManager', + itask: 'TaskProxy', + rtconfig: Dict[str, Any], + workflow: str, + now: Tuple[float, str] +) -> 'Literal[True]': + """Submit a task in simulation mode. + + Returns: + True - indicating that TaskJobManager need take no further action. 
+ """ + configure_sim_mode( + rtconfig, + itask.tdef.rtconfig['simulation']['fail cycle points']) + itask.summary['started_time'] = now[0] + task_job_mgr._set_retry_timers(itask, rtconfig) + itask.mode_settings = ModeSettings( + itask, + task_job_mgr.workflow_db_mgr, + rtconfig + ) + itask.waiting_on_job_prep = False + itask.submit_num += 1 + + itask.platform = { + 'name': RunMode.SIMULATION.value, 'install target': 'localhost'} + itask.platform['name'] = RunMode.SIMULATION.value + itask.summary['job_runner_name'] = RunMode.SIMULATION.value + itask.summary[task_job_mgr.KEY_EXECUTE_TIME_LIMIT] = ( + itask.mode_settings.simulated_run_length + ) + itask.jobs.append( + task_job_mgr.get_simulation_job_conf(itask, workflow) + ) + task_job_mgr.task_events_mgr.process_message( + itask, INFO, TASK_OUTPUT_SUBMITTED, + ) + task_job_mgr.workflow_db_mgr.put_insert_task_jobs( + itask, { + 'time_submit': now[1], + 'try_num': itask.get_try_num(), + 'flow_nums': str(list(itask.flow_nums)), + 'is_manual_submit': itask.is_manual_submit, + 'job_runner_name': RunMode.SIMULATION.value, + 'platform_name': RunMode.SIMULATION.value, + 'submit_status': 0 # Submission has succeeded + } + ) + itask.state.status = TASK_STATUS_RUNNING + return True @dataclass class ModeSettings: """A store of state for simulation modes. - Used instead of modifying the runtime config. + Used instead of modifying the runtime config. We want to leave the + config unchanged so that clearing a broadcast change of run mode + clears the run mode settings. Args: itask: @@ -79,20 +139,18 @@ def __init__( db_mgr: 'WorkflowDatabaseManager', rtconfig: Dict[str, Any] ): - # itask.summary['started_time'] and mode_settings.timeout need # repopulating from the DB on workflow restart: started_time = itask.summary['started_time'] try_num = None if started_time is None: - # Get DB info + # This is a restart - Get DB info db_info = db_mgr.pri_dao.select_task_job( itask.tokens['cycle'], itask.tokens['task'], itask.tokens['job'], ) - # Get the started time: if db_info['time_submit']: started_time = get_unix_time_from_time_string( db_info["time_submit"]) @@ -100,28 +158,20 @@ def __init__( else: started_time = time() - # Get the try number: try_num = db_info["try_num"] # Parse fail cycle points: - if rtconfig != itask.tdef.rtconfig: - try: - rtconfig["simulation"][ - "fail cycle points" - ] = parse_fail_cycle_points( - rtconfig["simulation"]["fail cycle points"] - ) - except PointParsingError as exc: - # Broadcast Fail CP didn't parse - LOG.warning( - 'Broadcast fail cycle point was invalid:\n' - f' {exc.args[0]}' - ) - rtconfig['simulation'][ - 'fail cycle points' - ] = itask.tdef.rtconfig['simulation']['fail cycle points'] + if not rtconfig: + rtconfig = itask.tdef.rtconfig + if rtconfig and rtconfig != itask.tdef.rtconfig: + rtconfig["simulation"][ + "fail cycle points" + ] = parse_fail_cycle_points( + rtconfig["simulation"]["fail cycle points"], + itask.tdef.rtconfig['simulation']['fail cycle points'] + ) - # Calculate simulation info: + # Calculate simulation outcome and run-time: self.simulated_run_length = ( get_simulated_run_len(rtconfig)) self.sim_task_fails = sim_task_failed( @@ -132,37 +182,39 @@ def __init__( self.timeout = started_time + self.simulated_run_length -def configure_sim_modes(taskdefs, sim_mode): - """Adjust task defs for simulation and dummy mode. - +def configure_sim_mode(rtc, fallback): + """Adjust task defs for simulation mode. + + Example: + >>> this = configure_sim_mode + >>> rtc = { + ... 
'submission retry delays': [42, 24, 23], + ... 'environment': {'DoNot': '"WantThis"'}, + ... 'simulation': {'fail cycle points': ['all']} + ... } + >>> this(rtc, [53]) + >>> rtc['submission retry delays'] + [1] + >>> rtc['environment'] + {} + >>> rtc['simulation'] + {'fail cycle points': None} + >>> rtc['platform'] + 'localhost' """ - dummy_mode = (sim_mode == RunMode.DUMMY) - - for tdef in taskdefs: - # Compute simulated run time by scaling the execution limit. - rtc = tdef.rtconfig - - rtc['submission retry delays'] = [1] + rtc['submission retry delays'] = [1] - if dummy_mode: - # Generate dummy scripting. - rtc['init-script'] = "" - rtc['env-script'] = "" - rtc['pre-script'] = "" - rtc['post-script'] = "" - rtc['script'] = build_dummy_script( - rtc, get_simulated_run_len(rtc)) + disable_platforms(rtc) - disable_platforms(rtc) + # Disable environment, in case it depends on env-script. + rtc['environment'] = {} - # Disable environment, in case it depends on env-script. - rtc['environment'] = {} - - rtc["simulation"][ - "fail cycle points" - ] = parse_fail_cycle_points( - rtc["simulation"]["fail cycle points"] - ) + rtc["simulation"][ + "fail cycle points" + ] = parse_fail_cycle_points( + rtc["simulation"]["fail cycle points"], + fallback + ) def get_simulated_run_len(rtc: Dict[str, Any]) -> int: @@ -184,24 +236,6 @@ def get_simulated_run_len(rtc: Dict[str, Any]) -> int: return sleep_sec -def build_dummy_script(rtc: Dict[str, Any], sleep_sec: int) -> str: - """Create fake scripting for dummy mode. - - This is for Dummy mode only. - """ - script = "sleep %d" % sleep_sec - # Dummy message outputs. - for msg in rtc['outputs'].values(): - script += "\ncylc message '%s'" % msg - if rtc['simulation']['fail try 1 only']: - arg1 = "true" - else: - arg1 = "false" - arg2 = " ".join(rtc['simulation']['fail cycle points']) - script += "\ncylc__job__dummy_result %s %s || exit 1" % (arg1, arg2) - return script - - def disable_platforms( rtc: Dict[str, Any] ) -> None: @@ -222,7 +256,7 @@ def disable_platforms( def parse_fail_cycle_points( - f_pts_orig: List[str] + f_pts_orig: List[str], fallback ) -> 'Union[None, List[PointBase]]': """Parse `[simulation][fail cycle points]`. @@ -231,11 +265,11 @@ def parse_fail_cycle_points( Examples: >>> this = parse_fail_cycle_points - >>> this(['all']) is None + >>> this(['all'], ['42']) is None True - >>> this([]) + >>> this([], ['42']) [] - >>> this(None) is None + >>> this(None, ['42']) is None True """ f_pts: 'Optional[List[PointBase]]' = [] @@ -247,7 +281,16 @@ def parse_fail_cycle_points( elif f_pts_orig: f_pts = [] for point_str in f_pts_orig: - f_pts.append(get_point(point_str).standardise()) + if isinstance(point_str, PointBase): + f_pts.append(point_str) + else: + try: + f_pts.append(get_point(point_str).standardise()) + except PointParsingError: + LOG.warning( + f'Invalid ISO 8601 date representation: {point_str}' + ) + return fallback return f_pts @@ -266,13 +309,19 @@ def sim_time_check( now = time() sim_task_state_changed: bool = False for itask in itasks: - if itask.state.status != TASK_STATUS_RUNNING: + if ( + itask.state.status != TASK_STATUS_RUNNING + or itask.run_mode and itask.run_mode != RunMode.SIMULATION.value + ): continue # This occurs if the workflow has been restarted. 
if itask.mode_settings is None: rtconfig = task_events_manager.broadcast_mgr.get_updated_rtconfig( itask) + # configure_sim_mode mutates rtconfig in place (it returns None): + configure_sim_mode( + rtconfig, + itask.tdef.rtconfig['simulation']['fail cycle points']) itask.mode_settings = ModeSettings( itask, db_mgr, diff --git a/cylc/flow/run_modes/skip.py b/cylc/flow/run_modes/skip.py new file mode 100644 index 00000000000..960301bfabc --- /dev/null +++ b/cylc/flow/run_modes/skip.py @@ -0,0 +1,161 @@ +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +"""Utilities supporting skip mode. +""" +from logging import INFO +from typing import ( + TYPE_CHECKING, Dict, List, Tuple) + +from cylc.flow.exceptions import WorkflowConfigError +from cylc.flow.task_outputs import ( + TASK_OUTPUT_SUBMITTED, + TASK_OUTPUT_SUCCEEDED, + TASK_OUTPUT_FAILED, + TASK_OUTPUT_STARTED +) +from cylc.flow.task_state import RunMode + +if TYPE_CHECKING: + from cylc.flow.taskdef import TaskDef + from cylc.flow.task_job_mgr import TaskJobManager + from cylc.flow.task_proxy import TaskProxy + from typing_extensions import Literal + + +def submit_task_job( + task_job_mgr: 'TaskJobManager', + itask: 'TaskProxy', + rtconfig: Dict, + now: Tuple[float, str] +) -> 'Literal[True]': + """Submit a task in skip mode. + + Returns: + True - indicating that TaskJobManager needs to take no further action. + """ + # Don't do anything if task is held: + if itask.state.is_held: + return True + + task_job_mgr._set_retry_timers(itask, rtconfig) + itask.summary['started_time'] = now[0] + itask.waiting_on_job_prep = False + itask.submit_num += 1 + + itask.platform = { + 'name': RunMode.SKIP.value, + 'install target': 'localhost', + 'hosts': ['localhost'], + 'disable task event handlers': + rtconfig['skip']['disable task event handlers'], + 'execution polling intervals': [] + } + itask.summary['job_runner_name'] = RunMode.SKIP.value + itask.run_mode = RunMode.SKIP.value + task_job_mgr.workflow_db_mgr.put_insert_task_jobs( + itask, { + 'time_submit': now[1], + 'try_num': itask.get_try_num(), + 'flow_nums': str(list(itask.flow_nums)), + 'is_manual_submit': itask.is_manual_submit, + 'job_runner_name': RunMode.SKIP.value, + 'platform_name': RunMode.SKIP.value, + 'submit_status': 0 # Submission has succeeded + } + ) + for output in process_outputs(itask, rtconfig): + task_job_mgr.task_events_mgr.process_message(itask, INFO, output) + + return True + + +def process_outputs(itask: 'TaskProxy', rtconfig: Dict) -> List[str]: + """Process Skip Mode Outputs: + + * By default, all required outputs will be generated plus succeeded + if success is optional. + * The outputs submitted and started are always produced and do not + need to be defined in outputs. + * If outputs is specified and does not include either + succeeded or failed then succeeded will be produced.
+ + Return: + A list of outputs to emit. + + """ + # Always produce `submitted` & `started` outputs first: + result: List[str] = [TASK_OUTPUT_SUBMITTED, TASK_OUTPUT_STARTED] + + conf_outputs = list(rtconfig['skip']['outputs']) + + # Send the rest of our outputs, unless they are succeeded or failed, + # which we hold back to prevent spurious warnings about unmet + # prerequisites when a "finished" output happens to arrive first. + for message in itask.state.outputs.iter_required_messages( + exclude=( + TASK_OUTPUT_SUCCEEDED if TASK_OUTPUT_FAILED + in conf_outputs else TASK_OUTPUT_FAILED + ) + ): + trigger = itask.state.outputs._message_to_trigger[message] + # Send the message unless it is succeeded/failed. + if ( + trigger not in { + TASK_OUTPUT_SUCCEEDED, + TASK_OUTPUT_FAILED, + TASK_OUTPUT_SUBMITTED, + TASK_OUTPUT_STARTED, + } + and (not conf_outputs or trigger in conf_outputs) + ): + result.append(message) + + # Add optional outputs specified in skip settings: + for message, trigger in itask.state.outputs._message_to_trigger.items(): + if trigger in conf_outputs and trigger not in result: + result.append(message) + + # Send succeeded/failed last. + if TASK_OUTPUT_FAILED in conf_outputs: + result.append(TASK_OUTPUT_FAILED) + elif TASK_OUTPUT_SUCCEEDED not in result: + result.append(TASK_OUTPUT_SUCCEEDED) + + return result + + +def check_task_skip_config(tdef: 'TaskDef') -> None: + """Validate Skip Mode configuration. + + Raises: + WorkflowConfigError: If outputs include both succeeded and failed. + """ + skip_config = tdef.rtconfig.get('skip', {}) + if not skip_config: + return + skip_outputs = skip_config.get('outputs', {}) + if not skip_outputs: + return + + # Error if outputs include succeeded and failed: + if ( + TASK_OUTPUT_SUCCEEDED in skip_outputs + and TASK_OUTPUT_FAILED in skip_outputs + ): + raise WorkflowConfigError( + f'Skip mode settings for task {tdef.name} have' + ' mutually exclusive outputs: succeeded AND failed.') diff --git a/cylc/flow/scheduler.py b/cylc/flow/scheduler.py index 92702b0b55e..133d430fff2 100644 --- a/cylc/flow/scheduler.py +++ b/cylc/flow/scheduler.py @@ -107,8 +107,13 @@ ) from cylc.flow.profiler import Profiler from cylc.flow.resources import get_resources -from cylc.flow.simulation import sim_time_check +from cylc.flow.run_modes.simulation import sim_time_check from cylc.flow.subprocpool import SubProcPool +from cylc.flow.templatevars import eval_var +from cylc.flow.workflow_db_mgr import WorkflowDatabaseManager +from cylc.flow.workflow_events import WorkflowEventHandler +from cylc.flow.workflow_status import StopMode, AutoRestartMode +from cylc.flow.taskdef import TaskDef from cylc.flow.task_events_mgr import TaskEventsManager from cylc.flow.task_job_mgr import TaskJobManager from cylc.flow.task_pool import TaskPool @@ -127,9 +132,7 @@ TASK_STATUS_RUNNING, TASK_STATUS_SUBMITTED, TASK_STATUS_WAITING, -) -from cylc.flow.taskdef import TaskDef -from cylc.flow.templatevars import eval_var + RunMode) from cylc.flow.templatevars import get_template_vars from cylc.flow.timer import Timer from cylc.flow.util import cli_format @@ -138,9 +141,6 @@ get_time_string_from_unix_time as time2str, get_utc_mode, ) -from cylc.flow.workflow_db_mgr import WorkflowDatabaseManager -from cylc.flow.workflow_events import WorkflowEventHandler -from cylc.flow.workflow_status import AutoRestartMode, RunMode, StopMode from cylc.flow.xtrigger_mgr import XtriggerManager if TYPE_CHECKING: @@ -1128,7 +1128,7 @@ def _set_workflow_params( LOG.info('LOADING workflow
parameters') for key, value in params: if key == self.workflow_db_mgr.KEY_RUN_MODE: - self.options.run_mode = value or RunMode.LIVE + self.options.run_mode = value or RunMode.LIVE.value LOG.info(f"+ run mode = {value}") if value is None: continue @@ -1193,9 +1193,9 @@ def _load_template_vars(self, _, row): def run_event_handlers(self, event, reason=""): """Run a workflow event handler. - Run workflow events in simulation and dummy mode ONLY if enabled. + Run workflow events only in live mode or skip mode. """ - if self.get_run_mode() in {RunMode.SIMULATION, RunMode.DUMMY}: + if self.get_run_mode() in RunMode.NON_OVERRIDABLE_MODES.value: return self.workflow_event_handler.handle(self, event, str(reason)) @@ -1269,7 +1269,7 @@ def release_queued_tasks(self) -> bool: pre_prep_tasks, self.server.curve_auth, self.server.client_pub_key_dir, - is_simulation=(self.get_run_mode() == RunMode.SIMULATION) + run_mode=self.get_run_mode() ): if itask.flow_nums: flow = ','.join(str(i) for i in itask.flow_nums) @@ -1320,7 +1320,7 @@ def timeout_check(self): """Check workflow and task timers.""" self.check_workflow_timers() # check submission and execution timeout and polling timers - if self.get_run_mode() != RunMode.SIMULATION: + if self.get_run_mode() != RunMode.SIMULATION.value: self.task_job_mgr.check_task_jobs(self.workflow, self.pool) async def workflow_shutdown(self): @@ -1516,12 +1516,10 @@ async def _main_loop(self) -> None: if self.xtrigger_mgr.do_housekeeping: self.xtrigger_mgr.housekeep(self.pool.get_tasks()) - self.pool.clock_expire_tasks() self.release_queued_tasks() - if ( - self.get_run_mode() == RunMode.SIMULATION + self.options.run_mode == RunMode.SIMULATION.value and sim_time_check( self.task_events_mgr, self.pool.get_tasks(), diff --git a/cylc/flow/scheduler_cli.py b/cylc/flow/scheduler_cli.py index 390eba52338..ec450e4cad4 100644 --- a/cylc/flow/scheduler_cli.py +++ b/cylc/flow/scheduler_cli.py @@ -54,6 +54,7 @@ from cylc.flow.remote import cylc_server_cmd from cylc.flow.scheduler import Scheduler, SchedulerError from cylc.flow.scripts.common import cylc_header +from cylc.flow.task_state import RunMode from cylc.flow.workflow_db_mgr import WorkflowDatabaseManager from cylc.flow.workflow_files import ( SUITERC_DEPR_MSG, @@ -65,7 +66,6 @@ is_terminal, prompt, ) -from cylc.flow.workflow_status import RunMode if TYPE_CHECKING: from optparse import Values @@ -129,9 +129,15 @@ RUN_MODE = OptionSettings( ["-m", "--mode"], - help="Run mode: live, dummy, simulation (default live).", + help=( + f"Run mode: {RunMode.WORKFLOW_MODES.value} (default live)." + " Live mode executes the tasks as defined in the runtime section." + " Simulation, skip and dummy modes ignore part of tasks'" + " runtime configurations. Simulation and dummy modes are" + " designed for testing, and skip mode is for flow control." 
+ ), metavar="STRING", action='store', dest="run_mode", - choices=[RunMode.LIVE, RunMode.DUMMY, RunMode.SIMULATION], + choices=list(RunMode.WORKFLOW_MODES.value), ) PLAY_RUN_MODE = deepcopy(RUN_MODE) diff --git a/cylc/flow/scripts/lint.py b/cylc/flow/scripts/lint.py index 7509c10a927..e26c7d00dd0 100755 --- a/cylc/flow/scripts/lint.py +++ b/cylc/flow/scripts/lint.py @@ -96,6 +96,10 @@ ) from cylc.flow.parsec.config import ParsecConfig from cylc.flow.scripts.cylc import DEAD_ENDS +from cylc.flow.task_outputs import ( + TASK_OUTPUT_SUCCEEDED, + TASK_OUTPUT_FAILED, +) from cylc.flow.terminal import cli_function @@ -375,6 +379,38 @@ def check_for_deprecated_task_event_template_vars( return None +BAD_SKIP_OUTS = re.compile(r'outputs\s*=\s*(.*)') + + +def check_skip_mode_outputs(line: str) -> Dict: + """Ensure skip mode output setting doesn't include: + + * succeeded _and_ failed: Mutually exclusive. + * submitted and started: These are emitted by skip mode anyway. + + n.b. + + This should be separable from ``[[outputs]]`` because it's a key + value pair not a section heading. + + Examples: + >>> this = check_skip_mode_outputs + >>> this('outputs = succeeded, failed') + {'description': 'are ... together', 'outputs': 'failed...succeeded'} + """ + + outputs = BAD_SKIP_OUTS.findall(line) + if outputs: + outputs = [i.strip() for i in outputs[0].split(',')] + if TASK_OUTPUT_FAILED in outputs and TASK_OUTPUT_SUCCEEDED in outputs: + return { + 'description': + 'are mutually exclusive and cannot be used together', + 'outputs': f'{TASK_OUTPUT_FAILED} and {TASK_OUTPUT_SUCCEEDED}' + } + return {} + + INDENTATION = re.compile(r'^(\s*)(.*)') @@ -620,7 +656,15 @@ def list_wrapper(line: str, check: Callable) -> Optional[Dict[str, str]]: ' directive can make your workflow more portable.' ), FUNCTION: check_wallclock_directives, - } + }, + 'S015': { + 'short': 'Task outputs {outputs}: {description}.', + FUNCTION: check_skip_mode_outputs + }, + 'S016': { + 'short': 'Run mode is not live: This task will only appear to run.', + FUNCTION: re.compile(r'run mode\s*=\s*[^l][^i][^v][^e]$').findall + }, } # Subset of deprecations which are tricky (impossible?) to scrape from the # upgrader. diff --git a/cylc/flow/scripts/set.py b/cylc/flow/scripts/set.py index b64cf74aba0..888ba20890e 100755 --- a/cylc/flow/scripts/set.py +++ b/cylc/flow/scripts/set.py @@ -65,6 +65,9 @@ # complete the succeeded output of 3/bar: $ cylc set --out=succeeded my_workflow//3/bar + # complete the outputs defined in [runtime][task][skip] + $ cylc set --out=skip my_workflow//3/bar + # satisfy the 3/foo:succeeded prerequisite of 3/bar: $ cylc set --pre=3/foo my_workflow//3/bar # or: @@ -154,8 +157,10 @@ def get_option_parser() -> COP: "-o", "--out", "--output", metavar="OUTPUT(s)", help=( "Complete task outputs. For multiple outputs re-use the" - " option, or give a comma-separated list of outputs, or" - ' use "--out=required" to complete all required outputs.' + " option, or give a comma-separated list of outputs." + ' Use "--out=required" to complete all required outputs.' + ' Use "--out=skip" to complete outputs defined in the tasks.' + ' [skip] configuration.' " OUTPUT format: trigger names as used in the graph." 
), action="append", default=None, dest="outputs" diff --git a/cylc/flow/scripts/validate.py b/cylc/flow/scripts/validate.py index bd3e6098906..9e4f8f7cb89 100755 --- a/cylc/flow/scripts/validate.py +++ b/cylc/flow/scripts/validate.py @@ -54,15 +54,12 @@ from cylc.flow.task_proxy import TaskProxy from cylc.flow.templatevars import get_template_vars from cylc.flow.terminal import cli_function -from cylc.flow.scheduler_cli import RUN_MODE -from cylc.flow.workflow_status import RunMode +from cylc.flow.task_state import RunMode if TYPE_CHECKING: from cylc.flow.option_parsers import Values -VALIDATE_RUN_MODE = deepcopy(RUN_MODE) -VALIDATE_RUN_MODE.sources = {'validate'} VALIDATE_ICP_OPTION = deepcopy(ICP_OPTION) VALIDATE_ICP_OPTION.sources = {'validate'} VALIDATE_AGAINST_SOURCE_OPTION = deepcopy(AGAINST_SOURCE_OPTION) @@ -98,7 +95,6 @@ dest="profile_mode", sources={'validate'} ), - VALIDATE_RUN_MODE, VALIDATE_ICP_OPTION, ] @@ -128,7 +124,7 @@ def get_option_parser(): { 'check_circular': False, 'profile_mode': False, - 'run_mode': RunMode.LIVE + 'run_mode': RunMode.LIVE.value } ) diff --git a/cylc/flow/task_events_mgr.py b/cylc/flow/task_events_mgr.py index f9b4d4c3243..5bf6ccae66a 100644 --- a/cylc/flow/task_events_mgr.py +++ b/cylc/flow/task_events_mgr.py @@ -78,7 +78,8 @@ TASK_STATUS_FAILED, TASK_STATUS_EXPIRED, TASK_STATUS_SUCCEEDED, - TASK_STATUS_WAITING + TASK_STATUS_WAITING, + RunMode, ) from cylc.flow.task_outputs import ( TASK_OUTPUT_EXPIRED, @@ -98,7 +99,6 @@ get_template_variables as get_workflow_template_variables, process_mail_footer, ) -from cylc.flow.workflow_status import RunMode if TYPE_CHECKING: @@ -770,7 +770,7 @@ def process_message( # ... but either way update the job ID in the job proxy (it only # comes in via the submission message). - if itask.tdef.run_mode != RunMode.SIMULATION: + if itask.run_mode != RunMode.SIMULATION.value: job_tokens = itask.tokens.duplicate( job=str(itask.submit_num) ) @@ -893,7 +893,7 @@ def _process_message_check( if ( itask.state(TASK_STATUS_WAITING) # Polling in live mode only: - and itask.tdef.run_mode == RunMode.LIVE + and itask.run_mode == RunMode.LIVE.value and ( ( # task has a submit-retry lined up @@ -938,7 +938,7 @@ def _process_message_check( def setup_event_handlers(self, itask, event, message): """Set up handlers for a task event.""" - if itask.tdef.run_mode != RunMode.LIVE: + if RunMode.disable_task_event_handlers(itask): return msg = "" if message != f"job {event}": @@ -1383,8 +1383,12 @@ def _process_message_succeeded(self, itask, event_time, forced): "run_status": 0, "time_run_exit": event_time, }) - # Update mean elapsed time only on task succeeded. - if itask.summary['started_time'] is not None: + # Update mean elapsed time only on task succeeded, + # and only if task is running in live mode: + if ( + itask.summary['started_time'] is not None + and itask.run_mode == RunMode.LIVE.value + ): itask.tdef.elapsed_times.append( itask.summary['finished_time'] - itask.summary['started_time']) @@ -1463,7 +1467,7 @@ def _process_message_submitted( ) itask.set_summary_time('submitted', event_time) - if itask.tdef.run_mode == RunMode.SIMULATION: + if itask.run_mode == RunMode.SIMULATION.value: # Simulate job started as well. itask.set_summary_time('started', event_time) if itask.state_reset(TASK_STATUS_RUNNING, forced=forced): @@ -1500,7 +1504,7 @@ def _process_message_submitted( 'submitted', event_time, ) - if itask.tdef.run_mode == RunMode.SIMULATION: + if itask.run_mode == RunMode.SIMULATION.value: # Simulate job started as well. 
self.data_store_mgr.delta_job_time( job_tokens, @@ -1533,7 +1537,11 @@ def _insert_task_job( # not see previous submissions (so can't use itask.jobs[submit_num-1]). # And transient tasks, used for setting outputs and spawning children, # do not submit jobs. - if (itask.tdef.run_mode == RunMode.SIMULATION) or forced: + if ( + not itask.run_mode + or itask.run_mode in RunMode.JOBLESS_MODES.value + or forced + ): job_conf = {"submit_num": itask.submit_num} else: job_conf = itask.jobs[-1] diff --git a/cylc/flow/task_job_mgr.py b/cylc/flow/task_job_mgr.py index 185966ff12d..192e57fc73e 100644 --- a/cylc/flow/task_job_mgr.py +++ b/cylc/flow/task_job_mgr.py @@ -35,7 +35,7 @@ ) from shutil import rmtree from time import time -from typing import TYPE_CHECKING, Any, Union, Optional +from typing import TYPE_CHECKING, Any, List, Tuple, Union, Optional from cylc.flow import LOG from cylc.flow.job_runner_mgr import JobPollContext @@ -63,7 +63,12 @@ get_platform, ) from cylc.flow.remote import construct_ssh_cmd -from cylc.flow.simulation import ModeSettings +from cylc.flow.run_modes.simulation import ( + submit_task_job as simulation_submit_task_job) +from cylc.flow.run_modes.skip import ( + submit_task_job as skip_submit_task_job) +from cylc.flow.run_modes.dummy import ( + submit_task_job as dummy_submit_task_job) from cylc.flow.subprocctx import SubProcContext from cylc.flow.subprocpool import SubProcPool from cylc.flow.task_action_timer import ( @@ -103,7 +108,8 @@ TASK_STATUS_SUBMITTED, TASK_STATUS_RUNNING, TASK_STATUS_WAITING, - TASK_STATUSES_ACTIVE + TASK_STATUSES_ACTIVE, + RunMode ) from cylc.flow.wallclock import ( get_current_time_string, @@ -247,7 +253,7 @@ def prep_submit_task_jobs(self, workflow, itasks, check_syntax=True): return [prepared_tasks, bad_tasks] def submit_task_jobs(self, workflow, itasks, curve_auth, - client_pub_key_dir, is_simulation=False): + client_pub_key_dir, run_mode='live'): """Prepare for job submission and submit task jobs. Preparation (host selection, remote host init, and remote install) @@ -262,8 +268,8 @@ def submit_task_jobs(self, workflow, itasks, curve_auth, Return (list): list of tasks that attempted submission. 
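The heart of the rework below is a routing decision per task: skip and simulation submissions are completed entirely inside the scheduler, while live and dummy tasks continue to real job submission. A simplified, illustrative sketch of that contract (not the real API):

```python
# Sketch of the lively/nonlive split in _nonlive_submit_task_jobs.
def split_by_mode(tasks):
    """tasks: iterable of (name, run_mode) pairs."""
    lively, nonlive = [], []
    for name, mode in tasks:
        if mode in ('skip', 'simulation'):
            nonlive.append(name)  # handled entirely in the scheduler
        else:
            lively.append(name)   # live and dummy go on to job submission
    return lively, nonlive

assert split_by_mode(
    [('a', 'live'), ('b', 'skip'), ('c', 'dummy'), ('d', 'simulation')]
) == (['a', 'c'], ['b', 'd'])
```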
""" - if is_simulation: - return self._simulation_submit_task_jobs(itasks, workflow) + itasks, nonlive_tasks = self._nonlive_submit_task_jobs( + itasks, workflow, run_mode) # Prepare tasks for job submission prepared_tasks, bad_tasks = self.prep_submit_task_jobs( @@ -272,9 +278,10 @@ def submit_task_jobs(self, workflow, itasks, curve_auth, # Reset consumed host selection results self.task_remote_mgr.subshell_eval_reset() - if not prepared_tasks: + if not prepared_tasks and not nonlive_tasks: return bad_tasks - + elif not prepared_tasks: + return nonlive_tasks auth_itasks = {} # {platform: [itask, ...], ...} for itask in prepared_tasks: @@ -282,8 +289,7 @@ def submit_task_jobs(self, workflow, itasks, curve_auth, auth_itasks.setdefault(platform_name, []) auth_itasks[platform_name].append(itask) # Submit task jobs for each platform - # Non-prepared tasks can be considered done for now: - done_tasks = bad_tasks + done_tasks = bad_tasks + nonlive_tasks for _, itasks in sorted(auth_itasks.items()): # Find the first platform where >1 host has not been tried and @@ -997,44 +1003,74 @@ def _set_retry_timers( except KeyError: itask.try_timers[key] = TaskActionTimer(delays=delays) - def _simulation_submit_task_jobs(self, itasks, workflow): - """Simulation mode task jobs submission.""" + def _nonlive_submit_task_jobs( + self: 'TaskJobManager', + itasks: 'List[TaskProxy]', + workflow: str, + workflow_run_mode: str, + ) -> 'Tuple[List[TaskProxy], List[TaskProxy]]': + """Identify task mode and carry out alternative submission + paths if required: + + * Simulation: Job submission. + * Skip: Entire job lifecycle happens here! + * Dummy: Pre-submission preparation (removing task scripts content) + before returning to live pathway. + * Live: return to main submission pathway without doing anything. + + Returns: + lively_tasks: + A list of tasks which require subsequent + processing **as if** they were live mode tasks. + (This includes live and dummy mode tasks) + nonlive_tasks: + A list of tasks which require no further processing + because their apparent execution is done entirely inside + the scheduler. (This includes skip and simulation mode tasks). + """ + lively_tasks: 'List[TaskProxy]' = [] + nonlive_tasks: 'List[TaskProxy]' = [] now = time() - now_str = get_time_string_from_unix_time(now) + now = (now, get_time_string_from_unix_time(now)) + for itask in itasks: - # Handle broadcasts + # Get task config with broadcasts applied: rtconfig = self.task_events_mgr.broadcast_mgr.get_updated_rtconfig( itask) - itask.summary['started_time'] = now - self._set_retry_timers(itask, rtconfig) - itask.mode_settings = ModeSettings( - itask, - self.workflow_db_mgr, - rtconfig - ) - - itask.waiting_on_job_prep = False - itask.submit_num += 1 + # Apply task run mode + if workflow_run_mode in RunMode.NON_OVERRIDABLE_MODES.value: + # Task run mode cannot override workflow run-mode sim or dummy: + run_mode = workflow_run_mode + else: + # If workflow mode is skip or live and task mode is set, + # override workflow mode, else use workflow mode. 
+ run_mode = rtconfig.get('run mode', None) or workflow_run_mode + # Store the run mode of this submission: + itask.run_mode = run_mode + + # Submit nonlive tasks, or add live-like (live or dummy) + # tasks to the list of tasks to put through the live + # submission pipeline. We decide based on the return + # value of the mode-specific submit function: + is_nonlive = False + if run_mode == RunMode.DUMMY.value: + is_nonlive = dummy_submit_task_job( + self, itask, rtconfig, workflow, now) + elif run_mode == RunMode.SIMULATION.value: + is_nonlive = simulation_submit_task_job( + self, itask, rtconfig, workflow, now) + elif run_mode == RunMode.SKIP.value: + is_nonlive = skip_submit_task_job( + self, itask, rtconfig, now) + + # Assign task to list: + if is_nonlive: + nonlive_tasks.append(itask) + else: + lively_tasks.append(itask) - itask.platform = {'name': 'SIMULATION'} - itask.summary['job_runner_name'] = 'SIMULATION' - itask.summary[self.KEY_EXECUTE_TIME_LIMIT] = ( - itask.mode_settings.simulated_run_length - ) - itask.jobs.append( - self.get_simulation_job_conf(itask, workflow) - ) - self.task_events_mgr.process_message( - itask, INFO, TASK_OUTPUT_SUBMITTED, - ) - self.workflow_db_mgr.put_insert_task_jobs( - itask, { - 'time_submit': now_str, - 'try_num': itask.get_try_num(), - } - ) - return itasks + return lively_tasks, nonlive_tasks def _submit_task_jobs_callback(self, ctx, workflow, itasks): """Callback when submit task jobs command exits.""" diff --git a/cylc/flow/task_outputs.py b/cylc/flow/task_outputs.py index 1af37e1554e..5fb5a934935 100644 --- a/cylc/flow/task_outputs.py +++ b/cylc/flow/task_outputs.py @@ -194,6 +194,7 @@ def get_completion_expression(tdef: 'TaskDef') -> str: def get_optional_outputs( expression: str, outputs: Iterable[str], + force_optional: "Optional[str]" = None ) -> Dict[str, Optional[bool]]: """Determine which outputs in an expression are optional. @@ -202,6 +203,8 @@ def get_optional_outputs( The completion expression. outputs: All outputs that apply to this task. + force_optional: + Force this output to be treated as optional; the + CompletionEvaluator will not consider it. Returns: dict: compvar: is_optional @@ -229,6 +232,14 @@ def get_optional_outputs( [('expired', True), ('failed', None), ('succeeded', False), ('x', False), ('y', False)] + >>> sorted(get_optional_outputs( + ... '(succeeded and towel) or (failed and bugblatter)', + ... {'succeeded', 'towel', 'failed', 'bugblatter'}, + ... 'failed' + ... ).items()) + [('bugblatter', True), ('failed', True), + ('succeeded', False), ('towel', False)] + """ # determine which triggers are used in the expression used_compvars = get_variable_names(expression) @@ -236,6 +247,9 @@ def get_optional_outputs( # all completion variables which could appear in the expression all_compvars = {trigger_to_completion_variable(out) for out in outputs} + # Allows exclusion of additional outcomes: + extra_excludes = {force_optional: False} if force_optional else {} + return { # output: is_optional # the outputs that are used in the expression **{ @@ -247,6 +261,7 @@ def get_optional_outputs( # (pre-conditions are considered separately) 'expired': False, 'submit_failed': False, + **extra_excludes }, ) for output in used_compvars @@ -609,16 +624,25 @@ def _is_compvar_complete(self, compvar: str) -> Optional[bool]: else: raise KeyError(compvar) - def iter_required_messages(self) -> Iterator[str]: + def iter_required_messages( + self, + exclude: Optional[str] = None + ) -> Iterator[str]: """Yield task messages that are required for this task to be complete.
        Note, in some cases tasks might not have any required messages,
        e.g. "completion = succeeded or failed".
+
+        Args:
+            exclude: Exclude one possible required message (e.g. succeeded
+                or failed), yielding all of the other required outputs.
         """
         for compvar, is_optional in get_optional_outputs(
             self._completion_expression,
             set(self._message_to_compvar.values()),
+            force_optional=exclude
         ).items():
             if is_optional is False:
                 for message, _compvar in self._message_to_compvar.items():
                     if _compvar == compvar:
diff --git a/cylc/flow/task_pool.py b/cylc/flow/task_pool.py
index aeee7505e66..59c0a627299 100644
--- a/cylc/flow/task_pool.py
+++ b/cylc/flow/task_pool.py
@@ -53,6 +53,7 @@
 from cylc.flow.task_id import TaskID
 from cylc.flow.task_proxy import TaskProxy
 from cylc.flow.task_state import (
+    RunMode,
     TASK_STATUSES_ACTIVE,
     TASK_STATUSES_FINAL,
     TASK_STATUS_WAITING,
@@ -70,6 +71,8 @@
 )
 from cylc.flow.wallclock import get_current_time_string
 from cylc.flow.platforms import get_platform
+from cylc.flow.run_modes.skip import (
+    process_outputs as get_skip_mode_outputs)
 from cylc.flow.task_outputs import (
     TASK_OUTPUT_SUCCEEDED,
     TASK_OUTPUT_EXPIRED,
@@ -1413,7 +1416,10 @@ def spawn_on_output(self, itask, output, forced=False):
             tasks = [c_task]
 
         for t in tasks:
-            t.satisfy_me([itask.tokens.duplicate(task_sel=output)])
+            t.satisfy_me(
+                [itask.tokens.duplicate(task_sel=output)],
+                getattr(itask.tdef, 'run_mode', RunMode.LIVE.value)
+            )
             self.data_store_mgr.delta_task_prerequisite(t)
             if not in_pool:
                 self.add_to_pool(t)
@@ -1537,7 +1543,8 @@ def spawn_on_all_outputs(
                 continue
             if completed_only:
                 c_task.satisfy_me(
-                    [itask.tokens.duplicate(task_sel=message)]
+                    [itask.tokens.duplicate(task_sel=message)],
+                    itask.run_mode
                 )
                 self.data_store_mgr.delta_task_prerequisite(c_task)
             self.add_to_pool(c_task)
@@ -1856,7 +1863,8 @@ def _standardise_outputs(
             try:
                 msg = tdef.outputs[output][0]
             except KeyError:
-                LOG.warning(f"output {point}/{tdef.name}:{output} not found")
+                LOG.warning(
+                    f"output {point}/{tdef.name}:{output} not found")
                 continue
             _outputs.append(msg)
         return _outputs
@@ -1952,9 +1960,19 @@ def _set_outputs_itask(
         if not outputs:
             outputs = list(itask.state.outputs.iter_required_messages())
         else:
+            # --out=skip is a shortcut to setting all the outputs that
+            # skip mode would.
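For reference, the outputs "skip mode would" set here come from process_outputs (imported above as get_skip_mode_outputs); per the documented [runtime][<namespace>][skip] rules they amount to roughly the following sketch (hypothetical helper and argument names, not the real implementation)::

    def outputs_for_skip(required_outputs, configured_outputs=None):
        # submitted and started are always produced:
        outs = {'submitted', 'started'}
        # the configured skip-mode outputs, defaulting to everything
        # the graph requires:
        outs |= set(configured_outputs or required_outputs)
        # exactly one final outcome: succeeded, unless failed was
        # explicitly requested:
        if not outs & {'succeeded', 'failed'}:
            outs.add('succeeded')
        return outs
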
+            skips = []
+            if RunMode.SKIP.value in outputs:
+                # Check for broadcasts to task:
+                bc_mgr = self.task_events_mgr.broadcast_mgr
+                rtconfig = bc_mgr.get_updated_rtconfig(itask)
+                outputs.remove(RunMode.SKIP.value)
+                skips = get_skip_mode_outputs(itask, rtconfig)
+                itask.run_mode = RunMode.SKIP.value
             outputs = self._standardise_outputs(
-                itask.point, itask.tdef, outputs
-            )
+                itask.point, itask.tdef, outputs)
+            outputs = list(set(outputs + skips))
 
         for output in sorted(outputs, key=itask.state.outputs.output_sort_key):
             if itask.state.outputs.is_message_complete(output):
diff --git a/cylc/flow/task_proxy.py b/cylc/flow/task_proxy.py
index 27332ac9316..b25fa8403ae 100644
--- a/cylc/flow/task_proxy.py
+++ b/cylc/flow/task_proxy.py
@@ -40,6 +40,7 @@
 from cylc.flow.platforms import get_platform
 from cylc.flow.task_action_timer import TimerFlags
 from cylc.flow.task_state import (
+    RunMode,
     TaskState,
     TASK_STATUS_WAITING,
     TASK_STATUS_EXPIRED,
@@ -190,6 +191,7 @@ class TaskProxy:
         'point_as_seconds',
         'poll_timer',
         'reload_successor',
+        'run_mode',
         'submit_num',
         'tdef',
         'state',
@@ -297,6 +299,7 @@ def __init__(
         self.graph_children = generate_graph_children(tdef, self.point)
 
         self.mode_settings: Optional['ModeSettings'] = None
+        self.run_mode: Optional[str] = None
 
         if self.tdef.expiration_offset is not None:
             self.expire_time = (
@@ -547,7 +550,7 @@ def state_reset(
         return False
 
     def satisfy_me(
-        self, task_messages: 'Iterable[Tokens]'
+        self, task_messages: 'Iterable[Tokens]', mode=RunMode.LIVE.value
     ) -> 'Set[Tokens]':
         """Try to satisfy my prerequisites with given output messages.
 
@@ -557,7 +560,7 @@
 
         Return a set of unmatched task messages.
         """
-        used = self.state.satisfy_me(task_messages)
+        used = self.state.satisfy_me(task_messages, mode)
         return set(task_messages) - used
 
     def clock_expire(self) -> bool:
diff --git a/cylc/flow/task_state.py b/cylc/flow/task_state.py
index 9ecd9414d17..838414198f6 100644
--- a/cylc/flow/task_state.py
+++ b/cylc/flow/task_state.py
@@ -17,6 +17,7 @@
 
 """Task state related logic."""
 
+from enum import Enum
 from typing import (
     TYPE_CHECKING,
     Dict,
@@ -40,6 +41,7 @@
 
 if TYPE_CHECKING:
     from cylc.flow.cycling import PointBase
+    from cylc.flow.option_parsers import Values
     from cylc.flow.id import Tokens
     from cylc.flow.prerequisite import PrereqMessage
     from cylc.flow.taskdef import TaskDef
@@ -177,6 +179,74 @@
 }
 
 
+class RunMode(Enum):
+    """The possible run modes of a task/workflow."""
+
+    LIVE = 'live'
+    """Task will run normally."""
+
+    SIMULATION = 'simulation'
+    """Simulates job submission with configurable execution time
+    and succeeded/failed outcomes (does not submit real jobs)."""
+
+    DUMMY = 'dummy'
+    """Submits real jobs with empty scripts."""
+
+    SKIP = 'skip'
+    """Skips job submission; sets required outputs (by default) or
+    configured outputs."""
+
+    WORKFLOW_MODES = (LIVE, DUMMY, SIMULATION, SKIP)
+    """Modes which can be set for a whole workflow.
+
+    n.b. not using a set, to ensure ordering in the CLI.
+    """
+
+    OVERRIDING_MODES = frozenset({LIVE, SKIP})
+    """Modes which can be set in task config."""
+
+    NON_OVERRIDABLE_MODES = frozenset({SIMULATION, DUMMY})
+    """Workflow-wide modes which a task's run mode cannot override."""
+
+    JOBLESS_MODES = frozenset({SKIP, SIMULATION})
+    """Modes which completely ignore the standard submission path."""
+
+    def describe(self):
+        """Return a user-friendly description of the run mode.
+
+        For use by the configuration spec documenter.
+        """
+        if self == self.LIVE:
+            return "Task will run normally."
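Note that collection "constants" declared inside an Enum body become Enum members themselves, which is why callers throughout this change unwrap them with ``.value`` (e.g. ``RunMode.OVERRIDING_MODES.value`` in the cfgspec and ``RunMode.NON_OVERRIDABLE_MODES.value`` in the task job manager). A small illustration::

    from cylc.flow.task_state import RunMode

    RunMode.SKIP.value                                    # 'skip'
    'simulation' in RunMode.NON_OVERRIDABLE_MODES.value   # True
    'dummy' in RunMode.JOBLESS_MODES.value                # False
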
+ if self == self.SKIP: + return ( + "Skips job submission; sets required outputs" + " (by default) or configured outputs.") + raise KeyError(f'No description for {self}.') + + @staticmethod + def get(options: 'Values') -> str: + """Return the workflow run mode from the options.""" + return getattr(options, 'run_mode', None) or RunMode.LIVE.value + + @staticmethod + def disable_task_event_handlers(itask): + """Should we disable event handlers for this task? + + No event handlers in simulation mode, or in skip mode + if we don't deliberately enable them: + """ + mode = itask.run_mode + return ( + mode == RunMode.SIMULATION.value + or ( + mode == RunMode.SKIP.value + and itask.platform.get( + 'disable task event handlers', False) + ) + ) + + def status_leq(status_a, status_b): """"Return True if status_a <= status_b""" return (TASK_STATUSES_ORDERED.index(status_a) <= @@ -324,7 +393,8 @@ def __call__( def satisfy_me( self, - outputs: Iterable['Tokens'] + outputs: Iterable['Tokens'], + mode, ) -> Set['Tokens']: """Try to satisfy my prerequisites with given outputs. @@ -333,7 +403,7 @@ def satisfy_me( valid: Set[Tokens] = set() for prereq in (*self.prerequisites, *self.suicide_prerequisites): valid.update( - prereq.satisfy_me(outputs) + prereq.satisfy_me(outputs, mode) ) return valid diff --git a/cylc/flow/unicode_rules.py b/cylc/flow/unicode_rules.py index a6974888248..0dbb5aa22f9 100644 --- a/cylc/flow/unicode_rules.py +++ b/cylc/flow/unicode_rules.py @@ -23,7 +23,7 @@ _TASK_NAME_PREFIX, ) from cylc.flow.task_qualifiers import TASK_QUALIFIERS -from cylc.flow.task_state import TASK_STATUSES_ORDERED +from cylc.flow.task_state import TASK_STATUSES_ORDERED, RunMode ENGLISH_REGEX_MAP = { r'\w': 'alphanumeric', @@ -351,6 +351,8 @@ class TaskOutputValidator(UnicodeRuleChecker): not_starts_with('_cylc'), # blacklist keywords not_equals('required', 'optional', 'all', 'and', 'or'), + # blacklist Run Modes: + not_equals(RunMode.SKIP.value), # blacklist built-in task qualifiers and statuses (e.g. 
"waiting") not_equals(*sorted({*TASK_QUALIFIERS, *TASK_STATUSES_ORDERED})), ] diff --git a/cylc/flow/workflow_status.py b/cylc/flow/workflow_status.py index d6d6fb587dc..72761c08c87 100644 --- a/cylc/flow/workflow_status.py +++ b/cylc/flow/workflow_status.py @@ -23,8 +23,6 @@ from cylc.flow.wallclock import get_time_string_from_unix_time as time2str if TYPE_CHECKING: - from optparse import Values - from cylc.flow.cycling import PointBase from cylc.flow.scheduler import Scheduler from cylc.flow.task_pool import TaskPool @@ -202,21 +200,3 @@ def _get_earliest_stop_point_status_msg(pool: 'TaskPool') -> Optional[str]: if prop is None: return None return template % prop - - -class RunMode: - """The possible run modes of a workflow.""" - - LIVE = 'live' - """Workflow will run normally.""" - - SIMULATION = 'simulation' - """Workflow will run in simulation mode.""" - - DUMMY = 'dummy' - """Workflow will run in dummy mode.""" - - @staticmethod - def get(options: 'Values') -> str: - """Return the run mode from the options.""" - return getattr(options, 'run_mode', None) or RunMode.LIVE diff --git a/tests/functional/cylc-config/00-simple/section2.stdout b/tests/functional/cylc-config/00-simple/section2.stdout index 3d83ac15278..049db739435 100644 --- a/tests/functional/cylc-config/00-simple/section2.stdout +++ b/tests/functional/cylc-config/00-simple/section2.stdout @@ -15,10 +15,14 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -90,10 +94,14 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -165,10 +173,14 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -240,12 +252,16 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = [[[directives]]] job_type = serial [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -316,12 +332,16 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = [[[directives]]] job_type = parallel [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -392,12 +412,16 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = [[[directives]]] job_type = serial [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -468,12 +492,16 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = [[[directives]]] job_type = serial [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -544,12 +572,16 @@ execution time limit = submission polling intervals = submission retry delays 
= + run mode = [[[directives]]] job_type = parallel [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -620,12 +652,16 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = [[[directives]]] job_type = parallel [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -696,12 +732,16 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = [[[directives]]] job_type = serial [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -772,12 +812,16 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = [[[directives]]] job_type = serial [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -848,12 +892,16 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = [[[directives]]] job_type = parallel [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = @@ -924,12 +972,16 @@ execution time limit = submission polling intervals = submission retry delays = + run mode = [[[directives]]] job_type = parallel [[[meta]]] title = description = URL = + [[[skip]]] + outputs = + disable task event handlers = True [[[simulation]]] default run length = PT10S speedup factor = diff --git a/tests/functional/cylc-set/09-set-skip.t b/tests/functional/cylc-set/09-set-skip.t new file mode 100644 index 00000000000..dd314283700 --- /dev/null +++ b/tests/functional/cylc-set/09-set-skip.t @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +#------------------------------------------------------------------------------- +# +# Skip Mode proposal example: +# https://github.com/cylc/cylc-admin/blob/master/docs/proposal-skip-mode.md +# The cylc set --out option should accept the skip value +# which should set the outputs defined in +# [runtime][][skip]outputs. + +. "$(dirname "$0")/test_header" +set_test_number 2 +reftest +exit diff --git a/tests/functional/cylc-set/09-set-skip/flow.cylc b/tests/functional/cylc-set/09-set-skip/flow.cylc new file mode 100644 index 00000000000..ef74c362773 --- /dev/null +++ b/tests/functional/cylc-set/09-set-skip/flow.cylc @@ -0,0 +1,50 @@ +[meta] + test_description = """ + Test that cylc set --out skip satisfies + all outputs which are required by the graph. 
+ """ + proposal url = https://github.com/cylc/cylc-admin/blob/master/docs/proposal-skip-mode.md + +[scheduler] + allow implicit tasks = true + [[events]] + expected task failures = 1/bar + +[scheduling] + [[graph]] + R1 = """ + # Optional out not created by set --out skip + foo:no? => not_this_task? + + # set --out skip creates required, started, submitted + # and succeeded (unless failed is set): + foo:yes => require_this_task + foo:submitted => submitted_emitted + foo:succeeded => succeeded_emitted + foo:started => skip_foo + + # set --out skip creates failed if that is required + # by skip mode settings: + bar:started => skip_bar + bar:failed? => bar_failed + """ + +[runtime] + [[foo]] + script = sleep 100 + [[[skip]]] + outputs = yes + [[[outputs]]] + no = 'Don\'t require this task' + yes = 'Require this task' + + [[bar]] + script = sleep 100 + [[[skip]]] + outputs = failed + + [[skip_foo]] + script = cylc set ${CYLC_WORKFLOW_ID}//1/foo --out skip + + [[skip_bar]] + script = cylc set ${CYLC_WORKFLOW_ID}//1/bar --out skip diff --git a/tests/functional/cylc-set/09-set-skip/reference.log b/tests/functional/cylc-set/09-set-skip/reference.log new file mode 100644 index 00000000000..6e7b636f540 --- /dev/null +++ b/tests/functional/cylc-set/09-set-skip/reference.log @@ -0,0 +1,8 @@ +1/bar -triggered off [] in flow 1 +1/foo -triggered off [] in flow 1 +1/submitted_emitted -triggered off ['1/foo'] in flow 1 +1/skip_bar -triggered off ['1/bar'] in flow 1 +1/skip_foo -triggered off ['1/foo'] in flow 1 +1/succeeded_emitted -triggered off ['1/foo'] in flow 1 +1/bar_failed -triggered off ['1/bar'] in flow 1 +1/require_this_task -triggered off ['1/foo'] in flow 1 diff --git a/tests/functional/modes/01-dummy.t b/tests/functional/run_modes/01-dummy.t similarity index 100% rename from tests/functional/modes/01-dummy.t rename to tests/functional/run_modes/01-dummy.t diff --git a/tests/functional/modes/01-dummy/flow.cylc b/tests/functional/run_modes/01-dummy/flow.cylc similarity index 100% rename from tests/functional/modes/01-dummy/flow.cylc rename to tests/functional/run_modes/01-dummy/flow.cylc diff --git a/tests/functional/modes/01-dummy/reference.log b/tests/functional/run_modes/01-dummy/reference.log similarity index 100% rename from tests/functional/modes/01-dummy/reference.log rename to tests/functional/run_modes/01-dummy/reference.log diff --git a/tests/functional/modes/02-dummy-message-outputs.t b/tests/functional/run_modes/02-dummy-message-outputs.t similarity index 100% rename from tests/functional/modes/02-dummy-message-outputs.t rename to tests/functional/run_modes/02-dummy-message-outputs.t diff --git a/tests/functional/modes/02-dummy-message-outputs/flow.cylc b/tests/functional/run_modes/02-dummy-message-outputs/flow.cylc similarity index 100% rename from tests/functional/modes/02-dummy-message-outputs/flow.cylc rename to tests/functional/run_modes/02-dummy-message-outputs/flow.cylc diff --git a/tests/functional/modes/02-dummy-message-outputs/reference.log b/tests/functional/run_modes/02-dummy-message-outputs/reference.log similarity index 100% rename from tests/functional/modes/02-dummy-message-outputs/reference.log rename to tests/functional/run_modes/02-dummy-message-outputs/reference.log diff --git a/tests/functional/modes/03-simulation.t b/tests/functional/run_modes/03-simulation.t similarity index 100% rename from tests/functional/modes/03-simulation.t rename to tests/functional/run_modes/03-simulation.t diff --git a/tests/functional/modes/03-simulation/flow.cylc 
b/tests/functional/run_modes/03-simulation/flow.cylc similarity index 100% rename from tests/functional/modes/03-simulation/flow.cylc rename to tests/functional/run_modes/03-simulation/flow.cylc diff --git a/tests/functional/modes/03-simulation/reference.log b/tests/functional/run_modes/03-simulation/reference.log similarity index 100% rename from tests/functional/modes/03-simulation/reference.log rename to tests/functional/run_modes/03-simulation/reference.log diff --git a/tests/functional/modes/04-simulation-runtime.t b/tests/functional/run_modes/04-simulation-runtime.t similarity index 100% rename from tests/functional/modes/04-simulation-runtime.t rename to tests/functional/run_modes/04-simulation-runtime.t diff --git a/tests/functional/modes/04-simulation-runtime/flow.cylc b/tests/functional/run_modes/04-simulation-runtime/flow.cylc similarity index 100% rename from tests/functional/modes/04-simulation-runtime/flow.cylc rename to tests/functional/run_modes/04-simulation-runtime/flow.cylc diff --git a/tests/functional/modes/04-simulation-runtime/reference.log b/tests/functional/run_modes/04-simulation-runtime/reference.log similarity index 100% rename from tests/functional/modes/04-simulation-runtime/reference.log rename to tests/functional/run_modes/04-simulation-runtime/reference.log diff --git a/tests/functional/modes/05-sim-trigger.t b/tests/functional/run_modes/05-sim-trigger.t similarity index 100% rename from tests/functional/modes/05-sim-trigger.t rename to tests/functional/run_modes/05-sim-trigger.t diff --git a/tests/functional/modes/05-sim-trigger/flow.cylc b/tests/functional/run_modes/05-sim-trigger/flow.cylc similarity index 100% rename from tests/functional/modes/05-sim-trigger/flow.cylc rename to tests/functional/run_modes/05-sim-trigger/flow.cylc diff --git a/tests/functional/modes/05-sim-trigger/reference.log b/tests/functional/run_modes/05-sim-trigger/reference.log similarity index 100% rename from tests/functional/modes/05-sim-trigger/reference.log rename to tests/functional/run_modes/05-sim-trigger/reference.log diff --git a/tests/functional/run_modes/06-run-mode-overrides.t b/tests/functional/run_modes/06-run-mode-overrides.t new file mode 100644 index 00000000000..f6d4faafb30 --- /dev/null +++ b/tests/functional/run_modes/06-run-mode-overrides.t @@ -0,0 +1,66 @@ +#!/usr/bin/env bash +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# Testing Skip mode functionality. + +. "$(dirname "$0")/test_header" +set_test_number 11 + +# Install and run the workflow in live mode (default). +# Check that tasks with run mode unset and run mode = live +# leave log files, and that skip mode tasks don't. 
+TEST_NAME="${TEST_NAME_BASE}:live-workflow" +install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}" +run_ok "${TEST_NAME}:validate" cylc validate "${WORKFLOW_NAME}" +workflow_run_ok "${TEST_NAME}:play" \ + cylc play "${WORKFLOW_NAME}" \ + --no-detach + +JOB_LOGS="${WORKFLOW_RUN_DIR}/log/job/1000" +run_fail "${TEST_NAME}:config run mode=skip" ls "${JOB_LOGS}/skip_" +for MODE in default live; do + named_grep_ok "${TEST_NAME}:config run mode=${MODE}" "===.*===" "${JOB_LOGS}/${MODE}_/NN/job.out" +done + +# After broadcasting a change in run_mode to task default_ it now runs +# in skip mode and fails to produce a log file: +JOB_LOGS="${WORKFLOW_RUN_DIR}/log/job/1001" +run_fail "${TEST_NAME}:broadcast run mode=skip" ls "${JOB_LOGS}/default_/" + +purge + +# Install and run the workflow in skip mode. +# Check that tasks with run mode unset and run mode = skip +# don't leave log files, and that skip mode tasks does. +TEST_NAME="${TEST_NAME_BASE}:skip-workflow" +install_workflow "${TEST_NAME_BASE}" "${TEST_NAME_BASE}" +workflow_run_ok "${TEST_NAME}:run" \ + cylc play "${WORKFLOW_NAME}" \ + --no-detach \ + --mode skip \ + --set='changemode="live"' \ + --final-cycle-point=1000 + +JOB_LOGS="${WORKFLOW_RUN_DIR}/log/job/1000" +run_ok "${TEST_NAME}:run mode=live" ls "${JOB_LOGS}/live_" +run_fail "${TEST_NAME}:run mode=default" ls "${JOB_LOGS}/default_" +run_fail "${TEST_NAME}:run mode=skip" ls "${JOB_LOGS}/skip_" +JOB_LOGS="${WORKFLOW_RUN_DIR}/log/job/1000" +named_grep_ok "${TEST_NAME}:run mode=live" "===.*===" "${JOB_LOGS}/live_/NN/job.out" + +purge +exit 0 diff --git a/tests/functional/run_modes/06-run-mode-overrides/flow.cylc b/tests/functional/run_modes/06-run-mode-overrides/flow.cylc new file mode 100644 index 00000000000..6d1b1258833 --- /dev/null +++ b/tests/functional/run_modes/06-run-mode-overrides/flow.cylc @@ -0,0 +1,28 @@ +#!Jinja2 +[scheduler] + cycle point format = %Y + +[scheduling] + initial cycle point = 1000 + final cycle point = 1001 + [[graph]] + R1/1000 = default_ & live_ & skip_ => end + R1/1001 = end[-P1Y] => broadcaster => default_ + +[runtime] + [[root]] + script = echo "=== this task ran in live mode ===" + [[[simulation]]] + default run length = PT0S + [[default_, end]] + [[live_]] + run mode = live + [[skip_]] + run mode = skip + [[broadcaster]] + script = """ + cylc broadcast "${CYLC_WORKFLOW_ID}" \ + --name default_ \ + --point 1001 \ + --set='run mode="{{changemode | default("skip")}}"' + """ diff --git a/tests/functional/modes/test_header b/tests/functional/run_modes/test_header similarity index 100% rename from tests/functional/modes/test_header rename to tests/functional/run_modes/test_header diff --git a/tests/integration/run_modes/test_mode_overrides.py b/tests/integration/run_modes/test_mode_overrides.py new file mode 100644 index 00000000000..f9ab318e0e6 --- /dev/null +++ b/tests/integration/run_modes/test_mode_overrides.py @@ -0,0 +1,152 @@ +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +"""Test that using [runtime][TASK]run mode works in each mode. + +Point 3 of the Skip Mode proposal +https://github.com/cylc/cylc-admin/blob/master/docs/proposal-skip-mode.md + +| The run mode should be controlled by a new task configuration +| [runtime][]run mode with the default being live. +| As a runtime configuration, this can be defined in the workflow +| for development / testing purposes or set by cylc broadcast. + +n.b: This is pretty much a functional test and +probably ought to be labelled as such, but uses the +integration test framework. +""" + +import pytest + + +@pytest.mark.parametrize( + 'workflow_run_mode', [('live'), ('skip')]) +async def test_run_mode_override_from_config( + workflow_run_mode, flow, scheduler, run, complete, log_filter +): + """Test that ``[runtime][TASK]run mode`` overrides workflow modes. + """ + cfg = { + "scheduler": {"cycle point format": "%Y"}, + "scheduling": { + "initial cycle point": "1000", + "final cycle point": "1000", + "graph": {"P1Y": "live_\nskip_\ndefault_"}}, + "runtime": { + "skip_": {"run mode": "skip"}, + "live_": {"run mode": "live"} + } + } + id_ = flow(cfg) + schd = scheduler(id_, run_mode=workflow_run_mode, paused_start=False) + expect_template = ( + '[1000/{}_/01:preparing] submitted to localhost:background') + + async with run(schd) as log: + await complete(schd) + + # Live task has been really submitted: + assert log_filter(log, contains=expect_template.format('live')) + + # Default is the same as workflow: + if workflow_run_mode == 'live': + assert log_filter(log, contains=expect_template.format('default')) + else: + assert log_filter( + log, contains='[1000/default_/01:running] => succeeded') + assert not log_filter( + log, contains=expect_template.format('default')) + + # Skip task has run, but not actually been submitted: + assert log_filter(log, contains='[1000/skip_/01:running] => succeeded') + assert not log_filter(log, contains=expect_template.format('skip')) + + +async def test_force_trigger_does_not_override_run_mode( + flow, + scheduler, + start, +): + """Force-triggering a task will not override the run mode. + + Tasks with run mode = skip will continue to abide by + the is_held flag as normal. + + Taken from spec at + https://github.com/cylc/cylc-admin/blob/master/ + docs/proposal-skip-mode.md#proposal + """ + wid = flow({ + 'scheduling': {'graph': {'R1': 'foo'}}, + 'runtime': {'foo': {'run mode': 'skip'}} + }) + schd = scheduler(wid) + async with start(schd): + # Check that task isn't held at first + foo = schd.pool.get_tasks()[0] + assert foo.state.is_held is False + + # Hold task, check that it's held: + schd.pool.hold_tasks('1/foo') + assert foo.state.is_held is True + + # Trigger task, check that it's _still_ held: + schd.pool.force_trigger_tasks('1/foo', [1]) + assert foo.state.is_held is True + + # run_mode will always be simulation from test + # workflow before submit routine... + assert not foo.run_mode + + # ... but job submission will always change this to the correct mode: + schd.task_job_mgr.submit_task_jobs( + schd.workflow, + [foo], + schd.server.curve_auth, + schd.server.client_pub_key_dir) + assert foo.run_mode == 'skip' + + +async def test_run_mode_override_from_broadcast( + flow, scheduler, run, complete, log_filter +): + """Test that run_mode modifications only apply to one task. 
+ """ + cfg = { + "scheduler": {"cycle point format": "%Y"}, + "scheduling": { + "initial cycle point": "1000", + "final cycle point": "1001", + "graph": {"P1Y": "foo"}}, + "runtime": { + } + } + id_ = flow(cfg) + schd = scheduler(id_, run_mode='live', paused_start=False) + + async with run(schd): + schd.broadcast_mgr.put_broadcast( + ['1000'], ['foo'], [{'run mode': 'skip'}]) + + foo_1000, foo_1001 = schd.pool.get_tasks() + + schd.task_job_mgr.submit_task_jobs( + schd.workflow, + [foo_1000, foo_1001], + schd.server.curve_auth, + schd.server.client_pub_key_dir) + + assert foo_1000.run_mode == 'skip' + assert foo_1001.run_mode == 'live' diff --git a/tests/integration/run_modes/test_nonlive.py b/tests/integration/run_modes/test_nonlive.py new file mode 100644 index 00000000000..42ddca128ce --- /dev/null +++ b/tests/integration/run_modes/test_nonlive.py @@ -0,0 +1,120 @@ +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from typing import Any, Dict + +# Define here to ensure test doesn't just mirror code: +KGO = { + 'live': { + 'flow_nums': '[1]', + 'is_manual_submit': 0, + 'try_num': 1, + 'submit_status': None, + 'run_signal': None, + 'run_status': None, + 'platform_name': 'localhost', + 'job_runner_name': 'background', + 'job_id': None}, + 'skip': { + 'flow_nums': '[1]', + 'is_manual_submit': 0, + 'try_num': 1, + 'submit_status': 0, + 'run_signal': None, + 'run_status': 0, + 'platform_name': 'skip', + 'job_runner_name': 'simulation', + 'job_id': None}, +} + + +def not_time(data: Dict[str, Any]): + """Filter out fields containing times to reduce risk of + flakiness""" + return {k: v for k, v in data.items() if 'time' not in k} + + +async def test_task_jobs(flow, scheduler, start): + """Ensure that task job data is added to the database correctly + for each run mode. 
+ """ + schd = scheduler(flow({ + 'scheduling': {'graph': { + 'R1': '&'.join(KGO)}}, + 'runtime': { + mode: {'run mode': mode} for mode in KGO} + })) + async with start(schd): + schd.task_job_mgr.submit_task_jobs( + schd.workflow, + schd.pool.get_tasks(), + schd.server.curve_auth, + schd.server.client_pub_key_dir + ) + schd.workflow_db_mgr.process_queued_ops() + + for mode, kgo in KGO.items(): + taskdata = not_time( + schd.workflow_db_mgr.pub_dao.select_task_job(1, mode)) + assert taskdata == kgo, ( + f'Mode {mode}: incorrect db entries.') + + schd.pool.set_prereqs_and_outputs('*', ['failed'], [], []) + + schd.task_job_mgr.submit_task_jobs( + schd.workflow, + schd.pool.get_tasks(), + schd.server.curve_auth, + schd.server.client_pub_key_dir + ) + schd.workflow_db_mgr.process_queued_ops() + + for mode, kgo in KGO.items(): + taskdata = not_time( + schd.workflow_db_mgr.pub_dao.select_task_job(1, mode)) + assert taskdata == kgo, ( + f'Mode {mode}: incorrect db entries.') + + +async def test_mean_task_time(flow, scheduler, run, complete): + """Non-live tasks are not added to the list of task times, + so skipping tasks will not affect how long Cylc expects tasks to run. + """ + schd = scheduler(flow({ + 'scheduling': { + 'initial cycle point': '1000', + 'final cycle point': '1002', + 'graph': {'P1Y': 'foo'}} + }), run_mode='live') + + async with run(schd): + tasks = schd.pool.get_tasks() + tdef = tasks[0].tdef + assert list(tdef.elapsed_times) == [] + + # Make the task run in skip mode at one cycle: + schd.broadcast_mgr.put_broadcast( + ['1000'], ['foo'], [{'run mode': 'skip'}]) + + # Submit two tasks: + schd.task_job_mgr.submit_task_jobs( + schd.workflow, + tasks[:2], + schd.server.curve_auth, + schd.server.client_pub_key_dir + ) + await complete(schd, '10010101T0000Z/foo') + assert len(tdef.elapsed_times) == 1 diff --git a/tests/integration/run_modes/test_simulation.py b/tests/integration/run_modes/test_simulation.py index 4d1cd0b7ed9..4c48a572b15 100644 --- a/tests/integration/run_modes/test_simulation.py +++ b/tests/integration/run_modes/test_simulation.py @@ -1,6 +1,6 @@ # THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. # Copyright (C) NIWA & British Crown (Met Office) & Contributors. - +# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or @@ -16,6 +16,14 @@ """Test the workings of simulation mode""" +from pathlib import Path +import pytest +from pytest import param + +from cylc.flow import commands +from cylc.flow.cycling.iso8601 import ISO8601Point +from cylc.flow.run_modes.simulation import sim_time_check + async def test_started_trigger(flow, reftest, scheduler): """Does the started task output trigger downstream tasks @@ -32,3 +40,430 @@ async def test_started_trigger(flow, reftest, scheduler): ('1/a', None), ('1/b', ('1/a',)) } + + +@pytest.fixture +def monkeytime(monkeypatch): + """Convenience function monkeypatching time.""" + def _inner(time_: int): + monkeypatch.setattr('cylc.flow.task_job_mgr.time', lambda: time_) + monkeypatch.setattr( + 'cylc.flow.run_modes.simulation.time', lambda: time_) + return _inner + + +@pytest.fixture +def run_simjob(monkeytime): + """Run a simulated job to completion. + + Returns the output status. 
+ """ + def _run_simjob(schd, point, task): + itask = schd.pool.get_task(point, task) + itask.state.is_queued = False + monkeytime(0) + schd.task_job_mgr._nonlive_submit_task_jobs( + [itask], schd.workflow, 'simulation') + monkeytime(itask.mode_settings.timeout + 1) + + # Run Time Check + assert sim_time_check( + schd.task_events_mgr, [itask], + schd.workflow_db_mgr + ) is True + + # Capture result process queue. + return itask + return _run_simjob + + +@pytest.fixture(scope='module') +async def sim_time_check_setup( + mod_flow, mod_scheduler, mod_start, mod_one_conf, +): + schd = mod_scheduler(mod_flow({ + 'scheduler': {'cycle point format': '%Y'}, + 'scheduling': { + 'initial cycle point': '1066', + 'graph': { + 'R1': 'one & fail_all & fast_forward', + 'P1Y': 'fail_once & fail_all_submits' + } + }, + 'runtime': { + 'one': {}, + 'fail_all': { + 'simulation': { + 'fail cycle points': 'all', + 'fail try 1 only': False + }, + 'outputs': {'foo': 'bar'} + }, + # This task ought not be finished quickly, but for the speed up + 'fast_forward': { + 'execution time limit': 'PT1M', + 'simulation': {'speedup factor': 2} + }, + 'fail_once': { + 'simulation': { + 'fail cycle points': '1066, 1068', + } + }, + 'fail_all_submits': { + 'simulation': { + 'fail cycle points': '1066', + 'fail try 1 only': False, + } + } + } + })) + async with mod_start(schd): + itasks = schd.pool.get_tasks() + [schd.task_job_mgr._set_retry_timers(i) for i in itasks] + yield schd, itasks + + +def test_false_if_not_running( + sim_time_check_setup, monkeypatch +): + schd, itasks = sim_time_check_setup + + itasks = [i for i in itasks if i.state.status != 'running'] + + # False if task status not running: + assert sim_time_check(schd.task_events_mgr, itasks, '') is False + + +@pytest.mark.parametrize( + 'itask, point, results', + ( + # Task fails this CP, first submit. + param( + 'fail_once', '1066', (True, False, False), + id='only-fail-on-submit-1'), + # Task succeeds this CP, all submits. + param( + 'fail_once', '1067', (False, False, False), + id='do-not-fail-this-cp'), + # Task fails this CP, first submit. + param( + 'fail_once', '1068', (True, False, False), + id='and-another-cp'), + # Task fails this CP, all submits. + param( + 'fail_all_submits', '1066', (True, True, True), + id='fail-all-submits'), + # Task succeeds this CP, all submits. + param( + 'fail_all_submits', '1067', (False, False, False), + id='fail-no-submits'), + ) +) +def test_fail_once(sim_time_check_setup, itask, point, results, monkeypatch): + """A task with a fail cycle point only fails + at that cycle point, and then only on the first submission. + """ + schd, _ = sim_time_check_setup + + itask = schd.pool.get_task( + ISO8601Point(point), itask) + + for i, result in enumerate(results): + itask.try_timers['execution-retry'].num = i + schd.task_job_mgr._nonlive_submit_task_jobs( + [itask], schd.workflow, 'simulation') + assert itask.mode_settings.sim_task_fails is result + + +def test_task_finishes(sim_time_check_setup, monkeytime, caplog): + """...and an appropriate message sent. + + Checks that failed and bar are output if a task is set to fail. + + Does NOT check every possible cause of an outcome - this is done + in unit tests. + """ + schd, _ = sim_time_check_setup + monkeytime(0) + + # Setup a task to fail, submit it. 
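The failure rule exercised here (and pinned down by test_fail_once above) can be summarised as follows; a minimal sketch under stated assumptions, not the real implementation (which lives in cylc.flow.run_modes.simulation)::

    def sim_task_fails(fail_points, point, try_num, fail_try_1_only=True):
        # A simulated task fails only at its configured "fail cycle
        # points" ('all' matches every cycle point)...
        if 'all' not in fail_points and point not in fail_points:
            return False
        # ...and, unless "fail try 1 only" is False, only on the first try:
        return try_num == 1 or not fail_try_1_only
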
+    fail_all_1066 = schd.pool.get_task(ISO8601Point('1066'), 'fail_all')
+    fail_all_1066.state.status = 'running'
+    fail_all_1066.state.is_queued = False
+    schd.task_job_mgr._nonlive_submit_task_jobs(
+        [fail_all_1066], schd.workflow, 'simulation')
+
+    # For the purpose of the test delete the started time set by
+    # _nonlive_submit_task_jobs.
+    fail_all_1066.summary['started_time'] = 0
+
+    # Before simulation time is up:
+    assert sim_time_check(schd.task_events_mgr, [fail_all_1066], '') is False
+
+    # Time's up...
+    monkeytime(12)
+
+    # After simulation time is up it fails and records custom outputs:
+    assert sim_time_check(schd.task_events_mgr, [fail_all_1066], '') is True
+    outputs = fail_all_1066.state.outputs
+    assert outputs.is_message_complete('succeeded') is False
+    assert outputs.is_message_complete('bar') is True
+    assert outputs.is_message_complete('failed') is True
+
+
+def test_task_sped_up(sim_time_check_setup, monkeytime):
+    """Task will speed up by a factor set in config."""
+
+    schd, _ = sim_time_check_setup
+    fast_forward_1066 = schd.pool.get_task(
+        ISO8601Point('1066'), 'fast_forward')
+
+    # Run the job submission method:
+    monkeytime(0)
+    schd.task_job_mgr._nonlive_submit_task_jobs(
+        [fast_forward_1066], schd.workflow, 'simulation')
+    fast_forward_1066.state.is_queued = False
+
+    result = sim_time_check(schd.task_events_mgr, [fast_forward_1066], '')
+    assert result is False
+    monkeytime(29)
+    result = sim_time_check(schd.task_events_mgr, [fast_forward_1066], '')
+    assert result is False
+    monkeytime(31)
+    result = sim_time_check(schd.task_events_mgr, [fast_forward_1066], '')
+    assert result is True
+
+
+async def test_settings_restart(
+    monkeytime, flow, scheduler, start
+):
+    """Check that simulation mode settings are correctly restored
+    upon restart.
+
+    In the case of start time, this is collected from the database
+    field task_jobs.start_time.
+
+    tasks:
+        one: Runs straightforwardly.
+        two: Test case where the database is missing started_time
+            because it was upgraded from an earlier version of Cylc.
+    """
+    id_ = flow({
+        'scheduler': {'cycle point format': '%Y'},
+        'scheduling': {
+            'initial cycle point': '1066',
+            'graph': {
+                'R1': 'one & two'
+            }
+        },
+        'runtime': {
+            'root': {
+                'execution time limit': 'PT1M',
+                'execution retry delays': 'P0Y',
+                'simulation': {
+                    'speedup factor': 1,
+                    'fail cycle points': 'all',
+                    'fail try 1 only': True,
+                }
+            },
+        }
+    })
+    schd = scheduler(id_)
+
+    # Start the workflow:
+    async with start(schd):
+        og_timeouts = {}
+        for itask in schd.pool.get_tasks():
+            schd.task_job_mgr._nonlive_submit_task_jobs(
+                [itask], schd.workflow, 'simulation')
+
+            og_timeouts[itask.identity] = itask.mode_settings.timeout
+
+            # Mock wallclock < sim end timeout
+            monkeytime(itask.mode_settings.timeout - 1)
+            assert sim_time_check(
+                schd.task_events_mgr, [itask], schd.workflow_db_mgr
+            ) is False
+
+    # Stop and restart the scheduler:
+    schd = scheduler(id_)
+    async with start(schd):
+        # Get our tasks and fix wallclock:
+        itasks = schd.pool.get_tasks()
+        for itask in itasks:
+
+            # Check that we haven't got started time & mode settings back:
+            assert itask.summary['started_time'] is None
+            assert itask.mode_settings is None
+
+            if itask.identity == '1066/two':
+                # Delete the database entry for `two`: Ensure that
+                # we don't break sim mode on upgrade to this version of Cylc.
+                schd.workflow_db_mgr.pri_dao.connect().execute(
+                    'UPDATE task_jobs'
+                    '\n SET time_submit = NULL'
+                    '\n WHERE (name == \'two\')'
+                )
+                schd.workflow_db_mgr.process_queued_ops()
+                monkeytime(42)
+                expected_timeout = 102.0
+            else:
+                monkeytime(og_timeouts[itask.identity] - 1)
+                expected_timeout = float(int(og_timeouts[itask.identity]))
+
+            assert sim_time_check(
+                schd.task_events_mgr, [itask], schd.workflow_db_mgr
+            ) is False
+
+            # Check that the itask.mode_settings is now re-created
+            assert itask.mode_settings.__dict__ == {
+                'simulated_run_length': 60.0,
+                'sim_task_fails': True,
+                'timeout': expected_timeout
+            }
+
+
+async def test_settings_reload(
+    flow, scheduler, start, run_simjob
+):
+    """Check that simulation mode settings are changed for future
+    pseudo jobs on reload.
+
+    """
+    id_ = flow({
+        'scheduler': {'cycle point format': '%Y'},
+        'scheduling': {
+            'initial cycle point': '1066',
+            'graph': {'R1': 'one'}
+        },
+        'runtime': {
+            'one': {
+                'execution time limit': 'PT1M',
+                'execution retry delays': 'P0Y',
+                'simulation': {
+                    'speedup factor': 1,
+                    'fail cycle points': 'all',
+                    'fail try 1 only': False,
+                }
+            },
+        }
+    })
+    schd = scheduler(id_)
+    async with start(schd):
+        # Submit first pseudo-job and "run" to failure:
+        one_1066 = schd.pool.get_tasks()[0]
+
+        itask = run_simjob(schd, one_1066.point, 'one')
+        assert itask.state.outputs.is_message_complete('failed') is False
+
+        # Modify config as if reinstall had taken place:
+        conf_file = Path(schd.workflow_run_dir) / 'flow.cylc'
+        conf_file.write_text(
+            conf_file.read_text().replace('False', 'True'))
+
+        # Reload Workflow:
+        await commands.run_cmd(commands.reload_workflow, schd)
+
+        # Submit second pseudo-job and "run" to success:
+        itask = run_simjob(schd, one_1066.point, 'one')
+        assert itask.state.outputs.is_message_complete('succeeded') is True
+
+
+async def test_settings_broadcast(
+    flow, scheduler, start, monkeytime
+):
+    """Assert that broadcasting a change in the settings for a task
+    affects subsequent pseudo-submissions.
+    """
+    id_ = flow({
+        'scheduler': {'cycle point format': '%Y'},
+        'scheduling': {
+            'initial cycle point': '1066',
+            'graph': {'R1': 'one'}
+        },
+        'runtime': {
+            'one': {
+                'execution time limit': 'PT1S',
+                'execution retry delays': '2*PT5S',
+                'simulation': {
+                    'speedup factor': 1,
+                    'fail cycle points': '1066',
+                    'fail try 1 only': False
+                }
+            },
+        }
+    }, defaults=False)
+    schd = scheduler(id_, paused_start=False, run_mode='simulation')
+    async with start(schd) as log:
+        itask = schd.pool.get_tasks()[0]
+        itask.state.is_queued = False
+
+        # Submit the first - the sim task will fail:
+        schd.task_job_mgr._nonlive_submit_task_jobs(
+            [itask], schd.workflow, 'simulation')
+        assert itask.mode_settings.sim_task_fails is True
+
+        # Let task finish.
+ monkeytime(itask.mode_settings.timeout + 1) + assert sim_time_check( + schd.task_events_mgr, [itask], + schd.workflow_db_mgr + ) is True + + # The mode_settings object has been cleared: + assert itask.mode_settings is None + # Change a setting using broadcast: + schd.broadcast_mgr.put_broadcast( + ['1066'], ['one'], [{ + 'simulation': {'fail cycle points': ''} + }]) + # Submit again - result is different: + schd.task_job_mgr._nonlive_submit_task_jobs( + [itask], schd.workflow, 'simulation') + assert itask.mode_settings.sim_task_fails is False + + # Assert Clearing the broadcast works + schd.broadcast_mgr.clear_broadcast() + schd.task_job_mgr._nonlive_submit_task_jobs( + [itask], schd.workflow, 'simulation') + assert itask.mode_settings.sim_task_fails is True + + # Assert that list of broadcasts doesn't change if we submit + # Invalid fail cycle points to broadcast. + itask.mode_settings = None + schd.broadcast_mgr.put_broadcast( + ['1066'], ['one'], [{ + 'simulation': {'fail cycle points': 'higadfuhasgiurguj'} + }]) + schd.task_job_mgr._nonlive_submit_task_jobs( + [itask], schd.workflow, 'simulation') + assert ( + 'Invalid ISO 8601 date representation: higadfuhasgiurguj' + in log.messages[-1]) + + # Check that the invalid broadcast hasn't + # changed the itask sim mode settings: + assert itask.mode_settings.sim_task_fails is True + + schd.broadcast_mgr.put_broadcast( + ['1066'], ['one'], [{ + 'simulation': {'fail cycle points': '1'} + }]) + schd.task_job_mgr._nonlive_submit_task_jobs( + [itask], schd.workflow, 'simulation') + assert ( + 'Invalid ISO 8601 date representation: 1' + in log.messages[-1]) + + # Broadcast tasks will reparse correctly: + schd.broadcast_mgr.put_broadcast( + ['1066'], ['one'], [{ + 'simulation': {'fail cycle points': '1945, 1977, 1066'}, + 'execution retry delays': '3*PT2S' + }]) + schd.task_job_mgr._nonlive_submit_task_jobs( + [itask], schd.workflow, 'simulation') + assert itask.mode_settings.sim_task_fails is True + assert itask.try_timers['execution-retry'].delays == [2.0, 2.0, 2.0] + # n.b. rtconfig should remain unchanged, lest we cancel broadcasts: + assert itask.tdef.rtconfig['execution retry delays'] == [5.0, 5.0] diff --git a/tests/integration/run_modes/test_skip.py b/tests/integration/run_modes/test_skip.py new file mode 100644 index 00000000000..bc9f29116f2 --- /dev/null +++ b/tests/integration/run_modes/test_skip.py @@ -0,0 +1,249 @@ +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +"""Test for skip mode integration. +""" + + +async def test_settings_override_from_broadcast( + flow, scheduler, start, complete, log_filter +): + """Test that skip mode runs differently if settings are modified. 
+ """ + cfg = { + "scheduling": {"graph": {"R1": "foo:failed => bar"}}, + "runtime": { + "foo": { + "events": { + 'handler events': 'failed', + "handlers": 'echo "HELLO"' + } + } + } + } + id_ = flow(cfg) + schd = scheduler(id_, run_mode='live') + + async with start(schd): + schd.broadcast_mgr.put_broadcast( + ['1'], + ['foo'], + [ + {'run mode': 'skip'}, + {'skip': {'outputs': 'failed'}}, + {'skip': {'disable task event handlers': "False"}} + ] + ) + + foo, = schd.pool.get_tasks() + + schd.task_job_mgr.submit_task_jobs( + schd.workflow, + schd.pool.get_tasks(), + schd.server.curve_auth, + schd.server.client_pub_key_dir + ) + # Run mode has changed: + assert foo.platform['name'] == 'skip' + # Output failed emitted: + assert foo.state.status == 'failed' + # After processing events there is a handler in the subprocpool: + schd.task_events_mgr.process_events(schd) + assert 'echo "HELLO"' in schd.proc_pool.is_not_done()[0][0].cmd + + +async def test_broadcast_changes_set_skip_outputs( + flow, scheduler, start, complete, log_filter +): + """When cylc set --out skip is used, task outputs are updated with + broadcasts. + + Skip mode proposal point 4 + https://github.com/cylc/cylc-admin/blob/master/docs/proposal-skip-mode.md + + | The cylc set --out option should accept the skip value which should + | set the outputs defined in [runtime][][skip]outputs. + | The skip keyword should not be allowed in custom outputs. + """ + wid = flow({ + 'scheduling': {'graph': {'R1': 'foo:expect_this'}}, + 'runtime': {'foo': {'outputs': {'expect_this': 'some message'}}} + }) + schd = scheduler(wid, run_mode='live') + async with start(schd): + schd.broadcast_mgr.put_broadcast( + ['1'], + ['foo'], + [{'skip': {'outputs': 'expect_this'}}], + ) + foo, = schd.pool.get_tasks() + schd.pool.set_prereqs_and_outputs( + '1/foo', ['skip'], [], ['all']) + + foo_outputs = foo.state.outputs.get_completed_outputs() + + assert 'expect_this' in foo_outputs + assert foo_outputs['expect_this'] == '(manually completed)' + + +async def test_skip_mode_outputs( + flow, scheduler, reftest, +): + """Nearly a functional test of the output emission of skip mode tasks + + Skip mode proposal point 2 + https://github.com/cylc/cylc-admin/blob/master/docs/proposal-skip-mode.md + """ + graph = """ + # By default, all required outputs will be generated + # plus succeeded if success is optional: + foo? & foo:required_out => success_if_optional & required_outs + + # The outputs submitted and started are always produced + # and do not need to be defined in outputs: + foo:submitted => submitted_always + foo:started => started_always + + # If outputs is specified and does not include either + # succeeded or failed then succeeded will be produced. + opt:optional_out? => optional_outs_produced + + should_fail:fail => did_fail + """ + wid = flow({ + 'scheduling': {'graph': {'R1': graph}}, + 'runtime': { + 'root': { + 'run mode': 'skip', + 'outputs': { + 'required_out': 'the plans have been on display...', + 'optional_out': 'its only four light years away...' 
+                }
+            },
+            'opt': {
+                'skip': {
+                    'outputs': 'optional_out'
+                }
+            },
+            'should_fail': {
+                'skip': {
+                    'outputs': 'failed'
+                }
+            }
+        }
+    })
+    schd = scheduler(wid, run_mode='live', paused_start=False)
+    assert await reftest(schd) == {
+        ('1/did_fail', ('1/should_fail',),),
+        ('1/foo', None,),
+        ('1/opt', None,),
+        ('1/optional_outs_produced', ('1/opt',),),
+        ('1/required_outs', ('1/foo', '1/foo',),),
+        ('1/should_fail', None,),
+        ('1/started_always', ('1/foo',),),
+        ('1/submitted_always', ('1/foo',),),
+        ('1/success_if_optional', ('1/foo', '1/foo',),),
+    }
+
+
+async def test_doesnt_release_held_tasks(
+    one_conf, flow, scheduler, start, log_filter
+):
+    """Point 5 of the proposal
+    https://github.com/cylc/cylc-admin/blob/master/docs/proposal-skip-mode.md
+
+    | Tasks with run mode = skip will continue to abide by the is_held
+    | flag as normal.
+
+    """
+    schd = scheduler(flow(one_conf), run_mode='skip')
+    async with start(schd) as log:
+        itask = schd.pool.get_tasks()[0]
+        msg = 'held tasks shouldn\'t {}'
+
+        # Set task to held and check submission in skip mode doesn't happen:
+        itask.state.is_held = True
+        schd.task_job_mgr.submit_task_jobs(
+            schd.workflow,
+            [itask],
+            schd.server.curve_auth,
+            schd.server.client_pub_key_dir,
+            run_mode=schd.get_run_mode()
+        )
+        assert not log_filter(log, contains='=> running'), msg.format('run')
+        assert not log_filter(log, contains='=> succeeded'), msg.format(
+            'succeed')
+
+        # Release held task and assert that it now skips successfully:
+        schd.pool.release_held_tasks(['1/one'])
+        schd.task_job_mgr.submit_task_jobs(
+            schd.workflow,
+            [itask],
+            schd.server.curve_auth,
+            schd.server.client_pub_key_dir,
+            run_mode=schd.get_run_mode()
+        )
+        assert log_filter(log, contains='=> running'), msg.format('run')
+        assert log_filter(log, contains='=> succeeded'), msg.format('succeed')
+
+
+async def test_force_trigger_doesnt_change_mode(
+    flow, scheduler, run, complete
+):
+    """Point 6 from the skip mode proposal
+    https://github.com/cylc/cylc-admin/blob/master/docs/proposal-skip-mode.md
+
+    | Force-triggering a task will not override the run mode.
+    """
+    wid = flow({
+        'scheduling': {'graph': {'R1': 'slow => skip'}},
+        'runtime': {
+            'slow': {'script': 'sleep 6'},
+            'skip': {'script': 'exit 1', 'run mode': 'skip'}
+        }
+    })
+    schd = scheduler(wid, run_mode='live', paused_start=False)
+    async with run(schd):
+        schd.pool.force_trigger_tasks(['1/skip'], [1])
+        # This will timeout if the skip task has become live on triggering:
+        await complete(schd, '1/skip', timeout=6)
+
+
+async def test_prereqs_marked_satisfied_by_skip_mode(
+    flow, scheduler, start, log_filter, complete
+):
+    """Point 8 from the skip mode proposal
+    https://github.com/cylc/cylc-admin/blob/master/docs/proposal-skip-mode.md
+
+    | When tasks are run in skip mode, the prerequisites which correspond
+    | to the outputs they generate should be marked as "satisfied by skip mode"
+    | rather than "satisfied naturally" for provenance reasons.
+ """ + schd = scheduler(flow({ + 'scheduling': {'graph': {'R1': 'foo => bar'}} + }), run_mode='skip') + + async with start(schd) as log: + foo, = schd.pool.get_tasks() + schd.task_job_mgr.submit_task_jobs( + schd.workflow, + [foo], + schd.server.curve_auth, + schd.server.client_pub_key_dir, + run_mode=schd.get_run_mode() + ) + bar, = schd.pool.get_tasks() + satisfied_message, = bar.state.prerequisites[0]._satisfied.values() + assert satisfied_message == 'satisfied by skip mode' diff --git a/tests/integration/scripts/test_validate_integration.py b/tests/integration/scripts/test_validate_integration.py index dcf697aac36..7f9073f66f4 100644 --- a/tests/integration/scripts/test_validate_integration.py +++ b/tests/integration/scripts/test_validate_integration.py @@ -158,7 +158,7 @@ def test_pre_cylc8(flow, validate, caplog): assert warning in caplog.messages -def test_graph_upgrade_msg_default(flow, validate, caplog): +def test_graph_upgrade_msg_default(flow, validate, caplog, log_filter): """It lists Cycling definitions which need upgrading.""" id_ = flow({ 'scheduler': {'allow implicit tasks': True}, @@ -171,11 +171,11 @@ def test_graph_upgrade_msg_default(flow, validate, caplog): }, }) validate(id_) - assert '[scheduling][dependencies][X]graph' in caplog.messages[0] - assert 'for X in:\n P1Y, R1' in caplog.messages[0] + assert log_filter(caplog, contains='[scheduling][dependencies][X]graph') + assert log_filter(caplog, contains='for X in:\n P1Y, R1') -def test_graph_upgrade_msg_graph_equals(flow, validate, caplog): +def test_graph_upgrade_msg_graph_equals(flow, validate, caplog, log_filter): """It gives a more useful message in special case where graph is key rather than section: @@ -188,11 +188,12 @@ def test_graph_upgrade_msg_graph_equals(flow, validate, caplog): 'scheduling': {'dependencies': {'graph': 'foo => bar'}}, }) validate(id_) - expect = ('[scheduling][dependencies]graph -> [scheduling][graph]R1') - assert expect in caplog.messages[0] + assert log_filter( + caplog, + contains='[scheduling][dependencies]graph -> [scheduling][graph]R1') -def test_graph_upgrade_msg_graph_equals2(flow, validate, caplog): +def test_graph_upgrade_msg_graph_equals2(flow, validate, caplog, log_filter): """Both an implicit R1 and explict reccurance exist: It appends a note. """ @@ -212,4 +213,4 @@ def test_graph_upgrade_msg_graph_equals2(flow, validate, caplog): '\n P1Y, graph' '\n ([scheduling][dependencies]graph moves to [scheduling][graph]R1)' ) - assert expect in caplog.messages[0] + assert log_filter(caplog, contains=expect) diff --git a/tests/integration/test_config.py b/tests/integration/test_config.py index c75797e9cbb..e10ecf6b64f 100644 --- a/tests/integration/test_config.py +++ b/tests/integration/test_config.py @@ -17,13 +17,13 @@ import logging from pathlib import Path import sqlite3 +from textwrap import dedent from typing import Any import pytest from cylc.flow.cfgspec.glbl_cfg import glbl_cfg from cylc.flow.cfgspec.globalcfg import GlobalConfig from cylc.flow.exceptions import ( - PointParsingError, ServiceFileError, WorkflowConfigError, XtriggerConfigError, @@ -274,7 +274,7 @@ def test_parse_special_tasks_families(flow, scheduler, validate, section): } -def test_queue_treated_as_implicit(flow, validate, caplog): +def test_queue_treated_as_implicit(flow, validate, caplog, log_filter): """Tasks in queues but not in runtime generate a warning. 
https://github.com/cylc/cylc-flow/issues/5260 @@ -289,10 +289,9 @@ def test_queue_treated_as_implicit(flow, validate, caplog): } ) validate(id_) - assert ( - 'Queues contain tasks not defined in runtime' - in caplog.records[0].message - ) + assert log_filter( + caplog, + contains='Queues contain tasks not defined in runtime') def test_queue_treated_as_comma_separated(flow, validate): @@ -596,25 +595,36 @@ def _inner(*args, **kwargs): assert get_platforms(glbl_cfg()) == {'localhost', 'foo', 'bar'} -def test_validate_run_mode(flow: Fixture, validate: Fixture): - """Test that Cylc validate will only check simulation mode settings - if validate --mode simulation or dummy. - - Discovered in: - https://github.com/cylc/cylc-flow/pull/6213#issuecomment-2225365825 +def test_nonlive_mode_validation(flow, validate, caplog, log_filter): + """Nonlive tasks return a warning at validation. """ + msg1 = dedent('The following tasks are set to run in skip mode:\n * skip') + wid = flow({ - 'scheduling': {'graph': {'R1': 'mytask'}}, - 'runtime': {'mytask': {'simulation': {'fail cycle points': 'alll'}}} + 'scheduling': { + 'graph': { + 'R1': 'live => skip => simulation => dummy => default' + } + }, + 'runtime': { + 'default': {}, + 'live': {'run mode': 'live'}, + 'skip': { + 'run mode': 'skip', + 'skip': {'outputs': 'started, submitted'} + }, + }, }) - # It's fine with run mode live validate(wid) + assert log_filter(caplog, contains=msg1) - # It fails with run mode simulation: - with pytest.raises(PointParsingError, match='Incompatible value'): - validate(wid, run_mode='simulation') - # It fails with run mode dummy: - with pytest.raises(PointParsingError, match='Incompatible value'): - validate(wid, run_mode='dummy') +def test_skip_forbidden_as_output(flow, validate): + """Run mode names are forbidden as task output names.""" + wid = flow({ + 'scheduling': {'graph': {'R1': 'task'}}, + 'runtime': {'task': {'outputs': {'skip': 'message for skip'}}} + }) + with pytest.raises(WorkflowConfigError, match='message for skip'): + validate(wid) diff --git a/tests/integration/test_simulation.py b/tests/integration/test_simulation.py deleted file mode 100644 index c7e1b42fe27..00000000000 --- a/tests/integration/test_simulation.py +++ /dev/null @@ -1,445 +0,0 @@ -# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. -# Copyright (C) NIWA & British Crown (Met Office) & Contributors. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -from pathlib import Path -import pytest -from pytest import param - -from cylc.flow import commands -from cylc.flow.cycling.iso8601 import ISO8601Point -from cylc.flow.simulation import sim_time_check - - -@pytest.fixture -def monkeytime(monkeypatch): - """Convenience function monkeypatching time.""" - def _inner(time_: int): - monkeypatch.setattr('cylc.flow.task_job_mgr.time', lambda: time_) - monkeypatch.setattr('cylc.flow.simulation.time', lambda: time_) - return _inner - - -@pytest.fixture -def run_simjob(monkeytime): - """Run a simulated job to completion. - - Returns the output status. - """ - def _run_simjob(schd, point, task): - itask = schd.pool.get_task(point, task) - itask.state.is_queued = False - monkeytime(0) - schd.task_job_mgr._simulation_submit_task_jobs( - [itask], schd.workflow) - monkeytime(itask.mode_settings.timeout + 1) - - # Run Time Check - assert sim_time_check( - schd.task_events_mgr, [itask], - schd.workflow_db_mgr - ) is True - - # Capture result process queue. - return itask - return _run_simjob - - -@pytest.fixture(scope='module') -async def sim_time_check_setup( - mod_flow, mod_scheduler, mod_start, mod_one_conf, -): - schd = mod_scheduler(mod_flow({ - 'scheduler': {'cycle point format': '%Y'}, - 'scheduling': { - 'initial cycle point': '1066', - 'graph': { - 'R1': 'one & fail_all & fast_forward', - 'P1Y': 'fail_once & fail_all_submits' - } - }, - 'runtime': { - 'one': {}, - 'fail_all': { - 'simulation': { - 'fail cycle points': 'all', - 'fail try 1 only': False - }, - 'outputs': {'foo': 'bar'} - }, - # This task ought not be finished quickly, but for the speed up - 'fast_forward': { - 'execution time limit': 'PT1M', - 'simulation': {'speedup factor': 2} - }, - 'fail_once': { - 'simulation': { - 'fail cycle points': '1066, 1068', - } - }, - 'fail_all_submits': { - 'simulation': { - 'fail cycle points': '1066', - 'fail try 1 only': False, - } - } - } - })) - async with mod_start(schd): - itasks = schd.pool.get_tasks() - [schd.task_job_mgr._set_retry_timers(i) for i in itasks] - yield schd, itasks - - -def test_false_if_not_running( - sim_time_check_setup, monkeypatch -): - schd, itasks = sim_time_check_setup - - itasks = [i for i in itasks if i.state.status != 'running'] - - # False if task status not running: - assert sim_time_check(schd.task_events_mgr, itasks, '') is False - - -@pytest.mark.parametrize( - 'itask, point, results', - ( - # Task fails this CP, first submit. - param( - 'fail_once', '1066', (True, False, False), - id='only-fail-on-submit-1'), - # Task succeeds this CP, all submits. - param( - 'fail_once', '1067', (False, False, False), - id='do-not-fail-this-cp'), - # Task fails this CP, first submit. - param( - 'fail_once', '1068', (True, False, False), - id='and-another-cp'), - # Task fails this CP, all submits. - param( - 'fail_all_submits', '1066', (True, True, True), - id='fail-all-submits'), - # Task succeeds this CP, all submits. - param( - 'fail_all_submits', '1067', (False, False, False), - id='fail-no-submits'), - ) -) -def test_fail_once(sim_time_check_setup, itask, point, results, monkeypatch): - """A task with a fail cycle point only fails - at that cycle point, and then only on the first submission. 
- """ - schd, _ = sim_time_check_setup - - itask = schd.pool.get_task( - ISO8601Point(point), itask) - - for i, result in enumerate(results): - itask.try_timers['execution-retry'].num = i - schd.task_job_mgr._simulation_submit_task_jobs( - [itask], schd.workflow) - assert itask.mode_settings.sim_task_fails is result - - -def test_task_finishes(sim_time_check_setup, monkeytime, caplog): - """...and an appropriate message sent. - - Checks that failed and bar are output if a task is set to fail. - - Does NOT check every possible cause of an outcome - this is done - in unit tests. - """ - schd, _ = sim_time_check_setup - monkeytime(0) - - # Setup a task to fail, submit it. - fail_all_1066 = schd.pool.get_task(ISO8601Point('1066'), 'fail_all') - fail_all_1066.state.status = 'running' - fail_all_1066.state.is_queued = False - schd.task_job_mgr._simulation_submit_task_jobs( - [fail_all_1066], schd.workflow) - - # For the purpose of the test delete the started time set by - # _simulation_submit_task_jobs. - fail_all_1066.summary['started_time'] = 0 - - # Before simulation time is up: - assert sim_time_check(schd.task_events_mgr, [fail_all_1066], '') is False - - # Time's up... - monkeytime(12) - - # After simulation time is up it Fails and records custom outputs: - assert sim_time_check(schd.task_events_mgr, [fail_all_1066], '') is True - outputs = fail_all_1066.state.outputs - assert outputs.is_message_complete('succeeded') is False - assert outputs.is_message_complete('bar') is True - assert outputs.is_message_complete('failed') is True - - -def test_task_sped_up(sim_time_check_setup, monkeytime): - """Task will speed up by a factor set in config.""" - - schd, _ = sim_time_check_setup - fast_forward_1066 = schd.pool.get_task( - ISO8601Point('1066'), 'fast_forward') - - # Run the job submission method: - monkeytime(0) - schd.task_job_mgr._simulation_submit_task_jobs( - [fast_forward_1066], schd.workflow) - fast_forward_1066.state.is_queued = False - - result = sim_time_check(schd.task_events_mgr, [fast_forward_1066], '') - assert result is False - monkeytime(29) - result = sim_time_check(schd.task_events_mgr, [fast_forward_1066], '') - assert result is False - monkeytime(31) - result = sim_time_check(schd.task_events_mgr, [fast_forward_1066], '') - assert result is True - - -async def test_settings_restart( - monkeytime, flow, scheduler, start -): - """Check that simulation mode settings are correctly restored - upon restart. - - In the case of start time this is collected from the database - from task_jobs.start_time. - - tasks: - one: Runs straighforwardly. - two: Test case where database is missing started_time - because it was upgraded from an earlier version of Cylc. 
- """ - id_ = flow({ - 'scheduler': {'cycle point format': '%Y'}, - 'scheduling': { - 'initial cycle point': '1066', - 'graph': { - 'R1': 'one & two' - } - }, - 'runtime': { - 'root': { - 'execution time limit': 'PT1M', - 'execution retry delays': 'P0Y', - 'simulation': { - 'speedup factor': 1, - 'fail cycle points': 'all', - 'fail try 1 only': True, - } - }, - } - }) - schd = scheduler(id_) - - # Start the workflow: - async with start(schd): - og_timeouts = {} - for itask in schd.pool.get_tasks(): - schd.task_job_mgr._simulation_submit_task_jobs( - [itask], schd.workflow) - - og_timeouts[itask.identity] = itask.mode_settings.timeout - - # Mock wallclock < sim end timeout - monkeytime(itask.mode_settings.timeout - 1) - assert sim_time_check( - schd.task_events_mgr, [itask], schd.workflow_db_mgr - ) is False - - # Stop and restart the scheduler: - schd = scheduler(id_) - async with start(schd): - # Get our tasks and fix wallclock: - itasks = schd.pool.get_tasks() - for itask in itasks: - - # Check that we haven't got started time & mode settings back: - assert itask.summary['started_time'] is None - assert itask.mode_settings is None - - if itask.identity == '1066/two': - # Delete the database entry for `two`: Ensure that - # we don't break sim mode on upgrade to this version of Cylc. - schd.workflow_db_mgr.pri_dao.connect().execute( - 'UPDATE task_jobs' - '\n SET time_submit = NULL' - '\n WHERE (name == \'two\')' - ) - schd.workflow_db_mgr.process_queued_ops() - monkeytime(42) - expected_timeout = 102.0 - else: - monkeytime(og_timeouts[itask.identity] - 1) - expected_timeout = float(int(og_timeouts[itask.identity])) - - assert sim_time_check( - schd.task_events_mgr, [itask], schd.workflow_db_mgr - ) is False - - # Check that the itask.mode_settings is now re-created - assert itask.mode_settings.__dict__ == { - 'simulated_run_length': 60.0, - 'sim_task_fails': True, - 'timeout': expected_timeout - } - - -async def test_settings_reload( - flow, scheduler, start, run_simjob -): - """Check that simulation mode settings are changed for future - pseudo jobs on reload. - - """ - id_ = flow({ - 'scheduler': {'cycle point format': '%Y'}, - 'scheduling': { - 'initial cycle point': '1066', - 'graph': {'R1': 'one'} - }, - 'runtime': { - 'one': { - 'execution time limit': 'PT1M', - 'execution retry delays': 'P0Y', - 'simulation': { - 'speedup factor': 1, - 'fail cycle points': 'all', - 'fail try 1 only': False, - } - }, - } - }) - schd = scheduler(id_) - async with start(schd): - # Submit first psuedo-job and "run" to failure: - one_1066 = schd.pool.get_tasks()[0] - - itask = run_simjob(schd, one_1066.point, 'one') - assert itask.state.outputs.is_message_complete('failed') is False - - # Modify config as if reinstall had taken place: - conf_file = Path(schd.workflow_run_dir) / 'flow.cylc' - conf_file.write_text( - conf_file.read_text().replace('False', 'True')) - - # Reload Workflow: - await commands.run_cmd(commands.reload_workflow, schd) - - # Submit second psuedo-job and "run" to success: - itask = run_simjob(schd, one_1066.point, 'one') - assert itask.state.outputs.is_message_complete('succeeded') is True - - -async def test_settings_broadcast( - flow, scheduler, start, monkeytime -): - """Assert that broadcasting a change in the settings for a task - affects subsequent psuedo-submissions. 
- """ - id_ = flow({ - 'scheduler': {'cycle point format': '%Y'}, - 'scheduling': { - 'initial cycle point': '1066', - 'graph': {'R1': 'one'} - }, - 'runtime': { - 'one': { - 'execution time limit': 'PT1S', - 'execution retry delays': '2*PT5S', - 'simulation': { - 'speedup factor': 1, - 'fail cycle points': '1066', - 'fail try 1 only': False - } - }, - } - }, defaults=False) - schd = scheduler(id_, paused_start=False, run_mode='simulation') - async with start(schd) as log: - itask = schd.pool.get_tasks()[0] - itask.state.is_queued = False - - # Submit the first - the sim task will fail: - schd.task_job_mgr._simulation_submit_task_jobs( - [itask], schd.workflow) - assert itask.mode_settings.sim_task_fails is True - - # Let task finish. - monkeytime(itask.mode_settings.timeout + 1) - assert sim_time_check( - schd.task_events_mgr, [itask], - schd.workflow_db_mgr - ) is True - - # The mode_settings object has been cleared: - assert itask.mode_settings is None - # Change a setting using broadcast: - schd.broadcast_mgr.put_broadcast( - ['1066'], ['one'], [{ - 'simulation': {'fail cycle points': ''} - }]) - # Submit again - result is different: - schd.task_job_mgr._simulation_submit_task_jobs( - [itask], schd.workflow) - assert itask.mode_settings.sim_task_fails is False - - # Assert Clearing the broadcast works - schd.broadcast_mgr.clear_broadcast() - schd.task_job_mgr._simulation_submit_task_jobs( - [itask], schd.workflow) - assert itask.mode_settings.sim_task_fails is True - - # Assert that list of broadcasts doesn't change if we submit - # Invalid fail cycle points to broadcast. - itask.mode_settings = None - schd.broadcast_mgr.put_broadcast( - ['1066'], ['one'], [{ - 'simulation': {'fail cycle points': 'higadfuhasgiurguj'} - }]) - schd.task_job_mgr._simulation_submit_task_jobs( - [itask], schd.workflow) - assert ( - 'Invalid ISO 8601 date representation: higadfuhasgiurguj' - in log.messages[-1]) - - schd.broadcast_mgr.put_broadcast( - ['1066'], ['one'], [{ - 'simulation': {'fail cycle points': '1'} - }]) - schd.task_job_mgr._simulation_submit_task_jobs( - [itask], schd.workflow) - assert ( - 'Invalid ISO 8601 date representation: 1' - in log.messages[-1]) - - # Broadcast tasks will reparse correctly: - schd.broadcast_mgr.put_broadcast( - ['1066'], ['one'], [{ - 'simulation': {'fail cycle points': '1945, 1977, 1066'}, - 'execution retry delays': '3*PT2S' - }]) - schd.task_job_mgr._simulation_submit_task_jobs( - [itask], schd.workflow) - assert itask.mode_settings.sim_task_fails is True - assert itask.try_timers['execution-retry'].delays == [2.0, 2.0, 2.0] - # n.b. 
rtconfig should remain unchanged, lest we cancel broadcasts: - assert itask.tdef.rtconfig['execution retry delays'] == [5.0, 5.0] diff --git a/tests/integration/test_task_events_mgr.py b/tests/integration/test_task_events_mgr.py index 7ac12274d7b..08ed816414d 100644 --- a/tests/integration/test_task_events_mgr.py +++ b/tests/integration/test_task_events_mgr.py @@ -152,7 +152,7 @@ async def test__always_insert_task_job( schd.pool.get_tasks(), schd.server.curve_auth, schd.server.client_pub_key_dir, - is_simulation=False + run_mode='live' ) # Both tasks are in a waiting state: diff --git a/tests/integration/test_task_pool.py b/tests/integration/test_task_pool.py index 0428e3af6c1..35ad044d006 100644 --- a/tests/integration/test_task_pool.py +++ b/tests/integration/test_task_pool.py @@ -645,7 +645,8 @@ def list_tasks(schd): ('1', 'z', 'waiting'), ], [ - {('1', 'a', 'succeeded'): 'satisfied naturally'}, + {('1', 'a', 'succeeded'): + 'satisfied by simulation mode'}, {('1', 'b', 'succeeded'): False}, {('1', 'c', 'succeeded'): False}, ], @@ -673,7 +674,8 @@ def list_tasks(schd): ('1', 'z', 'waiting'), ], [ - {('1', 'a', 'succeeded'): 'satisfied naturally'}, + {('1', 'a', 'succeeded'): + 'satisfied by simulation mode'}, {('1', 'b', 'succeeded'): False}, ], id='removed' @@ -768,7 +770,8 @@ async def test_restart_prereqs( ('1', 'z', 'waiting'), ], [ - {('1', 'a', 'succeeded'): 'satisfied naturally'}, + {('1', 'a', 'succeeded'): + 'satisfied by simulation mode'}, {('1', 'b', 'succeeded'): False}, {('1', 'c', 'succeeded'): False}, ], @@ -796,7 +799,8 @@ async def test_restart_prereqs( ('1', 'z', 'waiting'), ], [ - {('1', 'a', 'succeeded'): 'satisfied naturally'}, + {('1', 'a', 'succeeded'): + 'satisfied by simulation mode'}, {('1', 'b', 'succeeded'): False}, ], id='removed' @@ -895,7 +899,7 @@ async def _test_restart_prereqs_sat(): for prereq in task_c.state.prerequisites for key, satisfied in prereq.items() ) == [ - ('1', 'a', 'succeeded', 'satisfied naturally'), + ('1', 'a', 'succeeded', 'satisfied by simulation mode'), ('1', 'b', 'succeeded', 'satisfied from database') ] @@ -912,7 +916,7 @@ async def _test_restart_prereqs_sat(): for prereq in task_c_prereqs for condition in prereq.conditions ) == [ - ('1/a', True, 'satisfied naturally'), + ('1/a', True, 'satisfied by simulation mode'), ('1/b', True, 'satisfied from database'), ] @@ -1586,6 +1590,75 @@ async def test_set_outputs_future( assert log_filter(log, contains="completed output y") +async def test_set_outputs_from_skip_settings( + flow, + scheduler, + start, + log_filter, + validate +): + """Check working of ``cylc set --out=skip``: + + 1. --out=skip can be used to set all required outputs. + 2. --out=skip,other_output can be used to set other outputs. + + """ + id_ = flow( + { + 'scheduler': { + 'allow implicit tasks': 'True', + }, + 'scheduling': { + 'cycling mode': 'integer', + 'initial cycle point': 1, + 'final cycle point': 2, + 'graph': { + 'P1': """ + a => after_asucceeded + a:x => after_ax + a:y? => after_ay + """ + } + }, + 'runtime': { + 'a': { + 'outputs': { + 'x': 'xebec', + 'y': 'yacht' + }, + 'skip': {'outputs': 'x'} + } + } + } + ) + validate(id_) + schd = scheduler(id_) + + async with start(schd): + + # it should start up with just tasks a: + assert pool_get_task_ids(schd.pool) == ['1/a', '2/a'] + + # setting 1/a output to skip should set output x, but not + # y (because y is optional). 
+ schd.pool.set_prereqs_and_outputs( + ['1/a'], ['skip'], None, ['all']) + assert (pool_get_task_ids(schd.pool) == [ + '1/after_asucceeded', + '1/after_ax', + '2/a']) + + # You should be able to set skip as part of a list of outputs: + schd.pool.set_prereqs_and_outputs( + ['2/a'], ['skip', 'y'], None, ['all']) + assert (pool_get_task_ids(schd.pool) == [ + '1/after_asucceeded', + '1/after_ax', + '2/after_asucceeded', + '2/after_ax', + '2/after_ay']) + + async def test_prereq_satisfaction( flow, scheduler, diff --git a/tests/integration/utils/flow_tools.py b/tests/integration/utils/flow_tools.py index fef15e3e3dc..3da32733ffc 100644 --- a/tests/integration/utils/flow_tools.py +++ b/tests/integration/utils/flow_tools.py @@ -115,6 +115,10 @@ def __make_scheduler(id_: str, **opts: Any) -> Scheduler: schd.workflow_db_mgr.on_workflow_shutdown() +def caplogprinter(caplog): + _ = [print(i) for i in caplog.messages] + + @asynccontextmanager async def _start_flow( caplog: Optional[pytest.LogCaptureFixture], @@ -124,6 +128,8 @@ async def _start_flow( """Start a scheduler but don't set the main loop running.""" if caplog: caplog.set_level(level, CYLC_LOG) + # Debug functionality + caplog.print = lambda: caplogprinter(caplog) await schd.install() @@ -154,6 +160,8 @@ async def _run_flow( """Start a scheduler and set the main loop running.""" if caplog: caplog.set_level(level, CYLC_LOG) + # Debug functionality + caplog.print = lambda: caplogprinter(caplog) await schd.install() diff --git a/tests/unit/run_modes/test_dummy.py b/tests/unit/run_modes/test_dummy.py new file mode 100644 index 00000000000..998c13767c9 --- /dev/null +++ b/tests/unit/run_modes/test_dummy.py @@ -0,0 +1,40 @@ +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +"""Tests for utilities supporting dummy mode. +""" +import pytest +from cylc.flow.run_modes.dummy import build_dummy_script + + +@pytest.mark.parametrize( + 'fail_one_time_only', (True, False) +) +def test_build_dummy_script(fail_one_time_only): + rtc = { + 'outputs': {'foo': '1', 'bar': '2'}, + 'simulation': { + 'fail try 1 only': fail_one_time_only, + 'fail cycle points': '1', + } + } + result = build_dummy_script(rtc, 60) + assert result.split('\n') == [ + 'sleep 60', + "cylc message '1'", + "cylc message '2'", + f"cylc__job__dummy_result {str(fail_one_time_only).lower()}" + " 1 || exit 1" + ] diff --git a/tests/unit/run_modes/test_nonlive.py b/tests/unit/run_modes/test_nonlive.py new file mode 100644 index 00000000000..71695f2c96b --- /dev/null +++ b/tests/unit/run_modes/test_nonlive.py @@ -0,0 +1,51 @@ +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. 
+# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +"""Unit tests for utilities supporting all nonlive modes +""" + +from types import SimpleNamespace + +from cylc.flow.run_modes.nonlive import run_mode_validate_checks + + +def test_run_mode_validate_checks(monkeypatch, caplog): + """It warns us if we've set a task config to nonlive mode. + + (And not otherwise) + + Point 3 from the skip mode proposal + https://github.com/cylc/cylc-admin/blob/master/docs/proposal-skip-mode.md + + | If the run mode is set to simulation or skip in the workflow + | configuration, then cylc validate and cylc lint should produce + | warning (similar to development features in other languages / systems). + """ + taskdefs = { + f'{run_mode}_task': SimpleNamespace( + rtconfig={'run mode': run_mode}, + name=f'{run_mode}_task' + ) + for run_mode + in ['live', 'skip'] + } + + run_mode_validate_checks(taskdefs) + + message = caplog.messages[0] + + assert 'skip mode:\n * skip_task' in message + assert ' live mode' not in message # Avoid matching "non-live mode" + assert 'workflow mode' not in message diff --git a/tests/unit/test_simulation.py b/tests/unit/run_modes/test_simulation.py similarity index 86% rename from tests/unit/test_simulation.py rename to tests/unit/run_modes/test_simulation.py index 920a872503a..109174c8b43 100644 --- a/tests/unit/test_simulation.py +++ b/tests/unit/run_modes/test_simulation.py @@ -20,9 +20,8 @@ from cylc.flow.cycling.integer import IntegerPoint from cylc.flow.cycling.iso8601 import ISO8601Point -from cylc.flow.simulation import ( +from cylc.flow.run_modes.simulation import ( parse_fail_cycle_points, - build_dummy_script, disable_platforms, get_simulated_run_len, sim_task_failed, @@ -56,27 +55,6 @@ def test_get_simulated_run_len( assert get_simulated_run_len(rtc) == 3600 -@pytest.mark.parametrize( - 'fail_one_time_only', (True, False) -) -def test_set_simulation_script(fail_one_time_only): - rtc = { - 'outputs': {'foo': '1', 'bar': '2'}, - 'simulation': { - 'fail try 1 only': fail_one_time_only, - 'fail cycle points': '1', - } - } - result = build_dummy_script(rtc, 60) - assert result.split('\n') == [ - 'sleep 60', - "cylc message '1'", - "cylc message '2'", - f"cylc__job__dummy_result {str(fail_one_time_only).lower()}" - " 1 || exit 1" - ] - - @pytest.mark.parametrize( 'rtc, expect', ( ({'platform': 'skarloey'}, 'localhost'), @@ -100,7 +78,7 @@ def test_disable_platforms(rtc, expect): def test_parse_fail_cycle_points(set_cycling_type): before = ['2', '4'] set_cycling_type() - assert parse_fail_cycle_points(before) == [ + assert parse_fail_cycle_points(before, ['']) == [ IntegerPoint(i) for i in before ] diff --git a/tests/unit/run_modes/test_skip.py b/tests/unit/run_modes/test_skip.py new file mode 100644 index 00000000000..f5ad89381d7 --- /dev/null +++ b/tests/unit/run_modes/test_skip.py @@ -0,0 +1,101 @@ +# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE. +# Copyright (C) NIWA & British Crown (Met Office) & Contributors. 
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""Unit tests for utilities supporting skip mode.
+"""
+import pytest
+from pytest import param, raises
+from types import SimpleNamespace
+
+from cylc.flow.exceptions import WorkflowConfigError
+from cylc.flow.run_modes.skip import check_task_skip_config, process_outputs
+
+
+@pytest.mark.parametrize(
+    'conf',
+    (
+        param({}, id='no-skip-config'),
+        param({'skip': {'outputs': []}}, id='no-skip-outputs'),
+        param({'skip': {'outputs': ['foo1', 'failed']}}, id='ok-skip-outputs'),
+    )
+)
+def test_good_check_task_skip_config(conf):
+    """It returns None if the problems this function checks are not present.
+    """
+    tdef = SimpleNamespace(rtconfig=conf)
+    tdef.name = 'foo'
+    assert check_task_skip_config(tdef) is None
+
+
+def test_raises_check_task_skip_config():
+    """It raises an error if succeeded and failed are both set.
+    """
+    tdef = SimpleNamespace(
+        rtconfig={'skip': {'outputs': ['foo1', 'failed', 'succeeded']}}
+    )
+    tdef.name = 'foo'
+    with raises(WorkflowConfigError, match='succeeded AND failed'):
+        check_task_skip_config(tdef)
+
+
+@pytest.mark.parametrize(
+    'outputs, required, expect',
+    (
+        param([], [], ['succeeded'], id='implicit-succeeded'),
+        param(
+            ['succeeded'], ['succeeded'], ['succeeded'],
+            id='explicit-succeeded'
+        ),
+        param(['submitted'], [], ['succeeded'], id='only-1-submit'),
+        param(
+            ['foo', 'bar', 'baz', 'qux'],
+            ['bar', 'qux'],
+            ['bar', 'qux', 'succeeded'],
+            id='required-only'
+        ),
+        param(
+            ['foo', 'baz'],
+            ['bar', 'qux'],
+            ['succeeded'],
+            id='no-required'
+        ),
+        param(
+            ['failed'],
+            [],
+            ['failed'],
+            id='explicit-failed'
+        ),
+    )
+)
+def test_process_outputs(outputs, required, expect):
+    """Check that skip output processing:
+
+    1. Doesn't send submitted twice.
+    2. Sends every required output.
+    3. If failed is set, sends failed.
+    4. If failed is not set, sends succeeded.
+    """
+    # Create a mocked-up task proxy:
+    rtconf = {'skip': {'outputs': outputs}}
+    itask = SimpleNamespace(
+        tdef=SimpleNamespace(
+            rtconfig=rtconf),
+        state=SimpleNamespace(
+            outputs=SimpleNamespace(
+                iter_required_messages=lambda exclude: iter(required),
+                _message_to_trigger={v: v for v in required}
+            ))))
+
+    assert process_outputs(itask, rtconf) == ['submitted', 'started'] + expect
diff --git a/tests/unit/scripts/test_lint.py b/tests/unit/scripts/test_lint.py
index 20ff738a1ac..1486765e68b 100644
--- a/tests/unit/scripts/test_lint.py
+++ b/tests/unit/scripts/test_lint.py
@@ -182,7 +182,10 @@
     [[[directives]]]
         -l walltime = 666
 [[baz]]
+    run mode = skip
     platform = `no backticks`
+    [[[skip]]]
+        outputs = succeeded, failed
 """ + (
     '\nscript = the quick brown fox jumps over the lazy dog until it becomes '
     'clear that this line is longer than the default 130 character limit.'
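
# Aside: the parametrized cases in test_process_outputs above pin down how
# skip mode decides which outputs a task emits. The sketch below is
# illustrative only; it is not the actual
# cylc.flow.run_modes.skip.process_outputs implementation, and the function
# name is hypothetical:

def sketch_skip_mode_outputs(skip_outputs, required_outputs):
    """Resolve the outputs a skip-mode task emits (illustrative sketch).

    skip_outputs: outputs listed under [runtime][<task>][skip]outputs.
    required_outputs: outputs the graph requires of this task.
    """
    # submitted and started are always produced, exactly once, even if
    # they also appear in the configured skip outputs.
    result = ['submitted', 'started']
    # Configured custom outputs are emitted only where the graph requires
    # them; succeeded/failed are handled separately below.
    result += [
        out for out in skip_outputs
        if out in required_outputs
        and out not in ('succeeded', 'failed')
        and out not in result
    ]
    # failed is emitted only if explicitly listed; otherwise succeeded is
    # implied, whether or not it was listed.
    result.append('failed' if 'failed' in skip_outputs else 'succeeded')
    return result

# e.g. sketch_skip_mode_outputs(['foo', 'bar', 'baz', 'qux'], ['bar', 'qux'])
# returns ['submitted', 'started', 'bar', 'qux', 'succeeded'], matching the
# 'required-only' case above.
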
diff --git a/tests/unit/test_config.py b/tests/unit/test_config.py index 9cdcee89003..b830228103a 100644 --- a/tests/unit/test_config.py +++ b/tests/unit/test_config.py @@ -17,8 +17,8 @@ import os import sys from optparse import Values -from typing import Any, Callable, Dict, List, Optional, Tuple, Type -from pathlib import Path +from typing import ( + TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Type) import pytest import logging from types import SimpleNamespace @@ -47,8 +47,9 @@ from cylc.flow.cycling.iso8601 import ISO8601Point - -Fixture = Any +if TYPE_CHECKING: + from pathlib import Path + Fixture = Any def _tmp_flow_config(tmp_run_dir: Callable): @@ -60,8 +61,8 @@ def _tmp_flow_config(tmp_run_dir: Callable): Returns the path to the flow file. """ - def __tmp_flow_config(id_: str, config: str) -> Path: - run_dir: Path = tmp_run_dir(id_) + def __tmp_flow_config(id_: str, config: str) -> 'Path': + run_dir: 'Path' = tmp_run_dir(id_) flow_file = run_dir / WorkflowFiles.FLOW_FILE flow_file.write_text(config) return flow_file @@ -82,7 +83,7 @@ class TestWorkflowConfig: """Test class for the Cylc WorkflowConfig object.""" def test_xfunction_imports( - self, mock_glbl_cfg: Fixture, tmp_path: Path): + self, mock_glbl_cfg: 'Fixture', tmp_path: 'Path'): """Test for a workflow configuration with valid xtriggers""" mock_glbl_cfg( 'cylc.flow.platforms.glbl_cfg', @@ -175,7 +176,8 @@ def test_xfunction_attribute_error(self, mock_glbl_cfg, tmp_path): with pytest.raises(XtriggerConfigError) as excinfo: WorkflowConfig(workflow="capybara_workflow", fpath=flow_file, options=SimpleNamespace()) - assert "module 'capybara' has no attribute 'capybara'" in str(excinfo.value) + assert "module 'capybara' has no attribute 'capybara'" in str( + excinfo.value) def test_xfunction_not_callable(self, mock_glbl_cfg, tmp_path): """Test for error when a xtrigger function is not callable.""" @@ -358,7 +360,7 @@ def test_process_icp( expected_icp: Optional[str], expected_opt_icp: Optional[str], expected_err: Optional[Tuple[Type[Exception], str]], - monkeypatch: pytest.MonkeyPatch, set_cycling_type: Fixture + monkeypatch: pytest.MonkeyPatch, set_cycling_type: 'Fixture' ) -> None: """Test WorkflowConfig.process_initial_cycle_point(). @@ -445,7 +447,7 @@ def test_process_startcp( starttask: Optional[str], expected: str, expected_err: Optional[Tuple[Type[Exception], str]], - monkeypatch: pytest.MonkeyPatch, set_cycling_type: Fixture + monkeypatch: pytest.MonkeyPatch, set_cycling_type: 'Fixture' ) -> None: """Test WorkflowConfig.process_start_cycle_point(). @@ -648,7 +650,7 @@ def test_process_fcp( options_fcp: Optional[str], expected_fcp: Optional[str], expected_err: Optional[Tuple[Type[Exception], str]], - set_cycling_type: Fixture + set_cycling_type: 'Fixture' ) -> None: """Test WorkflowConfig.process_final_cycle_point(). 
@@ -671,7 +673,7 @@ def test_process_fcp(
         initial_point=loader.get_point(
             scheduling_cfg['initial cycle point']
         ).standardise(),
-        final_point = None,
+        final_point=None,
         options=SimpleNamespace(fcp=options_fcp),
     )
@@ -812,7 +814,7 @@ def test_stopcp_after_fcp(
     cycle point is handled correctly."""
     caplog.set_level(logging.WARNING, CYLC_LOG)
     id_ = 'cassini'
-    flow_file: Path = tmp_flow_config(id_, f"""
+    flow_file: 'Path' = tmp_flow_config(id_, f"""
 [scheduler]
     allow implicit tasks = True
 [scheduling]
@@ -1366,7 +1368,7 @@ def test_implicit_tasks(
     """
     # Setup
     id_ = 'rincewind'
-    flow_file: Path = tmp_flow_config(id_, f"""
+    flow_file: 'Path' = tmp_flow_config(id_, f"""
 [scheduler]
     {
         f'allow implicit tasks = {allow_implicit_tasks}'
@@ -1470,7 +1472,7 @@ def test_zero_interval(
     """Test that a zero-duration recurrence with >1 repetition gets an
     appropriate warning."""
     id_ = 'ordinary'
-    flow_file: Path = tmp_flow_config(id_, f"""
+    flow_file: 'Path' = tmp_flow_config(id_, f"""
 [scheduler]
     UTC mode = True
     allow implicit tasks = True
@@ -1514,7 +1516,7 @@ def test_chain_expr(
     Note the order matters when "nominal" units (years, months) are used.
     """
     id_ = 'osgiliath'
-    flow_file: Path = tmp_flow_config(id_, f"""
+    flow_file: 'Path' = tmp_flow_config(id_, f"""
 [scheduler]
     UTC mode = True
     allow implicit tasks = True
@@ -1693,7 +1695,7 @@ def test__warn_if_queues_have_implicit_tasks(caplog):
     ]
 )
 def test_cylc_env_at_parsing(
-    tmp_path: Path,
+    tmp_path: 'Path',
     monkeypatch: pytest.MonkeyPatch,
     installed,
     run_dir,
diff --git a/tests/unit/test_platforms.py b/tests/unit/test_platforms.py
index 68d08cb8d16..3167afabf70 100644
--- a/tests/unit/test_platforms.py
+++ b/tests/unit/test_platforms.py
@@ -34,6 +34,7 @@
     PlatformLookupError,
     GlobalConfigError
 )
+from cylc.flow.task_state import RunMode
 
 
 PLATFORMS = {
@@ -470,9 +471,9 @@ def test_get_install_target_to_platforms_map(
     for install_target in _map:
         _map[install_target] = sorted(_map[install_target],
                                       key=lambda k: k['name'])
+    result.pop('localhost')
     assert result == expected_map
 
-
 @pytest.mark.parametrize(
     'platform, job, remote, expect',
     [
diff --git a/tests/unit/test_task_outputs.py b/tests/unit/test_task_outputs.py
index 70a297edff5..840a4229a8f 100644
--- a/tests/unit/test_task_outputs.py
+++ b/tests/unit/test_task_outputs.py
@@ -274,7 +274,7 @@ def test_iter_required_outputs():
     assert set(outputs.iter_required_messages()) == set()
 
     # the preconditions expiry/submitted are excluded from this logic when
-    # defined as optional
+    # defined as optional:
     outputs = TaskOutputs(
         tdef(
             {TASK_OUTPUT_SUCCEEDED, 'x', 'y'},
@@ -288,6 +288,32 @@ def test_iter_required_outputs():
         'y',
     }
 
+    # Get all outputs required for the success path (i.e. excluding the
+    # failure branch, what is still required?):
+    outputs = TaskOutputs(
+        tdef(
+            {},
+            {'a', 'succeeded', 'b', 'y', 'failed', 'x'},
+            '(x and y and failed) or (a and b and succeeded)'
+        )
+    )
+
+    # Excluding succeeded leaves us with the failure branch's required outputs:
+    assert set(outputs.iter_required_messages(
+        exclude=TASK_OUTPUT_SUCCEEDED)) == {
+            TASK_OUTPUT_FAILED, 'x', 'y',}
+
+    # Excluding failed leaves us with the success branch's required outputs:
+    assert set(outputs.iter_required_messages(
+        exclude=TASK_OUTPUT_FAILED)) == {
+            TASK_OUTPUT_SUCCEEDED, 'a', 'b',}
+
+    # Excluding an arbitrary output leaves us with required outputs
+    # from another branch:
+    assert set(outputs.iter_required_messages(
+        exclude='a')) == {
+            TASK_OUTPUT_FAILED, 'x', 'y',}
+
 
 def test_get_trigger_completion_variable_maps():
     """It should return a bi-map of triggers
to compvars.""" diff --git a/tests/unit/test_task_remote_mgr.py b/tests/unit/test_task_remote_mgr.py index c41e415eba3..61cdcce2bc5 100644 --- a/tests/unit/test_task_remote_mgr.py +++ b/tests/unit/test_task_remote_mgr.py @@ -14,6 +14,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +from contextlib import suppress from pathlib import Path from time import sleep import pytest @@ -229,6 +230,9 @@ def flatten_install_targets_map(itm): install_targets_map = TaskRemoteMgr._get_remote_tidy_targets( set(platform_names), set(install_targets)) + with suppress(KeyError): + install_targets_map.pop('localhost') + assert ( expect['targets'] == flatten_install_targets_map(install_targets_map)) diff --git a/tests/unit/test_task_state.py b/tests/unit/test_task_state.py index 4b5d796a043..1a2041fcba5 100644 --- a/tests/unit/test_task_state.py +++ b/tests/unit/test_task_state.py @@ -15,11 +15,13 @@ # along with this program. If not, see . import pytest +from types import SimpleNamespace from cylc.flow.taskdef import TaskDef from cylc.flow.cycling.integer import IntegerSequence, IntegerPoint from cylc.flow.task_trigger import Dependency, TaskTrigger from cylc.flow.task_state import ( + RunMode, TaskState, TASK_STATUS_PREPARING, TASK_STATUS_SUBMIT_FAILED, @@ -118,3 +120,31 @@ def test_task_state_order(): assert tstate.is_gte(TASK_STATUS_SUBMITTED) assert not tstate.is_gt(TASK_STATUS_RUNNING) assert not tstate.is_gte(TASK_STATUS_RUNNING) + + +@pytest.mark.parametrize( + 'itask_run_mode, disable_handlers, expect', + ( + ('live', True, False), + ('live', False, False), + ('dummy', True, False), + ('dummy', False, False), + ('simulation', True, True), + ('simulation', False, True), + ('skip', True, True), + ('skip', False, False), + ) +) +def test_disable_task_event_handlers(itask_run_mode, disable_handlers, expect): + """Conditions under which task event handlers should not be used. + """ + # Construct a fake itask object: + itask = SimpleNamespace( + run_mode=itask_run_mode, + platform={'disable task event handlers': disable_handlers}, + tdef=SimpleNamespace( + rtconfig={ + 'skip': {'disable task event handlers': disable_handlers}}) + ) + # Check method: + assert RunMode.disable_task_event_handlers(itask) is expect
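
# Aside: the parametrization in test_disable_task_event_handlers above fixes
# the handler-gating rules per run mode. The sketch below is illustrative
# only; it is not the actual RunMode.disable_task_event_handlers source, the
# function name is hypothetical, and (per the mock above) the real method
# also sees the task's platform config, which this sketch omits:

def sketch_disable_task_event_handlers(run_mode, skip_disable_handlers):
    """Should task event handlers be suppressed? (illustrative sketch)

    run_mode: one of 'live', 'dummy', 'simulation' or 'skip'.
    skip_disable_handlers: the task's [runtime][<task>][skip]
        'disable task event handlers' setting (True by default).
    """
    if run_mode == 'simulation':
        # Simulation mode never fires task event handlers.
        return True
    if run_mode == 'skip':
        # Skip mode suppresses handlers unless the user re-enables them.
        return skip_disable_handlers
    # Live and dummy tasks fire handlers as normal.
    return False

# e.g. sketch_disable_task_event_handlers('skip', False) is False, matching
# the ('skip', False, False) row of the table above.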